the-stack_106_15259
__version__ = '0.3.0'
__author__ = 'hallazzang'
__author_email__ = '[email protected]'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2015-2018 by hallazzang'
USER_AGENT = 'pyneis/{}'.format(__version__)
from .client import NeisClient
from .domain import get_proper_domain
__all__ = [
'NeisClient',
'get_proper_domain'
]
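# Usage sketch: the public surface exported above can be imported directly;
# NeisClient's constructor arguments are not documented in this file, so only
# the import itself is shown:
#   from pyneis import NeisClient, get_proper_domain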
the-stack_106_15263
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
###########################
# HVCL Sumin
###########################
# import Vispark RDD
from pyspark.vislib.vispark import VisparkRDD
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
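# Illustrative note: these defaults are applied with SparkConf.setIfMissing() in
# _do_init(), so values set explicitly by the user always win. A minimal sketch:
#   conf = SparkConf().set("spark.rdd.compress", "false")
#   sc = SparkContext(conf=conf)   # keeps "false"; only unset keys fall back to DEFAULT_CONFIGS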
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario where the JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
# disable randomness of hash of string in worker, if this is not
# launched by spark-submit
self.environment["PYTHONHASHSEED"] = "0"
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
self._accumulatorServer = accumulators._start_update_server()
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
if sys.version_info < (2, 7):
warnings.warn("Support for Python 2.6 is deprecated as of Spark 2.0.0")
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = set()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. For performance,
using xrange is recommended if the input represents a range.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Calling the Java parallelize() method with an ArrayList is too slow,
# because it sends O(n) Py4J commands. As an alternative, serialized
# objects are written to a file and loaded through textFile().
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
serializer.dump_stream(c, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices)
finally:
# readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
return RDD(jrdd, self, serializer)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
###################
# HVCL Sumin
# Modified wholeTextFiles to support Vispark Tag
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True, tag=None):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
if tag is None:
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
elif tag=='VISPARK':
minPartitions = minPartitions or self.defaultParallelism
tmp_RDD = RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
return self.vispark(target_rdd=tmp_RDD,path=path)
###################
# HVCL Sumin
# Modified binaryFiles to support Vispark Tag
def binaryFiles(self, path, minPartitions=None, tag=None, halo=0):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred; large files are also allowed, but
may cause poor performance.
"""
if tag is None:
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
elif tag=='VISPARK':
minPartitions = minPartitions or self.defaultMinPartitions
tmp_RDD = RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
#PairDeserializer(UTF8Deserializer(), NoOpSerializer),tag='GPU')
return self.vispark(target_rdd=tmp_RDD,path=path, halo=halo)
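# HVCL usage sketch (the path and halo value are illustrative assumptions): reading
# with the Vispark tag returns a VisparkRDD instead of a plain pair RDD:
#   vrdd = sc.binaryFiles("hdfs:///data/volume", tag='VISPARK', halo=1)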
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
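# Minimal usage sketch for binaryRecords (the path and the 8-byte little-endian
# double layout are illustrative assumptions):
#   import struct
#   recs = sc.binaryRecords("hdfs:///data/doubles.bin", recordLength=8)
#   doubles = recs.map(lambda b: struct.unpack("<d", b)[0])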
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
:param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
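# Minimal usage sketch for sequenceFile (the path is illustrative; the Writable
# classes are the ones named in the docstring above):
#   rdd = sc.sequenceFile("hdfs:///data/seq-dir",
#                         keyClass="org.apache.hadoop.io.Text",
#                         valueClass="org.apache.hadoop.io.LongWritable")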
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
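# Minimal usage sketch: the broadcast value is read on executors through .value:
#   lookup = sc.broadcast({"a": 1, "b": 2})
#   sc.parallelize(["a", "b"]).map(lambda k: lookup.value[k]).collect()   # -> [1, 2]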
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
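# Minimal usage sketch: tasks add to the accumulator, only the driver reads .value:
#   acc = sc.accumulator(0)
#   sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
#   acc.value   # -> 10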
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
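# Minimal usage sketch: route jobs submitted from this thread to a fair-scheduler
# pool (the pool name is illustrative):
#   sc.setLocalProperty("spark.scheduler.pool", "low_priority")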
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
port = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(port, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
self.profiler_collector.show_profiles()
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
self.profiler_collector.dump_profiles(path)
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
########################
# HVCL Sumin
# Vispark constructor
def vispark(self, numSlices=None, target_rdd=None, path=None, name=None, halo=0, raw_to_array=False):
return VisparkRDD(target_rdd=target_rdd, path=path, name=name, halo=halo, raw_to_array=raw_to_array)
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
the-stack_106_15268
# Copyright (c) 2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from django_fsm import transition
from django.core.exceptions import ValidationError
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from silver.models.billing_entities import Provider
from silver.models.documents.base import (
BillingDocumentBase, BillingDocumentManager, BillingDocumentQuerySet)
from silver.models.documents.entries import DocumentEntry
from silver.models.documents.invoice import Invoice
class ProformaManager(BillingDocumentManager):
def get_queryset(self):
queryset = super(BillingDocumentManager, self).get_queryset()
return queryset.filter(kind='proforma').prefetch_related('proforma_entries__product_code',
'proforma_entries__invoice')
class Proforma(BillingDocumentBase):
objects = ProformaManager.from_queryset(BillingDocumentQuerySet)()
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
super(Proforma, self).__init__(*args, **kwargs)
provider_field = self._meta.get_field("provider")
provider_field.related_name = "proformas"
customer_field = self._meta.get_field("customer")
customer_field.related_name = "proformas"
@property
def transactions(self):
return self.proforma_transactions.all()
def clean(self):
super(Proforma, self).clean()
if not self.series:
if not hasattr(self, 'provider'):
# the clean method is called even if the clean_fields method
# raises exceptions, so we check if the provider was specified
pass
elif not self.provider.proforma_series:
err_msg = {'series': 'You must either specify the series or '
'set a default proforma_series for the '
'provider.'}
raise ValidationError(err_msg)
@transition(field='state', source=BillingDocumentBase.STATES.DRAFT,
target=BillingDocumentBase.STATES.ISSUED)
def issue(self, issue_date=None, due_date=None):
self.archived_provider = self.provider.get_proforma_archivable_field_values()
super(Proforma, self)._issue(issue_date, due_date)
@transition(field='state', source=BillingDocumentBase.STATES.ISSUED,
target=BillingDocumentBase.STATES.PAID)
def pay(self, paid_date=None):
super(Proforma, self)._pay(paid_date)
if not self.related_document:
self.related_document = self._new_invoice()
self.related_document.issue()
self.related_document.pay(paid_date=paid_date)
# if the proforma is paid, the invoice due_date should be issue_date
self.related_document.due_date = self.related_document.issue_date
self.related_document.save()
self.save()
def create_invoice(self):
if self.state != BillingDocumentBase.STATES.ISSUED:
raise ValueError("You can't create an invoice from a %s proforma, "
"only from an issued one" % self.state)
if self.related_document:
raise ValueError("This proforma already has an invoice { %s }"
% self.related_document)
self.related_document = self._new_invoice()
self.related_document.issue()
self.save()
return self.related_document
def _new_invoice(self):
# Generate the new invoice based on this proforma
invoice_fields = self.fields_for_automatic_invoice_generation
invoice_fields.update({'related_document': self})
invoice = Invoice.objects.create(**invoice_fields)
# For all the entries in the proforma => add the link to the new
# invoice
DocumentEntry.objects.filter(proforma=self).update(invoice=invoice)
return invoice
@property
def _starting_number(self):
return self.provider.proforma_starting_number
@property
def default_series(self):
try:
return self.provider.proforma_series
except Provider.DoesNotExist:
return ''
@property
def fields_for_automatic_invoice_generation(self):
fields = ['customer', 'provider', 'archived_customer',
'archived_provider', 'paid_date', 'cancel_date',
'sales_tax_percent', 'sales_tax_name', 'currency',
'transaction_currency', 'transaction_xe_rate',
'transaction_xe_date']
return {field: getattr(self, field, None) for field in fields}
@property
def entries(self):
return self.proforma_entries.all()
@receiver(pre_delete, sender=Proforma)
def delete_proforma_pdf_from_storage(sender, instance, **kwargs):
if instance.pdf:
# Delete the proforma's PDF
instance.pdf.pdf_file.delete(False)
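# Illustrative lifecycle sketch for an existing draft proforma (object creation and
# the required provider/customer fixtures are omitted):
#   proforma.issue()                     # DRAFT  -> ISSUED, archives provider fields
#   invoice = proforma.create_invoice()  # allowed only while ISSUED; links a new Invoice
#   proforma.pay()                       # ISSUED -> PAID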
the-stack_106_15271
import logging
import os
from pandaloginvestigator.core.io import file_input
from pandaloginvestigator.core.utils import string_utils
logger = logging.getLogger(__name__)
def read_result_corrupted(dir_results_path):
"""
Reads the corrupted processes list from the results file in the specified directory. Returns a dictionary
keyed by log file name. The value of each key is a list of tuples of the form (malware,
origin, parent). Both malware and parent are tuples of the form (malware_name, malware_pid).
:param dir_results_path: path to the result folder
:return: dictionary of corrupted processes by file name
"""
corrupted_dict = {}
file_path = dir_results_path + '/corrupted_processes.txt'
if not os.path.isfile(file_path):
logger.error('ERROR: corrupted_processes.txt file not found')
quit()
with open(file_path, 'r', encoding='utf-8', errors='replace') as corrupted_file:
last_file_name = ''
for line in corrupted_file:
if string_utils.filename in line:
last_file_name = file_input.filename_from_analysis(line)
corrupted_dict[last_file_name] = []
elif line.strip():
line = line.split('\t')
malware = (line[2].strip(), line[3].strip())
origin = line[4].strip()
parent = (line[6].strip(), line[7].strip())
corrupted_dict[last_file_name].append([malware, origin, parent])
return corrupted_dict
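# Shape of the returned mapping, per the parsing above (file names, process names
# and pids are illustrative):
#   {"sample_log": [[("malware.exe", "1234"), "origin_label", ("parent.exe", "5678")], ...]}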
def read_result_suspect(dir_results_path):
"""
Reads and returns the dictionary of suspects from the result file.
:param dir_results_path: path to the results files directory
:return: dictionary mapping suspect indices to samples uuid
"""
suspects_dict = {}
with open(os.path.join(dir_results_path, 'suspects.txt'), 'r', encoding='utf-8', errors='replace') as s_file:
last_file_name = ''
for line in s_file:
if string_utils.filename in line:
last_file_name = file_input.filename_from_analysis(line)
elif string_utils.suspect_ind in line:
index = float(line.split('\t')[1].strip())
suspects_dict[last_file_name] = index
return suspects_dict
the-stack_106_15272
#!/usr/bin/env python3
from abc import ABC, abstractmethod
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
class _ApproximateMarginalLogLikelihood(MarginalLogLikelihood, ABC):
r"""
An approximate marginal log likelihood (typically a bound) for approximate GP models.
We expect that :attr:`model` is a :obj:`gpytorch.models.ApproximateGP`.
Args:
:attr:`likelihood` (:obj:`gpytorch.likelihoods.Likelihood`):
The likelihood for the model
:attr:`model` (:obj:`gpytorch.models.ApproximateGP`):
The approximate GP model
:attr:`num_data` (int):
The total number of training data points (necessary for SGD)
:attr:`beta` (float - default 1.):
A multiplicative factor for the KL divergence term.
Setting it to 1 (default) recovers true variational inference
(as derived in `Scalable Variational Gaussian Process Classification`_).
Setting it to anything less than 1 reduces the regularization effect of the model
(similarly to what was proposed in `the beta-VAE paper`_).
:attr:`combine_terms` (bool):
Whether or not to sum the expected NLL with the KL terms (default True)
"""
def __init__(self, likelihood, model, num_data, beta=1.0, combine_terms=True):
super().__init__(likelihood, model)
self.combine_terms = combine_terms
self.num_data = num_data
self.beta = beta
@abstractmethod
def _log_likelihood_term(self, approximate_dist_f, target, **kwargs):
raise NotImplementedError
def forward(self, approximate_dist_f, target, **kwargs):
r"""
Computes the Variational ELBO given :math:`q(\mathbf f)` and `\mathbf y`.
Calling this function will call the likelihood's `expected_log_prob` function.
Args:
:attr:`approximate_dist_f` (:obj:`gpytorch.distributions.MultivariateNormal`):
:math:`q(\mathbf f)` the outputs of the latent function (the :obj:`gpytorch.models.ApproximateGP`)
:attr:`target` (`torch.Tensor`):
:math:`\mathbf y` The target values
:attr:`**kwargs`:
Additional arguments passed to the likelihood's `expected_log_prob` function.
"""
# Get likelihood term and KL term
num_batch = approximate_dist_f.event_shape[0]
log_likelihood = self._log_likelihood_term(approximate_dist_f, target, **kwargs).div(num_batch)
kl_divergence = self.model.variational_strategy.kl_divergence().div(self.num_data / self.beta)
# Add any additional registered loss terms
added_loss = torch.zeros_like(log_likelihood)
had_added_losses = False
for added_loss_term in self.model.added_loss_terms():
added_loss.add_(added_loss_term.loss())
had_added_losses = True
# Log prior term
log_prior = torch.zeros_like(log_likelihood)
for _, prior, closure, _ in self.named_priors():
log_prior.add_(prior.log_prob(closure()).sum().div(self.num_data))
if self.combine_terms:
return log_likelihood - kl_divergence + log_prior - added_loss
else:
if had_added_losses:
return log_likelihood, kl_divergence, log_prior.div(self.num_data), added_loss
else:
return log_likelihood, kl_divergence, log_prior.div(self.num_data)
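# Typical usage goes through a concrete subclass such as gpytorch.mlls.VariationalELBO;
# the likelihood, model and training tensors below are placeholders:
#   mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(0))
#   loss = -mll(model(train_x), train_y)
#   loss.backward()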
the-stack_106_15273
class Category:
def __init__(self, name):
self.fund = 0
self.spent = 0
self.ledger = []
self.name = name
def __str__(self):
half = (30 - len(self.name)) // 2
table = "*" * half + self.name + "*" * half
table += ("*" if len(table) == 29 else "") + "\n"
for item in self.ledger:
desc = item["description"][:23]
value = "{:.2f}".format(item["amount"])[:7]
table += desc + " " * (30 - len(desc) - len(value)) + value + "\n"
table += "Total: {:0.2f}".format(self.fund)
return table
def deposit(self, amount, description = ""):
self.fund += amount
self.ledger.append({"amount": amount, "description": description})
def withdraw(self, amount, description = ""):
if not self.check_funds(amount):
return False
self.ledger.append({"amount": -amount, "description": description})
self.fund -= amount
self.spent += amount
return True
def get_balance(self):
return self.fund
def transfer(self, amount, category):
result = self.withdraw(amount, "Transfer to {}".format(category.name))
if result:
category.deposit(amount, "Transfer from {}".format(self.name))
self.spent += amount
return result
def check_funds(self, amount):
return self.fund >= amount
def create_spend_chart(categories):
cats = []
maxLength = 0
total = 0
for cat in categories:
if len(cat.name) > maxLength: maxLength = len(cat.name)
total += cat.spent
cats.append((cat.spent, cat.name))
# title
chart = ["Percentage spent by category"]
# chart
for i in range(100, -1, -10):
line = "{:3d}".format(i) + "| "
for percent, _ in cats:
line += "o " if round(percent / total * 100) >= i else " "
chart.append(line)
# hr line
line = " " * 4 + "-" * (len(chart[-1]) - 4)
chart.append(line)
# names
for i in range(maxLength):
line = " " * 5
for _, name in cats:
line += "{} ".format(name[i]) if len(name) > i else " "
chart.append(line)
return "\n".join(chart)
the-stack_106_15275
import pyautogui as pag
from contextlib import contextmanager
@contextmanager
def tab_switch():
pag.hotkey('alt', 'tab')
yield
pag.hotkey('alt', 'tab')
def enter_invoice(invoice):
with tab_switch():
pag.write(f'{invoice.timestamp:%d%m%y}')
pag.press('tab')
pag.write(str(invoice.number))
pag.press('down', 2)
pag.write(str(invoice.amount).replace('.',','))
pag.press('down', 3)
pag.press('tab')
pag.write(invoice.workorder)
def upload_file(invoice):
with tab_switch():
pag.keyDown('shift')
pag.press('tab', 7)
pag.keyUp('shift')
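# enter_invoice() only assumes an object exposing .timestamp (a datetime), .number,
# .amount and .workorder; a minimal stand-in (values are illustrative, and the calls
# really do send keystrokes to the active window):
#   from collections import namedtuple
#   from datetime import datetime
#   Invoice = namedtuple('Invoice', 'timestamp number amount workorder')
#   enter_invoice(Invoice(datetime(2021, 3, 1), 42, 19.99, 'WO-1'))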
the-stack_106_15277
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
"""
This class acts as the object recovery scheduler, which adds key-value entries to
the underlying messaging platform.
"""
#!/usr/bin/python3.6
import os
import traceback
import sched
import time
import logging
from logging import handlers
import datetime
import math
import json
import signal
import sys
from s3backgrounddelete.cortx_s3_config import CORTXS3Config
from s3backgrounddelete.cortx_s3_index_api import CORTXS3IndexApi
from s3backgrounddelete.cortx_s3_signal import DynamicConfigHandler
from s3backgrounddelete.cortx_s3_constants import MESSAGE_BUS, RABBIT_MQ
#from s3backgrounddelete.IEMutil import IEMutil
class ObjectRecoveryScheduler(object):
"""Scheduler which will add key value to rabbitmq message queue."""
def __init__(self, producer_name):
"""Initialise logger and configuration."""
self.data = None
self.config = CORTXS3Config()
self.create_logger_directory()
self.create_logger()
self.signal = DynamicConfigHandler(self)
self.logger.info("Initialising the Object Recovery Scheduler")
self.producer = None
self.producer_name = producer_name
@staticmethod
def isObjectLeakEntryOlderThan(leakRecord, OlderInMins = 15):
object_leak_time = leakRecord["create_timestamp"]
now = datetime.datetime.utcnow()
date_time_obj = datetime.datetime.strptime(object_leak_time, "%Y-%m-%dT%H:%M:%S.000Z")
timeDelta = now - date_time_obj
timeDeltaInMns = math.floor(timeDelta.total_seconds()/60)
return (timeDeltaInMns >= OlderInMins)
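# Example of a leak record accepted above (the timestamp value is illustrative):
#   {"create_timestamp": "2020-06-01T10:15:30.000Z", ...}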
def add_kv_to_msgbus(self, marker = None):
"""Add object key value to msgbus topic."""
self.logger.info("Inside add_kv_to_msgbus.")
try:
from s3backgrounddelete.object_recovery_msgbus import ObjectRecoveryMsgbus
if not self.producer:
self.producer = ObjectRecoveryMsgbus(
self.config,
self.logger)
threshold = self.config.get_threshold()
self.logger.debug("Threshold is : " + str(threshold))
count = self.producer.get_count()
self.logger.debug("Count of unread msgs is : " + str(count))
if int(count) < threshold:
self.logger.debug("Count of unread messages is less than threshold value.Hence continuing...")
else:
#do nothing
self.logger.info("Queue has more messages than threshold value. Hence skipping addition of further entries.")
return
# Cleanup all entries and enqueue only 1000 entries
#PurgeAPI Here
self.producer.purge()
result, index_response = CORTXS3IndexApi(
self.config, logger=self.logger).list(
self.config.get_probable_delete_index_id(), self.config.get_max_keys(), marker)
if result:
self.logger.info("Index listing result :" +
str(index_response.get_index_content()))
probable_delete_json = index_response.get_index_content()
probable_delete_oid_list = probable_delete_json["Keys"]
is_truncated = probable_delete_json["IsTruncated"]
if (probable_delete_oid_list is not None):
for record in probable_delete_oid_list:
# Check if record is older than the pre-configured 'time to process' delay
leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
try:
objLeakVal = json.loads(record["Value"])
except ValueError as error:
self.logger.error(
"Failed to parse JSON data for: " + str(record) + " due to: " + error)
continue
if (objLeakVal is None):
self.logger.error("No value associated with " + str(record) + ". Skipping entry")
continue
# Check if object leak entry is older than 15mins or a preconfigured duration
if (not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(objLeakVal, leak_processing_delay)):
self.logger.info("Object leak entry " + record["Key"] +
" is NOT older than " + str(leak_processing_delay) +
"mins. Skipping entry")
continue
self.logger.info(
"Object recovery queue sending data :" +
str(record))
ret = self.producer.send_data(record, producer_id = self.producer_name)
if not ret:
# TODO - Do Audit logging
self.logger.error(
"Object recovery queue send data "+ str(record) +
" failed :")
else:
self.logger.info(
"Object recovery queue send data successfully :" +
str(record))
else:
self.logger.info(
"Index listing result empty. Ignoring adding entry to object recovery queue")
else:
self.logger.error("Failed to retrive Index listing:")
except Exception as exception:
self.logger.error(
"add_kv_to_msgbus send data exception: {}".format(exception))
self.logger.debug(
"traceback : {}".format(traceback.format_exc()))
def add_kv_to_queue(self, marker = None):
"""Add object key value to object recovery queue."""
self.logger.info("Adding kv list to queue")
mq_client = None  # ensure the finally block below can always reference it
try:
from s3backgrounddelete.object_recovery_queue import ObjectRecoveryRabbitMq
mq_client = ObjectRecoveryRabbitMq(
self.config,
self.config.get_rabbitmq_username(),
self.config.get_rabbitmq_password(),
self.config.get_rabbitmq_host(),
self.config.get_rabbitmq_exchange(),
self.config.get_rabbitmq_queue_name(),
self.config.get_rabbitmq_mode(),
self.config.get_rabbitmq_durable(),
self.logger)
# Cleanup all entries and enqueue only 1000 entries
mq_client.purge_queue(self.config.get_rabbitmq_queue_name())
result, index_response = CORTXS3IndexApi(
self.config, logger=self.logger).list(
self.config.get_probable_delete_index_id(), self.config.get_max_keys(), marker)
if result:
self.logger.info("Index listing result :" +
str(index_response.get_index_content()))
probable_delete_json = index_response.get_index_content()
probable_delete_oid_list = probable_delete_json["Keys"]
is_truncated = probable_delete_json["IsTruncated"]
if (probable_delete_oid_list is not None):
for record in probable_delete_oid_list:
# Check if record is older than the pre-configured 'time to process' delay
leak_processing_delay = self.config.get_leak_processing_delay_in_mins()
try:
objLeakVal = json.loads(record["Value"])
except ValueError as error:
self.logger.error(
"Failed to parse JSON data for: " + str(record) + " due to: " + error)
continue
if (objLeakVal is None):
self.logger.error("No value associated with " + str(record) + ". Skipping entry")
continue
# Check if object leak entry is older than 15mins or a preconfigured duration
if (not ObjectRecoveryScheduler.isObjectLeakEntryOlderThan(objLeakVal, leak_processing_delay)):
self.logger.info("Object leak entry " + record["Key"] +
" is NOT older than " + str(leak_processing_delay) +
"mins. Skipping entry")
continue
self.logger.info(
"Object recovery queue sending data :" +
str(record))
ret, msg = mq_client.send_data(
record, self.config.get_rabbitmq_queue_name())
if not ret:
#IEMutil("ERROR", IEMutil.RABBIT_MQ_CONN_FAILURE, IEMutil.RABBIT_MQ_CONN_FAILURE_STR)
self.logger.error(
"Object recovery queue send data "+ str(record) +
" failed :" + msg)
else:
self.logger.info(
"Object recovery queue send data successfully :" +
str(record))
else:
self.logger.info(
"Index listing result empty. Ignoring adding entry to object recovery queue")
pass
else:
self.logger.error("Failed to retrive Index listing:")
except BaseException:
self.logger.error(
"Object recovery queue send data exception:" + traceback.format_exc())
finally:
if mq_client:
self.logger.info("Closing the mqclient")
mq_client.close()
def schedule_periodically(self):
"""Schedule RabbitMQ producer to add key value to queue on hourly basis."""
# Run RabbitMQ producer periodically on hourly basis
self.logger.info("Producer " + str(self.producer_name) + " started at : " + str(datetime.datetime.now()))
scheduled_run = sched.scheduler(time.time, time.sleep)
def periodic_run(scheduler):
"""Add key value to queue using scheduler."""
if self.config.get_messaging_platform() == MESSAGE_BUS:
self.add_kv_to_msgbus()
elif self.config.get_messaging_platform() == RABBIT_MQ:
self.add_kv_to_queue()
else:
self.logger.error(
"Invalid argument specified in messaging_platform use message_bus or rabbit_mq")
return
scheduled_run.enter(
self.config.get_schedule_interval(), 1, periodic_run, (scheduler,))
scheduled_run.enter(self.config.get_schedule_interval(),
1, periodic_run, (scheduled_run,))
scheduled_run.run()
def create_logger(self):
"""Create logger, file handler, console handler and formatter."""
# create logger with "object_recovery_scheduler"
self.logger = logging.getLogger(
self.config.get_scheduler_logger_name())
self.logger.setLevel(self.config.get_file_log_level())
# https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler
fhandler = logging.handlers.RotatingFileHandler(self.config.get_scheduler_logger_file(), mode='a',
maxBytes = self.config.get_max_bytes(),
backupCount = self.config.get_backup_count(), encoding=None,
delay=False )
fhandler.setLevel(self.config.get_file_log_level())
# create console handler with a higher log level
chandler = logging.StreamHandler()
chandler.setLevel(self.config.get_console_log_level())
# create formatter and add it to the handlers
formatter = logging.Formatter(self.config.get_log_format())
fhandler.setFormatter(formatter)
chandler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(fhandler)
self.logger.addHandler(chandler)
def create_logger_directory(self):
"""Create log directory if not exsists."""
self._logger_directory = os.path.join(self.config.get_logger_directory())
if not os.path.isdir(self._logger_directory):
try:
os.mkdir(self._logger_directory)
except BaseException:
self.logger.error(
"Unable to create log directory at " + self._logger_directory)
if __name__ == "__main__":
SCHEDULER = ObjectRecoveryScheduler(sys.argv[1])
SCHEDULER.schedule_periodically()
|
the-stack_106_15278
|
# -*- coding: utf-8 -*-
"""
mongoop.triggers.graphite
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by Lujeni.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
from collections import Counter
import graphitesend
from mongoop.triggers import BaseTrigger
logging.basicConfig(
level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')
PATH = os.path.dirname(os.path.abspath(__file__))
class MongoopTrigger(BaseTrigger):
def op_nok(self, operations):
try:
counter = Counter([ns['ns'] for ns in operations])
graphite = graphitesend.init(**self.params.get('graphitesend_params', {}))
graphite.send('total', sum(counter.values()))
if self.params.get('metric_per_ns'):
graphite.send_dict(counter)
except Exception as e:
logging.error('unable to run :: {} :: {}'.format(self.name, e))
return False
else:
logging.info('run :: {} :: send OK'.format(self.name))
return True
|
the-stack_106_15279
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from ibm_botocore.docs.params import RequestParamsDocumenter
from ibm_botocore.docs.params import ResponseParamsDocumenter
from ibm_botocore.docs.example import ResponseExampleDocumenter
from ibm_botocore.docs.example import RequestExampleDocumenter
AWS_DOC_BASE = 'https://docs.aws.amazon.com/goto/WebAPI'
def get_instance_public_methods(instance):
"""Retrieves an objects public methods
:param instance: The instance of the class to inspect
:rtype: dict
:returns: A dictionary that represents an instance's methods where
the keys are the name of the methods and the
values are the handler to the method.
"""
instance_members = inspect.getmembers(instance)
instance_methods = {}
for name, member in instance_members:
if not name.startswith('_'):
if inspect.ismethod(member):
instance_methods[name] = member
return instance_methods
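# Illustrative usage sketch (the class below is hypothetical, not part of botocore):
#
#     class Greeter:
#         def hello(self):
#             return 'hi'
#         def _internal(self):
#             return 'hidden'
#
#     get_instance_public_methods(Greeter())
#     # -> {'hello': <bound method Greeter.hello of ...>}; '_internal' is filtered
#     # out because its name starts with an underscore.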
def document_model_driven_signature(section, name, operation_model,
include=None, exclude=None):
"""Documents the signature of a model-driven method
:param section: The section to write the documentation to.
:param name: The name of the method
:param operation_model: The operation model for the method
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
params = {}
if operation_model.input_shape:
params = operation_model.input_shape.members
parameter_names = list(params.keys())
if include is not None:
for member in include:
parameter_names.append(member.name)
if exclude is not None:
for member in exclude:
if member in parameter_names:
parameter_names.remove(member)
signature_params = ''
if parameter_names:
signature_params = '**kwargs'
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_signature(section, name, method,
include=None, exclude=None):
"""Documents the signature of a custom method
:param section: The section to write the documentation to.
:param name: The name of the method
:param method: The handle to the method being documented
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
"""
args, varargs, keywords, defaults = inspect.getargspec(method)
args = args[1:]
signature_params = inspect.formatargspec(
args, varargs, keywords, defaults)
signature_params = signature_params.lstrip('(')
signature_params = signature_params.rstrip(')')
section.style.start_sphinx_py_method(name, signature_params)
def document_custom_method(section, method_name, method):
"""Documents a non-data driven method
:param section: The section to write the documentation to.
:param method_name: The name of the method
:param method: The handle to the method being documented
"""
document_custom_signature(
section, method_name, method)
method_intro_section = section.add_new_section('method-intro')
method_intro_section.writeln('')
doc_string = inspect.getdoc(method)
if doc_string is not None:
method_intro_section.style.write_py_doc_string(doc_string)
def document_model_driven_method(section, method_name, operation_model,
event_emitter, method_description=None,
example_prefix=None, include_input=None,
include_output=None, exclude_input=None,
exclude_output=None, document_output=True,
include_signature=True):
"""Documents an individual method
:param section: The section to write to
:param method_name: The name of the method
:param operation_model: The model of the operation
:param event_emitter: The event emitter to use to emit events
:param example_prefix: The prefix to use in the method example.
:type include_input: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include_input: The parameter shapes to include in the
input documentation.
:type include_output: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
    :param include_output: The parameter shapes to include in the
output documentation.
:type exclude_input: List of the names of the parameters to exclude.
:param exclude_input: The names of the parameters to exclude from
input documentation.
:type exclude_output: List of the names of the parameters to exclude.
    :param exclude_output: The names of the parameters to exclude from
output documentation.
:param document_output: A boolean flag to indicate whether to
document the output.
:param include_signature: Whether or not to include the signature.
It is useful for generating docstrings.
"""
# Add the signature if specified.
if include_signature:
document_model_driven_signature(
section, method_name, operation_model, include=include_input,
exclude=exclude_input)
# Add the description for the method.
method_intro_section = section.add_new_section('method-intro')
method_intro_section.include_doc_string(method_description)
if operation_model.deprecated:
method_intro_section.style.start_danger()
method_intro_section.writeln(
'This operation is deprecated and may not function as '
'expected. This operation should not be used going forward '
            'and is only kept for the purpose of backwards compatibility.')
method_intro_section.style.end_danger()
service_uid = operation_model.service_model.metadata.get('uid')
if service_uid is not None:
method_intro_section.style.new_paragraph()
method_intro_section.write("See also: ")
link = '%s/%s/%s' % (AWS_DOC_BASE, service_uid,
operation_model.name)
method_intro_section.style.external_link(title="AWS API Documentation",
link=link)
method_intro_section.writeln('')
# Add the example section.
example_section = section.add_new_section('example')
example_section.style.new_paragraph()
example_section.style.bold('Request Syntax')
context = {
'special_shape_types': {
'streaming_input_shape': operation_model.get_streaming_input(),
'streaming_output_shape': operation_model.get_streaming_output(),
'eventstream_output_shape': operation_model.get_event_stream_output(),
},
}
if operation_model.input_shape:
RequestExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_example(
example_section, operation_model.input_shape,
prefix=example_prefix, include=include_input,
exclude=exclude_input)
else:
example_section.style.new_paragraph()
example_section.style.start_codeblock()
example_section.write(example_prefix + '()')
# Add the request parameter documentation.
request_params_section = section.add_new_section('request-params')
if operation_model.input_shape:
RequestParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter, context=context).document_params(
request_params_section, operation_model.input_shape,
include=include_input, exclude=exclude_input)
# Add the return value documentation
return_section = section.add_new_section('return')
return_section.style.new_line()
if operation_model.output_shape is not None and document_output:
return_section.write(':rtype: dict')
return_section.style.new_line()
return_section.write(':returns: ')
return_section.style.indent()
return_section.style.new_line()
# If the operation is an event stream, describe the tagged union
event_stream_output = operation_model.get_event_stream_output()
if event_stream_output:
event_section = return_section.add_new_section('event-stream')
event_section.style.new_paragraph()
event_section.write(
'The response of this operation contains an '
':class:`.EventStream` member. When iterated the '
':class:`.EventStream` will yield events based on the '
'structure below, where only one of the top level keys '
'will be present for any given event.'
)
event_section.style.new_line()
# Add an example return value
return_example_section = return_section.add_new_section('example')
return_example_section.style.new_line()
return_example_section.style.bold('Response Syntax')
return_example_section.style.new_paragraph()
ResponseExampleDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_example(
return_example_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
# Add a description for the return value
return_description_section = return_section.add_new_section(
'description')
return_description_section.style.new_line()
return_description_section.style.bold('Response Structure')
return_description_section.style.new_paragraph()
ResponseParamsDocumenter(
service_name=operation_model.service_model.service_name,
operation_name=operation_model.name,
event_emitter=event_emitter,
context=context).document_params(
return_description_section, operation_model.output_shape,
include=include_output, exclude=exclude_output)
else:
return_section.write(':returns: None')
|
the-stack_106_15280
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of CUDA/GPU operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.te import SpecializedCondition
from tvm.contrib import nvcc
from tvm._ffi import get_global_func
from .generic import *
from .. import op as _op
@schedule_injective.register(["cuda", "gpu"])
def schedule_injective_cuda(attrs, outs, target):
"""schedule injective ops for cuda"""
with target:
return topi.cuda.schedule_injective(outs)
@schedule_reduce.register(["cuda", "gpu"])
def schedule_reduce_cuda(attrs, outs, target):
"""schedule reduction ops for cuda"""
with target:
return topi.cuda.schedule_reduce(outs)
@schedule_concatenate.register(["cuda", "gpu"])
def schedule_concatenate_cuda(attrs, outs, target):
"""schedule concatenate for cuda"""
with target:
return topi.cuda.schedule_injective(outs)
@schedule_pool.register(["cuda", "gpu"])
def schedule_pool_cuda(attrs, outs, target):
"""schedule pooling ops for cuda"""
with target:
return topi.cuda.schedule_pool(outs, attrs.layout)
@schedule_pool_grad.register(["cuda", "gpu"])
def schedule_pool_grad_cuda(attrs, outs, target):
"""schedule pooling gradient ops for cuda"""
with target:
return topi.cuda.schedule_pool_grad(outs)
@schedule_adaptive_pool.register(["cuda", "gpu"])
def schedule_adaptive_pool_cuda(attrs, outs, target):
"""schedule adaptive pooling ops for cuda"""
with target:
return topi.cuda.schedule_adaptive_pool(outs, attrs.layout)
@softmax_strategy.register(["cuda", "gpu"])
def softmax_strategy_cuda(attrs, inputs, out_type, target):
"""softmax cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="softmax.cuda",
)
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(topi.cuda.softmax_cudnn),
wrap_topi_schedule(topi.cuda.schedule_softmax_cudnn),
name="softmax.cudnn",
plevel=15,
)
return strategy
@schedule_log_softmax.register(["cuda", "gpu"])
def schedule_log_softmax_cuda(attrs, outs, target):
"""scheudle log_softmax for cuda"""
with target:
return topi.cuda.schedule_softmax(outs)
@schedule_lrn.register(["cuda", "gpu"])
def schedule_lrn_cuda(attrs, outs, target):
"""schedule LRN for cuda"""
with target:
return topi.cuda.schedule_lrn(outs)
@conv2d_strategy.register(["cuda", "gpu"])
def conv2d_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d cuda strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
stride_h, stride_w = attrs.get_int_tuple("strides")
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
padding = attrs.get_int_tuple("padding")
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
if data.dtype in ("int8", "uint8") and kernel.dtype in ("int8", "uint8"):
assert data.dtype == kernel.dtype
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_int8),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_int8),
name="conv2d_nchw_int8.cuda",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw),
name="conv2d_nchw.cuda",
)
_, _, kh, kw = get_const_tuple(kernel.shape)
if (
(2 < kh < 8 and 2 < kw < 8 and kh == kw)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.cuda",
plevel=5,
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_hwcn),
wrap_topi_schedule(topi.cuda.schedule_conv2d_hwcn),
name="conv2d_hwcn.cuda",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc),
name="conv2d_nhwc.cuda",
)
N, H, W, _ = get_const_tuple(data.shape)
KH, KW, CI, CO = get_const_tuple(kernel.shape)
# Winograd shape related judgment
(
judge_winograd_tensorcore,
judge_winograd_autotvm,
judge_winograd_auto_scheduler,
) = judge_winograd(
N,
H,
W,
KH,
KW,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data.dtype,
kernel.dtype,
pre_flag=False,
)
if judge_winograd_autotvm:
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and judge_winograd_tensorcore
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore),
name="conv2d_nhwc_winograd_tensorcore.cuda",
plevel=5,
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_direct),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_winograd_direct),
name="conv2d_nhwc_winograd_direct.cuda",
plevel=5,
)
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and (
(N % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (N % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (N % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_tensorcore),
name="conv2d_nhwc_tensorcore.cuda",
plevel=20,
)
# register auto-scheduler implementations
if is_auto_scheduler_enabled() and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
elif layout == "HWNC":
assert kernel_layout in ["HWOI", "HWOI16o16i", "HWOI8o32i", "HWOI32o16i"]
_, _, N, in_channels = get_const_tuple(data.shape)
pre_computed = len(kernel.shape) == 6
if pre_computed:
_, _, oc_chunk, _, oc_block_factor, _ = get_const_tuple(kernel.shape)
out_channels = oc_chunk * oc_block_factor
else:
_, _, out_channels, _ = get_const_tuple(kernel.shape)
tensorcore_dtypes = ["int4", "uint4", "int8", "uint8"]
if (
(N % 16 == 0 and in_channels % 16 == 0 and out_channels % 16 == 0)
or (N % 8 == 0 and in_channels % 16 == 0 and out_channels % 32 == 0)
or (N % 32 == 0 and in_channels % 16 == 0 and out_channels % 8 == 0)
and (data.dtype in tensorcore_dtypes and kernel.dtype in tensorcore_dtypes)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_hwnc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_hwnc_tensorcore),
name="conv2d_hwnc_tensorcore_direct.cuda",
plevel=20,
)
else:
raise RuntimeError(
"Unsupported shape for conv2d HWNC.\
Need to satisfy tensor core schedule."
)
elif layout == "NCHW4c" and data.dtype in ["int8", "uint8"]:
assert kernel_layout == "OIHW4o4i"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_NCHWc_int8, True),
wrap_topi_schedule(topi.cuda.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.cuda",
)
else:
raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout))
# add cudnn implementation
if target.kind.name == "cuda" and "cudnn" in target.libs:
if layout in ["NCHW", "NHWC"] and padding[0] == padding[2] and padding[1] == padding[3]:
strategy.add_implementation(
wrap_compute_conv2d(
topi.cuda.conv2d_cudnn, need_data_layout=True, has_groups=True
),
wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn),
name="conv2d_cudnn.cuda",
plevel=25,
)
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.cuda",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.cuda",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
# add cudnn implementation, if any
cudnn_impl = False
if target.kind.name == "cuda" and "cudnn" in target.libs:
if layout in ["NCHW", "NHWC"] and padding[0] == padding[2] and padding[1] == padding[3]:
strategy.add_implementation(
wrap_compute_conv2d(
topi.cuda.conv2d_cudnn, need_data_layout=True, has_groups=True
),
wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn),
name="conv2d_cudnn.cuda",
plevel=25,
)
cudnn_impl = True
if layout == "NCHW":
# TODO(@vinx13, @icemelon9): Use group_conv2d_NCHWc_int8 when dtype is int8/uint8.
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.cuda",
)
elif layout == "NCHW4c" and data.dtype in ["int8", "uint8"]:
assert kernel_layout == "OIHW4o4i"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.group_conv2d_NCHWc_int8, True),
wrap_topi_schedule(topi.cuda.schedule_group_conv2d_NCHWc_int8),
name="group_conv2d_NCHWc_int8.cuda",
)
elif not cudnn_impl:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
def judge_winograd(
N,
H,
W,
KH,
KW,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data_dtype,
kernel_dtype,
pre_flag,
):
"""Winograd judgement about tensorcore and shape"""
if H % 8 == 0:
tile_size = 4
else:
tile_size = 2
if pre_flag:
alpha = KH
KH = KW = alpha + 1 - tile_size
pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (KH, KW))
OH = (H + pt + pb - KH) // stride_h + 1
OW = (W + pl + pr - KW) // stride_w + 1
nH, nW = (OH + tile_size - 1) // tile_size, (OW + tile_size - 1) // tile_size
P = N * nH * nW
judge_winograd_tensorcore = (
(P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
judge_winograd_autotvm = (
2 < KH < 8
and 2 < KW < 8
and KH == KW
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
)
judge_winograd_auto_scheduler = (
("float" in data_dtype and "float" in kernel_dtype)
and (KH == 3 and KW == 3)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
)
return judge_winograd_tensorcore, judge_winograd_autotvm, judge_winograd_auto_scheduler
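# Illustrative call sketch (shape values are assumptions, not taken from the code
# above): for a 1x56x56x64 NHWC workload with a 3x3 float16 kernel, 64 output
# channels, stride 1, unit dilation and padding (1, 1), the three flags report
# whether the tensorcore, AutoTVM and auto-scheduler winograd paths are applicable:
#
#     tensorcore_ok, autotvm_ok, auto_scheduler_ok = judge_winograd(
#         1, 56, 56, 3, 3, 64, 64, (1, 1), 1, 1, 1, 1,
#         "float16", "float16", pre_flag=False,
#     )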
@conv2d_winograd_without_weight_transfrom_strategy.register(["cuda", "gpu"])
def conv2d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transfrom cuda strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
data, kernel = inputs
stride_h, stride_w = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd_without_weight_transform),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_winograd_without_weight_transform),
name="conv2d_nchw_winograd_without_weight_transform.cuda",
)
elif layout == "NHWC":
N, H, W, _ = get_const_tuple(data.shape)
alpha, _, CI, CO = get_const_tuple(kernel.shape)
dilation_h, dilation_w = dilation
judge_winograd_tensorcore, _, _ = judge_winograd(
N,
H,
W,
alpha,
alpha,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data.dtype,
kernel.dtype,
pre_flag=True,
)
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and judge_winograd_tensorcore
):
strategy.add_implementation(
wrap_compute_conv2d(
topi.cuda.conv2d_nhwc_winograd_tensorcore_without_weight_transform
),
wrap_topi_schedule(
topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore_without_weight_transform
),
name="conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_direct_without_weight_transform),
wrap_topi_schedule(
topi.cuda.schedule_conv2d_nhwc_winograd_direct_without_weight_transform
),
name="conv2d_nhwc_winograd_direct_without_weight_transform.cuda",
)
if is_auto_scheduler_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc_without_weight_transform),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc_winograd_without_weight_transform",
plevel=15,
)
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transfrom layout {}".format(layout)
)
return strategy
@deformable_conv2d_strategy.register(["cuda", "gpu"])
def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target):
"""deformable_conv2d cuda strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.cuda",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.cuda",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d on CUDA" % layout)
return strategy
@conv2d_transpose_strategy.register(["cuda", "gpu"])
def conv2d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d_transpose cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.cuda.conv2d_transpose_nchw),
wrap_topi_schedule(topi.cuda.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.cuda",
)
return strategy
@conv3d_transpose_strategy.register(["cuda", "gpu"])
def conv3d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d_transpose cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.cuda.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.cuda.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.cuda",
)
return strategy
@conv3d_strategy.register(["cuda", "gpu"])
def conv3d_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d cuda strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
layout = attrs.data_layout
_, stride_h, stride_w = attrs.get_int_tuple("strides")
_, dilation_h, dilation_w = attrs.get_int_tuple("dilation")
assert layout in ["NCDHW", "NDHWC"], "Not support this layout {} yet".format(layout)
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.cuda",
plevel=10,
)
_, _, _, kh, kw = get_const_tuple(kernel.shape)
if (
2 < kh < 8
and 2 < kw < 8
and kh == kw
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
):
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw_winograd),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw_winograd),
name="conv3d_ncdhw_winograd.cuda",
plevel=5,
)
else: # layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ndhwc),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.cuda",
plevel=10,
)
N, _, _, _, _ = get_const_tuple(data.shape)
_, _, _, CI, CO = get_const_tuple(kernel.shape)
if target.kind.name == "cuda":
if nvcc.have_tensorcore(target=target):
if (
(N % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (N % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (N % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
):
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ndhwc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ndhwc_tensorcore),
name="conv3d_ndhwc_tensorcore.cuda",
plevel=20,
)
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_cudnn, True),
wrap_topi_schedule(topi.cuda.schedule_conv3d_cudnn),
name="conv3d_cudnn.cuda",
plevel=25,
)
return strategy
@conv3d_winograd_without_weight_transfrom_strategy.register(["cuda", "gpu"])
def conv3d_winograd_without_weight_transfrom_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transfrom cuda strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
assert dilation == (1, 1, 1), "Do not support dilate now"
assert groups == 1, "Do not supoort arbitrary group number"
strategy = _op.OpStrategy()
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw_winograd_without_weight_transform),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw_winograd_without_weight_transform),
name="conv3d_ncdhw_winograd_without_weight_transform.cuda",
)
else:
raise RuntimeError(
"Unsupported conv3d_winograd_without_weight_transfrom layout {}".format(layout)
)
return strategy
@conv1d_strategy.register(["cuda", "gpu"])
def conv1d_strategy_cuda(attrs, inputs, out_type, target):
"""conv1d cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.cuda.conv1d_ncw),
wrap_topi_schedule(topi.cuda.schedule_conv1d_ncw),
name="conv1d_ncw.cuda",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.cuda.conv1d_nwc),
wrap_topi_schedule(topi.cuda.schedule_conv1d_nwc),
name="conv1d_nwc.cuda",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
@conv1d_transpose_strategy.register(["cuda", "gpu"])
def conv1d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv1d_transpose cuda strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.cuda.conv1d_transpose_ncw),
wrap_topi_schedule(topi.cuda.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.cuda",
)
return strategy
@dense_strategy.register(["cuda", "gpu"])
def dense_strategy_cuda(attrs, inputs, out_type, target):
"""dense cuda strategy"""
strategy = _op.OpStrategy()
data, weights = inputs
b, i = get_const_tuple(data.shape)
o, _ = get_const_tuple(weights.shape)
if out_type.dtype == "int8":
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_int8),
wrap_topi_schedule(topi.cuda.schedule_dense_int8),
name="dense_int8.cuda",
)
else:
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_small_batch),
wrap_topi_schedule(topi.cuda.schedule_dense_small_batch),
name="dense_small_batch.cuda",
)
with SpecializedCondition(b >= 32):
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_large_batch),
wrap_topi_schedule(topi.cuda.schedule_dense_large_batch),
name="dense_large_batch.cuda",
plevel=5,
)
if target.kind.name == "cuda":
if nvcc.have_tensorcore(target=target):
if (
(
data.dtype in ["float16", "int8", "uint8"]
and (
(i % 16 == 0 and b % 16 == 0 and o % 16 == 0)
or (i % 16 == 0 and b % 8 == 0 and o % 32 == 0)
or (i % 16 == 0 and b % 32 == 0 and o % 8 == 0)
)
)
or (
data.dtype in ["int4", "uint4"]
and i % 32 == 0
and b % 8 == 0
and o % 8 == 0
)
or (
data.dtype in ["int1", "uint1"]
and i % 128 == 0
and b % 8 == 0
and o % 8 == 0
)
):
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_dense_tensorcore),
name="dense_tensorcore.cuda",
plevel=20,
)
if target.kind.name == "cuda" and "cublas" in target.libs:
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_cublas),
wrap_topi_schedule(topi.cuda.schedule_dense_cublas),
name="dense_cublas.cuda",
plevel=25,
)
return strategy
@batch_matmul_strategy.register(["cuda", "gpu"])
def batch_matmul_strategy_cuda(attrs, inputs, out_type, target):
"""batch_matmul cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul),
wrap_topi_schedule(topi.cuda.schedule_batch_matmul),
name="batch_matmul.cuda",
plevel=10,
)
if target.kind.name == "cuda" and "cublas" in target.libs:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul_cublas),
wrap_topi_schedule(topi.generic.schedule_extern),
name="batch_matmul_cublas.cuda",
plevel=15,
)
if target.kind.name == "cuda" and nvcc.have_tensorcore(target=target):
x, y = inputs
_, M, K = get_const_tuple(x.shape)
_, N, K = get_const_tuple(y.shape)
if x.dtype in ["float16", "int8", "uint8"] and (
(M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
):
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_batch_matmul_tensorcore),
name="batch_matmul_tensorcore.cuda",
plevel=20,
)
return strategy
@sparse_dense_strategy.register(["cuda", "gpu"])
def sparse_dense_strategy_cuda(attrs, inputs, out_type, target):
"""sparse dense cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.cuda.sparse_dense),
wrap_topi_schedule(topi.cuda.schedule_sparse_dense),
name="sparse_dense.cuda",
plevel=10,
)
return strategy
@sparse_dense_padded_strategy.register(["cuda", "gpu"])
def sparse_dense_padded_strategy_cuda(attrs, inputs, out_type, target):
"""sparse dense cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.cuda.sparse_dense_padded),
wrap_topi_schedule(topi.cuda.schedule_sparse_dense_padded),
name="sparse_dense_padded.cuda",
plevel=10,
)
return strategy
@scatter_strategy.register(["cuda", "gpu"])
def scatter_cuda(attrs, inputs, out_type, target):
"""scatter cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter.cuda",
plevel=10,
)
return strategy
@scatter_add_strategy.register(["cuda", "gpu"])
def scatter_add_cuda(attrs, inputs, out_type, target):
"""scatter_add cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter_add),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_add.cuda",
plevel=10,
)
return strategy
@scatter_nd_strategy.register(["cuda", "gpu"])
def scatter_nd_cuda(attrs, inputs, out_type, target):
"""scatter_nd cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.cuda.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.cuda",
plevel=10,
    )
    return strategy
@sort_strategy.register(["cuda", "gpu"])
def sort_strategy_cuda(attrs, inputs, out_type, target):
"""sort cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort.cuda",
)
if target.kind.name == "cuda" and get_global_func(
"tvm.contrib.thrust.sort", allow_missing=True
):
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort_thrust),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort_thrust.cuda",
plevel=15,
)
return strategy
@argsort_strategy.register(["cuda", "gpu"])
def argsort_strategy_cuda(attrs, inputs, out_type, target):
"""argsort cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort.cuda",
)
if target.kind.name == "cuda" and get_global_func(
"tvm.contrib.thrust.sort", allow_missing=True
):
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort_thrust),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort_thrust.cuda",
plevel=15,
)
return strategy
@topk_strategy.register(["cuda", "gpu"])
def topk_strategy_cuda(attrs, inputs, out_type, target):
"""topk cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk.cuda",
)
if target.kind.name == "cuda" and get_global_func(
"tvm.contrib.thrust.sort", allow_missing=True
):
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk_thrust),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk_thrust.cuda",
plevel=15,
)
return strategy
@multibox_prior_strategy.register(["cuda", "gpu"])
def multibox_prior_strategy_cuda(attrs, inputs, out_type, target):
"""multibox_prior cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.cuda.multibox_prior),
wrap_topi_schedule(topi.cuda.schedule_multibox_prior),
name="multibox_prior.cuda",
)
return strategy
@multibox_transform_loc_strategy.register(["cuda", "gpu"])
def multibox_transform_loc_strategy_cuda(attrs, inputs, out_type, target):
"""multibox_transform_loc cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.cuda.multibox_transform_loc),
wrap_topi_schedule(topi.cuda.schedule_multibox_transform_loc),
name="multibox_transform_loc.cuda",
)
return strategy
@get_valid_counts_strategy.register(["cuda", "gpu"])
def get_valid_counts_strategy_cuda(attrs, inputs, out_type, target):
"""get_valid_counts cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.cuda.get_valid_counts),
wrap_topi_schedule(topi.cuda.schedule_get_valid_counts),
name="get_valid_counts.cuda",
)
return strategy
@nms_strategy.register(["cuda", "gpu"])
def nms_strategy_cuda(attrs, inputs, out_type, target):
"""nms cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.cuda.non_max_suppression),
wrap_topi_schedule(topi.cuda.schedule_nms),
name="nms.cuda",
)
return strategy
@roi_align_strategy.register(["cuda", "gpu"])
def roi_align_strategy_cuda(attrs, inputs, out_type, target):
"""roi_align cuda strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
assert layout == "NCHW", "only support nchw for now"
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.cuda.schedule_roi_align),
name="roi_align_nchw.cuda",
)
return strategy
@schedule_roi_pool.register(["cuda", "gpu"])
def schedule_roi_pool_cuda(attrs, outs, target):
"""schedule roi_pool for cuda"""
with target:
return topi.cuda.schedule_roi_pool(outs)
@proposal_strategy.register(["cuda", "gpu"])
def proposal_strategy_cuda(attrs, inputs, out_type, target):
"""proposal cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.cuda.proposal),
wrap_topi_schedule(topi.cuda.schedule_proposal),
name="proposal.cuda",
)
return strategy
@correlation_strategy.register(["cuda", "gpu"])
def correlation_strategy_cuda(attrs, inputs, out_type, target):
"""correlation cuda strategy"""
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.cuda.correlation_nchw),
wrap_topi_schedule(topi.cuda.schedule_correlation_nchw),
name="correlation.cuda",
)
return strategy
@argwhere_strategy.register(["cuda", "gpu"])
def argwhere_strategy_cuda(attrs, inputs, out_type, target):
"""argwhere cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.cuda.argwhere),
wrap_topi_schedule(topi.cuda.schedule_argwhere),
name="argwhere.cuda",
)
return strategy
|
the-stack_106_15281
|
from bs4 import BeautifulSoup
from .extracter import HTMLExtracter
class CryptoScraper:
def __init__(self):
self.base_url = 'https://www.coingecko.com/en/coins/%s/news'
async def __extract_html(self, crypto_name):
url = self.base_url % crypto_name.lower()
extracter = HTMLExtracter(url, params={})
return await extracter.extract()
def __scrap_urls(self, div):
headers = div.find_all('header')
return [header.find('a')['href'] for header in headers]
def __scrap_headings(self, div):
headers = div.find_all('header')
return [header.find('a').text for header in headers]
def __scrap_paragraphs(self, div):
paragraphs = div.find_all('div', {'class': 'post-body'})
return [paragraph.text for paragraph in paragraphs]
async def scrap(self, crypto_name):
html = await self.__extract_html(crypto_name)
soup = BeautifulSoup(html, 'html.parser')
raw_news = soup.find('div', {'id': 'news'})
if not raw_news:
return []
urls = self.__scrap_urls(raw_news)
headings = self.__scrap_headings(raw_news)
paragraphs = self.__scrap_paragraphs(raw_news)
scrapped_news = []
        for index in range(min(10, len(urls))):
url = urls[index]
heading = headings[index]
paragraph = paragraphs[index]
scrapped_news.append({
'url': url,
'heading': heading,
'paragraph': paragraph
})
return scrapped_news
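# Illustrative usage sketch (not part of the scraper module): scrap() is a
# coroutine, so it has to be driven by an event loop; the coin name is an example.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        news = await CryptoScraper().scrap('bitcoin')
        for item in news:
            print(item['heading'], '->', item['url'])

    asyncio.run(_demo())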
|
the-stack_106_15283
|
#
# Sudoku Game
# by Furkan Ercevik
# 31 October 2021
# This program uses Sudoku_solver.py and pygame to make a Sudoku game with various features
#
import copy
import datetime
from Sudoku_solver import Sudoku
from Sudoku_solver import get_square_coors
import pygame
import sys
import time
import random
# Pygame window setup
pygame.init()
SCREEN_WIDTH, SCREEN_HEIGHT = 800, 700
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
pygame.display.set_caption("Sudoku")
# CONSTANTS
LOGO = pygame.image.load("assets/letter_s.png")
GRID = pygame.image.load("assets/blank-sudoku-grid.png")
pygame.display.set_icon(LOGO)
COLORS = {"WHITE": (255, 255, 255), "BLACK": (0, 0, 0), "GREEN": (0, 255, 0), "RED": (255, 0, 0),
"LBLUE": (173, 216, 230), "BLUE": (65, 105, 225), "YELLOW": (255, 255, 0),
"DARKGRAY": (105, 105, 105), "GRAY": (220, 220, 220)}
DELAY = 0.0001
FONT = pygame.font.SysFont("Trebuchet", 35)
NOTE_FONT = pygame.font.SysFont("Trebuchet", 20)
HEADING_FONT = pygame.font.SysFont("Trebuchet", 40)
ALLOWED_MISTAKES = 3
ALLOWED_HINTS = 5
class Puzzle(object):
"""
Makes Puzzle objects that can hold Square objects
"""
start_x, start_y = (8, 7)
delta_x, deltay_y = (88, 66)
def __init__(self):
"""
Initializes a Puzzle object
"""
self.board = []
self.solved_board = []
self.random_generate_board()
self.squares = []
for r in range(9):
ls = []
for c in range(9):
# If the value is set ahead of time make sure it's immutable
if self.board[r][c]:
ls.append(Square(r, c, self.start_x + c * self.delta_x, self.start_y + r * self.deltay_y, 80, 60,
str(self.board[r][c]), mutable=False))
# Otherwise make sure it's mutable
else:
ls.append(Square(r, c, self.start_x + c * self.delta_x, self.start_y + r * self.deltay_y, 80, 60))
self.squares.append(ls)
def random_generate_board(self) -> None:
"""
Generates a random unsolved sudoku puzzle
:return: None
"""
# Generate a full random sudoku puzzle
b = [[0] * 9 for _ in range(9)]
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b[0][0] = random.choice(numbers)
sudoku = Sudoku(b)
sudoku.solve()
# Generates the blank spaces
blanks = 55
for p in random.sample(range(81), blanks):
sudoku.board[p // 9][p % 9] = 0
self.board = copy.deepcopy(sudoku.board)
sudoku.solve()
self.solved_board = sudoku.board
def check(self) -> int:
"""
Checks for correctness of all values in the squares and resets the values of the incorrect squares
:return: int representing the number of mistakes
"""
returnable = 0
# For all the squares in the puzzle check that they match with the squares of the solved board
for i in range(9):
for j in range(9):
# If the value exists
sq = self.squares[i][j].get_val()
# If sq is nonzero and it doesn't match the corresponding value in the solved board
# replace and return False
if sq and self.solved_board[i][j] != sq:
self.squares[i][j].replace("")
returnable += 1
return returnable
def filled(self) -> bool:
"""
Checks if all the squares are filled
:return: True if all squares are filled, False if they aren't
"""
for row in self.squares:
for sq in row:
if not sq.text:
return False
return True
def hint(self) -> None:
"""
        Fills a randomly chosen empty square with the correct answer from the solved board
        :return: None
"""
        idx = random.randint(0, 80)
r = idx // 9
c = idx % 9
# While the square's value is filled
while self.squares[r][c].get_val():
            idx = random.randint(0, 80)
r = idx // 9
c = idx % 9
self.squares[r][c].replace(self.solved_board[r][c])
self.squares[r][c].active = None
def visual_solve(self, window: pygame.surface, delay: float) -> bool:
"""
Creates a Sudoku object and calls solve on it with visualization on
:param window:
:param delay:
:return:
"""
sudo = Sudoku(board=self.board)
return sudo.solve(self.squares, window, delay)
def draw(self, s: pygame.surface) -> None:
"""
Draws the tiles of the board as well as the adjacent tiles if a given tile is active
:param s: pygame window
:return: None
"""
# Checks if there are any active squares
flag = False
active_sq = None
for row in self.squares:
for sq in row:
# If there is an active square highlight all the adjacent squares
if sq.active:
flag = True
active_sq = sq
sq.draw(s)
# If there are active squares highlight the adjacent squares
if flag:
draw_others = self.neighbor_squares(active_sq.r, active_sq.c)
for elem in draw_others:
r, c = elem
self.squares[r][c].draw(s, color=COLORS["BLUE"], adj=True)
def handle_event(self, event: pygame.event):
"""
Handles movement key events
:return: True if movement occurred
"""
# Get the active square
active_squares = []
for i in range(9):
active_squares.extend((list(filter(lambda s: s.active, self.squares[i]))))
if len(active_squares) > 0:
active_sq = active_squares.pop()
else:
active_sq = None
# If there is an active square
if active_sq:
row = active_sq.r
col = active_sq.c
# If there is a key press
if event.type == pygame.KEYDOWN:
# Check what kind of key was pressed
if (event.key == pygame.K_UP or event.key == pygame.K_w) and row > 0:
# If it was a movement key, toggle the activity status of the current active square and the next
# available square if there is no mutable square in that direction there is no new_active_sq
active_sq.toggle()
new_active_sq = active_sq
i = row - 1
while i >= 0:
available = self.squares[i][col].active
if available is not None:
new_active_sq = self.squares[i][col]
break
else:
i -= 1
new_active_sq.toggle()
elif (event.key == pygame.K_DOWN or event.key == pygame.K_s) and row < 8:
active_sq.toggle()
new_active_sq = active_sq
i = row + 1
while i <= 8:
available = self.squares[i][col].active
if available is not None:
new_active_sq = self.squares[i][col]
break
else:
i += 1
new_active_sq.toggle()
elif (event.key == pygame.K_RIGHT or event.key == pygame.K_d) and col < 8:
active_sq.toggle()
new_active_sq = active_sq
i = col + 1
while i <= 8:
available = self.squares[row][i].active
if available is not None:
new_active_sq = self.squares[row][i]
break
else:
i += 1
new_active_sq.toggle()
elif (event.key == pygame.K_LEFT or event.key == pygame.K_a) and col > 0:
active_sq.toggle()
new_active_sq = active_sq
i = col - 1
while i >= 0:
available = self.squares[row][i].active
if available is not None:
new_active_sq = self.squares[row][i]
break
else:
i -= 1
new_active_sq.toggle()
def neighbor_squares(self, r: int, c: int) -> list:
"""
Returns a list of neighboring squares for a given square's row and column index
:param r: row index
:param c: col index
:return: list of neighboring squares' indices
"""
neighbors = []
s = Sudoku(self.board)
neighbors.extend(s.get_col_idx(r, c))
neighbors.extend(s.get_row_idx(r, c))
neighbors.extend(get_square_coors((r, c)))
return neighbors
def deactivate(self, s: pygame.surface):
for row in self.squares:
for sq in row:
if sq.note_mode:
sq.note_mode = False
sq.note = []
sq.b_color = (255, 255, 255)
sq.draw(s)
if sq.active:
sq.active = False
sq.b_color = (255, 255, 255)
sq.draw(s)
pygame.display.update(sq.rect)
class Square(object):
"""
Makes Square objects that represents the individual squares of the Sudoku puzzle
"""
def __init__(self, r, c, x, y, w, h, text="", mutable=True):
"""
Initializes a Square object
:param x: x_position
:param y: y_position
:param w: width
:param h: height
:param text: current value
:param mutable: determines if the text's values will be mutable by the user; default value is True
"""
self.r = r
self.c = c
self.text = str(text)
self.rect = pygame.Rect(x, y, w, h)
self.text_surface = FONT.render(text, True, COLORS["BLACK"])
self.b_color = COLORS["WHITE"]
self.active = None if not mutable else False
self.note_mode = False
self.note = []
def handle_event(self, event: pygame.event) -> None:
"""
Handles events for Square objects
:param event: pygame.event
:return: None
"""
# If the event is a mouse click
if event.type == pygame.MOUSEBUTTONDOWN:
# Check if the square can be active
if self.active is not None:
# Check if the rectangle collides with the event
if self.rect.collidepoint(event.pos):
self.active = not self.active
self.b_color = COLORS["LBLUE"] if self.active else COLORS['WHITE']
# Check if note_mode is on and if so disable it and reset the notes
self.note_mode = False
self.note = []
else:
self.active = False
self.b_color = COLORS["WHITE"]
# If the event is a keypress
if event.type == pygame.KEYDOWN:
# If note mode is enabled
if self.note_mode:
# Was backspace key pressed
if event.key == pygame.K_BACKSPACE:
try:
self.note.pop()
# If there are no more note numbers left set the box's note mode to False
except IndexError:
self.note_mode = False
# Add the integer to the notes
try:
if int(event.unicode) in range(1, 10):
self.note.append(event.unicode)
self.note.sort()
except ValueError:
pass
# If the cell is active check for other key presses
if self.active:
# If the key press is a backspace
if event.key == pygame.K_BACKSPACE:
self.text = ""
# If the event key is a movement key render the text surface
elif event.key in [pygame.K_UP, pygame.K_DOWN, pygame.K_RIGHT, pygame.K_LEFT, pygame.K_w, pygame.K_s,
pygame.K_a, pygame.K_d]:
self.text_surface = FONT.render(self.text, True, COLORS["RED"])
# Toggle note_mode when key N is pressed
elif event.key == pygame.K_n:
self.note_mode = not self.note_mode
self.b_color = COLORS["GRAY"] if self.note_mode else COLORS["LBLUE"]
else:
try:
val = int(event.unicode)
self.text = event.unicode if val in range(1, 10) else ""
except ValueError:
self.text = ""
self.text_surface = FONT.render(self.text, True, COLORS["RED"])
def draw(self, s: pygame.surface, color=None, adj=False) -> None:
"""
Blits a white rect onto the previous text to replace it, renders the text surface, then resets the text color
If note mode is enabled blit a light gray rectangle
:param adj: flag to determine if the square is being drawn adjacently
:param s: pygame surface to draw on
        :param color: auxiliary color choice for drawing highlighted squares
:return: None
"""
if not self.note_mode:
# Reset the rectangle
if color:
pygame.draw.rect(s, color, self.rect)
else:
pygame.draw.rect(s, self.b_color, self.rect)
# Output the text onto the screen with the current surface
s.blit(self.text_surface, (self.rect.x + 32, self.rect.y + 22))
# Reset the text surface color
color = COLORS["RED"] if self.active is not None else COLORS["BLACK"]
self.text_surface = FONT.render(str(self.text), True, color)
else:
if adj:
pygame.draw.rect(s, (135, 206, 250), self.rect)
else:
pygame.draw.rect(s, self.b_color, self.rect)
for v in self.note:
integer = int(v)
surface = NOTE_FONT.render(v, True, COLORS["DARKGRAY"])
if integer in range(1, 4):
s.blit(surface, (self.rect.x + 5 + (integer-1) * 20, self.rect.y))
elif integer in range(4, 7):
s.blit(surface,
(self.rect.x + 5 + (integer-4) * 20, self.rect.y + 20))
elif integer in range(7, 10):
s.blit(surface,
(self.rect.x + 5 + (integer-7) * 20, self.rect.y + 40))
pygame.display.update(self.rect)
def toggle(self):
"""
        Toggles the activity status (when the cell is mutable) and updates the background color of the box
:return: None
"""
if self.active is not None:
self.active = not self.active
if self.active:
self.b_color = COLORS["LBLUE"]
else:
self.b_color = COLORS["WHITE"]
# For use with Sudoku algorithms
def delete(self) -> None:
"""
Renders the text surface with a white color
:return: None
"""
# Clear the number values
self.text_surface = FONT.render(str(self.text), True, COLORS["WHITE"])
self.text = ""
def replace(self, value: str) -> None:
"""
Renders the text surface with a green color
:param value: the value to replace the original text with
:return: None
"""
self.text = value
self.text_surface = FONT.render(str(self.text), True, (0, 255, 0))
def get_val(self) -> int:
"""
Provides the value of the Square object
:return: an integer representation of the text stored within the square object
"""
return int(self.text) if self.text else 0
def instructions():
print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
print("| Welcome to my Sudoku game!! |")
print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
print("| CONTROLS |")
print("| MOUSECLICK = Select/Deselect a square |")
print("| ENTER = Check for accuracy |")
print("| H = Ask for a hint |")
print("| N = enable/disable notes for a square |")
print("| SPACEBAR = Solve puzzle entirely |")
print("| WASD = up, down, left, right, respectively |")
print("| UP,DOWN,LEFT,RIGHT = up, down, left, right |")
print("| ESC = QUIT |")
print("+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+")
def play(delay=0.001) -> None:
"""
Plays the sudoku game
:param delay: delay of the visual solve
:return: None
"""
instructions()
# Get the start time
start_time = datetime.datetime.now()
# Create the Puzzle object to represent the game board
puzzle = Puzzle()
hints_left = ALLOWED_HINTS
mistakes = 0
# Blit the sudoku grid
screen.blit(GRID, (0, 0))
# Game loop
solved = False
clock = pygame.time.Clock()
while True:
# Draw the clock on the screen
elapsed_time = datetime.datetime.now() - start_time
s = elapsed_time.seconds
minutes, seconds = divmod(s, 60)
time_str = f'Time: {minutes:02}:{seconds:02}'
time_surface = HEADING_FONT.render(str(time_str), True, COLORS["WHITE"])
# Clear the old time with the new time
pygame.draw.rect(screen, COLORS["BLACK"], pygame.Rect(SCREEN_WIDTH-195, SCREEN_HEIGHT - 60, 200, 60))
screen.blit(time_surface, (SCREEN_WIDTH-195, SCREEN_HEIGHT-60))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# Call handle_event on the squares
for sq_row in puzzle.squares:
for sq in sq_row:
sq.handle_event(event)
# Let the puzzle handle the movement events
puzzle.handle_event(event)
# If a key press is heard check for various conditions
if event.type == pygame.KEYDOWN:
# If key is ESCAPE quit the game
if event.key == pygame.K_ESCAPE:
print("Game ended.")
pygame.quit()
sys.exit()
# If it's ENTER check the puzzle
# 13 is the event key representing ENTER
if event.key == 13:
mistake = puzzle.check()
win = puzzle.filled()
# If there was a mistake
if mistake:
mistakes += mistake
# If too many mistakes are made
if mistakes >= ALLOWED_MISTAKES:
print("Puzzle could not be solved.")
puzzle.visual_solve(screen, delay=0)
pygame.quit()
sys.exit()
# If the player correctly fills the puzzle
elif win:
print(f"Puzzle was solved in {minutes} minutes and {seconds} seconds")
if minutes < 5:
print(f"Hmmm are you a puzzle solver too??")
pygame.quit()
sys.exit()
# For auto solving
if event.key == pygame.K_SPACE:
# Deactivate all the notes and highlighting
puzzle.deactivate(screen)
# Solve with the delay
solved = puzzle.visual_solve(screen, delay=delay)
# For hints
if event.key == pygame.K_h:
if hints_left > 0:
hints_left -= 1
puzzle.hint()
else:
print("Sorry buddy you're out of hints.")
# If the program takes too long just quit lmao
if datetime.datetime.now() - start_time > datetime.timedelta(minutes=10):
pygame.quit()
print("Bummer...the puzzle appears unsolvable.")
sys.exit()
# Draw the mistakes in the bottom left
mistake_surface = HEADING_FONT.render("Mistakes: " + str(mistakes), True, COLORS["RED"])
pygame.draw.rect(screen, COLORS["BLACK"], pygame.Rect(30, SCREEN_HEIGHT - 60, 200, 400))
screen.blit(mistake_surface, (30, SCREEN_HEIGHT - 60))
# Draw the Sudoku name
title_surface = HEADING_FONT.render("Welcome to Sudoku", True, COLORS["YELLOW"])
pygame.draw.rect(screen, COLORS["BLACK"], pygame.Rect(260, SCREEN_HEIGHT - 60, 270, 400))
screen.blit(title_surface, (260, SCREEN_HEIGHT - 60))
# Draw the boxes
puzzle.draw(screen)
# Update the display
pygame.display.flip()
clock.tick(30)
# If the puzzle is solved
if solved:
print(f"Puzzle was auto-solved in {minutes} minutes and {seconds} seconds.")
time.sleep(5)
pygame.quit()
sys.exit()
play(delay=DELAY)
|
the-stack_106_15286
|
from dataclasses import dataclass
@dataclass
class GraphConfig:
min_weight: int = 1
min_degree: int = 1
community_detection: bool = False
min_community_size: int = 1
as_undirected: bool = True
@dataclass
class DataCollectionConfig:
exploration_depth: int = 3
random_subset_mode: str = "percent"
random_subset_size: int = 20
expansion_type: str = "coreball"
degree: int = 2
max_nodes_per_hop: int = 1000
number_of_nodes: int = None
@dataclass
class SamplingConfig:
graph = GraphConfig() # graph construction parameters
data_collection = DataCollectionConfig() # spikyball parameters
def __init__(self, graph, data_collection):
self.graph = graph
self.data_collection = data_collection
@dataclass
class TwitterConfig:
min_mentions: int = 0
max_day_old: int = 30
max_tweets_per_user: int = 200
nb_popular_tweets: int = 10
users_to_remove = []
api_version: int = 1
@dataclass
class SyntheticConfig:
min_degree: int = 1
@dataclass
class WikipediaConfig:
lang: str = 'en'
pages_ignored = []
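# Illustrative usage sketch added for clarity (not part of the original module): the two
# sub-configs are built separately and then wrapped in SamplingConfig.
if __name__ == "__main__":
    graph_cfg = GraphConfig(min_weight=2, community_detection=True, min_community_size=3)
    collection_cfg = DataCollectionConfig(exploration_depth=2, random_subset_size=10)
    sampling_cfg = SamplingConfig(graph_cfg, collection_cfg)
    print(sampling_cfg.graph)
    print(sampling_cfg.data_collection)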
|
the-stack_106_15287
|
from typing import Optional
from fastapi import Query
from maggma.api.query_operator import QueryOperator
from maggma.api.utils import STORE_PARAMS
from collections import defaultdict
class SubstrateStructureQuery(QueryOperator):
"""
Method to generate a query for film and substrate data.
"""
def query(
self,
film_orientation: Optional[str] = Query(
None,
description="Comma separated integers defining the film surface orientation.",
),
substrate_orientation: Optional[str] = Query(
None,
description="Comma separated integers defining the substrate surface orientation.",
),
) -> STORE_PARAMS:
crit = defaultdict(dict) # type: dict
if film_orientation:
crit["film_orient"] = " ".join(
[entry.strip() for entry in film_orientation.split(",")]
)
if substrate_orientation:
crit["orient"] = " ".join(
[entry.strip() for entry in substrate_orientation.split(",")]
)
return {"criteria": crit}
def ensure_indexes(self): # pragma: no cover
keys = ["film_id", "sub_id", "sub_form", "film_orient", "orient"]
return [(key, False) for key in keys]
class EnergyAreaQuery(QueryOperator):
"""
Method to generate a query for ranges of substrate
elastic energies and minimum coincident areas.
"""
def query(
self,
area_max: Optional[float] = Query(
None,
            description="Maximum value for the minimum coincident interface area in Å².",
),
area_min: Optional[float] = Query(
None,
            description="Minimum value for the minimum coincident interface area in Å².",
),
energy_max: Optional[float] = Query(
None, description="Maximum value for the energy in meV.",
),
energy_min: Optional[float] = Query(
None, description="Minimum value for the energy in meV.",
),
) -> STORE_PARAMS:
crit = defaultdict(dict) # type: dict
d = {
"area": [area_min, area_max],
"energy": [energy_min, energy_max],
}
for entry in d:
if d[entry][0] is not None:
crit[entry]["$gte"] = d[entry][0]
if d[entry][1] is not None:
crit[entry]["$lte"] = d[entry][1]
return {"criteria": crit}
def ensure_indexes(self): # pragma: no cover
keys = ["area", "energy"]
return [(key, False) for key in keys]
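# Illustrative sketch added for clarity (not part of the original module): calling the
# operator directly shows the Mongo-style range criteria it produces, e.g. area <= 300
# and energy >= 10 give {'area': {'$lte': 300.0}, 'energy': {'$gte': 10.0}}.
if __name__ == "__main__":
    op = EnergyAreaQuery()
    params = op.query(area_max=300.0, area_min=None, energy_max=None, energy_min=10.0)
    print(dict(params["criteria"]))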
|
the-stack_106_15288
|
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import pandas as pd
import sys
from copy import deepcopy
from ..model import RunObject
from ..utils import get_in, logger
hyper_types = ["list", "grid", "random"]
default_max_evals = 10
def get_generator(spec, execution):
tuning_strategy = spec.tuning_strategy
hyperparams = spec.hyperparams
if not spec.param_file and not hyperparams:
return None
if tuning_strategy and tuning_strategy not in hyper_types:
raise ValueError("unsupported hyperparams type ({})".format(tuning_strategy))
if spec.param_file and hyperparams:
raise ValueError("hyperparams and param_file cannot be used together")
obj = None
if spec.param_file:
obj = execution.get_dataitem(spec.param_file)
if not tuning_strategy and obj.suffix == ".csv":
tuning_strategy = "list"
if not tuning_strategy or tuning_strategy in ["grid", "random"]:
hyperparams = json.loads(obj.get())
if not tuning_strategy or tuning_strategy == "grid":
return GridGenerator(hyperparams)
if tuning_strategy == "random":
return RandomGenerator(hyperparams)
if obj:
df = obj.as_df()
else:
df = pd.DataFrame(hyperparams)
return ListGenerator(df)
class TaskGenerator:
def generate(self, run: RunObject):
pass
class GridGenerator(TaskGenerator):
def __init__(self, hyperparams):
self.hyperparams = hyperparams
def generate(self, run: RunObject):
i = 0
params = self.grid_to_list()
max = len(next(iter(params.values())))
while i < max:
newrun = deepcopy(run)
newrun.spec.hyperparams = None
newrun.spec.param_file = None
param_dict = newrun.spec.parameters or {}
for key, values in params.items():
param_dict[key] = values[i]
newrun.spec.parameters = param_dict
newrun.metadata.iteration = i + 1
i += 1
yield newrun
def grid_to_list(self):
arr = {}
lastlen = 1
for pk, pv in self.hyperparams.items():
for p in arr.keys():
arr[p] = arr[p] * len(pv)
expanded = []
for i in range(len(pv)):
expanded += [pv[i]] * lastlen
arr[pk] = expanded
lastlen = lastlen * len(pv)
return arr
class RandomGenerator(TaskGenerator):
def __init__(self, hyperparams: dict):
self.hyperparams = hyperparams
self.max_evals = default_max_evals
if "MAX_EVALS" in hyperparams:
self.max_evals = hyperparams.pop("MAX_EVALS")
def generate(self, run: RunObject):
i = 0
while i < self.max_evals:
newrun = deepcopy(run)
newrun.spec.hyperparams = None
newrun.spec.param_file = None
param_dict = newrun.spec.parameters or {}
params = {k: random.sample(v, 1)[0] for k, v in self.hyperparams.items()}
for key, values in params.items():
param_dict[key] = values
newrun.spec.parameters = param_dict
newrun.metadata.iteration = i + 1
i += 1
yield newrun
class ListGenerator(TaskGenerator):
def __init__(self, df):
self.df = df
def generate(self, run: RunObject):
i = 0
for _, row in self.df.iterrows():
newrun = deepcopy(run)
newrun.spec.hyperparams = None
newrun.spec.param_file = None
param_dict = newrun.spec.parameters or {}
for key, values in row.to_dict().items():
param_dict[key] = values
newrun.spec.parameters = param_dict
newrun.metadata.iteration = i + 1
i += 1
yield newrun
def selector(results: list, criteria):
if not criteria:
return 0, 0
idx = criteria.find(".")
if idx < 0:
op = "max"
else:
op = criteria[:idx]
criteria = criteria[idx + 1 :]
best_id = 0
best_item = 0
if op == "max":
best_val = sys.float_info.min
elif op == "min":
best_val = sys.float_info.max
else:
logger.error("unsupported selector {}.{}".format(op, criteria))
return 0, 0
i = 0
for task in results:
state = get_in(task, ["status", "state"])
id = get_in(task, ["metadata", "iteration"])
val = get_in(task, ["status", "results", criteria])
if isinstance(val, str):
try:
val = float(val)
except Exception:
val = None
if state != "error" and val is not None:
if (op == "max" and val > best_val) or (op == "min" and val < best_val):
best_id, best_item, best_val = id, i, val
i += 1
return best_item, best_id
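# Illustrative sketch added for clarity (not part of the original module): `selector`
# scans iteration results and returns (list index, iteration id) of the best run for a
# "<op>.<metric>" criteria string such as "max.accuracy"; runs in error state are skipped.
if __name__ == "__main__":
    demo_results = [
        {"status": {"state": "completed", "results": {"accuracy": 0.71}}, "metadata": {"iteration": 1}},
        {"status": {"state": "completed", "results": {"accuracy": 0.83}}, "metadata": {"iteration": 2}},
        {"status": {"state": "error", "results": {"accuracy": 0.99}}, "metadata": {"iteration": 3}},
    ]
    print(selector(demo_results, "max.accuracy"))  # -> (1, 2): second entry, iteration 2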
|
the-stack_106_15289
|
"""
This module provides a set of utilities for interpreting and creating requirements files
(e.g. pip's `requirements.txt`), which is useful for managing ML software environments.
"""
import json
import sys
import subprocess
import tempfile
import os
import pkg_resources
import importlib_metadata
from itertools import filterfalse, chain
from collections import namedtuple
import logging
from mlflow.exceptions import MlflowException
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.autologging_utils.versioning import _strip_dev_version_suffix
from mlflow.utils.databricks_utils import is_in_databricks_runtime
from packaging.version import Version, InvalidVersion
_logger = logging.getLogger(__name__)
def _is_comment(line):
return line.startswith("#")
def _is_empty(line):
return line == ""
def _strip_inline_comment(line):
return line[: line.find(" #")].rstrip() if " #" in line else line
def _is_requirements_file(line):
return line.startswith("-r ") or line.startswith("--requirement ")
def _is_constraints_file(line):
return line.startswith("-c ") or line.startswith("--constraint ")
def _join_continued_lines(lines):
"""
Joins lines ending with '\\'.
    >>> list(_join_continued_lines(["a\\", "b\\", "c"]))
    ['abc']
"""
continued_lines = []
for line in lines:
if line.endswith("\\"):
continued_lines.append(line.rstrip("\\"))
else:
continued_lines.append(line)
yield "".join(continued_lines)
continued_lines.clear()
# The last line ends with '\'
if continued_lines:
yield "".join(continued_lines)
# Represents a pip requirement.
#
# :param req_str: A requirement string (e.g. "scikit-learn == 0.24.2").
# :param is_constraint: A boolean indicating whether this requirement is a constraint.
_Requirement = namedtuple("_Requirement", ["req_str", "is_constraint"])
def _parse_requirements(requirements_file, is_constraint):
"""
A simplified version of `pip._internal.req.parse_requirements` which performs the following
operations on the given requirements file and yields the parsed requirements.
- Remove comments and blank lines
- Join continued lines
- Resolve requirements file references (e.g. '-r requirements.txt')
- Resolve constraints file references (e.g. '-c constraints.txt')
:param requirements_file: A string path to a requirements file on the local filesystem.
:param is_constraint: Indicates the parsed requirements file is a constraint file.
:return: A list of ``_Requirement`` instances.
References:
- `pip._internal.req.parse_requirements`:
https://github.com/pypa/pip/blob/7a77484a492c8f1e1f5ef24eaf71a43df9ea47eb/src/pip/_internal/req/req_file.py#L118
- Requirements File Format:
https://pip.pypa.io/en/stable/cli/pip_install/#requirements-file-format
- Constraints Files:
https://pip.pypa.io/en/stable/user_guide/#constraints-files
"""
with open(requirements_file) as f:
lines = f.read().splitlines()
lines = map(str.strip, lines)
lines = map(_strip_inline_comment, lines)
lines = _join_continued_lines(lines)
lines = filterfalse(_is_comment, lines)
lines = filterfalse(_is_empty, lines)
for line in lines:
if _is_requirements_file(line):
req_file = line.split(maxsplit=1)[1]
# If `req_file` is an absolute path, `os.path.join` returns `req_file`:
# https://docs.python.org/3/library/os.path.html#os.path.join
abs_path = os.path.join(os.path.dirname(requirements_file), req_file)
yield from _parse_requirements(abs_path, is_constraint=False)
elif _is_constraints_file(line):
req_file = line.split(maxsplit=1)[1]
abs_path = os.path.join(os.path.dirname(requirements_file), req_file)
yield from _parse_requirements(abs_path, is_constraint=True)
else:
yield _Requirement(line, is_constraint)
def _flatten(iterable):
return chain.from_iterable(iterable)
def _canonicalize_package_name(pkg_name):
return pkg_name.lower().replace("_", "-")
_MODULE_TO_PACKAGES = importlib_metadata.packages_distributions()
def _module_to_packages(module_name):
"""
Returns a list of packages that provide the specified module.
"""
return _MODULE_TO_PACKAGES.get(module_name, [])
def _get_requires_recursive(pkg_name):
"""
Recursively yields both direct and transitive dependencies of the specified package.
"""
if pkg_name not in pkg_resources.working_set.by_key:
return
package = pkg_resources.working_set.by_key[pkg_name]
reqs = package.requires()
if len(reqs) == 0:
return
for req in reqs:
yield req.name
yield from _get_requires_recursive(req.name)
def _prune_packages(packages):
"""
Prunes packages required by other packages. For example, `["scikit-learn", "numpy"]` is pruned
to `["scikit-learn"]`.
"""
packages = set(packages)
requires = set(_flatten(map(_get_requires_recursive, packages)))
return packages - requires
def _run_command(cmd):
"""
Runs the specified command. If it exits with non-zero status, `MlflowException` is raised.
"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
if proc.returncode != 0:
msg = "\n".join(
[
f"Encountered an unexpected error while running {cmd}",
f"exit status: {proc.returncode}",
f"stdout: {stdout}",
f"stderr: {stderr}",
]
)
raise MlflowException(msg)
def _get_installed_version(package, module=None):
"""
Obtains the installed package version using `importlib_metadata.version`. If it fails, use
`__import__(module or package).__version__`.
"""
try:
version = importlib_metadata.version(package)
except importlib_metadata.PackageNotFoundError:
# Note `importlib_metadata.version(package)` is not necessarily equal to
# `__import__(package).__version__`. See the example for pytorch below.
#
# Example
# -------
# $ pip install torch==1.9.0
# $ python -c "import torch; print(torch.__version__)"
# 1.9.0+cu102
# $ python -c "import importlib_metadata; print(importlib_metadata.version('torch'))"
# 1.9.0
version = __import__(module or package).__version__
# In Databricks, strip a dev version suffix for pyspark (e.g. '3.1.2.dev0' -> '3.1.2')
# and make it installable from PyPI.
if package == "pyspark" and is_in_databricks_runtime():
version = _strip_dev_version_suffix(version)
return version
def _infer_requirements(model_uri, flavor):
"""
Infers the pip requirements of the specified model by creating a subprocess and loading
the model in it to determine which packages are imported.
:param model_uri: The URI of the model.
:param: flavor: The flavor name of the model.
:return: A list of inferred pip requirements.
"""
    # Import `_capture_modules` here to avoid causing circular imports.
from mlflow.utils import _capture_modules
local_model_path = _download_artifact_from_uri(model_uri)
# Run `_capture_modules.py` to capture modules imported during the loading procedure
with tempfile.TemporaryDirectory() as tmpdir:
output_file = os.path.join(tmpdir, "output.txt")
_run_command(
[
sys.executable,
_capture_modules.__file__,
"--model-path",
local_model_path,
"--flavor",
flavor,
"--output-file",
output_file,
"--sys-path",
json.dumps(sys.path),
],
)
with open(output_file) as f:
modules = f.read().splitlines()
packages = _flatten(map(_module_to_packages, modules))
packages = map(_canonicalize_package_name, packages)
excluded_packages = [
# Certain packages (e.g. scikit-learn 0.24.2) imports `setuptools` or `pkg_resources`
# (a module provided by `setuptools`) to process or interact with package metadata.
# It should be safe to exclude `setuptools` because it's rare to encounter a python
# environment where `setuptools` is not pre-installed.
"setuptools",
# Certain flavors (e.g. pytorch) import mlflow while loading a model, but mlflow should
# not be counted as a model requirement.
"mlflow",
]
packages = _prune_packages(packages) - set(excluded_packages)
return sorted(map(_get_pinned_requirement, packages))
def _get_local_version_label(version):
"""
Extracts a local version label from `version`.
:param version: A version string.
"""
try:
return Version(version).local
except InvalidVersion:
return None
def _strip_local_version_label(version):
"""
Strips a local version label in `version`.
Local version identifiers:
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
:param version: A version string to strip.
"""
class IgnoreLocal(Version):
@property
def local(self):
return None
try:
return str(IgnoreLocal(version))
except InvalidVersion:
return version
def _get_pinned_requirement(package, version=None, module=None):
"""
Returns a string representing a pinned pip requirement to install the specified package and
version (e.g. 'mlflow==1.2.3').
:param package: The name of the package.
:param version: The version of the package. If None, defaults to the installed version.
    :param module: The name of the top-level module provided by the package. For example,
if `package` is 'scikit-learn', `module` should be 'sklearn'. If None, defaults
to `package`.
"""
if version is None:
version_raw = _get_installed_version(module or package)
local_version_label = _get_local_version_label(version_raw)
if local_version_label:
version = _strip_local_version_label(version_raw)
msg = (
"Found {package} version ({version_raw}) contains a local version label "
"(+{local_version_label}). MLflow logged a pip requirement for this package as "
"'{package}=={version_logged}' without the local version label to make it "
"installable from PyPI. To specify pip requirements containing local version "
"labels, please use `conda_env` or `pip_requirements`."
).format(
package=package,
version_raw=version_raw,
version_logged=version,
local_version_label=local_version_label,
)
_logger.warning(msg)
else:
version = version_raw
return f"{package}=={version}"
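# Illustrative sketch added for clarity (not part of the original module; the file name
# below is an assumption): parse a local requirements file and pin an installed package.
if __name__ == "__main__":
    for req in _parse_requirements("requirements.txt", is_constraint=False):
        print(req.req_str, "(constraint)" if req.is_constraint else "")
    # e.g. 'mlflow==1.20.2', depending on the installed version
    print(_get_pinned_requirement("mlflow"))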
|
the-stack_106_15291
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from boa.builtins import concat, sha256, state, ToScriptHash
from boa.interop.System.Runtime import CheckWitness, Serialize, Deserialize, GetTime, Notify
from boa.interop.System.Storage import GetContext, Put, Get
from boa.interop.Ontology.Native import Invoke
OPERATIONS = [
    'grant_copyright',     # grant authorization
    'check_copyright',     # check authorization
    'create_copyright',    # create a copyright
    'transfer_copyright',  # trade a copyright
    'buy_copyright',       # purchase a copyright
]
OWNER_KEY_PREFIX = 'copyright_owner_'
GRANTS_KEY_PREFIX = 'copyright_grants_'
ctx = GetContext()
def Main(operation, args):
if operation == 'grant_copyright':
return grant_copyright(args[0], args[1], args[2])
elif operation == 'check_copyright':
return check_copyright(args[0], args[1])
elif operation == 'create_copyright':
return create_copyright(args[0], args[1])
elif operation == 'transfer_copyright':
return transfer_copyright(args[0], args[1])
elif operation == 'buy_copyright':
return buy_copyright(args[0], args[1])
else:
return False
def create_copyright(address, copyright):
copyright = sha256(copyright)
if not CheckWitness(address):
return False
if check_owner_exists(copyright):
return False
Notify(['create', copyright, address])
return set_owner(copyright, address)
def check_copyright(copyright, address):
copyright = sha256(copyright)
owner = get_owner(copyright)
if owner == address:
return True
grants = get_grants(copyright)
if not grants:
return False
for grant, expired in grants:
if grant == address:
return expired is None or expired > GetTime()
return False
def grant_copyright(copyright, address, expired):
copyright = sha256(copyright)
if not check_owner(copyright):
return False
Notify(['grant', copyright, address])
return put_grants(copyright, address, expired)
def transfer_copyright(copyright, address):
copyright = sha256(copyright)
if not check_owner(copyright):
return False
Notify(['transfer', copyright, address])
return set_owner(copyright, address)
def buy_copyright(copyright, address):
copyright = sha256(copyright)
if check_owner(copyright):
return False
owner_adr = get_owner(copyright)
Notify(['buy', copyright, address])
param = state(address, owner_adr, 10)
OntContract = ToScriptHash("AFmseVrdL9f9oyCzZefL9tG6UbvhUMqNMV")
res = Invoke(0, OntContract, "transfer", [param])
if res != b'\x01':
raise Exception("transfer ont error.")
Notify("transferONT succeed")
return put_grants(copyright, address, 120)
def get_owner(copyright):
key = get_key(OWNER_KEY_PREFIX, copyright)
return Get(ctx, key)
def set_owner(copyright, address):
key = get_key(OWNER_KEY_PREFIX, copyright)
return Put(ctx, key, address)
def check_owner(copyright):
owner = get_owner(copyright)
return owner and CheckWitness(owner)
def check_owner_exists(copyright):
owner = get_owner(copyright)
return not not owner
def get_grants(copyright):
key = get_key(GRANTS_KEY_PREFIX, copyright)
grants = Get(ctx, key)
if not grants:
return False
return [Deserialize(x) for x in grants]
def put_grants(copyright, address, expired):
key = get_key(GRANTS_KEY_PREFIX, copyright)
grants = get_grants(copyright)
if not grants:
grants = []
grants.append([address, GetTime() + expired if expired != 0 else None])
serialized = [Serialize(x) for x in grants]
return Put(ctx, key, serialized)
def get_key(prefix, copyright):
return concat(prefix, copyright)
|
the-stack_106_15294
|
from collections import OrderedDict
import torch
from torch import nn
from torchvision.models import densenet169, densenet201
from torchdistill.models.custom.bottleneck.base import BottleneckBase
from torchdistill.models.custom.bottleneck.processor import get_bottleneck_processor
from torchdistill.models.registry import register_model_class, register_model_func
@register_model_class
class Bottleneck4DenseNets(BottleneckBase):
"""
Head Network Distillation: Splitting Distilled Deep Neural Networks for Resource-constrained Edge Computing Systems
"""
def __init__(self, bottleneck_channel=12, bottleneck_idx=7, compressor=None, decompressor=None):
modules = [
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, bottleneck_channel, kernel_size=2, stride=2, padding=1, bias=False),
nn.BatchNorm2d(bottleneck_channel),
nn.ReLU(inplace=True),
nn.Conv2d(bottleneck_channel, 512, kernel_size=2, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=2, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 256, kernel_size=2, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=2, stride=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=2, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2)
]
encoder = nn.Sequential(*modules[:bottleneck_idx])
decoder = nn.Sequential(*modules[bottleneck_idx:])
super().__init__(encoder=encoder, decoder=decoder, compressor=compressor, decompressor=decompressor)
@register_model_class
class CustomDenseNet(nn.Module):
def __init__(self, bottleneck, short_feature_names, org_densenet):
super().__init__()
module_dict = OrderedDict()
module_dict['bottleneck'] = bottleneck
short_features_set = set(short_feature_names)
if 'classifier' in short_features_set:
short_features_set.remove('classifier')
for child_name, child_module in org_densenet.features.named_children():
if child_name in short_features_set:
module_dict[child_name] = child_module
self.features = nn.Sequential(module_dict)
self.relu = nn.ReLU(inplace=True)
self.adaptive_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = org_densenet.classifier
def forward(self, x):
z = self.features(x)
z = self.relu(z)
z = self.adaptive_avgpool(z)
z = torch.flatten(z, 1)
return self.classifier(z)
@register_model_func
def custom_densenet169(bottleneck_channel=12, bottleneck_idx=7, compressor=None, decompressor=None,
short_feature_names=None, **kwargs):
if short_feature_names is None:
short_feature_names = ['denseblock3', 'transition3', 'denseblock4', 'norm5']
if compressor is not None:
compressor = get_bottleneck_processor(compressor['name'], **compressor['params'])
if decompressor is not None:
decompressor = get_bottleneck_processor(decompressor['name'], **decompressor['params'])
bottleneck = Bottleneck4DenseNets(bottleneck_channel, bottleneck_idx, compressor, decompressor)
org_model = densenet169(**kwargs)
return CustomDenseNet(bottleneck, short_feature_names, org_model)
@register_model_func
def custom_densenet201(bottleneck_channel=12, bottleneck_idx=7, compressor=None, decompressor=None,
short_feature_names=None, **kwargs):
if short_feature_names is None:
short_feature_names = ['denseblock3', 'transition3', 'denseblock4', 'norm5']
if compressor is not None:
compressor = get_bottleneck_processor(compressor['name'], **compressor['params'])
if decompressor is not None:
decompressor = get_bottleneck_processor(decompressor['name'], **decompressor['params'])
bottleneck = Bottleneck4DenseNets(bottleneck_channel, bottleneck_idx, compressor, decompressor)
org_model = densenet201(**kwargs)
return CustomDenseNet(bottleneck, short_feature_names, org_model)
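# Illustrative sketch added for clarity (not part of the original module): build the custom
# DenseNet-169 with a 12-channel bottleneck (no pretrained weights) and inspect the injected
# bottleneck module and the overall parameter count.
if __name__ == "__main__":
    model = custom_densenet169(bottleneck_channel=12, bottleneck_idx=7)
    print(model.features.bottleneck)
    n_params = sum(p.numel() for p in model.parameters())
    print("custom_densenet169 parameters: {}".format(n_params))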
|
the-stack_106_15295
|
# -*- coding: utf-8 -*-
# pylint: disable=C1801, C0330
import codecs
import os
import sys
import demjson
from bs4 import BeautifulSoup
import datetime_z
import PixivHelper
from PixivException import PixivException
class FanboxArtist(object):
artistId = 0
creatorId = ""
nextUrl = None
hasNextPage = False
_tzInfo = None
# require additional API call
artistName = ""
artistToken = ""
SUPPORTING = 0
FOLLOWING = 1
CUSTOM = 2
@classmethod
def parseArtistIds(cls, page):
ids = list()
js = demjson.decode(page)
if "error" in js and js["error"]:
raise PixivException("Error when requesting Fanbox", 9999, page)
if "body" in js and js["body"] is not None:
js_body = js["body"]
if "supportingPlans" in js["body"]:
js_body = js_body["supportingPlans"]
for creator in js_body:
ids.append(creator["user"]["userId"])
return ids
def __init__(self, artist_id, artist_name, creator_id, tzInfo=None):
self.artistId = int(artist_id)
self.artistName = artist_name
self.creatorId = creator_id
self._tzInfo = tzInfo
def __str__(self):
return f"FanboxArtist({self.artistId}, {self.creatorId}, {self.artistName})"
def parsePosts(self, page):
js = demjson.decode(page)
if "error" in js and js["error"]:
raise PixivException(
"Error when requesting Fanbox artist: {0}".format(self.artistId), 9999, page)
if js["body"] is not None:
js_body = js["body"]
posts = list()
if "creator" in js_body:
self.artistName = js_body["creator"]["user"]["name"]
if "post" in js_body:
# new api
post_root = js_body["post"]
else:
# https://www.pixiv.net/ajax/fanbox/post?postId={0}
# or old api
post_root = js_body
for jsPost in post_root["items"]:
post_id = int(jsPost["id"])
post = FanboxPost(post_id, self, jsPost, tzInfo=self._tzInfo)
posts.append(post)
# sanity check
assert (self.artistId == int(jsPost["user"]["userId"])), "Different user id from constructor!"
self.nextUrl = post_root["nextUrl"]
if self.nextUrl is not None and len(self.nextUrl) > 0:
self.hasNextPage = True
return posts
class FanboxPost(object):
imageId = 0
imageTitle = ""
coverImageUrl = ""
worksDate = ""
worksDateDateTime = None
updatedDate = ""
updatedDateDatetime = None
# image|text|file|article|video|entry
_supportedType = ["image", "text", "file", "article", "video", "entry"]
type = ""
body_text = ""
images = None
likeCount = 0
parent = None
is_restricted = False
feeRequired = 0
# compatibility
imageMode = ""
imageCount = 0
_tzInfo = None
linkToFile = None
# not implemented
worksResolution = ""
worksTools = ""
searchTags = ""
imageTags = list()
bookmark_count = 0
image_response_count = 0
embeddedFiles = None
provider = None
def __init__(self, post_id, parent, page, tzInfo=None):
self.images = list()
self.embeddedFiles = list()
self.imageId = int(post_id)
self.parent = parent
self._tzInfo = tzInfo
self.linkToFile = dict()
self.parsePost(page)
if not self.is_restricted:
self.parseBody(page)
if self.type == 'image':
self.parseImages(page)
if self.type == 'file':
self.parseFiles(page)
# compatibility for PixivHelper.makeFilename()
self.imageCount = len(self.images)
if self.imageCount > 0:
self.imageMode = "manga"
def __str__(self):
if self.parent is not None:
return f"FanboxPost({self.parent}: {self.imageId}, {self.imageTitle}, {self.type}, {self.feeRequired})"
else:
return f"FanboxPost({self.imageId}, {self.imageTitle}, {self.type}, {self.feeRequired})"
def parsePost(self, jsPost):
self.imageTitle = jsPost["title"]
self.coverImageUrl = jsPost["coverImageUrl"]
if self.coverImageUrl is not None and self.coverImageUrl not in self.embeddedFiles:
self.embeddedFiles.append(jsPost["coverImageUrl"])
self.worksDate = jsPost["publishedDatetime"]
self.worksDateDateTime = datetime_z.parse_datetime(self.worksDate)
self.updatedDate = jsPost["updatedDatetime"]
self.updatedDateDatetime = datetime_z.parse_datetime(self.updatedDate)
if "feeRequired" in jsPost:
self.feeRequired = jsPost["feeRequired"]
# Issue #420
if self._tzInfo is not None:
self.worksDateDateTime = self.worksDateDateTime.astimezone(self._tzInfo)
self.type = jsPost["type"]
if self.type not in FanboxPost._supportedType:
raise PixivException(f"Unsupported post type = {self.type} for post = {self.imageId}", errorCode=9999, htmlPage=jsPost)
self.likeCount = int(jsPost["likeCount"])
if jsPost["body"] is None:
self.is_restricted = True
def parseBody(self, jsPost):
''' Parse general data for text and article'''
self.body_text = ""
embedData = list()
if "text" in jsPost["body"]:
self.body_text = jsPost["body"]["text"]
# Issue #544
elif "html" in jsPost["body"]:
self.body_text = jsPost["body"]["html"]
# Issue #611: try to parse all images in the html body for compatibility
parsed = BeautifulSoup(self.body_text, features="html5lib")
links = parsed.findAll('a')
for link in links:
if link["href"].find("//fanbox.pixiv.net/images/entry/") > 0:
self.try_add(link["href"], self.embeddedFiles)
self.try_add(link["href"], self.images)
images = parsed.findAll('img')
for image in images:
if "data-src-original" in image.attrs:
self.try_add(image["data-src-original"], self.embeddedFiles)
self.try_add(image["data-src-original"], self.images)
parsed.decompose()
del parsed
if "thumbnailUrl" in jsPost["body"] and jsPost["body"]["thumbnailUrl"] is not None:
            # use the thumbnail as the cover when the cover image does not exist.
if self.coverImageUrl is None:
PixivHelper.get_logger().debug("Missing coverImageUrl, using thumbnailUrl instead as cover.")
self.coverImageUrl = jsPost["body"]["thumbnailUrl"]
self.embeddedFiles.append(jsPost["body"]["thumbnailUrl"])
if "embedMap" in jsPost["body"] and jsPost["body"]["embedMap"] is not None and len(jsPost["body"]["embedMap"]) > 0:
for embed in jsPost["body"]["embedMap"]:
embedData.append(jsPost["body"]["embedMap"][embed])
self.embeddedFiles.append(jsPost["body"]["embedMap"][embed])
if "blocks" in jsPost["body"] and jsPost["body"]["blocks"] is not None:
for block in jsPost["body"]["blocks"]:
if block["type"] == "p":
if "links" in block:
pointer = 0
block_text = ""
for i in range(0, len(block["links"])):
link = block["links"][i]
link_offset = link["offset"]
block_text += block["text"][pointer:link_offset]
pointer = link_offset + link["length"]
block_text += "<a href='{0}'>{1}</a>".format(
link["url"],
block["text"][link_offset:pointer])
block_text += block["text"][pointer:]
else:
block_text = block["text"]
self.body_text = f"{self.body_text}<p>{block_text}</p>"
elif block["type"] == "image":
imageId = block["imageId"]
if imageId not in jsPost["body"]["imageMap"]:
continue
originalUrl = jsPost["body"]["imageMap"][imageId]["originalUrl"]
thumbnailUrl = jsPost["body"]["imageMap"][imageId]["thumbnailUrl"]
self.body_text = f"{self.body_text}<br /><a href='{originalUrl}'><img src='{thumbnailUrl}'/></a>"
self.try_add(originalUrl, self.images)
self.try_add(originalUrl, self.embeddedFiles)
elif block["type"] == "file":
fileId = block["fileId"]
if fileId not in jsPost["body"]["fileMap"]:
continue
fileUrl = jsPost["body"]["fileMap"][fileId]["url"]
fileName = jsPost["body"]["fileMap"][fileId]["name"]
self.body_text = f"{self.body_text}<br /><a href='{fileUrl}'>{fileName}</a>"
self.try_add(fileUrl, self.images)
self.try_add(fileUrl, self.embeddedFiles)
elif block["type"] == "embed": # Implement #470
embedId = block["embedId"]
if embedId in jsPost["body"]["embedMap"]:
embedStr = self.getEmbedData(jsPost["body"]["embedMap"][embedId], jsPost)
self.body_text = f"{self.body_text}<br />{embedStr}"
else:
PixivHelper.print_and_log("warn", f"Found missing embedId: {embedId} for {self.imageId}")
# Issue #476
if "video" in jsPost["body"]:
self.body_text = u"{0}<br />{1}".format(
self.body_text,
self.getEmbedData(jsPost["body"]["video"], jsPost))
def getEmbedData(self, embedData, jsPost) -> str:
# Issue #881
content_provider_path = os.path.abspath(os.path.dirname(sys.executable) + os.sep + "content_provider.json")
if not os.path.exists(content_provider_path):
content_provider_path = os.path.abspath("./content_provider.json")
if not os.path.exists(content_provider_path):
raise PixivException(f"Missing content_provider.json, please get it from https://github.com/Nandaka/PixivUtil2/blob/master/content_provider.json! Expected location => {content_provider_path}",
errorCode=PixivException.MISSING_CONFIG,
htmlPage=None)
cfg = demjson.decode_file(content_provider_path)
embed_cfg = cfg["embedConfig"]
current_provider = embedData["serviceProvider"]
if current_provider in embed_cfg:
if embed_cfg[current_provider]["ignore"]:
return ""
content_id = None
for key in embed_cfg[current_provider]["keys"]:
if key in embedData:
content_id = embedData[key]
break
if content_id is not None and len(content_id) > 0:
content_format = embed_cfg[current_provider]["format"]
return content_format.format(content_id)
else:
msg = "Empty content_id for embed provider = {0} for post = {1}, please update content_provider.json."
raise PixivException(msg.format(embedData["serviceProvider"], self.imageId),
errorCode=9999,
htmlPage=jsPost)
else:
msg = "Unsupported embed provider = {0} for post = {1}, please update content_provider.json."
raise PixivException(msg.format(embedData["serviceProvider"], self.imageId),
errorCode=9999,
htmlPage=jsPost)
def parseImages(self, jsPost):
for image in jsPost["body"]["images"]:
self.try_add(image["originalUrl"], self.images)
self.try_add(image["originalUrl"], self.embeddedFiles)
def parseFiles(self, jsPost):
for image in jsPost["body"]["files"]:
self.try_add(image["url"], self.images)
self.try_add(image["url"], self.embeddedFiles)
def try_add(self, item, list_data):
if self.coverImageUrl == item:
return
if item not in list_data:
list_data.append(item)
def printPost(self):
print("Post = {0}".format(self.imageId))
print("Title = {0}".format(self.imageTitle))
print("Type = {0}".format(self.type))
print("Created Date = {0}".format(self.worksDate))
print("Is Restricted = {0}".format(self.is_restricted))
def WriteInfo(self, filename):
info = None
try:
# Issue #421 ensure subdir exists.
PixivHelper.makeSubdirs(filename)
info = codecs.open(filename, 'wb', encoding='utf-8')
except IOError:
info = codecs.open(str(self.imageId) + ".txt",
'wb', encoding='utf-8')
PixivHelper.get_logger().exception("Error when saving image info: %s, file is saved to: %s.txt", filename, self.imageId)
info.write(u"ArtistID = {0}\r\n".format(self.parent.artistId))
info.write(u"ArtistName = {0}\r\n".format(self.parent.artistName))
info.write(u"ImageID = {0}\r\n".format(self.imageId))
info.write(u"Title = {0}\r\n".format(self.imageTitle))
info.write(u"Caption = {0}\r\n".format(self.body_text))
# info.write(u"Tags = " + ", ".join(self.imageTags) + "\r\n")
if self.is_restricted:
info.write(
u"Image Mode = {0}, Restricted\r\n".format(self.type))
else:
info.write(u"Image Mode = {0}\r\n".format(self.type))
info.write(u"Pages = {0}\r\n".format(self.imageCount))
info.write(u"Date = {0}\r\n".format(self.worksDate))
# info.write(u"Resolution = " + self.worksResolution + "\r\n")
# info.write(u"Tools = " + self.worksTools + "\r\n")
info.write(u"Like Count = {0}\r\n".format(self.likeCount))
info.write(u"Link = https://www.pixiv.net/fanbox/creator/{0}/post/{1}\r\n".format(
self.parent.artistId, self.imageId))
# info.write("Ugoira Data = " + str(self.ugoira_data) + "\r\n")
if len(self.embeddedFiles) > 0:
info.write("Urls =\r\n")
for link in self.embeddedFiles:
info.write(" - {0}\r\n".format(link))
info.close()
def WriteHtml(self, html_template, useAbsolutePaths, filename):
info = None
try:
PixivHelper.makeSubdirs(filename)
info = codecs.open(filename, 'wb', encoding='utf-8')
except IOError:
info = codecs.open(str(self.imageId) + ".html",
'wb', encoding='utf-8')
PixivHelper.get_logger().exception("Error when saving article html: %s, file is saved to: %s.html",
filename, self.imageId)
cover_image = ""
if self.coverImageUrl:
cover_image = f'<div class="cover"><img src="{self.coverImageUrl}"/></div>'
page = html_template.replace("%coverImage%", cover_image)
page = page.replace("%coverImageUrl%", self.coverImageUrl or "")
page = page.replace("%artistName%", self.parent.artistName)
page = page.replace("%imageTitle%", self.imageTitle)
page = page.replace("%worksDate%", self.worksDate)
token_body_text = ""
token_images = ""
token_text = ""
if self.type == "article":
token_body_text = f'<div class="article">{self.body_text}</div>'
else:
            token_images = '<div class="non-article images">{0}</div>'.format(
                "".join(['<a href="{0}">{1}</a>'.format(
                    x,
                    f'<img src="{x}"/>' if x[x.rindex(".") + 1:].lower() in ["jpg", "jpeg", "png", "bmp"] else x
                ) for x in self.images]))
token_text = '<div class="non-article text">{0}</div>'.format(
"".join(['<p>{0}</p>'.format(x.rstrip()) for x in self.body_text.split("\n")]))
page = page.replace("%body_text(article)%", token_body_text)
page = page.replace("%images(non-article)%", token_images)
page = page.replace("%text(non-article)%", token_text)
page = BeautifulSoup(page, features="html5lib")
imageATags = page.find_all("a", attrs={"href": True})
for imageATag in imageATags:
tag = imageATag.img
if tag:
tag["src"] = imageATag["href"]
root = page.find("div", attrs={"class": "root"})
if root:
root["class"].append("non-article" if self.type != "article" else "article")
page = page.prettify()
html_dir = os.path.dirname(filename)
for k, v in self.linkToFile.items():
if not useAbsolutePaths:
try:
v = os.path.relpath(v, html_dir)
except ValueError:
                    PixivHelper.get_logger().exception("Error when converting local path %s to a relative one for image %s, absolute paths are used", filename, self.imageId)
v = "file://" + v
else:
v = "file://" + v
page = page.replace(k, v)
info.write(page)
info.close()
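# Illustrative sketch added for clarity (not part of the original module; the JSON dump,
# ids and name below are hypothetical): parse a saved Fanbox "posts" API response for an
# artist and dump each post's info file. The artist id must match the user id in the dump.
if __name__ == "__main__":
    artist = FanboxArtist(123456, "some artist", "somecreator")
    with codecs.open("fanbox_posts.json", "r", encoding="utf-8") as f:
        posts = artist.parsePosts(f.read())
    for post in posts:
        post.printPost()
        post.WriteInfo("{0}_{1}.txt".format(artist.creatorId, post.imageId))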
|
the-stack_106_15296
|
import re
import os
import socket
from threading import Thread
import time
import ttfw_idf
global g_client_response
global g_msg_to_client
g_client_response = b""
g_msg_to_client = b" 3XYZ"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def chat_server_sketch(my_ip):
global g_client_response
print("Starting the server on {}".format(my_ip))
port = 2222
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(600)
s.bind((my_ip, port))
s.listen(1)
q,addr = s.accept()
print("connection accepted")
q.settimeout(30)
q.send(g_msg_to_client)
data = q.recv(1024)
# check if received initial empty message
if (len(data) > 4):
g_client_response = data
else:
g_client_response = q.recv(1024)
print("received from client {}".format(g_client_response))
s.close()
print("server closed")
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_asio_chat_client(env, extra_data):
"""
steps: |
1. Test to start simple tcp server
2. `dut1` joins AP
3. Test injects server IP to `dut1`via stdin
4. Test evaluates `dut1` receives a message server placed
5. Test injects a message to `dut1` to be sent as chat_client message
6. Test evaluates received test message in host server
"""
global g_client_response
global g_msg_to_client
test_msg = "ABC"
dut1 = env.get_dut("chat_client", "examples/protocols/asio/chat_client", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "asio_chat_client.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("asio_chat_client_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("asio_chat_client_size", bin_size // 1024, dut1.TARGET)
# 1. start a tcp server on the host
host_ip = get_my_ip()
thread1 = Thread(target=chat_server_sketch, args=(host_ip,))
thread1.start()
# 2. start the dut test and wait till client gets IP address
dut1.start_app()
dut1.expect(re.compile(r" IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)"), timeout=30)
# 3. send host's IP to the client i.e. the `dut1`
dut1.write(host_ip)
# 4. client `dut1` should receive a message
dut1.expect(g_msg_to_client[4:].decode()) # Strip out the front 4 bytes of message len (see chat_message protocol)
# 5. write test message from `dut1` chat_client to the server
dut1.write(test_msg)
while len(g_client_response) == 0:
time.sleep(1)
g_client_response = g_client_response.decode()
print(g_client_response)
# 6. evaluate host_server received this message
if (g_client_response[4:7] == test_msg):
print("PASS: Received correct message")
pass
else:
print("Failure!")
        raise ValueError('Wrong data received from asio tcp server: {} (expected:{})'.format(g_client_response[4:7], test_msg))
thread1.join()
if __name__ == '__main__':
test_examples_protocol_asio_chat_client()
|
the-stack_106_15297
|
from icm20948 import ICM20948 # https://github.com/pimoroni/icm20948-python
from dataclasses import dataclass
import math
@dataclass
class IMUSensorFusion:
gyro_sens: float = 65.5
gx_offset: float = -0.5
gy_offset: float = -0.8
accel_sens: float = 8.192
pitch: float = 0
roll: float = 0
alpha: float = 0.7
dt: float = 0.32
imu = ICM20948(0x69)
def read(self):
"""
Read and calibrate
Based on: https://andrewmourcos.github.io/blog/2020/11/21/complementary-filter.html
"""
aX, aY, aZ, gX, gY, gZ = self.imu.read_accelerometer_gyro_data()
accel = (aX / self.accel_sens, aY / self.accel_sens, aZ / self.accel_sens)
gyro = (gX + self.gx_offset, gY + self.gy_offset, gZ)
self.pitch += (gyro[0] / self.gyro_sens) * self.dt
self.roll -= (gyro[1] / self.gyro_sens) * self.dt
# Only use accelerometer when it's steady (magnitude is near 1g)
forceMagnitude = math.sqrt(accel[0] ** 2 + accel[1] ** 2 + accel[2] ** 2)
if 0.9 < forceMagnitude < 1.1:
self.pitch = self.pitch * self.alpha + math.atan2(accel[1],
math.sqrt(accel[0] ** 2 + accel[2] ** 2)) * (
180 / math.pi) * (1 - self.alpha)
self.roll = self.roll * self.alpha + math.atan2(-accel[0], accel[2]) * (180 / math.pi) * (
1 - self.alpha)
p = (self.pitch * 180 / math.pi)
r = (self.roll * 180 / math.pi)
return round(p), round(r)
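# Illustrative usage sketch added for clarity (not part of the original module): poll the
# fused pitch/roll estimate roughly at the filter's dt. Requires an ICM20948 at address 0x69.
if __name__ == "__main__":
    import time
    fusion = IMUSensorFusion()
    while True:
        pitch_deg, roll_deg = fusion.read()
        print("pitch={} roll={}".format(pitch_deg, roll_deg))
        time.sleep(fusion.dt)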
|
the-stack_106_15299
|
## Modification from original HardNet implementation in
## https://raw.githubusercontent.com/DagnyT/hardnet/master/code/dataloaders/HPatchesDatasetCreator.py
## I need to clean it a little bit and modify some things, but it works
import os
import numpy as np
import cv2
import sys
import json
import keras
from tqdm import tqdm
import glob
import random
splits = ['a', 'b', 'c', 'view', 'illum']
tps = ['ref', 'e1', 'e2', 'e3', 'e4', 'e5', 'h1', 'h2', 'h3', 'h4', 'h5', \
't1', 't2', 't3', 't4', 't5']
class DenoiseHPatches(keras.utils.Sequence):
"""Class for loading an HPatches sequence from a sequence folder"""
itr = tps
def __init__(self, seqs, batch_size=32):
self.all_paths = []
self.batch_size = batch_size
self.dim = (32, 32)
self.n_channels = 1
self.sequences = {}
self.sequences_n = {}
for base in tqdm(seqs):
name = base.split('/')
self.name = name[-1]
self.base = base
for t in self.itr:
im_path = os.path.join(base, t + '.png')
img_n = cv2.imread(im_path.replace('.png', '_noise.png'), 0)
img = cv2.imread(im_path, 0)
N = img.shape[0] / 32
seq_im = np.array(np.split(img, N),
dtype=np.uint8)
seq_im_n = np.array(np.split(img_n, N),
dtype=np.uint8)
for i in range(int(N)):
path = os.path.join(base, t, str(i) + '.png')
self.all_paths.append(path)
self.sequences[path] = seq_im[i]
self.sequences_n[path] = seq_im_n[i]
self.on_epoch_end()
def get_images(self, index):
path = self.all_paths[index]
img = self.sequences[path].astype(np.float32)
img_n = self.sequences_n[path].astype(np.float32)
return img, img_n
def __len__(self):
'''Denotes the number of batches per epoch'''
return int(np.floor(len(self.all_paths) / self.batch_size))
def __getitem__(self, index):
img_clean = np.empty((self.batch_size,) + self.dim + (self.n_channels,))
img_noise = np.empty((self.batch_size,) + self.dim + (self.n_channels,))
for i in range(self.batch_size):
img, img_n = self.get_images(index * self.batch_size + i)
img_clean[i] = np.expand_dims(img, -1)
img_noise[i] = np.expand_dims(img_n, -1)
return img_noise, img_clean
def on_epoch_end(self):
# 'Updates indexes after each epoch'
random.shuffle(self.all_paths)
class hpatches_sequence_folder:
"""Class for loading an HPatches sequence from a sequence folder"""
itr = tps
def __init__(self, base, noise=1):
name = base.split('/')
self.name = name[-1]
self.base = base
if noise:
noise_path = '_noise'
else:
noise_path = ''
for t in self.itr:
im_path = os.path.join(base, t + noise_path + '.png')
im = cv2.imread(im_path, 0)
self.N = im.shape[0] / 32
setattr(self, t, np.split(im, self.N))
def generate_triplets(labels, num_triplets, batch_size):
def create_indices(_labels):
inds = dict()
for idx, ind in enumerate(_labels):
if ind not in inds:
inds[ind] = []
inds[ind].append(idx)
return inds
triplets = []
indices = create_indices(np.asarray(labels))
unique_labels = np.unique(np.asarray(labels))
n_classes = unique_labels.shape[0]
# add only unique indices in batch
already_idxs = set()
for x in tqdm(range(num_triplets)):
if len(already_idxs) >= batch_size:
already_idxs = set()
c1 = np.random.randint(0, n_classes)
while c1 in already_idxs:
c1 = np.random.randint(0, n_classes)
already_idxs.add(c1)
c2 = np.random.randint(0, n_classes)
while c1 == c2:
c2 = np.random.randint(0, n_classes)
if len(indices[c1]) == 2: # hack to speed up process
n1, n2 = 0, 1
else:
n1 = np.random.randint(0, len(indices[c1]))
n2 = np.random.randint(11, 15)
while n1 == n2:
n2 = np.random.randint(11, 15)
n3 = np.random.randint(0, len(indices[c2]))
triplets.append([indices[c1][n1], indices[c1][n2], indices[c2][n3]])
return np.array(triplets)
class HPatches():
def __init__(self, train=True, transform=None, download=False, train_fnames=[],
test_fnames=[], denoise_model=None, use_clean=False):
self.train = train
self.transform = transform
self.train_fnames = train_fnames
self.test_fnames = test_fnames
self.denoise_model = denoise_model
self.use_clean = use_clean
def set_denoise_model(self, denoise_model):
self.denoise_model = denoise_model
def denoise_patches(self, patches):
batch_size = 100
for i in tqdm(range(int(len(patches) / batch_size)), file=sys.stdout):
batch = patches[i * batch_size:(i + 1) * batch_size]
batch = np.expand_dims(batch, -1)
batch = np.clip(self.denoise_model.predict(batch).astype(int),
0, 255).astype(np.uint8)[:, :, :, 0]
patches[i * batch_size:(i + 1) * batch_size] = batch
batch = patches[i * batch_size:]
batch = np.expand_dims(batch, -1)
batch = np.clip(self.denoise_model.predict(batch).astype(int),
0, 255).astype(np.uint8)[:, :, :, 0]
patches[i * batch_size:] = batch
return patches
def read_image_file(self, data_dir, train=1):
"""Return a Tensor containing the patches
"""
if self.denoise_model and not self.use_clean:
print('Using denoised patches')
elif not self.denoise_model and not self.use_clean:
print('Using noisy patches')
elif self.use_clean:
print('Using clean patches')
sys.stdout.flush()
patches = []
labels = []
counter = 0
hpatches_sequences = [x[1] for x in os.walk(data_dir)][0]
if train:
list_dirs = self.train_fnames
else:
list_dirs = self.test_fnames
for directory in tqdm(hpatches_sequences, file=sys.stdout):
if (directory in list_dirs):
for tp in tps:
if self.use_clean:
sequence_path = os.path.join(data_dir, directory, tp) + '.png'
else:
sequence_path = os.path.join(data_dir, directory, tp) + '_noise.png'
image = cv2.imread(sequence_path, 0)
h, w = image.shape
n_patches = int(h / w)
for i in range(n_patches):
patch = image[i * (w): (i + 1) * (w), 0:w]
patch = cv2.resize(patch, (32, 32))
patch = np.array(patch, dtype=np.uint8)
patches.append(patch)
labels.append(i + counter)
counter += n_patches
patches = np.array(patches, dtype=np.uint8)
if self.denoise_model and not self.use_clean:
print('Denoising patches...')
patches = self.denoise_patches(patches)
return patches, labels
class DataGeneratorDesc(keras.utils.Sequence):
# 'Generates data for Keras'
def __init__(self, data, labels, num_triplets=1000000, batch_size=50, dim=(32, 32), n_channels=1, shuffle=True):
# 'Initialization'
self.transform = None
self.out_triplets = True
self.dim = dim
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.data = data
self.labels = labels
self.num_triplets = num_triplets
self.on_epoch_end()
def get_image(self, t):
def transform_img(img):
if self.transform is not None:
                img = self.transform(img.numpy())
return img
a, p, n = self.data[t[0]], self.data[t[1]], self.data[t[2]]
img_a = transform_img(a).astype(float)
img_p = transform_img(p).astype(float)
img_n = transform_img(n).astype(float)
img_a = np.expand_dims(img_a, -1)
img_p = np.expand_dims(img_p, -1)
img_n = np.expand_dims(img_n, -1)
if self.out_triplets:
return img_a, img_p, img_n
else:
return img_a, img_p
def __len__(self):
'''Denotes the number of batches per epoch'''
return int(np.floor(len(self.triplets) / self.batch_size))
def __getitem__(self, index):
y = np.zeros((self.batch_size, 1))
img_a = np.empty((self.batch_size,) + self.dim + (self.n_channels,))
img_p = np.empty((self.batch_size,) + self.dim + (self.n_channels,))
if self.out_triplets:
img_n = np.empty((self.batch_size,) + self.dim + (self.n_channels,))
for i in range(self.batch_size):
t = self.triplets[self.batch_size * index + i]
img_a_t, img_p_t, img_n_t = self.get_image(t)
img_a[i] = img_a_t
img_p[i] = img_p_t
if self.out_triplets:
img_n[i] = img_n_t
return {'a': img_a, 'p': img_p, 'n': img_n}, y
def on_epoch_end(self):
# 'Updates indexes after each epoch'
self.triplets = generate_triplets(self.labels, self.num_triplets, 32)
hpatches_dir = './hpatches'
splits_path = './splits.json'
splits_json = json.load(open(splits_path, 'rb'))
split = splits_json['a']
train_fnames = split['train']
test_fnames = split['test']
seqs = glob.glob(hpatches_dir+'/*')
seqs = [os.path.abspath(p) for p in seqs]
seqs_train = list(filter(lambda x: x.split('/')[-1] in train_fnames, seqs))
seqs_test = list(filter(lambda x: x.split('/')[-1] in split['test'], seqs))
hPatches = HPatches(train_fnames=train_fnames, test_fnames=test_fnames,
denoise_model=None, use_clean=True)
x, y = hPatches.read_image_file(data_dir='./hpatches', train=1)
print("length of patches= {}".format(len(x)))
print("Length of labels= {}".format(len(y)))
|
the-stack_106_15300
|
from typing import Callable, List
import pytest
from django.http import HttpResponse
from django.views import View
from apirouter import APIRouter
pytestmark = [pytest.mark.urls(__name__)]
def append_response_after(response: HttpResponse, item: str) -> str:
response.setdefault("x-after", "")
items: List[str] = response["x-after"].split()
items.append(item)
items_str = " ".join(items)
response["x-after"] = items_str
return items_str
def make_decorator(key: str):
def decorator(view_func: Callable):
def wrapped(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
append_response_after(response, key)
return response
return wrapped
return decorator
router = APIRouter(decorators=[make_decorator("router1"), make_decorator("router2")])
@router.route("/func", methods=["GET"])
@make_decorator("func1")
@make_decorator("func2")
def func_get(request):
return HttpResponse("GET /func")
@router.view("/view", decorators=[make_decorator("view1"), make_decorator("view2")])
class RouteView(View):
def get(self, request):
return HttpResponse("GET /view")
urlpatterns = router.urls
def test_func_decorators(client):
response = client.get("/func")
assert response.status_code == 200
assert response.content == b"GET /func"
assert response["x-after"] == "func2 func1 router2 router1"
def test_view_decorators(client):
response = client.get("/view")
assert response.status_code == 200
assert response.content == b"GET /view"
assert response["x-after"] == "view2 view1 router2 router1"
|
the-stack_106_15301
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import setup, Command
NAME = 'pysemantics'
PACKAGE = 'pysemantics'
DESCRIPTION = 'NLP client for python'
URL = 'https://github.com/bstoilov/digitalowl-pysemantics'
EMAIL = '[email protected]'
AUTHOR = 'Borislav Stoilov'
REQUIRES_PYTHON = '>=3.5.0'
REQUIRED = ['requests', 'sklearn', 'spatial', 'numpy']
here = os.path.abspath(os.path.dirname(__file__))
try:
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
about = {}
with open(os.path.join(here, PACKAGE, '__version__.py')) as f:
exec(f.read(), about)
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
install_requires=REQUIRED,
include_package_data=True,
packages=[PACKAGE],
zip_safe=False,
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
cmdclass={
'upload': UploadCommand,
},
)
|
the-stack_106_15302
|
import sys
from os import path
from setuptools import find_packages, setup
import versioneer
min_version = (3, 6)
if sys.version_info < min_version:
error = """
ads-deploy does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*sys.version_info[:2], *min_version)
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
git_requirements = [r for r in requirements if r.startswith('git+')]
if git_requirements:
print('User must install the following packages manually:')
print()
print("\n".join(f'* {r}' for r in git_requirements))
print()
setup(
name='ads-deploy',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD',
author='SLAC National Accelerator Laboratory',
packages=find_packages(exclude=['docs', 'tests']),
description='TwinCAT ADS EPICS IOC deployment tools',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/pcdshub/ads-deploy',
entry_points={'console_scripts': ['ads-deploy=ads_deploy.__main__:main']},
include_package_data=True,
package_data={
'ads_deploy': ['templates/*.jinja2',
'templates/*.macro',
'windows/*.cmd',
'windows/*.sh',
]
},
install_requires=requirements,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
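# Hypothetical usage (not part of the original file): the console_scripts entry
# point above installs an `ads-deploy` command that dispatches to
# ads_deploy.__main__:main, e.g.
#
#   $ pip install .
#   $ ads-deploy --help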
|
the-stack_106_15303
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2017-2021 NV Access Limited, Joseph Lee
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
"""App module for Windows 10 Modern Keyboard aka new touch keyboard panel.
The chief feature is allowing NVDA to announce selected emoji when using the keyboard to search for and select one.
Other features include announcing candidates for misspellings when hardware keyboard suggestions are enabled, and managing cloud clipboard paste.
This is applicable on Windows 10 Fall Creators Update and later."""
import appModuleHandler
import api
import eventHandler
import speech
import braille
import ui
import config
import winVersion
import controlTypes
from NVDAObjects.UIA import UIA
from NVDAObjects.behaviors import CandidateItem as CandidateItemBehavior
class ImeCandidateUI(UIA):
"""
The UIAutomation-based IME candidate UI (such as for the modern Chinese Microsoft Quick input).
This class ensures NVDA is notified of the first selected item when the UI is shown.
"""
def event_show(self):
# The IME candidate UI is shown.
# Report the current candidates page and the currently selected item.
# Sometimes UIA does not fire an elementSelected event when it is first opened,
# Therefore we must fake it here.
if (self.UIAAutomationId == "IME_Prediction_Window"):
candidateItem = self.firstChild
eventHandler.queueEvent("UIA_elementSelected", candidateItem)
elif (
self.firstChild
and self.firstChild.role == controlTypes.ROLE_LIST
and isinstance(self.firstChild.firstChild, ImeCandidateItem)
):
candidateItem = self.firstChild.firstChild
eventHandler.queueEvent("UIA_elementSelected", candidateItem)
class ImeCandidateItem(CandidateItemBehavior, UIA):
"""
A UIAutomation-based IME candidate Item (such as for the modern Chinese Microsoft Quick input).
This class presents Ime candidate items in the standard way NVDA does for all other IMEs.
E.g. reports entire candidate page content if it is new or has changed pages,
And reports the currently selected item, including symbol descriptions.
"""
keyboardShortcut = ""
def _get_candidateNumber(self):
number = super(ImeCandidateItem, self).keyboardShortcut
try:
number = int(number)
except (ValueError, TypeError):
pass
return number
def _get_parent(self):
parent = super(ImeCandidateItem, self).parent
# Translators: A label for a 'candidate' list
# which contains symbols the user can choose from when typing east-asian characters into a document.
parent.name = _("Candidate")
parent.description = None
return parent
def _get_name(self):
try:
number = int(self.candidateNumber)
except (TypeError, ValueError):
return super(ImeCandidateItem, self).name
candidate = super(ImeCandidateItem, self).name
return self.getFormattedCandidateName(number, candidate)
def _get_description(self):
candidate = super(ImeCandidateItem, self).name
return self.getFormattedCandidateDescription(candidate)
def _get_basicText(self):
return super(ImeCandidateItem, self).name
def event_UIA_elementSelected(self):
oldNav = api.getNavigatorObject()
if isinstance(oldNav, ImeCandidateItem) and self.name == oldNav.name:
# Duplicate selection event fired on the candidate item. Ignore it.
return
api.setNavigatorObject(self)
speech.cancelSpeech()
# Report the entire current page of candidate items if it is newly shown or it has changed.
if config.conf["inputComposition"]["autoReportAllCandidates"]:
oldText = getattr(self.appModule, '_lastImeCandidateVisibleText', '')
newText = self.visibleCandidateItemsText
if not isinstance(oldNav, ImeCandidateItem) or newText != oldText:
self.appModule._lastImeCandidateVisibleText = newText
# speak the new page
ui.message(newText)
# Now just report the currently selected candidate item.
self.reportFocus()
class AppModule(appModuleHandler.AppModule):
# Cache the most recently selected item.
_recentlySelected = None
def event_UIA_elementSelected(self, obj, nextHandler):
# Logic for IME candidate items is handled all within its own object
# Therefore pass these events straight on.
if isinstance(obj, ImeCandidateItem):
return nextHandler()
# #7273: When this is fired on categories, the first emoji from the new category is selected but not announced.
# Therefore, move the navigator object to that item if possible.
# However, in recent builds, name change event is also fired.
# For consistent experience, report the new category first by traversing through controls.
# #8189: do not announce candidates list itself (not items), as this is repeated each time candidate items are selected.
if obj.UIAElement.cachedAutomationID == "CandidateList": return
speech.cancelSpeech()
# Sometimes, due to bad tree traversal or wrong item getting selected, something other than the selected item sees this event.
        # Sometimes the clipboard candidates list gets selected, so ask NVDA to descend one more level.
if obj.UIAElement.cachedAutomationID == "TEMPLATE_PART_ClipboardItemsList":
obj = obj.firstChild
# In build 18262, emoji panel may open to People group and skin tone modifier or the list housing them gets selected.
elif obj.UIAElement.cachedAutomationID == "SkinTonePanelModifier_ListView":
obj = obj.next
elif obj.parent.UIAElement.cachedAutomationID == "SkinTonePanelModifier_ListView":
# But this will point to nothing if emoji search results are not people.
if obj.parent.next is not None: obj = obj.parent.next
else: obj = obj.parent.parent.firstChild
candidate = obj
if obj and obj.UIAElement.cachedClassName == "ListViewItem" and obj.parent and isinstance(obj.parent, UIA) and obj.parent.UIAElement.cachedAutomationID != "TEMPLATE_PART_ClipboardItemsList":
# The difference between emoji panel and suggestions list is absence of categories/emoji separation.
# Turns out automation ID for the container is different, observed in build 17666 when opening clipboard copy history.
candidate = obj.parent.previous
if candidate is not None:
# Emoji categories list.
ui.message(candidate.name)
obj = candidate.firstChild
if obj is not None:
api.setNavigatorObject(obj)
obj.reportFocus()
braille.handler.message(braille.getPropertiesBraille(
name=obj.name,
role=obj.role,
positionInfo=obj.positionInfo
))
# Cache selected item.
self._recentlySelected = obj.name
else:
# Translators: presented when there is no emoji when searching for one in Windows 10 Fall Creators Update and later.
ui.message(_("No emoji"))
nextHandler()
def event_UIA_window_windowOpen(self, obj, nextHandler):
firstChild = obj.firstChild
# Handle Ime Candidate UI being shown
if isinstance(firstChild, ImeCandidateUI):
eventHandler.queueEvent("show", firstChild)
return
# Make sure to announce most recently used emoji first in post-1709 builds.
        # Fake the announcement by locating the 'most recently used' category and calling the selected event on it.
        # However, in build 17666 and later, child count is the same for both emoji panel and hardware keyboard candidates list.
        # Thankfully, first child automation IDs are different for each modern input technology.
# However this event is raised when the input panel closes.
if obj.firstChild is None:
return
        # #9104: different aspects of the modern input panel are represented by automation IDs.
childAutomationID = obj.firstChild.UIAElement.cachedAutomationID
# Emoji panel for 1709 (build 16299) and 1803 (17134).
emojiPanelInitial = winVersion.WIN10_1709
# This event is properly raised in build 17134.
emojiPanelWindowOpenEvent = winVersion.WIN10_1803
if (
emojiPanelInitial <= winVersion.getWinVer() <= emojiPanelWindowOpenEvent
and childAutomationID in (
"TEMPLATE_PART_ExpressiveInputFullViewFuntionBarItemControl",
"TEMPLATE_PART_ExpressiveInputFullViewFuntionBarCloseButton"
)
):
self.event_UIA_elementSelected(obj.lastChild.firstChild, nextHandler)
# Handle hardware keyboard suggestions.
# Treat it the same as CJK composition list - don't announce this if candidate announcement setting is off.
elif childAutomationID == "CandidateWindowControl" and config.conf["inputComposition"]["autoReportAllCandidates"]:
try:
self.event_UIA_elementSelected(obj.firstChild.firstChild.firstChild, nextHandler)
except AttributeError:
# Because this is dictation window.
pass
# Emoji panel in build 17666 and later (unless this changes).
elif childAutomationID == "TEMPLATE_PART_ExpressionGroupedFullView":
self._emojiPanelJustOpened = True
try:
self.event_UIA_elementSelected(obj.firstChild.children[-2].firstChild.firstChild, nextHandler)
except AttributeError:
# In build 18272's emoji panel, emoji list becomes empty in some situations.
pass
# Clipboard history.
# Move to clipboard list so element selected event can pick it up.
# #9103: if clipboard is empty, a status message is displayed instead, and luckily it is located where clipboard data items can be found.
elif childAutomationID == "TEMPLATE_PART_ClipboardTitleBar":
self.event_UIA_elementSelected(obj.children[-2], nextHandler)
nextHandler()
# Argh, name change event is fired right after emoji panel opens in build 17666 and later.
_emojiPanelJustOpened = False
def event_nameChange(self, obj, nextHandler):
# Logic for IME candidate items is handled all within its own object
# Therefore pass these events straight on.
if isinstance(obj, ImeCandidateItem):
return nextHandler()
elif isinstance(obj, ImeCandidateUI):
return nextHandler()
        # On some systems, touch keyboard keys keep firing name change events.
# In build 17704, whenever skin tones are selected, name change is fired by emoji entries (GridViewItem).
if ((obj.UIAElement.cachedClassName in ("CRootKey", "GridViewItem"))
# Just ignore useless clipboard status.
# Also top emoji search result must be announced for better user experience.
or (obj.UIAElement.cachedAutomationID in ("TEMPLATE_PART_ClipboardItemsList", "TEMPLATE_PART_Search_TextBlock"))
# And no, emoji entries should not be announced here.
or (self._recentlySelected is not None and self._recentlySelected in obj.name)):
return
        # The word "blank" keeps getting announced, so suppress this on build 17666 and later.
if winVersion.getWinVer().build > 17134:
# In build 17672 and later,
# return immediately when element selected event on clipboard item was fired just prior to this.
# In some cases, parent will be None, as seen when emoji panel is closed in build 18267.
try:
if obj.UIAElement.cachedAutomationID == "TEMPLATE_PART_ClipboardItemIndex" or obj.parent.UIAElement.cachedAutomationID == "TEMPLATE_PART_ClipboardItemsList": return
except AttributeError:
return
if not self._emojiPanelJustOpened or obj.UIAElement.cachedAutomationID != "TEMPLATE_PART_ExpressionGroupedFullView": speech.cancelSpeech()
self._emojiPanelJustOpened = False
# Don't forget to add "Microsoft Candidate UI" as something that should be suppressed.
if obj.UIAElement.cachedAutomationID not in ("TEMPLATE_PART_ExpressionFullViewItemsGrid", "TEMPLATE_PART_ClipboardItemIndex", "CandidateWindowControl"):
ui.message(obj.name)
nextHandler()
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if isinstance(obj, UIA):
if obj.role == controlTypes.ROLE_LISTITEM and (
(
obj.parent.UIAAutomationId in (
"ExpandedCandidateList",
"TEMPLATE_PART_AdaptiveSuggestionList",
)
and obj.parent.parent.UIAAutomationId == "IME_Candidate_Window"
)
or obj.parent.UIAAutomationId in ("IME_Candidate_Window", "IME_Prediction_Window")
):
clsList.insert(0, ImeCandidateItem)
elif obj.role == controlTypes.ROLE_PANE and obj.UIAAutomationId in (
"IME_Candidate_Window",
"IME_Prediction_Window"
):
clsList.insert(0, ImeCandidateUI)
|
the-stack_106_15306
|
import heat as ht
import math
import torch
class Spectral(ht.ClusteringMixin, ht.BaseEstimator):
def __init__(
self,
n_clusters=None,
gamma=1.0,
metric="rbf",
laplacian="fully_connected",
threshold=1.0,
boundary="upper",
n_lanczos=300,
assign_labels="kmeans",
**params
):
"""
Spectral clustering
Parameters
----------
n_clusters : int, optional
gamma : float, default=1.0
            Kernel coefficient sigma for 'rbf'; ignored for metric='euclidean'
metric : string
How to construct the similarity matrix.
'rbf' : construct the similarity matrix using a radial basis function (RBF) kernel.
'euclidean' : construct the similarity matrix as only euclidean distance
laplacian : string
How to calculate the graph laplacian (affinity)
Currently supported : 'fully_connected', 'eNeighbour'
threshold : float
Threshold for affinity matrix if laplacian='eNeighbour'
            Ignored for laplacian='fully_connected'
boundary : string
How to interpret threshold: 'upper', 'lower'
            Ignored for laplacian='fully_connected'
n_lanczos : int
number of Lanczos iterations for Eigenvalue decomposition
assign_labels: str, default = 'kmeans'
The strategy to use to assign labels in the embedding space.
'kmeans'
**params: dict
Parameter dictionary for the assign_labels estimator
"""
self.n_clusters = n_clusters
self.gamma = gamma
self.metric = metric
self.laplacian = laplacian
self.threshold = threshold
self.boundary = boundary
self.n_lanczos = n_lanczos
self.assign_labels = assign_labels
if metric == "rbf":
sig = math.sqrt(1 / (2 * gamma))
self._laplacian = ht.graph.Laplacian(
lambda x: ht.spatial.rbf(x, sigma=sig, quadratic_expansion=True),
definition="norm_sym",
mode=laplacian,
threshold_key=boundary,
threshold_value=threshold,
)
elif metric == "euclidean":
self._laplacian = ht.graph.Laplacian(
lambda x: ht.spatial.cdist(x, quadratic_expansion=True),
definition="norm_sym",
mode=laplacian,
threshold_key=boundary,
threshold_value=threshold,
)
else:
raise NotImplementedError("Other kernels currently not supported")
if assign_labels == "kmeans":
            self._cluster = ht.cluster.KMeans(**params)  # forward the keyword arguments to the estimator
else:
raise NotImplementedError(
"Other Label Assignment Algorithms are currently not available"
)
# in-place properties
self._labels = None
@property
def labels_(self):
"""
Returns
-------
ht.DNDarray, shape=(n_points):
Labels of each point.
"""
return self._labels
def _spectral_embedding(self, X):
"""
Helper function to embed the dataset X into the eigenvectors of the graph Laplacian matrix
Returns
-------
ht.DNDarray, shape=(m_lanczos):
Eigenvalues of the graph's Laplacian matrix.
ht.DNDarray, shape=(n, m_lanczos):
Eigenvectors of the graph's Laplacian matrix.
"""
L = self._laplacian.construct(X)
# 3. Eigenvalue and -vector calculation via Lanczos Algorithm
v0 = ht.ones((L.shape[0],), dtype=L.dtype, split=0, device=L.device) / math.sqrt(L.shape[0])
V, T = ht.lanczos(L, self.n_lanczos, v0)
# 4. Calculate and Sort Eigenvalues and Eigenvectors of tridiagonal matrix T
eval, evec = torch.eig(T._DNDarray__array, eigenvectors=True)
# If x is an Eigenvector of T, then y = V@x is the corresponding Eigenvector of L
eval, idx = torch.sort(eval[:, 0], dim=0)
eigenvalues = ht.array(eval)
eigenvectors = ht.matmul(V, ht.array(evec))[:, idx]
return eigenvalues, eigenvectors
def fit(self, X):
"""
Computes the low-dim representation by calculation of eigenspectrum (eigenvalues and eigenvectors) of the graph
laplacian from the similarity matrix and fits the eigenvectors that correspond to the k lowest eigenvalues with
        a separate clustering algorithm (currently only kmeans is supported). Similarity metrics for adjacency
calculations are supported via spatial.distance. The eigenvalues and eigenvectors are computed by reducing the
Laplacian via lanczos iterations and using the torch eigenvalue solver on this smaller matrix. If other
        eigenvalue decomposition methods are supported, this will be expanded.
Parameters
----------
X : ht.DNDarray, shape=(n_samples, n_features)
Training instances to cluster.
"""
# 1. input sanitation
if not isinstance(X, ht.DNDarray):
raise ValueError("input needs to be a ht.DNDarray, but was {}".format(type(X)))
if X.split is not None and X.split != 0:
raise NotImplementedError("Not implemented for other splitting-axes")
# 2. Embed Dataset into lower-dimensional Eigenvector space
eigenvalues, eigenvectors = self._spectral_embedding(X)
# 3. Find the spectral gap, if number of clusters is not defined from the outside
if self.n_clusters is None:
diff = eigenvalues[1:] - eigenvalues[:-1]
tmp = ht.where(diff == diff.max()).item()
self.n_clusters = tmp + 1
components = eigenvectors[:, : self.n_clusters].copy()
params = self._cluster.get_params()
params["n_clusters"] = self.n_clusters
self._cluster.set_params(**params)
self._cluster.fit(components)
self._labels = self._cluster.labels_
self._cluster_centers = self._cluster.cluster_centers_
return self
def predict(self, X):
"""
Predict the closest cluster each sample in X belongs to.
X is transformed to the low-dim representation by calculation of eigenspectrum (eigenvalues and eigenvectors) of
        the graph laplacian from the similarity matrix. Inference of labels is done by extracting the closest
centroid of the n_clusters eigenvectors from the previously fitted clustering algorithm (kmeans).
Caution: Calculation of the low-dim representation requires some time!
Parameters
----------
X : ht.DNDarray, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : ht.DNDarray, shape = [n_samples,]
Index of the cluster each sample belongs to.
"""
# input sanitation
if not isinstance(X, ht.DNDarray):
raise ValueError("input needs to be a ht.DNDarray, but was {}".format(type(X)))
if X.split is not None and X.split != 0:
raise NotImplementedError("Not implemented for other splitting-axes")
_, eigenvectors = self._spectral_embedding(X)
components = eigenvectors[:, : self.n_clusters].copy()
return self._cluster.predict(components)
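# A minimal usage sketch (not part of the original module). It assumes a small
# random DNDarray split along axis 0; n_clusters is left as None so the cluster
# count is inferred from the spectral gap during fit().
def _example_spectral_usage():
    data = ht.random.randn(200, 4, split=0)  # toy dataset distributed over processes
    model = Spectral(metric="rbf", gamma=1.0, n_lanczos=20)
    model.fit(data)                          # embed into eigenvectors, then run k-means
    return model.labels_                     # one label per input sample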
|
the-stack_106_15309
|
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by François Marelli <[email protected]>
#
# This file is part of CBI Toolbox.
#
# CBI Toolbox is free software: you can redistribute it and/or modify
# it under the terms of the 3-Clause BSD License.
#
# CBI Toolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# 3-Clause BSD License for more details.
#
# You should have received a copy of the 3-Clause BSD License along
# with CBI Toolbox. If not, see https://opensource.org/licenses/BSD-3-Clause.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import json
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('svg')
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['pdf.fonttype'] = 42
path = os.environ['OVC_PATH']
gpath = os.path.join(path, 'graph')
with open(os.path.join(gpath, 'results.json'), 'r') as fp:
results = json.load(fp)
nas = (30, 50)
dnas = np.arange(10, 101, 5) / 100
radon_snr = results['radon']
fss_snr = results['fss']
fps_snr = results['fps']
dc_snr = results['dc']
fdc_snr = results['fdc']
fig, plots = plt.subplots(2, len(nas), figsize=(
5.5, 4.5), sharey=True, sharex=True)
lgs = None
for plot, na in zip(plots[0], nas):
lgs = []
na_idx = int((na - dnas[0] * 100) / 5)
lgs.append(plot.hlines(radon_snr, dnas[0], dnas[-1],
'C3', linestyles=':', label='X-Ray'))
lgs.append(plot.hlines(fps_snr[str(na)], dnas[0], dnas[-1],
'C2', linestyles='--', label='FPS-OPT, no deconv'))
lgs.append(plot.plot(dnas, dc_snr[str(na)],
label='FPS-OPT, BW deconv')[0])
lgs.append(plot.plot(dnas, fdc_snr[str(na)], 'C4',
label='FPS-OPT, GBM deconv', linestyle=':', marker='.', markersize=6)[0])
lgs.append(plot.hlines(fss_snr[str(na)], dnas[0], dnas[-1],
'C1', linestyles='-.', label='FSS-OPT'))
lgs.append(plot.plot(na/100, dc_snr[str(na)][na_idx], '*', color='0.1',
markersize=9, label='Imaging NA')[0])
lgs.insert(0, lgs.pop())
plot.set_title('Imaging NA={}'.format(na / 100))
plot.grid()
plot.set_xlim((dnas[0], dnas[-1]))
with open(os.path.join(gpath, 'noise.json'), 'r') as fp:
results = json.load(fp)
radon_snr = results['radon']
fss_snr = results['fss']
fps_snr = results['fps']
dc_snr = results['dc']
fdc_snr = results['fdc']
for (plot, na) in zip(plots[1], nas):
na_idx = int((na - dnas[0] * 100) / 5)
plot.hlines(radon_snr, dnas[0], dnas[-1],
'C3', linestyles=':', label='X-Ray')
plot.hlines(fss_snr[str(na)], dnas[0], dnas[-1],
'C1', linestyles='-.', label='FSS-OPT')
plot.plot(dnas, dc_snr[str(na)],
label='FPS-OPT, BW')
plot.plot(dnas, fdc_snr[str(na)], 'C4',
label='FPS-OPT, GBM', linestyle=':', marker='.', markersize=6)
plot.hlines(fps_snr[str(na)], dnas[0], dnas[-1],
'C2', linestyles='--', label='FPS-OPT, no deconv')
plot.plot(na/100, dc_snr[str(na)][na_idx], 'D', color='0.1',
markersize=6, label='Imaging NA')
plot.set_xlabel('Filtering NA')
plot.grid()
plot.set_xlim((dnas[0], dnas[-1]))
plots[1][0].legend(handles=lgs, loc='upper center',
bbox_to_anchor=(0, -0.46, 1.89, 0.2), framealpha=1, ncol=3)
plots[0][0].set_ylabel('PSNR [dB], clean')
plots[1][0].set_ylabel('PSNR [dB], noisy')
plt.subplots_adjust(wspace=0.04, hspace=0.07, left=0.09,
right=0.975, top=0.95, bottom=0.21)
plt.savefig(os.path.join(gpath, 'combined.pdf'))
|
the-stack_106_15310
|
#!/usr/bin/env python3
# lib/feed.py
# Jan-Mar 2020 <[email protected]>
import cbor2
import os
from lib import event
from lib import pcap
from lib import crypto
class FEED:
def __init__(self, fname, fid=None, signer=None,
create_if_notexisting=False,
digestmod = 'sha256'):
self.fname = fname
self.fid = fid
self.signer = signer
self.cine = create_if_notexisting
self.digestmod = digestmod
self.seq = 0
self.pcap = pcap.PCAP(fname)
self.hprev = None
try:
self.pcap.open('r')
# find highest seq number:
w = self.pcap.read_backwards(True)
e = event.EVENT(digestmod=self.digestmod)
e.from_wire(w)
if fid != None and e.fid != fid:
print("feed ID mismatch:", e.fid, "instead of", fid)
self.pcap.close()
self.pcap = None
return
self.fid, self.seq = e.fid, e.seq
self.hprev = e.get_ref()
self.pcap.close()
except Exception as e:
if not self.cine:
self.pcap = None
print(f"error opening file {fname}")
else:
self.pcap.open('w')
self.pcap.close()
def _append(self, w): # blindly append the bytes in w to a log file
p = self.pcap
p.open('a')
p.write(w)
p.close()
try:
os.sync()
except:
pass
self.seq += 1
def write(self, c): # create new log extension with given content
if self.seq == 0:
self.hprev = None
e = event.EVENT(fid=self.fid, seq=self.seq+1,
hprev=self.hprev, content=c,
digestmod=self.digestmod)
metabits = e.mk_metabits(self.signer.get_sinfo())
signature = self.signer.sign(metabits)
w = e.to_wire(signature)
self._append(w)
self.hprev = e.get_ref()
return w
def is_valid_extension(self, e):
if e.fid != self.fid or e.seq != self.seq+1:
print(f" out-of-seq (expected: {self.seq+1}, actual: {e.seq})")
return False
r = False
if e.sinfo == crypto.SIGNINFO_ED25519:
r = crypto.ED25519.verify(e.fid, e.metabits, e.signature)
elif isinstance(self.signer, crypto.HMAC):
if e.sinfo != self.signer.sinfo:
print(" signature type mismatch")
else:
r = crypto.HMAC.verify(crypto.sinfo2mod[e.sinfo],
self.signer.get_private_key(),
e.metabits, e.signature)
if not r:
print(" invalid signature")
return False
if self.hprev != e.hprev:
print(f" invalid hash chaining: expected={self.hprev}, actual={e.hprev}")
return False
return True
def ingest(self, e): # append event to log only if a valid extension
# return False if failing
try:
if not self.is_valid_extension(e):
print(" invalid extension")
return False
self._append(e.to_wire())
self.hprev = e.get_ref()
return True
except Exception as x:
print(x)
pass
print(" invalid packet")
return False
def __len__(self):
return self.seq
def __iter__(self):
return FEED_ITER(self.fname, self.digestmod)
class FEED_ITER:
def __init__(self, fn, digestmod='sha256'):
self.pcap = pcap.PCAP(fn)
self.pcap.open('r')
self.digestmod = digestmod
def __next__(self):
pkt = self.pcap.read()
if not pkt:
self.pcap.close()
raise StopIteration
e = event.EVENT(digestmod=self.digestmod)
e.from_wire(pkt)
return e
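# A minimal usage sketch (hypothetical, not part of the original module): append
# one signed event to a brand-new log and read every event back via iteration.
def _example_feed_roundtrip(feed_id, signer):
    feed = FEED('demo.pcap', fid=feed_id, signer=signer,
                create_if_notexisting=True)
    feed.write({'msg': 'hello'})           # event seq=1, chained to no predecessor
    return [e.content() for e in feed]     # FEED_ITER replays the pcap file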
# ----------------------------------------------------------------------
if __name__ == '__main__':
import argparse
import os
import sys
import crypto
def load_keyfile(fn):
with open(fn, 'r') as f:
key = eval(f.read())
if key['type'] == 'ed25519':
fid = bytes.fromhex(key['public'])
signer = crypto.ED25519(bytes.fromhex(key['private']))
digestmod = 'sha256'
elif key['type'] in ['hmac_sha256', 'hmac_sha1', 'hmac_md5']:
fid = bytes.fromhex(key['feed_id'])
digestmod = key['type'][5:]
signer = crypto.HMAC(digestmod, bytes.fromhex(key['private']))
return fid, signer, digestmod
parser = argparse.ArgumentParser(description='BACnet feed tool')
parser.add_argument('--keyfile')
parser.add_argument('pcapfile', metavar='PCAPFILE')
parser.add_argument('CMD', choices=['create','dump','append','check'])
args = parser.parse_args()
if args.CMD == 'dump':
pcap.dump(args.pcapfile)
elif args.CMD in ['create','append']:
if args.keyfile == None:
print("missing keyfile parameter")
sys.exit()
fid, signer, digestmod = load_keyfile(args.keyfile)
if args.CMD == 'create':
try:
os.remove(args.pcapfile)
except:
pass
feed = FEED(args.pcapfile, fid, signer, True, digestmod=digestmod)
else:
feed = FEED(args.pcapfile, fid, signer, digestmod=digestmod)
print("# enter payload of first event as a Python data structure, end with CTRL-D:")
content = sys.stdin.read()
feed.write(eval(content))
elif args.CMD == 'check':
if args.keyfile != None:
fid, signer, digestmod = load_keyfile(args.keyfile)
else:
fid, signer, digestmod = None, None, None
f = FEED(args.pcapfile, fid=fid, signer=signer, digestmod=digestmod)
if f.pcap == None:
sys.exit()
f.seq = 0
f.hprev = None
print(f"Checking feed {f.fid.hex()}")
for e in f:
# print(e)
if not f.is_valid_extension(e):
print(f"-> event {f.seq+1}: chaining or signature problem")
else:
print(f"-> event {e.seq}: ok, content={e.content().__repr__()}")
f.seq += 1
f.hprev = e.get_ref()
# eof
|
the-stack_106_15313
|
import os
import tensorflow as tf
import numpy as np
from modules.modules_bandpass import sampler, generator, discriminator
from utils.model_utils import l1_loss
from utils.tf_forward_tan import lddmm
class VariationalCycleGAN(object):
def __init__(self, dim_pitch=1, dim_mfc=23, bandpass_filters=64,
n_frames=128, discriminator=discriminator,
generator=generator,sampler=sampler, lddmm=lddmm,
mode='train', log_dir='./log', pre_train=None):
self.n_frames = n_frames
self.pitch_shape = [None, dim_pitch, None] #[batch_size, num_features, num_frames]
self.mfc_shape = [None, dim_mfc, None, bandpass_filters] #[batch_size, num_features, num_frames, #filters]
self.center_diff_mat = np.zeros((n_frames, n_frames), np.float32)
for i in range(self.n_frames-1):
self.center_diff_mat[i,i+1] = 0.5
for i in range(1, self.n_frames):
self.center_diff_mat[i,i-1] = -0.5
self.first_order_diff_mat = np.eye(self.n_frames, dtype=np.float32)
for i in range(1, self.n_frames):
self.first_order_diff_mat[i-1,i] = -1
# Create the kernel for lddmm
self.kernel = tf.expand_dims(tf.constant([6,50],
dtype=tf.float32), axis=0)
self.sampler = sampler
self.generator = generator
self.discriminator = discriminator
self.lddmm = lddmm
self.mode = mode
self.build_model()
self.optimizer_initializer()
self.saver = tf.train.Saver()
self.sess = tf.Session()
if pre_train is not None:
self.saver.restore(self.sess, pre_train)
else:
self.sess.run(tf.global_variables_initializer())
if self.mode == 'train':
self.train_step = 0
def build_model(self):
# Placeholders for real training samples
self.pitch_A_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_A_real')
self.pitch_B_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_B_real')
self.mfc_A_real = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_A_real')
self.mfc_B_real = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_B_real')
# Placeholders for fake generated samples
self.pitch_A_fake = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_A_fake')
self.pitch_B_fake = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_B_fake')
self.mfc_A_fake = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_A_fake')
self.mfc_B_fake = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_B_fake')
# Placeholders for momenta variables
self.momenta_A2B_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='momenta_A2B_real')
self.momenta_B2A_real = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='momenta_B2A_real')
# Placeholder for test samples
self.pitch_A_test = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_A_test')
self.mfc_A_test = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_A_test')
self.pitch_B_test = tf.placeholder(tf.float32, shape=self.pitch_shape,
name='pitch_B_test')
self.mfc_B_test = tf.placeholder(tf.float32, shape=self.mfc_shape,
name='mfc_B_test')
# Place holder for lambda_cycle and lambda_identity
self.lambda_pitch = tf.placeholder(tf.float32, None,
name='lambda_cycle_pitch')
self.lambda_mfc = tf.placeholder(tf.float32, None,
name='lambda_cycle_mfc')
self.lambda_momenta = tf.placeholder(tf.float32, None,
name='lambda_momenta')
'''
Generator A
'''
# Generate pitch from A to B
self.momenta_generation_A2B = self.sampler(input_pitch=self.pitch_A_real,
input_mfc=self.mfc_A_real, reuse=False, scope_name='sampler_A2B')
self.pitch_generation_A2B = self.lddmm(x=self.pitch_A_real,
p=self.momenta_generation_A2B, kernel=self.kernel, reuse=True, scope_name='lddmm')
self.mfc_generation_A2B = self.generator(input_pitch=self.pitch_generation_A2B,
input_mfc=self.mfc_A_real, reuse=False, scope_name='generator_A2B')
'''
Generator B
'''
# Generate pitch from B to A
self.momenta_generation_B2A = self.sampler(input_pitch=self.pitch_B_real,
input_mfc=self.mfc_B_real, reuse=False, scope_name='sampler_B2A')
self.pitch_generation_B2A = self.lddmm(x=self.pitch_B_real,
p=self.momenta_generation_B2A, kernel=self.kernel, reuse=True, scope_name='lddmm')
self.mfc_generation_B2A = self.generator(input_pitch=self.pitch_generation_B2A,
input_mfc=self.mfc_B_real, reuse=False, scope_name='generator_B2A')
'''
Initialize the discriminators
'''
# Discriminator initialized to keep parameters in memory
self.discrimination_B_fake = self.discriminator(input_mfc=tf.concat([self.mfc_A_real,
self.mfc_generation_A2B], axis=1), input_pitch=tf.concat([self.pitch_A_real,
self.pitch_generation_A2B], axis=1), reuse=False, scope_name='discriminator_A')
self.discrimination_A_fake = self.discriminator(input_mfc=tf.concat([self.mfc_B_real,
self.mfc_generation_B2A], axis=1), input_pitch=tf.concat([self.pitch_B_real,
self.pitch_generation_B2A], axis=1), reuse=False, scope_name='discriminator_B')
'''
Computing loss for generators
'''
self.momenta_loss_A2B = l1_loss(self.momenta_A2B_real, self.momenta_generation_A2B)
self.momenta_loss_B2A = l1_loss(self.momenta_B2A_real, self.momenta_generation_B2A)
self.pitch_loss_A2B = l1_loss(self.pitch_B_real, self.pitch_generation_A2B)
self.pitch_loss_B2A = l1_loss(self.pitch_A_real, self.pitch_generation_B2A)
self.mfc_loss_A2B = l1_loss(self.mfc_B_real, self.mfc_generation_A2B)
self.mfc_loss_B2A = l1_loss(self.mfc_A_real, self.mfc_generation_B2A)
# Merge the loss for generators A2B
self.loss_A2B \
= self.lambda_pitch*self.pitch_loss_A2B + self.lambda_mfc*self.mfc_loss_A2B \
+ self.lambda_momenta * self.momenta_loss_A2B
self.loss_B2A \
= self.lambda_pitch*self.pitch_loss_B2A + self.lambda_mfc*self.mfc_loss_B2A \
+ self.lambda_momenta * self.momenta_loss_B2A
self.generator_loss = self.loss_A2B + self.loss_B2A
# Compute the discriminator probability for pair of inputs
self.discrimination_input_A_real_B_fake \
= self.discriminator(input_mfc=tf.concat([self.mfc_A_real, self.mfc_B_fake], axis=1),
input_pitch=tf.concat([self.pitch_A_real, self.pitch_B_fake], axis=1),
reuse=True, scope_name='discriminator_A')
self.discrimination_input_A_fake_B_real \
= self.discriminator(input_mfc=tf.concat([self.mfc_A_fake, self.mfc_B_real], axis=1),
input_pitch=tf.concat([self.pitch_A_fake, self.pitch_B_real], axis=1),
reuse=True, scope_name='discriminator_A')
self.discrimination_input_B_real_A_fake \
= self.discriminator(input_mfc=tf.concat([self.mfc_B_real, self.mfc_A_fake], axis=1),
input_pitch=tf.concat([self.pitch_B_real, self.pitch_A_fake], axis=1),
reuse=True, scope_name='discriminator_B')
self.discrimination_input_B_fake_A_real \
= self.discriminator(input_mfc=tf.concat([self.mfc_B_fake, self.mfc_A_real], axis=1),
input_pitch=tf.concat([self.pitch_B_fake, self.pitch_A_real], axis=1),
reuse=True, scope_name='discriminator_B')
# Compute discriminator loss for backprop
self.discriminator_loss_input_A_real \
= l1_loss(y=tf.zeros_like(self.discrimination_input_A_real_B_fake),
y_hat=self.discrimination_input_A_real_B_fake)
self.discriminator_loss_input_A_fake \
= l1_loss(y=tf.ones_like(self.discrimination_input_A_fake_B_real),
y_hat=self.discrimination_input_A_fake_B_real)
self.discriminator_loss_A = (self.discriminator_loss_input_A_real \
+ self.discriminator_loss_input_A_fake) / 2.0
self.discriminator_loss_input_B_real \
= l1_loss(y=tf.zeros_like(self.discrimination_input_B_real_A_fake),
y_hat=self.discrimination_input_B_real_A_fake)
self.discriminator_loss_input_B_fake \
= l1_loss(y=tf.ones_like(self.discrimination_input_B_fake_A_real),
y_hat=self.discrimination_input_B_fake_A_real)
self.discriminator_loss_B = (self.discriminator_loss_input_B_real \
+ self.discriminator_loss_input_B_fake) / 2.0
# Merge the two discriminators into one
self.discriminator_loss = (self.discriminator_loss_A + self.discriminator_loss_B) / 2.0
# Categorize variables to optimize the two sets separately
trainable_variables = tf.trainable_variables()
self.discriminator_vars = [var for var in trainable_variables if 'discriminator' in var.name]
self.generator_vars = [var for var in trainable_variables if 'generator' in var.name \
or 'sampler' in var.name]
# Reserved for test
self.momenta_A2B_test = self.sampler(input_pitch=self.pitch_A_test,
input_mfc=self.mfc_A_test, reuse=True, scope_name='sampler_A2B')
self.pitch_A2B_test = self.lddmm(x=self.pitch_A_test,
p=self.momenta_A2B_test, kernel=self.kernel, reuse=True, scope_name='lddmm')
self.mfc_A2B_test = self.generator(input_pitch=self.pitch_A2B_test,
input_mfc=self.mfc_A_test, reuse=True, scope_name='generator_A2B')
self.momenta_B2A_test = self.sampler(input_pitch=self.pitch_B_test,
input_mfc=self.mfc_B_test, reuse=True, scope_name='sampler_B2A')
self.pitch_B2A_test = self.lddmm(x=self.pitch_B_test,
p=self.momenta_B2A_test, kernel=self.kernel, reuse=True, scope_name='lddmm')
self.mfc_B2A_test = self.generator(input_pitch=self.pitch_B2A_test,
input_mfc=self.mfc_B_test, reuse=True, scope_name='generator_B2A')
def optimizer_initializer(self):
self.generator_learning_rate = tf.placeholder(tf.float32, None,
name='generator_learning_rate')
self.discriminator_learning_rate = tf.placeholder(tf.float32, None,
name='discriminator_learning_rate')
self.discriminator_optimizer \
= tf.train.AdamOptimizer(learning_rate=self.discriminator_learning_rate,
beta1=0.5).minimize(self.discriminator_loss,
var_list=self.discriminator_vars)
self.generator_optimizer \
= tf.train.AdamOptimizer(learning_rate=self.generator_learning_rate,
beta1=0.5).minimize(self.generator_loss, var_list=self.generator_vars)
def train(self, pitch_A, mfc_A, momenta_A2B, pitch_B, mfc_B,
momenta_B2A, lambda_pitch, lambda_mfc, lambda_momenta,
generator_learning_rate):
generated_momenta_B, generated_pitch_B, generated_mfc_B, \
momenta_loss_A2B, pitch_loss_A2B, mfc_loss_A2B, generated_momenta_A, \
generated_pitch_A, generated_mfc_A, momenta_loss_B2A, pitch_loss_B2A, \
mfc_loss_B2A, _ \
= self.sess.run([self.momenta_generation_A2B, self.pitch_generation_A2B,
self.mfc_generation_A2B, self.momenta_loss_A2B, self.pitch_loss_A2B,
self.mfc_loss_A2B, self.momenta_generation_B2A, self.pitch_generation_B2A,
self.mfc_generation_B2A, self.momenta_loss_B2A, self.pitch_loss_B2A,
self.mfc_loss_B2A, self.generator_optimizer],
feed_dict = {self.lambda_pitch:lambda_pitch,
self.lambda_mfc:lambda_mfc, self.lambda_momenta:lambda_momenta,
self.pitch_A_real:pitch_A, self.mfc_A_real:mfc_A,
self.momenta_A2B_real:momenta_A2B,
self.momenta_B2A_real:momenta_B2A,
self.pitch_B_real:pitch_B, self.mfc_B_real:mfc_B,
self.generator_learning_rate:generator_learning_rate})
# self.writer.add_summary(generator_summaries, self.train_step)
# self.writer.add_summary(discriminator_summaries, self.train_step)
self.train_step += 1
return momenta_loss_A2B, momenta_loss_B2A, pitch_loss_A2B, pitch_loss_B2A, \
mfc_loss_A2B, mfc_loss_B2A, generated_momenta_A, generated_pitch_A, \
generated_mfc_A, generated_momenta_B, generated_pitch_B, generated_mfc_B
def test(self, mfc_A, pitch_A, mfc_B, pitch_B):
gen_mom_B, gen_pitch_B, gen_mfc_B, = self.sess.run([self.momenta_A2B_test, \
self.pitch_A2B_test, self.mfc_A2B_test], \
feed_dict={self.pitch_A_test:pitch_A, \
self.mfc_A_test:mfc_A})
gen_mom_A, gen_pitch_A, gen_mfc_A = self.sess.run([self.momenta_B2A_test, \
self.pitch_B2A_test, self.mfc_B2A_test], \
feed_dict={self.pitch_B_test:pitch_B, \
self.mfc_B_test:mfc_B})
return gen_pitch_A, gen_mfc_A, gen_pitch_B, gen_mfc_B, gen_mom_A, gen_mom_B
def test_compact(self, input_pitch, input_mfc, direction):
if direction == 'A2B':
generated_pitch, generated_mfc = self.sess.run([self.pitch_A2B_test,
self.mfc_A2B_test], feed_dict = {self.pitch_A_test:input_pitch,
self.mfc_A_test:input_mfc})
elif direction == 'B2A':
generated_pitch, generated_mfc = self.sess.run([self.pitch_B2A_test,
self.mfc_B2A_test], feed_dict = {self.pitch_B_test:input_pitch,
self.mfc_B_test:input_mfc})
else:
raise Exception('Conversion direction must be specified.')
return generated_pitch, generated_mfc
def save(self, directory, filename):
if not os.path.exists(directory):
os.makedirs(directory)
self.saver.save(self.sess, \
os.path.join(directory, filename))
def load(self, filepath):
self.saver.restore(self.sess, filepath)
    def summary(self):
        # Scalar summaries for the generator and discriminator losses.
        with tf.name_scope('generator_summaries'):
            loss_A2B_summary = tf.summary.scalar('loss_A2B', self.loss_A2B)
            loss_B2A_summary = tf.summary.scalar('loss_B2A', self.loss_B2A)
            generator_loss_summary = tf.summary.scalar('generator_loss',
                                                       self.generator_loss)
            generator_summaries = tf.summary.merge([loss_A2B_summary,
                                                    loss_B2A_summary,
                                                    generator_loss_summary])
        with tf.name_scope('discriminator_summaries'):
            discriminator_loss_A_summary = tf.summary.scalar(
                'discriminator_loss_A', self.discriminator_loss_A)
            discriminator_loss_B_summary = tf.summary.scalar(
                'discriminator_loss_B', self.discriminator_loss_B)
            discriminator_loss_summary = tf.summary.scalar(
                'discriminator_loss', self.discriminator_loss)
            discriminator_summaries = tf.summary.merge([discriminator_loss_A_summary,
                                                        discriminator_loss_B_summary,
                                                        discriminator_loss_summary])
        return generator_summaries, discriminator_summaries
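# A minimal sketch (not part of the original module) of one generator update step.
# Assumptions: random arrays stand in for real features, with shapes following the
# pitch/mfc placeholders defined in build_model (dim_mfc=23, 64 band-pass filters).
def _example_train_step(model, n_frames=128):
    batch = 1
    pitch = lambda: np.random.randn(batch, 1, n_frames).astype(np.float32)
    mfc = lambda: np.random.randn(batch, 23, n_frames, 64).astype(np.float32)
    return model.train(pitch_A=pitch(), mfc_A=mfc(), momenta_A2B=pitch(),
                       pitch_B=pitch(), mfc_B=mfc(), momenta_B2A=pitch(),
                       lambda_pitch=0.1, lambda_mfc=1.0, lambda_momenta=10.0,
                       generator_learning_rate=1e-4)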
if __name__ == '__main__':
    model = VariationalCycleGAN(dim_mfc=23)
print('Graph Compile Successful.')
|
the-stack_106_15315
|
import logging
import re
import sys
from collections import namedtuple
import click
from aws_sso_lib.sso import list_available_roles, login
from .utils import configure_logging, get_instance, GetInstanceError, Printer
LOGGER = logging.getLogger(__name__)
HEADER_FIELDS = {
"id": "Account ID",
"name": "Account name",
"role": "Role name"
}
@click.command()
@click.option("--sso-start-url", "-u", metavar="URL", help="Your AWS SSO start URL")
@click.option("--sso-region", metavar="REGION", help="The AWS region your AWS SSO instance is deployed in")
@click.option("--account-id", "-a", "account_values", metavar="ACCOUNT_ID", multiple=True, default=[], help="List roles for a specific account ID, can be specified multiple times")
@click.option("--account", "account_values", multiple=True, hidden=True)
@click.option("--role-name", "-r", "role_name_patterns", metavar="REGEX", multiple=True, default=[], help="Filter roles by a regular expression, can be specified multiple times")
@click.option("--separator", "--sep", metavar="SEP", help="Field separator for output")
@click.option("--header/--no-header", default=True, help="Include or supress the header row")
@click.option("--sort-by", type=click.Choice(["id,role", "name,role", "role,id", "role,name"]), default=None, help="Specify how the output is sorted")
@click.option("--force-refresh", is_flag=True, help="Re-login")
@click.option("--verbose", "-v", count=True)
def roles(
sso_start_url,
sso_region,
account_values,
role_name_patterns,
separator,
header,
sort_by,
force_refresh,
verbose):
"""List your available accounts and roles.
--sso-start-url and --sso-region are not needed if a single value can be found for them in your ~/.aws/config
or in the environment variables AWS_DEFAULT_SSO_START_URL and AWS_DEFAULT_SSO_REGION.
You can filter the list by providing account IDs and role name patterns.
"""
configure_logging(LOGGER, verbose)
if not account_values:
account_ids = None
account_filter = lambda id, name: True
elif all(re.match(r"^\d{12}$", a) for a in account_values):
account_ids = account_values
account_filter = lambda id, name: True
else:
account_ids = None
def account_filter(id, name):
for value in account_values:
if id.startswith(value) or id.endswith(value) or re.search(value, name):
return True
return False
if sort_by:
sort_by_keys = sort_by.split(",")
elif not separator:
sort_by_keys = ("name", "role")
else:
sort_by_keys = None
if not sort_by_keys:
header_field_keys = ("name", "id", "role")
elif sort_by_keys[0] == "id":
header_field_keys = ("id", "name", "role")
elif sort_by_keys[0] == "name":
header_field_keys = ("name", "id", "role")
elif sort_by_keys[1] == "id":
header_field_keys = ("role", "id", "name")
else:
header_field_keys = ("role", "name", "id")
header_fields = [HEADER_FIELDS[k] for k in header_field_keys]
Row = namedtuple("Row", header_field_keys)
if sort_by_keys:
sort_key = lambda v: tuple(getattr(v, key) for key in sort_by_keys)
else:
sort_key = None
try:
instance = get_instance(
sso_start_url,
sso_region,
)
except GetInstanceError as e:
LOGGER.fatal(str(e))
sys.exit(1)
login(instance.start_url, instance.region, force_refresh=force_refresh)
printer = Printer(
separator=separator,
default_separator=" ",
sort_key=sort_key,
header_fields=header_fields,
disable_header=not header,
skip_repeated_values=False,
)
printer.print_header_before()
for account_id, account_name, role_name in list_available_roles(instance.start_url, instance.region, account_id=account_ids):
if not account_filter(account_id, account_name):
continue
if role_name_patterns:
for pattern in role_name_patterns:
if re.search(pattern, role_name):
break
else:
continue
printer.add_row(Row(id=account_id, name=account_name, role=role_name))
printer.print_after()
if __name__ == "__main__":
roles(prog_name="python -m aws_sso_util.roles") #pylint: disable=unexpected-keyword-arg,no-value-for-parameter
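# Hypothetical invocations (not part of the original module), illustrating the
# filters documented in the command's docstring:
#
#   python -m aws_sso_util.roles --account-id 123456789012 --role-name 'Admin.*'
#   python -m aws_sso_util.roles --sort-by name,role --separator ','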
|
the-stack_106_15316
|
""" test partial slicing on Series/Frame """
import pytest
from datetime import datetime, date
import numpy as np
import pandas as pd
import operator as op
from pandas import (DatetimeIndex, Series, DataFrame,
date_range, Index, Timedelta, Timestamp)
from pandas.util import testing as tm
class TestSlicing(object):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.loc['2005']
expected = df[df.index.year == 2005]
tm.assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
assert result == expected
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2001Q1']) == 90
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['1Q01']) == 90
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2005-11']) == 30
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['2005-11']) == 30
tm.assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
tm.assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-1-1']
assert result == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
tm.assert_series_equal(result, s.iloc[:24])
pytest.raises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60 * 4])
result = s['2005-1-1 20']
tm.assert_series_equal(result, s.iloc[:60])
assert s['2005-1-1 20:00'] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
tm.assert_series_equal(result, s.iloc[:60])
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60])
assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slice_second_precision(self):
rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59,
microsecond=999990),
periods=20, freq='US')
s = Series(np.arange(20), rng)
tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])
tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])
assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]
tm.assert_raises_regex(KeyError, '2005-1-1 00:00:00',
lambda: s['2005-1-1 00:00:00'])
def test_partial_slicing_dataframe(self):
# GH14856
# Test various combinations of string slicing resolution vs.
# index resolution
# - If string resolution is less precise than index resolution,
# string is considered a slice
# - If string resolution is equal to or more precise than index
# resolution, string is considered an exact match
formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',
'%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']
resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']
for rnum, resolution in enumerate(resolutions[2:], 2):
# we check only 'day', 'hour', 'minute' and 'second'
unit = Timedelta("1 " + resolution)
middate = datetime(2012, 1, 1, 0, 0, 0)
index = DatetimeIndex([middate - unit,
middate, middate + unit])
values = [1, 2, 3]
df = DataFrame({'a': values}, index, dtype=np.int64)
assert df.index.resolution == resolution
# Timestamp with the same resolution as index
# Should be exact match for Series (return scalar)
# and raise KeyError for Frame
for timestamp, expected in zip(index, values):
ts_string = timestamp.strftime(formats[rnum])
# make ts_string as precise as index
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == expected
pytest.raises(KeyError, df.__getitem__, ts_string)
# Timestamp with resolution less precise than index
for fmt in formats[:rnum]:
for element, theslice in [[0, slice(None, 1)],
[1, slice(1, None)]]:
ts_string = index[element].strftime(fmt)
# Series should return slice
result = df['a'][ts_string]
expected = df['a'][theslice]
tm.assert_series_equal(result, expected)
# Frame should return slice as well
result = df[ts_string]
expected = df[theslice]
tm.assert_frame_equal(result, expected)
# Timestamp with resolution more precise than index
# Compatible with existing key
# Should return scalar for Series
# and raise KeyError for Frame
for fmt in formats[rnum + 1:]:
ts_string = index[1].strftime(fmt)
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == 2
pytest.raises(KeyError, df.__getitem__, ts_string)
# Not compatible with existing key
# Should raise KeyError
for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:
ts = index[1] + Timedelta("1 " + res)
ts_string = ts.strftime(fmt)
pytest.raises(KeyError, df['a'].__getitem__, ts_string)
pytest.raises(KeyError, df.__getitem__, ts_string)
def test_partial_slicing_with_multiindex(self):
# GH 4758
        # partial string indexing with a multi-index was buggy
df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER': ["ABC", "MNP", "XYZ", "XYZ"],
'val': [1, 2, 3, 4]},
index=date_range("2013-06-19 09:30:00",
periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([
[1]
], index=Index(['ABC'], name='TICKER'), columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
tm.assert_frame_equal(result, expected)
expected = df_multi.loc[
(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
tm.assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on
# multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
pytest.raises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(
'2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
tm.assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.xs('2000-1-4')
result = df2.loc[pd.Timestamp('2000-1-4')]
tm.assert_frame_equal(result, expected)
def test_partial_slice_doesnt_require_monotonicity(self):
# For historical reasons.
s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10))
nonmonotonic = s[[3, 5, 4]]
expected = nonmonotonic.iloc[:0]
timestamp = pd.Timestamp('2014-01-10')
tm.assert_series_equal(nonmonotonic['2014-01-10':], expected)
tm.assert_raises_regex(KeyError,
r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic[timestamp:])
tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected)
tm.assert_raises_regex(KeyError,
r"Timestamp\('2014-01-10 00:00:00'\)",
lambda: nonmonotonic.loc[timestamp:])
def test_loc_datetime_length_one(self):
# GH16071
df = pd.DataFrame(columns=['1'],
index=pd.date_range('2016-10-01T00:00:00',
'2016-10-01T23:59:59'))
result = df.loc[datetime(2016, 10, 1):]
tm.assert_frame_equal(result, df)
result = df.loc['2016-10-01T00:00:00':]
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize('datetimelike', [
Timestamp('20130101'), datetime(2013, 1, 1),
date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')])
@pytest.mark.parametrize('op,expected', [
(op.lt, [True, False, False, False]),
(op.le, [True, True, False, False]),
(op.eq, [False, True, False, False]),
(op.gt, [False, False, False, True])])
def test_selection_by_datetimelike(self, datetimelike, op, expected):
# GH issue #17965, test for ability to compare datetime64[ns] columns
# to datetimelike
df = DataFrame({'A': [pd.Timestamp('20120101'),
pd.Timestamp('20130101'),
np.nan, pd.Timestamp('20130103')]})
result = op(df.A, datetimelike)
expected = Series(expected, name='A')
tm.assert_series_equal(result, expected)
|
the-stack_106_15317
|
import uuid
import pytest
from procrastinate import exceptions, jobs, manager
from .. import conftest
pytestmark = pytest.mark.asyncio
async def test_manager_defer_job(job_manager, job_factory, connector):
job = await job_manager.defer_job_async(
job=job_factory(
task_kwargs={"a": "b"}, queue="marsupilami", task_name="bla", lock="sher"
)
)
assert job.id == 1
assert connector.jobs == {
1: {
"args": {"a": "b"},
"attempts": 0,
"id": 1,
"lock": "sher",
"queueing_lock": None,
"queue_name": "marsupilami",
"scheduled_at": None,
"status": "todo",
"task_name": "bla",
}
}
async def test_manager_defer_job_no_lock(job_manager, job_factory, connector):
await job_manager.defer_job_async(job=job_factory())
assert uuid.UUID(connector.jobs[1]["lock"])
async def test_manager_defer_job_connector_exception(
mocker, job_manager, job_factory, connector
):
connector.execute_query_one_async = mocker.Mock(
side_effect=exceptions.ConnectorException
)
with pytest.raises(exceptions.ConnectorException):
await job_manager.defer_job_async(job=job_factory(task_kwargs={"a": "b"}))
async def test_manager_defer_job_unique_violation_exception(
mocker, job_manager, job_factory, connector
):
connector.execute_query_one_async = mocker.Mock(
side_effect=exceptions.UniqueViolation(
constraint_name="procrastinate_jobs_queueing_lock_idx"
)
)
with pytest.raises(exceptions.AlreadyEnqueued):
await job_manager.defer_job_async(job=job_factory(task_kwargs={"a": "b"}))
async def test_manager_defer_job_unique_violation_exception_other_constraint(
mocker, job_manager, job_factory, connector
):
connector.execute_query_one_async = mocker.Mock(
side_effect=exceptions.UniqueViolation(constraint_name="some_other_constraint")
)
with pytest.raises(exceptions.ConnectorException):
await job_manager.defer_job_async(job=job_factory(task_kwargs={"a": "b"}))
async def test_fetch_job_no_suitable_job(job_manager):
assert await job_manager.fetch_job(queues=None) is None
async def test_fetch_job(job_manager, job_factory):
job = job_factory(id=None)
await job_manager.defer_job_async(job=job)
expected_job = job.evolve(id=1, status="doing")
assert await job_manager.fetch_job(queues=None) == expected_job
async def test_get_stalled_jobs_not_stalled(job_manager, job_factory):
job = job_factory(id=1)
await job_manager.defer_job_async(job=job)
assert await job_manager.get_stalled_jobs(nb_seconds=1000) == []
async def test_get_stalled_jobs_stalled(job_manager, job_factory, connector):
job = job_factory()
await job_manager.defer_job_async(job=job)
await job_manager.fetch_job(queues=None)
connector.events[1][-1]["at"] = conftest.aware_datetime(2000, 1, 1)
expected_job = job.evolve(id=1, status="doing")
assert await job_manager.get_stalled_jobs(nb_seconds=1000) == [expected_job]
@pytest.mark.parametrize(
"include_error, statuses",
[(False, ("succeeded",)), (True, ("succeeded", "failed"))],
)
async def test_delete_old_jobs(
job_manager, job_factory, connector, include_error, statuses, mocker
):
await job_manager.delete_old_jobs(
nb_hours=5, queue="marsupilami", include_error=include_error
)
assert connector.queries == [
(
"delete_old_jobs",
{"nb_hours": 5, "queue": "marsupilami", "statuses": statuses},
)
]
async def test_finish_job(job_manager, job_factory, connector):
job = job_factory(id=1)
await job_manager.defer_job_async(job=job)
await job_manager.finish_job(
job=job, status=jobs.Status.SUCCEEDED, delete_job=False
)
assert connector.queries[-1] == (
"finish_job",
{"job_id": 1, "status": "succeeded", "delete_job": False},
)
async def test_finish_job_with_deletion(job_manager, job_factory, connector):
job = job_factory(id=1)
await job_manager.defer_job_async(job=job)
await job_manager.finish_job(job=job, status=jobs.Status.SUCCEEDED, delete_job=True)
assert connector.queries[-1] == (
"finish_job",
{"job_id": 1, "status": "succeeded", "delete_job": True},
)
assert 1 not in connector.jobs
async def test_retry_job(job_manager, job_factory, connector):
job = job_factory(id=1)
await job_manager.defer_job_async(job=job)
retry_at = conftest.aware_datetime(2000, 1, 1)
await job_manager.retry_job(job=job, retry_at=retry_at)
assert connector.queries[-1] == (
"retry_job",
{"job_id": 1, "retry_at": retry_at},
)
@pytest.mark.parametrize(
"queues, channels",
[
(None, ["procrastinate_any_queue"]),
(["a", "b"], ["procrastinate_queue#a", "procrastinate_queue#b"]),
],
)
async def test_listen_for_jobs(job_manager, connector, mocker, queues, channels):
event = mocker.Mock()
await job_manager.listen_for_jobs(queues=queues, event=event)
assert connector.notify_event is event
assert connector.notify_channels == channels
@pytest.fixture
def configure(app):
@app.task
def foo(timestamp):
pass
return foo.configure
async def test_defer_periodic_job(configure):
deferrer = configure(task_kwargs={"timestamp": 1234567890})
result = await deferrer.job_manager.defer_periodic_job(
job=deferrer.job,
periodic_id="",
defer_timestamp=1234567890,
)
assert result == 1
async def test_defer_periodic_job_with_suffixes(configure):
deferrer = configure(task_kwargs={"timestamp": 1234567890})
result = [
await deferrer.job_manager.defer_periodic_job(
job=deferrer.job,
periodic_id="1",
defer_timestamp=1234567890,
),
await deferrer.job_manager.defer_periodic_job(
job=deferrer.job,
periodic_id="2",
defer_timestamp=1234567890,
),
]
assert result == [1, 2]
async def test_defer_periodic_job_unique_violation(configure):
deferrer1 = configure(
queueing_lock="bla",
task_kwargs={"timestamp": 1234567890},
)
deferrer2 = configure(
queueing_lock="bla",
task_kwargs={"timestamp": 1234567891},
)
await deferrer1.job_manager.defer_periodic_job(
job=deferrer1.job,
periodic_id="",
defer_timestamp=1234567890,
)
with pytest.raises(exceptions.AlreadyEnqueued):
await deferrer2.job_manager.defer_periodic_job(
job=deferrer2.job,
periodic_id="",
defer_timestamp=1234567891,
)
async def test_defer_periodic_job_wrong_timestamp(configure):
deferrer = configure(
queueing_lock="bla",
task_kwargs={"timestamp": 1000000000},
)
with pytest.raises(exceptions.InvalidTimestamp):
await deferrer.job_manager.defer_periodic_job(
job=deferrer.job,
periodic_id="",
defer_timestamp=1234567890,
)
def test_raise_already_enqueued_right_constraint(job_manager):
class UniqueViolation(Exception):
constraint_name = manager.QUEUEING_LOCK_CONSTRAINT
with pytest.raises(exceptions.AlreadyEnqueued) as exc_info:
job_manager._raise_already_enqueued(exc=UniqueViolation(), queueing_lock="foo")
assert "queueing lock foo" in str(exc_info.value)
def test_raise_already_enqueued_wrong_constraint(job_manager):
class UniqueViolation(Exception):
constraint_name = "foo"
with pytest.raises(UniqueViolation):
job_manager._raise_already_enqueued(exc=UniqueViolation(), queueing_lock="foo")
|
the-stack_106_15318
|
import json
import operator
import re
import sys
import time
from datetime import datetime
import pymongo
sys.path.append('../')
from application.Connections import Connection
from application.utils import general
def getTopics(keywords):
if keywords != [""]:
with Connection.Instance().get_cursor() as cur:
sql = (
"SELECT topic_id "
"FROM topics "
"WHERE is_publish = %s"
)
cur.execute(sql, [True])
var = cur.fetchall()
published_topics = []
if var is not None:
published_topics = [i[0] for i in var]
print(published_topics)
keywords_in_dictionary = [re.compile(key, re.IGNORECASE) for key in keywords]
topic_list = {}
for topic_id in Connection.Instance().hashtags.collection_names():
if topic_id != "counters" and int(topic_id) in published_topics:
try:
count = list(Connection.Instance().hashtags[str(topic_id)].aggregate([
{'$unwind': "$month"},
{'$project': {
'hashtag': '$month.hashtag', 'count': '$month.count'
}
},
{
'$match': {
'hashtag': {'$in': keywords_in_dictionary}
}
},
{
'$group':
{
'_id': {},
'count': {'$sum': "$count"}
}
}]))[0]['count']
topic_list[str(topic_id)] = count
                    except Exception:
pass
topics = topic_list.keys()
topics = [int(topic_id) for topic_id in topics]
print(sorted(topic_list.items(), key=operator.itemgetter(1), reverse=True))
if len(topics) == 0:
topics = [-1]
sql = (
"SELECT topic_id, topic_name, topic_description "
"FROM topics "
"WHERE is_publish = %s and topic_id IN %s"
)
cur.execute(sql, [True, tuple(topics)])
var = cur.fetchall()
topics = {str(i[0]): {'topic_name': i[1], 'description': i[2]} for i in var}
result = []
for topic in sorted(topic_list.items(), key=operator.itemgetter(1), reverse=True):
if str(topic[0]) in topics:
temp = topics[str(topic[0])]
temp['topic_id'] = int(topic[0])
result.append(temp)
return json.dumps({'topics': result})
else:
with Connection.Instance().get_cursor() as cur:
sql = (
"SELECT topic_id, topic_name, topic_description "
"FROM topics "
"WHERE is_publish = %s "
"ORDER BY topic_id"
)
cur.execute(sql, [True])
var = cur.fetchall()
topics = [{'topic_id': i[0], 'topic_name': i[1], 'description': i[2]} for i in var]
return json.dumps({'topics': topics})
def getNewsFeeds(date, cursor, forbidden_domain, topics):
if topics == [""]:
return json.dumps({})
dates = ['yesterday', 'week', 'month']
result = {}
if date not in dates:
result['Error'] = 'invalid date'
return json.dumps(result)
# feeds = list(Connection.Instance().filteredNewsPoolDB[themeid].find({'name': date}, {date: 1}))
# feeds = list(feeds[0][date][cursor:cursor+20])
# date = general_utils.determine_date(date)
news = []
for topic_id in topics:
# if len(news) >= cursor + 20:
# break
feeds = list(Connection.Instance().filteredNewsPoolDB[str(topic_id)].find({'name': date}, {date: 1}))
if len(feeds) > 0:
news = news + feeds[0][date]
news = news[cursor:cursor + 20]
cursor = int(cursor) + 20
if cursor >= 60:
cursor = 0
result['next_cursor'] = cursor
result['next_cursor_str'] = str(cursor)
result['news'] = news
return json.dumps(result)
def getAudiences(topic_id):
if topic_id is None:
return json.dumps({})
audiences = list(Connection.Instance().infDB[str(topic_id)].find({}, {'_id': 0, 'screen_name': 1, 'location': 1,
'name': 1, 'profile_image_url': 1, 'lang': 1,
'summary': 1, 'full_text': 1,
'time_zone': 1}).sort(
[('rank', pymongo.ASCENDING)]))
return json.dumps({'audiences': audiences})
def getNews(news_ids, keywords, languages, cities, countries, user_location, user_language, cursor, since, until,
domains, topics):
cursor = int(cursor)
if news_ids == [""] and keywords == [""] and since == "" and until == "" and \
languages == [""] and cities == [""] and countries == [""] and user_location == [""] \
and user_language == [""] and domains == [""]:
return json.dumps({'news': [], 'next_cursor': 0, 'next_cursor_str': "0"})
aggregate_dictionary = []
find_dictionary = {}
date_dictionary = {}
if news_ids != [""]:
news_ids_in_dictionary = [int(one_id) for one_id in news_ids]
find_dictionary['link_id'] = {'$in': news_ids_in_dictionary}
if keywords != [""]:
keywords_in_dictionary = [re.compile(key, re.IGNORECASE) for key in keywords]
find_dictionary['$or'] = [{'title': {'$in': keywords_in_dictionary}},
{'summary': {'$in': keywords_in_dictionary}},
{'full_text': {'$in': keywords_in_dictionary}}]
if domains != [""]:
domains_in_dictionary = [re.compile(key, re.IGNORECASE) for key in domains]
find_dictionary['domain'] = {'$nin': domains_in_dictionary}
if languages != [""]:
language_dictionary = [lang for lang in languages]
find_dictionary['language'] = {'$in': language_dictionary}
if cities != [""]:
city_dictionary = [re.compile(city, re.IGNORECASE) for city in cities]
find_dictionary['location.cities'] = {'$in': city_dictionary}
aggregate_dictionary.append({'$unwind': '$location.cities'})
if countries != [""]:
country_dictionary = [re.compile(country, re.IGNORECASE) for country in countries]
find_dictionary['location.countries'] = {'$in': country_dictionary}
aggregate_dictionary.append({'$unwind': '$location.countries'})
if user_location != [""]:
user_location_dictionary = [re.compile(city, re.IGNORECASE) for city in user_location]
find_dictionary['mentions.location'] = {'$in': user_location_dictionary}
aggregate_dictionary.append({'$unwind': '$mentions'})
if user_language != [""]:
user_language_dictionary = [re.compile(country, re.IGNORECASE) for country in user_language]
find_dictionary['mentions.language'] = {'$in': user_language_dictionary}
aggregate_dictionary.append({'$unwind': '$mentions'})
if since != "":
try:
since_in_dictionary = datetime.strptime(since, "%d-%m-%Y")
date_dictionary['$gte'] = since_in_dictionary
except ValueError:
return json.dumps({'error': "please, enter a valid since day. DAY-MONTH-YEAR"})
if until != "":
try:
until_in_dictionary = datetime.strptime(until, "%d-%m-%Y")
date_dictionary['$lte'] = until_in_dictionary
except ValueError:
return json.dumps({'error': "please, enter a valid since day. DAY-MONTH-YEAR"})
if date_dictionary != {}:
find_dictionary['published_at'] = date_dictionary
aggregate_dictionary.append({'$match': find_dictionary})
if user_language == [""] and user_location == [""]:
aggregate_dictionary.append({'$project': {'mentions': 0}})
aggregate_dictionary.append({'$project': {'_id': 0, 'bookmark': 0, 'bookmark_date': 0, 'location': 0}})
aggregate_dictionary.append({'$sort': {'link_id': -1}})
print(aggregate_dictionary)
topics_filter = []
if topics != [""]:
topics_filter = [int(one_id) for one_id in topics]
news = []
for alertid in Connection.Instance().newsPoolDB.collection_names():
if alertid != "counters":
if len(news) >= cursor + 20:
break
if topics_filter == []:
news = news + list(
Connection.Instance().newsPoolDB[str(alertid)].aggregate(aggregate_dictionary, allowDiskUse=True))
else:
if int(alertid) in topics_filter:
news = news + list(Connection.Instance().newsPoolDB[str(alertid)].aggregate(aggregate_dictionary,
allowDiskUse=True))
next_cursor = cursor + 20
if len(news) < cursor + 20:
next_cursor = 0
result = {
'next_cursor': next_cursor,
'next_cursor_str': str(next_cursor),
'news': news[cursor:cursor + 20]
}
return json.dumps(result, default=general.date_formatter)
def getHastags(topic_id, date):
if topic_id is None:
return json.dumps({})
hashtag_list = list(
Connection.Instance().hashtags[str(topic_id)].find({'name': date}, {'_id': 0, 'modified_date': 0}))
hashtags = []
if len(hashtag_list) > 0:
with Connection.Instance().get_cursor() as cur:
sql = (
"SELECT ARRAY_AGG(hashtag) FROM topic_hashtag WHERE topic_id = %s ;"
)
cur.execute(sql, [topic_id])
var = cur.fetchone()
tags = var[0] if var[0] is not None else []
hashtags = [item for item in hashtag_list[0][date] if item['hashtag'] not in tags]
return json.dumps({'hashtags': hashtags})
def getEvents(topic_id, filterField, cursor):
now = time.time()
cursor = int(cursor)
print(now)
ret = []
if filterField == 'interested':
ret = Connection.Instance().events[str(topic_id)].aggregate([
{'$match': {'end_time': {'$gte': now}}},
{'$project': {'_id': 0}},
{'$sort': {'interested': -1}},
{'$skip': int(cursor)},
{'$limit': 10}
])
elif filterField == 'date':
ret = Connection.Instance().events[str(topic_id)].aggregate([
{'$match': {'end_time': {'$gte': now}}},
{'$project': {'_id': 0}},
{'$sort': {'start_time': 1}},
{'$skip': int(cursor)},
{'$limit': 10}
])
ret = list(ret)
temp = {'events': ret}
return temp
def getConversations(topic_id, timeFilter, paging):
    cursor = Connection.Instance().conversations[str(topic_id)].find(
        {"time_filter": timeFilter},
        {"posts": {"$slice": [int(paging), 10]}, "_id": 0})
    for document in cursor:
docs = []
for submission in document["posts"]:
if not submission["numberOfComments"]:
continue
comments = []
for comment in submission["comments"]:
comment["relative_indent"] = 0
if submission['source'] == 'reddit':
comment["created_time"] = comment["created_time"]
else:
comment["created_time"] = comment["created_time"][:10] + " " + comment["created_time"][11:18]
comments.append(comment)
temp = {"title": submission["title"], "source": submission["source"], "comments": comments,
"url": submission["url"], "commentNumber": submission["numberOfComments"],
'subreddit': submission['subreddit'], 'created_time': submission['created_time']}
if "post_text" in submission:
temp["post_text"] = submission["post_text"]
else:
temp["post_text"] = ""
docs.append(temp)
prev = 0
for values in docs:
for current in values["comments"]:
current["relative_indent"] = current["indent_number"] - prev
prev = current["indent_number"]
return docs
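# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of how the endpoint helpers above might be called once the
# PostgreSQL and MongoDB connections behind Connection.Instance() are configured.
# The keyword, topic id and paging values below are illustrative assumptions.
if __name__ == "__main__":
    print(getTopics(["refugee"]))              # topics whose hashtags match a keyword
    print(getNewsFeeds("week", 0, [], ["1"]))  # first page of this week's news for topic 1
    print(getHastags(1, "week"))               # this week's hashtags for topic 1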
|
the-stack_106_15319
|
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers.convolutional import ZeroPadding2D,Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Lambda,Flatten,Dense,Dropout
from keras.utils.data_utils import get_file
import numpy as np
import utils
import json
def vgg_preprocess(x):
"""
    Subtract the per-channel ImageNet mean from the input and reverse the channel
    order from RGB to BGR, as expected by the pretrained VGG16 weights.
"""
vgg_mean = np.array([123.68, 116.779, 103.939], dtype = np.float32).reshape(3,1,1) # 3 is number of channels
x = x - vgg_mean
return x[:, ::-1] # reverse axis rgb -> bgr
'''
Re-creation of vgg16 imagenet model.
For documentation refer, vgg16.py
'''
class Vgg16Shabeer():
def __init__(self):
self.FILE_PATH = 'http://files.fast.ai/models/'
self.get_classes()
self.create()
def get_classes(self):
fname = 'imagenet_class_index.json'
fpath = get_file(fname, self.FILE_PATH+fname, cache_subdir = 'models/', cache_dir = utils.get_keras_cache_dir())
with open(fpath) as f:
class_dict = json.load(f)
#class_dict looks like {"0": ["n01440764", "tench"],
# "1": ["n01443537", "goldfish"],
# ....}
self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
def get_batches(self, path, batch_size, gen = ImageDataGenerator(), class_mode = 'categorical', shuffle = True):
return gen.flow_from_directory(path, target_size = (224,224), batch_size = batch_size, class_mode = class_mode, shuffle = shuffle)
def ConvBlock(self, layers, filters):
model = self.model
for i in range(layers):
model.add(ZeroPadding2D(padding=(1,1)))
model.add(Conv2D(filters, kernel_size = (3,3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2)))
def FCBlock(self):
model = self.model
model.add(Dense(4096, activation ='relu'))
model.add(Dropout(rate = 0.5))
def create(self):
model = self.model = Sequential()
model.add(Lambda(function = vgg_preprocess, input_shape=(3,224,224), output_shape = (3, 224, 224)))
self.ConvBlock(2,64)
self.ConvBlock(2,128)
self.ConvBlock(3,256)
self.ConvBlock(3,512)
self.ConvBlock(3,512)
model.add(Flatten())
self.FCBlock()
self.FCBlock()
model.add(Dense(units = 1000, activation = 'softmax'))
fname = 'vgg16.h5'
model.load_weights(get_file(fname, origin = self.FILE_PATH+fname , cache_subdir='models', cache_dir = utils.get_keras_cache_dir()))
def compile(self, lr = 0.001):
self.model.compile(optimizer = Adam(lr = lr), loss = 'categorical_crossentropy', metrics = ['accuracy'])
def ft(self,num):
model = self.model
# Remove last layer, which has 1000 output
model.pop()
for layer in model.layers:
layer.trainable = False
# Add a dense layer with number of outputs matching input parameter - num
model.add(Dense(num, activation = 'softmax'))
# now compile the model, to apply the changes done.
self.compile()
def finetune(self, batches):
self.ft(batches.num_class)
classes = list(iter(batches.class_indices))
# batches.class_indices is a dict with the class name as key and an index as value
# eg. {'cats': 0, 'dogs': 1}
# sort
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
self.classes = classes
def fit_data(self, trn, labels, val, val_labels, nb_epoch =1, batch_size=64):
self.model.fit(x=trn, y= labels, validation_data = (val,val_labels), epochs = nb_epoch, batch_size = batch_size)
def fit(self, batches, val_batches, nb_epoch = 1):
self.model.fit_generator(batches, steps_per_epoch = batches.samples, epochs = nb_epoch, validation_data = val_batches, validation_steps = val_batches.samples)
def predict(self, imgs, details = False):
# predict probability of each class
all_predictions = self.model.predict(imgs)
# get index of highest probability
idxs = np.argmax(all_predictions, axis=1)
# get values of highest probability
preds = [all_predictions[i, idxs[i]] for i in range(len(idxs))]
# get class label corresponding to highest probability
classes = [self.classes[idx] for idx in idxs]
        return np.array(preds), idxs, classes
def test(self, path, batch_size = 8):
        test_batches = self.get_batches(path, batch_size = batch_size, shuffle=False, class_mode = None)
return test_batches, self.model.predict_generator(test_batches, steps = test_batches.samples)
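# --- Hedged usage sketch (not part of the original file) ---
# Typical fast.ai-style fine-tuning flow for this class; the directory layout
# (one sub-folder per class) and the batch size are assumptions.
if __name__ == '__main__':
    vgg = Vgg16Shabeer()
    train_batches = vgg.get_batches('data/train', batch_size=64)
    valid_batches = vgg.get_batches('data/valid', batch_size=64)
    vgg.finetune(train_batches)   # replace the 1000-way softmax with this dataset's classes
    vgg.fit(train_batches, valid_batches, nb_epoch=1)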
|
the-stack_106_15320
|
## -*- coding: utf-8 -*-
from joern.all import JoernSteps
from igraph import *
from .general_op import *
import os
import pickle
from py2neo.packages.httpstream import http
http.socket_timeout = 9999
def get_all_use_bydefnode(db, node_id):
query_str = "g.v(%d).in('USE')" % node_id
results = db.runGremlinQuery(query_str)
list_re = []
for re in results:
if re.properties['type'] == 'Statement':
continue
else:
list_re.append(re)
return list_re
def get_all_def_bydefnode(db, node_id):
query_str = "g.v(%d).in('DEF')" % node_id
results = db.runGremlinQuery(query_str)
list_re = []
for re in results:
if re.properties['type'] == 'Statement':
continue
else:
list_re.append(re)
return list_re
def get_exprstmt_node(db):
query_expr_str = "queryNodeIndex('type:ExpressionStatement')"
#results = db.runGremlinQuery(query_expr_str)
results_1 = db.runGremlinQuery(query_expr_str)
query_iddecl_str = 'queryNodeIndex("type:IdentifierDeclStatement")'
results_2 = db.runGremlinQuery(query_iddecl_str)
results = results_1 + results_2
return results
def get_pointers_node(db):
list_pointers_node = []
query_iddecl_str = 'queryNodeIndex("type:IdentifierDeclStatement")'
results = db.runGremlinQuery(query_iddecl_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find('*') != -1:
list_pointers_node.append(re)
query_param_str = 'queryNodeIndex("type:Parameter")'
results = db.runGremlinQuery(query_param_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find('*') != -1:
list_pointers_node.append(re)
return list_pointers_node
def get_arrays_node(db):
list_arrays_node = []
query_iddecl_str = "queryNodeIndex('type:IdentifierDeclStatement')"
results = db.runGremlinQuery(query_iddecl_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find(' [ ') != -1:
list_arrays_node.append(re)
query_param_str = "queryNodeIndex('type:Parameter')"
results = db.runGremlinQuery(query_param_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find(' [ ') != -1:
list_arrays_node.append(re)
return list_arrays_node
def get_def_node(db, cfg_node_id):
query_str = "g.v(%d).out('DEF')" % cfg_node_id
results = db.runGremlinQuery(query_str)
return results
def getFunctionNodeByName(db, funcname):
query_str = "queryNodeIndex('type:Function AND name:%s')" % funcname
results = db.runGremlinQuery(query_str)
return results
def get_parameter_by_funcid(db, func_id):
query_str = "g.v(%d).out('IS_FUNCTION_OF_CFG').out('CONTROLS').filter{ it.type == 'Parameter' }.id" % func_id
results = db.runGremlinQuery(query_str)
return results
def isNodeExist(g, nodeName):
if not g.vs:
return False
else:
return nodeName in g.vs['name']
def getALLFuncNode(db):
query_str = "queryNodeIndex('type:Function')"
results = db.runGremlinQuery(query_str)
return results
def getFuncNode(db, func_name):
query_str = 'getFunctionsByName("' + func_name + '")'
func_node = db.runGremlinQuery(query_str)
return func_node
def getFuncFile(db, func_id):
query_str = "g.v(%d).in('IS_FILE_OF').filepath" % func_id
ret = db.runGremlinQuery(query_str)
print(ret)
return ret[0]
def getCFGNodes(db, func_id):
query_str = 'queryNodeIndex("functionId:%s AND isCFGNode:True")' % func_id
cfgNodes = db.runGremlinQuery(query_str)
return cfgNodes
def getDDGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('REACHES')""" % (func_id)
ddgEdges = db.runGremlinQuery(query_str)
return ddgEdges
def getCDGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('CONTROLS')""" % (func_id)
cdgEdges = db.runGremlinQuery(query_str)
return cdgEdges
def getCFGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('FLOWS_TO')""" % (func_id)
cfgEdges = db.runGremlinQuery(query_str)
return cfgEdges
def drawGraph(db, edges, func_entry_node, graph_type):
g = Graph(directed=True)
func_id = func_entry_node._id
filepath = getFuncFile(db, func_id)
for edge in edges:
if edge.start_node.properties['code'] == 'ENTRY':
startNode = str(edge.start_node.properties['functionId'])
else:
startNode = str(edge.start_node._id)
if edge.start_node.properties['code'] == 'ERROR':
continue
if isNodeExist(g, startNode) == False:
if edge.start_node.properties['code'] == 'ENTRY':
node_prop = {'code': func_entry_node.properties['name'], 'type': func_entry_node.properties['type'],
'location': func_entry_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.start_node.properties['functionId'])}
else:
node_prop = {'code': edge.start_node.properties['code'], 'type': edge.start_node.properties['type'],
'location': edge.start_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.start_node.properties['functionId'])}
g.add_vertex(startNode, **node_prop)#id is 'name'
endNode = str(edge.end_node._id)
if isNodeExist(g, endNode) == False:
if graph_type == 'pdg' and edge.end_node.properties['code'] == 'EXIT':
continue
if edge.end_node.properties['code'] == 'ERROR':
continue
node_prop = {'code': edge.end_node.properties['code'], 'type': edge.end_node.properties['type'],
'location': edge.end_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.end_node.properties['functionId'])}
g.add_vertex(endNode, **node_prop)
if graph_type == 'pdg':
edge_prop = {'var': edge.properties['var']}
else:
edge_prop = {'var': edge.properties['flowLabel']}
g.add_edge(startNode, endNode, **edge_prop)
return g
def translatePDGByNode(db, func_node):
func_id = func_node._id
ddgEdges = getDDGEdges(db, func_id)
cdgEdges = getCDGEdges(db, func_id)
Edges = ddgEdges + cdgEdges
graph_type = 'pdg'
g = drawGraph(db, Edges, func_node, graph_type)
return g
def translateCFGByNode(db, func_node):
func_id = func_node._id
Edges = getCFGEdges(db, func_id)
graph_type = 'cfg'
g = drawGraph(db, Edges, func_node, graph_type)
return g
def getUSENodesVar(db, func_id):
query = "g.v(%s).out('USE').code" % func_id
ret = db.runGremlinQuery(query)
if ret == []:
return False
else:
return ret
def getDEFNodesVar(db, func_id):
query = "g.v(%s).out('DEF').code" % func_id
ret = db.runGremlinQuery(query)
if ret == []:
return False
else:
return ret
def getUseDefVarByPDG(db, pdg):
dict_cfg2use = {}
dict_cfg2def = {}
#print pdg
#need_to_addedge_node = []
for node in pdg.vs:
if node['type'] == 'Function':
continue
func_id = node['name']
use_node = getUSENodesVar(db, func_id)
def_node = getDEFNodesVar(db, func_id)
if node['type'] == 'Statement':
if def_node == False:
code = node['code'].replace('\n', ' ')
if code.find(" = ") != -1:
value = code.split(" = ")[0].strip().split(' ')
if value[-1] == ']':
newvalue = code.split(" [ ")[0].strip().split(' ')
if '->' in newvalue:
a_index = newvalue.index('->')
n_value = ' '.join([newvalue[a_index-1], '->', newvalue[a_index+1]])
newvalue[a_index-1] = n_value
del newvalue[a_index]
del newvalue[a_index]
def_node = newvalue
else:
if '->' in value:
a_index = value.index('->')
n_value = ' '.join([value[a_index-1], '->', value[a_index+1]])
ob_value = value[a_index-1]
value[a_index-1] = n_value
del value[a_index]
del value[a_index]
value.append(ob_value.replace('*', ''))
def_node = value
#need_to_addedge_node.append(node['name'])
if use_node == False:
if code.find(" = ") != -1:
value = code.split(" = ")[1].strip().split(' ')
newvalue = []
for v in value:
if v == '*' or v == '+' or v == '-' or v == '->' or v == '(' or v == ')' or v == '[' or v == ']' or v == '&' or v == '.' or v == '::' or v == ';' or v == ',':
continue
else:
newvalue.append(v.strip())
else:
value = code.split(' ')
newvalue = []
for v in value:
if v == '*' or v == '+' or v == '-' or v == '->' or v == '(' or v == ')' or v == '[' or v == ']' or v == '&' or v == '.' or v == '::' or v == ';' or v == ',':
continue
else:
newvalue.append(v.strip())
use_node = newvalue
if use_node:
use_node = [code.replace('*', '').replace('&', '').strip() for code in use_node]
if def_node:
def_node = [code.replace('*', '').replace('&', '').strip() for code in def_node]
else:#add define node
new_def_node = getReturnVarOfAPI(node['code'])#get modify value of api_func
if node['name'] == '2078':
print("new_def_node", new_def_node)
if new_def_node:
def_node = []
for code in new_def_node:
new_code = code.replace('*', '').replace('&', '').strip()
def_node.append(new_code)
if new_code not in use_node:
use_node.append(new_code)
if use_node:
dict_cfg2use[node['name']] = use_node
if def_node:
dict_cfg2def[node['name']] = def_node
return dict_cfg2use, dict_cfg2def
def getFuncNodeByFile(db, filenodeID):
query_str = 'g.v(%d).out("IS_FILE_OF")' % filenodeID
results = db.runGremlinQuery(query_str)
_list = []
for re in results:
if re.properties['type'] == 'Function':
_list.append(re)
else:
continue
return _list
def getAllFuncfileByTestID(db, testID):
testID = '*/' + testID + '/*'
query_str = "queryNodeIndex('type:File AND filepath:%s').id" % testID
results = db.runGremlinQuery(query_str)
return results
def get_calls_id(db, func_name):
query_str = 'getCallsTo("%s").id' % func_name
results = db.runGremlinQuery(query_str)
return results
def getCFGNodeByCallee(db, node_ast_id):
#print "start"
query_str = "g.v(%s).in('IS_AST_PARENT')" % node_ast_id
results = db.runGremlinQuery(query_str)
#print "end"
if results == []:
return None
for node in results:
if 'isCFGNode' in node.properties and node.properties['isCFGNode'] == 'True':
return node
else:
node = getCFGNodeByCallee(db, node._id)
return node
def getCalleeNode(db, func_id):
query_str = "queryNodeIndex('type:Callee AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_calls_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
print("lenth", len(list_all_funcID))
if len(list_all_funcID)>130:
print(">100")
return False
list_all_callee_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_callee_node += getCalleeNode(db, func_id)
if list_all_callee_node == []:
return False
else:
return [(str(node._id), node.properties['code'], str(node.properties['functionId'])) for node in list_all_callee_node]
def getFuncNodeInTestID(db, testID):
list_all_file_id = getAllFuncfileByTestID(db, testID)
list_all_func_node = []
if list_all_file_id == []:
return False
for file_id in list_all_file_id:
list_func_node = getFuncNodeByFile(db, file_id)
list_all_func_node += list_func_node
return list_all_func_node
def getClassByObjectAndFuncID(db, objectname, func_id):
#print objectname, func_id
all_cfg_node = getCFGNodes(db, func_id)
for cfg_node in all_cfg_node:
if cfg_node.properties['code'] == objectname and cfg_node.properties['type'] == 'Statement':
print(objectname, func_id, cfg_node.properties['code'], cfg_node._id)
query_str_1 = "queryNodeIndex('type:Statement AND code:%s AND functionId:%s')" % (objectname, func_id)
results_1 = db.runGremlinQuery(query_str_1)
if results_1 == []:
return False
else:
ob_cfgNode = results_1[0]
location_row = ob_cfgNode.properties['location'].split(':')[0]
query_str_2 = "queryNodeIndex('type:ExpressionStatement AND functionId:%s')" % func_id
results_2 = db.runGremlinQuery(query_str_2)
if results_2 == []:
return False
classname = False
for node in results_2:
print(node.properties['location'].split(':')[0], location_row)
if node.properties['location'].split(':')[0] == location_row:
classname = node.properties['code']
break
else:
continue
return classname
elif cfg_node.properties['code'].find(' '+objectname+' = new') != -1:
temp_value = cfg_node.properties['code'].split(' '+objectname+' = new')[1].replace('*', '').strip()
if temp_value.split(' ')[0] != 'const':
classname = temp_value.split(' ')[0]
else:
classname = temp_value.split(' ')[1]
return classname
def getDeleteNode(db, func_id):
query_str = "queryNodeIndex('code:delete AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_delete_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
list_all_delete_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_delete_node += getDeleteNode(db, func_id)
if list_all_delete_node == []:
return False
else:
return list_all_delete_node
def getDeclNode(db, func_id):
query_str = "queryNodeIndex('type:IdentifierDeclStatement AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_iddecl_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
list_all_decl_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_decl_node += getDeclNode(db, func_id)
if list_all_decl_node == []:
return False
else:
return list_all_decl_node
def getCallGraph(db, testID):
list_all_func_node = getFuncNodeInTestID(db, testID)
#print "list_all_func_node", list_all_func_node
if list_all_func_node == [] or list_all_func_node == False:
return False
call_g = Graph(directed=True)
for func_node in list_all_func_node:
# print(func_node)
prop = {'funcname':func_node.properties['name'], 'type': func_node.properties['type'], 'filepath': func_node.properties['filepath']}
call_g.add_vertex(str(func_node._id), **prop)
list_all_callee = get_all_calls_node(db, testID)#we must limit result in testID, it already get callee node
#print '3 ', list_all_callee
if list_all_callee == False:
return False
for func_node in list_all_func_node:
function_name = func_node.properties['name']
#print "function_name", function_name
tag = False
if function_name.find('::') != -1:#if is a function in class, have two problems
func_name = function_name.split('::')[-1].strip()
classname = function_name.split('::')[0].strip()
if func_name == classname:#is a class::class, is a statementnode or a iddeclnode
print(1)
list_callee_id = []
list_delete_node = get_all_delete_node(db, testID)
if list_delete_node == False:
continue
for node in list_delete_node:
functionID = node.properties["functionId"]
all_cfg_node = getCFGNodes(db, functionID)
delete_loc = node.properties['location'].split(':')[0]
for cfg_node in all_cfg_node:
                        if cfg_node.properties['location'] is not None and cfg_node.properties['location'].split(':')[0] == delete_loc and cfg_node.properties['code'] != 'delete' and cfg_node.properties['code'] != '[' and cfg_node.properties['code'] != ']':
objectname = cfg_node.properties['code']
ob_classname = getClassByObjectAndFuncID(db, objectname, functionID)
pdg = getFuncPDGByfuncIDAndtestID(functionID, testID)
if pdg == False:
continue
if ob_classname == classname:
for p_n in pdg.vs:
#print p_n['name'], str(node._id), str(cfg_node._id)
if p_n['name'] == str(node._id):
list_s = p_n.predecessors()
for edge in pdg.es:
if pdg.vs[edge.tuple[0]] in list_s and pdg.vs[edge.tuple[1]] == p_n and edge['var'] == objectname:
#print (functionID, str(pdg.vs[edge.tuple[0]]['name']))
list_callee_id.append((str(functionID), str(pdg.vs[edge.tuple[0]]['name'])))
else:
continue
elif p_n['name'] == str(cfg_node._id):
list_s = p_n.predecessors()
for edge in pdg.es:
if pdg.vs[edge.tuple[0]] in list_s and pdg.vs[edge.tuple[1]] == p_n and edge['var'] == objectname:
list_callee_id.append((functionID, str(pdg.vs[edge.tuple[0]]['name'])))
else:
continue
else:
continue
else:
continue
elif func_name.replace('~', '') == classname:#is a class::~class
list_callee_id = []
list_delete_node = get_all_delete_node(db, testID)
if list_delete_node == False:
continue
for node in list_delete_node:
functionID = node.properties["functionId"]
all_cfg_node = getCFGNodes(db, functionID)
delete_loc = node.properties['location'].split(':')[0]
for cfg_node in all_cfg_node:
                        if cfg_node.properties['location'] is not None and cfg_node.properties['location'].split(':')[0] == delete_loc and cfg_node.properties['code'] != 'delete' and cfg_node.properties['code'] != '[' and cfg_node.properties['code'] != ']':
objectname = cfg_node.properties['code']
#print objectname
ob_classname = getClassByObjectAndFuncID(db, objectname, functionID)
if ob_classname == classname:
pdg = getFuncPDGByfuncIDAndtestID(functionID, testID)
if pdg == False:
continue
for p_n in pdg.vs:
if p_n['name'] == str(node._id):
list_callee_id.append((functionID, str(node._id)))
elif p_n['name'] == str(cfg_node._id):
list_callee_id.append((functionID, str(cfg_node._id))) #delete and its object node
else:
continue
else:
continue
else:
print(3)
tag = 'func'
list_callee_id = []
for _t in list_all_callee:#_t is a tuple, _t[0] is nodeid, 1 is funcname, 2 is func_id
if _t[1].find('-> '+ func_name) != -1:#maybe is a class->funcname()
objectname = _t[1].split(' -> '+ func_name)[0].strip()
ob_classname = getClassByObjectAndFuncID(db, objectname, _t[2])
if ob_classname == classname:
list_callee_id.append(_t[0])
else:
continue
else:
continue
else:
tag = 'func'
list_callee_id = []
for _t in list_all_callee:
if _t[1] == function_name:
list_callee_id.append(_t[0])
#print 4, list_callee_id
if list_callee_id == []:
continue
else:
#change ast node to cfgnode
list_callee_CFGNode = []
if tag == 'func':
#print 'z'
for node_id in list_callee_id:
#print 1
callee_cfgnode = getCFGNodeByCallee(db, node_id)
#print callee_cfgnode
#print 2
if callee_cfgnode == None:
print('ERROR', callee_cfgnode)
continue
else:
list_callee_CFGNode.append(callee_cfgnode)
#print 'x'
for node in list_callee_CFGNode:
startNode = str(node.properties['functionId'])
endNode = str(func_node._id)
var = str(node._id)
call_g = addDataEdge(call_g, startNode, endNode, var)#var is callee node id
else:
#print 'y'
for node in list_callee_id:
startNode = str(node[0])
endNode = str(func_node._id)
var = str(node[1])
call_g = addDataEdge(call_g, startNode, endNode, var)#var is callee node id
return call_g
def deliverCallGraph(working_directory):
j = JoernSteps()
j.connectToDatabase()
call_graph_path = working_directory + 'dict_call2cfgNodeID_funcID'
pdg_db_path = working_directory + 'pdg_db'
list_testID = os.listdir(pdg_db_path)
for testID in list_testID:
#if testID != '69055':
# continue
if os.path.exists(os.path.join(call_graph_path, str(testID))):
continue
call_g = getCallGraph(j, testID)
if call_g == False:
continue
_dict = {}
for edge in call_g.es:
endnode = call_g.vs[edge.tuple[1]]
if endnode['name'] not in _dict:
_dict[endnode['name']] = [(edge['var'], call_g.vs[edge.tuple[0]]['name'])]
else:
_dict[endnode['name']].append((edge['var'], call_g.vs[edge.tuple[0]]['name']))
if not os.path.exists(os.path.join(call_graph_path, str(testID))):
os.makedirs(os.path.join(call_graph_path, str(testID)))
filepath = os.path.join(call_graph_path, str(testID), "dict.pkl")
f = open(filepath, 'wb')
pickle.dump(_dict, f, True)
f.close()
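# --- Hedged usage sketch (not part of the original module) ---
# Because of the relative import of general_op above, this module is meant to be run
# through its package (e.g. "python -m <package>.<this_module>"; names are assumptions).
# The entry point is then:
#
#   deliverCallGraph('/path/to/working_directory/')
#
# where the working directory already contains a 'pdg_db' folder with one sub-folder
# per testID, and the computed call graphs are pickled under 'dict_call2cfgNodeID_funcID'.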
|
the-stack_106_15321
|
import json
import argparse
import yaml
from classes.downloader import Downloader
dir = "../docs/api/properties"
Downloader.initDir(dir)
def properties_generator():
f = open("settings.yml", "r+")
    settings = yaml.safe_load(f)
loop_flg = True
page = 1
while loop_flg:
print("page\t"+str(page))
data = Downloader.main(settings, "properties" , page)
page += 1
if len(data) > 0:
for i in range(len(data)):
obj = data[i]
oid = str(obj["o:id"])
with open(dir+"/"+oid+".json", 'w') as outfile:
json.dump(obj, outfile, ensure_ascii=False,
indent=4, sort_keys=True, separators=(',', ': '))
else:
loop_flg = False
if __name__ == "__main__":
properties_generator()
|
the-stack_106_15322
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_profile_analytics
short_description: Manage HTTP analytics profiles on a BIG-IP
description:
- Manage HTTP analytics profiles on a BIG-IP device.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(analytics) profile.
type: str
description:
description:
- Description of the profile.
type: str
collect_geo:
description:
- Enables or disables the collection of the names of the countries
from where the traffic was sent.
type: bool
collect_ip:
description:
- Enables or disables the collection of client IPs statistics.
type: bool
collect_max_tps_and_throughput:
description:
- Enables or disables the collection of maximum TPS and throughput
for all collected entities.
type: bool
collect_page_load_time:
description:
- Enables or disables the collection of the page load time
statistics.
type: bool
collect_url:
description:
- Enables or disables the collection of requested URL statistics.
type: bool
collect_user_agent:
description:
- Enables or disables the collection of user agents.
type: bool
collect_user_sessions:
description:
- Enables or disables the collection of the unique user sessions.
type: bool
collected_stats_external_logging:
description:
- Enables or disables the external logging of the collected
statistics.
type: bool
collected_stats_internal_logging:
description:
- Enables or disables the internal logging of the collected
statistics.
type: bool
external_logging_publisher:
description:
- Specifies the external logging publisher used to send statistical
data to one or more destinations.
type: str
notification_by_syslog:
description:
- Enables or disables logging of the analytics alerts into the
Syslog.
type: bool
notification_by_email:
description:
- Enables or disables sending the analytics alerts by email.
type: bool
notification_email_addresses:
description:
- Specifies which email addresses receive alerts by email when
C(notification_by_email) is enabled.
type: list
elements: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a profile
bigip_profile_analytics:
name: profile1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
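# A slightly fuller playbook sketch (illustrative only, kept outside the registered
# EXAMPLES block; the option names come from DOCUMENTATION above, the values are
# assumptions):
#
# - name: Create an analytics profile with collection and notification options
#   bigip_profile_analytics:
#     name: profile1
#     collect_geo: yes
#     collect_ip: yes
#     collect_url: yes
#     notification_by_email: yes
#     notification_email_addresses:
#       - admin@example.com
#     state: present
#     provider:
#       password: secret
#       server: lb.mydomain.com
#       user: admin
#   delegate_to: localhost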
RETURN = r'''
param1:
description: The new param1 value of the resource.
returned: changed
type: bool
sample: true
param2:
description: The new param2 value of the resource.
returned: changed
type: str
sample: Foo is bar
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, flatten_boolean, fq_name
)
from ..module_utils.compare import cmp_simple_list
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'collectGeo': 'collect_geo',
'collectIp': 'collect_ip',
'collectMaxTpsAndThroughput': 'collect_max_tps_and_throughput',
'collectPageLoadTime': 'collect_page_load_time',
'collectUrl': 'collect_url',
'collectUserAgent': 'collect_user_agent',
'collectUserSessions': 'collect_user_sessions',
'collectedStatsExternalLogging': 'collected_stats_external_logging',
'collectedStatsInternalLogging': 'collected_stats_internal_logging',
'externalLoggingPublisher': 'external_logging_publisher',
'notificationBySyslog': 'notification_by_syslog',
'notificationByEmail': 'notification_by_email',
'notificationEmailAddresses': 'notification_email_addresses'
}
api_attributes = [
'description',
'defaultsFrom',
'collectGeo',
'collectIp',
'collectMaxTpsAndThroughput',
'collectPageLoadTime',
'collectUrl',
'collectUserAgent',
'collectUserSessions',
'collectedStatsExternalLogging',
'collectedStatsInternalLogging',
'externalLoggingPublisher',
'notificationBySyslog',
'notificationByEmail',
'notificationEmailAddresses',
]
returnables = [
'collect_geo',
'collect_ip',
'collect_max_tps_and_throughput',
'collect_page_load_time',
'collect_url',
'collect_user_agent',
'collect_user_sessions',
'collected_stats_external_logging',
'collected_stats_internal_logging',
'description',
'external_logging_publisher',
'notification_by_syslog',
'notification_by_email',
'notification_email_addresses',
'parent',
]
updatables = [
'collect_geo',
'collect_ip',
'collect_max_tps_and_throughput',
'collect_page_load_time',
'collect_url',
'collect_user_agent',
'collect_user_sessions',
'collected_stats_external_logging',
'collected_stats_internal_logging',
'description',
'external_logging_publisher',
'notification_by_syslog',
'notification_by_email',
'notification_email_addresses',
'parent',
]
@property
def external_logging_publisher(self):
if self._values['external_logging_publisher'] is None:
return None
if self._values['external_logging_publisher'] in ['none', '']:
return ''
result = fq_name(self.partition, self._values['external_logging_publisher'])
return result
@property
def collect_geo(self):
return flatten_boolean(self._values['collect_geo'])
@property
def collect_ip(self):
return flatten_boolean(self._values['collect_ip'])
@property
def collect_max_tps_and_throughput(self):
return flatten_boolean(self._values['collect_max_tps_and_throughput'])
@property
def collect_page_load_time(self):
return flatten_boolean(self._values['collect_page_load_time'])
@property
def collect_url(self):
return flatten_boolean(self._values['collect_url'])
@property
def collect_user_agent(self):
return flatten_boolean(self._values['collect_user_agent'])
@property
def collect_user_sessions(self):
return flatten_boolean(self._values['collect_user_sessions'])
@property
def collected_stats_external_logging(self):
return flatten_boolean(self._values['collected_stats_external_logging'])
@property
def collected_stats_internal_logging(self):
return flatten_boolean(self._values['collected_stats_internal_logging'])
@property
def notification_by_syslog(self):
return flatten_boolean(self._values['notification_by_syslog'])
@property
def notification_by_email(self):
return flatten_boolean(self._values['notification_by_email'])
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
class ModuleParameters(Parameters):
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def notification_email_addresses(self):
if self._values['notification_email_addresses'] is None:
return None
elif (len(self._values['notification_email_addresses']) == 1 and
self._values['notification_email_addresses'][0] in ['', 'none']):
return []
return self._values['notification_email_addresses']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def collect_geo(self):
if self._values['collect_geo'] is None:
return None
elif self._values['collect_geo'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_ip(self):
if self._values['collect_ip'] is None:
return None
elif self._values['collect_ip'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_max_tps_and_throughput(self):
if self._values['collect_max_tps_and_throughput'] is None:
return None
elif self._values['collect_max_tps_and_throughput'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_page_load_time(self):
if self._values['collect_page_load_time'] is None:
return None
elif self._values['collect_page_load_time'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_url(self):
if self._values['collect_url'] is None:
return None
elif self._values['collect_url'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_user_agent(self):
if self._values['collect_user_agent'] is None:
return None
elif self._values['collect_user_agent'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collect_user_sessions(self):
if self._values['collect_user_sessions'] is None:
return None
elif self._values['collect_user_sessions'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collected_stats_external_logging(self):
if self._values['collected_stats_external_logging'] is None:
return None
elif self._values['collected_stats_external_logging'] == 'yes':
return 'enabled'
return 'disabled'
@property
def collected_stats_internal_logging(self):
if self._values['collected_stats_internal_logging'] is None:
return None
elif self._values['collected_stats_internal_logging'] == 'yes':
return 'enabled'
return 'disabled'
@property
def notification_by_syslog(self):
if self._values['notification_by_syslog'] is None:
return None
elif self._values['notification_by_syslog'] == 'yes':
return 'enabled'
return 'disabled'
@property
def notification_by_email(self):
if self._values['notification_by_email'] is None:
return None
elif self._values['notification_by_email'] == 'yes':
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def collect_geo(self):
return flatten_boolean(self._values['collect_geo'])
@property
def collect_ip(self):
return flatten_boolean(self._values['collect_ip'])
@property
def collect_max_tps_and_throughput(self):
return flatten_boolean(self._values['collect_max_tps_and_throughput'])
@property
def collect_page_load_time(self):
return flatten_boolean(self._values['collect_page_load_time'])
@property
def collect_url(self):
return flatten_boolean(self._values['collect_url'])
@property
def collect_user_agent(self):
return flatten_boolean(self._values['collect_user_agent'])
@property
def collect_user_sessions(self):
return flatten_boolean(self._values['collect_user_sessions'])
@property
def collected_stats_external_logging(self):
return flatten_boolean(self._values['collected_stats_external_logging'])
@property
def collected_stats_internal_logging(self):
return flatten_boolean(self._values['collected_stats_internal_logging'])
@property
def notification_by_syslog(self):
return flatten_boolean(self._values['notification_by_syslog'])
@property
def notification_by_email(self):
return flatten_boolean(self._values['notification_by_email'])
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.want.description != self.have.description:
return self.want.description
@property
def notification_email_addresses(self):
return cmp_simple_list(self.want.notification_email_addresses, self.have.notification_email_addresses)
@property
def external_logging_publisher(self):
if self.want.external_logging_publisher is None:
return None
if self.have.external_logging_publisher is None and self.want.external_logging_publisher == '':
return None
if self.want.external_logging_publisher != self.have.external_logging_publisher:
return self.want.external_logging_publisher
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/analytics/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/analytics/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 409, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/analytics/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/analytics/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/analytics/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(),
description=dict(),
collect_geo=dict(type='bool'),
collect_ip=dict(type='bool'),
collect_max_tps_and_throughput=dict(type='bool'),
collect_page_load_time=dict(type='bool'),
collect_url=dict(type='bool'),
collect_user_agent=dict(type='bool'),
collect_user_sessions=dict(type='bool'),
collected_stats_external_logging=dict(type='bool'),
collected_stats_internal_logging=dict(type='bool'),
external_logging_publisher=dict(),
notification_by_syslog=dict(type='bool'),
notification_by_email=dict(type='bool'),
notification_email_addresses=dict(
type='list',
elements='str',
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
the-stack_106_15325
|
# encoding: utf-8
from opendatatools.common import RestAgent
from bs4 import BeautifulSoup
import pandas as pd
import json
class AnjukeAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
self.city_map = {}
def load_city(self):
url = 'https://www.anjuke.com/sy-city.html'
response = self.do_request(url)
if response is None:
return {}
soup = BeautifulSoup(response, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'letter_city' in div['class']:
links = div.find_all('a')
for link in links:
url = link['href']
city = link.text
self.city_map[city] = url
@staticmethod
def extract_word(text, start_tag, end_tag):
index1 = text.find(start_tag)
index2 = text.find(end_tag, index1)
return text[index1 + len(start_tag): index2]
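    # Hedged illustration (not from the original code): extract_word is a plain
    # substring helper, used below to cut the chart payload out of the page HTML, e.g.
    #   AnjukeAgent.extract_word("drawChart({...});", "drawChart(", ");")  ->  "{...}"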
def get_city_list(self):
if len(self.city_map) == 0:
self.load_city()
return self.city_map.keys()
def get_real_house_price(self, city):
if len(self.city_map) == 0:
self.load_city()
if city not in self.city_map:
return None, "城市输入错误"
url = self.city_map[city]
url_market = url + "/market/"
response = self.do_request(url_market)
if response is None:
            return None, 'failed to fetch data'
content = AnjukeAgent.extract_word(response, 'drawChart(', ');')
xyear = json.loads('{' + AnjukeAgent.extract_word(content, 'xyear:{', '},') + '}')
ydata = json.loads(AnjukeAgent.extract_word(content, 'ydata:', '] '))
list_date = []
for month, year in xyear.items():
month = int(month.replace('月', ''))
year = int(year.replace('年', ''))
date = '%04d%02d' % (year, month)
list_date.append(date)
list_price = ydata[0]['data']
df = pd.DataFrame({'date': list_date, 'price': list_price})
df['city'] = city
return df, ''
|
the-stack_106_15326
|
from .abstract_test import AbstractTest
from ..problems import OptGapC3
from ..partitioning.hard_coded_partitioning import HardCodedPartitioning
from ..algorithms.ncflow.ncflow_edge_per_iter import NCFlowEdgePerIter as NcfEpi
# This test illustrates the optimality gap from relaxing condition C3
# That is, that all the demands be satisfiable in order for optimality to hold.
# Here, a flow has multiple bottlenecks in different meta-nodes and
# flows that share only one of those bottlenecks lose flow; leading to optimality gap.
class OptGapC3Test(AbstractTest):
def __init__(self):
super().__init__()
self.problem = OptGapC3()
@property
def name(self):
return 'optgapc3'
def run(self):
ncf = NcfEpi.new_max_flow(4)
hc = HardCodedPartitioning(partition_vector=[0, 0, 1, 2, 2, 3, 4])
ncf.solve(self.problem, hc)
self.assert_feasibility(ncf)
# this is a shame; the optimal solution here should be 8; we get 1.0
self.assert_geq_epsilon(ncf.obj_val, 1.0)
self.assert_leq_epsilon(ncf.obj_val, 8.0)
|
the-stack_106_15329
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sklearn import preprocessing
import numpy
import pandas as pd
path_no_attacks_cleaned = "data/data_no_attack_cleaned.csv"
path_no_attacks_normalized = "data/data_no_attack_normalized.csv"
path_attacks_cleaned = "data/data_attack_cleaned.csv"
path_attacks_normalized = "data/data_attack_normalized.csv"
def to_csv(path,dataframe):
numpy.savetxt(path, dataframe, delimiter=",")
def read_df(path):
return pd.read_csv(path, low_memory=False)
def normalize_filtered_data(df):
min_max_scaler = preprocessing.MinMaxScaler()
df_norm = min_max_scaler.fit_transform(df)
return df_norm
if __name__ == '__main__':
df_no_attacks = read_df(path_no_attacks_cleaned)
df_attacks = read_df(path_attacks_cleaned)
df_norm_no_attacks = normalize_filtered_data(df_no_attacks)
df_norm_attacks = normalize_filtered_data(df_attacks)
to_csv(path_no_attacks_normalized,df_norm_no_attacks)
to_csv(path_attacks_normalized,df_norm_attacks)
|
the-stack_106_15330
|
#%%
from localsearch.local_search import LocalSearchAlgorithm, NeighborhoodRelation
from typing import Callable
import networkx as nx
import random
import math
class SimulatedAnnealing(LocalSearchAlgorithm):
def __init__(self, relation: NeighborhoodRelation, cost_fn: Callable):
super().__init__(relation, cost_fn)
def run(self,
initialInstance: nx.Graph,
max_iter: int,
cooling_schedule: Callable,
constant: int):
max_sol, max_cost = initialInstance, self.cost(initialInstance, self.p1_symbol, self.p2_symbol)
cur_sol, cur_cost = max_sol, max_cost
for i in range(max_iter):
neighborhood = self.relation.neighbors(cur_sol)
for solution in neighborhood:
if (c := self.cost(solution, self.p1_symbol, self.p2_symbol)) >= cur_cost:
cur_sol, cur_cost = solution, c
if cur_cost > max_cost:
max_sol, max_cost = cur_sol, cur_cost
else:
delta_cost = abs(c - cur_cost)
if random.random() < math.exp(-1 * (delta_cost / (cooling_schedule(i+1) * constant))):
cur_sol, cur_cost = solution, c
break
return max_sol
|
the-stack_106_15331
|
'''Improve the game from CHALLENGE 028,
where the computer "thinks" of a number
between 0 and 10. Now the player
keeps guessing until getting it right,
and at the end the program shows how many
guesses were needed to win.'''
from random import randint
computador = randint(0,10)
print (' Olá, eu sou seu computador... \n irei pensar em um número de 0 até 10.')
print('=' *40)
print(' Será que você consegue adivinhar qual foi?')
acertou = False
palpites = 0
while not acertou:
jogador = int(input(' Qual o seu palpite? '))
palpites+=1
print('='*40)
if jogador == computador:
acertou = True
else:
if jogador < computador:
print ('Mais... tente novamente.')
elif jogador > computador:
print('Menos... jogue novamente.')
print(' Acertou com {} tentativas, parabéns!'.format(palpites))
|
the-stack_106_15332
|
import json
from tmdbv3api import Movie, TMDb
from datetime import date
from pathlib import Path
def update_tmdb_database():
"""
    Fetch today's popular movies from TMDb and cache them to a daily JSON file
"""
# Loading the configuration file:
conf = load_config()
tmdb = TMDb()
tmdb.api_key = conf['tmdb_api_key']
tmdb_poster_url = 'https://www.themoviedb.org/t/p/w400'
tmdb.language = conf['tmdb_language']
tmdb.debug = True
popular_today_file = f'app/tmdb/{date.today()}.json'
path = Path(popular_today_file)
if path.is_file():
with open(popular_today_file, 'r', encoding='utf-8') as pop_file:
popular_films = json.loads(pop_file.read())
else:
movie = Movie()
popular = movie.popular()
formatted_pop = {}
for p in popular:
formatted_pop.update({
p.id: {
'title': p.title,
'poster_path': tmdb_poster_url + p.poster_path,
}
})
with open(popular_today_file, 'w', encoding='utf-8') as pop_file:
pop_file.write(json.dumps(formatted_pop, indent=4))
popular_films = formatted_pop
return popular_films
def load_config():
"""
Load configuration file located in app/settings.json
"""
with open('app/settings.json', 'r') as conf_file:
return json.loads(conf_file.read())
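# Minimal usage sketch (assumes app/settings.json provides a valid "tmdb_api_key"
# and "tmdb_language", and that the app/tmdb/ cache directory exists and is writable):
if __name__ == '__main__':
    films = update_tmdb_database()
    for film_id, info in films.items():
        print(film_id, info['title'])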
|
the-stack_106_15333
|
class queue:
s=[]
def push(self):
a=input("Enter any number :")
queue.s.append(a)
def display(self):
l=len(queue.s)
for i in range(l):
print (queue.s[i])
def peek(self):
if (queue.s==[]):
print("Queue Empty")
else:
print("First Element :- ",queue.s[0])
a=queue()
c="y"
while(c=="y"):
print ("Enter 1. To PUSH ")
print ("Enter 2. To POP ")
print ("Enter 3. To PEEK ")
print ("Enter 4. To Display ")
print("________________________________________________________________")
choice=int(input("Enter Your Choice :- "))
if (choice==1):
a.push()
elif (choice==2):
if (a.s==[]):
print ("queue Empty")
else:
print ("Deleted element is : ",a.s.pop(0))
elif (choice==3):
a.peek()
elif (choice==4):
a.display()
else:
print("Wrong Input")
c=input("If You Wanna Continue Enter 'y' :- ")
if c!='y':
print("Bye")
        break
|
the-stack_106_15336
|
from PyQt5 import QtCore
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLabel, QVBoxLayout
class NNodeContentBase(QLabel): # , Serializable):
def __init__(self, node, parent=None, input = None):
super().__init__()
self.node = node
self.node.value = None
self.setParent(parent)
layout = QVBoxLayout()
self.lbl = QLabel(self)
self.lbl.setAlignment(QtCore.Qt.AlignCenter)
self.lbl.setStyleSheet("border: 1px solid black;")
layout.addWidget(self.lbl)
self.setLayout(layout)
self.lbl_size = None
def update(self):
if not self.lbl_size:
self.lbl_size = [self.lbl.width(), self.lbl.height()]
if self.node.value != None:
pm = QPixmap.fromImage(self.node.value)
self.lbl.setPixmap(pm.scaled(self.lbl_size[0],self.lbl_size[1], QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))
|
the-stack_106_15338
|
from werkzeug.utils import secure_filename
import os
import ntpath
from pathlib import Path
ALLOWED_EXTENSIONS = {'csv', 'json', 'sql', 'xml'}
UPLOAD_FOLDER = os.getcwd() + '/data'
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
class FileUpload():
"""
place all methods needed to upload and read data
"""
    # upload a validated file
    @staticmethod
    def dump_file(file):
# check if filename is present
if file.filename == '':
return 'No selected file'
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(UPLOAD_FOLDER, filename))
return file.filename
    # read a known file
    @staticmethod
    def read_file(filename):
data_folder = os.getcwd() + '/data'
file_to_open = os.path.join(data_folder, filename)
print(file_to_open)
f = open(file_to_open)
# return filename and the opened file
filename_extension = ntpath.basename(file_to_open).split(".")[1]
data = {
"filename": ntpath.basename(file_to_open),
"filename_extension": filename_extension,
"file": f
}
return data
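# Minimal usage sketch. allowed_file() is pure and can be exercised directly;
# dump_file()/read_file() are normally driven from a web request handler
# (for example a Flask view passing request.files['file']), which is assumed here.
if __name__ == '__main__':
    print(allowed_file('report.csv'))   # True
    print(allowed_file('report.exe'))   # False
    # data = FileUpload.read_file('report.csv')  # needs ./data/report.csv to exist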
|
the-stack_106_15339
|
import logging
import asyncio
import sys
import json
import pickle
from kademlia.network import Server
if len(sys.argv) != 5:
print("Usage: python set.py <bootstrap node> <bootstrap port> <key> <value>")
sys.exit(1)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
log = logging.getLogger('kademlia')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
lista = {"timestamp": 1647391451.1517725, "amount": 30, "sender": "a", "receiver": "b"}
nlista = pickle.dumps(lista)
async def run():
server = Server()
await server.listen(8469)
    # use the bootstrap node and port passed on the command line (see the usage check above)
    bootstrap_node = (sys.argv[1], int(sys.argv[2]))
await server.bootstrap([bootstrap_node])
await server.set(lista.get('sender'), nlista)
server.stop()
asyncio.run(run())
|
the-stack_106_15344
|
# importing PIL -- useful in reading/writing/viewing images
from PIL import Image
# importing math -- useful in tan inverse, cos(theta) etc
import math
# importing numpy -- useful in matrix operations
import numpy as np
# importing matplotlib -- useful for plotting and saving images
import matplotlib.pyplot as plt
#importing sys library
import sys
# function to read image at a given path
def readImage(path):
return np.asarray(Image.open(path))
# convert np.array to Image.object
def getImage(img,mode1):
return Image.fromarray(img, mode = mode1 )
def getArray(img):
return np.asarray(img)
def saveImage(newimg,mode1,image_name):
newimg=Image.fromarray(newimg,mode=mode1)
newimg.save(image_name)
# function to build the size x size orthonormal DCT (type-II) basis matrix
def find_matrix_for_dct(size):
mat = np.zeros((size,size), dtype=float)
for v in range(0,size):
for u in range(0,size):
if(u==0):
mat[u][v] = 1.0/np.sqrt(size)
else:
mat[u][v] = (np.sqrt(2.0)/np.sqrt(size))*np.cos(((2*v+1)*np.pi*u)/(2*size))
return mat
#function to find dct of image
def find_dct_image(image,cmat,size):
newimg=np.zeros(image.shape , dtype = float)
image = image - 128
i = 0
while( i < image.shape[0] ):
j = 0
while( j < image.shape[1] ):
temp = image[i:i+size,j:j+size]
newimg[i:i+size,j:j+size] = (cmat.dot(temp)).dot(cmat.transpose())
j=j+size
i=i+size
return newimg
#function to find the inverse cosine transform of image
def inverse_dct_image(image,cmat,size):
newimg=np.zeros(image.shape , dtype = float)
i = 0
while( i < image.shape[0] ):
j = 0
while( j < image.shape[1] ):
temp = image[i:i+size,j:j+size]
temp2 = (np.rint((cmat.transpose().dot(temp)).dot(cmat)))+128
newimg[i:i+size,j:j+size] = temp2
j=j+size
i=i+size
newimg = newimg.astype(np.uint8)
return newimg
# function to select coefficients from the image by thresholding average block energy
def selecting_cofficent(max_number_coff , dct_img,size):
# initialising new image map1 and average_energy
newimg= np.zeros(dct_img.shape, dtype = float)
map1 = np.zeros((size,size) , dtype = float)
average_energy = np.zeros((size,size) ,dtype = float)
i=0
# loop to find the average energy of each pixel
while( i < dct_img.shape[0] ):
j=0
while( j < dct_img.shape[1] ):
average_energy = average_energy + dct_img[i:i+size,j:j+size]
j=j+size
i=i+size
    # finding average energy and sorting the array to find the threshold
average_energy = abs((average_energy*1.0)/((dct_img.shape[0]*dct_img.shape[1])/(size*size)))
print("average energy")
print(average_energy)
plt.clf()
plt.imsave('average_energy_'+str(size)+'_'+str(max_number_coff)+'.jpeg',abs(average_energy).astype(np.uint8),cmap = 'gray' )
temp = average_energy.flatten()
temp.sort()
total_zero_cofficient = 0
# loop to find the map
for i in range(size):
for j in range(size):
if(average_energy[i][j] >= temp[-max_number_coff]):
map1[i,j] = 1
else :
map1[i,j] = 0
total_zero_cofficient += 1
print("map :")
print(map1)
    # total number of zero coefficients in the matrix
total_zero_cofficient = (total_zero_cofficient * (dct_img.shape[0]*dct_img.shape[1])) / (size*size)
# image after thresholding
i=0
while( i < dct_img.shape[0] ):
j=0
while( j < dct_img.shape[1] ):
for t in range(i,i+size):
for s in range(j,j+size):
if(map1[t-i,s-j]==1):
newimg[t,s] = dct_img[t,s]
else:
newimg[t,s] = 0
j=j+size
i=i+size
#print(total_zero_cofficient)
return newimg , map1 , average_energy , total_zero_cofficient
def for_colour_image(max_coff,size):
print(" ")
print("size: "+str(size)+" max_coff: "+str(max_coff))
image = getImage(readImage(sys.argv[1]),'RGB')
image = getArray(image)
image = image.astype(float)
cmat = find_matrix_for_dct(size)
uncompressed_img = np.zeros(image.shape , dtype = np.uint8)
#print(image.shape[2])
total_zero_cofficient = 0
for k in range(0,image.shape[2]):
dct_img = find_dct_image(image[:,:,k],cmat,size)
dct_compressed_img , map1 , average_energy , total_zero_cofficient1 = selecting_cofficent(max_coff , dct_img,size)
total_zero_cofficient += total_zero_cofficient1
temp = inverse_dct_image(dct_compressed_img,cmat,size)
uncompressed_img[:,:,k]=temp
saveImage(uncompressed_img,'RGB',"dct_compressed"+str(max_coff)+"_"+str(size)+"_"+sys.argv[1])
MSE = image.astype(float) - uncompressed_img.astype(float)
MSE = np.sum(MSE*MSE)/(image.shape[0]*image.shape[1])
print("MSE/pixel :"+str(MSE))
print("total coefficients which are converted to zero: "+str(total_zero_cofficient))
def for_blackandwhite_image(max_coff,size):
# getting the image as numpy array
image = getImage(readImage(sys.argv[1]),'L')
image = getArray(image)
print(image.shape)
image = image.astype(float)
# finding the matrix for dct
cmat = find_matrix_for_dct(size)
#finding dct of image
dct_img = find_dct_image(image,cmat,size)
dct_compressed_img , map1 , average_energy , total_zero_cofficient = selecting_cofficent(max_coff , dct_img,size)
uncompressed_img = inverse_dct_image(dct_compressed_img,cmat,size)
saveImage(uncompressed_img,'L',"zdct_compressed"+str(max_coff)+"_"+str(size)+"_"+"zfinal.jpg")
MSE = image.astype(float) - uncompressed_img.astype(float)
MSE = np.sum(MSE*MSE)/(image.shape[0]*image.shape[1])
print("MSE : "+ str(MSE))
for_blackandwhite_image(10,8)
for_blackandwhite_image(5,8)
for_blackandwhite_image(40,16)
for_blackandwhite_image(10,16)
for_blackandwhite_image(150,16)
# for_colour_image(10,8)
# for_colour_image(5,8)
# for_colour_image(40,16)
# for_colour_image(150,16)
# for_colour_image(10,16)
# for_colour_image(30,8)
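# Quick sanity-check sketch: the matrix built by find_matrix_for_dct is an
# orthonormal DCT basis, so C.dot(C.T) should be (numerically) the identity.
if __name__ == '__main__':
    C = find_matrix_for_dct(8)
    assert np.allclose(C.dot(C.transpose()), np.identity(8))
    print("8x8 DCT basis is orthonormal")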
|
the-stack_106_15346
|
import csv
import hashlib
import hmac
import logging
import os
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, overload
from urllib.parse import urlencode
import gevent
import requests
from gevent.lock import Semaphore
from typing_extensions import Literal
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import asset_from_poloniex
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.constants.timing import QUERY_RETRY_TIMES
from rotkehlchen.errors import (
DeserializationError,
RemoteError,
UnknownAsset,
UnprocessableTradePair,
UnsupportedAsset,
)
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Loan,
Trade,
TradeType,
invert_pair,
trade_pair_from_assets,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface
from rotkehlchen.fval import FVal
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter, make_sensitive
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_fee,
deserialize_price,
deserialize_timestamp,
deserialize_timestamp_from_poloniex_date,
deserialize_trade_type,
get_pair_position_str,
)
from rotkehlchen.typing import (
ApiKey,
ApiSecret,
AssetMovementCategory,
Fee,
Location,
Timestamp,
TradePair,
)
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock
from rotkehlchen.utils.misc import create_timestamp, ts_now_in_ms
from rotkehlchen.utils.serialization import rlk_jsonloads_dict, rlk_jsonloads_list
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_poloniex(poloniex_trade: Dict[str, Any], pair: TradePair) -> Trade:
"""Turn a poloniex trade returned from poloniex trade history to our common trade
history format
Throws:
- UnsupportedAsset due to asset_from_poloniex()
- DeserializationError due to the data being in unexpected format
- UnprocessableTradePair due to the pair data being in an unexpected format
"""
try:
trade_type = deserialize_trade_type(poloniex_trade['type'])
amount = deserialize_asset_amount(poloniex_trade['amount'])
rate = deserialize_price(poloniex_trade['rate'])
perc_fee = deserialize_fee(poloniex_trade['fee'])
base_currency = asset_from_poloniex(get_pair_position_str(pair, 'first'))
quote_currency = asset_from_poloniex(get_pair_position_str(pair, 'second'))
timestamp = deserialize_timestamp_from_poloniex_date(poloniex_trade['date'])
except KeyError as e:
raise DeserializationError(
f'Poloniex trade deserialization error. Missing key entry for {str(e)} in trade dict',
)
cost = rate * amount
if trade_type == TradeType.BUY:
fee = Fee(amount * perc_fee)
fee_currency = quote_currency
elif trade_type == TradeType.SELL:
fee = Fee(cost * perc_fee)
fee_currency = base_currency
else:
raise DeserializationError(f'Got unexpected trade type "{trade_type}" for poloniex trade')
if poloniex_trade['category'] == 'settlement':
if trade_type == TradeType.BUY:
trade_type = TradeType.SETTLEMENT_BUY
else:
trade_type = TradeType.SETTLEMENT_SELL
log.debug(
'Processing poloniex Trade',
sensitive_log=True,
timestamp=timestamp,
order_type=trade_type,
pair=pair,
base_currency=base_currency,
quote_currency=quote_currency,
amount=amount,
fee=fee,
rate=rate,
)
# Use the converted assets in our pair
pair = trade_pair_from_assets(base_currency, quote_currency)
# Since in Poloniex the base currency is the cost currency, iow in poloniex
# for BTC_ETH we buy ETH with BTC and sell ETH for BTC, we need to turn it
# into the Rotkehlchen way which is following the base/quote approach.
pair = invert_pair(pair)
return Trade(
timestamp=timestamp,
location=Location.POLONIEX,
pair=pair,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee,
fee_currency=fee_currency,
link=str(poloniex_trade['globalTradeID']),
)
def process_polo_loans(
msg_aggregator: MessagesAggregator,
data: List[Dict],
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Loan]:
"""Takes in the list of loans from poloniex as returned by the return_lending_history
api call, processes it and returns it into our loan format
"""
new_data = []
for loan in reversed(data):
log.debug('processing poloniex loan', **make_sensitive(loan))
try:
close_time = deserialize_timestamp_from_poloniex_date(loan['close'])
open_time = deserialize_timestamp_from_poloniex_date(loan['open'])
if open_time < start_ts:
continue
if close_time > end_ts:
continue
our_loan = Loan(
location=Location.POLONIEX,
open_time=open_time,
close_time=close_time,
currency=asset_from_poloniex(loan['currency']),
fee=deserialize_fee(loan['fee']),
earned=deserialize_asset_amount(loan['earned']),
amount_lent=deserialize_asset_amount(loan['amount']),
)
except UnsupportedAsset as e:
msg_aggregator.add_warning(
f'Found poloniex loan with unsupported asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except UnknownAsset as e:
msg_aggregator.add_warning(
f'Found poloniex loan with unknown asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
msg_aggregator.add_error(
'Deserialization error while reading a poloniex loan. Check '
'logs for more details. Ignoring it.',
)
log.error(
'Deserialization error while reading a poloniex loan',
loan=loan,
error=msg,
)
continue
new_data.append(our_loan)
new_data.sort(key=lambda loan: loan.open_time)
return new_data
def _post_process(before: Dict) -> Dict:
"""Poloniex uses datetimes so turn them into timestamps here"""
after = before
if('return' in after):
if(isinstance(after['return'], list)):
for x in range(0, len(after['return'])):
if(isinstance(after['return'][x], dict)):
if('datetime' in after['return'][x] and
'timestamp' not in after['return'][x]):
after['return'][x]['timestamp'] = float(
create_timestamp(after['return'][x]['datetime']),
)
return after
class Poloniex(ExchangeInterface):
def __init__(
self,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super(Poloniex, self).__init__('poloniex', api_key, secret, database)
self.uri = 'https://poloniex.com/'
self.public_uri = self.uri + 'public?command='
self.session.headers.update({
'Key': self.api_key,
})
self.msg_aggregator = msg_aggregator
self.nonce_lock = Semaphore()
def first_connection(self) -> None:
if self.first_connection_made:
return
self.first_connection_made = True
def validate_api_key(self) -> Tuple[bool, str]:
try:
self.return_fee_info()
except RemoteError as e:
error = str(e)
if 'Invalid API key' in error:
return False, 'Provided API Key or secret is invalid'
else:
raise
return True, ''
def api_query_dict(self, command: str, req: Optional[Dict] = None) -> Dict:
result = self._api_query(command, req)
assert isinstance(result, Dict)
return result
def api_query_list(self, command: str, req: Optional[Dict] = None) -> List:
result = self._api_query(command, req)
assert isinstance(result, List)
return result
def _single_query(self, command: str, req: Dict[str, Any]) -> Optional[requests.Response]:
"""A single api query for poloniex
Returns the response if all went well or None if a recoverable poloniex
        error occurred such as a 504.
Can raise:
- RemoteError if there is a problem with the response
- ConnectionError if there is a problem connecting to poloniex.
"""
if command == 'returnTicker' or command == 'returnCurrencies':
log.debug(f'Querying poloniex for {command}')
response = self.session.get(self.public_uri + command)
else:
req['command'] = command
with self.nonce_lock:
# Protect this region with a lock since poloniex will reject
# non-increasing nonces. So if two greenlets come in here at
# the same time one of them will fail
req['nonce'] = ts_now_in_ms()
post_data = str.encode(urlencode(req))
sign = hmac.new(self.secret, post_data, hashlib.sha512).hexdigest()
self.session.headers.update({'Sign': sign})
response = self.session.post('https://poloniex.com/tradingApi', req)
if response.status_code == 504:
# backoff and repeat
return None
elif response.status_code != 200:
raise RemoteError(
f'Poloniex query responded with error status code: {response.status_code}'
f' and text: {response.text}',
)
# else all is good
return response
def _api_query(self, command: str, req: Optional[Dict] = None) -> Union[Dict, List]:
"""An api query to poloniex. May make multiple requests
Can raise:
- RemoteError if there is a problem reaching poloniex or with the returned response
"""
if req is None:
req = {}
log.debug(
'Poloniex API query',
command=command,
post_data=req,
)
tries = QUERY_RETRY_TIMES
while tries >= 0:
try:
response = self._single_query(command, req)
except requests.exceptions.ConnectionError as e:
raise RemoteError(f'Poloniex API request failed due to {str(e)}')
if response is None:
if tries >= 1:
backoff_seconds = 20 / tries
log.debug(
f'Got a recoverable poloniex error. '
f'Backing off for {backoff_seconds}',
)
gevent.sleep(backoff_seconds)
tries -= 1
continue
else:
break
if response is None:
raise RemoteError(
f'Got a recoverable poloniex error and did not manage to get a '
f'request through even after {QUERY_RETRY_TIMES} '
f'incremental backoff retries',
)
result: Union[Dict, List]
try:
if command == 'returnLendingHistory':
result = rlk_jsonloads_list(response.text)
else:
# For some reason poloniex can also return [] for an empty trades result
if response.text == '[]':
result = {}
else:
result = rlk_jsonloads_dict(response.text)
result = _post_process(result)
except JSONDecodeError:
raise RemoteError(f'Poloniex returned invalid JSON response: {response.text}')
if isinstance(result, dict) and 'error' in result:
raise RemoteError(
'Poloniex query for "{}" returned error: {}'.format(
command,
result['error'],
))
return result
def return_currencies(self) -> Dict:
response = self.api_query_dict('returnCurrencies')
return response
def return_fee_info(self) -> Dict:
response = self.api_query_dict('returnFeeInfo')
return response
def return_lending_history(
self,
start_ts: Optional[Timestamp] = None,
end_ts: Optional[Timestamp] = None,
limit: Optional[int] = None,
) -> List:
"""Default limit for this endpoint seems to be 500 when I tried.
So to be sure all your loans are included put a very high limit per call
and also check if the limit was reached after each call.
Also maximum limit seems to be 12660
"""
req: Dict[str, Union[int, Timestamp]] = {}
if start_ts is not None:
req['start'] = start_ts
if end_ts is not None:
req['end'] = end_ts
if limit is not None:
req['limit'] = limit
response = self.api_query_list('returnLendingHistory', req)
return response
@overload
def return_trade_history( # pylint: disable=unused-argument, no-self-use
self,
currency_pair: Literal['all'],
start: Timestamp,
end: Timestamp,
) -> Dict:
...
@overload # noqa: F811
def return_trade_history( # noqa: F811 # pylint: disable=unused-argument, no-self-use
self,
currency_pair: Union[TradePair, str],
start: Timestamp,
end: Timestamp,
) -> Union[Dict, List]:
...
# TODO: As soon as a pyflakes release is made including
# https://github.com/PyCQA/pyflakes/pull/435 then remove the noqa from here,
    # above and from other places in the codebase where overload is used like this
def return_trade_history( # noqa: F811
self,
currency_pair: Union[TradePair, str],
start: Timestamp,
end: Timestamp,
) -> Union[Dict, List]:
"""If `currency_pair` is all, then it returns a dictionary with each key
being a pair and each value a list of trades. If `currency_pair` is a specific
pair then a list is returned"""
return self._api_query('returnTradeHistory', {
'currencyPair': currency_pair,
'start': start,
'end': end,
'limit': 10000,
})
def return_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Dict:
response = self.api_query_dict(
'returnDepositsWithdrawals',
{'start': start_ts, 'end': end_ts},
)
return response
# ---- General exchanges interface ----
@protect_with_lock()
@cache_response_timewise()
def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:
try:
resp = self.api_query_dict('returnCompleteBalances', {"account": "all"})
except RemoteError as e:
msg = (
'Poloniex API request failed. Could not reach poloniex due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
balances = {}
for poloniex_asset, v in resp.items():
available = FVal(v['available'])
on_orders = FVal(v['onOrders'])
if (available != FVal(0) or on_orders != FVal(0)):
try:
asset = asset_from_poloniex(poloniex_asset)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found unsupported poloniex asset {e.asset_name}. '
f' Ignoring its balance query.',
)
continue
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found unknown poloniex asset {e.asset_name}. '
f' Ignoring its balance query.',
)
continue
except DeserializationError:
log.error(
f'Unexpected poloniex asset type. Expected string '
f' but got {type(poloniex_asset)}',
)
self.msg_aggregator.add_error(
'Found poloniex asset entry with non-string type. '
' Ignoring its balance query.',
)
continue
entry = {}
entry['amount'] = available + on_orders
try:
usd_price = Inquirer().find_usd_price(asset=asset)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing poloniex balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
usd_value = entry['amount'] * usd_price
entry['usd_value'] = usd_value
balances[asset] = entry
log.debug(
'Poloniex balance query',
sensitive_log=True,
currency=asset,
amount=entry['amount'],
usd_value=usd_value,
)
return balances, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[Trade]:
raw_data = self.return_trade_history(
currency_pair='all',
start=start_ts,
end=end_ts,
)
results_length = 0
for _, v in raw_data.items():
results_length += len(v)
log.debug('Poloniex trade history query', results_num=results_length)
if results_length >= 10000:
raise ValueError(
'Poloniex api has a 10k limit to trade history. Have not implemented'
' a solution for more than 10k trades at the moment',
)
our_trades = []
for pair, trades in raw_data.items():
for trade in trades:
category = trade.get('category', None)
try:
if category == 'exchange' or category == 'settlement':
timestamp = deserialize_timestamp_from_poloniex_date(trade['date'])
if timestamp < start_ts or timestamp > end_ts:
continue
our_trades.append(trade_from_poloniex(trade, TradePair(pair)))
elif category == 'marginTrade':
# We don't take poloniex margin trades into account at the moment
continue
else:
self.msg_aggregator.add_error(
f'Error deserializing a poloniex trade. Unknown trade '
f'category {category} found.',
)
continue
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found poloniex trade with unsupported asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found poloniex trade with unknown asset'
f' {e.asset_name}. Ignoring it.',
)
continue
except (UnprocessableTradePair, DeserializationError) as e:
self.msg_aggregator.add_error(
'Error deserializing a poloniex trade. Check the logs '
'and open a bug report.',
)
log.error(
'Error deserializing poloniex trade',
trade=trade,
error=str(e),
)
continue
return our_trades
def parse_loan_csv(self) -> List:
"""Parses (if existing) the lendingHistory.csv and returns the history in a list
It can throw OSError, IOError if the file does not exist and csv.Error if
the file is not proper CSV"""
# the default filename, and should be (if at all) inside the data directory
path = os.path.join(self.db.user_data_dir, "lendingHistory.csv")
lending_history = []
with open(path, 'r') as csvfile:
history = csv.reader(csvfile, delimiter=',', quotechar='|')
next(history) # skip header row
for row in history:
try:
lending_history.append({
'currency': asset_from_poloniex(row[0]),
'earned': FVal(row[6]),
'amount': FVal(row[2]),
'fee': FVal(row[5]),
'open': row[7],
'close': row[8],
})
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found loan with asset {e.asset_name}. Ignoring it.',
)
continue
return lending_history
def query_loan_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
from_csv: Optional[bool] = False,
) -> List:
"""
WARNING: Querying from returnLendingHistory endpoint instead of reading from
the CSV file can potentially return unexpected/wrong results.
That is because the `returnLendingHistory` endpoint has a hidden limit
of 12660 results. In our code we use the limit of 12000 but poloniex may change
the endpoint to have a lower limit at which case this code will break.
To be safe compare results of both CSV and endpoint to make sure they agree!
"""
try:
if from_csv:
return self.parse_loan_csv()
except (OSError, csv.Error):
pass
loans_query_return_limit = 12000
result = self.return_lending_history(
start_ts=start_ts,
end_ts=end_ts,
limit=loans_query_return_limit,
)
data = list(result)
log.debug('Poloniex loan history query', results_num=len(data))
# since I don't think we have any guarantees about order of results
# using a set of loan ids is one way to make sure we get no duplicates
# if poloniex can guarantee me that the order is going to be ascending/descending
# per open/close time then this can be improved
id_set = set()
while len(result) == loans_query_return_limit:
# Find earliest timestamp to re-query the next batch
min_ts = end_ts
for loan in result:
ts = deserialize_timestamp_from_poloniex_date(loan['close'])
min_ts = min(min_ts, ts)
id_set.add(loan['id'])
result = self.return_lending_history(
start_ts=start_ts,
end_ts=min_ts,
limit=loans_query_return_limit,
)
log.debug('Poloniex loan history query', results_num=len(result))
for loan in result:
if loan['id'] not in id_set:
data.append(loan)
return data
def query_exchange_specific_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Optional[Any]:
"""The exchange specific history for poloniex is its loans"""
return self.query_loan_history(
start_ts=start_ts,
end_ts=end_ts,
from_csv=True, # TODO: Change this and make them queriable
)
def _deserialize_asset_movement(
self,
movement_type: AssetMovementCategory,
movement_data: Dict[str, Any],
) -> Optional[AssetMovement]:
"""Processes a single deposit/withdrawal from polo and deserializes it
Can log error/warning and return None if something went wrong at deserialization
"""
try:
if movement_type == AssetMovementCategory.DEPOSIT:
fee = Fee(ZERO)
uid_key = 'depositNumber'
else:
fee = deserialize_fee(movement_data['fee'])
uid_key = 'withdrawalNumber'
asset = asset_from_poloniex(movement_data['currency'])
return AssetMovement(
location=Location.POLONIEX,
category=movement_type,
timestamp=deserialize_timestamp(movement_data['timestamp']),
asset=asset,
amount=deserialize_asset_amount(movement_data['amount']),
fee_asset=asset,
fee=fee,
link=str(movement_data[uid_key]),
)
except UnsupportedAsset as e:
self.msg_aggregator.add_warning(
f'Found {str(movement_type)} of unsupported poloniex asset '
f'{e.asset_name}. Ignoring it.',
)
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Found {str(movement_type)} of unknown poloniex asset '
f'{e.asset_name}. Ignoring it.',
)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Unexpected data encountered during deserialization of a poloniex '
'asset movement. Check logs for details and open a bug report.',
)
log.error(
f'Unexpected data encountered during deserialization of poloniex '
f'{str(movement_type)}: {movement_data}. Error was: {str(e)}',
)
return None
def query_online_deposits_withdrawals(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[AssetMovement]:
result = self.return_deposits_withdrawals(start_ts, end_ts)
log.debug(
'Poloniex deposits/withdrawal query',
results_num=len(result['withdrawals']) + len(result['deposits']),
)
movements = []
for withdrawal in result['withdrawals']:
asset_movement = self._deserialize_asset_movement(
movement_type=AssetMovementCategory.WITHDRAWAL,
movement_data=withdrawal,
)
if asset_movement:
movements.append(asset_movement)
for deposit in result['deposits']:
asset_movement = self._deserialize_asset_movement(
movement_type=AssetMovementCategory.DEPOSIT,
movement_data=deposit,
)
if asset_movement:
movements.append(asset_movement)
return movements
|
the-stack_106_15347
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class PatchAction(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PatchAction - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'media_type': 'str',
'action_template': 'ActionMapActionTemplate',
'architect_flow_fields': 'ArchitectFlowFields',
'web_messaging_offer_fields': 'WebMessagingOfferFields'
}
self.attribute_map = {
'media_type': 'mediaType',
'action_template': 'actionTemplate',
'architect_flow_fields': 'architectFlowFields',
'web_messaging_offer_fields': 'webMessagingOfferFields'
}
self._media_type = None
self._action_template = None
self._architect_flow_fields = None
self._web_messaging_offer_fields = None
@property
def media_type(self):
"""
Gets the media_type of this PatchAction.
Media type of action.
:return: The media_type of this PatchAction.
:rtype: str
"""
return self._media_type
@media_type.setter
def media_type(self, media_type):
"""
Sets the media_type of this PatchAction.
Media type of action.
:param media_type: The media_type of this PatchAction.
:type: str
"""
allowed_values = ["webchat", "webMessagingOffer", "contentOffer", "integrationAction", "architectFlow"]
if media_type.lower() not in map(str.lower, allowed_values):
# print("Invalid value for media_type -> " + media_type)
self._media_type = "outdated_sdk_version"
else:
self._media_type = media_type
@property
def action_template(self):
"""
Gets the action_template of this PatchAction.
Action template associated with the action map.
:return: The action_template of this PatchAction.
:rtype: ActionMapActionTemplate
"""
return self._action_template
@action_template.setter
def action_template(self, action_template):
"""
Sets the action_template of this PatchAction.
Action template associated with the action map.
:param action_template: The action_template of this PatchAction.
:type: ActionMapActionTemplate
"""
self._action_template = action_template
@property
def architect_flow_fields(self):
"""
Gets the architect_flow_fields of this PatchAction.
Architect Flow Id and input contract.
:return: The architect_flow_fields of this PatchAction.
:rtype: ArchitectFlowFields
"""
return self._architect_flow_fields
@architect_flow_fields.setter
def architect_flow_fields(self, architect_flow_fields):
"""
Sets the architect_flow_fields of this PatchAction.
Architect Flow Id and input contract.
:param architect_flow_fields: The architect_flow_fields of this PatchAction.
:type: ArchitectFlowFields
"""
self._architect_flow_fields = architect_flow_fields
@property
def web_messaging_offer_fields(self):
"""
Gets the web_messaging_offer_fields of this PatchAction.
Admin-configurable fields of a web messaging offer action.
:return: The web_messaging_offer_fields of this PatchAction.
:rtype: WebMessagingOfferFields
"""
return self._web_messaging_offer_fields
@web_messaging_offer_fields.setter
def web_messaging_offer_fields(self, web_messaging_offer_fields):
"""
Sets the web_messaging_offer_fields of this PatchAction.
Admin-configurable fields of a web messaging offer action.
:param web_messaging_offer_fields: The web_messaging_offer_fields of this PatchAction.
:type: WebMessagingOfferFields
"""
self._web_messaging_offer_fields = web_messaging_offer_fields
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
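# Illustrative usage sketch (kept as a comment because this module is imported as
# part of the generated SDK package): the model is populated through its property
# setters and serialized with to_dict()/to_json(), e.g.
#
#     action = PatchAction()
#     action.media_type = 'webchat'   # must be one of the allowed values
#     payload = action.to_json()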
|
the-stack_106_15348
|
"""
This file offers the methods to automatically retrieve the graph Roseovarius lutimaris.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def RoseovariusLutimaris(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Roseovarius lutimaris graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Roseovarius lutimaris graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="RoseovariusLutimaris",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
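# Illustrative usage sketch (kept as a comment; the exact import path depends on
# where this auto-generated module lives inside the package):
#
#     graph = RoseovariusLutimaris(directed=False, version="links.v11.5")
#     print(graph)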
|
the-stack_106_15349
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name='calculadora',
version='0.0.1',
author='Anderson Printes',
author_email='[email protected]',
description='Testing installation of Package',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/felipeprintes/calculadora',
project_urls = {
"Bug Tracker": "https://github.com/felipeprintes/calculadora/issues"
},
license='MIT',
packages=['calculadora'],
#install_requires=['requests'],
)
|
the-stack_106_15350
|
import time
from redis.exceptions import WatchError
class RedisThrottledQueue(object):
queue = None # the instantiated queue class
window = None # the window to use to limit requests
limit = None # number of requests in the given window
redis_conn = None # the redis connection
moderation = None # whether to use moderation or not
moderate_key = None # the last time the moderated queue was pulled
window_append = ":throttle_window" # appended to end of window queue key
time_append = ":throttle_time" # appended to end to time key
def __init__(self, redisConn, myQueue, throttleWindow, throttleLimit,
moderate=False, windowName=None, modName=None):
'''
For best performance, all instances of a throttled queue should have
the same settings
Limits outbound flow (pop) from any Redis Queue, does not hinder pushes
        This queue is also temporary, which is why it is a bit complex
        @param redisConn: The redis connection to use
        @param myQueue: The instantiated RedisQueue class
            (Queue, Stack, Priority)
        @param throttleWindow: The time window to throttle pop requests (secs)
        @param throttleLimit: The number of pops allowed in a given time window
        @param moderate: Set to True if you would like the queue to have
a more consistent outbound flow.
@param windowName: Use a different rolling window key name
@param modName: Use a different moderate time key name
'''
self.redis_conn = redisConn
self.queue = myQueue
self.window = float(throttleWindow)
self.limit = float(throttleLimit)
if windowName is None:
# default window name
self.window_key = self.queue.key + self.window_append
else:
self.window_key = windowName + self.window_append
# moderation is useless when only grabbing 1 item in x secs
if moderate and throttleLimit != 1:
self.moderation = self.window / self.limit
# used for communicating throttle moderation across queue instances
if modName is None:
self.moderate_key = self.queue.key + self.time_append
else:
self.moderate_key = modName + self.time_append
def __len__(self):
'''
Return the length of the queue
'''
return len(self.queue)
def clear(self):
'''
Clears all data associated with the throttled queue
'''
self.redis_conn.delete(self.window_key)
self.redis_conn.delete(self.moderate_key)
self.queue.clear()
def push(self, *args):
'''
Push a request into the queue
'''
self.queue.push(*args)
def pop(self, *args):
'''
Non-blocking from throttled queue standpoint, tries to return a
queue pop request, only will return a request if
the given time window has not been exceeded
@return: The item if the throttle limit has not been hit,
otherwise None
'''
if self.allowed():
return self.queue.pop(*args)
else:
return None
'''
Original Redis Throttle implementation from
http://opensourcehacker.com/2014/07/09/rolling-time-window-counters-with-redis-and-mitigating-botnet-driven-login-attacks/
Modified heavily to fit our class needs, plus locking
mechanisms around the operations
'''
def allowed(self):
'''
Check to see if the pop request is allowed
@return: True means the maximum was not been reached for the current
time window, thus allowing what ever operation follows
'''
# Expire old keys (hits)
expires = time.time() - self.window
self.redis_conn.zremrangebyscore(self.window_key, '-inf', expires)
# check if we are hitting too fast for moderation
if self.moderation:
with self.redis_conn.pipeline() as pipe:
try:
pipe.watch(self.moderate_key) # ---- LOCK
# from this point onward if no errors are raised we
# successfully incremented the counter
curr_time = time.time()
if self.is_moderated(curr_time, pipe):
return False
# passed the moderation limit, now check time window
# If we have less keys than max, update out moderate key
if self.test_hits():
# this is a valid transaction, set the new time
pipe.multi()
pipe.set(self.moderate_key, str(curr_time))
                        # expire it if it hasn't been touched in a while
pipe.expire(self.moderate_key, int(self.window * 2))
pipe.execute()
return True
except WatchError:
# watch was changed, another thread just incremented
# the value
return False
# If we currently have more keys than max,
# then limit the action
else:
return self.test_hits()
return False
def is_moderated(self, curr_time, pipe):
'''
Tests to see if the moderation limit is not exceeded
@return: True if the moderation limit is exceeded
'''
        # get the key; if it is missing, the moderate key expired and
        # we don't care
value = pipe.get(self.moderate_key)
if value is None:
value = 0.0
else:
value = float(value)
# check moderation difference
if (curr_time - value) < self.moderation:
return True
return False
def test_hits(self):
'''
Tests to see if the number of throttle queue hits is within our limit
@return: True if the queue was below the limit AND atomically updated
'''
with self.redis_conn.pipeline() as pipe:
try:
pipe.watch(self.window_key) # ---- LOCK
value = self.redis_conn.zcard(self.window_key)
if value < self.limit:
# push value into key
now = time.time()
pipe.multi()
pipe.zadd(self.window_key, now, now)
                    # expire it if it hasn't been touched in a while
pipe.expire(self.window_key, int(self.window * 2))
pipe.execute()
return True
except WatchError:
# watch was changed, another thread just messed with the
# queue so we can't tell if our result is ok
pass
return False
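# Minimal usage sketch. It assumes a running local Redis server and a companion
# RedisQueue-style class (push/pop plus a `key` attribute) as referenced in the
# constructor docstring above; the import path used below is an assumption.
if __name__ == '__main__':
    import redis
    from scutils.redis_queue import RedisQueue  # assumed/hypothetical import path
    conn = redis.Redis(host='localhost', port=6379)
    plain_queue = RedisQueue(conn, 'demo:queue')
    throttled = RedisThrottledQueue(conn, plain_queue, throttleWindow=60, throttleLimit=10)
    throttled.push('item')
    print(throttled.pop())  # returns the item while under the limit, None once 10 pops occur within 60s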
|
the-stack_106_15351
|
import numpy as np
import importlib.util
spec = importlib.util.spec_from_file_location("visual_analyzer", "../utils/environment_properties_analyzers/visual_analyzer.py")
visual_analyzer = importlib.util.module_from_spec(spec)
spec.loader.exec_module(visual_analyzer)
class EnvironmentProperties:
"""
This is the main class, which models the environment field with input parameters
"""
# Constructor
# The environment can be set from picture or analytically
# The environment can be homogeneous or heterogeneous
# Parameters can be constants or variable
def __init__(self, density=0, lambda_lame=0, mu_lame=0, v_p=0, v_s=0,
img_creating_parameters=None, analytical_creating_parameters=None):
if img_creating_parameters is not None:
self.init_params = dict(img_creating_parameters)
self.img_creating_parameters = dict(img_creating_parameters)
if analytical_creating_parameters is not None:
self.analytical_creating_parameters = analytical_creating_parameters
self.density = density
self.lambda_lame = lambda_lame
self.elasticity_quotient = lambda_lame
self.mu_lame = mu_lame
self.v_p = v_p
self.v_s = v_s
self.E = 0
self.nu_puass = 0
if v_p == 0 and v_s == 0 and mu_lame == 0 and lambda_lame != 0:
self.set_dens_and_lame_for_acoustic(density, lambda_lame)
if v_p == 0 and v_s == 0 and mu_lame != 0 and lambda_lame != 0:
self.set_dens_and_lame_for_seismic(density, lambda_lame, mu_lame)
if v_p != 0 and v_s == 0 and mu_lame == 0 and lambda_lame == 0:
self.set_dens_and_speeds_for_acoustic(density, v_p)
if v_p != 0 and v_s != 0 and mu_lame == 0 and lambda_lame == 0:
self.set_dens_and_speeds_for_seismic(density, v_p, v_s)
# Sets all parameters for acoustic, knowing v_p
def set_params_for_acoustic_using_v_p(self):
self.__calculate_params_for_acoustic_task_v_p()
    # Sets all parameters for acoustic, knowing the elasticity quotient k
def set_params_for_acoustic_using_k(self):
self.__calculate_params_for_acoustic_task_k()
    # Sets all parameters for seismic, knowing lame coefficients
    def set_params_for_seismic_using_lame(self):
        self.__calculate_params_for_seismic_task_lame()
    # Sets all parameters for seismic, knowing v_p and v_s
    def set_params_for_seismic_using_speeds(self):
        self.__calculate_params_for_seismic_task_vp_vs()
# Calculate and sets density, elasticity_quotient and mu_lame coefficient for seismic task
def set_dens_and_lame_for_seismic(self, density, elasticity_quotient, mu_lame):
self.density = density
self.elasticity_quotient = elasticity_quotient
self.mu_lame = mu_lame
self.__calculate_Puass_and_E()
self.__calculate_speeds()
# Calculate and sets density and elasticity_quotient for acoustic task
def set_dens_and_lame_for_acoustic(self, density, elasticity_quotient):
self.density = density
self.elasticity_quotient = elasticity_quotient
self.v_p = (elasticity_quotient/density) ** 0.5
# Calculate and sets density, x and y velocities for seismic task
def set_dens_and_speeds_for_seismic(self, density, v_p, v_s):
self.density = density
self.v_p = v_p
self.v_s = v_s
self.__calculate_Lame_and_Puass_and_E()
# Calculate and sets density and x velocity for acoustic task
def set_dens_and_speeds_for_acoustic(self, density, v_p):
self.density = density
self.v_p = v_p
self.elasticity_quotient = (v_p**2) * density
# Returns all parameters in dictionary
def get_get_all_params(self):
params = {'Density = ': self.density, 'Lambda_Lame = ': self.elasticity_quotient, 'Mu_Lame = ': self.mu_lame,
'v_p = ': self.v_p, 'v_s = ': self.v_s,
'E = ': self.E, 'nu_puass = ': self.nu_puass}
return params
# Calculates all parameters for acoustic_task in heterogeneous environment, knowing v_p
def __calculate_params_for_acoustic_task_v_p(self):
for buf_color in self.init_params.keys():
init_params = self.init_params.get(buf_color)
density = init_params[0]
v_p = init_params[1]
elasticity_quotient = self.__calculate_lambda_lame(density, v_p)
self.img_creating_parameters.update({buf_color: [density, elasticity_quotient, v_p]})
# Calculates all parameters for acoustic_task in heterogeneous environment, knowing elasticity_quotient
def __calculate_params_for_acoustic_task_k(self):
for buf_color in self.init_params.keys():
init_params = self.init_params.get(buf_color)
density = init_params[0]
elasticity_quotient = init_params[1]
v_p = self.__calculate_v_p(density, elasticity_quotient)
self.img_creating_parameters.update({buf_color: [density, elasticity_quotient, v_p]})
# Calculates lambda_lame, knowing density and v_p
def __calculate_lambda_lame(self, density, v_p):
elasticity_quotient = (v_p ** 2) * density
return elasticity_quotient
# Calculates v_p, knowing density and elasticity_quotient
def __calculate_v_p(self, density, elasticity_quotient):
v_p = (elasticity_quotient / density) ** 0.5
return v_p
# Calculates all parameters for seismic_task in heterogeneous environment, knowing x and y velocities
def __calculate_params_for_seismic_task_vp_vs(self):
for buf_color in self.init_params.keys():
init_params = self.init_params.get(buf_color)
density = init_params[0]
v_p = init_params[1]
v_s = init_params[2]
self.set_dens_and_speeds_for_seismic(density, v_p, v_s)
mu_lame = self.mu_lame
lambda_lame = self.elasticity_quotient
self.img_creating_parameters.update({buf_color: [density, lambda_lame, mu_lame, v_p, v_s]})
# Calculates all parameters for seismic_task in heterogeneous environment, knowing lame parameters
def __calculate_params_for_seismic_task_lame(self):
for buf_color in self.init_params.keys():
init_params = self.init_params.get(buf_color)
density = init_params[0]
lambda_lame = init_params[1]
mu_lame = init_params[2]
self.set_dens_and_lame_for_seismic(density, lambda_lame, mu_lame)
v_p = self.v_p
v_s = self.v_s
self.img_creating_parameters.update({buf_color: [density, lambda_lame, mu_lame, v_p, v_s]})
# Calculates Lame parameters, Puasson's parameter and E parameter
def __calculate_Lame_and_Puass_and_E(self):
self.mu_lame = self.v_s ** 2 * self.density
self.nu_puass = (2 * self.mu_lame - self.v_p ** 2 * self.density) / (2 * (self.mu_lame - self.v_p ** 2 * self.density))
self.elasticity_quotient = 2 * self.mu_lame * self.nu_puass / (1 - 2 * self.nu_puass)
self.E = self.mu_lame * (3 * self.elasticity_quotient + 2 * self.mu_lame) / (self.elasticity_quotient + self.mu_lame)
# Calculates x and y velocities
def __calculate_speeds(self):
self.v_p = (self.E / self.density) ** 0.5
self.v_s = (self.mu_lame / self.density) ** 0.5
# Calculates Puasson's parameter and E parameter
def __calculate_Puass_and_E(self):
self.nu_puass = self.elasticity_quotient / (2 * (self.elasticity_quotient + self.mu_lame))
# self.E = self.mu_lame * (3 * self.elasticity_quotient + 2 * self.mu_lame) / (self.elasticity_quotient + self.mu_lame)
self.E = self.elasticity_quotient * (1 + self.nu_puass) * (1 - 2*self.nu_puass)/(self.nu_puass)
def create_environment_for_seismic(self, x=15, y=15):
"""
One of the main functions in class <Environment_properties> which returns the created environment field
for seismic task according to the pack of input parameters:
(density, v_p, v_c) or (density, elasticity_quotient, mu_lame)
Each element of returned <ndarray> contains list [v_p, v_s, density, elasticity_quotient, mu_lame]
which will be used in further calculations
:param x: Width
:param y: Height
:return field: numpy.ndarray(shape=(x, y))
"""
square = [self.v_p, self.v_s, self.density, self.elasticity_quotient, self.mu_lame]
field = np.ndarray(shape=(x, y), dtype=np.dtype(list))
field.fill(square)
return field
def create_environment_for_acoustic(self, x=15, y=15):
"""
One of the main function in class <Environment_properties> which returns the created environment field
for acoustic task according to the pack of input parameters:
(density, v_p) or (density, k)
Each element of returned <ndarray> contains list [v_p, density, k]
which will be used in further calculations
:param x: Width
:param y: Height
:return field: numpy.ndarray(shape=(x, y))
"""
square = [self.v_p, self.density, self.elasticity_quotient]
field = np.ndarray(shape=(x, y), dtype=np.dtype(list))
field.fill(square)
return field
def create_environment_from_image(self, image_path):
"""
One of the main function in class <Environment_properties> which returns the created environment field
gathered from the picture with describes the environment
with proper parameters for seismic task for each
pixel<->(density, elasticity_quotient, mu_lame)
or pixel<->(density, v_p, v_c) or correspondingly for acoustic task :
pixel<->(density, v_p) or pixel<->(density, k)
Each element of returned <array> contains corresponding list with parameters:
[density, elasticity_quotient, mu_lame, v_p, v_s] for seismic and
[density=0, elasticity_quotient=0, v_p=0] for acoustic
:param image_path: The relative path to the image, which describes the environment
:return field: numpy.ndarray(shape=(height, length)); each element of array contains properties of environment
"""
picture_parser = visual_analyzer.visual_analyzer(image_path, self.img_creating_parameters)
field = picture_parser.create_field()
return field
# density = 1000
# v_p = 200
# v_s = 400
# mu_lame = 56
# elasticity_quotient = 12
# props = EnvironmentProperties(density, elasticity_quotient, mu_lame)
# field = props.create_environment_for_seismic()
# print(field)
# #
# image_path = "three_col.jpg"
# params = {(254, 242, 0): [1, 200, 30], (255, 255, 255): [2, 100, 10]}
# properties = EnvironmentProperties(img_creating_parameters=params)
# properties.set_params_for_seismic_using_lame()
# field = properties.create_environment_from_image(image_path)
# print(field[663][1626])
# print(field[600][230])
# buf = tuple(map(tuple, image[0][0]))
# print(buf[0])
|
the-stack_106_15352
|
#!/usr/bin/env python
# Copyright (C) 2008-2011 by
# George Asimenos, Robert C. Edgar, Serafim Batzoglou and Arend Sidow.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##############################
import sys
import evolverSimControl.lib.evolver_gff as gff
FileName = sys.argv[1]
def Die(s):
print >> sys.stderr, "**ERROR**", s, sys.argv
sys.exit(1)
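# Callback for gff.GetRecs: accumulates the per-gene minimum start, maximum end
# and strand over all CDS, UTR and exon records.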
def DoRec():
global GeneLos, GeneHis, GeneStrands, Label, Lo, Hi
if gff.Feature != "CDS" and gff.Feature != "UTR" and gff.Feature != "exon":
return
if Label == "":
Label = gff.Label
elif Label != gff.Label:
Die("More than one label in EVOLVER_GFF")
if Hi == -1:
Hi = gff.End
else:
if gff.End > Hi:
Hi = gff.End
GeneIndex = gff.GetRequiredIntAttr("gene_index")
if GeneIndex in GeneLos.keys():
if gff.Start < GeneLos[GeneIndex]:
GeneLos[GeneIndex] = gff.Start
if gff.End > GeneHis[GeneIndex]:
GeneHis[GeneIndex] = gff.End
if GeneStrands[GeneIndex] != gff.Strand:
Die("Gene on both strands")
else:
GeneLos[GeneIndex] = gff.Start
GeneHis[GeneIndex] = gff.End
GeneStrands[GeneIndex] = gff.Strand
Hi = -1
GeneLos = {}
GeneHis = {}
GeneStrands = {}
Label = ""
gff.GetRecs(FileName, DoRec)
gff.Source = "gene_lengths"
gff.Feature = "gene"
gff.Score = 0
gff.Frame = "."
TotGeneLength = 0
for GeneIndex in GeneLos.keys():
gff.Start = GeneLos[GeneIndex]
gff.End = GeneHis[GeneIndex]
gff.Strand = GeneStrands[GeneIndex]
gff.Attrs = "gene_index %u;" % GeneIndex
gff.WriteRec(sys.stdout)
TotGeneLength += gff.End - gff.Start + 1
print >> sys.stderr, "Max annot end %10u" % Hi
print >> sys.stderr, "Total gene length %10u" % TotGeneLength
print >> sys.stderr, "Pct %10.1f%%" % (float(TotGeneLength)*100/Hi)
|
the-stack_106_15354
|
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2011 University of Southern California
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mock
import webob
from jacket.api.storage.storage.contrib import types_extra_specs
from jacket.storage import exception
from jacket.storage import test
from jacket.tests.storage.unit.api import fakes
import jacket.wsgi.storage
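# Simple stand-ins for the jacket.storage.db extra-specs calls that the tests
# below wire in via self.stubs.Set / mock.patch.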
def return_create_volume_type_extra_specs(context, volume_type_id,
extra_specs):
return stub_volume_type_extra_specs()
def return_volume_type_extra_specs(context, volume_type_id):
return stub_volume_type_extra_specs()
def return_empty_volume_type_extra_specs(context, volume_type_id):
return {}
def delete_volume_type_extra_specs(context, volume_type_id, key):
pass
def delete_volume_type_extra_specs_not_found(context, volume_type_id, key):
raise exception.VolumeTypeExtraSpecsNotFound("Not Found")
def stub_volume_type_extra_specs():
specs = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return specs
def volume_type_get(context, id, inactive=False, expected_fields=None):
pass
class VolumeTypesExtraSpecsTest(test.TestCase):
def setUp(self):
super(VolumeTypesExtraSpecsTest, self).setUp()
self.flags(host='fake')
self.stubs.Set(jacket.storage.db, 'volume_type_get', volume_type_get)
self.api_path = '/v2/fake/os-volume-types/1/extra_specs'
self.controller = types_extra_specs.VolumeTypeExtraSpecsController()
"""to reset notifier drivers left over from other api/contrib tests"""
def test_index(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, 1)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
def test_show(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_get',
return_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
res_dict = self.controller.show(req, 1, 'key5')
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_get',
return_empty_volume_type_extra_specs)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key6')
def test_delete(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs)
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key5')
self.controller.delete(req, 1, 'key5')
self.assertEqual(1, len(self.notifier.notifications))
def test_delete_not_found(self):
self.stubs.Set(jacket.storage.db, 'volume_type_extra_specs_delete',
delete_volume_type_extra_specs_not_found)
req = fakes.HTTPRequest.blank(self.api_path + '/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key6')
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_string_length')
def test_create(self, mock_validate):
self.stubs.Set(jacket.storage.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
@mock.patch.object(jacket.storage.db, 'volume_type_extra_specs_update_or_create')
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_string_length')
def test_create_key_allowed_chars(
self, mock_validate, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
@mock.patch.object(jacket.storage.db, 'volume_type_extra_specs_update_or_create')
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_string_length')
def test_create_too_many_keys_allowed_chars(
self, mock_validate, volume_type_extra_specs_update_or_create):
mock_return_value = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
volume_type_extra_specs_update_or_create.\
return_value = mock_return_value
body = {"extra_specs": {"other_alphanum.-_:": "value1",
"other2_alphanum.-_:": "value2",
"other3_alphanum.-_:": "value3"}}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path)
res_dict = self.controller.create(req, 1, body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1',
res_dict['extra_specs']['other_alphanum.-_:'])
self.assertEqual('value2',
res_dict['extra_specs']['other2_alphanum.-_:'])
self.assertEqual('value3',
res_dict['extra_specs']['other3_alphanum.-_:'])
@mock.patch(
'storage.api.storage.openstack.wsgi.Controller.validate_string_length')
def test_update_item(self, mock_validate):
self.stubs.Set(jacket.storage.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
self.assertEqual(0, len(self.notifier.notifications))
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
res_dict = self.controller.update(req, 1, 'key1', body)
self.assertEqual(1, len(self.notifier.notifications))
self.assertTrue(mock_validate.called)
self.assertEqual('value1', res_dict['key1'])
def test_update_item_too_many_keys(self):
self.stubs.Set(jacket.storage.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank(self.api_path + '/key1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(jacket.storage.db,
'volume_type_extra_specs_update_or_create',
return_create_volume_type_extra_specs)
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank(self.api_path + '/bad')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body)
def _extra_specs_empty_update(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '1', body)
def test_update_no_body(self):
self._extra_specs_empty_update(body=None)
def test_update_empty_body(self):
self._extra_specs_empty_update(body={})
def _extra_specs_create_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/types/1/extra_specs')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, '1', body)
def test_create_no_body(self):
self._extra_specs_create_bad_body(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._extra_specs_create_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'extra_specs': 'string'}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_key(self):
body = {"extra_specs": {"ke/y1": "value1"}}
self._extra_specs_create_bad_body(body=body)
def test_create_invalid_too_many_key(self):
body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"}
self._extra_specs_create_bad_body(body=body)
class VolumeTypeExtraSpecsSerializerTest(test.TestCase):
def test_index_create_serializer(self):
serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate()
# Just getting some input data
extra_specs = stub_volume_type_extra_specs()
text = serializer.serialize(dict(extra_specs=extra_specs))
tree = etree.fromstring(text)
self.assertEqual('extra_specs', tree.tag)
self.assertEqual(len(extra_specs), len(tree))
seen = set(extra_specs.keys())
for child in tree:
self.assertIn(child.tag, seen)
self.assertEqual(extra_specs[child.tag], child.text)
seen.remove(child.tag)
self.assertEqual(0, len(seen))
def test_update_show_serializer(self):
serializer = types_extra_specs.VolumeTypeExtraSpecTemplate()
exemplar = dict(key1='value1')
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('key1', tree.tag)
self.assertEqual('value1', tree.text)
self.assertEqual(0, len(tree))
|
the-stack_106_15355
|
import json
import scrapy
from locations.items import GeojsonPointItem
class JumboArgentinaSpider(scrapy.Spider):
name = "jumbo_argentina"
item_attributes = {"brand": "Jumbo"}
allowed_domains = ["www.jumbo.com.ar"]
def start_requests(self):
url = "https://www.jumbo.com.ar/Login/PreHomeService.aspx/TraerLocales"
headers = {
"Content-Type": "application/json",
}
form_data = {}
yield scrapy.http.FormRequest(
url=url,
method="POST",
formdata=form_data,
headers=headers,
callback=self.parse,
)
def parse(self, response):
result = response.body_as_unicode().replace("\\", "")
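        # The store list comes back as a JSON string embedded in the "d" field;
        # the slicing below is assumed to strip the quotes around that payload
        # so json.loads sees a single object.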
result = result[:5] + result[6:-2] + result[-1:]
data = json.loads(result)
ref = 0
for store in data["d"]["Locales"]:
properties = {
"ref": ref,
"name": store["Local"]["Nombre"].strip(),
"addr_full": store["Local"]["Direccion"].strip(),
"city": store["Local"]["Localidad"].strip(),
"state": store["Local"]["Provincia"].strip(),
"postcode": store["Local"]["CodigoPostal"].strip(),
"lat": float(store["Local"]["Latitud"].strip()),
"lon": float(store["Local"]["Longitud"].strip()),
"phone": store["Local"]["Telefono"].strip(),
}
ref += 1
yield GeojsonPointItem(**properties)
|
the-stack_106_15356
|
"""Download files from pubmed and biorxive."""
from os import path as ospath
import ftplib
from multiprocessing.dummy import Pool as ThreadPool
import json
class Downloader:
def __init__(self, *args, **kwargs):
self.server_url = kwargs['server_url']
self.root = kwargs['root']
self.files_path = kwargs['files_path']
self.connection = self.connect()
def connect(self):
try:
connection = ftplib.FTP(self.server_url)
connection.login()
# connection.cwd(root)
except:
raise
else:
return connection
def download(self, args):
file_name, path = args
connection = self.connect()
connection.cwd(self.root)
print("start downloading ", file_name)
with open(path, 'wb') as f:
connection.retrbinary('RETR ' + file_name, f.write)
print("finish downloading ", file_name)
def get_file_names(self):
file_list = []
connection = self.connect()
connection.cwd(self.root)
connection.retrlines('LIST', lambda x: file_list.append(x.split()))
with open("file_names.json", "w") as f:
json.dump(file_list, f, indent=4)
def run(self):
with open('file_names.json') as f:
file_list = json.load(f)
        print(len(file_list)//3, "Files found ....")
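        # Build (remote file name, local path) pairs for the .gz archives;
        # only the first five are downloaded here.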
paths = [(file_name, ospath.join(self.files_path, file_name))
for *_, file_name in file_list
if file_name.endswith('gz')
][:5]
pool = ThreadPool()
pool.map(self.download, paths)
pool.close()
pool.join()
if __name__ == "__main__":
D = Downloader(server_url="ftp.ncbi.nlm.nih.gov",
root="pubmed/baseline/",
files_path="../gods-eye-files")
# D.get_file_names()
D.run()
|
the-stack_106_15357
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 21:57:41 2020
@author: inderpreet
calculate statistics for point estimates from QRNN applied to AWS
This script is used for Table 7 of the article.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import ICI.stats as stats
from typhon.retrieval.qrnn import set_backend, QRNN
set_backend("pytorch")
from tabulate import tabulate
from aws_test_data import awsTestData
#%% input parameters
depth = 4
width = 256
quantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])
batchSize = 128
targets = [ 'C32','C33','C34', 'C35', 'C36']
iq = np.argwhere(quantiles == 0.5)[0,0]
inpath = os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/AWS/")
#%%
for i,target in enumerate(targets):
inChannels = np.array([target,'C41', 'C42', 'C43', 'C44'])
file = os.path.join(inpath, 'qrnn_data', 'qrnn_%s_%s_%s.nc'%(depth, width, target))
test_data = awsTestData(os.path.join(inpath, "data", "TB_AWS_m60_p60_noise_four_test.nc"),
inChannels, option = 4)
i183, = np.argwhere(inChannels == target)[0]
qrnn = QRNN.load(file)
y_pre, y_prior, y0, y, y_pos_mean = stats.predict(test_data, qrnn, \
add_noise = False, aws = True)
im = np.abs(y_pre[:, 3] - y_prior[:, i183]) <= 5
print ((1 - np.sum(im)/im.size)* 100)
bia = stats.calculate_bias(y_prior, y0, y, y_pre[:, 3], im, i183)
std = stats.calculate_std(y_prior, y0, y, y_pre[:, 3], im, i183)
ske = stats.calculate_skew(y_prior, y0, y, y_pre[:, 3], im, i183)
mae = stats.calculate_mae(y_prior, y0, y, y_pre[:, 3], im, i183)
#%%
bia = list(bia )
mae = list(mae )
ske = list(ske )
std = list(std )
#%%
sets = []
for i in [0, 1, 2, 3, 4]:
l = [bia[i], mae[i], std[i], ske[i]]
sets.append(l)
sets_names = ['bias', 'mae', 'std', "skewness"]#, 'corrected(1sigma)', 'sreerekha et al', 'filtered(1sigma)']
table = [[sets_names[i], sets[0][i], \
sets[1][i],
sets[2][i],
sets[3][i],
sets[4][i],
] for i in range(4)]
print(tabulate(table
, tablefmt="latex", floatfmt=".2f"))
#%%
bins = np.arange(-40, 10, 0.5)
hist = np.histogram(y_pre[:, 3] - y0, bins, density = True)
fig, ax = plt.subplots(1, 1, figsize= [8,8])
ax.set_yscale('log')
ax.plot(bins[:-1], hist[0])
|
the-stack_106_15358
|
import os
import platform
import time
from colorama import init, Fore, Style
import module.log as log
import module.lang
init(autoreset=True)
def cls():
if platform.system() == "Windows":
os.system("cls")
if platform.system() == "Linux":
os.system("clear")
def menu(lst, cmd, cmd_h):
lest = str(len(lst)) + " "
space = len(lest) - 4
print(Fore.LIGHTWHITE_EX + Style.BRIGHT + _("Order") + " " * space + _("Item"))
for i in lst:
num = lst.index(i) + 1
txt = Style.BRIGHT + str(num) + " " + Style.NORMAL + i
while len(txt[:-len(i) - 1]) < len(lest):
txt = " ".join(txt.split(" ", 1))
print(txt)
print()
log.user(_("Usage: %s(command) (number)") % Style.BRIGHT)
log.user(_("Command list:"))
lens = list(map(len, cmd))
lest = cmd[lens.index(max(lens))] + "----"
for (k, t) in zip(cmd, cmd_h):
txt = k + "----" + t
while len(txt[:txt.rfind("-") + 1]) < len(lest):
txt = "--".join(txt.split("-", 1))
print(Style.BRIGHT + txt)
print()
log.inp(_("Command"))
user_ipt = input()
if user_ipt.find(" "):
user_cmd, user_ord = user_ipt.split(" ")
else:
user_cmd, user_ord = "", ""
if user_cmd in cmd:
return user_cmd, user_ord
log.error(_("Invalid command"))
time.sleep(2)
cls()
menu(lst, cmd, cmd_h)
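# Minimal usage sketch (the items and commands here are made-up examples, not
# part of this module):
#   items = ["First item", "Second item"]
#   command, order = menu(items, ["open", "exit"], ["open an item", "quit"])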
|
the-stack_106_15360
|
# RT - Google Translator
from discord.ext import commands
import discord
from jishaku.functools import executor_function
from deep_translator import GoogleTranslator
from asyncio import sleep
from typing import List
CHP_HELP = {
"ja": ("翻訳専用チャンネル機能。",
"""# 翻訳チャンネルプラグイン - translate
これは`rt>translate <翻訳先言語コード>`をチャンネルのトピックに入れることで翻訳専用チャンネルにすることのできる機能です。
例:`rt>translate ja` (これをトピックに入れたチャンネルに送信したメッセージは全て日本語に翻訳されます。)
### 言語コード例
日本語 `ja`
英語 `en`
他は調べれば出るので`言語名 言語コード`とかで調べてください。
### エイリアス
trans, ほんやく, 翻訳
### これもあるよ
翻訳コマンドである`translate`で個人カテゴリーにあります。"""),
"en": ("Dedicated translation channel function", """# translation channel plugin - translate
This is a feature that allows you to make a channel dedicated to translation by putting `rt>translate <language code to translate to>` in the channel topic.
Example: `rt>translate ja` (all messages sent to a channel with this in the topic will be translated into Japanese).
### Language code example
```
Japanese `ja`
English `en`
```
Other codes can be found by looking up `<language name> code` or something like that.
### Alias
trans
### Also see
It's in the personal category with the `translate` command.""")
}
class Translator(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.loop.create_task(self.on_command_added())
@executor_function
def translate(self, text: str, target: str) -> str:
return GoogleTranslator(target=target).translate(text)
async def on_command_added(self):
        # Just adds the channel-plugin version of translate to the help.
await sleep(1.5)
for lang in CHP_HELP:
self.bot.cogs["DocHelp"].add_help(
"ChannelPlugin", "TranslateChannel",
lang, *CHP_HELP[lang]
)
@commands.command(
name="translate", aliases=["trans", "ほんやく", "翻訳"],
extras={
"headding": {"ja": "翻訳をします。", "en": "This can do translate."},
"parent": "Individual"
}
)
@commands.cooldown(1, 4, commands.BucketType.user)
async def translate_(self, ctx, lang, *, content):
"""!lang ja
--------
翻訳をします。
Parameters
----------
lang : 言語コード
どの言語に翻訳するかの言語コードです。
例えば日本語にしたい場合は`ja`で、英語にしたい場合は`en`です。
content : str
翻訳する内容です。
Examples
--------
`rt!translate ja I wanna be the guy!`
RT:男になりたい!
Aliases
-------
trans, ほんやく, 翻訳
See Also
--------
translate(チャンネルプラグイン) : 翻訳専用チャンネル機能。
!lang en
--------
This can do translate.
Parameters
----------
lang : language code
            The language code of the language to translate into.
            Use `ja` for Japanese and `en` for English.
content : str
Translate content
Examples
--------
`rt!translate ja I wanna be the guy!`
RT:男になりたい!
Aliases
-------
trans
See Also
--------
translate(channel plugin) : Only for translate channel."""
await ctx.trigger_typing()
await ctx.reply(
embed=discord.Embed(
title={"ja": "翻訳結果",
"en": "translate result"},
description=await self.translate(content, lang),
color=self.bot.colors["normal"]
).set_footer(
text="Powered by Google Translate",
icon_url="http://tasuren.syanari.com/RT/GoogleTranslate.png"
)
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if isinstance(message.channel, discord.Thread):
return
if ((message.author.bot and not (
message.author.discriminator == "0000" and " #" in message.author.name
)) or not message.guild or not message.channel.topic):
return
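        # Scan the channel topic for an "rt>translate <lang>" line and, if one
        # is found, translate the incoming message into that language.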
for line in message.channel.topic.splitlines():
if line.startswith(("rt>translate", "rt>tran", "rt>翻訳", "rt>ほんやく")):
if 1 < len((splited := line.split())):
await self.translate_(
await self.bot.get_context(message),
splited[1], content=message.content
)
break
def setup(bot):
bot.add_cog(Translator(bot))
|
the-stack_106_15363
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetDataset
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_sync]
from google.cloud import aiplatform_v1
def sample_get_dataset():
# Create a client
client = aiplatform_v1.DatasetServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetDatasetRequest(
name="name_value",
)
# Make the request
response = client.get_dataset(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_GetDataset_sync]
|
the-stack_106_15364
|
from efm2riot import configuration, templates
import shutil
import glob
import sys
import os
def clean_dist(dist_directory):
"""
Clean the output folder.
"""
# Remove the old folder.
shutil.rmtree(dist_directory, ignore_errors=True)
# Create a new one
os.makedirs(dist_directory)
def copy_static(root_directory, dist_directory, sdk_directory, development):
"""
Copy all static files from source to target.
"""
for static in configuration.STATICS:
context = {
"development": development,
"dist": dist_directory,
"root": root_directory,
"sdk": sdk_directory,
}
source = templates.from_string(static["source"], context)
target = templates.from_string(static["target"], context)
target = os.path.join(dist_directory, target)
# Compute filters.
ignored = []
if "filters" in static:
for expression in static["filters"].keys():
new_expression = templates.from_string(
expression, context)
if not static["filters"][expression](context):
ignored.append(new_expression)
# Perform the action.
sys.stdout.write("Copying '%s'\n" % source)
if static["type"] == "directory":
recursive_overwrite(source, target, ignored)
else:
if source not in ignored:
shutil.copy(source, target)
def copy_templates(root_directory, dist_directory, sdk_directory,
cpus, families, boards, development):
"""
Copy all the templates.
"""
def _process(when, contexts):
for context in contexts:
for template in configuration.TEMPLATES:
if template["when"] == when:
context.update({
"development": development,
"dist": dist_directory,
"root": root_directory,
"sdk": sdk_directory,
})
source = templates.from_string(template["source"], context)
target = templates.from_string(template["target"], context)
target = os.path.join(dist_directory, target)
# Compute filters.
filters = {}
if "filters" in template:
for expression in template["filters"].keys():
new_expression = templates.from_string(
expression, context)
filters[new_expression] = \
template["filters"][expression]
# Perform the action.
sys.stdout.write("Processing '%s'\n" % source)
if template["type"] == "file":
if source in filters:
if not filters[source](context):
continue
templates.from_file(source, target, context)
elif template["type"] == "glob":
for source_file in glob.glob(source):
if os.path.isfile(source_file):
if source_file in filters:
if not filters[source_file](context):
continue
target_file = os.path.join(
target, os.path.basename(source_file))
templates.from_file(
source_file, target_file, context)
else:
                        raise Exception(
                            "Unsupported template: %s" % template["type"])
_process("per_family", families)
_process("per_cpu", cpus)
_process("per_board", boards)
_process("once", [{
"families": [family["family"] for family in families],
"cpus": [cpu["cpu"] for cpu in cpus],
"boards": [board["board"] for board in boards]
}])
def copy_patches(root_directory, dist_directory, sdk_directory,
cpus, families, boards):
"""
Copy all the patches.
"""
def _process(when, contexts):
for context in contexts:
for patch in configuration.PATCHES:
if patch["when"] == when:
context.update({
"root": root_directory,
"sdk": sdk_directory,
"dist": dist_directory
})
source = templates.from_string(patch["source"], context)
target = templates.from_string(patch["target"], context)
target = os.path.join(dist_directory, target)
# Perform the action.
sys.stdout.write("Patching '%s'\n" % source)
if patch["type"] == "file":
with open(source, "r") as fp:
content = fp.read()
for method in patch["methods"]:
content = method(source, content)
if not os.path.isdir(target):
os.makedirs(target)
with open(target, "w") as fp:
fp.write(content)
elif patch["type"] == "glob":
for source_file in glob.glob(source):
if os.path.isfile(source_file):
target_file = os.path.join(
target, os.path.basename(source_file))
with open(source_file, "r") as fp:
content = fp.read()
for method in patch["methods"]:
content = method(source, content)
if not os.path.isdir(target):
os.makedirs(target)
with open(target_file, "w") as fp:
fp.write(content)
else:
raise Exception("Not supported")
_process("per_family", families)
_process("per_cpu", cpus)
_process("per_board", boards)
_process("once", [{
"families": [family["family"] for family in families],
"cpus": [cpu["cpu"] for cpu in cpus],
"boards": [board["board"] for board in boards]
}])
def recursive_overwrite(source, target, ignored=None):
"""
Wrapper for shutil.copytree that can merge source and target.
"""
if os.path.isdir(source):
if not os.path.isdir(target):
os.makedirs(target)
files = os.listdir(source)
if ignored is None:
ignored = set()
for f in files:
if os.path.join(source, f) not in ignored:
recursive_overwrite(
os.path.join(source, f), os.path.join(target, f), ignored)
else:
shutil.copyfile(source, target)
|
the-stack_106_15365
|
# Brute-forces the key of a Caesar cipher
ALFABETO = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Store the ciphertext
criptograma = input('Criptograma: ')
# Try every key, one by one (1-25)
for clave in range(1,len(ALFABETO)):
    # Holds the decrypted string
salida = ''
for simbolo in criptograma:
if simbolo in ALFABETO:
pos = ALFABETO.find(simbolo)
            # Decrypt the character
pos = (pos - clave) % len(ALFABETO)
            # Append the decrypted symbol to the string
salida += ALFABETO[pos]
        # If the symbol is a space or another non-alphabetic character,
        # append it to the string unchanged
else:
salida += simbolo
    # Print the complete result on screen
print('Clave %d: %s' % (clave, salida))
|
the-stack_106_15366
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python AppEngine support for Bazel.
To create a Python WebApp for Google AppEngine, add the rules:
py_appengine_binary(
name = "mywebapp",
# data to put in the webapp, the directory structure of the data set
# will be maintained.
data = ["//mywebapp:data"],
configs = ["//mywebapp:app.yaml", //mywebapp:appengine_config.py],
srcs = ["main.py"],
)
#optional test
py_appengine_test(
name = "mywebapp_test",
srcs = ["main_test.py"],
deps = [":main"],
libraries = {"webapp2": "latest"},
)
To run locally:
bazel run :mywebapp
To deploy on Google app engine:
bazel run :mywebapp.deploy -- my-project-id [module.yaml files ...]
Finally, the appengine macro also creates a .deploy target that will try to use
the AppEngine SDK to upload your application to AppEngine. It requires the
project ID as the first argument and takes 0 or more module YAML files. If no
YAML files are specified, only "app.yaml", the main module, will be deployed.
"""
load(":variables.bzl", "PY_SDK_VERSION", "PY_SDK_SHA256")
load(":sdk.bzl", "find_locally_or_download")
def py_appengine_repositories(version=PY_SDK_VERSION,
sha256=PY_SDK_SHA256):
find_locally_or_download(
name = "com_google_appengine_py",
lang = 'py',
sha256 = sha256,
version = version,
filename_pattern = "google_appengine_{version}.zip",
strip_prefix_pattern = "google_appengine",
)
def py_appengine_test(name, srcs, deps=[], data=[], libraries={}, size=None):
"""A variant of py_test that sets up an App Engine environment."""
extra_deps = ["@com_google_appengine_py//:appengine"]
for l in libraries:
extra_deps.append("@com_google_appengine_py//:{0}-{1}".format(l, libraries[l]))
native.py_test(
name = name,
deps = deps + extra_deps,
srcs = srcs,
data = data,
size = size,
)
def _py_appengine_binary_base_impl(ctx):
"""Implementation of the rule that creates
- the script to run locally
- the script to deploy
"""
# TODO(maximermilov): Add support for custom import paths.
config = ctx.actions.declare_file("appengine_config.py")
config_content = """
import os
import sys
module_space = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'external')
repo_dirs = [os.path.join(module_space, d) for d in os.listdir(module_space)]
sys.path.extend([d for d in repo_dirs if os.path.isdir(d)])
"""
symlinks = {
"appengine_config.py": config,
}
for c in ctx.attr.configs:
files = c.files.to_list()
for f in files:
if f.basename == "appengine_config.py":
# Symlink the user-provided appengine_config file(s) to avoid name
# collisions and add import(s) from the custom appengine_config being
# created.
new_path = f.short_path.replace("appengine_config", "real_appengine_config")
symlinks[new_path] = f
import_path = new_path.rsplit(".", 1)[0].replace("/", ".")
config_content += "\nimport {}\n".format(import_path)
elif f.extension == "yaml":
# Symlink YAML config files to the top-level directory.
symlinks[f.basename] = f
else:
# Fail if any .py files were provided that were not appengine_configs.
fail("Invalid config file provided: " + f.short_path)
ctx.actions.write(
output=config,
content=config_content,
)
runfiles = ctx.runfiles(
transitive_files=ctx.attr.devappserver.data_runfiles.files,
symlinks=symlinks,
).merge(ctx.attr.binary.data_runfiles).merge(ctx.attr.appcfg.data_runfiles)
substitutions = {
"%{appcfg}": ctx.attr.appcfg.files_to_run.executable.short_path,
"%{devappserver}":
ctx.attr.devappserver.files_to_run.executable.short_path,
"%{workspace_name}": ctx.workspace_name,
}
ctx.actions.expand_template(
output = ctx.outputs.executable,
template = ctx.file._runner_template,
substitutions = substitutions,
is_executable = True)
ctx.actions.expand_template(
output = ctx.outputs.deploy_sh,
template = ctx.file._deploy_template,
substitutions = substitutions,
is_executable = True)
return struct(runfiles=runfiles, py=ctx.attr.binary.py)
py_appengine_binary_base = rule(
_py_appengine_binary_base_impl,
attrs = {
"binary": attr.label(),
"devappserver": attr.label(default = Label("@com_google_appengine_py//:dev_appserver")),
"appcfg": attr.label(default = Label("@com_google_appengine_py//:appcfg")),
"configs": attr.label_list(allow_files = FileType([
".yaml",
".py",
])),
"_deploy_template": attr.label(
default = Label("//appengine/py:deploy_template"),
single_file = True,
),
"_runner_template": attr.label(
default = Label("//appengine/py:runner_template"),
single_file = True,
),
},
executable = True,
outputs = {
"deploy_sh": "%{name}_deploy.sh",
},
)
def py_appengine_binary(name, srcs, configs, deps=[], data=[]):
"""Convenience macro that builds the app and offers an executable
target to deploy on Google app engine.
"""
if not srcs:
fail("srcs should not be empty.")
# uses py_binary because it generates __init__.py files
native.py_binary(
name = "_py_appengine_" + name,
srcs = srcs,
deps = deps,
data = data,
main = srcs[0], # no entry point, use arbitrary source file
)
py_appengine_binary_base(
name=name,
binary=":_py_appengine_" + name,
configs=configs,
)
native.sh_binary(
name = "%s.deploy" % name,
srcs = ["%s_deploy.sh" % name],
data = [name],
)
def py_appengine_library(**kwargs):
"""Wrapper for py_library
"""
native.py_library(**kwargs)
|
the-stack_106_15367
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import bz2
from contextlib import contextmanager
from datetime import datetime
from glob import glob
from conda._vendor.toolz.itertoolz import groupby
from conda.gateways.disk.permissions import make_read_only
from conda.models.channel import Channel
from conda.resolve import Resolve
from itertools import chain
import json
from json import loads as json_loads
from logging import DEBUG, INFO, getLogger
import os
from os.path import basename, dirname, exists, isdir, isfile, join, lexists, relpath, islink
from random import sample
import re
from shlex import split
from shutil import copyfile, rmtree
from subprocess import check_call, CalledProcessError, check_output, Popen, PIPE
import sys
from tempfile import gettempdir
from unittest import TestCase
from uuid import uuid4
import pytest
import requests
from conda import CondaError, CondaMultiError, plan, __version__ as CONDA_VERSION, \
CONDA_PACKAGE_ROOT
from conda._vendor.auxlib.entity import EntityEncoder
from conda._vendor.auxlib.ish import dals
from conda.base.constants import CONDA_TARBALL_EXTENSION, PACKAGE_CACHE_MAGIC_FILE, SafetyChecks, \
PREFIX_MAGIC_FILE
from conda.base.context import Context, context, reset_context
from conda.cli.conda_argparse import do_call
from conda.cli.main import generate_parser, init_loggers
from conda.common.compat import PY2, iteritems, itervalues, text_type, ensure_text_type
from conda.common.io import argv, captured, disable_logger, env_var, stderr_log_level, dashlist
from conda.common.path import get_bin_directory_short_path, get_python_site_packages_short_path, \
pyc_path
from conda.common.serialize import yaml_load, json_dump
from conda.common.url import path_to_url
from conda.core.index import get_reduced_index
from conda.core.prefix_data import PrefixData, get_python_version_for_prefix
from conda.core.package_cache_data import PackageCacheData
from conda.core.subdir_data import create_cache_dir
from conda.exceptions import CommandArgumentError, DryRunExit, OperationNotAllowed, \
PackagesNotFoundError, RemoveError, conda_exception_handler, PackageNotInstalledError, \
DisallowedPackageError, UnsatisfiableError, DirectoryNotACondaEnvironmentError
from conda.gateways.anaconda_client import read_binstar_tokens
from conda.gateways.disk.create import mkdir_p
from conda.gateways.disk.delete import rm_rf
from conda.gateways.disk.update import touch
from conda.gateways.logging import TRACE
from conda.gateways.subprocess import subprocess_call
from conda.models.match_spec import MatchSpec
from conda.models.records import PackageRecord
from conda.models.version import VersionOrder
from conda.utils import on_win
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
log = getLogger(__name__)
TRACE, DEBUG, INFO = TRACE, DEBUG, INFO # these are so the imports aren't cleared, but it's easy to switch back and forth
TEST_LOG_LEVEL = DEBUG
stderr_log_level(TEST_LOG_LEVEL, 'conda')
stderr_log_level(TEST_LOG_LEVEL, 'requests')
PYTHON_BINARY = 'python.exe' if on_win else 'bin/python'
BIN_DIRECTORY = 'Scripts' if on_win else 'bin'
UINCODE_CHARACTERS = u"ōγђ家固한"
UINCODE_CHARACTERS = u"áêñßôç"
def escape_for_winpath(p):
return p.replace('\\', '\\\\')
def make_temp_prefix(name=None, create_directory=True):
tempdir = gettempdir()
if PY2:
dirpath = str(uuid4())[:8] if name is None else name
else:
random_unicode = ''.join(sample(UINCODE_CHARACTERS, len(UINCODE_CHARACTERS)))
dirpath = (str(uuid4())[:4] + ' ' + random_unicode) if name is None else name
prefix = join(tempdir, dirpath)
os.makedirs(prefix)
if create_directory:
assert isdir(prefix)
else:
os.removedirs(prefix)
return prefix
class Commands:
CONFIG = "config"
CLEAN = "clean"
CREATE = "create"
INFO = "info"
INSTALL = "install"
LIST = "list"
REMOVE = "remove"
SEARCH = "search"
UPDATE = "update"
def run_command(command, prefix, *arguments, **kwargs):
use_exception_handler = kwargs.get('use_exception_handler', False)
arguments = list(arguments)
p = generate_parser()
if command is Commands.CONFIG:
arguments.append('--file "{0}"'.format(join(prefix, 'condarc')))
if command in (Commands.LIST, Commands.CREATE, Commands.INSTALL,
Commands.REMOVE, Commands.UPDATE):
arguments.append('-p "{0}"'.format(prefix))
if command in (Commands.CREATE, Commands.INSTALL, Commands.REMOVE, Commands.UPDATE):
arguments.extend(["-y", "-q"])
arguments = list(map(escape_for_winpath, arguments))
command_line = "{0} {1}".format(command, " ".join(arguments))
split_command_line = split(command_line)
args = p.parse_args(split_command_line)
context._set_argparse_args(args)
init_loggers(context)
print("\n\nEXECUTING COMMAND >>> $ conda %s\n\n" % command_line, file=sys.stderr)
with stderr_log_level(TEST_LOG_LEVEL, 'conda'), stderr_log_level(TEST_LOG_LEVEL, 'requests'):
with argv(['python_api'] + split_command_line), captured() as c:
if use_exception_handler:
conda_exception_handler(do_call, args, p)
else:
do_call(args, p)
print(c.stderr, file=sys.stderr)
print(c.stdout, file=sys.stderr)
if command is Commands.CONFIG:
reload_config(prefix)
return c.stdout, c.stderr
@contextmanager
def make_temp_env(*packages, **kwargs):
name = kwargs.pop('name', None)
prefix = kwargs.pop('prefix', None) or make_temp_prefix(name)
assert isdir(prefix), prefix
with disable_logger('fetch'), disable_logger('dotupdate'):
try:
# try to clear any config that's been set by other tests
reset_context([os.path.join(prefix+os.sep, 'condarc')])
run_command(Commands.CREATE, prefix, *packages, **kwargs)
yield prefix
finally:
rmtree(prefix, ignore_errors=True)
@contextmanager
def make_temp_package_cache():
prefix = make_temp_prefix()
pkgs_dir = join(prefix, 'pkgs')
mkdir_p(pkgs_dir)
touch(join(pkgs_dir, PACKAGE_CACHE_MAGIC_FILE))
try:
with env_var('CONDA_PKGS_DIRS', pkgs_dir, reset_context):
assert context.pkgs_dirs == (pkgs_dir,)
yield pkgs_dir
finally:
rmtree(prefix, ignore_errors=True)
if pkgs_dir in PackageCacheData._cache_:
del PackageCacheData._cache_[pkgs_dir]
@contextmanager
def make_temp_channel(packages):
package_reqs = [pkg.replace('-', '=') for pkg in packages]
package_names = [pkg.split('-')[0] for pkg in packages]
with make_temp_env(*package_reqs) as prefix:
for package in packages:
assert package_is_installed(prefix, package.replace('-', '='))
data = [p for p in PrefixData(prefix).iter_records() if p['name'] in package_names]
run_command(Commands.REMOVE, prefix, *package_names)
for package in packages:
assert not package_is_installed(prefix, package.replace('-', '='))
assert package_is_installed(prefix, 'python')
repodata = {'info': {}, 'packages': {}}
tarfiles = {}
for package_data in data:
pkg_data = package_data
fname = pkg_data['fn']
tarfiles[fname] = join(PackageCacheData.first_writable().pkgs_dir, fname)
pkg_data = pkg_data.dump()
for field in ('url', 'channel', 'schannel'):
pkg_data.pop(field, None)
repodata['packages'][fname] = PackageRecord(**pkg_data)
with make_temp_env() as channel:
subchan = join(channel, context.subdir)
noarch_dir = join(channel, 'noarch')
channel = path_to_url(channel)
os.makedirs(subchan)
os.makedirs(noarch_dir)
for fname, tar_old_path in tarfiles.items():
tar_new_path = join(subchan, fname)
copyfile(tar_old_path, tar_new_path)
with open(join(subchan, 'repodata.json'), 'w') as f:
f.write(json.dumps(repodata, cls=EntityEncoder))
with open(join(noarch_dir, 'repodata.json'), 'w') as f:
f.write(json.dumps({}, cls=EntityEncoder))
yield channel
def create_temp_location():
tempdirdir = gettempdir()
dirname = str(uuid4())[:8]
return join(tempdirdir, dirname)
@contextmanager
def tempdir():
prefix = create_temp_location()
try:
os.makedirs(prefix)
yield prefix
finally:
if lexists(prefix):
rm_rf(prefix)
def reload_config(prefix):
prefix_condarc = join(prefix+os.sep, 'condarc')
reset_context([prefix_condarc])
def package_is_installed(prefix, spec):
spec = MatchSpec(spec)
prefix_recs = tuple(PrefixData(prefix).query(spec))
if len(prefix_recs) > 1:
raise AssertionError("Multiple packages installed.%s"
% (dashlist(prec.dist_str() for prec in prefix_recs)))
return bool(len(prefix_recs))
def get_conda_list_tuple(prefix, package_name):
stdout, stderr = run_command(Commands.LIST, prefix)
stdout_lines = stdout.split('\n')
package_line = next((line for line in stdout_lines
if line.lower().startswith(package_name + " ")), None)
return package_line.split()
def get_shortcut_dir():
assert on_win
user_mode = 'user' if exists(join(sys.prefix, u'.nonadmin')) else 'system'
try:
from menuinst.win32 import dirs_src as win_locations
return win_locations[user_mode]["start"][0]
except ImportError:
try:
from menuinst.win32 import dirs as win_locations
return win_locations[user_mode]["start"]
except ImportError:
raise
@pytest.mark.integration
class IntegrationTests(TestCase):
def setUp(self):
PackageCacheData.clear()
def test_install_python2_and_search(self):
with env_var('CONDA_ALLOW_NON_CHANNEL_URLS', 'true', reset_context):
with make_temp_env("python=2") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'python=2')
# regression test for #4513
run_command(Commands.CONFIG, prefix, "--add channels https://repo.continuum.io/pkgs/not-a-channel")
stdout, stderr = run_command(Commands.SEARCH, prefix, "python --json")
packages = json.loads(stdout)
assert len(packages) >= 1
stdout, stderr = run_command(Commands.SEARCH, prefix, "python --json --envs")
envs_result = json.loads(stdout)
assert any(match['location'] == prefix for match in envs_result)
stdout, stderr = run_command(Commands.SEARCH, prefix, "python --envs")
assert prefix in stdout
def test_create_install_update_remove_smoketest(self):
with make_temp_env("python=3.5") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'python=3')
run_command(Commands.INSTALL, prefix, 'flask=0.10')
assert package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'python=3')
run_command(Commands.INSTALL, prefix, '--force-reinstall', 'flask=0.10')
assert package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'python=3')
run_command(Commands.UPDATE, prefix, 'flask')
assert not package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=3')
run_command(Commands.REMOVE, prefix, 'flask')
assert not package_is_installed(prefix, 'flask=0.*')
assert package_is_installed(prefix, 'python=3')
stdout, stderr = run_command(Commands.LIST, prefix, '--revisions')
assert not stderr
assert " (rev 4)\n" in stdout
assert " (rev 5)\n" not in stdout
run_command(Commands.INSTALL, prefix, '--revision 0')
assert not package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=3')
def test_safety_checks(self):
# This test uses https://anaconda.org/conda-test/spiffy-test-app/0.5/download/noarch/spiffy-test-app-0.5-pyh6afbcc8_0.tar.bz2
# which is a modification of https://anaconda.org/conda-test/spiffy-test-app/1.0/download/noarch/spiffy-test-app-1.0-pyh6afabb7_0.tar.bz2
# as documented in info/README within that package.
# I also had to fix the post-link script in the package by adding quotation marks to handle
# spaces in path names.
with make_temp_env() as prefix:
with open(join(prefix, 'condarc'), 'a') as fh:
fh.write("safety_checks: enabled\n")
reload_config(prefix)
assert context.safety_checks is SafetyChecks.enabled
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.INSTALL, prefix, '-c conda-test spiffy-test-app=0.5')
error_message = text_type(exc.value)
message1 = dals("""
The path 'site-packages/spiffy_test_app-1.0-py2.7.egg-info/top_level.txt'
has an incorrect size.
reported size: 32 bytes
actual size: 16 bytes
""")
message2 = dals("""
The path 'site-packages/spiffy_test_app/__init__.py'
has a sha256 mismatch.
reported sha256: 1234567890123456789012345678901234567890123456789012345678901234
actual sha256: 32d822669b582f82da97225f69e3ef01ab8b63094e447a9acca148a6e79afbed
""")
assert message1 in error_message
assert message2 in error_message
with open(join(prefix, 'condarc'), 'a') as fh:
fh.write("safety_checks: warn\n")
reload_config(prefix)
assert context.safety_checks is SafetyChecks.warn
stdout, stderr = run_command(Commands.INSTALL, prefix, '-c conda-test spiffy-test-app=0.5')
assert message1 in stderr
assert message2 in stderr
assert package_is_installed(prefix, "spiffy-test-app=0.5")
with make_temp_env() as prefix:
with open(join(prefix, 'condarc'), 'a') as fh:
fh.write("safety_checks: disabled\n")
reload_config(prefix)
assert context.safety_checks is SafetyChecks.disabled
stdout, stderr = run_command(Commands.INSTALL, prefix, '-c conda-test spiffy-test-app=0.5')
assert message1 not in stderr
assert message2 not in stderr
assert package_is_installed(prefix, "spiffy-test-app=0.5")
def test_json_create_install_update_remove(self):
# regression test for #5384
def assert_json_parsable(content):
string = None
try:
for string in content and content.split('\0') or ():
json.loads(string)
except Exception as e:
log.warn(
"Problem parsing json output.\n"
" content: %s\n"
" string: %s\n"
" error: %r",
content, string, e
)
raise
try:
prefix = make_temp_prefix(str(uuid4())[:7])
stdout, stderr = run_command(Commands.CREATE, prefix, "python=3.5 --json --dry-run", use_exception_handler=True)
assert_json_parsable(stdout)
# regression test for #5825
# contents of LINK and UNLINK is expected to have Dist format
json_obj = json.loads(stdout)
dist_dump = json_obj['actions']['LINK'][0]
assert 'dist_name' in dist_dump
stdout, stderr = run_command(Commands.CREATE, prefix, "python=3.5 --json")
assert_json_parsable(stdout)
assert not stderr
json_obj = json.loads(stdout)
dist_dump = json_obj['actions']['LINK'][0]
assert 'dist_name' in dist_dump
stdout, stderr = run_command(Commands.INSTALL, prefix, 'flask=0.10 --json')
assert_json_parsable(stdout)
assert not stderr
assert package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'python=3')
# Test force reinstall
stdout, stderr = run_command(Commands.INSTALL, prefix, '--force-reinstall', 'flask=0.10', '--json')
assert_json_parsable(stdout)
assert not stderr
assert package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'python=3')
stdout, stderr = run_command(Commands.UPDATE, prefix, 'flask --json')
assert_json_parsable(stdout)
assert not stderr
assert not package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=3')
stdout, stderr = run_command(Commands.REMOVE, prefix, 'flask --json')
assert_json_parsable(stdout)
assert not stderr
assert not package_is_installed(prefix, 'flask=0.*')
assert package_is_installed(prefix, 'python=3')
# regression test for #5825
# contents of LINK and UNLINK is expected to have Dist format
json_obj = json.loads(stdout)
dist_dump = json_obj['actions']['UNLINK'][0]
assert 'dist_name' in dist_dump
stdout, stderr = run_command(Commands.LIST, prefix, '--revisions --json')
assert not stderr
json_obj = json.loads(stdout)
assert len(json_obj) == 5
assert json_obj[4]["rev"] == 4
stdout, stderr = run_command(Commands.INSTALL, prefix, '--revision 0', '--json')
assert_json_parsable(stdout)
assert not stderr
assert not package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=3')
finally:
rmtree(prefix, ignore_errors=True)
def test_not_writable_env_raises_EnvironmentNotWritableError(self):
with make_temp_env() as prefix:
make_read_only(join(prefix, PREFIX_MAGIC_FILE))
stdout, stderr = run_command(Commands.INSTALL, prefix, "openssl", use_exception_handler=True)
assert "EnvironmentNotWritableError" in stderr
assert prefix in stderr
def test_conda_update_package_not_installed(self):
with make_temp_env() as prefix:
with pytest.raises(PackageNotInstalledError):
run_command(Commands.UPDATE, prefix, "sqlite openssl")
with pytest.raises(CondaError) as conda_error:
run_command(Commands.UPDATE, prefix, "conda-forge::*")
assert conda_error.value.message.startswith("Invalid spec for 'conda update'")
def test_noarch_python_package_with_entry_points(self):
with make_temp_env("-c conda-test flask") as prefix:
py_ver = get_python_version_for_prefix(prefix)
sp_dir = get_python_site_packages_short_path(py_ver)
py_file = sp_dir + "/flask/__init__.py"
pyc_file = pyc_path(py_file, py_ver)
assert isfile(join(prefix, py_file))
assert isfile(join(prefix, pyc_file))
exe_path = join(prefix, get_bin_directory_short_path(), 'flask')
if on_win:
exe_path += ".exe"
assert isfile(exe_path)
run_command(Commands.REMOVE, prefix, "flask")
assert not isfile(join(prefix, py_file))
assert not isfile(join(prefix, pyc_file))
assert not isfile(exe_path)
def test_noarch_python_package_without_entry_points(self):
# regression test for #4546
with make_temp_env("-c conda-test itsdangerous") as prefix:
py_ver = get_python_version_for_prefix(prefix)
sp_dir = get_python_site_packages_short_path(py_ver)
py_file = sp_dir + "/itsdangerous.py"
pyc_file = pyc_path(py_file, py_ver)
assert isfile(join(prefix, py_file))
assert isfile(join(prefix, pyc_file))
run_command(Commands.REMOVE, prefix, "itsdangerous")
assert not isfile(join(prefix, py_file))
assert not isfile(join(prefix, pyc_file))
def test_noarch_python_package_reinstall_on_pyver_change(self):
with make_temp_env("-c conda-test itsdangerous python=3") as prefix:
py_ver = get_python_version_for_prefix(prefix)
assert py_ver.startswith('3')
sp_dir = get_python_site_packages_short_path(py_ver)
py_file = sp_dir + "/itsdangerous.py"
pyc_file_py3 = pyc_path(py_file, py_ver)
assert isfile(join(prefix, py_file))
assert isfile(join(prefix, pyc_file_py3))
run_command(Commands.INSTALL, prefix, "python=2")
assert not isfile(join(prefix, pyc_file_py3)) # python3 pyc file should be gone
py_ver = get_python_version_for_prefix(prefix)
assert py_ver.startswith('2')
sp_dir = get_python_site_packages_short_path(py_ver)
py_file = sp_dir + "/itsdangerous.py"
pyc_file_py2 = pyc_path(py_file, py_ver)
assert isfile(join(prefix, py_file))
assert isfile(join(prefix, pyc_file_py2))
def test_noarch_generic_package(self):
with make_temp_env("-c conda-test font-ttf-inconsolata") as prefix:
assert isfile(join(prefix, 'fonts', 'Inconsolata-Regular.ttf'))
def test_override_channels(self):
with pytest.raises(OperationNotAllowed):
with env_var('CONDA_OVERRIDE_CHANNELS_ENABLED', 'no', reset_context):
with make_temp_env("--override-channels python") as prefix:
assert prefix
with pytest.raises(CommandArgumentError):
with make_temp_env("--override-channels python") as prefix:
assert prefix
stdout, stderr = run_command(Commands.SEARCH, None, "--override-channels -c conda-test flask --json")
assert not stderr
assert len(json.loads(stdout)["flask"]) < 3
assert json.loads(stdout)["flask"][0]["noarch"] == "python"
def test_create_empty_env(self):
with make_temp_env() as prefix:
assert exists(join(prefix, 'conda-meta/history'))
list_output = run_command(Commands.LIST, prefix)
stdout = list_output[0]
stderr = list_output[1]
expected_output = """# packages in environment at %s:
#
# Name Version Build Channel
""" % prefix
self.assertEqual(stdout, expected_output)
self.assertEqual(stderr, '')
revision_output = run_command(Commands.LIST, prefix, '--revisions')
stdout = revision_output[0]
stderr = revision_output[1]
assert stderr == ''
self.assertIsInstance(stdout, str)
@pytest.mark.skipif(on_win and context.subdir == "win-32", reason="conda-forge doesn't do win-32")
def test_strict_channel_priority(self):
stdout, stderr = run_command(
Commands.CREATE, "/",
"-c conda-forge -c defaults python=3.6 fiona --strict-channel-priority --dry-run --json",
use_exception_handler=True
)
assert not stderr
json_obj = json_loads(stdout)
channel_groups = groupby("channel",json_obj["actions"]["LINK"])
channel_groups = sorted(list(channel_groups))
# conda-forge should be the only channel in the solution on unix
# fiona->gdal->libgdal->m2w64-xz brings in pkgs/msys2 on win
if on_win:
assert channel_groups == ["conda-forge", "pkgs/msys2"]
else:
assert channel_groups == ["conda-forge"]
def test_strict_resolve_get_reduced_index(self):
channels = (Channel("defaults"),)
specs = (MatchSpec("anaconda"),)
index = get_reduced_index(None, channels, context.subdirs, specs)
r = Resolve(index, channels=channels)
with env_var("CONDA_CHANNEL_PRIORITY", "strict", reset_context):
reduced_index = r.get_reduced_index(specs)
channel_name_groups = {
name: {prec.channel.name for prec in group}
for name, group in iteritems(groupby("name", reduced_index))
}
channel_name_groups = {
name: channel_names for name, channel_names in iteritems(channel_name_groups)
if len(channel_names) > 1
}
assert {} == channel_name_groups
def test_list_with_pip_no_binary(self):
from conda.exports import rm_rf as _rm_rf
with make_temp_env("python=3.5 pip") as prefix:
check_call(PYTHON_BINARY + " -m pip install --no-binary flask flask==0.10.1",
cwd=prefix, shell=True)
PrefixData._cache_.clear()
stdout, stderr = run_command(Commands.LIST, prefix)
stdout_lines = stdout.split('\n')
assert any(line.endswith("pypi") for line in stdout_lines
if line.lower().startswith("flask"))
# regression test for #5847
# when using rm_rf on a directory
assert prefix in PrefixData._cache_
_rm_rf(join(prefix, get_python_site_packages_short_path("3.5")))
assert prefix not in PrefixData._cache_
def test_list_with_pip_wheel(self):
from conda.exports import rm_rf as _rm_rf
with make_temp_env("python=3.6 pip") as prefix:
check_call(PYTHON_BINARY + " -m pip install flask==0.10.1",
cwd=prefix, shell=True)
PrefixData._cache_.clear()
stdout, stderr = run_command(Commands.LIST, prefix)
stdout_lines = stdout.split('\n')
assert any(line.endswith("pypi") for line in stdout_lines
if line.lower().startswith("flask"))
# regression test for #3433
run_command(Commands.INSTALL, prefix, "python=3.5")
assert package_is_installed(prefix, 'python=3.5')
# regression test for #5847
# when using rm_rf on a file
assert prefix in PrefixData._cache_
_rm_rf(join(prefix, get_python_site_packages_short_path("3.5")), "os.py")
assert prefix not in PrefixData._cache_
# regression test for #5980, related to #5847
with make_temp_env() as prefix:
assert isdir(prefix)
assert prefix in PrefixData._cache_
rmtree(prefix)
assert not isdir(prefix)
assert prefix in PrefixData._cache_
_rm_rf(prefix)
assert not isdir(prefix)
assert prefix not in PrefixData._cache_
def test_install_tarball_from_local_channel(self):
# Regression test for #2812
# install from local channel
with make_temp_env() as prefix, make_temp_channel(["flask-0.10.1"]) as channel:
run_command(Commands.INSTALL, prefix, '-c', channel, 'flask=0.10.1', '--json')
assert package_is_installed(prefix, channel + '::' + 'flask')
flask_fname = [p for p in PrefixData(prefix).iter_records() if p['name'] == 'flask'][0]['fn']
run_command(Commands.REMOVE, prefix, 'flask')
assert not package_is_installed(prefix, 'flask=0')
# Regression test for 2970
# install from build channel as a tarball
tar_path = join(PackageCacheData.first_writable().pkgs_dir, flask_fname)
conda_bld = join(dirname(PackageCacheData.first_writable().pkgs_dir), 'conda-bld')
conda_bld_sub = join(conda_bld, context.subdir)
if not isdir(conda_bld_sub):
os.makedirs(conda_bld_sub)
tar_bld_path = join(conda_bld_sub, basename(tar_path))
copyfile(tar_path, tar_bld_path)
# CondaFileNotFoundError: '/home/travis/virtualenv/python2.7.9/conda-bld/linux-64/flask-0.10.1-py27_2.tar.bz2'.
run_command(Commands.INSTALL, prefix, tar_bld_path)
assert package_is_installed(prefix, 'flask')
# Regression test for #462
with make_temp_env(tar_bld_path) as prefix2:
assert package_is_installed(prefix2, 'flask')
def test_tarball_install_and_bad_metadata(self):
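# Installs flask from its cached tarball via several invocation forms (package
# cache path, absolute path, relative path, local channel), then checks that a
# conda-meta JSON file with a non-conforming name is simply ignored.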
with make_temp_env("python flask=0.10.1 --json") as prefix:
assert package_is_installed(prefix, 'flask==0.10.1')
flask_data = [p for p in PrefixData(prefix).iter_records() if p['name'] == 'flask'][0]
run_command(Commands.REMOVE, prefix, 'flask')
assert not package_is_installed(prefix, 'flask==0.10.1')
assert package_is_installed(prefix, 'python')
flask_fname = flask_data['fn']
tar_old_path = join(PackageCacheData.first_writable().pkgs_dir, flask_fname)
assert isfile(tar_old_path)
with pytest.raises(DryRunExit):
run_command(Commands.INSTALL, prefix, tar_old_path, "--dry-run")
assert not package_is_installed(prefix, 'flask=0.*')
# regression test for #2886 (part 1 of 2)
# install tarball from package cache, default channel
run_command(Commands.INSTALL, prefix, tar_old_path)
assert package_is_installed(prefix, 'flask=0.*')
# regression test for #2626
# install tarball with full path, outside channel
tar_new_path = join(prefix, flask_fname)
copyfile(tar_old_path, tar_new_path)
run_command(Commands.INSTALL, prefix, '"%s"' % tar_new_path)
assert package_is_installed(prefix, 'flask=0')
# regression test for #2626
# install tarball with relative path, outside channel
run_command(Commands.REMOVE, prefix, 'flask')
assert not package_is_installed(prefix, 'flask=0.10.1')
tar_new_path = relpath(tar_new_path)
run_command(Commands.INSTALL, prefix, '"%s"' % tar_new_path)
assert package_is_installed(prefix, 'flask=0')
# regression test for #2886 (part 2 of 2)
# install tarball from package cache, local channel
run_command(Commands.REMOVE, prefix, 'flask', '--json')
assert not package_is_installed(prefix, 'flask=0')
run_command(Commands.INSTALL, prefix, tar_old_path)
# The last install was from the `local::` channel
assert package_is_installed(prefix, 'flask')
# regression test for #2599
# ignore json files in conda-meta that don't conform to name-version-build.json
if not on_win:
# xz is only a python dependency on unix
xz_prec = next(PrefixData(prefix).query("xz"))
dist_name = xz_prec.dist_str().split('::')[-1]
xz_prefix_data_json_path = join(prefix, 'conda-meta', dist_name + '.json')
copyfile(xz_prefix_data_json_path,
join(prefix, 'conda-meta', 'xz.json'))
rm_rf(xz_prefix_data_json_path)
assert not lexists(xz_prefix_data_json_path)
PrefixData._cache_ = {}
assert not package_is_installed(prefix, 'xz')
@pytest.mark.skipif(on_win, reason="windows python doesn't depend on readline")
def test_update_with_pinned_packages(self):
# regression test for #6914
with make_temp_env("python=2.7.12") as prefix:
assert package_is_installed(prefix, "readline=6.2")
open(join(prefix, 'conda-meta', 'history'), 'w').close()
PrefixData._cache_.clear()
run_command(Commands.UPDATE, prefix, "readline")
assert package_is_installed(prefix, "readline")
assert not package_is_installed(prefix, "readline=6.2")
assert package_is_installed(prefix, "python=2.7")
assert not package_is_installed(prefix, "python=2.7.12")
def test_remove_all(self):
with make_temp_env("python") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'python')
# regression test for #2154
with pytest.raises(PackagesNotFoundError) as exc:
run_command(Commands.REMOVE, prefix, 'python', 'foo', 'numpy')
assert repr(exc.value) == dals("""
PackagesNotFoundError: The following packages are missing from the target environment:
- foo
- numpy
""")
run_command(Commands.REMOVE, prefix, '--all')
assert not exists(prefix)
@pytest.mark.skipif(on_win, reason="windows usually doesn't support symlinks out-of-the box")
@patch('conda.core.link.hardlink_supported', side_effect=lambda x, y: False)
def test_allow_softlinks(self, hardlink_supported_mock):
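# hardlink_supported is patched to always return False so conda has to fall
# back to softlinks; its memoized result cache is cleared before and after the
# test body so the patched value is actually consulted.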
hardlink_supported_mock._result_cache.clear()
with env_var("CONDA_ALLOW_SOFTLINKS", "true", reset_context):
with make_temp_env("pip") as prefix:
assert islink(join(prefix, get_python_site_packages_short_path(
get_python_version_for_prefix(prefix)), 'pip', '__init__.py'))
hardlink_supported_mock._result_cache.clear()
@pytest.mark.skipif(on_win, reason="nomkl not present on windows")
def test_remove_features(self):
with make_temp_env("python=2 numpy=1.13 nomkl") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'numpy')
assert package_is_installed(prefix, 'nomkl')
assert not package_is_installed(prefix, 'mkl')
# A consequence of discontinuing use of the 'features' key and instead
# using direct dependencies is that removing the feature means that
# packages associated with the track_features base package are completely removed
# and not replaced with equivalent non-variant packages as before.
run_command(Commands.REMOVE, prefix, '--features', 'nomkl')
# assert package_is_installed(prefix, 'numpy') # removed per above comment
assert not package_is_installed(prefix, 'nomkl')
# assert package_is_installed(prefix, 'mkl') # removed per above comment
@pytest.mark.skipif(on_win and context.bits == 32, reason="no 32-bit windows python on conda-forge")
@pytest.mark.skipif(on_win and datetime.now() <= datetime(2018, 11, 1), reason="conda-forge repodata needs vc patching")
def test_dash_c_usage_replacing_python(self):
# Regression test for #2606
with make_temp_env("-c conda-forge python=3.5") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'conda-forge::python=3.5')
run_command(Commands.INSTALL, prefix, "decorator")
assert package_is_installed(prefix, 'conda-forge::python=3.5')
with make_temp_env('--clone "%s"' % prefix) as clone_prefix:
assert package_is_installed(clone_prefix, 'conda-forge::python=3.5')
assert package_is_installed(clone_prefix, "decorator")
# Regression test for #2645
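# Remove url/channel metadata from python's conda-meta record (simulating
# records that lack channel info), then verify cloning with -c conda-forge
# still resolves the package.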
fn = glob(join(prefix, 'conda-meta', 'python-3.5*.json'))[-1]
with open(fn) as f:
data = json.load(f)
for field in ('url', 'channel', 'schannel'):
if field in data:
del data[field]
with open(fn, 'w') as f:
json.dump(data, f)
PrefixData._cache_ = {}
with make_temp_env('-c conda-forge --clone "%s"' % prefix) as clone_prefix:
assert package_is_installed(clone_prefix, 'python=3.5')
assert package_is_installed(clone_prefix, 'decorator')
def test_install_prune_flag(self):
with make_temp_env("python=3 flask") as prefix:
assert package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=3')
run_command(Commands.REMOVE, prefix, "flask")
assert not package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'itsdangerous')
assert package_is_installed(prefix, 'python=3')
run_command(Commands.INSTALL, prefix, 'pytz --prune')
assert not package_is_installed(prefix, 'itsdangerous')
assert package_is_installed(prefix, 'pytz')
assert package_is_installed(prefix, 'python=3')
@pytest.mark.skipif(on_win, reason="readline is only a python dependency on unix")
def test_remove_force_remove_flag(self):
with make_temp_env("python") as prefix:
assert package_is_installed(prefix, 'readline')
assert package_is_installed(prefix, 'python')
run_command(Commands.REMOVE, prefix, 'readline --force-remove')
assert not package_is_installed(prefix, 'readline')
assert package_is_installed(prefix, 'python')
def test_install_force_reinstall_flag(self):
with make_temp_env("python") as prefix:
stdout, stderr = run_command(Commands.INSTALL, prefix,
"--json --dry-run --force-reinstall python",
use_exception_handler=True)
output_obj = json.loads(stdout.strip())
unlink_actions = output_obj['actions']['UNLINK']
link_actions = output_obj['actions']['LINK']
assert len(unlink_actions) == len(link_actions) == 1
assert unlink_actions[0] == link_actions[0]
assert unlink_actions[0]['name'] == 'python'
def test_create_no_deps_flag(self):
with make_temp_env("python=2 flask --no-deps") as prefix:
assert package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python=2')
assert not package_is_installed(prefix, 'openssl')
assert not package_is_installed(prefix, 'itsdangerous')
def test_create_only_deps_flag(self):
with make_temp_env("python=2 flask --only-deps") as prefix:
assert not package_is_installed(prefix, 'flask')
assert package_is_installed(prefix, 'python')
if not on_win:
# python on windows doesn't actually have real dependencies
assert package_is_installed(prefix, 'openssl')
assert package_is_installed(prefix, 'itsdangerous')
def test_install_update_deps_flag(self):
with make_temp_env("flask==0.12 jinja2==2.8") as prefix:
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask==0.12")
assert package_is_installed(prefix, "jinja2=2.8")
run_command(Commands.INSTALL, prefix, "flask --update-deps")
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask>0.12")
assert package_is_installed(prefix, "jinja2>2.8")
def test_install_only_deps_flag(self):
with make_temp_env("flask==0.12 jinja2==2.8") as prefix:
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask==0.12")
assert package_is_installed(prefix, "jinja2=2.8")
run_command(Commands.INSTALL, prefix, "flask --only-deps")
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask==0.12")
assert package_is_installed(prefix, "jinja2=2.8")
with make_temp_env("flask==0.12 --only-deps") as prefix:
assert not package_is_installed(prefix, "flask")
def test_install_update_deps_only_deps_flags(self):
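# Combining --update-deps with --only-deps should update the dependency
# (jinja2 moves past 2.8) while leaving the named package flask at 0.12.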
with make_temp_env("flask==0.12 jinja2==2.8") as prefix:
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask==0.12")
assert package_is_installed(prefix, "jinja2=2.8")
run_command(Commands.INSTALL, prefix, "flask python=3.6 --update-deps --only-deps")
assert package_is_installed(prefix, "python=3.6")
assert package_is_installed(prefix, "flask==0.12")
assert package_is_installed(prefix, "jinja2>2.8")
@pytest.mark.skipif(on_win, reason="tensorflow package used in test not available on Windows")
def test_install_freeze_installed_flag(self):
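# With --freeze-installed the existing bleach=2 must stay put, which is
# expected to make the tensorflow>=1.4 request unsatisfiable.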
with make_temp_env("bleach=2") as prefix:
assert package_is_installed(prefix, "bleach=2")
with pytest.raises(UnsatisfiableError):
run_command(Commands.INSTALL, prefix,
"conda-forge::tensorflow>=1.4 --dry-run --freeze-installed")
@pytest.mark.xfail(on_win, reason="nomkl not present on windows",
strict=True)
def test_install_features(self):
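# Requesting nomkl should flip numpy to its nomkl build variant; a follow-up
# 'install nomkl --prune' is expected to drop the now-unused mkl stack.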
with make_temp_env("python=2 numpy=1.13 nomkl") as prefix:
assert package_is_installed(prefix, "numpy")
assert package_is_installed(prefix, "nomkl")
assert not package_is_installed(prefix, "mkl")
numpy_prec = PrefixData(prefix).get("numpy")
assert "nomkl" in numpy_prec.build
with make_temp_env("python=2 numpy=1.13") as prefix:
assert package_is_installed(prefix, "numpy")
assert not package_is_installed(prefix, "nomkl")
assert package_is_installed(prefix, "mkl")
numpy_prec = PrefixData(prefix).get("numpy")
assert "nomkl" not in numpy_prec.build
run_command(Commands.INSTALL, prefix, "nomkl")
assert package_is_installed(prefix, "numpy")
assert package_is_installed(prefix, "nomkl")
assert package_is_installed(prefix, "mkl") # it's fine for mkl to still be here I guess
numpy_prec = PrefixData(prefix).get("numpy")
assert "nomkl" in numpy_prec.build
run_command(Commands.INSTALL, prefix, "nomkl --prune")
assert not package_is_installed(prefix, "mkl")
assert not package_is_installed(prefix, "mkl_fft")
assert not package_is_installed(prefix, "mkl_random")
def test_clone_offline_simple(self):
with make_temp_env("python flask=0.10.1") as prefix:
assert package_is_installed(prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'python')
with make_temp_env('--clone "%s"' % prefix, "--offline") as clone_prefix:
assert context.offline
assert package_is_installed(clone_prefix, 'flask=0.10.1')
assert package_is_installed(clone_prefix, 'python')
with env_var('CONDA_DISALLOWED_PACKAGES', 'python', reset_context):
with pytest.raises(DisallowedPackageError) as exc:
with make_temp_env('--clone "%s"' % prefix, "--offline"):
pass
assert exc.value.dump_map()['package_ref']['name'] == 'python'
def test_conda_config_describe(self):
with make_temp_env() as prefix:
stdout, stderr = run_command(Commands.CONFIG, prefix, "--describe")
assert not stderr
skip_categories = ('CLI-only', 'Hidden and Undocumented')
documented_parameter_names = chain.from_iterable((
parameter_names for category, parameter_names in iteritems(context.category_map)
if category not in skip_categories
))
for param_name in documented_parameter_names:
assert re.search(r'^# # %s \(' % param_name, stdout, re.MULTILINE), param_name
stdout, stderr = run_command(Commands.CONFIG, prefix, "--describe --json")
assert not stderr
json_obj = json.loads(stdout.strip())
assert len(json_obj) >= 42
assert 'description' in json_obj[0]
with env_var('CONDA_QUIET', 'yes', reset_context):
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show-sources")
assert not stderr
assert 'envvars' in stdout.strip()
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show-sources --json")
assert not stderr
json_obj = json.loads(stdout.strip())
assert json_obj['envvars'] == {'quiet': True}
assert json_obj['cmd_line'] == {'json': True}
run_command(Commands.CONFIG, prefix, "--set changeps1 false")
with pytest.raises(CondaError):
run_command(Commands.CONFIG, prefix, "--write-default")
rm_rf(join(prefix, 'condarc'))
run_command(Commands.CONFIG, prefix, "--write-default")
with open(join(prefix, 'condarc')) as fh:
data = fh.read()
for param_name in documented_parameter_names:
assert re.search(r'^# %s \(' % param_name, data, re.MULTILINE), param_name
stdout, stderr = run_command(Commands.CONFIG, prefix, "--describe --json")
assert not stderr
json_obj = json.loads(stdout.strip())
assert len(json_obj) >= 42
assert 'description' in json_obj[0]
with env_var('CONDA_QUIET', 'yes', reset_context):
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show-sources")
assert not stderr
assert 'envvars' in stdout.strip()
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show-sources --json")
assert not stderr
json_obj = json.loads(stdout.strip())
assert json_obj['envvars'] == {'quiet': True}
assert json_obj['cmd_line'] == {'json': True}
def test_conda_config_validate(self):
with make_temp_env() as prefix:
run_command(Commands.CONFIG, prefix, "--set ssl_verify no")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--validate")
assert not stdout
assert not stderr
try:
with open(join(prefix, 'condarc'), 'a') as fh:
fh.write('default_python: anaconda\n')
fh.write('ssl_verify: /path/doesnt/exist\n')
reload_config(prefix)
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.CONFIG, prefix, "--validate")
assert len(exc.value.errors) == 2
assert "must be a boolean, a path to a certificate bundle file, or a path to a directory containing certificates of trusted CAs" in str(exc.value)
assert "default_python value 'anaconda' not of the form '[23].[0-9]'" in str(exc.value)
finally:
reset_context()
def test_rpy_search(self):
with make_temp_env("python=3.5") as prefix:
run_command(Commands.CONFIG, prefix, "--add channels https://repo.anaconda.com/pkgs/free")
run_command(Commands.CONFIG, prefix, "--remove channels defaults")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show", "--json")
json_obj = json_loads(stdout)
assert 'defaults' not in json_obj['channels']
assert package_is_installed(prefix, 'python')
assert 'r' not in context.channels
# assert conda search cannot find rpy2
stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json", use_exception_handler=True)
json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
assert json_obj['exception_name'] == 'PackagesNotFoundError'
# add r channel
run_command(Commands.CONFIG, prefix, "--add channels r")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show", "--json")
json_obj = json_loads(stdout)
assert 'r' in json_obj['channels']
# assert conda search can now find rpy2
stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json")
json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
def test_clone_offline_multichannel_with_untracked(self):
with make_temp_env("python=3.5") as prefix:
run_command(Commands.CONFIG, prefix, "--add channels https://repo.anaconda.com/pkgs/free")
run_command(Commands.CONFIG, prefix, "--remove channels defaults")
run_command(Commands.INSTALL, prefix, "-c conda-test flask")
touch(join(prefix, 'test.file')) # untracked file
with make_temp_env("--clone '%s'" % prefix, "--offline") as clone_prefix:
assert context.offline
assert package_is_installed(clone_prefix, 'python=3.5')
assert package_is_installed(clone_prefix, 'flask=0.11.1=py_0')
assert isfile(join(clone_prefix, 'test.file')) # untracked file
def test_package_pinning(self):
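# A conda-meta/pinned file should hold itsdangerous at 0.23 through
# 'update --all', and be ignored when --no-pin is passed.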
with make_temp_env("python=2.7 itsdangerous=0.23 pytz=2015.7") as prefix:
assert package_is_installed(prefix, "itsdangerous=0.23")
assert package_is_installed(prefix, "python=2.7")
assert package_is_installed(prefix, "pytz=2015.7")
with open(join(prefix, 'conda-meta', 'pinned'), 'w') as fh:
fh.write("itsdangerous 0.23\n")
run_command(Commands.UPDATE, prefix, "--all")
assert package_is_installed(prefix, "itsdangerous=0.23")
# assert not package_is_installed(prefix, "python-3.5") # should be python-3.6, but it's not because of add_defaults_to_specs
assert package_is_installed(prefix, "python=2.7")
assert not package_is_installed(prefix, "pytz=2015.7")
assert package_is_installed(prefix, "pytz")
run_command(Commands.UPDATE, prefix, "--all --no-pin")
assert package_is_installed(prefix, "python=2.7")
assert not package_is_installed(prefix, "itsdangerous=0.23")
def test_package_optional_pinning(self):
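# An entry in pinned_packages should not pull python in by itself, but once
# python becomes a dependency (via flask) the pinned build must be honored.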
with make_temp_env("") as prefix:
run_command(Commands.CONFIG, prefix,
"--add pinned_packages", "python=3.6.1=2")
run_command(Commands.INSTALL, prefix, "openssl")
assert not package_is_installed(prefix, "python")
run_command(Commands.INSTALL, prefix, "flask")
assert package_is_installed(prefix, "python=3.6.1")
def test_update_deps_flag_absent(self):
with make_temp_env("python=2 itsdangerous=0.23") as prefix:
assert package_is_installed(prefix, 'python=2')
assert package_is_installed(prefix, 'itsdangerous=0.23')
assert not package_is_installed(prefix, 'flask')
run_command(Commands.INSTALL, prefix, 'flask')
assert package_is_installed(prefix, 'python=2')
assert package_is_installed(prefix, 'itsdangerous=0.23')
assert package_is_installed(prefix, 'flask')
def test_update_deps_flag_present(self):
with make_temp_env("python=2 itsdangerous=0.23") as prefix:
assert package_is_installed(prefix, 'python=2')
assert package_is_installed(prefix, 'itsdangerous=0.23')
assert not package_is_installed(prefix, 'flask')
run_command(Commands.INSTALL, prefix, '--update-deps python=2 flask')
assert package_is_installed(prefix, 'python=2')
assert not package_is_installed(prefix, 'itsdangerous=0.23')
assert package_is_installed(prefix, 'itsdangerous')
assert package_is_installed(prefix, 'flask')
@pytest.mark.skipif(True, reason="Add this test back someday.")
# @pytest.mark.skipif(not on_win, reason="shortcuts only relevant on Windows")
def test_shortcut_in_underscore_env_shows_message(self):
prefix = make_temp_prefix("_" + str(uuid4())[:7])
with make_temp_env(prefix=prefix):
stdout, stderr = run_command(Commands.INSTALL, prefix, "console_shortcut")
assert ("Environment name starts with underscore '_'. "
"Skipping menu installation." in stderr)
@pytest.mark.skipif(not on_win, reason="shortcuts only relevant on Windows")
def test_shortcut_not_attempted_with_no_shortcuts_arg(self):
prefix = make_temp_prefix("_" + str(uuid4())[:7])
shortcut_dir = get_shortcut_dir()
shortcut_file = join(shortcut_dir, "Anaconda Prompt ({0}).lnk".format(basename(prefix)))
with make_temp_env(prefix=prefix):
stdout, stderr = run_command(Commands.INSTALL, prefix, "console_shortcut",
"--no-shortcuts")
assert ("Environment name starts with underscore '_'. Skipping menu installation."
not in stderr)
assert not isfile(shortcut_file)
@pytest.mark.skipif(not on_win, reason="shortcuts only relevant on Windows")
def test_shortcut_creation_installs_shortcut(self):
shortcut_dir = get_shortcut_dir()
shortcut_dir = join(shortcut_dir, "Anaconda{0} ({1}-bit)"
"".format(sys.version_info.major, context.bits))
prefix = make_temp_prefix(str(uuid4())[:7])
shortcut_file = join(shortcut_dir, "Anaconda Prompt ({0}).lnk".format(basename(prefix)))
try:
with make_temp_env("console_shortcut", prefix=prefix):
assert package_is_installed(prefix, 'console_shortcut')
assert isfile(shortcut_file), ("Shortcut not found in menu dir. "
"Contents of dir:\n"
"{0}".format(os.listdir(shortcut_dir)))
# make sure that cleanup without specifying --shortcuts still removes shortcuts
run_command(Commands.REMOVE, prefix, 'console_shortcut')
assert not package_is_installed(prefix, 'console_shortcut')
assert not isfile(shortcut_file)
finally:
rmtree(prefix, ignore_errors=True)
if isfile(shortcut_file):
os.remove(shortcut_file)
@pytest.mark.skipif(not on_win, reason="shortcuts only relevant on Windows")
def test_shortcut_absent_does_not_barf_on_uninstall(self):
shortcut_dir = get_shortcut_dir()
shortcut_dir = join(shortcut_dir, "Anaconda{0} ({1}-bit)"
"".format(sys.version_info.major, context.bits))
prefix = make_temp_prefix(str(uuid4())[:7])
shortcut_file = join(shortcut_dir, "Anaconda Prompt ({0}).lnk".format(basename(prefix)))
assert not isfile(shortcut_file)
try:
# including --no-shortcuts should not get shortcuts installed
with make_temp_env("console_shortcut", "--no-shortcuts", prefix=prefix):
assert package_is_installed(prefix, 'console_shortcut')
assert not isfile(shortcut_file)
# make sure that cleanup without specifying --shortcuts still removes shortcuts
run_command(Commands.REMOVE, prefix, 'console_shortcut')
assert not package_is_installed(prefix, 'console_shortcut')
assert not isfile(shortcut_file)
finally:
rmtree(prefix, ignore_errors=True)
if isfile(shortcut_file):
os.remove(shortcut_file)
@pytest.mark.skipif(not on_win, reason="shortcuts only relevant on Windows")
def test_shortcut_absent_when_condarc_set(self):
shortcut_dir = get_shortcut_dir()
shortcut_dir = join(shortcut_dir, "Anaconda{0} ({1}-bit)"
"".format(sys.version_info.major, context.bits))
prefix = make_temp_prefix(str(uuid4())[:7])
shortcut_file = join(shortcut_dir, "Anaconda Prompt ({0}).lnk".format(basename(prefix)))
assert not isfile(shortcut_file)
# set condarc shortcuts: False
run_command(Commands.CONFIG, prefix, "--set shortcuts false")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--get", "--json")
json_obj = json_loads(stdout)
assert json_obj['rc_path'] == join(prefix, 'condarc')
assert json_obj['get']['shortcuts'] is False
try:
with make_temp_env("console_shortcut", prefix=prefix):
# including shortcuts: False from condarc should not get shortcuts installed
assert package_is_installed(prefix, 'console_shortcut')
assert not isfile(shortcut_file)
# make sure that cleanup without specifying --shortcuts still removes shortcuts
run_command(Commands.REMOVE, prefix, 'console_shortcut')
assert not package_is_installed(prefix, 'console_shortcut')
assert not isfile(shortcut_file)
finally:
rmtree(prefix, ignore_errors=True)
if isfile(shortcut_file):
os.remove(shortcut_file)
def test_create_default_packages(self):
# Regression test for #3453
try:
prefix = make_temp_prefix(str(uuid4())[:7])
# set packages
run_command(Commands.CONFIG, prefix, "--add create_default_packages pip")
run_command(Commands.CONFIG, prefix, "--add create_default_packages flask")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show")
yml_obj = yaml_load(stdout)
assert yml_obj['create_default_packages'] == ['flask', 'pip']
assert not package_is_installed(prefix, 'python=2')
assert not package_is_installed(prefix, 'pytz')
assert not package_is_installed(prefix, 'flask')
with make_temp_env("python=2", "pytz", prefix=prefix):
assert package_is_installed(prefix, 'python=2')
assert package_is_installed(prefix, 'pytz')
assert package_is_installed(prefix, 'flask')
finally:
rmtree(prefix, ignore_errors=True)
def test_create_default_packages_no_default_packages(self):
try:
prefix = make_temp_prefix(str(uuid4())[:7])
# set packages
run_command(Commands.CONFIG, prefix, "--add create_default_packages pip")
run_command(Commands.CONFIG, prefix, "--add create_default_packages flask")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show")
yml_obj = yaml_load(stdout)
assert yml_obj['create_default_packages'] == ['flask', 'pip']
assert not package_is_installed(prefix, 'python=2')
assert not package_is_installed(prefix, 'pytz')
assert not package_is_installed(prefix, 'flask')
with make_temp_env("python=2", "pytz", "--no-default-packages", prefix=prefix):
assert package_is_installed(prefix, 'python=2')
assert package_is_installed(prefix, 'pytz')
assert not package_is_installed(prefix, 'flask')
finally:
rmtree(prefix, ignore_errors=True)
def test_create_dry_run(self):
# Regression test for #3453
prefix = '/some/place'
with pytest.raises(DryRunExit):
run_command(Commands.CREATE, prefix, "--dry-run")
stdout, stderr = run_command(Commands.CREATE, prefix, "--dry-run", use_exception_handler=True)
assert join('some', 'place') in stdout
# TODO: This assert passes locally but fails on CI boxes; figure out why and re-enable
# assert "The following empty environments will be CREATED" in stdout
prefix = '/another/place'
with pytest.raises(DryRunExit):
run_command(Commands.CREATE, prefix, "flask", "--dry-run")
stdout, stderr = run_command(Commands.CREATE, prefix, "flask", "--dry-run", use_exception_handler=True)
assert ":flask" in stdout
assert ":python" in stdout
assert join('another', 'place') in stdout
def test_create_dry_run_json(self):
prefix = '/some/place'
with pytest.raises(DryRunExit):
run_command(Commands.CREATE, prefix, "flask", "--dry-run", "--json")
stdout, stderr = run_command(Commands.CREATE, prefix, "flask", "--dry-run", "--json", use_exception_handler=True)
loaded = json.loads(stdout)
names = set(d['name'] for d in loaded['actions']['LINK'])
assert "python" in names
assert "flask" in names
def test_packages_not_found(self):
with make_temp_env() as prefix:
with pytest.raises(PackagesNotFoundError) as exc:
run_command(Commands.INSTALL, prefix, "not-a-real-package")
assert "not-a-real-package" in text_type(exc.value)
stdout, stderr = run_command(Commands.INSTALL, prefix, "not-a-real-package",
use_exception_handler=True)
assert "not-a-real-package" in stderr
def test_conda_pip_interop_dependency_satisfied_by_pip(self):
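# itsdangerous is installed only via pip; the solver should treat that as
# satisfying flask's dependency, so the dry-run plan links flask but not
# itsdangerous.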
with make_temp_env("python") as prefix:
check_call(PYTHON_BINARY + " -m pip install itsdangerous",
cwd=prefix, shell=True)
PrefixData._cache_.clear()
stdout, stderr = run_command(Commands.LIST, prefix)
assert 'itsdangerous' in stdout
assert not stderr
stdout, stderr = run_command(Commands.INSTALL, prefix, 'flask --dry-run --json',
use_exception_handler=True)
json_obj = json.loads(stdout)
print(json_obj)
assert any(rec["name"] == "flask" for rec in json_obj["actions"]["LINK"])
assert not any(rec["name"] == "itsdangerous" for rec in json_obj["actions"]["LINK"])
stdout, stderr = run_command(Commands.SEARCH, prefix, "not-a-real-package", "--json",
use_exception_handler=True)
assert not stderr
json_obj = json_loads(stdout.strip())
assert json_obj['exception_name'] == 'PackagesNotFoundError'
assert len(json_obj) > 0
@pytest.mark.skipif(context.subdir == "win-32", reason="metadata is wrong; give python2.7")
def test_conda_pip_interop_pip_clobbers_conda(self):
# 1. conda install old six
# 2. pip install -U six
# 3. conda list shows new six and deletes old conda record
# 4. probably need to purge something with the history file too?
with make_temp_env("six=1.9 pip=9.0.3 python=3.5") as prefix:
assert package_is_installed(prefix, "six=1.9.0")
assert package_is_installed(prefix, "python=3.5")
output = check_output(PYTHON_BINARY + " -m pip freeze", cwd=prefix, shell=True)
pkgs = set(ensure_text_type(v.strip()) for v in output.splitlines() if v.strip())
assert "six==1.9.0" in pkgs
py_ver = get_python_version_for_prefix(prefix)
sp_dir = get_python_site_packages_short_path(py_ver)
output = check_output(PYTHON_BINARY + " -m pip install -U six==1.10",
cwd=prefix, shell=True)
assert "Successfully installed six-1.10.0" in ensure_text_type(output)
PrefixData._cache_.clear()
stdout, stderr = run_command(Commands.LIST, prefix, "--json")
assert not stderr
json_obj = json.loads(stdout)
six_info = next(info for info in json_obj if info["name"] == "six")
assert six_info == {
"base_url": "https://conda.anaconda.org/pypi",
"build_number": 0,
"build_string": "pypi_0",
"channel": "pypi",
"dist_name": "six-1.10.0-pypi_0",
"name": "six",
"platform": "pypi",
"version": "1.10.0",
}
assert package_is_installed(prefix, "six=1.10.0")
output = check_output(PYTHON_BINARY + " -m pip freeze", cwd=prefix, shell=True)
pkgs = set(ensure_text_type(v.strip()) for v in output.splitlines() if v.strip())
assert "six==1.10.0" in pkgs
six_record = next(PrefixData(prefix).query("six"))
print(json_dump(six_record))
assert json_loads(json_dump(six_record)) == {
"build": "pypi_0",
"build_number": 0,
"channel": "https://conda.anaconda.org/pypi",
"constrains": [],
"depends": [
"python 3.5.*"
],
"files": [
sp_dir + "/" + "__pycache__/six.cpython-35.pyc",
sp_dir + "/" + "six-1.10.0.dist-info/DESCRIPTION.rst",
sp_dir + "/" + "six-1.10.0.dist-info/INSTALLER",
sp_dir + "/" + "six-1.10.0.dist-info/METADATA",
sp_dir + "/" + "six-1.10.0.dist-info/RECORD",
sp_dir + "/" + "six-1.10.0.dist-info/WHEEL",
sp_dir + "/" + "six-1.10.0.dist-info/metadata.json",
sp_dir + "/" + "six-1.10.0.dist-info/top_level.txt",
sp_dir + "/" + "six.py",
],
"fn": "six-1.10.0.dist-info",
"name": "six",
"package_type": "virtual_python_wheel",
"paths_data": {
"paths": [
{
"_path": sp_dir + "/" + "__pycache__/six.cpython-35.pyc",
"path_type": "hardlink",
"sha256": None,
"size_in_bytes": None
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/DESCRIPTION.rst",
"path_type": "hardlink",
"sha256": "QWBtSTT2zzabwJv1NQbTfClSX13m-Qc6tqU4TRL1RLs",
"size_in_bytes": 774
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/INSTALLER",
"path_type": "hardlink",
"sha256": "zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg",
"size_in_bytes": 4
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/METADATA",
"path_type": "hardlink",
"sha256": "5HceJsUnHof2IRamlCKO2MwNjve1eSP4rLzVQDfwpCQ",
"size_in_bytes": 1283
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/RECORD",
"path_type": "hardlink",
"sha256": None,
"size_in_bytes": None
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/WHEEL",
"path_type": "hardlink",
"sha256": "GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE",
"size_in_bytes": 110
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/metadata.json",
"path_type": "hardlink",
"sha256": "jtOeeTBubYDChl_5Ql5ZPlKoHgg6rdqRIjOz1e5Ek2U",
"size_in_bytes": 658
},
{
"_path": sp_dir + "/" + "six-1.10.0.dist-info/top_level.txt",
"path_type": "hardlink",
"sha256": "_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais",
"size_in_bytes": 4
},
{
"_path": sp_dir + "/" + "six.py",
"path_type": "hardlink",
"sha256": "A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas",
"size_in_bytes": 30098
}
],
"paths_version": 1
},
"subdir": "pypi",
"version": "1.10.0"
}
stdout, stderr = run_command(Commands.INSTALL, prefix, "six --satisfied-skip-solve")
assert not stderr
assert "All requested packages already installed." in stdout
stdout, stderr = run_command(Commands.INSTALL, prefix, "six")
assert not stderr
assert package_is_installed(prefix, "six>=1.11")
output = check_output(PYTHON_BINARY + " -m pip freeze", cwd=prefix, shell=True)
pkgs = set(ensure_text_type(v.strip()) for v in output.splitlines() if v.strip())
six_record = next(PrefixData(prefix).query("six"))
assert "six==%s" % six_record.version in pkgs
assert len(glob(join(prefix, "conda-meta", "six-*.json"))) == 1
output = check_output(PYTHON_BINARY + " -m pip install -U six==1.10",
cwd=prefix, shell=True)
print(output)
assert "Successfully installed six-1.10.0" in ensure_text_type(output)
PrefixData._cache_.clear()
assert package_is_installed(prefix, "six=1.10.0")
stdout, stderr = run_command(Commands.REMOVE, prefix, "six")
assert not stderr
assert "six-1.10.0-pypi_0" in stdout
assert not package_is_installed(prefix, "six")
assert not glob(join(prefix, sp_dir, "six*"))
def test_conda_pip_interop_conda_editable_package(self):
with make_temp_env("python=2.7") as prefix:
assert package_is_installed(prefix, "python")
# install an "editable" urllib3 that cannot be managed
output = check_output(PYTHON_BINARY + " -m pip install -e git://github.com/urllib3/[email protected]#egg=urllib3",
cwd=prefix, shell=True)
print(output)
assert isfile(join(prefix, "src", "urllib3", "urllib3", "__init__.py"))
PrefixData._cache_.clear()
assert package_is_installed(prefix, "urllib3")
urllib3_record = next(PrefixData(prefix).query("urllib3"))
urllib3_record_dump = urllib3_record.dump()
files = urllib3_record_dump.pop("files")
paths_data = urllib3_record_dump.pop("paths_data")
print(json_dump(urllib3_record_dump))
assert json_loads(json_dump(urllib3_record_dump)) == {
"build": "dev_0",
"build_number": 0,
"channel": "https://conda.anaconda.org/<develop>",
"constrains": [
"cryptography >=1.3.4",
"idna >=2.0.0",
"pyopenssl >=0.14",
"pysocks !=1.5.7,<2.0,>=1.5.6"
],
"depends": [
"python 2.7.*"
],
"fn": "urllib3-1.19.1-dev_0",
"name": "urllib3",
"package_type": "virtual_python_egg_link",
"subdir": "pypi",
"version": "1.19.1"
}
# the unmanageable urllib3 should prevent a new requests from being installed
stdout, stderr = run_command(Commands.INSTALL, prefix, "requests --dry-run --json",
use_exception_handler=True)
assert not stderr
json_obj = json_loads(stdout)
assert "UNLINK" not in json_obj["actions"]
link_dists = json_obj["actions"]["LINK"]
assert len(link_dists) == 1
assert link_dists[0]["name"] == "requests"
assert VersionOrder(link_dists[0]["version"]) < VersionOrder("2.16")
# should already be satisfied
stdout, stderr = run_command(Commands.INSTALL, prefix, "urllib3 -S")
assert "All requested packages already installed." in stdout
# should raise an error
with pytest.raises(PackagesNotFoundError):
# TODO: This raises PackagesNotFoundError, but the error should really explain
# that we can't install urllib3 because it's already installed and
# unmanageable. The error should suggest trying to use pip to uninstall it.
stdout, stderr = run_command(Commands.INSTALL, prefix, "urllib3=1.20 --dry-run")
# Now install a manageable urllib3.
output = check_output(PYTHON_BINARY + " -m pip install -U urllib3==1.20",
cwd=prefix, shell=True)
print(output)
PrefixData._cache_.clear()
assert package_is_installed(prefix, "urllib3")
urllib3_record = next(PrefixData(prefix).query("urllib3"))
urllib3_record_dump = urllib3_record.dump()
files = urllib3_record_dump.pop("files")
paths_data = urllib3_record_dump.pop("paths_data")
print(json_dump(urllib3_record_dump))
assert json_loads(json_dump(urllib3_record_dump)) == {
"build": "pypi_0",
"build_number": 0,
"channel": "https://conda.anaconda.org/pypi",
"constrains": [
"pysocks >=1.5.6,<2.0,!=1.5.7"
],
"depends": [
"python 2.7.*"
],
"fn": "urllib3-1.20.dist-info",
"name": "urllib3",
"package_type": "virtual_python_wheel",
"subdir": "pypi",
"version": "1.20"
}
# we should be able to install an unbundled requests that upgrades urllib3 in the process
stdout, stderr = run_command(Commands.INSTALL, prefix, "requests=2.18 --json")
assert package_is_installed(prefix, "requests")
assert package_is_installed(prefix, "urllib3>=1.21")
assert not stderr
json_obj = json_loads(stdout)
unlink_dists = json_obj["actions"]["UNLINK"]
assert len(unlink_dists) == 1
assert unlink_dists[0]["name"] == "urllib3"
assert unlink_dists[0]["channel"] == "pypi"
def test_conda_pip_interop_compatible_release_operator(self):
# Regression test for #7776
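# Exercises MatchSpec handling of the compatible-release operator (~=) against
# records created from pip-installed packages (fs pulls in six via pip).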
with make_temp_env("pip=10 six=1.9 appdirs") as prefix:
assert package_is_installed(prefix, "python")
assert package_is_installed(prefix, "six=1.9")
assert package_is_installed(prefix, "appdirs>=1.4.3")
p = Popen(PYTHON_BINARY + " -m pip install fs==2.1.0", stdout=PIPE, stderr=PIPE, cwd=prefix, shell=True)
stdout, stderr = p.communicate()
rc = p.returncode
assert int(rc) != 0
assert "Cannot uninstall" in text_type(stderr)
run_command(Commands.REMOVE, prefix, "six")
assert not package_is_installed(prefix, "six")
output = check_output(PYTHON_BINARY + " -m pip install fs==2.1.0", cwd=prefix, shell=True)
print(output)
PrefixData._cache_.clear()
assert package_is_installed(prefix, "fs==2.1.0")
# six_record = next(PrefixData(prefix).query("six"))
# print(json_dump(six_record.dump()))
assert package_is_installed(prefix, "six~=1.10")
stdout, stderr = run_command(Commands.LIST, prefix)
assert not stderr
assert "fs 2.1.0 pypi_0 pypi" in stdout
with pytest.raises(DryRunExit):
run_command(Commands.INSTALL, prefix, "agate=1.6 --dry-run")
@pytest.mark.skipif(on_win, reason="gawk is a windows only package")
def test_search_gawk_not_win_filter(self):
with make_temp_env() as prefix:
stdout, stderr = run_command(
Commands.SEARCH, prefix, "*gawk", "--platform", "win-64", "--json",
"-c", "https://repo.anaconda.com/pkgs/msys2 --json",
use_exception_handler=True,
)
json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
assert "gawk" in json_obj.keys()
assert "m2-gawk" in json_obj.keys()
assert len(json_obj.keys()) == 2
@pytest.mark.skipif(not on_win, reason="gawk is a windows only package")
def test_search_gawk_on_win(self):
with make_temp_env() as prefix:
stdout, stderr = run_command(Commands.SEARCH, prefix, "*gawk", "--json", use_exception_handler=True)
json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
assert "gawk" in json_obj.keys()
assert "m2-gawk" in json_obj.keys()
assert len(json_obj.keys()) == 2
@pytest.mark.skipif(not on_win, reason="gawk is a windows only package")
def test_search_gawk_on_win_filter(self):
with make_temp_env() as prefix:
stdout, stderr = run_command(Commands.SEARCH, prefix, "gawk", "--platform",
"linux-64", "--json", use_exception_handler=True)
json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
assert len(json_obj) > 0
def test_bad_anaconda_token_infinite_loop(self):
# This test is being changed around 2017-10-17, when the behavior of anaconda.org
# was changed. Previously, an expired token would return with a 401 response.
# Now, a 200 response is always given, with any public packages available on the channel.
response = requests.get("https://conda.anaconda.org/t/cqgccfm1mfma/data-portal/"
"%s/repodata.json" % context.subdir)
assert response.status_code == 200
try:
prefix = make_temp_prefix(str(uuid4())[:7])
channel_url = "https://conda.anaconda.org/t/cqgccfm1mfma/data-portal"
run_command(Commands.CONFIG, prefix, "--add channels %s" % channel_url)
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show")
yml_obj = yaml_load(stdout)
assert yml_obj['channels'] == [channel_url, 'defaults']
with pytest.raises(PackagesNotFoundError):
run_command(Commands.SEARCH, prefix, "boltons", "--json")
stdout, stderr = run_command(Commands.SEARCH, prefix, "anaconda-mosaic", "--json")
json_obj = json.loads(stdout)
assert "anaconda-mosaic" in json_obj
assert len(json_obj["anaconda-mosaic"]) > 0
finally:
rmtree(prefix, ignore_errors=True)
reset_context()
def test_anaconda_token_with_private_package(self):
# TODO: should also write a test to use binstar_client to set the token,
# then let conda load the token
# Step 0. xfail if a token is set, for example when testing locally
tokens = read_binstar_tokens()
if tokens:
pytest.xfail("binstar token found in global configuration")
# Step 1. Make sure without the token we don't see the anyjson package
try:
prefix = make_temp_prefix(str(uuid4())[:7])
channel_url = "https://conda.anaconda.org/kalefranz"
run_command(Commands.CONFIG, prefix, "--add channels %s" % channel_url)
run_command(Commands.CONFIG, prefix, "--remove channels defaults")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show")
yml_obj = yaml_load(stdout)
assert yml_obj['channels'] == [channel_url]
stdout, stderr = run_command(Commands.SEARCH, prefix, "anyjson", "--platform",
"linux-64", "--json", use_exception_handler=True)
json_obj = json_loads(stdout)
assert json_obj['exception_name'] == 'PackagesNotFoundError'
finally:
rmtree(prefix, ignore_errors=True)
reset_context()
# Step 2. Now with the token make sure we can see the anyjson package
try:
prefix = make_temp_prefix(str(uuid4())[:7])
channel_url = "https://conda.anaconda.org/t/zlZvSlMGN7CB/kalefranz"
run_command(Commands.CONFIG, prefix, "--add channels %s" % channel_url)
run_command(Commands.CONFIG, prefix, "--remove channels defaults")
stdout, stderr = run_command(Commands.CONFIG, prefix, "--show")
yml_obj = yaml_load(stdout)
assert yml_obj['channels'] == [channel_url]
stdout, stderr = run_command(Commands.SEARCH, prefix, "anyjson", "--platform",
"linux-64", "--json")
json_obj = json_loads(stdout)
assert 'anyjson' in json_obj
finally:
rmtree(prefix, ignore_errors=True)
def test_clean_index_cache(self):
prefix = ''
# make sure we have something in the index cache
stdout, stderr = run_command(Commands.INFO, prefix, "flask --json")
assert "flask" in json_loads(stdout)
index_cache_dir = create_cache_dir()
assert glob(join(index_cache_dir, "*.json"))
# now clear it
run_command(Commands.CLEAN, prefix, "--index-cache")
assert not glob(join(index_cache_dir, "*.json"))
def test_use_index_cache(self):
from conda.gateways.connection.session import CondaSession
from conda.core.subdir_data import SubdirData
SubdirData._cache_.clear()
prefix = make_temp_prefix("_" + str(uuid4())[:7])
with make_temp_env(prefix=prefix):
# First, clear the index cache to make sure we start with an empty cache.
index_cache_dir = create_cache_dir()
run_command(Commands.CLEAN, '', "--index-cache")
assert not glob(join(index_cache_dir, "*.json"))
# Then, populate the index cache.
orig_get = CondaSession.get
with patch.object(CondaSession, 'get', autospec=True) as mock_method:
def side_effect(self, url, **kwargs):
# Make sure that we don't use the cache because of the
# corresponding HTTP header. This test is supposed to test
# whether the --use-index-cache causes the cache to be used.
result = orig_get(self, url, **kwargs)
for header in ['Etag', 'Last-Modified', 'Cache-Control']:
if header in result.headers:
del result.headers[header]
return result
SubdirData._cache_.clear()
mock_method.side_effect = side_effect
stdout, stderr = run_command(Commands.INFO, prefix, "flask --json")
assert mock_method.called
# Next run with --use-index-cache and make sure it actually hits the cache
# and does not go out fetching index data remotely.
with patch.object(CondaSession, 'get', autospec=True) as mock_method:
def side_effect(self, url, **kwargs):
if url.endswith('/repodata.json') or url.endswith('/repodata.json.bz2'):
raise AssertionError('Index cache was not hit')
else:
return orig_get(self, url, **kwargs)
mock_method.side_effect = side_effect
run_command(Commands.INSTALL, prefix, "flask", "--json", "--use-index-cache")
def test_offline_with_empty_index_cache(self):
from conda.core.subdir_data import SubdirData
SubdirData._cache_.clear()
try:
with make_temp_env() as prefix:
pkgs_dir = join(prefix, 'pkgs')
with env_var('CONDA_PKGS_DIRS', pkgs_dir, reset_context):
with make_temp_channel(['flask-0.10.1']) as channel:
# Clear the index cache.
index_cache_dir = create_cache_dir()
run_command(Commands.CLEAN, '', "--index-cache")
assert not exists(index_cache_dir)
# Then attempt to install a package with --offline. The package (flask) is
# available in a local channel, however its dependencies are not. Make sure
# that a) it fails because the dependencies are not available and b)
# we don't try to download the repodata from non-local channels but we do
# download repodata from local channels.
from conda.gateways.connection.session import CondaSession
orig_get = CondaSession.get
result_dict = {}
def side_effect(self, url, **kwargs):
if not url.startswith('file://'):
raise AssertionError('Attempt to fetch repodata: {}'.format(url))
if url.startswith(channel):
result_dict['local_channel_seen'] = True
return orig_get(self, url, **kwargs)
with patch.object(CondaSession, 'get', autospec=True) as mock_method:
mock_method.side_effect = side_effect
SubdirData._cache_.clear()
# This first install passes because flask and its dependencies are in the
# package cache.
assert not package_is_installed(prefix, "flask")
run_command(Commands.INSTALL, prefix, "-c", channel, "flask", "--offline")
assert package_is_installed(prefix, "flask")
# The mock should have been called with our local channel URL though.
assert result_dict.get('local_channel_seen')
# Fails because pytz cannot be found in available channels.
with pytest.raises(PackagesNotFoundError):
run_command(Commands.INSTALL, prefix, "-c", channel, "pytz", "--offline")
assert not package_is_installed(prefix, "pytz")
finally:
SubdirData._cache_.clear()
def test_create_from_extracted(self):
with make_temp_package_cache() as pkgs_dir:
assert context.pkgs_dirs == (pkgs_dir,)
def pkgs_dir_has_tarball(tarball_prefix):
return any(f.startswith(tarball_prefix) and f.endswith(CONDA_TARBALL_EXTENSION)
for f in os.listdir(pkgs_dir))
with make_temp_env() as prefix:
# First, make sure the openssl package is present in the cache,
# downloading it if needed
assert not pkgs_dir_has_tarball('openssl-')
run_command(Commands.INSTALL, prefix, 'openssl')
assert pkgs_dir_has_tarball('openssl-')
# Then, remove the tarball but keep the extracted directory around
run_command(Commands.CLEAN, prefix, '--tarballs --yes')
assert not pkgs_dir_has_tarball('openssl-')
with make_temp_env() as prefix:
# Finally, install openssl, enforcing the use of the extracted package.
# We expect that the tarball does not appear again because we simply
# linked the package from the extracted directory. If the tarball
# appeared again, we decided to re-download the package for some reason.
run_command(Commands.INSTALL, prefix, 'openssl --offline')
assert not pkgs_dir_has_tarball('openssl-')
def test_clean_tarballs_and_packages(self):
with make_temp_package_cache() as pkgs_dir:
with make_temp_env("flask") as prefix:
pkgs_dir_contents = [join(pkgs_dir, d) for d in os.listdir(pkgs_dir)]
pkgs_dir_dirs = [d for d in pkgs_dir_contents if isdir(d)]
pkgs_dir_tarballs = [f for f in pkgs_dir_contents if f.endswith('.tar.bz2')]
assert any(basename(d).startswith('flask-') for d in pkgs_dir_dirs)
assert any(basename(f).startswith('flask-') for f in pkgs_dir_tarballs)
# --json flag is regression test for #5451
run_command(Commands.CLEAN, prefix, "--packages --yes --json")
# --json flag is regression test for #5451
run_command(Commands.CLEAN, prefix, "--tarballs --yes --json")
pkgs_dir_contents = [join(pkgs_dir, d) for d in os.listdir(pkgs_dir)]
pkgs_dir_dirs = [d for d in pkgs_dir_contents if isdir(d)]
pkgs_dir_tarballs = [f for f in pkgs_dir_contents if f.endswith('.tar.bz2')]
assert any(basename(d).startswith('flask-') for d in pkgs_dir_dirs)
assert not any(basename(f).startswith('flask-') for f in pkgs_dir_tarballs)
run_command(Commands.CLEAN, prefix, "--packages --yes")
pkgs_dir_contents = [join(pkgs_dir, d) for d in os.listdir(pkgs_dir)]
pkgs_dir_dirs = [d for d in pkgs_dir_contents if isdir(d)]
assert not any(basename(d).startswith('flask-') for d in pkgs_dir_dirs)
def test_install_mkdir(self):
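# An existing directory that is not a conda environment should raise, but
# --mkdir can create a brand-new prefix from scratch (see also #4849 below).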
try:
prefix = make_temp_prefix()
assert isdir(prefix)
with pytest.raises(DirectoryNotACondaEnvironmentError):
run_command(Commands.INSTALL, prefix, "python=3.5.2", "--mkdir")
run_command(Commands.CREATE, prefix)
run_command(Commands.INSTALL, prefix, "python=3.5.2", "--mkdir")
assert package_is_installed(prefix, "python=3.5.2")
rm_rf(prefix)
assert not isdir(prefix)
# this part also a regression test for #4849
run_command(Commands.INSTALL, prefix, "python-dateutil=2.6.0", "python=3.5.2", "--mkdir")
assert package_is_installed(prefix, "python=3.5.2")
assert package_is_installed(prefix, "python-dateutil=2.6.0")
finally:
rmtree(prefix, ignore_errors=True)
@pytest.mark.skipif(on_win, reason="python doesn't have dependencies on windows")
def test_disallowed_packages(self):
with env_var('CONDA_DISALLOWED_PACKAGES', 'sqlite&flask', reset_context):
with make_temp_env() as prefix:
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.INSTALL, prefix, 'python')
exc_val = exc.value.errors[0]
assert isinstance(exc_val, DisallowedPackageError)
assert exc_val.dump_map()['package_ref']['name'] == 'sqlite'
def test_dont_remove_conda_1(self):
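# With the root prefix pointed at this env, neither conda nor its dependency
# pycosat may be removed; both attempts should surface a RemoveError.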
pkgs_dirs = context.pkgs_dirs
prefix = make_temp_prefix()
with env_var('CONDA_ROOT_PREFIX', prefix, reset_context):
with env_var('CONDA_PKGS_DIRS', ','.join(pkgs_dirs), reset_context):
with make_temp_env(prefix=prefix):
stdout, stderr = run_command(Commands.INSTALL, prefix, "conda conda-build")
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
assert package_is_installed(prefix, "conda-build")
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.REMOVE, prefix, 'conda')
assert any(isinstance(e, RemoveError) for e in exc.value.errors)
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.REMOVE, prefix, 'pycosat')
assert any(isinstance(e, RemoveError) for e in exc.value.errors)
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
assert package_is_installed(prefix, "conda-build")
def test_dont_remove_conda_2(self):
# regression test for #6904
pkgs_dirs = context.pkgs_dirs
prefix = make_temp_prefix()
with env_var('CONDA_ROOT_PREFIX', prefix, reset_context):
with env_var('CONDA_PKGS_DIRS', ','.join(pkgs_dirs), reset_context):
with make_temp_env(prefix=prefix):
stdout, stderr = run_command(Commands.INSTALL, prefix, "conda")
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.REMOVE, prefix, 'pycosat')
assert any(isinstance(e, RemoveError) for e in exc.value.errors)
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
with pytest.raises(CondaMultiError) as exc:
run_command(Commands.REMOVE, prefix, 'conda')
assert any(isinstance(e, RemoveError) for e in exc.value.errors)
assert package_is_installed(prefix, "conda")
assert package_is_installed(prefix, "pycosat")
def test_force_remove(self):
with make_temp_env() as prefix:
stdout, stderr = run_command(Commands.INSTALL, prefix, "flask")
assert package_is_installed(prefix, "flask")
assert package_is_installed(prefix, "jinja2")
stdout, stderr = run_command(Commands.REMOVE, prefix, "jinja2", "--force")
assert not package_is_installed(prefix, "jinja2")
assert package_is_installed(prefix, "flask")
stdout, stderr = run_command(Commands.REMOVE, prefix, "flask")
assert not package_is_installed(prefix, "flask")
# regression test for #3489
# don't raise for remove --all if environment doesn't exist
rm_rf(prefix)
run_command(Commands.REMOVE, prefix, "--all")
def test_download_only_flag(self):
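# --download-only should populate the package cache without ever executing
# the unlink/link transaction; a plain install then executes it once.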
from conda.core.link import UnlinkLinkTransaction
with patch.object(UnlinkLinkTransaction, 'execute') as mock_method:
with make_temp_env('openssl --download-only', use_exception_handler=True) as prefix:
assert mock_method.call_count == 0
with make_temp_env('openssl', use_exception_handler=True) as prefix:
assert mock_method.call_count == 1
def test_transactional_rollback_simple(self):
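# If creating the prefix record blows up mid-transaction, the whole install
# must roll back and leave the environment without openssl.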
from conda.core.path_actions import CreatePrefixRecordAction
with patch.object(CreatePrefixRecordAction, 'execute') as mock_method:
with make_temp_env() as prefix:
mock_method.side_effect = KeyError('Bang bang!!')
with pytest.raises(CondaMultiError):
run_command(Commands.INSTALL, prefix, 'openssl')
assert not package_is_installed(prefix, 'openssl')
def test_transactional_rollback_upgrade_downgrade(self):
with make_temp_env("python=3.5") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'python=3')
run_command(Commands.INSTALL, prefix, 'flask=0.10.1')
assert package_is_installed(prefix, 'flask=0.10.1')
from conda.core.path_actions import CreatePrefixRecordAction
with patch.object(CreatePrefixRecordAction, 'execute') as mock_method:
mock_method.side_effect = KeyError('Bang bang!!')
with pytest.raises(CondaMultiError):
run_command(Commands.INSTALL, prefix, 'flask=0.11.1')
assert package_is_installed(prefix, 'flask=0.10.1')
def test_directory_not_a_conda_environment(self):
prefix = make_temp_prefix(str(uuid4())[:7])
try:
with pytest.raises(DirectoryNotACondaEnvironmentError):
run_command(Commands.INSTALL, prefix, "sqlite")
finally:
rm_rf(prefix)
def test_init_dev_and_NoBaseEnvironmentError(self):
conda_exe = join('Scripts', 'conda.exe') if on_win else join('bin', 'conda')
python_exe = 'python.exe' if on_win else join('bin', 'python')
with make_temp_env("conda=4.5.0", name='_' + str(uuid4())[:8]) as prefix:
result = subprocess_call("%s --version" % join(prefix, conda_exe))
assert result.rc == 0
assert not result.stderr
assert result.stdout.startswith("conda ")
conda_version = result.stdout.strip()[6:]
assert conda_version == "4.5.0"
result = subprocess_call(
("%s -m conda init cmd.exe --dev" if on_win else "%s -m conda init --dev")
% join(prefix, python_exe),
path=dirname(CONDA_PACKAGE_ROOT))
result = subprocess_call("%s --version" % join(prefix, conda_exe))
assert result.rc == 0
assert not result.stderr
assert result.stdout.startswith("conda ")
conda_version = result.stdout.strip()[6:]
assert conda_version == CONDA_VERSION
rm_rf(join(prefix, 'conda-meta', 'history'))
result = subprocess_call("%s info -a" % join(prefix, conda_exe))
print(result.stdout)
if not on_win:
# Windows has: Fatal Python error: failed to get random numbers to initialize Python
result = subprocess_call("%s install python" % join(prefix, conda_exe), env={"SHLVL": "1"},
raise_on_error=False)
assert result.rc == 1
assert "NoBaseEnvironmentError: This conda installation has no default base environment." in result.stderr
def test_conda_downgrade(self):
# Create an environment with the current conda under test, but include an earlier
# version of conda and other packages in that environment.
# Make sure we can flip back and forth.
conda_exe = join('Scripts', 'conda.exe') if on_win else join('bin', 'conda')
with env_var("CONDA_AUTO_UPDATE_CONDA", "false", reset_context):
with make_temp_env("conda=4.3.27 python=%s" % sys.version_info[0],
name='_' + str(uuid4())[:8]) as prefix: # rev 0
assert package_is_installed(prefix, "conda")
run_command(Commands.INSTALL, prefix, "mccabe") # rev 1
assert package_is_installed(prefix, "mccabe")
subprocess_call("%s install -p %s -y itsdangerous" % (join(prefix, conda_exe), prefix)) # rev 2
PrefixData._cache_.clear()
assert package_is_installed(prefix, "itsdangerous")
run_command(Commands.INSTALL, prefix, "lockfile") # rev 3
assert package_is_installed(prefix, "lockfile")
subprocess_call("%s install -p %s -y conda=4.3" % (join(prefix, conda_exe), prefix)) # rev 4
PrefixData._cache_.clear()
assert not package_is_installed(prefix, "conda=4.3.27")
subprocess_call("%s install -p %s -y colorama" % (join(prefix, conda_exe), prefix)) # rev 5
PrefixData._cache_.clear()
assert package_is_installed(prefix, "colorama")
stdout, stderr = run_command(Commands.LIST, prefix, "--revisions")
print(stdout)
PrefixData._cache_.clear()
run_command(Commands.INSTALL, prefix, "--rev 3")
PrefixData._cache_.clear()
assert package_is_installed(prefix, "conda=4.3.27")
assert not package_is_installed(prefix, "colorama")
subprocess_call("%s install -y -p %s --rev 1" % (join(prefix, conda_exe), prefix))
PrefixData._cache_.clear()
assert not package_is_installed(prefix, "itsdangerous")
PrefixData._cache_.clear()
assert package_is_installed(prefix, "conda=4.3.27")
assert package_is_installed(prefix, "python=%s" % sys.version_info[0])
result = subprocess_call("%s info --json" % join(prefix, conda_exe))
conda_info = json.loads(result.stdout)
assert conda_info["conda_version"] == "4.3.27"
@pytest.mark.skipif(on_win, reason="openssl only has a postlink script on unix")
def test_run_script_called(self):
import conda.core.link
with patch.object(conda.core.link, 'subprocess_call') as rs:
with make_temp_env("openssl=1.0.2j --no-deps") as prefix:
assert package_is_installed(prefix, 'openssl')
assert rs.call_count == 1
def test_conda_info_python(self):
stdout, stderr = run_command(Commands.INFO, None, "python=3.5")
assert "python 3.5.1 0" in stdout
def test_toolz_cytoolz_package_cache_regression(self):
with make_temp_env("python=3.5") as prefix:
pkgs_dir = join(prefix, 'pkgs')
with env_var('CONDA_PKGS_DIRS', pkgs_dir, reset_context):
assert context.pkgs_dirs == (pkgs_dir,)
run_command(Commands.INSTALL, prefix, "-c conda-forge toolz cytoolz")
assert package_is_installed(prefix, 'toolz')
def test_remove_spellcheck(self):
with make_temp_env("numpy=1.12") as prefix:
assert exists(join(prefix, PYTHON_BINARY))
assert package_is_installed(prefix, 'numpy')
with pytest.raises(PackagesNotFoundError) as exc:
run_command(Commands.REMOVE, prefix, 'numpi')
exc_string = '%r' % exc.value
assert exc_string.strip() == dals("""
PackagesNotFoundError: The following packages are missing from the target environment:
- numpi
""").strip()
assert package_is_installed(prefix, 'numpy')
def test_conda_list_json(self):
def pkg_info(s):
# function from nb_conda/envmanager.py
if hasattr(s, 'rsplit'): # proxy for isinstance(s, six.string_types)
name, version, build = s.rsplit('-', 2)
return {
'name': name,
'version': version,
'build': build
}
else:
return {
'name': s['name'],
'version': s['version'],
'build': s.get('build_string') or s['build']
}
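# Illustrative sketch (comments only, not part of the original test): pkg_info
# accepts either the legacy "name-version-build" string or a dict entry from
# `conda list --json`, e.g. (hypothetical values):
#     pkg_info("python-3.5.1-0")
#     # -> {'name': 'python', 'version': '3.5.1', 'build': '0'}
#     pkg_info({'name': 'python', 'version': '3.5.1', 'build': '0'})
#     # -> {'name': 'python', 'version': '3.5.1', 'build': '0'}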
with make_temp_env("python=3") as prefix:
stdout, stderr = run_command(Commands.LIST, prefix, '--json')
stdout_json = json.loads(stdout)
packages = [pkg_info(package) for package in stdout_json]
python_package = next(p for p in packages if p['name'] == 'python')
assert python_package['version'].startswith('3')
@pytest.mark.skipif(True, reason="get the rest of Solve API worked out first")
@pytest.mark.integration
class PrivateEnvIntegrationTests(TestCase):
def setUp(self):
PackageCacheData.clear()
self.pkgs_dirs = ','.join(context.pkgs_dirs)
self.prefix = create_temp_location()
run_command(Commands.CREATE, self.prefix)
self.preferred_env = "_spiffy-test-app_"
self.preferred_env_prefix = join(self.prefix, 'envs', self.preferred_env)
# self.save_path_conflict = os.environ.get('CONDA_PATH_CONFLICT')
self.saved_values = {}
self.saved_values['CONDA_ROOT_PREFIX'] = os.environ.get('CONDA_ROOT_PREFIX')
self.saved_values['CONDA_PKGS_DIRS'] = os.environ.get('CONDA_PKGS_DIRS')
self.saved_values['CONDA_ENABLE_PRIVATE_ENVS'] = os.environ.get('CONDA_ENABLE_PRIVATE_ENVS')
# os.environ['CONDA_PATH_CONFLICT'] = 'prevent'
os.environ['CONDA_ROOT_PREFIX'] = self.prefix
os.environ['CONDA_PKGS_DIRS'] = self.pkgs_dirs
os.environ['CONDA_ENABLE_PRIVATE_ENVS'] = 'true'
reset_context()
def tearDown(self):
rm_rf(self.prefix)
for key, value in iteritems(self.saved_values):
if value is not None:
os.environ[key] = value
else:
del os.environ[key]
reset_context()
def exe_file(self, prefix, exe_name):
if on_win:
exe_name = exe_name + '.exe'
return join(prefix, get_bin_directory_short_path(), exe_name)
@patch.object(Context, 'prefix_specified')
def test_simple_install_uninstall(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> simple progression install then uninstall <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
with env_var('YABBA-DABBA', 'doo'):
stdout, stderr, rc = subprocess_call(self.exe_file(self.prefix, 'spiffy-test-app'))
assert not stderr
assert rc == 0
json_d = json.loads(stdout)
assert json_d['YABBA-DABBA'] == 'doo'
run_command(Commands.INSTALL, self.prefix, "-c conda-test uses-spiffy-test-app")
assert not package_is_installed(self.prefix, "uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
run_command(Commands.REMOVE, self.prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
run_command(Commands.REMOVE, self.prefix, "spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
@patch.object(Context, 'prefix_specified')
def test_install_dep_uninstall_base(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> install uses-spiffy-test-app, uninstall spiffy-test-app <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert not package_is_installed(self.prefix, "uses-spiffy-test-app")
with pytest.raises(PackagesNotFoundError):
run_command(Commands.REMOVE, self.prefix, "spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
run_command(Commands.REMOVE, self.prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
# this part tests that the private environment was fully pruned
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
@patch.object(Context, 'prefix_specified')
def test_install_base_1_then_update(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> install spiffy-test-app 1.0, then update <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=1")
assert package_is_installed(self.prefix, "spiffy-test-app")
run_command(Commands.UPDATE, self.prefix, "-c conda-test spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
run_command(Commands.REMOVE, self.prefix, "spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
@patch.object(Context, 'prefix_specified')
def test_install_base_then_remove_from_private_env(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> install spiffy-test-app, then remove from preferred env <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
run_command(Commands.REMOVE, self.preferred_env_prefix, "spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
@patch.object(Context, 'prefix_specified')
def test_install_base_1_then_install_base_2(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> install spiffy-test-app 1.0, then install spiffy-test-app 2.0 <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=1")
assert package_is_installed(self.prefix, "spiffy-test-app")
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=2")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
run_command(Commands.REMOVE, self.prefix, "spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
@patch.object(Context, 'prefix_specified')
def test_install_base_2_then_install_base_1(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# >> install spiffy-test-app 2.0, then spiffy-test-app 1.0 <<
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=1")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert package_is_installed(self.prefix, "spiffy-test-app")
@patch.object(Context, 'prefix_specified')
def test_install_base_2_then_install_dep_1(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# install spiffy-test-app 2.0, then uses-spiffy-test-app 1.0,
# which should pull spiffy-test-app back into the root prefix
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not package_is_installed(self.prefix, "spiffy-test-app")
assert not package_is_installed(self.prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
run_command(Commands.INSTALL, self.prefix, "-c conda-test uses-spiffy-test-app=1")
assert package_is_installed(self.prefix, "spiffy-test-app-2")
assert package_is_installed(self.prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
@patch.object(Context, 'prefix_specified')
def test_install_dep_2_then_install_base_1(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
# install uses-spiffy-test-app 2.0, then spiffy-test-app 1.0,
run_command(Commands.INSTALL, self.prefix, "-c conda-test uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app")
assert not isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=1")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert package_is_installed(self.prefix, "spiffy-test-app=1")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
@patch.object(Context, 'prefix_specified')
def test_install_base_1_dep_2_together(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=1 uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert package_is_installed(self.prefix, "spiffy-test-app-1")
@patch.object(Context, 'prefix_specified')
def test_a2(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
run_command(Commands.INSTALL, self.prefix, "-c conda-test uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert not isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
run_command(Commands.INSTALL, self.prefix, "-c conda-test needs-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert not package_is_installed(self.prefix, "uses-spiffy-test-app=2")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
run_command(Commands.REMOVE, self.prefix, "uses-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert not package_is_installed(self.prefix, "uses-spiffy-test-app=2")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert not isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
run_command(Commands.REMOVE, self.prefix, "needs-spiffy-test-app")
assert not package_is_installed(self.prefix, "needs-spiffy-test-app")
assert package_is_installed(self.prefix, "spiffy-test-app-2")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
@patch.object(Context, 'prefix_specified')
def test_b2(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app uses-spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
run_command(Commands.INSTALL, self.prefix, "-c conda-test needs-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert not package_is_installed(self.preferred_env_prefix, "uses-spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert package_is_installed(self.prefix, "spiffy-test-app=2")
assert package_is_installed(self.prefix, "uses-spiffy-test-app")
@patch.object(Context, 'prefix_specified')
def test_c2(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
run_command(Commands.INSTALL, self.prefix, "-c conda-test needs-spiffy-test-app")
assert package_is_installed(self.prefix, "spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app=2") # nothing to do
assert package_is_installed(self.prefix, "spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
@patch.object(Context, 'prefix_specified')
def test_d2(self, prefix_specified):
prefix_specified.__get__ = Mock(return_value=False)
run_command(Commands.INSTALL, self.prefix, "-c conda-test spiffy-test-app")
assert package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert isfile(self.exe_file(self.prefix, 'spiffy-test-app'))
assert isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
run_command(Commands.INSTALL, self.prefix, "-c conda-test needs-spiffy-test-app")
assert not package_is_installed(self.preferred_env_prefix, "spiffy-test-app=2")
assert package_is_installed(self.prefix, "spiffy-test-app=2")
assert package_is_installed(self.prefix, "needs-spiffy-test-app")
assert not isfile(self.exe_file(self.preferred_env_prefix, 'spiffy-test-app'))
|
the-stack_106_15368
|
from django.db import models
from django.db.models import Q
from django.utils import timezone
class RunManager(models.Manager):
"""
Custom manager for the Run model.
"""
def scheduled(self):
"""
Return a QS filtered on runs that are in the scheduled state.
"""
qs = self.get_query_set()
return qs.filter(enqueue_dts__isnull=True)
def enqueueable(self):
"""
Return a QS filtered on runs that are ready to enqueue.
"""
job_qs = self.model.job.get_query_set()
active_jobs = job_qs.filter(
run__enqueue_dts__isnull=False,
run__return_dts__isnull=True,
)
scheduled = self.scheduled()
return scheduled.filter(
# make sure it should be running now
schedule_dts__lte=timezone.now(),
).exclude(
# exclude auto-scheduled runs when enqueueing is disabled:
# when enqueueing is disabled on the job
Q(
job__enqueue_is_enabled=False,
is_manual=False
) |
# when job-template enqueue is disabled
Q(
job__job_template__enqueue_is_enabled=False,
is_manual=False
) |
# when project enqueue is disabled
Q(
job__job_template__project__enqueue_is_enabled=False,
is_manual=False
) |
# when worker-pool enqueue is disabled
Q(
job__worker_pool__enqueue_is_enabled=False,
is_manual=False
) |
# exclude jobs that are still active
Q(job__in=active_jobs)
)
class KillRequestManager(models.Manager):
"""
Custom manager for the KillRequest model.
"""
def killable(self):
"""
Return a QS filtered on requests that are killable.
"""
qs = self.get_query_set()
return qs.filter(
# this should always be the case
schedule_dts__lte=timezone.now(),
# make sure it hasn't been executed already
enqueue_dts__isnull=True,
execute_dts__isnull=True,
# make sure a pid is assigned to the run
run__pid__isnull=False,
# make sure the run is active
run__start_dts__isnull=False,
# make sure the run hasn't returned already
run__return_dts__isnull=True,
# make sure the run is assigned to a worker
run__worker__isnull=False,
)
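# Minimal usage sketch (an assumption, not part of the original module): these
# managers are meant to be attached to their models in the usual Django way,
# e.g. `objects = RunManager()` on Run, so a scheduler loop could look like:
#
#     for run in Run.objects.enqueueable():
#         broker.enqueue(run)        # `broker.enqueue` is a hypothetical helper
#     for request in KillRequest.objects.killable():
#         broker.kill(request.run)   # hypothetical as well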
|
the-stack_106_15371
|
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A tool to run tests in many different ways.
from pathlib import Path
from collections import deque, namedtuple
from copy import deepcopy
import argparse
import asyncio
import datetime
import enum
import io
import json
import multiprocessing
import os
import pickle
import platform
import random
import re
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import typing as T
import xml.etree.ElementTree as et
from . import build
from . import environment
from . import mlog
from .coredata import major_versions_differ, MesonVersionMismatchException
from .coredata import version as coredata_version
from .dependencies import ExternalProgram
from .mesonlib import MesonException, get_wine_shortpath, split_args, join_args
from .backend.backends import TestProtocol, TestSerialisation
# GNU autotools interprets a return code of 77 from tests it executes to
# mean that the test should be skipped.
GNU_SKIP_RETURNCODE = 77
# GNU autotools interprets a return code of 99 from tests it executes to
# mean that the test failed even before testing what it is supposed to test.
GNU_ERROR_RETURNCODE = 99
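# Illustrative sketch (comments only, not part of the upstream module): a test
# program opts into these conventions simply by choosing its exit status, e.g.
#
#     import sys
#     if not feature_available():         # `feature_available` is hypothetical
#         sys.exit(GNU_SKIP_RETURNCODE)   # reported as SKIP rather than FAIL
#     sys.exit(GNU_ERROR_RETURNCODE)      # reported as ERROR (broken test setup)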
def is_windows() -> bool:
platname = platform.system().lower()
return platname == 'windows'
def is_cygwin() -> bool:
return sys.platform == 'cygwin'
def determine_worker_count() -> int:
varname = 'MESON_TESTTHREADS'
if varname in os.environ:
try:
num_workers = int(os.environ[varname])
except ValueError:
print('Invalid value in {}, using 1 thread.'.format(varname))
num_workers = 1
else:
try:
# Fails in some weird environments such as Debian
# reproducible build.
num_workers = multiprocessing.cpu_count()
except Exception:
num_workers = 1
return num_workers
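# Illustrative note (comment only): for example `MESON_TESTTHREADS=4 meson test`
# caps the harness at four parallel test processes, while an invalid value such
# as `MESON_TESTTHREADS=abc` falls back to a single worker with a printed notice.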
def add_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
help='Number of times to run the tests.')
parser.add_argument('--no-rebuild', default=False, action='store_true',
help='Do not rebuild before running tests.')
parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
help='Run test under gdb.')
parser.add_argument('--gdb-path', default='gdb', dest='gdb_path',
help='Path to the gdb binary (default: gdb).')
parser.add_argument('--list', default=False, dest='list', action='store_true',
help='List available tests.')
parser.add_argument('--wrapper', default=None, dest='wrapper', type=split_args,
help='wrapper to run tests with (e.g. Valgrind)')
parser.add_argument('-C', default='.', dest='wd',
# https://github.com/python/typeshed/issues/3107
# https://github.com/python/mypy/issues/7177
type=os.path.abspath, # type: ignore
help='directory to cd into before running')
parser.add_argument('--suite', default=[], dest='include_suites', action='append', metavar='SUITE',
help='Only run tests belonging to the given suite.')
parser.add_argument('--no-suite', default=[], dest='exclude_suites', action='append', metavar='SUITE',
help='Do not run tests belonging to the given suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
help='Do not split stderr and stdout in test logs.')
parser.add_argument('--print-errorlogs', default=False, action='store_true',
help="Whether to print failing tests' logs.")
parser.add_argument('--benchmark', default=False, action='store_true',
help="Run benchmarks instead of tests.")
parser.add_argument('--logbase', default='testlog',
help="Base name for log file.")
parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
help='How many parallel processes to use.')
parser.add_argument('-v', '--verbose', default=False, action='store_true',
help='Do not redirect stdout and stderr')
parser.add_argument('-q', '--quiet', default=False, action='store_true',
help='Produce less output to the terminal.')
parser.add_argument('-t', '--timeout-multiplier', type=float, default=None,
help='Define a multiplier for test timeout, for example '
' when running tests in particular conditions they might take'
' more time to execute.')
parser.add_argument('--setup', default=None, dest='setup',
help='Which test setup to use.')
parser.add_argument('--test-args', default=[], type=split_args,
help='Arguments to pass to the specified test(s) or all tests')
parser.add_argument('args', nargs='*',
help='Optional list of test names to run. "testname" to run all tests with that name, '
'"subprojname:testname" to specifically run "testname" from "subprojname", '
'"subprojname:" to run all tests defined by "subprojname".')
def returncode_to_status(retcode: int) -> str:
# Note: We can't use `os.WIFSIGNALED(result.returncode)` and the related
# functions here because the status returned by subprocess is munged. It
# returns a negative value if the process was killed by a signal rather than
# the raw status returned by `wait()`. Also, if a shell sits between Meson
# and the actual unit test, that shell is likely to convert a termination due
# to a signal into an exit status of 128 plus the signal number.
if retcode < 0:
signum = -retcode
try:
signame = signal.Signals(signum).name
except ValueError:
signame = 'SIGinvalid'
return '(killed by signal {} {})'.format(signum, signame)
if retcode <= 128:
return '(exit status {})'.format(retcode)
signum = retcode - 128
try:
signame = signal.Signals(signum).name
except ValueError:
signame = 'SIGinvalid'
return '(exit status {} or signal {} {})'.format(retcode, signum, signame)
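# Illustrative examples (comments only): on a typical Linux host this helper
# renders statuses roughly as follows:
#     returncode_to_status(-11)  -> '(killed by signal 11 SIGSEGV)'
#     returncode_to_status(1)    -> '(exit status 1)'
#     returncode_to_status(139)  -> '(exit status 139 or signal 11 SIGSEGV)'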
def env_tuple_to_str(env: T.Iterable[T.Tuple[str, str]]) -> str:
return ''.join(["{}='{}' ".format(k, v) for k, v in env])
class TestException(MesonException):
pass
@enum.unique
class TestResult(enum.Enum):
OK = 'OK'
TIMEOUT = 'TIMEOUT'
INTERRUPT = 'INTERRUPT'
SKIP = 'SKIP'
FAIL = 'FAIL'
EXPECTEDFAIL = 'EXPECTEDFAIL'
UNEXPECTEDPASS = 'UNEXPECTEDPASS'
ERROR = 'ERROR'
@staticmethod
def maxlen() -> int:
return 14 # len(UNEXPECTEDPASS)
class TAPParser:
Plan = namedtuple('Plan', ['count', 'late', 'skipped', 'explanation'])
Bailout = namedtuple('Bailout', ['message'])
Test = namedtuple('Test', ['number', 'name', 'result', 'explanation'])
Error = namedtuple('Error', ['message'])
Version = namedtuple('Version', ['version'])
_MAIN = 1
_AFTER_TEST = 2
_YAML = 3
_RE_BAILOUT = re.compile(r'Bail out!\s*(.*)')
_RE_DIRECTIVE = re.compile(r'(?:\s*\#\s*([Ss][Kk][Ii][Pp]\S*|[Tt][Oo][Dd][Oo])\b\s*(.*))?')
_RE_PLAN = re.compile(r'1\.\.([0-9]+)' + _RE_DIRECTIVE.pattern)
_RE_TEST = re.compile(r'((?:not )?ok)\s*(?:([0-9]+)\s*)?([^#]*)' + _RE_DIRECTIVE.pattern)
_RE_VERSION = re.compile(r'TAP version ([0-9]+)')
_RE_YAML_START = re.compile(r'(\s+)---.*')
_RE_YAML_END = re.compile(r'\s+\.\.\.\s*')
def __init__(self, io: T.Iterator[str]):
self.io = io
def parse_test(self, ok: bool, num: int, name: str, directive: T.Optional[str], explanation: T.Optional[str]) -> \
T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error'], None, None]:
name = name.strip()
explanation = explanation.strip() if explanation else None
if directive is not None:
directive = directive.upper()
if directive.startswith('SKIP'):
if ok:
yield self.Test(num, name, TestResult.SKIP, explanation)
return
elif directive == 'TODO':
yield self.Test(num, name, TestResult.UNEXPECTEDPASS if ok else TestResult.EXPECTEDFAIL, explanation)
return
else:
yield self.Error('invalid directive "{}"'.format(directive,))
yield self.Test(num, name, TestResult.OK if ok else TestResult.FAIL, explanation)
def parse(self) -> T.Generator[T.Union['TAPParser.Test', 'TAPParser.Error', 'TAPParser.Version', 'TAPParser.Plan', 'TAPParser.Bailout'], None, None]:
found_late_test = False
bailed_out = False
plan = None
lineno = 0
num_tests = 0
yaml_lineno = None
yaml_indent = ''
state = self._MAIN
version = 12
while True:
lineno += 1
try:
line = next(self.io).rstrip()
except StopIteration:
break
# YAML blocks are only accepted after a test
if state == self._AFTER_TEST:
if version >= 13:
m = self._RE_YAML_START.match(line)
if m:
state = self._YAML
yaml_lineno = lineno
yaml_indent = m.group(1)
continue
state = self._MAIN
elif state == self._YAML:
if self._RE_YAML_END.match(line):
state = self._MAIN
continue
if line.startswith(yaml_indent):
continue
yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))
state = self._MAIN
assert state == self._MAIN
if line.startswith('#'):
continue
m = self._RE_TEST.match(line)
if m:
if plan and plan.late and not found_late_test:
yield self.Error('unexpected test after late plan')
found_late_test = True
num_tests += 1
num = num_tests if m.group(2) is None else int(m.group(2))
if num != num_tests:
yield self.Error('out of order test numbers')
yield from self.parse_test(m.group(1) == 'ok', num,
m.group(3), m.group(4), m.group(5))
state = self._AFTER_TEST
continue
m = self._RE_PLAN.match(line)
if m:
if plan:
yield self.Error('more than one plan found')
else:
count = int(m.group(1))
skipped = (count == 0)
if m.group(2):
if m.group(2).upper().startswith('SKIP'):
if count > 0:
yield self.Error('invalid SKIP directive for plan')
skipped = True
else:
yield self.Error('invalid directive for plan')
plan = self.Plan(count=count, late=(num_tests > 0),
skipped=skipped, explanation=m.group(3))
yield plan
continue
m = self._RE_BAILOUT.match(line)
if m:
yield self.Bailout(m.group(1))
bailed_out = True
continue
m = self._RE_VERSION.match(line)
if m:
# The TAP version is only accepted as the first line
if lineno != 1:
yield self.Error('version number must be on the first line')
continue
version = int(m.group(1))
if version < 13:
yield self.Error('version number should be at least 13')
else:
yield self.Version(version=version)
continue
if not line:
continue
yield self.Error('unexpected input at line {}'.format(lineno))
if state == self._YAML:
yield self.Error('YAML block not terminated (started on line {})'.format(yaml_lineno))
if not bailed_out and plan and num_tests != plan.count:
if num_tests < plan.count:
yield self.Error('Too few tests run (expected {}, got {})'.format(plan.count, num_tests))
else:
yield self.Error('Too many tests run (expected {}, got {})'.format(plan.count, num_tests))
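# Illustrative sketch (comments only, not part of the upstream module): feeding
# a small TAP document through the parser yields a stream of Plan/Test events,
# roughly:
#
#     tap = io.StringIO("1..2\nok 1 first\nnot ok 2 second # TODO flaky\n")
#     for event in TAPParser(iter(tap)).parse():
#         print(event)
#     # Plan(count=2, late=False, skipped=False, explanation=None)
#     # Test(number=1, name='first', result=TestResult.OK, explanation=None)
#     # Test(number=2, name='second', result=TestResult.EXPECTEDFAIL, explanation='flaky')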
class JunitBuilder:
"""Builder for Junit test results.
Junit is impossible to stream out, it requires attributes counting the
total number of tests, failures, skips, and errors in the root element
and in each test suite. As such, we use a builder class to track each
test case, and calculate all metadata before writing it out.
For tests with multiple results (like from a TAP test), we record the
test as a suite with the project_name.test_name. This allows us to track
each result separately. For tests with only one result (such as exit-code
tests) we record each one into a suite with the name project_name. The use
of the project_name allows us to sort subproject tests separately from
the root project.
"""
def __init__(self, filename: str) -> None:
self.filename = filename
self.root = et.Element(
'testsuites', tests='0', errors='0', failures='0')
self.suites = {} # type: T.Dict[str, et.Element]
def log(self, name: str, test: 'TestRun') -> None:
"""Log a single test case."""
if test.junit is not None:
for suite in test.junit.findall('.//testsuite'):
# Assume that we don't need to merge anything here...
suite.attrib['name'] = '{}.{}.{}'.format(test.project, name, suite.attrib['name'])
# GTest can inject invalid attributes
for case in suite.findall('.//testcase[@result]'):
del case.attrib['result']
for case in suite.findall('.//testcase[@timestamp]'):
del case.attrib['timestamp']
self.root.append(suite)
return
# In this case we have a test binary with multiple results.
# We want each of those results recorded separately.
if test.results:
suitename = '{}.{}'.format(test.project, name)
assert suitename not in self.suites, 'duplicate suite'
suite = self.suites[suitename] = et.Element(
'testsuite',
name=suitename,
tests=str(len(test.results)),
errors=str(sum(1 for r in test.results if r in
{TestResult.INTERRUPT, TestResult.ERROR})),
failures=str(sum(1 for r in test.results if r in
{TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
skipped=str(sum(1 for r in test.results if r is TestResult.SKIP)),
)
for i, result in enumerate(test.results):
# Both name and classname are required. Set them both to the
# number of the test in a TAP test, as TAP doesn't give names.
testcase = et.SubElement(suite, 'testcase', name=str(i), classname=str(i))
if result is TestResult.SKIP:
et.SubElement(testcase, 'skipped')
elif result is TestResult.ERROR:
et.SubElement(testcase, 'error')
elif result is TestResult.FAIL:
et.SubElement(testcase, 'failure')
elif result is TestResult.UNEXPECTEDPASS:
fail = et.SubElement(testcase, 'failure')
fail.text = 'Test unexpectedly passed.'
elif result is TestResult.INTERRUPT:
fail = et.SubElement(testcase, 'failure')
fail.text = 'Test was interrupted by user.'
elif result is TestResult.TIMEOUT:
fail = et.SubElement(testcase, 'failure')
fail.text = 'Test did not finish before configured timeout.'
if test.stdo:
out = et.SubElement(suite, 'system-out')
out.text = test.stdo.rstrip()
if test.stde:
err = et.SubElement(suite, 'system-err')
err.text = test.stde.rstrip()
else:
if test.project not in self.suites:
suite = self.suites[test.project] = et.Element(
'testsuite', name=test.project, tests='1', errors='0',
failures='0', skipped='0')
else:
suite = self.suites[test.project]
suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)
testcase = et.SubElement(suite, 'testcase', name=name, classname=name)
if test.res is TestResult.SKIP:
et.SubElement(testcase, 'skipped')
suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
elif test.res is TestResult.ERROR:
et.SubElement(testcase, 'error')
suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
elif test.res is TestResult.FAIL:
et.SubElement(testcase, 'failure')
suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
if test.stdo:
out = et.SubElement(testcase, 'system-out')
out.text = test.stdo.rstrip()
if test.stde:
err = et.SubElement(testcase, 'system-err')
err.text = test.stde.rstrip()
def write(self) -> None:
"""Calculate total test counts and write out the xml result."""
for suite in self.suites.values():
self.root.append(suite)
# Skipped is really not allowed in the "testsuites" element
for attr in ['tests', 'errors', 'failures']:
self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
tree = et.ElementTree(self.root)
with open(self.filename, 'wb') as f:
tree.write(f, encoding='utf-8', xml_declaration=True)
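# Minimal usage sketch (comments only; TestHarness below drives this for real):
#
#     builder = JunitBuilder('meson-logs/testlog.junit.xml')
#     builder.log('mytest', test_run)   # `test_run` is a TestRun instance
#     builder.write()                   # totals are computed at write time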
class TestRun:
@classmethod
def make_gtest(cls, test: TestSerialisation, test_env: T.Dict[str, str],
returncode: int, starttime: float, duration: float,
stdo: T.Optional[str], stde: T.Optional[str],
cmd: T.Optional[T.List[str]]) -> 'TestRun':
filename = '{}.xml'.format(test.name)
if test.workdir:
filename = os.path.join(test.workdir, filename)
tree = et.parse(filename)
return cls.make_exitcode(
test, test_env, returncode, starttime, duration, stdo, stde, cmd,
junit=tree)
@classmethod
def make_exitcode(cls, test: TestSerialisation, test_env: T.Dict[str, str],
returncode: int, starttime: float, duration: float,
stdo: T.Optional[str], stde: T.Optional[str],
cmd: T.Optional[T.List[str]], **kwargs: T.Any) -> 'TestRun':
if returncode == GNU_SKIP_RETURNCODE:
res = TestResult.SKIP
elif returncode == GNU_ERROR_RETURNCODE:
res = TestResult.ERROR
elif test.should_fail:
res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
else:
res = TestResult.FAIL if bool(returncode) else TestResult.OK
return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd, **kwargs)
@classmethod
def make_tap(cls, test: TestSerialisation, test_env: T.Dict[str, str],
returncode: int, starttime: float, duration: float,
stdo: str, stde: str,
cmd: T.Optional[T.List[str]]) -> 'TestRun':
res = None # type: T.Optional[TestResult]
results = [] # type: T.List[TestResult]
failed = False
for i in TAPParser(io.StringIO(stdo)).parse():
if isinstance(i, TAPParser.Bailout):
results.append(TestResult.ERROR)
failed = True
elif isinstance(i, TAPParser.Test):
results.append(i.result)
if i.result not in {TestResult.OK, TestResult.EXPECTEDFAIL, TestResult.SKIP}:
failed = True
elif isinstance(i, TAPParser.Error):
results.append(TestResult.ERROR)
stde += '\nTAP parsing error: ' + i.message
failed = True
if returncode != 0:
res = TestResult.ERROR
stde += '\n(test program exited with status code {})'.format(returncode,)
if res is None:
# Now determine the overall result of the test based on the outcome of the subcases
if all(t is TestResult.SKIP for t in results):
# This includes the case where num_tests is zero
res = TestResult.SKIP
elif test.should_fail:
res = TestResult.EXPECTEDFAIL if failed else TestResult.UNEXPECTEDPASS
else:
res = TestResult.FAIL if failed else TestResult.OK
return cls(test, test_env, res, results, returncode, starttime, duration, stdo, stde, cmd)
def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
res: TestResult, results: T.List[TestResult], returncode:
int, starttime: float, duration: float,
stdo: T.Optional[str], stde: T.Optional[str],
cmd: T.Optional[T.List[str]], *, junit: T.Optional[et.ElementTree] = None):
assert isinstance(res, TestResult)
self.res = res
self.results = results # May be an empty list
self.returncode = returncode
self.starttime = starttime
self.duration = duration
self.stdo = stdo
self.stde = stde
self.cmd = cmd
self.env = test_env
self.should_fail = test.should_fail
self.project = test.project_name
self.junit = junit
def get_log(self) -> str:
res = '--- command ---\n'
if self.cmd is None:
res += 'NONE\n'
else:
test_only_env = set(self.env.items()) - set(os.environ.items())
starttime_str = time.strftime("%H:%M:%S", time.gmtime(self.starttime))
res += '{} {}{}\n'.format(
starttime_str, env_tuple_to_str(test_only_env), ' '.join(self.cmd)
)
if self.stdo:
res += '--- stdout ---\n'
res += self.stdo
if self.stde:
if res[-1:] != '\n':
res += '\n'
res += '--- stderr ---\n'
res += self.stde
if res[-1:] != '\n':
res += '\n'
res += '-------\n\n'
return res
def decode(stream: T.Union[None, bytes]) -> str:
if stream is None:
return ''
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')
def write_json_log(jsonlogfile: T.TextIO, test_name: str, result: TestRun) -> None:
jresult = {'name': test_name,
'stdout': result.stdo,
'result': result.res.value,
'starttime': result.starttime,
'duration': result.duration,
'returncode': result.returncode,
'env': result.env,
'command': result.cmd} # type: T.Dict[str, T.Any]
if result.stde:
jresult['stderr'] = result.stde
jsonlogfile.write(json.dumps(jresult) + '\n')
def run_with_mono(fname: str) -> bool:
return fname.endswith('.exe') and not (is_windows() or is_cygwin())
def check_testdata(objs: T.List[TestSerialisation]) -> T.List[TestSerialisation]:
if not isinstance(objs, list):
raise MesonVersionMismatchException('<unknown>', coredata_version)
for obj in objs:
if not isinstance(obj, TestSerialisation):
raise MesonVersionMismatchException('<unknown>', coredata_version)
if not hasattr(obj, 'version'):
raise MesonVersionMismatchException('<unknown>', coredata_version)
if major_versions_differ(obj.version, coredata_version):
raise MesonVersionMismatchException(obj.version, coredata_version)
return objs
def load_benchmarks(build_dir: str) -> T.List[TestSerialisation]:
datafile = Path(build_dir) / 'meson-private' / 'meson_benchmark_setup.dat'
if not datafile.is_file():
raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
with datafile.open('rb') as f:
objs = check_testdata(pickle.load(f))
return objs
def load_tests(build_dir: str) -> T.List[TestSerialisation]:
datafile = Path(build_dir) / 'meson-private' / 'meson_test_setup.dat'
if not datafile.is_file():
raise TestException('Directory {!r} does not seem to be a Meson build directory.'.format(build_dir))
with datafile.open('rb') as f:
objs = check_testdata(pickle.load(f))
return objs
# Custom waiting primitives for asyncio
async def try_wait_one(*awaitables: T.Any, timeout: T.Optional[T.Union[int, float]]) -> None:
try:
await asyncio.wait(awaitables,
timeout=timeout, return_when=asyncio.FIRST_COMPLETED)
except asyncio.TimeoutError:
pass
async def complete(future: asyncio.Future) -> None:
"""Wait for completion of the given future, ignoring cancellation."""
try:
await future
except asyncio.CancelledError:
pass
async def complete_all(futures: T.Iterable[asyncio.Future]) -> None:
"""Wait for completion of all the given futures, ignoring cancellation."""
while futures:
done, futures = await asyncio.wait(futures, return_when=asyncio.FIRST_EXCEPTION)
# Raise exceptions if needed for all the "done" futures
for f in done:
if not f.cancelled():
f.result()
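# Illustrative sketch (comments only, not part of the upstream module): these
# helpers wrap asyncio.wait() so callers can bound a wait without cancelling
# the awaited task, e.g.
#
#     await try_wait_one(proc.wait(), timeout=5)   # returns after 5s either way
#     await complete_all(futures)                  # drain futures, surface errors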
class SingleTestRunner:
def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
env: T.Dict[str, str], options: argparse.Namespace):
self.test = test
self.test_env = test_env
self.env = env
self.options = options
def _get_cmd(self) -> T.Optional[T.List[str]]:
if self.test.fname[0].endswith('.jar'):
return ['java', '-jar'] + self.test.fname
elif not self.test.is_cross_built and run_with_mono(self.test.fname[0]):
return ['mono'] + self.test.fname
elif self.test.cmd_is_built and self.test.needs_exe_wrapper:
if self.test.exe_runner is None:
# Cannot run the test on a cross-compiled executable
# because there is no exe wrapper.
return None
elif self.test.cmd_is_built:
# If the command is not built (i.e., it's a Python script),
# then we don't check for the exe wrapper
if not self.test.exe_runner.found():
msg = ('The exe_wrapper defined in the cross file {!r} was not '
'found. Please check the command and/or add it to PATH.')
raise TestException(msg.format(self.test.exe_runner.name))
return self.test.exe_runner.get_command() + self.test.fname
return self.test.fname
async def run(self) -> TestRun:
cmd = self._get_cmd()
if cmd is None:
skip_stdout = 'Not run because cross-compiled binaries cannot be executed.'
return TestRun(self.test, self.test_env, TestResult.SKIP, [], GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
else:
wrap = TestHarness.get_wrapper(self.options)
if self.options.gdb:
self.test.timeout = None
return await self._run_cmd(wrap + cmd + self.test.cmd_args + self.options.test_args)
async def _run_subprocess(self, args: T.List[str], *, timeout: T.Optional[int],
stdout: T.IO, stderr: T.IO,
env: T.Dict[str, str], cwd: T.Optional[str]) -> T.Tuple[int, TestResult, T.Optional[str]]:
async def kill_process(p: asyncio.subprocess.Process) -> T.Optional[str]:
# Python does not provide multiplatform support for
# killing a process and all its children so we need
# to roll our own.
try:
if is_windows():
subprocess.run(['taskkill', '/F', '/T', '/PID', str(p.pid)])
else:
# Send a termination signal to the process group that setsid()
# created - giving it a chance to perform any cleanup.
os.killpg(p.pid, signal.SIGTERM)
# Make sure the termination signal actually kills the process
# group, otherwise retry with a SIGKILL.
await try_wait_one(p.wait(), timeout=0.5)
if p.returncode is not None:
return None
os.killpg(p.pid, signal.SIGKILL)
await try_wait_one(p.wait(), timeout=1)
if p.returncode is not None:
return None
# An earlier kill attempt has not worked for whatever reason.
# Try to kill it one last time with a direct call.
# If the process has spawned children, they will remain around.
p.kill()
await try_wait_one(p.wait(), timeout=1)
if p.returncode is not None:
return None
return 'Test process could not be killed.'
except ProcessLookupError:
# Sometimes (e.g. with Wine) this happens. There's nothing
# we can do, probably the process already died so just wait
# for the event loop to pick that up.
await p.wait()
return None
# Let gdb handle ^C instead of us
if self.options.gdb:
previous_sigint_handler = signal.getsignal(signal.SIGINT)
# Make the meson executable ignore SIGINT while gdb is running.
signal.signal(signal.SIGINT, signal.SIG_IGN)
def preexec_fn() -> None:
if self.options.gdb:
# Restore the SIGINT handler for the child process to
# ensure it can handle it.
signal.signal(signal.SIGINT, signal.SIG_DFL)
else:
# We don't want setsid() under gdb because gdb needs the
# terminal in order to handle ^C; skipping it also avoids
# tcsetpgrp() errors that would make the terminal unusable.
os.setsid()
p = await asyncio.create_subprocess_exec(*args,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd,
preexec_fn=preexec_fn if not is_windows() else None)
result = None
additional_error = None
try:
await try_wait_one(p.wait(), timeout=timeout)
if p.returncode is None:
if self.options.verbose:
print('{} timed out (after {} seconds)'.format(self.test.name, timeout))
additional_error = await kill_process(p)
result = TestResult.TIMEOUT
except asyncio.CancelledError:
# The main loop must have seen Ctrl-C.
additional_error = await kill_process(p)
result = TestResult.INTERRUPT
finally:
if self.options.gdb:
# Let us accept ^C again
signal.signal(signal.SIGINT, previous_sigint_handler)
return p.returncode or 0, result, additional_error
async def _run_cmd(self, cmd: T.List[str]) -> TestRun:
starttime = time.time()
if self.test.extra_paths:
self.env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + self.env['PATH']
winecmd = []
for c in cmd:
winecmd.append(c)
if os.path.basename(c).startswith('wine'):
self.env['WINEPATH'] = get_wine_shortpath(
winecmd,
['Z:' + p for p in self.test.extra_paths] + self.env.get('WINEPATH', '').split(';')
)
break
# If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
# (i.e., the test or the environment don't explicitly set it), set
# it ourselves. We do this unconditionally for regular tests
# because it is extremely useful to have.
# Setting MALLOC_PERTURB_="0" will completely disable this feature.
if ('MALLOC_PERTURB_' not in self.env or not self.env['MALLOC_PERTURB_']) and not self.options.benchmark:
self.env['MALLOC_PERTURB_'] = str(random.randint(1, 255))
stdout = None
stderr = None
if not self.options.verbose:
stdout = tempfile.TemporaryFile("wb+")
stderr = tempfile.TemporaryFile("wb+") if self.options.split else stdout
if self.test.protocol is TestProtocol.TAP and stderr is stdout:
stdout = tempfile.TemporaryFile("wb+")
extra_cmd = [] # type: T.List[str]
if self.test.protocol is TestProtocol.GTEST:
gtestname = self.test.name
if self.test.workdir:
gtestname = os.path.join(self.test.workdir, self.test.name)
extra_cmd.append('--gtest_output=xml:{}.xml'.format(gtestname))
if self.test.timeout is None:
timeout = None
elif self.options.timeout_multiplier is not None:
timeout = self.test.timeout * self.options.timeout_multiplier
else:
timeout = self.test.timeout
returncode, result, additional_error = await self._run_subprocess(cmd + extra_cmd,
timeout=timeout,
stdout=stdout,
stderr=stderr,
env=self.env,
cwd=self.test.workdir)
endtime = time.time()
duration = endtime - starttime
if additional_error is None:
if stdout is None:
stdo = ''
else:
stdout.seek(0)
stdo = decode(stdout.read())
if stderr is None or stderr is stdout:
stde = ''
else:
stderr.seek(0)
stde = decode(stderr.read())
else:
stdo = ""
stde = additional_error
if result:
return TestRun(self.test, self.test_env, result, [], returncode, starttime, duration, stdo, stde, cmd)
else:
if self.test.protocol is TestProtocol.EXITCODE:
return TestRun.make_exitcode(self.test, self.test_env, returncode, starttime, duration, stdo, stde, cmd)
elif self.test.protocol is TestProtocol.GTEST:
return TestRun.make_gtest(self.test, self.test_env, returncode, starttime, duration, stdo, stde, cmd)
else:
if self.options.verbose:
print(stdo, end='')
return TestRun.make_tap(self.test, self.test_env, returncode, starttime, duration, stdo, stde, cmd)
class TestHarness:
def __init__(self, options: argparse.Namespace):
self.options = options
self.collected_logs = [] # type: T.List[str]
self.collected_failures = [] # type: T.List[str]
self.fail_count = 0
self.expectedfail_count = 0
self.unexpectedpass_count = 0
self.success_count = 0
self.skip_count = 0
self.timeout_count = 0
self.is_run = False
self.tests = None
self.results = [] # type: T.List[TestRun]
self.logfilename = None # type: T.Optional[str]
self.logfile = None # type: T.Optional[T.TextIO]
self.jsonlogfile = None # type: T.Optional[T.TextIO]
self.junit = None # type: T.Optional[JunitBuilder]
if self.options.benchmark:
self.tests = load_benchmarks(options.wd)
else:
self.tests = load_tests(options.wd)
ss = set()
for t in self.tests:
for s in t.suite:
ss.add(s)
self.suites = list(ss)
def __del__(self) -> None:
self.close_logfiles()
def __enter__(self) -> 'TestHarness':
return self
def __exit__(self, exc_type: T.Any, exc_value: T.Any, traceback: T.Any) -> None:
self.close_logfiles()
def close_logfiles(self) -> None:
for f in ['logfile', 'jsonlogfile']:
lfile = getattr(self, f)
if lfile:
lfile.close()
setattr(self, f, None)
def merge_suite_options(self, options: argparse.Namespace, test: TestSerialisation) -> T.Dict[str, str]:
if ':' in options.setup:
if options.setup not in self.build_data.test_setups:
sys.exit("Unknown test setup '{}'.".format(options.setup))
current = self.build_data.test_setups[options.setup]
else:
full_name = test.project_name + ":" + options.setup
if full_name not in self.build_data.test_setups:
sys.exit("Test setup '{}' not found from project '{}'.".format(options.setup, test.project_name))
current = self.build_data.test_setups[full_name]
if not options.gdb:
options.gdb = current.gdb
if options.gdb:
options.verbose = True
if options.timeout_multiplier is None:
options.timeout_multiplier = current.timeout_multiplier
# if options.env is None:
# options.env = current.env # FIXME, should probably merge options here.
if options.wrapper is not None and current.exe_wrapper is not None:
sys.exit('Conflict: both test setup and command line specify an exe wrapper.')
if options.wrapper is None:
options.wrapper = current.exe_wrapper
return current.env.get_env(os.environ.copy())
def get_test_runner(self, test: TestSerialisation) -> SingleTestRunner:
options = deepcopy(self.options)
if not options.setup:
options.setup = self.build_data.test_setup_default_name
if options.setup:
env = self.merge_suite_options(options, test)
else:
env = os.environ.copy()
test_env = test.env.get_env(env)
env.update(test_env)
if (test.is_cross_built and test.needs_exe_wrapper and
test.exe_runner and test.exe_runner.found()):
env['MESON_EXE_WRAPPER'] = join_args(test.exe_runner.get_command())
return SingleTestRunner(test, test_env, env, options)
def process_test_result(self, result: TestRun) -> None:
if result.res is TestResult.TIMEOUT:
self.timeout_count += 1
elif result.res is TestResult.SKIP:
self.skip_count += 1
elif result.res is TestResult.OK:
self.success_count += 1
elif result.res in {TestResult.FAIL, TestResult.ERROR, TestResult.INTERRUPT}:
self.fail_count += 1
elif result.res is TestResult.EXPECTEDFAIL:
self.expectedfail_count += 1
elif result.res is TestResult.UNEXPECTEDPASS:
self.unexpectedpass_count += 1
else:
sys.exit('Unknown test result encountered: {}'.format(result.res))
def print_stats(self, test_count: int, name_max_len: int,
tests: T.List[TestSerialisation],
name: str, result: TestRun, i: int) -> None:
ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL)
bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.INTERRUPT,
TestResult.UNEXPECTEDPASS, TestResult.ERROR)
result_str = '{num:{numlen}}/{testcount} {name:{name_max_len}} {res:{reslen}} {dur:.2f}s'.format(
numlen=len(str(test_count)),
num=i,
testcount=test_count,
name_max_len=name_max_len,
name=name,
reslen=TestResult.maxlen(),
res=result.res.value,
dur=result.duration)
if result.res is TestResult.FAIL:
result_str += ' ' + returncode_to_status(result.returncode)
if result.res in bad_statuses:
self.collected_failures.append(result_str)
if not self.options.quiet or result.res not in ok_statuses:
decorator = mlog.plain
if result.res in bad_statuses:
decorator = mlog.red
elif result.res is TestResult.SKIP:
decorator = mlog.yellow
print(decorator(result_str).get_text(mlog.colorize_console()))
result_str += "\n\n" + result.get_log()
if result.res in bad_statuses:
if self.options.print_errorlogs:
self.collected_logs.append(result_str)
if self.logfile:
self.logfile.write(result_str)
if self.jsonlogfile:
write_json_log(self.jsonlogfile, name, result)
if self.junit:
self.junit.log(name, result)
def print_summary(self) -> None:
# Prepend a list of failures
msg = '' if len(self.collected_failures) < 1 else "\nSummary of Failures:\n\n"
msg += '\n'.join(self.collected_failures)
msg += textwrap.dedent('''
Ok: {:<4}
Expected Fail: {:<4}
Fail: {:<4}
Unexpected Pass: {:<4}
Skipped: {:<4}
Timeout: {:<4}
''').format(self.success_count, self.expectedfail_count, self.fail_count,
self.unexpectedpass_count, self.skip_count, self.timeout_count)
print(msg)
if self.logfile:
self.logfile.write(msg)
if self.junit:
self.junit.write()
def print_collected_logs(self) -> None:
if self.collected_logs:
if len(self.collected_logs) > 10:
print('\nThe output from the first 10 failed tests:\n')
else:
print('\nThe output from the failed tests:\n')
for log in self.collected_logs[:10]:
lines = log.splitlines()
if len(lines) > 104:
print('\n'.join(lines[0:4]))
print('--- Listing only the last 100 lines from a long log. ---')
lines = lines[-100:]
for line in lines:
try:
print(line)
except UnicodeEncodeError:
line = line.encode('ascii', errors='replace').decode()
print(line)
def total_failure_count(self) -> int:
return self.fail_count + self.unexpectedpass_count + self.timeout_count
def doit(self) -> int:
if self.is_run:
raise RuntimeError('Test harness object can only be used once.')
self.is_run = True
tests = self.get_tests()
if not tests:
return 0
self.run_tests(tests)
return self.total_failure_count()
@staticmethod
def split_suite_string(suite: str) -> T.Tuple[str, str]:
if ':' in suite:
split = suite.split(':', 1)
assert len(split) == 2
return split[0], split[1]
else:
return suite, ""
@staticmethod
def test_in_suites(test: TestSerialisation, suites: T.List[str]) -> bool:
for suite in suites:
(prj_match, st_match) = TestHarness.split_suite_string(suite)
for prjst in test.suite:
(prj, st) = TestHarness.split_suite_string(prjst)
# the SUITE can be passed as
# suite_name
# or
# project_name:suite_name
# so we need to select only the tests belonging to project_name.
# This if handles the first case (i.e., SUITE == suite_name);
# this way we can run tests belonging to different
# (sub)projects which share the same suite_name.
if not st_match and st == prj_match:
return True
# these two conditions are needed to handle the second option
# i.e., SUITE == project_name:suite_name
# in this way we select only the tests of
# project_name with suite_name
if prj_match and prj != prj_match:
continue
if st_match and st != st_match:
continue
return True
return False
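# Illustrative sketch (comments only, not part of the upstream module), assuming
# a test whose serialised suite list is ['subproj:unit']:
#
#     TestHarness.test_in_suites(test, ['unit'])          # True  (bare suite name)
#     TestHarness.test_in_suites(test, ['subproj:unit'])  # True  (project:suite)
#     TestHarness.test_in_suites(test, ['subproj:'])      # True  (whole subproject)
#     TestHarness.test_in_suites(test, ['other:unit'])    # False (different project)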
def test_suitable(self, test: TestSerialisation) -> bool:
return ((not self.options.include_suites or
TestHarness.test_in_suites(test, self.options.include_suites)) and not
TestHarness.test_in_suites(test, self.options.exclude_suites))
def tests_from_args(self, tests: T.List[TestSerialisation]) -> T.Generator[TestSerialisation, None, None]:
'''
Allow specifying test names like "meson test foo1 foo2", matching tests declared as test('foo1', ...).
Also support specifying the subproject to run tests from like
"meson test subproj:" (all tests inside subproj) or "meson test subproj:foo1"
to run foo1 inside subproj. Coincidentally also "meson test :foo1" to
run all tests with that name across all subprojects, which is
identical to "meson test foo1"
'''
for arg in self.options.args:
if ':' in arg:
subproj, name = arg.split(':', maxsplit=1)
else:
subproj, name = '', arg
for t in tests:
if subproj and t.project_name != subproj:
continue
if name and t.name != name:
continue
yield t
def get_tests(self) -> T.List[TestSerialisation]:
if not self.tests:
print('No tests defined.')
return []
if self.options.include_suites or self.options.exclude_suites:
tests = []
for tst in self.tests:
if self.test_suitable(tst):
tests.append(tst)
else:
tests = self.tests
if self.options.args:
tests = list(self.tests_from_args(tests))
if not tests:
print('No suitable tests defined.')
return []
return tests
def open_log_files(self) -> None:
if not self.options.logbase or self.options.verbose:
return
namebase = None
logfile_base = os.path.join(self.options.wd, 'meson-logs', self.options.logbase)
if self.options.wrapper:
namebase = os.path.basename(self.get_wrapper(self.options)[0])
elif self.options.setup:
namebase = self.options.setup.replace(":", "_")
if namebase:
logfile_base += '-' + namebase.replace(' ', '_')
self.junit = JunitBuilder(logfile_base + '.junit.xml')
self.logfilename = logfile_base + '.txt'
self.jsonlogfilename = logfile_base + '.json'
self.jsonlogfile = open(self.jsonlogfilename, 'w', encoding='utf-8', errors='replace')
self.logfile = open(self.logfilename, 'w', encoding='utf-8', errors='surrogateescape')
self.logfile.write('Log of Meson test suite run on {}\n\n'.format(datetime.datetime.now().isoformat()))
inherit_env = env_tuple_to_str(os.environ.items())
self.logfile.write('Inherited environment: {}\n\n'.format(inherit_env))
@staticmethod
def get_wrapper(options: argparse.Namespace) -> T.List[str]:
wrap = [] # type: T.List[str]
if options.gdb:
wrap = [options.gdb_path, '--quiet', '--nh']
if options.repeat > 1:
wrap += ['-ex', 'run', '-ex', 'quit']
# Signal the end of arguments to gdb
wrap += ['--args']
if options.wrapper:
wrap += options.wrapper
return wrap
def get_pretty_suite(self, test: TestSerialisation) -> str:
if len(self.suites) > 1 and test.suite:
rv = TestHarness.split_suite_string(test.suite[0])[0]
s = "+".join(TestHarness.split_suite_string(s)[1] for s in test.suite)
if s:
rv += ":"
return rv + s + " / " + test.name
else:
return test.name
def run_tests(self, tests: T.List[TestSerialisation]) -> None:
# Replace with asyncio.run once we can require Python 3.7
loop = asyncio.get_event_loop()
loop.run_until_complete(self._run_tests(tests))
async def _run_tests(self, tests: T.List[TestSerialisation]) -> None:
semaphore = asyncio.Semaphore(self.options.num_processes)
futures = deque() # type: T.Deque[asyncio.Future]
running_tests = dict() # type: T.Dict[asyncio.Future, str]
test_count = len(tests)
name_max_len = max([len(self.get_pretty_suite(test)) for test in tests])
self.open_log_files()
startdir = os.getcwd()
if self.options.wd:
os.chdir(self.options.wd)
self.build_data = build.load(os.getcwd())
interrupted = False
async def run_test(test: SingleTestRunner,
name: str, index: int) -> None:
async with semaphore:
if interrupted or (self.options.repeat > 1 and self.fail_count):
return
res = await test.run()
self.process_test_result(res)
self.print_stats(test_count, name_max_len, tests, name, res, index)
def test_done(f: asyncio.Future) -> None:
if not f.cancelled():
f.result()
futures.remove(f)
try:
del running_tests[f]
except KeyError:
pass
def cancel_one_test(warn: bool) -> None:
future = futures.popleft()
futures.append(future)
if warn:
mlog.warning('CTRL-C detected, interrupting {}'.format(running_tests[future]))
del running_tests[future]
future.cancel()
def sigterm_handler() -> None:
nonlocal interrupted
if interrupted:
return
interrupted = True
mlog.warning('Received SIGTERM, exiting')
while running_tests:
cancel_one_test(False)
def sigint_handler() -> None:
# We always pick the longest-running future that has not been cancelled
# If all the tests have been CTRL-C'ed, just stop
nonlocal interrupted
if interrupted:
return
if running_tests:
cancel_one_test(True)
else:
mlog.warning('CTRL-C detected, exiting')
interrupted = True
if sys.platform != 'win32':
asyncio.get_event_loop().add_signal_handler(signal.SIGINT, sigint_handler)
asyncio.get_event_loop().add_signal_handler(signal.SIGTERM, sigterm_handler)
try:
for _ in range(self.options.repeat):
for i, test in enumerate(tests, 1):
visible_name = self.get_pretty_suite(test)
single_test = self.get_test_runner(test)
if not test.is_parallel or single_test.options.gdb:
await complete_all(futures)
future = asyncio.ensure_future(run_test(single_test, visible_name, i))
futures.append(future)
running_tests[future] = visible_name
future.add_done_callback(test_done)
if not test.is_parallel or single_test.options.gdb:
await complete(future)
if self.options.repeat > 1 and self.fail_count:
break
await complete_all(futures)
self.print_collected_logs()
self.print_summary()
if self.logfilename:
print('Full log written to {}'.format(self.logfilename))
finally:
if sys.platform != 'win32':
asyncio.get_event_loop().remove_signal_handler(signal.SIGINT)
asyncio.get_event_loop().remove_signal_handler(signal.SIGTERM)
os.chdir(startdir)
def list_tests(th: TestHarness) -> bool:
tests = th.get_tests()
for t in tests:
print(th.get_pretty_suite(t))
return not tests
def rebuild_all(wd: str) -> bool:
if not (Path(wd) / 'build.ninja').is_file():
print('Only ninja backend is supported to rebuild tests before running them.')
return True
ninja = environment.detect_ninja()
if not ninja:
print("Can't find ninja, can't rebuild test.")
return False
ret = subprocess.run(ninja + ['-C', wd]).returncode
if ret != 0:
print('Could not rebuild {}'.format(wd))
return False
return True
def run(options: argparse.Namespace) -> int:
if options.benchmark:
options.num_processes = 1
if options.verbose and options.quiet:
print('Can not be both quiet and verbose at the same time.')
return 1
check_bin = None
if options.gdb:
options.verbose = True
if options.wrapper:
print('Must not specify both a wrapper and gdb at the same time.')
return 1
check_bin = 'gdb'
if options.wrapper:
check_bin = options.wrapper[0]
if sys.platform == 'win32':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
if check_bin is not None:
exe = ExternalProgram(check_bin, silent=True)
if not exe.found():
print('Could not find requested program: {!r}'.format(check_bin))
return 1
if not options.list and not options.no_rebuild:
if not rebuild_all(options.wd):
# We return 125 here in case the build failed.
# The reason is that exit code 125 tells `git bisect run` that the current commit should be skipped.
# Thus users can directly use `meson test` to bisect without needing to handle the does-not-build case separately in a wrapper script.
return 125
with TestHarness(options) as th:
try:
if options.list:
return list_tests(th)
return th.doit()
except TestException as e:
print('Meson test encountered an error:\n')
if os.environ.get('MESON_FORCE_BACKTRACE'):
raise e
else:
print(e)
return 1
def run_with_args(args: T.List[str]) -> int:
parser = argparse.ArgumentParser(prog='meson test')
add_arguments(parser)
options = parser.parse_args(args)
return run(options)
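# Usage sketch (hypothetical entry point, not part of the original module; the
# flag names are assumed from the `meson test` CLI):
# if __name__ == '__main__':
#     sys.exit(run_with_args(['--print-errorlogs', '--num-processes', '4']))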
|
the-stack_106_15372
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import ast
from sys import version_info
if version_info >= (3, 8):
ast_Num = ast_Str = ast_Bytes = ast_NameConstant = ast_Ellipsis = ast.Constant
else:
ast_Num = ast.Num
ast_Str = ast.Str
ast_Bytes = ast.Bytes
ast_NameConstant = ast.NameConstant
ast_Ellipsis = ast.Ellipsis
|
the-stack_106_15376
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import argrelextrema
from PyroPara.filter import Filter
class STAfile:
def __init__(
self, *, path: str, beta: float = None, filter: Filter = None
) -> None:
self._df: pd.DataFrame = None
self.is_processed = False
self.filter = filter
self.path = path
self.beta = beta
self.local_minima: list = []
def load(self):
"""Load a STAfile data as pandas dataframe.
Args:
path (str): Path to a file.
"""
self._df = pd.read_csv(
self.path, sep=",", encoding="cp1250", skiprows=34
)
self._df.rename(
columns={
self._df.columns[0]: "temperature",
self._df.columns[3]: "mass",
self._df.columns[1]: "time",
},
inplace=True,
)
self._df.temperature += 273.15
self._df.mass /= 100
def process(self):
"""Calculates and filters first and second derivatives of df.mass array
Args:
Filter (Class): instance of Filter class
"""
# 1. TG
self._df["mass_filtered"] = self.filter.apply(
self._df.time, self._df.mass
)
# 2. DTG
self._df["mass_diff_unfiltered"] = -np.gradient(
self._df.mass_filtered, self._df.time
)
self._df["mass_diff_filtered"] = self.filter.apply(
self._df.time, self._df.mass_diff_unfiltered
)
# 3. DDTG
self._df["mass_diff2_unfiltered"] = abs(
np.gradient(self._df.mass_diff_filtered, self._df.time)
)
self._df["mass_diff2_filtered"] = abs(
self.filter.apply(self._df.time, self._df.mass_diff2_unfiltered)
)
self.is_processed = True
def calculate_local_minima(
self,
*,
minorder: int = 7,
min_temp: float = 500,
max_temp: float = 750,
):
"""Calculates <= 10 local minima in DDTG curve for a given
stafile, appends to stafile.local_minima
Args:
minorder (int, optional): Number of nearby points to consider
for possible local minimum. Defaults to 7.
min_temp (float, optional): Lower bound of temperature interval.
Defaults to 500.
max_temp (float, optional): Upper bound of temperature interval.
Defaults to 750.
"""
if not self.is_processed:
self.process()
self.local_minima.clear()
points = []
while True:
points = self.find_local_minima(minorder, min_temp, max_temp)
if len(points) < 11 and len(points) > 0:
break
            minorder += 1
self.local_minima.extend(points)
def find_local_minima(
self,
minorder: int,
min_temp: int,
max_temp: int,
):
"""Calculates local minima for given DDTG stafile with fixed parameters
Args:
minorder (int): Number of nearby points to consider
for possible local minimum.
min_temp (int): Lower bound of temperature interval.
max_temp (int): Upper bound of temperature interval.
        Returns:
            list: (temperature, mass_diff2) tuples for each local minimum
                found within the given temperature interval.
"""
mass_array = self._df.mass_diff2_filtered.to_numpy()
temperature_array = self._df.temperature
points = []
temp_minima = argrelextrema(mass_array, np.less_equal, order=minorder)
for min_ in temp_minima[0]:
if (
temperature_array[min_] >= min_temp
and temperature_array[min_] <= max_temp
):
tup = (temperature_array[min_], mass_array[min_])
points.append(tup)
return points
def plot(self):
        y_filtered = self._df.mass_filtered
        x = self._df.temperature
        y = self._df.mass
        plt.figure()
        # Filtered TG curve drawn over the raw (unfiltered) mass data
        plt.plot(x, y_filtered, "-")
        plt.plot(
            x,
            y,
            "r",
            alpha=0.3,
        )
        plt.title(f"{self.path}")
        plt.xlabel("Temperature (K)")
        plt.ylabel("MSL (0-1)")
# Plot of local minima points/lines #
temperature, mass = np.array(self.local_minima).T
plt.plot(temperature, mass, "kx")
plt.vlines(
temperature,
0,
max(y),
"k",
alpha=0.7,
)
plt.legend()
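# Usage sketch (the path, beta and the Filter() arguments are illustrative
# assumptions, not taken from this module):
# sta = STAfile(path="data/sample_10K_min.csv", beta=10.0, filter=Filter())
# sta.load()
# sta.calculate_local_minima(minorder=7, min_temp=500, max_temp=750)  # runs process() if needed
# sta.plot()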
|
the-stack_106_15377
|
import re
from heapinspect.common import u64
from heapinspect.common import u32
from heapinspect.common import p64
from heapinspect.common import p32
class C_Struct(object):
    '''This class handles memory dumps of a C structure.
Attributes:
_arch (str): '32' or '64'.
_vars (list): List of vars.
_dict (dict): Dict of vars.
_addr (ind): The start address of the memory dump.
other (str): Defined by `_code`.
Args:
code (str): The core of C_Struct, check example.
arch (:obj:str, optional): The arch of target. '64' by default.
Also support '32'.
endian (:obj:`str`, optional): The endian. 'little' by default.
Not support 'big' yet.
Raises:
NotImplementedError: If arch is not '64' or '32'.
Examples:
>>> code = 'struct test{int a; char b[10];}'
>>> test = C_Struct(code)
>>> print(test._vars)
[('int', 'a', 1), ('char', 'b', 10)]
>>> print(test.dict)
{'a': {'memdump': '', 'num': 1, 'typ': 'int'},
'b': {'memdump': '', 'num': 10, 'typ': 'char'}}
'''
def __init__(self, code, arch='64', endian='little'):
if arch == '64':
self._typ2size = {
'bool': 1,
'byte': 1,
'char': 1,
'uint16_t': 2,
'int': 4,
'ptr': 8,
'size_t': 8
}
elif arch == '32':
self._typ2size = {
'bool': 1,
'byte': 1,
'char': 1,
'int': 4,
'ptr': 4,
'size_t': 4
}
else:
raise NotImplementedError("Not supported arch for C_Struct")
self._arch = arch
self._endian = endian
self._code = code
        self._struct_name = \
            re.search(r'^\s*struct\s+(\w+)\s*{', code).groups()[0]
        self._vars = []
        for v in re.findall(r'\s*(\w*)\ (\w*)\[?(\d+)?\]?;', code):
typ, name, num = v
if num == '':
num = int(1)
else:
num = int(num)
self._vars.append((typ, name, num))
self._dict = {}
for v in self._vars:
typ, name, num = v
self._dict[name] = {"typ": typ, "memdump": None, "num": num}
self._addr = 0
self._mem = None
@property
def _size(self):
'''int: The size of the structure.
'''
size = 0
for v in self._vars:
typ, name, num = v
size += self._typ2size[typ] * num
return size
def _offset(self, var):
'''Get the offset of a var.
Args:
            var (str): Var name. Also supports indexing like 'list[10]'.
Return:
int: The offset of the var.
'''
offset = 0
        var_name, var_index = re.findall(r'^(\w*)\[?(\d+)?\]?$', var)[0]
if var_index == '':
var_index = 0
else:
var_index = int(var_index)
for v in self._vars:
typ, name, num = v
if name == var_name:
offset += var_index * self._typ2size[typ]
break
offset += self._typ2size[typ] * num
return offset
def _addrof(self, var):
'''Get the address of a var.
Args:
            var (str): Var name. Also supports indexing like 'list[10]'.
Return:
int: The address of the var.
'''
return self._addr + self._offset(var)
def _sizeof(self, var):
'''Get the size of a var.
Note:
If var is a list name without index,
            the entire list size is returned.
Args:
            var (str): Var name. Also supports indexing like 'list[10]'.
Return:
int: The size of the var.
'''
        var_name, var_index = re.findall(r'^(\w*)\[?(\d+)?\]?$', var)[0]
typ = self._dict[var_name]['typ']
num = self._dict[var_name]['num']
if var_index == '': # get total size
return self._typ2size[typ] * num
else:
return self._typ2size[typ] # get one size
def _init(self, memdump, addr=0):
'''Method to initialize the structure.
Args:
memdump (str): The memory dump of the struture.
addr (int): The start address of the memory dump.
'''
# assert len(memdump) >= self.size
        if len(memdump) < self._size:
            if isinstance(memdump, str):
                try:
                    memdump = bytes(memdump, 'utf-8')
                except (TypeError, UnicodeEncodeError):
                    pass
            # ljust returns a new bytes object, so the result must be kept
            memdump = memdump.ljust(self._size, b'\0')
for v in self._vars:
typ, name, num = v
offset = self._offset(name)
size = self._sizeof(name)
self._dict[name]['memdump'] = memdump[offset:offset+size]
self._mem = memdump
self._addr = addr
def _copy(self):
        '''Copy method of C_Struct.
'''
new_obj = C_Struct(self._code, self._arch, self._endian)
new_obj._init(self._mem, self._addr)
return new_obj
def _new(self, memdump, addr=0):
'''Generating new instance of the structure.
Note:
This is important method of C_Struct. Normally a C_Struct
is used as generator, and use this method to genrate other
concret instances with memdump and address.
Args:
memdump (str): Memory dump of the structure.
addr (int): Address of the structure.
Returns:
C_Struct: The new instance.
'''
new_obj = C_Struct(self._code, self._arch, self._endian)
new_obj._init(memdump, addr)
return new_obj
def __getattr__(self, var_name):
'''Get the value of a var in the structure.
Note:
Default methods and vars of C_Struct start with '_'.
Args:
var_name (str): The var name.
Returns:
int or str or list: size_t, int and ptr will return a number.
            Others will return their memdump. Arrays will return a list.
'''
if var_name in self._dict:
typ = self._dict[var_name]['typ']
num = self._dict[var_name]['num']
memdump = self._dict[var_name]['memdump']
a_size = self._typ2size[typ]
unpack = str
if typ == 'int':
unpack = u32
elif (typ == 'size_t' or typ == 'ptr') and self._arch == '32':
unpack = u32
elif (typ == 'size_t' or typ == 'ptr') and self._arch == '64':
unpack = u64
if num > 1:
result = []
for i in range(num):
mem = memdump[i*a_size:i*a_size+a_size]
result.append(unpack(mem))
return result
else:
return unpack(memdump)
else:
return None
malloc_state_struct_new_64 = '''
struct malloc_state
{
int mutex;
int flags;
int have_fastchunks;
int align;
ptr fastbinsY[10];
ptr top;
ptr last_remainder;
ptr bins[254];
int binmap[4];
ptr next;
ptr next_free;
size_t attached_threads;
size_t system_mem;
size_t max_system_mem;
}
'''
'''str: malloc_state of glibc 2.27+ 64bit
'''
malloc_state_struct_new_32 = '''
struct malloc_state
{
int mutex;
int flags;
int have_fastchunks;
ptr fastbinsY[11];
ptr top;
ptr last_remainder;
ptr bins[254];
int binmap[4];
ptr next;
ptr next_free;
size_t attached_threads;
size_t system_mem;
size_t max_system_mem;
}
'''
'''str: malloc_state of glibc 2.27+ 32bit
'''
malloc_state_struct_old = '''
struct malloc_state
{
int mutex;
int flags;
ptr fastbinsY[10];
ptr top;
ptr last_remainder;
ptr bins[254];
int binmap[4];
ptr next;
ptr next_free;
size_t attached_threads;
size_t system_mem;
size_t max_system_mem;
}
'''
'''str: malloc_state of glibc 2.19 - 2.26 64bit, 2.19 - 2.25 32bit.
'''
malloc_state_struct_26_32 = '''
struct malloc_state
{
int mutex;
int flags;
ptr fastbinsY[11];
ptr top;
ptr last_remainder;
ptr bins[254];
int binmap[4];
ptr next;
ptr next_free;
size_t attached_threads;
size_t system_mem;
size_t max_system_mem;
}
'''
'''str: malloc_state of glibc 2.26 32bit.
Note:
malloc_state.fastbinsY is different from other glibc.
'''
malloc_chunk_struct = '''
struct malloc_chunk
{
size_t prev_size;
size_t size;
ptr fd;
ptr bk;
ptr fd_nextsize;
ptr bk_nextsize;
}
'''
'''str: malloc_chunk
'''
tcache_perthread_struct = '''
struct tcache_perthread_struct
{
char counts[64];
ptr entries[64];
}
'''
'''str: tcache_perthread
'''
tcache_perthread_struct2 = '''
struct tcache_perthread_struct
{
uint16_t counts[64];
ptr entries[64];
}
'''
'''str: tcache_perthread
'''
def malloc_state_generator(version='2.27', arch='64'):
'''Generate C_Struct of malloc_state (arena).
Args:
version (str): glibc version, '2.19' - '2.28'.
arch (str): '64' or '32'
Returns:
C_Struct: The corresponding C_Struct.
'''
if arch == '32' and version == '2.26':
return C_Struct(malloc_state_struct_26_32, arch)
elif version in ['2.27', '2.28']:
if arch == '64':
return C_Struct(malloc_state_struct_new_64, arch)
elif arch == '32':
return C_Struct(malloc_state_struct_new_32, arch)
else:
return C_Struct(malloc_state_struct_old, arch)
def malloc_chunk_generator(version='2.27', arch='64'):
'''Generate C_Struct of malloc_chunk.
Args:
version (str): glibc version, '2.19' - '2.28'.
arch (str): '64' or '32'
Returns:
C_Struct: The corresponding C_Struct.
'''
return C_Struct(malloc_chunk_struct, arch)
def tcache_struct_generator(version='2.27', arch='64'):
'''Generate C_Struct of tcache_perthread (tcache).
Args:
version (str): glibc version, '2.19' - '2.28'.
arch (str): '64' or '32'
Returns:
C_Struct: The corresponding C_Struct.
'''
if version in ["2.31"]:
return C_Struct(tcache_perthread_struct2, arch)
return C_Struct(tcache_perthread_struct, arch)
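# Usage sketch (the memory dump below is fabricated with p64 purely to show the
# generator/_new workflow; the address and field values are made up):
# chunk_gen = malloc_chunk_generator(version='2.27', arch='64')
# fake_chunk = p64(0) + p64(0x21) + p64(0x602030) + p64(0) + p64(0) + p64(0)
# chunk = chunk_gen._new(fake_chunk, addr=0x602000)
# print(hex(chunk.size), hex(chunk.fd))  # -> 0x21 0x602030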
|
the-stack_106_15378
|
# DADSA - Assignment 1
# Reece Benson
from os import system as call
from collections import OrderedDict
class Menu():
# Define the variables we will be using
_app = None
_menu = None
_current_menu = "main"
def __init__(self, app):
# Set our Application
self._app = app
def load(self):
# Define our Menu
self._menu = { }
# Create our Menu
self._menu['main'] = { "new_season": "New Season", "load_season": "Load Season" }
self._menu['new_season'] = { "ns_players": "Players", "ns_tournaments": "Tournaments", "ns_prizemoney": "Prize Money", "ns_difficulty": "Difficulty", "back": "Back" }
        self._menu['back'] = lambda: self.go_back()
self._menu['ns_players'] = { "ns_viewplayers": "View Players", "ns_viewplayer": "View Player", "back": "Back" }
self._menu['ns_tournaments'] = { "ns_viewtournaments": "Example Tournament 1", "back": "Back" }
self._menu['ns_prizemoney'] = { "ns_setprizemoney": "Set Prize Money", "ns_viewprizemoney": "View Prize Money", "back": "Back" }
self._menu['ns_difficulty'] = { "ns_setdifficulty": "Set Difficulty", "ns_viewdifficulty": "View Difficulty", "back": "Back" }
self._menu['load_season'] = { }
# Append our Seasons to the "Load Season" Menu
for seasonId in self._app.handler.get_seasons():
season = self._app.handler.get_season(seasonId)
self._menu['load_season'].update({ "ls_"+str(seasonId): season.name() })
# Create our menu option for loading a season
self._menu['ls_'+str(seasonId)] = { "back": "Back" }
self._menu["load_season"].update({ "back": "Back" })
# Display our Menu
self.display("main")
def go_back(self):
print("attempt to go back")
def display(self, index = None, error = None):
# Clear our terminal window
#call("cls")
# Define our variables
cur_count = 0
menu_item = self.get_menu(index or "main")
# Error Handling
if(error != None):
print("\n", "Error!", error, "\n")
# Menu Title, set tree
print("Please select an option: ({})".format(index))
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Is the Menu Item a Function?
m_type = None
if(callable(self._menu[m])): m_type = ""
else: m_type = "->"
# Print our Menu Item
print("{0}. {1} {2}".format(menu_counter, menu_name, m_type))
# Get User Input
self.get_input()
def validate_menu(self, index):
try:
menu_name = [ (v) for k,v in enumerate(self._menu) if(k == index) ][0]
return menu_name
except IndexError:
return None
def get_menu(self, menu_name):
# Check our Menu exists
if(not menu_name in self._menu):
return None
else:
return self._menu[menu_name]
def menu_exists(self, index):
# Find our indexed menu
menu_item = self.get_menu(self._current_menu)
menu_found = None
menu_counter = 0
for m in menu_item:
# Get our menu name
menu_name = menu_item[m]
# Increase our Counter
menu_counter += 1
# Has our menu been found?
if(menu_counter == index):
print("-- menu found")
# Check if it's a function or a submenu
if(callable(self._menu[m])):
# Call our function
print("-- function call")
menu_found = self._menu[m]
else:
menu_found = m
return menu_found
def get_input(self):
# Wrap this in a try/except to validate any errors with input
try:
            # Get the user's input as a string first so "exit"/"" can be detected
            raw = input('>>> ')
            # Validate some set input calls
            if(raw == "exit"):
                raise KeyboardInterrupt
            elif(raw == ""):
                return self.display(None, "Please select a valid option!")
            resp = int(raw)
# Validate input from current menu
menu_selected = self.menu_exists(resp)
if(menu_selected != None and callable(menu_selected) != True):
self._current_menu = menu_selected
self.display(menu_selected)
print(menu_selected)
elif(callable(menu_selected)):
print("exec func")
else:
print("no menu", resp)
except KeyboardInterrupt:
self._app.exit()
except ValueError:
self.display(None, "Please select a valid option!")
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action")
|
the-stack_106_15382
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Lighting(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "mesh3d"
_path_str = "mesh3d.lighting"
_valid_props = {
"ambient",
"diffuse",
"facenormalsepsilon",
"fresnel",
"roughness",
"specular",
"vertexnormalsepsilon",
}
# ambient
# -------
@property
def ambient(self):
"""
Ambient light increases overall color visibility but can wash
out the image.
The 'ambient' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["ambient"]
@ambient.setter
def ambient(self, val):
self["ambient"] = val
# diffuse
# -------
@property
def diffuse(self):
"""
Represents the extent that incident rays are reflected in a
range of angles.
The 'diffuse' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["diffuse"]
@diffuse.setter
def diffuse(self, val):
self["diffuse"] = val
# facenormalsepsilon
# ------------------
@property
def facenormalsepsilon(self):
"""
Epsilon for face normals calculation avoids math issues arising
from degenerate geometry.
The 'facenormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["facenormalsepsilon"]
@facenormalsepsilon.setter
def facenormalsepsilon(self, val):
self["facenormalsepsilon"] = val
# fresnel
# -------
@property
def fresnel(self):
"""
Represents the reflectance as a dependency of the viewing
angle; e.g. paper is reflective when viewing it from the edge
of the paper (almost 90 degrees), causing shine.
The 'fresnel' property is a number and may be specified as:
- An int or float in the interval [0, 5]
Returns
-------
int|float
"""
return self["fresnel"]
@fresnel.setter
def fresnel(self, val):
self["fresnel"] = val
# roughness
# ---------
@property
def roughness(self):
"""
Alters specular reflection; the rougher the surface, the wider
and less contrasty the shine.
The 'roughness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["roughness"]
@roughness.setter
def roughness(self, val):
self["roughness"] = val
# specular
# --------
@property
def specular(self):
"""
Represents the level that incident rays are reflected in a
single direction, causing shine.
The 'specular' property is a number and may be specified as:
- An int or float in the interval [0, 2]
Returns
-------
int|float
"""
return self["specular"]
@specular.setter
def specular(self, val):
self["specular"] = val
# vertexnormalsepsilon
# --------------------
@property
def vertexnormalsepsilon(self):
"""
Epsilon for vertex normals calculation avoids math issues
arising from degenerate geometry.
The 'vertexnormalsepsilon' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["vertexnormalsepsilon"]
@vertexnormalsepsilon.setter
def vertexnormalsepsilon(self, val):
self["vertexnormalsepsilon"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
"""
def __init__(
self,
arg=None,
ambient=None,
diffuse=None,
facenormalsepsilon=None,
fresnel=None,
roughness=None,
specular=None,
vertexnormalsepsilon=None,
**kwargs,
):
"""
Construct a new Lighting object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.mesh3d.Lighting`
ambient
Ambient light increases overall color visibility but
can wash out the image.
diffuse
Represents the extent that incident rays are reflected
in a range of angles.
facenormalsepsilon
Epsilon for face normals calculation avoids math issues
arising from degenerate geometry.
fresnel
Represents the reflectance as a dependency of the
viewing angle; e.g. paper is reflective when viewing it
from the edge of the paper (almost 90 degrees), causing
shine.
roughness
Alters specular reflection; the rougher the surface,
the wider and less contrasty the shine.
specular
Represents the level that incident rays are reflected
in a single direction, causing shine.
vertexnormalsepsilon
Epsilon for vertex normals calculation avoids math
issues arising from degenerate geometry.
Returns
-------
Lighting
"""
super(Lighting, self).__init__("lighting")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.mesh3d.Lighting
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.Lighting`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("ambient", None)
_v = ambient if ambient is not None else _v
if _v is not None:
self["ambient"] = _v
_v = arg.pop("diffuse", None)
_v = diffuse if diffuse is not None else _v
if _v is not None:
self["diffuse"] = _v
_v = arg.pop("facenormalsepsilon", None)
_v = facenormalsepsilon if facenormalsepsilon is not None else _v
if _v is not None:
self["facenormalsepsilon"] = _v
_v = arg.pop("fresnel", None)
_v = fresnel if fresnel is not None else _v
if _v is not None:
self["fresnel"] = _v
_v = arg.pop("roughness", None)
_v = roughness if roughness is not None else _v
if _v is not None:
self["roughness"] = _v
_v = arg.pop("specular", None)
_v = specular if specular is not None else _v
if _v is not None:
self["specular"] = _v
_v = arg.pop("vertexnormalsepsilon", None)
_v = vertexnormalsepsilon if vertexnormalsepsilon is not None else _v
if _v is not None:
self["vertexnormalsepsilon"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
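# Usage sketch (values are illustrative and stay within the documented ranges):
# import plotly.graph_objects as go
# lighting = go.mesh3d.Lighting(ambient=0.4, diffuse=0.6, specular=0.3,
#                               roughness=0.8, fresnel=0.2)
# fig = go.Figure(go.Mesh3d(x=[0, 1, 2], y=[0, 1, 0], z=[0, 0, 1],
#                           i=[0], j=[1], k=[2], lighting=lighting))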
|
the-stack_106_15384
|
from Board import Board
from pyjava_ai import java_ai
from functools import partial
import tkinter as tk
from Virtual_board import VBoard
from move_calc import in_stalemate, in_checkmate
from time import sleep, time
EMPTY = ['0','0']
def run_visual():
#white ai:
ai1 = partial(java_ai, **{'N':4, 'post_strategy': 'kill,pawns'})
# black ai:
ai2 = partial(java_ai, **{'N':4, 'post_strategy': 'pawns'})
root = tk.Tk()
#root.protocol("WM_DELETE_WINDOW", quit_window())
b1 = Board(root, ai = ai1, ai2 = ai2)
root.mainloop()
def pre_score(board):
score = None
if in_checkmate(board, 'b'):
score = 1
elif in_checkmate(board, 'w'):
score = 0
elif in_stalemate(board):
score = .5
return score
def board_score(board):
white_points = 0
total_points = 0
value = {'p':1,'b':3,'n':3,'r':5,'q':9,'k':2}
ps = pre_score(board)
if ps is not None:
score = ps
else:
for i in range(8):
for j in range(8):
occ = board[i][j]
if occ != EMPTY:
if occ[0] == 'w':
white_points += value[occ[1]]
total_points += value[occ[1]]
score = white_points/total_points
return score
def run_battle(N1=2,N2=2, white = 'val', black='val', T = 1):
fname = f'game_results/{N1}{white}_V_{N2}{black}'
#white ai:
ai1 = partial(java_ai, **{'N':N1, 'scoring': white, 'verbose':False})
# black ai:
ai2 = partial(java_ai, **{'N':N2, 'scoring': black, 'verbose':False})
ais = {'w':ai1, 'b':ai2}
turn = 'w'
change = {'w':'b','b':'w'}
#print(board)
max_T = time() + 60*60*T # number of extra seconds to run
while time() < max_T:
board = VBoard()
n_moves = 0
for i in range(150):
n_moves = i
if in_checkmate(board, turn) or in_stalemate(board):
break
board = board.execute_move(*ais[turn](board, turn))
turn = change[turn]
#print(board)
print(board)
score = board_score(board)
print(score)
print()
with open(fname, 'a') as file:
file.write(f'{score:.3f}, {n_moves}\n')
if __name__ == "__main__":
import sys
    n1 = int(sys.argv[1])
    w = sys.argv[2]
    n2 = int(sys.argv[3])
    b = sys.argv[4]
t = sys.argv[5]
run_battle(N1=n1, N2 = n2, white=w, black=b, T=float(t))
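# Example invocation (module filename and scoring names are illustrative;
# arguments are N1, white scoring, N2, black scoring, hours to run):
#   python run_battle.py 2 val 3 val 0.5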
|
the-stack_106_15386
|
import eight_mile.embeddings
from eight_mile.embeddings import *
from eight_mile.utils import exporter, optional_params, listify, idempotent_append, is_sequence
from baseline.utils import import_user_module, DEFAULT_DATA_CACHE
from eight_mile.downloads import AddonDownloader, EmbeddingDownloader
import logging
__all__ = []
__all__.extend(eight_mile.embeddings.__all__)
export = exporter(__all__)
logger = logging.getLogger("mead.layers")
MEAD_LAYERS_EMBEDDINGS = {}
MEAD_LAYERS_EMBEDDINGS_LOADERS = {}
@export
@optional_params
def register_embeddings(cls, name=None):
"""Register a function as a plug-in"""
if name is None:
name = cls.__name__
if name in MEAD_LAYERS_EMBEDDINGS:
raise Exception(
"Error: attempt to re-define previously registered handler {} (old: {}, new: {}) in registry".format(
name, MEAD_LAYERS_EMBEDDINGS[name], cls
)
)
MEAD_LAYERS_EMBEDDINGS[name] = cls
if hasattr(cls, "load"):
MEAD_LAYERS_EMBEDDINGS_LOADERS[name] = cls.load
return cls
@export
def create_embeddings(**kwargs):
embed_type = kwargs.get("embed_type", "default")
Constructor = MEAD_LAYERS_EMBEDDINGS.get(embed_type)
return Constructor(**kwargs)
MEAD_LAYERS_EMBEDDINGS_REDUCTION = {}
@export
@optional_params
def register_embeddings_reduction(cls, name=None):
"""Register a function as a plug-in"""
if name is None:
name = cls.__name__
if name in MEAD_LAYERS_EMBEDDINGS_REDUCTION:
raise Exception(
"Error: attempt to re-define previously registered handler {} (old: {}, new: {}) in registry".format(
name, MEAD_LAYERS_EMBEDDINGS_REDUCTION[name], cls
)
)
MEAD_LAYERS_EMBEDDINGS_REDUCTION[name] = cls
return cls
@export
def create_embeddings_reduction(**kwargs):
embed_type = kwargs.get("embed_reduction_type", "concat")
Constructor = MEAD_LAYERS_EMBEDDINGS_REDUCTION.get(embed_type)
if Constructor:
return Constructor(**kwargs)
return embed_type
def load_embeddings_overlay(global_embeddings_settings, embeddings_section, vocab, data_download_cache=DEFAULT_DATA_CACHE, name=None):
"""Creates a set of arbitrary sub-graph, DL-framework-specific embeddings by delegating to wired sub-module.
As part of this process, we take in an index of embeddings by name, a ``dict`` of ``Counter`` objects (keyed by
feature name), containing the number of times each token has been seen, and a `features` list which is a
sub-section of the mead config containing the `embeddings` section for each feature.
This method's job is to either create a sub-graph from a pretrained model, or to create a new random
initialized sub-graph, taking into account the input vocabulary counters. The embeddings model has control
to determine the actual word indices and sub-graph for the embeddings, both of which are returned from this
    method. If some sort of feature selection is
    performed, such as low count removal, it is handled via the delegated methods.
    :param global_embeddings_settings: The embeddings index passed to the mead driver
    :param embeddings_section: The `embeddings` sub-section for this feature in the mead config
    :param vocab: A known vocabulary (typically a ``Counter`` of token counts) for this feature
    :param data_download_cache: Directory used to cache downloaded embeddings and addons
    :param name: An optional name for the embeddings; defaults to the embedding label
    :return: Returns a ``dict`` containing the `embeddings` sub-graph and its `vocab`
"""
# Get the label out of the embeddings section in the features block of mead config
embed_label = embeddings_section.get('label', embeddings_section.get('labels'))
if name is None:
name = embed_label
# Get the type of embedding out of the embeddings section in the features block of mead config
embed_type = embeddings_section.get('type', 'default')
is_stacked = is_sequence(embed_label)
if is_stacked:
if embed_type != 'default':
logger.warning("You have requested a stack of pretrained embeddings but didnt request 'default' or representation")
# Backwards compat, copy from main block if not present locally
embeddings_section['unif'] = embeddings_section.get('unif', 0.1)
# Backwards compat, copy from main block if not present locally
embeddings_section['keep_unused'] = embeddings_section.get('keep_unused', False)
# Overlay any backend parameters
# Also, if we are in eager mode, we might have to place the embeddings explicitly on the CPU
embeddings_section['cpu_placement'] = bool(embeddings_section.get('cpu_placement', False))
if embed_label is not None:
# Allow local overrides to uniform initializer
embed_labels = listify(embed_label)
embed_files = []
for embed_label in embed_labels:
embeddings_global_config_i = global_embeddings_settings[embed_label]
if 'type' in embeddings_global_config_i:
embed_type_i = embeddings_global_config_i['type']
embed_type = embed_type_i
if embed_type_i != 'default' and is_stacked:
raise Exception("Stacking embeddings only works for 'default' pretrained word embeddings")
embed_file = embeddings_global_config_i.get('file')
unzip_file = embeddings_global_config_i.get('unzip', True)
embed_dsz = embeddings_global_config_i['dsz']
embed_sha1 = embeddings_global_config_i.get('sha1')
# Should we grab vocab here too?
embed_model = embeddings_global_config_i.get('model', {})
if 'dsz' not in embed_model and not is_stacked:
embed_model['dsz'] = embed_dsz
embeddings_section = {**embed_model, **embeddings_section}
try:
if embed_file:
embed_file = EmbeddingDownloader(embed_file, embed_dsz, embed_sha1, data_download_cache, unzip_file=unzip_file).download()
embed_files.append(embed_file)
else:
embed_files.append(None)
except Exception as e:
if is_stacked:
raise e
logger.warning(f"We were not able to download {embed_file}, passing to the addon")
embed_files.append(embed_file)
# If we have stacked embeddings (which only works with `default` model, we need to pass the list
# If not, grab the first item
embed_file = embed_files if is_stacked else embed_files[0]
embedding_bundle = load_embeddings(name, embed_file=embed_file, known_vocab=vocab, embed_type=embed_type,
data_download_cache=data_download_cache,
**embeddings_section)
else: # if there is no label given, assume we need random initialization vectors
dsz = embeddings_section.pop('dsz')
embedding_bundle = load_embeddings(name,
dsz=dsz,
known_vocab=vocab,
embed_type=embed_type,
data_download_cache=data_download_cache,
**embeddings_section)
return embedding_bundle
@export
def load_embeddings(name, **kwargs):
"""This method negotiates loading an embeddings sub-graph AND a corresponding vocabulary (lookup from word to int)
Embeddings and their addons may be downloaded from an http `GET` either via raw URL or using hub notation
(hub:v1:embeddings/hub:v1:addons)
This function behaves differently depending on its keyword arguments and the `embed_type`.
If the registered embeddings class contains a load method on it and we are given an `embed_file`,
we will assume that we need to load that file, and that the embeddings object wants its own load function used
for that. This would be typical, e.g, for a user-defined sub-graph LM.
For cases where no `embed_file` is provided and there is a `create` method on this class, we assume that the user
wants us to build a VSM (`baseline.embeddings.PretrainedEmbeddingsModel`) ourselves, and call
their create function, which will take in this VSM.
The VSM is then used to provide the vocabulary back, and the `create` function invokes the class constructor
with the sub-parts of VSM required to build the graph.
If there is no create method provided, and there is no load function provided, we simply invoke the
registered embeddings' constructor with the args, and assume there is a `get_vocab()` method on the
provided implementation
:param name: A unique string name for these embeddings
:param kwargs:
:Keyword Arguments:
* *embed_type* The key identifying the embedding type in the registry
:return:
"""
embed_type = kwargs.pop("embed_type", "default")
# Dynamically load a module if its needed
for module in listify(kwargs.get('module', kwargs.get('modules', []))):
import_user_module(module, kwargs.get('data_download_cache'))
embeddings_cls = MEAD_LAYERS_EMBEDDINGS[embed_type]
filename = kwargs.get("embed_file")
# If the embedding model has a load function, defer all the work to that. Basically just pass the kwargs in
# and let it do its magic
if hasattr(embeddings_cls, "load") and filename is not None:
model = embeddings_cls.load(filename, **kwargs)
return {"embeddings": model, "vocab": model.get_vocab()}
    # If there isn't a load function, there must be a create() function where the first arg is a type of
    # EmbeddingsModel
elif hasattr(embeddings_cls, "create"):
unif = kwargs.pop("unif", 0.1)
known_vocab = kwargs.pop("known_vocab", None)
keep_unused = kwargs.pop("keep_unused", False)
normalize = kwargs.pop("normalized", False)
preserve_vocab_indices = bool(kwargs.get('preserve_vocab_indices', False))
# if there is no filename, use random-init model
if filename is None:
dsz = kwargs.pop("dsz")
model = RandomInitVecModel(dsz, known_vocab=known_vocab, unif_weight=unif, counts=not preserve_vocab_indices)
        # If there is, use the PretrainedEmbeddingsModel loader
else:
if is_sequence(filename) or preserve_vocab_indices:
model = PretrainedEmbeddingsStack(
listify(filename),
known_vocab=known_vocab,
normalize=normalize,
counts=not preserve_vocab_indices,
**kwargs
)
else:
model = PretrainedEmbeddingsModel(
filename,
known_vocab=known_vocab,
unif_weight=unif,
keep_unused=keep_unused,
normalize=normalize,
**kwargs,
)
# Then call create(model, name, **kwargs)
return {"embeddings": embeddings_cls.create(model, name, **kwargs), "vocab": model.get_vocab()}
    # If we don't have a load function, but filename is None, we should just instantiate the class
model = embeddings_cls(name, **kwargs)
return {"embeddings": model, "vocab": model.get_vocab()}
|
the-stack_106_15387
|
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import paramiko
from django.utils import six
from django.utils.encoding import force_str
from djblets.util.decorators import cached_property
from reviewboard.ssh.client import SSHClient
from reviewboard.ssh.errors import UnsupportedSSHKeyError
from reviewboard.ssh.storage import FileSSHStorage
from reviewboard.ssh.utils import humanize_key
from reviewboard.testing.testcase import TestCase
rsa_key_blob = """
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQCsElbDaXtsLctXBQVu8N55ptQ3s1IDBP1nqL/0J3+L70DMjRXa
tVB9uPeZOPDJrWgu7Gn2k48oRkGdl+9+WtnaNvgb6jC9TU4gNXKtBeq/Q/NQgtLs
jBnhczMC90PnM+Bvq6TyDaufXqYP7w8Xk1TsW7nz58HMsIPOEA8Ajx+3PwIDAQAB
AoGAXgjbn4j6qSDRmemlkX5Spnq0SQhXTk0gytBermgTfP6wE9kaU1548Wvu665B
cIWyhMowEk+LkX/rhdstR4kQuhkgGtkO78YLjqmHPHuMYRn4Ea/1xdSYA1qOnLWR
GNbnnvYY9/YR5KhsmFbuG5wfA2V0Bw3ULm02jgGuCV7Y5okCQQDgN4md1qXmkO9S
XgfftE1r4ByqFwWzzFRTAFEFN2jULUwmM3B+L+1MUGjuKBk/tjfdv7QBPRJoO6xz
peG00nHNAkEAxHaIyIaaK9ajrke+tiSDZYs9HCnYHiVH2+3hg1vTHIrgO8VkjA93
A40Qaol+7dKzsC5TPll3k2uGnY+lo/RxOwJAI+RgEDc7KXSMCvhodEQNnLYsgIHc
9NJBsWO8lIQxML3rkbXsTRbo+q1ojq82k39c5A97BjO7jZn32i90uRhzBQJBALcQ
KHaJjeDpeM1thtRcA5+79a5ngzzbyjCxYSAwkO+YrEalsQIdau2BJVnQUtiyK8Mv
91syrIxOdjoc3uB+Zn8CQQCpvbEXIU/76MH/yDmgOk4+R8qo/yU6cgn7PTCWzGL7
SK+fSBGKFq+n2FxQIt9OWswQ+wbvq9jmJmLCGxuUSMPu
-----END RSA PRIVATE KEY-----
"""
dsa_key_blob = """
-----BEGIN DSA PRIVATE KEY-----
MIIBugIBAAKBgQDddn3Hr3guZXLlmRLlneT0HSUa3gx3dYVCMr/b7UXu7gMxG919
C6Tzjk300tgxDpTnmq1OVwoQA44tIFYlxvw9KnxttnPe+Ny7nocGDApBXMLfaZLN
QbAlsBxTEVPB6CxtF9srVs3SXNbQddGI/PidEK00Fe1jwNnv0aC43LCFFwIVAM/d
qNnjATC1+ub/4dwnbO4sL2zlAoGAVM/g9ePoFxdldGKh40SaNHjkSw9GMo72HioD
KkSBNJ2Es/8ppX6Wkgi3WWZNsMruTTnVyWPqPIPpt58yqyMYtqSVVmoK7ihyxbxW
dUtG9rrNwo9/OqfvUxGFYE0suBnNR29lKKlWT+Sk5Cjd+5BpGZ6ptaxgvkYDFkyX
JrWBXzUCgYA0u51vP+h9InIxYxAr64Y72rungv/2Y409vvEbnBDK42na8SJ4fNZF
CUa4Y8KQ8bUaKyBbiXz/r+zbzA7D5kxsdBMeUmHjQhMIGiMxvGfPLw/9jWR2pcFH
DPCGtVEaccnAOCgOEfgRGq5MG/i0YCFj7AIdLQchGiUDVPJNFK8KNwIUUDs/Ac/t
NnIFhSieTpeXxmozkks=
-----END DSA PRIVATE KEY-----
"""
class TestKeys(object):
"""Keys used for unit tests.
This is used to access keys across any and all unit tests that need them,
in a way that reduces overhead by constructing each key only once and
only on first access, caching it for future lookups.
"""
@cached_property
def rsa_key(self):
"""A stable RSA key for testing."""
return paramiko.RSAKey.from_private_key(io.StringIO(rsa_key_blob))
@cached_property
def dsa_key(self):
"""A stable DSA key for testing."""
return paramiko.DSSKey.from_private_key(io.StringIO(dsa_key_blob))
@cached_property
def rsa_key_b64(self):
"""Base64 encoding for the RSA key."""
return test_keys.rsa_key.get_base64()
@cached_property
def dsa_key_b64(self):
"""Base64 encoding for the DSA key."""
return test_keys.dsa_key.get_base64()
test_keys = TestKeys()
class SSHTestCase(TestCase):
def setUp(self):
super(SSHTestCase, self).setUp()
self.old_home = os.getenv('HOME')
self.tempdir = None
os.environ[str('RBSSH_ALLOW_AGENT')] = str('0')
FileSSHStorage._ssh_dir = None
def tearDown(self):
super(SSHTestCase, self).tearDown()
self._set_home(self.old_home)
if self.tempdir:
shutil.rmtree(self.tempdir)
@property
def key1(self):
"""Legacy alias for TestKeys.rsa_key."""
return test_keys.rsa_key
@property
def key2(self):
"""Legacy alias for TestKeys.dsa_key."""
return test_keys.dsa_key
@property
def key1_b64(self):
"""Legacy alias for TestKeys.rsa_key_b64."""
return test_keys.rsa_key_b64
@property
def key2_b64(self):
"""Legacy alias for TestKeys.dsa_key_b64."""
return test_keys.dsa_key_b64
def _set_home(self, homedir):
os.environ[str('HOME')] = force_str(homedir)
class FileSSHStorageTests(SSHTestCase):
"""Unit tests for FileSSHStorage."""
def setUp(self):
super(FileSSHStorageTests, self).setUp()
self.tempdir = tempfile.mkdtemp(prefix='rb-tests-home-')
self._set_home(self.tempdir)
def test_get_ssh_dir_with_dot_ssh(self):
"""Testing FileSSHStorage.get_ssh_dir with ~/.ssh"""
sshdir = os.path.join(self.tempdir, '.ssh')
storage = FileSSHStorage()
self.assertEqual(storage.get_ssh_dir(), sshdir)
def test_get_ssh_dir_with_ssh(self):
"""Testing FileSSHStorage.get_ssh_dir with ~/ssh"""
sshdir = os.path.join(self.tempdir, 'ssh')
os.mkdir(sshdir, 0o700)
storage = FileSSHStorage()
self.assertEqual(storage.get_ssh_dir(), sshdir)
def test_get_ssh_dir_with_dot_ssh_and_localsite(self):
"""Testing FileSSHStorage.get_ssh_dir with ~/.ssh and localsite"""
sshdir = os.path.join(self.tempdir, '.ssh', 'site-1')
storage = FileSSHStorage(namespace='site-1')
self.assertEqual(storage.get_ssh_dir(), sshdir)
def test_get_ssh_dir_with_ssh_and_localsite(self):
"""Testing FileSSHStorage.get_ssh_dir with ~/ssh and localsite"""
sshdir = os.path.join(self.tempdir, 'ssh')
os.mkdir(sshdir, 0o700)
sshdir = os.path.join(sshdir, 'site-1')
storage = FileSSHStorage(namespace='site-1')
self.assertEqual(storage.get_ssh_dir(), sshdir)
def test_write_user_key_unsupported(self):
"""Testing FileSSHStorage.write_user_key with unsupported key type"""
class FakeKey(object):
pass
storage = FileSSHStorage()
self.assertRaises(UnsupportedSSHKeyError,
lambda: storage.write_user_key(FakeKey()))
def test_read_host_keys(self):
"""Testing FileSSHStorage.read_host_keys"""
storage = FileSSHStorage()
storage.ensure_ssh_dir()
line1 = 'host1 ssh-rsa %s' % test_keys.rsa_key_b64
line2 = 'host2 ssh-dss %s' % test_keys.dsa_key_b64
filename = storage.get_host_keys_filename()
with open(filename, 'w') as fp:
fp.write('%s\n' % line1)
fp.write('\n')
fp.write('# foo\n')
fp.write('%s \n' % line2)
lines = storage.read_host_keys()
self.assertEqual(len(lines), 2)
self.assertEqual(lines[0], line1)
self.assertEqual(lines[1], line2)
def test_add_host_key(self):
"""Testing FileSSHStorage.add_host_key"""
storage = FileSSHStorage()
storage.add_host_key('host1', test_keys.rsa_key)
filename = storage.get_host_keys_filename()
with open(filename, 'r') as fp:
lines = fp.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0],
'host1 ssh-rsa %s\n' % test_keys.rsa_key_b64)
def test_replace_host_key(self):
"""Testing FileSSHStorage.replace_host_key"""
storage = FileSSHStorage()
storage.add_host_key('host1', test_keys.rsa_key)
storage.replace_host_key('host1', test_keys.rsa_key,
test_keys.dsa_key)
filename = storage.get_host_keys_filename()
with open(filename, 'r') as fp:
lines = fp.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0],
'host1 ssh-dss %s\n' % test_keys.dsa_key_b64)
def test_replace_host_key_no_known_hosts(self):
"""Testing FileSSHStorage.replace_host_key with no known hosts file"""
storage = FileSSHStorage()
storage.replace_host_key('host1', test_keys.rsa_key, test_keys.dsa_key)
filename = storage.get_host_keys_filename()
with open(filename, 'r') as fp:
lines = fp.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0],
'host1 ssh-dss %s\n' % test_keys.dsa_key_b64)
class SSHClientTests(SSHTestCase):
"""Unit tests for SSHClient."""
def setUp(self):
super(SSHClientTests, self).setUp()
self.tempdir = tempfile.mkdtemp(prefix='rb-tests-home-')
def test_generate_user_key(self, namespace=None):
"""Testing SSHClient.generate_user_key"""
self._set_home(self.tempdir)
client = SSHClient(namespace=namespace)
key = client.generate_user_key(bits=1024)
key_file = os.path.join(client.storage.get_ssh_dir(), 'id_rsa')
self.assertTrue(os.path.exists(key_file))
self.assertEqual(client.get_user_key(), key)
def test_generate_user_key_with_localsite(self):
"""Testing SSHClient.generate_user_key with localsite"""
self.test_generate_user_key('site-1')
def test_delete_user_key(self, namespace=None):
"""Testing SSHClient.delete_user_key"""
self._set_home(self.tempdir)
client = SSHClient(namespace=namespace)
client.import_user_key(test_keys.rsa_key)
key_file = os.path.join(client.storage.get_ssh_dir(), 'id_rsa')
self.assertTrue(os.path.exists(key_file))
self.assertEqual(client.get_user_key(), test_keys.rsa_key)
client.delete_user_key()
self.assertFalse(os.path.exists(key_file))
def test_delete_user_key_with_localsite(self):
"""Testing SSHClient.delete_user_key with localsite"""
self.test_delete_user_key('site-1')
def test_add_host_key(self, namespace=None):
"""Testing SSHClient.add_host_key"""
self._set_home(self.tempdir)
client = SSHClient(namespace=namespace)
client.add_host_key('example.com', test_keys.rsa_key)
known_hosts_file = client.storage.get_host_keys_filename()
self.assertTrue(os.path.exists(known_hosts_file))
with open(known_hosts_file, 'r') as f:
lines = f.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].split(),
['example.com', test_keys.rsa_key.get_name(),
test_keys.rsa_key_b64])
def test_add_host_key_with_localsite(self):
"""Testing SSHClient.add_host_key with localsite"""
self.test_add_host_key('site-1')
def test_replace_host_key(self, namespace=None):
"""Testing SSHClient.replace_host_key"""
self._set_home(self.tempdir)
client = SSHClient(namespace=namespace)
client.add_host_key('example.com', test_keys.rsa_key)
client.replace_host_key('example.com', test_keys.rsa_key,
test_keys.dsa_key)
known_hosts_file = client.storage.get_host_keys_filename()
self.assertTrue(os.path.exists(known_hosts_file))
with open(known_hosts_file, 'r') as f:
lines = f.readlines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].split(),
['example.com', test_keys.dsa_key.get_name(),
test_keys.dsa_key_b64])
def test_replace_host_key_with_localsite(self):
"""Testing SSHClient.replace_host_key with localsite"""
self.test_replace_host_key('site-1')
def test_import_user_key(self, namespace=None):
"""Testing SSHClient.import_user_key"""
self._set_home(self.tempdir)
client = SSHClient(namespace=namespace)
client.import_user_key(test_keys.rsa_key)
self.assertEqual(client.get_user_key(), test_keys.rsa_key)
def test_import_user_key_with_localsite(self):
"""Testing SSHClient.import_user_key with localsite"""
self.test_import_user_key('site-1')
class UtilsTests(SSHTestCase):
"""Unit tests for reviewboard.ssh.utils."""
def test_humanize_key_with_rsa_key(self):
"""Testing humanize_key with RSA key"""
humanized = humanize_key(test_keys.rsa_key)
self.assertIsInstance(humanized, six.text_type)
self.assertEqual(humanized,
'76:ec:40:bd:69:9e:b1:e4:47:a9:e3:74:82:ec:0c:0f')
def test_humanize_key_with_dsa_key(self):
"""Testing humanize_key with DSA key"""
humanized = humanize_key(test_keys.dsa_key)
self.assertIsInstance(humanized, six.text_type)
self.assertEqual(humanized,
'62:4b:7f:b0:94:57:e2:bb:e7:d8:a4:88:88:c6:10:38')
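# Quick sketch outside the test classes (reuses the module-level test key; the
# output is the MD5-style colon-separated fingerprint asserted above):
# print(humanize_key(test_keys.rsa_key))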
|
the-stack_106_15388
|
# -*- coding: utf-8 -*-
'''
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import socket
import sys
import re
import platform
import logging
import locale
import uuid
from errno import EACCES, EPERM
import datetime
import warnings
import time
# pylint: disable=import-error
try:
import dateutil.tz
_DATEUTIL_TZ = True
except ImportError:
_DATEUTIL_TZ = False
__proxyenabled__ = ['*']
__FQDN__ = None
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of linux_distribution()
from platform import _supported_dists
_supported_dists += ('arch', 'mageia', 'meego', 'vmware', 'bluewhite64',
'slamd64', 'ovs', 'system', 'mint', 'oracle', 'void')
# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution as _deprecated_linux_distribution
def linux_distribution(**kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return _deprecated_linux_distribution(**kwargs)
except ImportError:
from distro import linux_distribution
# Import salt libs
import salt.exceptions
import salt.log
import salt.utils.dns
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
from salt.ext import six
from salt.ext.six.moves import range
if salt.utils.platform.is_windows():
import salt.utils.win_osinfo
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
import salt.modules.smbios
__salt__ = {
'cmd.run': salt.modules.cmdmod._run_quiet,
'cmd.retcode': salt.modules.cmdmod._retcode_quiet,
'cmd.run_all': salt.modules.cmdmod._run_all_quiet,
'smbios.records': salt.modules.smbios.records,
'smbios.get': salt.modules.smbios.get,
}
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.platform.is_windows():
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi # pylint: disable=import-error
import salt.utils.winapi
import win32api
import salt.utils.win_reg
HAS_WMI = True
except ImportError:
log.exception(
'Unable to import Python wmi module, some core grains '
'will be missing'
)
HAS_UNAME = True
if not hasattr(os, 'uname'):
HAS_UNAME = False
_INTERFACES = {}
def _windows_cpudata():
'''
Return some CPU information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString").get('vdata')
return grains
def _linux_cpudata():
'''
Return some CPU information for Linux minions
'''
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = '/proc/cpuinfo'
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.files.fopen(cpuinfo, 'r') as _fp:
for line in _fp:
comps = line.split(':')
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] = int(val) + 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
elif key == 'Features':
grains['cpu_flags'] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
elif key == 'Processor':
grains['cpu_model'] = val.split('-')[0]
grains['num_cpus'] = 1
if 'num_cpus' not in grains:
grains['num_cpus'] = 0
if 'cpu_model' not in grains:
grains['cpu_model'] = 'Unknown'
if 'cpu_flags' not in grains:
grains['cpu_flags'] = []
return grains
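# Example /proc/cpuinfo fragment parsed by _linux_cpudata() (x86 layout; ARM
# kernels expose 'Processor' and 'Features' instead, as handled above):
#   processor       : 0
#   model name      : Intel(R) Xeon(R) CPU E5-2676 v3 @ 2.40GHz
#   flags           : fpu vme de pse tsc msr pae mce ...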
def _linux_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
if __opts__.get('enable_lspci', True) is False:
return {}
if __opts__.get('enable_gpu_grains', True) is False:
return {}
lspci = salt.utils.path.which('lspci')
if not lspci:
log.debug(
'The `lspci` binary is not available on the system. GPU grains '
'will not be available.'
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpu_classes = ('vga compatible controller', '3d controller')
devs = []
try:
lspci_out = __salt__['cmd.run']('{0} -vmm'.format(lspci))
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append('')
for line in lspci_list:
# check for record-separating empty lines
if line == '':
if cur_dev.get('Class', '').lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
if re.match(r'^\w+:\s+.*', line):
key, val = line.split(':', 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug('Unexpected lspci output: \'%s\'', line)
if error:
log.warning(
'Error loading grains, unexpected linux_gpu_data output, '
'check that you have a valid shell configured and '
'permissions to run lspci command'
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = gpu['Vendor'].lower().split()
# default vendor to 'unknown', overwrite if we match a known one
vendor = 'unknown'
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({'vendor': vendor, 'model': gpu['Device']})
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if vendor_match:
gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _osx_gpudata():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
gpus = []
try:
pcictl_out = __salt__['cmd.run']('system_profiler SPDisplaysDataType')
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(': ')
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(' ')
vendor = vendor.lower()
gpus.append({'vendor': vendor, 'model': model})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains
def _bsd_cpudata(osdata):
'''
Return CPU information for BSD-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.path.which('sysctl')
arch = salt.utils.path.which('arch')
cmds = {}
if sysctl:
cmds.update({
'num_cpus': '{0} -n hw.ncpu'.format(sysctl),
'cpuarch': '{0} -n hw.machine'.format(sysctl),
'cpu_model': '{0} -n hw.model'.format(sysctl),
})
if arch and osdata['kernel'] == 'OpenBSD':
cmds['cpuarch'] = '{0} -s'.format(arch)
if osdata['kernel'] == 'Darwin':
cmds['cpu_model'] = '{0} -n machdep.cpu.brand_string'.format(sysctl)
cmds['cpu_flags'] = '{0} -n machdep.cpu.features'.format(sysctl)
grains = dict([(k, __salt__['cmd.run'](v)) for k, v in six.iteritems(cmds)])
if 'cpu_flags' in grains and isinstance(grains['cpu_flags'], six.string_types):
grains['cpu_flags'] = grains['cpu_flags'].split(' ')
if osdata['kernel'] == 'NetBSD':
grains['cpu_flags'] = []
for line in __salt__['cmd.run']('cpuctl identify 0').splitlines():
cpu_match = re.match(r'cpu[0-9]:\ features[0-9]?\ .+<(.+)>', line)
if cpu_match:
flag = cpu_match.group(1).split(',')
grains['cpu_flags'].extend(flag)
if osdata['kernel'] == 'FreeBSD' and os.path.isfile('/var/run/dmesg.boot'):
grains['cpu_flags'] = []
        # TODO: at least it needs to be tested for BSDs other than FreeBSD
with salt.utils.files.fopen('/var/run/dmesg.boot', 'r') as _fp:
cpu_here = False
for line in _fp:
if line.startswith('CPU: '):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(' '):
break # game over
if 'Features' in line:
start = line.find('<')
end = line.find('>')
if start > 0 and end > 0:
flag = line[start + 1:end].split(',')
grains['cpu_flags'].extend(flag)
try:
grains['num_cpus'] = int(grains['num_cpus'])
except ValueError:
grains['num_cpus'] = 1
return grains
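# Illustrative sketch only, not part of Salt's API: the FreeBSD
# ``/var/run/dmesg.boot`` "Features=<...>" extraction used above, applied to
# a single line. The function name and the sample line are hypothetical.
def _example_dmesg_cpu_flags(line):
    '''
    Return the comma-separated flags inside ``<...>`` on a Features line.
    '''
    start = line.find('<')
    end = line.find('>')
    if 'Features' in line and start > 0 and end > 0:
        return line[start + 1:end].split(',')
    return []
# _example_dmesg_cpu_flags('  Features=0x783fbff<FPU,VME,DE,PSE>')
# -> ['FPU', 'VME', 'DE', 'PSE']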
def _sunos_cpudata():
'''
Return the CPU information for Solaris-like systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains['cpu_flags'] = []
grains['cpuarch'] = __salt__['cmd.run']('isainfo -k')
psrinfo = '/usr/sbin/psrinfo 2>/dev/null'
grains['num_cpus'] = len(__salt__['cmd.run'](psrinfo, python_shell=True).splitlines())
kstat_info = 'kstat -p cpu_info:*:*:brand'
for line in __salt__['cmd.run'](kstat_info).splitlines():
match = re.match(r'(\w+:\d+:\w+\d+:\w+)\s+(.+)', line)
if match:
grains['cpu_model'] = match.group(2)
isainfo = 'isainfo -n -v'
for line in __salt__['cmd.run'](isainfo).splitlines():
match = re.match(r'^\s+(.+)', line)
if match:
cpu_flags = match.group(1).split()
grains['cpu_flags'].extend(cpu_flags)
return grains
def _aix_cpudata():
'''
Return CPU information for AIX systems
'''
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('cpuarch', r'(?im)^\s*Processor\s+Type:\s+(\S+)'),
('cpu_flags', r'(?im)^\s*Processor\s+Version:\s+(\S+)'),
('cpu_model', r'(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)'),
('num_cpus', r'(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains
def _linux_memdata():
'''
Return the memory information for Linux-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
meminfo = '/proc/meminfo'
if os.path.isfile(meminfo):
with salt.utils.files.fopen(meminfo, 'r') as ifile:
for line in ifile:
comps = line.rstrip('\n').split(':')
if not len(comps) > 1:
continue
if comps[0].strip() == 'MemTotal':
# Use floor division to force output to be an integer
grains['mem_total'] = int(comps[1].split()[0]) // 1024
if comps[0].strip() == 'SwapTotal':
# Use floor division to force output to be an integer
grains['swap_total'] = int(comps[1].split()[0]) // 1024
return grains
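# Illustrative sketch only, not part of Salt's API: the /proc/meminfo
# parsing above as a standalone helper. /proc/meminfo reports values in KiB,
# so floor division by 1024 yields whole MiB, the unit used by the mem_total
# and swap_total grains. The name and the sample value are hypothetical.
def _example_meminfo_to_mb(meminfo_text, field='MemTotal'):
    '''
    Return the requested /proc/meminfo field converted from KiB to MiB.
    '''
    for line in meminfo_text.splitlines():
        comps = line.split(':')
        if len(comps) > 1 and comps[0].strip() == field:
            return int(comps[1].split()[0]) // 1024
    return 0
# _example_meminfo_to_mb('MemTotal:       16333952 kB\n') -> 15951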
def _osx_memdata():
'''
Return the memory information for BSD-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
sysctl = salt.utils.path.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.memsize'.format(sysctl))
swap_total = __salt__['cmd.run']('{0} -n vm.swapusage'.format(sysctl)).split()[2].replace(',', '.')
if swap_total.endswith('K'):
_power = 2**10
elif swap_total.endswith('M'):
_power = 2**20
elif swap_total.endswith('G'):
_power = 2**30
swap_total = float(swap_total[:-1]) * _power
grains['mem_total'] = int(mem) // 1024 // 1024
grains['swap_total'] = int(swap_total) // 1024 // 1024
return grains
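# Illustrative sketch only, not part of Salt's API: the K/M/G suffix
# handling used above for macOS ``sysctl -n vm.swapusage`` values, as a
# standalone helper. It assumes the value carries one of those suffixes,
# as the code above does; the name and the sample value are hypothetical.
def _example_swapusage_to_mb(value):
    '''
    Convert a swapusage figure such as ``1024,00M`` to whole MiB.
    '''
    suffixes = {'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30}
    power = suffixes.get(value[-1], 1)
    swap_bytes = float(value[:-1].replace(',', '.')) * power
    return int(swap_bytes) // 1024 // 1024
# _example_swapusage_to_mb('1024,00M') -> 1024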
def _bsd_memdata(osdata):
'''
Return the memory information for BSD-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
sysctl = salt.utils.path.which('sysctl')
if sysctl:
mem = __salt__['cmd.run']('{0} -n hw.physmem'.format(sysctl))
if osdata['kernel'] == 'NetBSD' and mem.startswith('-'):
mem = __salt__['cmd.run']('{0} -n hw.physmem64'.format(sysctl))
grains['mem_total'] = int(mem) // 1024 // 1024
if osdata['kernel'] in ['OpenBSD', 'NetBSD']:
swapctl = salt.utils.path.which('swapctl')
swap_data = __salt__['cmd.run']('{0} -sk'.format(swapctl))
if swap_data == 'no swap devices configured':
swap_total = 0
else:
swap_total = swap_data.split(' ')[1]
else:
swap_total = __salt__['cmd.run']('{0} -n vm.swap_total'.format(sysctl))
grains['swap_total'] = int(swap_total) // 1024 // 1024
return grains
def _sunos_memdata():
'''
Return the memory information for SunOS-like systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
prtconf = '/usr/sbin/prtconf 2>/dev/null'
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = line.split(' ')
if comps[0].strip() == 'Memory' and comps[1].strip() == 'size:':
grains['mem_total'] = int(comps[2].strip())
swap_cmd = salt.utils.path.which('swap')
swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
try:
swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1])
swap_total = (swap_avail + swap_used) // 1024
except ValueError:
swap_total = None
grains['swap_total'] = swap_total
return grains
def _aix_memdata():
'''
Return the memory information for AIX systems
'''
grains = {'mem_total': 0, 'swap_total': 0}
prtconf = salt.utils.path.which('prtconf')
if prtconf:
for line in __salt__['cmd.run'](prtconf, python_shell=True).splitlines():
comps = [x for x in line.strip().split(' ') if x]
if len(comps) > 2 and 'Memory' in comps[0] and 'Size' in comps[1]:
grains['mem_total'] = int(comps[2])
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
swap_cmd = salt.utils.path.which('swap')
if swap_cmd:
swap_data = __salt__['cmd.run']('{0} -s'.format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
grains['swap_total'] = swap_total
else:
log.error('The \'swap\' binary was not found in $PATH.')
return grains
def _windows_memdata():
'''
Return the memory information for Windows systems
'''
grains = {'mem_total': 0}
# get the Total Physical memory as reported by msinfo32
tot_bytes = win32api.GlobalMemoryStatusEx()['TotalPhys']
    # return memory info in megabytes
grains['mem_total'] = int(tot_bytes / (1024 ** 2))
return grains
def _memdata(osdata):
'''
Gather information about the system memory
'''
# Provides:
# mem_total
# swap_total, for supported systems.
grains = {'mem_total': 0}
if osdata['kernel'] == 'Linux':
grains.update(_linux_memdata())
elif osdata['kernel'] in ('FreeBSD', 'OpenBSD', 'NetBSD'):
grains.update(_bsd_memdata(osdata))
elif osdata['kernel'] == 'Darwin':
grains.update(_osx_memdata())
elif osdata['kernel'] == 'SunOS':
grains.update(_sunos_memdata())
elif osdata['kernel'] == 'AIX':
grains.update(_aix_memdata())
elif osdata['kernel'] == 'Windows' and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
'''
Parse the output of lsattr -El sys0 for os_uuid
'''
grains = {}
cmd = salt.utils.path.which('lsattr')
if cmd:
data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['machine_id'] = res.group(1).strip()
break
else:
log.error('The \'lsattr\' binary was not found in $PATH.')
return grains
def _windows_virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
# Provides:
# virtual
# virtual_subtype
grains = dict()
if osdata['kernel'] != 'Windows':
return grains
grains['virtual'] = 'physical'
# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
manufacturer = osdata.get('manufacturer', '')
if manufacturer is None:
manufacturer = ''
productname = osdata.get('productname', '')
if productname is None:
productname = ''
if 'QEMU' in manufacturer:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Bochs' in manufacturer:
grains['virtual'] = 'kvm'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'oVirt' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'oVirt'
# Red Hat Enterprise Virtualization
elif 'RHEV Hypervisor' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
# Product Name: VirtualBox
elif 'VirtualBox' in productname:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware Virtual Platform' in productname:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif 'Microsoft' in manufacturer and \
'Virtual Machine' in productname:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in manufacturer:
grains['virtual'] = 'Parallels'
# Apache CloudStack
elif 'CloudStack KVM Hypervisor' in productname:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'cloudstack'
return grains
def _virtual(osdata):
'''
Returns what type of virtual hardware is under the hood, kvm or physical
'''
    # This is going to be a monster. If you are running a VM and can test this
    # grain, please submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {'virtual': 'physical'}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
skip_cmds = ('AIX',)
# list of commands to be executed to determine the 'virtual' grain
_cmds = ['systemd-detect-virt', 'virt-what', 'dmidecode']
# test first for virt-what, which covers most of the desired functionality
# on most platforms
if not salt.utils.platform.is_windows() and osdata['kernel'] not in skip_cmds:
if salt.utils.path.which('virt-what'):
_cmds = ['virt-what']
# Check if enable_lspci is True or False
if __opts__.get('enable_lspci', True) is True:
# /proc/bus/pci does not exists, lspci will fail
if os.path.exists('/proc/bus/pci'):
_cmds += ['lspci']
# Add additional last resort commands
if osdata['kernel'] in skip_cmds:
_cmds = ()
# Quick backout for BrandZ (Solaris LX Branded zones)
# Don't waste time trying other commands to detect the virtual grain
if HAS_UNAME and osdata['kernel'] == 'Linux' and 'BrandZ virtual linux' in os.uname():
grains['virtual'] = 'zone'
return grains
failed_commands = set()
for command in _cmds:
args = []
if osdata['kernel'] == 'Darwin':
command = 'system_profiler'
args = ['SPDisplaysDataType']
elif osdata['kernel'] == 'SunOS':
virtinfo = salt.utils.path.which('virtinfo')
if virtinfo:
try:
ret = __salt__['cmd.run_all']('{0} -a'.format(virtinfo))
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
failed_commands.add(virtinfo)
else:
if ret['stdout'].endswith('not supported'):
command = 'prtdiag'
else:
command = 'virtinfo'
else:
command = 'prtdiag'
cmd = salt.utils.path.which(command)
if not cmd:
continue
cmd = '{0} {1}'.format(cmd, ' '.join(args))
try:
ret = __salt__['cmd.run_all'](cmd)
if ret['retcode'] > 0:
if salt.log.is_logging_configured():
# systemd-detect-virt always returns > 0 on non-virtualized
# systems
# prtdiag only works in the global zone, skip if it fails
if salt.utils.platform.is_windows() or 'systemd-detect-virt' in cmd or 'prtdiag' in cmd:
continue
failed_commands.add(command)
continue
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
if salt.utils.platform.is_windows():
continue
failed_commands.add(command)
continue
output = ret['stdout']
if command == "system_profiler":
macoutput = output.lower()
if '0x1ab8' in macoutput:
grains['virtual'] = 'Parallels'
if 'parallels' in macoutput:
grains['virtual'] = 'Parallels'
if 'vmware' in macoutput:
grains['virtual'] = 'VMware'
if '0x15ad' in macoutput:
grains['virtual'] = 'VMware'
if 'virtualbox' in macoutput:
grains['virtual'] = 'VirtualBox'
# Break out of the loop so the next log message is not issued
break
elif command == 'systemd-detect-virt':
if output in ('qemu', 'kvm', 'oracle', 'xen', 'bochs', 'chroot', 'uml', 'systemd-nspawn'):
grains['virtual'] = output
break
elif 'vmware' in output:
grains['virtual'] = 'VMware'
break
elif 'microsoft' in output:
grains['virtual'] = 'VirtualPC'
break
elif 'lxc' in output:
grains['virtual'] = 'LXC'
break
elif 'systemd-nspawn' in output:
grains['virtual'] = 'LXC'
break
elif command == 'virt-what':
try:
output = output.splitlines()[-1]
except IndexError:
pass
if output in ('kvm', 'qemu', 'uml', 'xen', 'lxc'):
grains['virtual'] = output
break
elif 'vmware' in output:
grains['virtual'] = 'VMware'
break
elif 'parallels' in output:
grains['virtual'] = 'Parallels'
break
elif 'hyperv' in output:
grains['virtual'] = 'HyperV'
break
elif command == 'dmidecode':
# Product Name: VirtualBox
if 'Vendor: QEMU' in output:
# FIXME: Make this detect between kvm or qemu
grains['virtual'] = 'kvm'
if 'Manufacturer: QEMU' in output:
grains['virtual'] = 'kvm'
if 'Vendor: Bochs' in output:
grains['virtual'] = 'kvm'
if 'Manufacturer: Bochs' in output:
grains['virtual'] = 'kvm'
if 'BHYVE' in output:
grains['virtual'] = 'bhyve'
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif 'Manufacturer: oVirt' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'ovirt'
# Red Hat Enterprise Virtualization
elif 'Product Name: RHEV Hypervisor' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
elif 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
# Product Name: VMware Virtual Platform
elif 'VMware' in output:
grains['virtual'] = 'VMware'
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ': Microsoft' in output and 'Virtual Machine' in output:
grains['virtual'] = 'VirtualPC'
# Manufacturer: Parallels Software International Inc.
elif 'Parallels Software' in output:
grains['virtual'] = 'Parallels'
elif 'Manufacturer: Google' in output:
grains['virtual'] = 'kvm'
# Proxmox KVM
elif 'Vendor: SeaBIOS' in output:
grains['virtual'] = 'kvm'
# Break out of the loop, lspci parsing is not necessary
break
elif command == 'lspci':
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if 'vmware' in model:
grains['virtual'] = 'VMware'
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'virtio' in model:
grains['virtual'] = 'kvm'
# Break out of the loop so the next log message is not issued
break
elif command == 'prtdiag':
model = output.lower().split("\n")[0]
if 'vmware' in model:
grains['virtual'] = 'VMware'
elif 'virtualbox' in model:
grains['virtual'] = 'VirtualBox'
elif 'qemu' in model:
grains['virtual'] = 'kvm'
elif 'joyent smartdc hvm' in model:
grains['virtual'] = 'kvm'
break
elif command == 'virtinfo':
grains['virtual'] = 'LDOM'
break
choices = ('Linux', 'HP-UX')
isdir = os.path.isdir
sysctl = salt.utils.path.which('sysctl')
if osdata['kernel'] in choices:
if os.path.isdir('/proc'):
try:
self_root = os.stat('/')
init_root = os.stat('/proc/1/root/.')
if self_root != init_root:
grains['virtual_subtype'] = 'chroot'
except (IOError, OSError):
pass
if isdir('/proc/vz'):
if os.path.isfile('/proc/vz/version'):
grains['virtual'] = 'openvzhn'
elif os.path.isfile('/proc/vz/veinfo'):
grains['virtual'] = 'openvzve'
                # On OpenVZ these commands are expected to have failed:
failed_commands.discard('lspci')
failed_commands.discard('dmidecode')
# Provide additional detection for OpenVZ
if os.path.isfile('/proc/self/status'):
with salt.utils.files.fopen('/proc/self/status') as status_file:
vz_re = re.compile(r'^envID:\s+(\d+)$')
for line in status_file:
vz_match = vz_re.match(line.rstrip('\n'))
if vz_match and int(vz_match.groups()[0]) != 0:
grains['virtual'] = 'openvzve'
elif vz_match and int(vz_match.groups()[0]) == 0:
grains['virtual'] = 'openvzhn'
if isdir('/proc/sys/xen') or \
isdir('/sys/bus/xen') or isdir('/proc/xen'):
if os.path.isfile('/proc/xen/xsd_kva'):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains['virtual_subtype'] = 'Xen Dom0'
else:
if osdata.get('productname', '') == 'HVM domU':
# Requires dmidecode!
grains['virtual_subtype'] = 'Xen HVM DomU'
elif os.path.isfile('/proc/xen/capabilities') and \
os.access('/proc/xen/capabilities', os.R_OK):
with salt.utils.files.fopen('/proc/xen/capabilities') as fhr:
if 'control_d' not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains['virtual_subtype'] = 'Xen PV DomU'
else:
# Shouldn't get to this, but just in case
grains['virtual_subtype'] = 'Xen Dom0'
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir('/sys/bus/xen'):
if 'xen:' in __salt__['cmd.run']('dmesg').lower():
grains['virtual_subtype'] = 'Xen PV DomU'
elif os.path.isfile('/sys/bus/xen/drivers/xenconsole'):
# An actual DomU will have the xenconsole driver
grains['virtual_subtype'] = 'Xen PV DomU'
# If a Dom0 or DomU was detected, obviously this is xen
if 'dom' in grains.get('virtual_subtype', '').lower():
grains['virtual'] = 'xen'
# Check container type after hypervisors, to avoid variable overwrite on containers running in virtual environment.
if os.path.isfile('/proc/1/cgroup'):
try:
with salt.utils.files.fopen('/proc/1/cgroup', 'r') as fhr:
fhr_contents = fhr.read()
if ':/lxc/' in fhr_contents:
grains['virtual_subtype'] = 'LXC'
elif ':/kubepods/' in fhr_contents:
grains['virtual_subtype'] = 'kubernetes'
elif ':/libpod_parent/' in fhr_contents:
grains['virtual_subtype'] = 'libpod'
else:
if any(x in fhr_contents
for x in (':/system.slice/docker', ':/docker/',
':/docker-ce/')):
grains['virtual_subtype'] = 'Docker'
except IOError:
pass
if os.path.isfile('/proc/cpuinfo'):
with salt.utils.files.fopen('/proc/cpuinfo', 'r') as fhr:
if 'QEMU Virtual CPU' in fhr.read():
grains['virtual'] = 'kvm'
if os.path.isfile('/sys/devices/virtual/dmi/id/product_name'):
try:
with salt.utils.files.fopen('/sys/devices/virtual/dmi/id/product_name', 'r') as fhr:
output = salt.utils.stringutils.to_unicode(fhr.read(), errors='replace')
if 'VirtualBox' in output:
grains['virtual'] = 'VirtualBox'
elif 'RHEV Hypervisor' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'rhev'
elif 'oVirt Node' in output:
grains['virtual'] = 'kvm'
grains['virtual_subtype'] = 'ovirt'
elif 'Google' in output:
grains['virtual'] = 'gce'
elif 'BHYVE' in output:
grains['virtual'] = 'bhyve'
except IOError:
pass
elif osdata['kernel'] == 'FreeBSD':
kenv = salt.utils.path.which('kenv')
if kenv:
product = __salt__['cmd.run'](
'{0} smbios.system.product'.format(kenv)
)
maker = __salt__['cmd.run'](
'{0} smbios.system.maker'.format(kenv)
)
if product.startswith('VMware'):
grains['virtual'] = 'VMware'
if product.startswith('VirtualBox'):
grains['virtual'] = 'VirtualBox'
if maker.startswith('Xen'):
grains['virtual_subtype'] = '{0} {1}'.format(maker, product)
grains['virtual'] = 'xen'
if maker.startswith('Microsoft') and product.startswith('Virtual'):
grains['virtual'] = 'VirtualPC'
if maker.startswith('OpenStack'):
grains['virtual'] = 'OpenStack'
if maker.startswith('Bochs'):
grains['virtual'] = 'kvm'
if sysctl:
hv_vendor = __salt__['cmd.run']('{0} hw.hv_vendor'.format(sysctl))
model = __salt__['cmd.run']('{0} hw.model'.format(sysctl))
jail = __salt__['cmd.run'](
'{0} -n security.jail.jailed'.format(sysctl)
)
if 'bhyve' in hv_vendor:
grains['virtual'] = 'bhyve'
if jail == '1':
grains['virtual_subtype'] = 'jail'
if 'QEMU Virtual CPU' in model:
grains['virtual'] = 'kvm'
elif osdata['kernel'] == 'OpenBSD':
if 'manufacturer' in osdata:
if osdata['manufacturer'] in ['QEMU', 'Red Hat', 'Joyent']:
grains['virtual'] = 'kvm'
if osdata['manufacturer'] == 'OpenBSD':
grains['virtual'] = 'vmm'
elif osdata['kernel'] == 'SunOS':
if grains['virtual'] == 'LDOM':
roles = []
for role in ('control', 'io', 'root', 'service'):
subtype_cmd = '{0} -c current get -H -o value {1}-role'.format(cmd, role)
ret = __salt__['cmd.run_all']('{0}'.format(subtype_cmd))
if ret['stdout'] == 'true':
roles.append(role)
if roles:
grains['virtual_subtype'] = roles
else:
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.path.which('zonename')
if zonename:
zone = __salt__['cmd.run']('{0}'.format(zonename))
if zone != 'global':
grains['virtual'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir('/.SUNWnative'):
grains['virtual'] = 'zone'
elif osdata['kernel'] == 'NetBSD':
if sysctl:
if 'QEMU Virtual CPU' in __salt__['cmd.run'](
'{0} -n machdep.cpu_brand'.format(sysctl)):
grains['virtual'] = 'kvm'
elif 'invalid' not in __salt__['cmd.run'](
'{0} -n machdep.xen.suspend'.format(sysctl)):
grains['virtual'] = 'Xen PV DomU'
elif 'VMware' in __salt__['cmd.run'](
'{0} -n machdep.dmi.system-vendor'.format(sysctl)):
grains['virtual'] = 'VMware'
# NetBSD has Xen dom0 support
elif __salt__['cmd.run'](
'{0} -n machdep.idle-mechanism'.format(sysctl)) == 'xen':
if os.path.isfile('/var/run/xenconsoled.pid'):
grains['virtual_subtype'] = 'Xen Dom0'
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
'cannot execute it. Grains output might not be '
'accurate.', command
)
return grains
def _virtual_hv(osdata):
'''
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
'''
grains = {}
# Bail early if we're not running on Xen
try:
if 'xen' not in osdata['virtual']:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ('major', 'minor', 'extra'):
with salt.utils.files.fopen('/sys/hypervisor/version/{}'.format(fn), 'r') as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains['virtual_hv_version'] = '{}.{}{}'.format(version['major'], version['minor'], version['extra'])
grains['virtual_hv_version_info'] = [version['major'], version['minor'], version['extra']]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {0: 'writable_page_tables',
1: 'writable_descriptor_tables',
2: 'auto_translated_physmap',
3: 'supervisor_mode_kernel',
4: 'pae_pgdir_above_4gb',
5: 'mmu_pt_update_preserve_ad',
7: 'gnttab_map_avail_bits',
8: 'hvm_callback_vector',
9: 'hvm_safe_pvclock',
10: 'hvm_pirqs',
11: 'dom0',
12: 'grant_map_identity',
13: 'memory_op_vnode_supported',
14: 'ARM_SMCCC_supported'}
try:
with salt.utils.files.fopen('/sys/hypervisor/properties/features', 'r') as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains['virtual_hv_features'] = features
grains['virtual_hv_features_list'] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains
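# Illustrative sketch only, not part of Salt's API: the feature-bitmask
# decoding performed in _virtual_hv() above, as a standalone helper. The
# sysfs file exposes a hex mask and every set bit maps to a feature name.
# The function name and the sample call are hypothetical.
def _example_decode_xen_features(hex_mask, table):
    '''
    Return the feature names whose bit is set in the given hex mask.
    '''
    mask = int(hex_mask, 16)
    return [name for bit, name in sorted(table.items()) if mask & (1 << bit)]
# _example_decode_xen_features('0x5', {0: 'writable_page_tables',
#                                      2: 'auto_translated_physmap'})
# -> ['writable_page_tables', 'auto_translated_physmap']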
def _ps(osdata):
'''
Return the ps grain
'''
grains = {}
bsd_choices = ('FreeBSD', 'NetBSD', 'OpenBSD', 'MacOS')
if osdata['os'] in bsd_choices:
grains['ps'] = 'ps auxwww'
elif osdata['os_family'] == 'Solaris':
grains['ps'] = '/usr/ucb/ps auxwww'
elif osdata['os'] == 'Windows':
grains['ps'] = 'tasklist.exe'
elif osdata.get('virtual', '') == 'openvzhn':
grains['ps'] = (
'ps -fH -p $(grep -l \"^envID:[[:space:]]*0\\$\" '
'/proc/[0-9]*/status | sed -e \"s=/proc/\\([0-9]*\\)/.*=\\1=\") '
'| awk \'{ $7=\"\"; print }\''
)
elif osdata['os_family'] == 'AIX':
grains['ps'] = '/usr/bin/ps auxww'
elif osdata['os_family'] == 'NILinuxRT':
grains['ps'] = 'ps -o user,pid,ppid,tty,time,comm'
else:
grains['ps'] = 'ps -efHww'
return grains
def _clean_value(key, val):
'''
Clean out well-known bogus values.
If it isn't clean (for example has value 'None'), return None.
Otherwise, return the original value.
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
'''
if (val is None or not val or
re.match('none', val, flags=re.IGNORECASE)):
return None
elif 'uuid' in key:
# Try each version (1-5) of RFC4122 to check if it's actually a UUID
        for uuidver in range(1, 6):
try:
uuid.UUID(val, version=uuidver)
return val
except ValueError:
continue
log.trace('HW %s value %s is an invalid UUID', key, val.replace('\n', ' '))
return None
elif re.search('serial|part|version', key):
        # 'To be filled by O.E.M.'
# 'Not applicable' etc.
# 'Not specified' etc.
# 0000000, 1234567 etc.
# begone!
if (re.match(r'^[0]+$', val) or
re.match(r'[0]?1234567[8]?[9]?[0]?', val) or
re.search(r'sernum|part[_-]?number|specified|filled|applicable', val, flags=re.IGNORECASE)):
return None
elif re.search('asset|manufacturer', key):
# AssetTag0. Manufacturer04. Begone.
if re.search(r'manufacturer|to be filled|available|asset|^no(ne|t)', val, flags=re.IGNORECASE):
return None
else:
# map unspecified, undefined, unknown & whatever to None
if (re.search(r'to be filled', val, flags=re.IGNORECASE) or
re.search(r'un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)',
val, flags=re.IGNORECASE)):
return None
return val
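# Illustrative usage of _clean_value() above (hypothetical values):
#   _clean_value('serialnumber', 'To Be Filled By O.E.M.')  -> None
#   _clean_value('uuid', 'not-a-uuid')                       -> None
#   _clean_value('biosversion', '6.00')                      -> '6.00'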
def _windows_os_release_grain(caption, product_type):
'''
helper function for getting the osrelease grain
:return:
'''
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
version = 'Unknown'
release = ''
if 'Server' in caption:
for item in caption.split(' '):
# If it's all digits, then it's version
if re.match(r'\d+', item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r'^R\d+$', item):
release = item
os_release = '{0}Server{1}'.format(version, release)
else:
for item in caption.split(' '):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r'^(\d+(\.\d+)?)|Thin|Vista|XP$', item):
version = item
os_release = version
# If the version is still Unknown, revert back to the old way of getting
# the os_release
# https://github.com/saltstack/salt/issues/52339
if os_release in ['Unknown']:
os_release = platform.release()
server = {'Vista': '2008Server',
'7': '2008ServerR2',
'8': '2012Server',
'8.1': '2012ServerR2',
'10': '2016Server'}
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()`
# function started reporting the Desktop version instead of the
    # Server version on Server versions of Windows, so we need to look
    # those up. So, if you find a Server platform that's a key in the
    # server dictionary, then look up the actual Server release.
# (Product Type 1 is Desktop, Everything else is Server)
if product_type > 1 and os_release in server:
os_release = server[os_release]
return os_release
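# Illustrative usage of _windows_os_release_grain() above (hypothetical
# captions; product type 1 is Desktop, anything greater is Server):
#   _windows_os_release_grain('Microsoft Windows Server 2012 R2 Datacenter', 3)
#       -> '2012ServerR2'
#   _windows_os_release_grain('Microsoft Windows 10 Pro', 1)
#       -> '10'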
def _windows_platform_data():
'''
Use the platform module for as much as we can.
'''
# Provides:
# kernelrelease
# kernelversion
# osversion
# osrelease
# osservicepack
# osmanufacturer
# manufacturer
# productname
# biosversion
# serialnumber
# osfullname
# timezone
# windowsdomain
# windowsdomaintype
# motherboard.productname
# motherboard.serialnumber
# virtual
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# https://msdn.microsoft.com/en-us/library/aa394239(v=vs.85).aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
motherboard = {'product': None,
'serial': None}
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
motherboard['product'] = motherboardinfo.Product
motherboard['serial'] = motherboardinfo.SerialNumber
except IndexError:
log.debug('Motherboard info not available on this system')
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
net_info = salt.utils.win_osinfo.get_join_info()
service_pack = None
if info['ServicePackMajor'] > 0:
service_pack = ''.join(['SP', six.text_type(info['ServicePackMajor'])])
os_release = _windows_os_release_grain(caption=osinfo.Caption,
product_type=osinfo.ProductType)
grains = {
'kernelrelease': _clean_value('kernelrelease', osinfo.Version),
'kernelversion': _clean_value('kernelversion', kernel_version),
'osversion': _clean_value('osversion', osinfo.Version),
'osrelease': _clean_value('osrelease', os_release),
'osservicepack': _clean_value('osservicepack', service_pack),
'osmanufacturer': _clean_value('osmanufacturer', osinfo.Manufacturer),
'manufacturer': _clean_value('manufacturer', systeminfo.Manufacturer),
'productname': _clean_value('productname', systeminfo.Model),
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
'biosversion': _clean_value('biosversion', biosinfo.Name.strip()),
'serialnumber': _clean_value('serialnumber', biosinfo.SerialNumber),
'osfullname': _clean_value('osfullname', osinfo.Caption),
'timezone': _clean_value('timezone', timeinfo.Description),
'windowsdomain': _clean_value('windowsdomain', net_info['Domain']),
'windowsdomaintype': _clean_value('windowsdomaintype', net_info['DomainType']),
'motherboard': {
'productname': _clean_value('motherboard.productname', motherboard['product']),
'serialnumber': _clean_value('motherboard.serialnumber', motherboard['serial']),
}
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if 'VRTUAL' in biosinfo.Version: # (not a typo)
grains['virtual'] = 'HyperV'
elif 'A M I' in biosinfo.Version:
grains['virtual'] = 'VirtualPC'
elif 'VMware' in systeminfo.Model:
grains['virtual'] = 'VMware'
elif 'VirtualBox' in systeminfo.Model:
grains['virtual'] = 'VirtualBox'
elif 'Xen' in biosinfo.Version:
grains['virtual'] = 'Xen'
if 'HVM domU' in systeminfo.Model:
grains['virtual_subtype'] = 'HVM domU'
elif 'OpenStack' in systeminfo.Model:
grains['virtual'] = 'OpenStack'
elif 'AMAZON' in biosinfo.Version:
grains['virtual'] = 'EC2'
return grains
def _osx_platform_data():
'''
Additional data for macOS systems
Returns: A dictionary containing values for the following:
- model_name
- boot_rom_version
- smc_version
- system_serialnumber
'''
cmd = 'system_profiler SPHardwareDataType'
hardware = __salt__['cmd.run'](cmd)
grains = {}
for line in hardware.splitlines():
field_name, _, field_val = line.partition(': ')
if field_name.strip() == "Model Name":
key = 'model_name'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Boot ROM Version":
key = 'boot_rom_version'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "SMC Version (system)":
key = 'smc_version'
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Serial Number (system)":
key = 'system_serialnumber'
grains[key] = _clean_value(key, field_val)
return grains
def id_():
'''
Return the id
'''
return {'id': __opts__.get('id', '')}
_REPLACE_LINUX_RE = re.compile(r'\W(?:gnu/)?linux', re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
'redhatente': 'RedHat',
'gentoobase': 'Gentoo',
'archarm': 'Arch ARM',
'arch': 'Arch',
'debian': 'Debian',
'raspbian': 'Raspbian',
'fedoraremi': 'Fedora',
'chapeau': 'Chapeau',
'korora': 'Korora',
'amazonami': 'Amazon',
'alt': 'ALT',
'enterprise': 'OEL',
'oracleserv': 'OEL',
'cloudserve': 'CloudLinux',
'cloudlinux': 'CloudLinux',
'pidora': 'Fedora',
'scientific': 'ScientificLinux',
'synology': 'Synology',
'nilrt': 'NILinuxRT',
'poky': 'Poky',
'manjaro': 'Manjaro',
'manjarolin': 'Manjaro',
'univention': 'Univention',
'antergos': 'Antergos',
'sles': 'SUSE',
'void': 'Void',
'slesexpand': 'RES',
'linuxmint': 'Mint',
'neon': 'KDE neon',
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
'Ubuntu': 'Debian',
'Fedora': 'RedHat',
'Chapeau': 'RedHat',
'Korora': 'RedHat',
'FedBerry': 'RedHat',
'CentOS': 'RedHat',
'GoOSe': 'RedHat',
'Scientific': 'RedHat',
'Amazon': 'RedHat',
'CloudLinux': 'RedHat',
'OVS': 'RedHat',
'OEL': 'RedHat',
'XCP': 'RedHat',
'XCP-ng': 'RedHat',
'XenServer': 'RedHat',
'RES': 'RedHat',
'Sangoma': 'RedHat',
'Mandrake': 'Mandriva',
'ESXi': 'VMware',
'Mint': 'Debian',
'VMwareESX': 'VMware',
'Bluewhite64': 'Bluewhite',
'Slamd64': 'Slackware',
'SLES': 'Suse',
'SUSE Enterprise Server': 'Suse',
'SLED': 'Suse',
'openSUSE': 'Suse',
'SUSE': 'Suse',
'openSUSE Leap': 'Suse',
'openSUSE Tumbleweed': 'Suse',
'SLES_SAP': 'Suse',
'Solaris': 'Solaris',
'SmartOS': 'Solaris',
'OmniOS': 'Solaris',
'OpenIndiana Development': 'Solaris',
'OpenIndiana': 'Solaris',
'OpenSolaris Development': 'Solaris',
'OpenSolaris': 'Solaris',
'Oracle Solaris': 'Solaris',
'Arch ARM': 'Arch',
'Manjaro': 'Arch',
'Antergos': 'Arch',
'ALT': 'RedHat',
'Trisquel': 'Debian',
'GCEL': 'Debian',
'Linaro': 'Debian',
'elementary OS': 'Debian',
'elementary': 'Debian',
'Univention': 'Debian',
'ScientificLinux': 'RedHat',
'Raspbian': 'Debian',
'Devuan': 'Debian',
'antiX': 'Debian',
'Kali': 'Debian',
'neon': 'Debian',
'Cumulus': 'Debian',
'Deepin': 'Debian',
'NILinuxRT': 'NILinuxRT',
'KDE neon': 'Debian',
'Void': 'Void',
'IDMS': 'Debian',
'Funtoo': 'Gentoo',
'AIX': 'AIX',
'TurnKey': 'Debian',
}
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
_LSB_REGEX = re.compile((
'^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:\'|")?'
'([\\w\\s\\.\\-_]+)(?:\'|")?'
))
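# Illustrative example only (hypothetical line): applied to
#   DISTRIB_DESCRIPTION="Ubuntu 10.10"
# _LSB_REGEX captures ('DISTRIB_DESCRIPTION', 'Ubuntu 10.10'), which
# _parse_lsb_release() below stores as the lsb_distrib_description grain.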
def _linux_bin_exists(binary):
'''
Does a binary exist in linux (depends on which, type, or whereis)
'''
for search_cmd in ('which', 'type -ap'):
try:
return __salt__['cmd.retcode'](
'{0} {1}'.format(search_cmd, binary)
) == 0
except salt.exceptions.CommandExecutionError:
pass
try:
return len(__salt__['cmd.run_all'](
'whereis -b {0}'.format(binary)
)['stdout'].split()) > 1
except salt.exceptions.CommandExecutionError:
return False
def _get_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
'''
global _INTERFACES
if not _INTERFACES:
_INTERFACES = salt.utils.network.interfaces()
return _INTERFACES
def _parse_lsb_release():
ret = {}
try:
log.trace('Attempting to parse /etc/lsb-release')
with salt.utils.files.fopen('/etc/lsb-release') as ifile:
for line in ifile:
try:
key, value = _LSB_REGEX.match(line.rstrip('\n')).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
ret['lsb_{0}'.format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
log.trace('Failed to parse /etc/lsb-release: %s', exc)
return ret
def _parse_os_release(*os_release_files):
'''
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
'''
ret = {}
for filename in os_release_files:
try:
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile('^([\\w]+)=(?:\'|")?(.*?)(?:\'|")?$')
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r'\1', match.group(2)
)
break
except (IOError, OSError):
pass
return ret
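# Illustrative example only (hypothetical file contents) for
# _parse_os_release() above: given a file containing
#   NAME="openSUSE Leap"
#   VERSION_ID="15.0"
# it returns {'NAME': 'openSUSE Leap', 'VERSION_ID': '15.0'}; surrounding
# quotes are stripped and backslash-escaped shell characters are unescaped.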
def _parse_cpe_name(cpe):
'''
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
Note: cpe:2.3:part:vendor:product:version:update:edition:lang:sw_edition:target_sw:target_hw:other
however some OS's do not have the full 13 elements, for example:
CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2"
:param cpe:
:return:
'''
part = {
'o': 'operating system',
'h': 'hardware',
'a': 'application',
}
ret = {}
cpe = (cpe or '').split(':')
if len(cpe) > 4 and cpe[0] == 'cpe':
if cpe[1].startswith('/'): # WFN to URI
ret['vendor'], ret['product'], ret['version'] = cpe[2:5]
ret['phase'] = cpe[5] if len(cpe) > 5 else None
ret['part'] = part.get(cpe[1][1:])
elif len(cpe) == 6 and cpe[1] == '2.3': # WFN to a string
ret['vendor'], ret['product'], ret['version'] = [x if x != '*' else None for x in cpe[3:6]]
ret['phase'] = None
ret['part'] = part.get(cpe[2])
elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == '2.3': # WFN to a string
ret['vendor'], ret['product'], ret['version'], ret['phase'] = [x if x != '*' else None for x in cpe[3:7]]
ret['part'] = part.get(cpe[2])
return ret
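# Illustrative usage of _parse_cpe_name() above (sample strings covering the
# URI and 2.3 formats described in its docstring):
#   _parse_cpe_name('cpe:/o:opensuse:leap:15.0')
#       -> {'vendor': 'opensuse', 'product': 'leap', 'version': '15.0',
#           'phase': None, 'part': 'operating system'}
#   _parse_cpe_name('cpe:2.3:o:amazon:amazon_linux:2')
#       -> {'vendor': 'amazon', 'product': 'amazon_linux', 'version': '2',
#           'phase': None, 'part': 'operating system'}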
def os_data():
'''
Return grains pertaining to the operating system
'''
grains = {
'num_gpus': 0,
'gpus': [],
}
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
    #  'Intel64 Family 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server',
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
(grains['kernel'], grains['nodename'],
grains['kernelrelease'], grains['kernelversion'], grains['cpuarch'], _) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.platform.is_proxy():
grains['kernel'] = 'proxy'
grains['kernelrelease'] = 'proxy'
grains['kernelversion'] = 'proxy'
grains['osrelease'] = 'proxy'
grains['os'] = 'proxy'
grains['os_family'] = 'proxy'
grains['osfullname'] = 'proxy'
elif salt.utils.platform.is_windows():
grains['os'] = 'Windows'
grains['os_family'] = 'Windows'
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
if 'Server' in grains['osrelease']:
osrelease_info = grains['osrelease'].split('Server', 1)
osrelease_info[1] = osrelease_info[1].lstrip('R')
else:
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
grains['osfinger'] = '{os}-{ver}'.format(
os=grains['os'],
ver=grains['osrelease'])
grains['init'] = 'Windows'
return grains
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists('selinuxenabled'):
log.trace('Adding selinux grains')
grains['selinux'] = {}
grains['selinux']['enabled'] = __salt__['cmd.retcode'](
'selinuxenabled'
) == 0
if _linux_bin_exists('getenforce'):
grains['selinux']['enforced'] = __salt__['cmd.run'](
'getenforce'
).strip()
# Add systemd grain, if you have it
if _linux_bin_exists('systemctl') and _linux_bin_exists('localectl'):
log.trace('Adding systemd grains')
grains['systemd'] = {}
systemd_info = __salt__['cmd.run'](
'systemctl --version'
).splitlines()
grains['systemd']['version'] = systemd_info[0].split()[1]
grains['systemd']['features'] = systemd_info[1]
# Add init grain
grains['init'] = 'unknown'
log.trace('Adding init grain')
try:
os.stat('/run/systemd/system')
grains['init'] = 'systemd'
except (OSError, IOError):
try:
with salt.utils.files.fopen('/proc/1/cmdline') as fhr:
init_cmdline = fhr.read().replace('\x00', ' ').split()
except (IOError, OSError):
pass
else:
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
                    # Empty init_cmdline
init_bin = None
log.warning('Unable to fetch data from /proc/1/cmdline')
if init_bin is not None and init_bin.endswith('bin/init'):
supported_inits = (b'upstart', b'sysvinit', b'systemd')
edge_len = max(len(x) for x in supported_inits) - 1
try:
buf_size = __opts__['file_buffer_size']
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, 'rb') as fp_:
edge = b''
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode('utf-8')
grains['init'] = item
buf = b''
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
'Unable to read from init_bin (%s): %s',
init_bin, exc
)
elif salt.utils.path.which('supervisord') in init_cmdline:
grains['init'] = 'supervisord'
elif salt.utils.path.which('dumb-init') in init_cmdline:
# https://github.com/Yelp/dumb-init
grains['init'] = 'dumb-init'
elif salt.utils.path.which('tini') in init_cmdline:
# https://github.com/krallin/tini
grains['init'] = 'tini'
elif init_cmdline == ['runit']:
grains['init'] = 'runit'
elif '/sbin/my_init' in init_cmdline:
                    # Phusion base Docker containers use runit for service
                    # management, but my_init as PID 1
grains['init'] = 'runit'
else:
log.debug(
'Could not determine init system from command line: (%s)',
' '.join(init_cmdline)
)
# Add lsb grains on any distro with lsb-release. Note that this import
# can fail on systems with lsb-release installed if the system package
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
log.trace('Getting lsb_release distro information')
import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
lsb_param = 'lsb_{0}{1}'.format(
'' if key.startswith('distrib_') else 'distrib_',
key
)
grains[lsb_param] = value
# Catch a NameError to workaround possible breakage in lsb_release
# See https://github.com/saltstack/salt/issues/37867
except (ImportError, NameError):
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
log.trace('lsb_release python bindings not available')
grains.update(_parse_lsb_release())
if grains.get('lsb_distrib_description', '').lower().startswith('antergos'):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
grains['osfullname'] = 'Antergos Linux'
elif 'lsb_distrib_id' not in grains:
log.trace(
'Failed to get lsb_distrib_id, trying to parse os-release'
)
os_release = _parse_os_release('/etc/os-release', '/usr/lib/os-release')
if os_release:
if 'NAME' in os_release:
grains['lsb_distrib_id'] = os_release['NAME'].strip()
if 'VERSION_ID' in os_release:
grains['lsb_distrib_release'] = os_release['VERSION_ID']
if 'VERSION_CODENAME' in os_release:
grains['lsb_distrib_codename'] = os_release['VERSION_CODENAME']
elif 'PRETTY_NAME' in os_release:
codename = os_release['PRETTY_NAME']
# https://github.com/saltstack/salt/issues/44108
if os_release['ID'] == 'debian':
codename_match = re.search(r'\((\w+)\)$', codename)
if codename_match:
codename = codename_match.group(1)
grains['lsb_distrib_codename'] = codename
if 'CPE_NAME' in os_release:
cpe = _parse_cpe_name(os_release['CPE_NAME'])
if not cpe:
log.error('Broken CPE_NAME format in /etc/os-release!')
elif cpe.get('vendor', '').lower() in ['suse', 'opensuse']:
grains['os'] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
grains['osfullname'] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
grains['osfullname'] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
if cpe.get('version') and cpe.get('vendor') == 'opensuse': # Keep VERSION_ID for SLES
grains['lsb_distrib_release'] = cpe['version']
elif os.path.isfile('/etc/SuSE-release'):
log.trace('Parsing distrib info from /etc/SuSE-release')
grains['lsb_distrib_id'] = 'SUSE'
version = ''
patch = ''
with salt.utils.files.fopen('/etc/SuSE-release') as fhr:
for line in fhr:
if 'enterprise' in line.lower():
grains['lsb_distrib_id'] = 'SLES'
grains['lsb_distrib_codename'] = re.sub(r'\(.+\)', '', line).strip()
elif 'version' in line.lower():
version = re.sub(r'[^0-9]', '', line)
elif 'patchlevel' in line.lower():
patch = re.sub(r'[^0-9]', '', line)
grains['lsb_distrib_release'] = version
if patch:
grains['lsb_distrib_release'] += '.' + patch
patchstr = 'SP' + patch
if grains['lsb_distrib_codename'] and patchstr not in grains['lsb_distrib_codename']:
grains['lsb_distrib_codename'] += ' ' + patchstr
if not grains.get('lsb_distrib_codename'):
grains['lsb_distrib_codename'] = 'n.a'
elif os.path.isfile('/etc/altlinux-release'):
log.trace('Parsing distrib info from /etc/altlinux-release')
# ALT Linux
grains['lsb_distrib_id'] = 'altlinux'
with salt.utils.files.fopen('/etc/altlinux-release') as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == 'ALT':
grains['lsb_distrib_release'] = comps[2]
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
log.trace('Parsing distrib info from /etc/centos-release')
# CentOS Linux
grains['lsb_distrib_id'] = 'CentOS'
with salt.utils.files.fopen('/etc/centos-release') as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r'\d+\.\d+')
find_codename = re.compile(r'(?<=\()(.*?)(?=\))')
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains['lsb_distrib_release'] = release.group()
if codename is not None:
grains['lsb_distrib_codename'] = codename.group()
elif os.path.isfile('/etc.defaults/VERSION') \
and os.path.isfile('/etc.defaults/synoinfo.conf'):
grains['osfullname'] = 'Synology'
log.trace(
                    'Parsing Synology distrib info from /etc.defaults/VERSION'
)
with salt.utils.files.fopen('/etc.defaults/VERSION', 'r') as fp_:
synoinfo = {}
for line in fp_:
try:
key, val = line.rstrip('\n').split('=')
except ValueError:
continue
if key in ('majorversion', 'minorversion',
'buildnumber'):
synoinfo[key] = val.strip('"')
if len(synoinfo) != 3:
log.warning(
'Unable to determine Synology version info. '
'Please report this, as it is likely a bug.'
)
else:
grains['osrelease'] = (
'{majorversion}.{minorversion}-{buildnumber}'
.format(**synoinfo)
)
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
'Getting OS name, release, and codename from '
'platform.linux_distribution()'
)
(osname, osrelease, oscodename) = \
[x.strip('"').strip("'") for x in
linux_distribution(supported_dists=_supported_dists)]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that linux_distribution() does the /etc/lsb-release parsing, but
# we do it anyway here for the sake for full portability.
if 'osfullname' not in grains:
# If NI Linux RT distribution, set the grains['osfullname'] to 'nilrt'
if grains.get('lsb_distrib_id', '').lower().startswith('nilrt'):
grains['osfullname'] = 'nilrt'
else:
grains['osfullname'] = grains.get('lsb_distrib_id', osname).strip()
if 'osrelease' not in grains:
# NOTE: This is a workaround for CentOS 7 os-release bug
# https://bugs.centos.org/view.php?id=8359
# /etc/os-release contains no minor distro release number so we fall back to parse
# /etc/centos-release file instead.
            # The commit introducing this workaround should be reverted once the upstream fix is released.
if 'CentOS Linux 7' in grains.get('lsb_distrib_codename', ''):
grains.pop('lsb_distrib_release', None)
grains['osrelease'] = grains.get('lsb_distrib_release', osrelease).strip()
grains['oscodename'] = grains.get('lsb_distrib_codename', '').strip() or oscodename
if 'Red Hat' in grains['oscodename']:
grains['oscodename'] = oscodename
distroname = _REPLACE_LINUX_RE.sub('', grains['osfullname']).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(' ', '').lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
if 'os' not in grains:
grains['os'] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains['kernel'] == 'SunOS':
if salt.utils.platform.is_smartos():
# See https://github.com/joyent/smartos-live/issues/224
if HAS_UNAME:
uname_v = os.uname()[3] # format: joyent_20161101T004406Z
else:
uname_v = os.name
uname_v = uname_v[uname_v.index('_')+1:]
grains['os'] = grains['osfullname'] = 'SmartOS'
# store a parsed version of YYYY.MM.DD as osrelease
grains['osrelease'] = ".".join([
uname_v.split('T')[0][0:4],
uname_v.split('T')[0][4:6],
uname_v.split('T')[0][6:8],
])
# store a untouched copy of the timestamp in osrelease_stamp
grains['osrelease_stamp'] = uname_v
elif os.path.isfile('/etc/release'):
with salt.utils.files.fopen('/etc/release', 'r') as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r'((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?'
r'\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?'
)
osname, development, osmajorrelease, osminorrelease = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains['os'] = grains['osfullname'] = 'Solaris'
grains['osrelease'] = ''
else:
if development is not None:
osname = ' '.join((osname, development))
if HAS_UNAME:
uname_v = os.uname()[3]
else:
uname_v = os.name
grains['os'] = grains['osfullname'] = osname
if osname in ['Oracle Solaris'] and uname_v.startswith(osmajorrelease):
                    # Oracle Solaris 11 and up have minor version in uname
grains['osrelease'] = uname_v
elif osname in ['OmniOS']:
# OmniOS
osrelease = []
osrelease.append(osmajorrelease[1:])
osrelease.append(osminorrelease[1:])
grains['osrelease'] = ".".join(osrelease)
grains['osrelease_stamp'] = uname_v
else:
# Sun Solaris 10 and earlier/comparable
osrelease = []
osrelease.append(osmajorrelease)
if osminorrelease:
osrelease.append(osminorrelease)
grains['osrelease'] = ".".join(osrelease)
grains['osrelease_stamp'] = uname_v
grains.update(_sunos_cpudata())
elif grains['kernel'] == 'VMkernel':
grains['os'] = 'ESXi'
elif grains['kernel'] == 'Darwin':
osrelease = __salt__['cmd.run']('sw_vers -productVersion')
osname = __salt__['cmd.run']('sw_vers -productName')
osbuild = __salt__['cmd.run']('sw_vers -buildVersion')
grains['os'] = 'MacOS'
grains['os_family'] = 'MacOS'
grains['osfullname'] = "{0} {1}".format(osname, osrelease)
grains['osrelease'] = osrelease
grains['osbuild'] = osbuild
grains['init'] = 'launchd'
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
elif grains['kernel'] == 'AIX':
osrelease = __salt__['cmd.run']('oslevel')
osrelease_techlevel = __salt__['cmd.run']('oslevel -r')
osname = __salt__['cmd.run']('uname')
grains['os'] = 'AIX'
grains['osfullname'] = osname
grains['osrelease'] = osrelease
grains['osrelease_techlevel'] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
grains['os'] = grains['kernel']
if grains['kernel'] == 'FreeBSD':
try:
grains['osrelease'] = __salt__['cmd.run']('freebsd-version -u').split('-')[0]
except salt.exceptions.CommandExecutionError:
# freebsd-version was introduced in 10.0.
# derive osrelease from kernelversion prior to that
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
grains.update(_bsd_cpudata(grains))
if grains['kernel'] in ('OpenBSD', 'NetBSD'):
grains.update(_bsd_cpudata(grains))
grains['osrelease'] = grains['kernelrelease'].split('-')[0]
if grains['kernel'] == 'NetBSD':
grains.update(_netbsd_gpu_data())
if not grains['os']:
grains['os'] = 'Unknown {0}'.format(grains['kernel'])
grains['os_family'] = 'Unknown'
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains['os_family'] = _OS_FAMILY_MAP.get(grains['os'],
grains['os'])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get('os_family') == 'Debian':
osarch = __salt__['cmd.run']('dpkg --print-architecture').strip()
elif grains.get('os_family') in ['RedHat', 'Suse']:
osarch = salt.utils.pkg.rpm.get_osarch()
elif grains.get('os_family') in ('NILinuxRT', 'Poky'):
archinfo = {}
for line in __salt__['cmd.run']('opkg print-architecture').splitlines():
if line.startswith('arch'):
_, arch, priority = line.split()
archinfo[arch.strip()] = int(priority.strip())
# Return osarch in priority order (higher to lower)
osarch = sorted(archinfo, key=archinfo.get, reverse=True)
else:
osarch = grains['cpuarch']
grains['osarch'] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
if grains.get('osrelease', ''):
osrelease_info = grains['osrelease'].split('.')
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains['osrelease_info'] = tuple(osrelease_info)
try:
grains['osmajorrelease'] = int(grains['osrelease_info'][0])
except (IndexError, TypeError, ValueError):
log.debug(
'Unable to derive osmajorrelease from osrelease_info \'%s\'. '
'The osmajorrelease grain will not be set.',
grains['osrelease_info']
)
os_name = grains['os' if grains.get('os') in (
'Debian', 'FreeBSD', 'OpenBSD', 'NetBSD', 'Mac', 'Raspbian') else 'osfullname']
grains['osfinger'] = '{0}-{1}'.format(
os_name, grains['osrelease'] if os_name in ('Ubuntu',) else grains['osrelease_info'][0])
return grains
def locale_info():
'''
Provides
defaultlanguage
defaultencoding
'''
grains = {}
grains['locale_info'] = {}
if salt.utils.platform.is_proxy():
return grains
try:
(
grains['locale_info']['defaultlanguage'],
grains['locale_info']['defaultencoding']
) = locale.getdefaultlocale()
except Exception:
        # locale.getdefaultlocale() can raise ValueError! Catch anything else it
# might do, per #2205
grains['locale_info']['defaultlanguage'] = 'unknown'
grains['locale_info']['defaultencoding'] = 'unknown'
grains['locale_info']['detectedencoding'] = __salt_system_encoding__
grains['locale_info']['timezone'] = 'unknown'
if _DATEUTIL_TZ:
try:
grains['locale_info']['timezone'] = datetime.datetime.now(dateutil.tz.tzlocal()).tzname()
except UnicodeDecodeError:
            # The 'tzname' method is not part of Salt, so the decoding error can't
            # be fixed here; it comes from the datetime module in the Python 2 stdlib.
if salt.utils.platform.is_windows():
grains['locale_info']['timezone'] = time.tzname[0].decode('mbcs')
return grains
def hostname():
'''
Return fqdn, hostname, domainname
.. note::
On Windows the ``domain`` grain may refer to the dns entry for the host
instead of the Windows domain to which the host is joined. It may also
be empty if not a part of any domain. Refer to the ``windowsdomain``
grain instead
'''
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
global __FQDN__
grains = {}
if salt.utils.platform.is_proxy():
return grains
grains['localhost'] = socket.gethostname()
if __FQDN__ is None:
__FQDN__ = salt.utils.network.get_fqhostname()
# On some distros (notably FreeBSD) if there is no hostname set
# salt.utils.network.get_fqhostname() will return None.
# In this case we punt and log a message at error level, but force the
# hostname and domain to be localhost.localdomain
# Otherwise we would stacktrace below
if __FQDN__ is None: # still!
log.error('Having trouble getting a hostname. Does this machine have its hostname and domain set properly?')
__FQDN__ = 'localhost.localdomain'
grains['fqdn'] = __FQDN__
(grains['host'], grains['domain']) = grains['fqdn'].partition('.')[::2]
return grains
def append_domain():
'''
Return append_domain if set
'''
grain = {}
if salt.utils.platform.is_proxy():
return grain
if 'append_domain' in __opts__:
grain['append_domain'] = __opts__['append_domain']
return grain
def fqdns():
'''
Return all known FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them (excluding 'lo' interface).
'''
# Provides:
# fqdns
grains = {}
fqdns = set()
addresses = salt.utils.network.ip_addrs(include_loopback=False,
interface_data=_INTERFACES)
addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False,
interface_data=_INTERFACES))
err_message = 'An exception occurred resolving address \'%s\': %s'
for ip in addresses:
try:
fqdns.add(socket.getfqdn(socket.gethostbyaddr(ip)[0]))
except socket.herror as err:
if err.errno == 0:
                # No FQDN could be resolved for this IP address; this is common,
                # so only log it at debug level.
log.debug("Unable to resolve address %s: %s", ip, err)
else:
log.error(err_message, ip, err)
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
grains['fqdns'] = sorted(list(fqdns))
return grains
def ip_fqdn():
'''
Return ip address and FQDN grains
'''
if salt.utils.platform.is_proxy():
return {}
ret = {}
ret['ipv4'] = salt.utils.network.ip_addrs(include_loopback=True)
ret['ipv6'] = salt.utils.network.ip_addrs6(include_loopback=True)
_fqdn = hostname()['fqdn']
for socket_type, ipv_num in ((socket.AF_INET, '4'), (socket.AF_INET6, '6')):
key = 'fqdn_ip' + ipv_num
if not ret['ipv' + ipv_num]:
ret[key] = []
else:
try:
start_time = datetime.datetime.utcnow()
info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info))
except socket.error:
timediff = datetime.datetime.utcnow() - start_time
if timediff.seconds > 5 and __opts__['__role'] == 'master':
log.warning(
'Unable to find IPv%s record for "%s" causing a %s '
'second timeout when rendering grains. Set the dns or '
'/etc/hosts for IPv%s to clear this.',
ipv_num, _fqdn, timediff, ipv_num
)
ret[key] = []
return ret
def ip_interfaces():
'''
Provide a dict of the connected interfaces and their ip addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip_interfaces': ret}
def ip4_interfaces():
'''
Provide a dict of the connected interfaces and their ip4 addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip4_interfaces': ret}
def ip6_interfaces():
'''
Provide a dict of the connected interfaces and their ip6 addresses
The addresses will be passed as a list for each interface
'''
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get('inet6', []):
if 'address' in inet:
iface_ips.append(inet['address'])
for secondary in ifaces[face].get('secondary', []):
if 'address' in secondary:
iface_ips.append(secondary['address'])
ret[face] = iface_ips
return {'ip6_interfaces': ret}
def hwaddr_interfaces():
'''
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
'''
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
if 'hwaddr' in ifaces[face]:
ret[face] = ifaces[face]['hwaddr']
return {'hwaddr_interfaces': ret}
def dns():
'''
Parse the resolver configuration file
.. versionadded:: 2016.3.0
'''
# Provides:
# dns
if salt.utils.platform.is_windows() or 'proxyminion' in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in ('nameservers', 'ip4_nameservers', 'ip6_nameservers',
'sortlist'):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {'dns': resolv} if resolv else {}
def get_machine_id():
'''
Provide the machine-id for machine/virtualization combination
'''
# Provides:
# machine-id
if platform.system() == 'AIX':
return _aix_get_machine_id()
locations = ['/etc/machine-id', '/var/lib/dbus/machine-id']
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.files.fopen(existing_locations[0]) as machineid:
return {'machine_id': machineid.read().strip()}
def path():
'''
Return the path
'''
# Provides:
# path
return {'path': os.environ.get('PATH', '').strip()}
def pythonversion():
'''
Return the Python version
'''
# Provides:
# pythonversion
return {'pythonversion': list(sys.version_info)}
def pythonpath():
'''
Return the Python path
'''
# Provides:
# pythonpath
return {'pythonpath': sys.path}
def pythonexecutable():
'''
Return the python executable in use
'''
# Provides:
# pythonexecutable
return {'pythonexecutable': sys.executable}
def saltpath():
'''
Return the path of the salt module
'''
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {'saltpath': os.path.dirname(salt_path)}
def saltversion():
'''
Return the version of salt
'''
# Provides:
# saltversion
from salt.version import __version__
return {'saltversion': __version__}
def zmqversion():
'''
Return the zeromq version
'''
# Provides:
# zmqversion
try:
import zmq
return {'zmqversion': zmq.zmq_version()} # pylint: disable=no-member
except ImportError:
return {}
def saltversioninfo():
'''
Return the version_info of salt
.. versionadded:: 0.17.0
'''
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {'saltversioninfo': list(__version_info__)}
def _hw_data(osdata):
'''
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
uuid
.. versionadded:: 0.9.5
'''
if salt.utils.platform.is_proxy():
return {}
grains = {}
if osdata['kernel'] == 'Linux' and os.path.exists('/sys/class/dmi/id'):
# On many Linux distributions basic firmware information is available via sysfs
# requires CONFIG_DMIID to be enabled in the Linux kernel configuration
sysfs_firmware_info = {
'biosversion': 'bios_version',
'productname': 'product_name',
'manufacturer': 'sys_vendor',
'biosreleasedate': 'bios_date',
'uuid': 'product_uuid',
'serialnumber': 'product_serial'
}
for key, fw_file in sysfs_firmware_info.items():
contents_file = os.path.join('/sys/class/dmi/id', fw_file)
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, 'r') as ifile:
grains[key] = salt.utils.stringutils.to_unicode(ifile.read().strip(), errors='replace')
if key == 'uuid':
grains['uuid'] = grains['uuid'].lower()
except (IOError, OSError) as err:
# PermissionError is new to Python 3, but corresponds to the EACESS and
# EPERM error numbers. Use those instead here for PY2 compatibility.
if err.errno == EACCES or err.errno == EPERM:
# Skip the grain if non-root user has no access to the file.
pass
elif salt.utils.path.which_bin(['dmidecode', 'smbios']) is not None and not (
salt.utils.platform.is_smartos() or
( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
osdata['kernel'] == 'SunOS' and
osdata['cpuarch'].startswith('sparc')
)):
# On SmartOS (possibly SunOS also) smbios only works in the global zone
# smbios is also not compatible with linux's smbios (smbios -s = print summarized)
grains = {
'biosversion': __salt__['smbios.get']('bios-version'),
'productname': __salt__['smbios.get']('system-product-name'),
'manufacturer': __salt__['smbios.get']('system-manufacturer'),
'biosreleasedate': __salt__['smbios.get']('bios-release-date'),
'uuid': __salt__['smbios.get']('system-uuid')
}
grains = dict([(key, val) for key, val in grains.items() if val is not None])
uuid = __salt__['smbios.get']('system-uuid')
if uuid is not None:
grains['uuid'] = uuid.lower()
for serial in ('system-serial-number', 'chassis-serial-number', 'baseboard-serial-number'):
serial = __salt__['smbios.get'](serial)
if serial is not None:
grains['serialnumber'] = serial
break
elif salt.utils.path.which_bin(['fw_printenv']) is not None:
# ARM Linux devices expose UBOOT env variables via fw_printenv
hwdata = {
'manufacturer': 'manufacturer',
'serialnumber': 'serial#',
'productname': 'DeviceDesc',
}
for grain_name, cmd_key in six.iteritems(hwdata):
result = __salt__['cmd.run_all']('fw_printenv {0}'.format(cmd_key))
if result['retcode'] == 0:
uboot_keyval = result['stdout'].split('=')
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
elif osdata['kernel'] == 'FreeBSD':
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
kenv = salt.utils.path.which('kenv')
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
'biosversion': 'smbios.bios.version',
'manufacturer': 'smbios.system.maker',
'serialnumber': 'smbios.system.serial',
'productname': 'smbios.system.product',
'biosreleasedate': 'smbios.bios.reldate',
'uuid': 'smbios.system.uuid',
}
for key, val in six.iteritems(fbsd_hwdata):
value = __salt__['cmd.run']('{0} {1}'.format(kenv, val))
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'OpenBSD':
sysctl = salt.utils.path.which('sysctl')
hwdata = {'biosversion': 'hw.version',
'manufacturer': 'hw.vendor',
'productname': 'hw.product',
'serialnumber': 'hw.serialno',
'uuid': 'hw.uuid'}
for key, oid in six.iteritems(hwdata):
value = __salt__['cmd.run']('{0} -n {1}'.format(sysctl, oid))
if not value.endswith(' value is not available'):
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'NetBSD':
sysctl = salt.utils.path.which('sysctl')
nbsd_hwdata = {
'biosversion': 'machdep.dmi.board-version',
'manufacturer': 'machdep.dmi.system-vendor',
'serialnumber': 'machdep.dmi.system-serial',
'productname': 'machdep.dmi.system-product',
'biosreleasedate': 'machdep.dmi.bios-date',
'uuid': 'machdep.dmi.system-uuid',
}
for key, oid in six.iteritems(nbsd_hwdata):
result = __salt__['cmd.run_all']('{0} -n {1}'.format(sysctl, oid))
if result['retcode'] == 0:
grains[key] = _clean_value(key, result['stdout'])
elif osdata['kernel'] == 'Darwin':
grains['manufacturer'] = 'Apple Inc.'
sysctl = salt.utils.path.which('sysctl')
hwdata = {'productname': 'hw.model'}
for key, oid in hwdata.items():
value = __salt__['cmd.run']('{0} -b {1}'.format(sysctl, oid))
if not value.endswith(' is invalid'):
grains[key] = _clean_value(key, value)
elif osdata['kernel'] == 'SunOS' and osdata['cpuarch'].startswith('sparc'):
# Depending on the hardware model, commands can report different bits
# of information. With that said, consolidate the output from various
# commands and attempt various lookups.
data = ""
for (cmd, args) in (('/usr/sbin/prtdiag', '-v'), ('/usr/sbin/prtconf', '-vp'), ('/usr/sbin/virtinfo', '-a')):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable
data += __salt__['cmd.run']('{0} {1}'.format(cmd, args))
data += '\n'
sn_regexes = [
re.compile(r) for r in [
r'(?im)^\s*Chassis\s+Serial\s+Number\n-+\n(\S+)', # prtdiag
r'(?im)^\s*chassis-sn:\s*(\S+)', # prtconf
r'(?im)^\s*Chassis\s+Serial#:\s*(\S+)', # virtinfo
]
]
obp_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)', # prtdiag
r'(?im)^\s*version:\s*\'OBP\s+(\S+)\s+(\S+)', # prtconf
]
]
fw_regexes = [
re.compile(r) for r in [
r'(?im)^\s*Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)', # prtdiag
]
]
uuid_regexes = [
re.compile(r) for r in [
r'(?im)^\s*Domain\s+UUID:\s*(\S+)', # virtinfo
]
]
manufacture_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)', # prtdiag
]
]
product_regexes = [
re.compile(r) for r in [
r'(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)', # prtdiag
r'(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)', # prtconf
r'(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)', # prtconf
]
]
for regex in sn_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['serialnumber'] = res.group(1).strip().replace("'", "")
break
for regex in obp_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
obp_rev, obp_date = res.groups()[0:2] # Limit the number in case we found the data in multiple places
grains['biosversion'] = obp_rev.strip().replace("'", "")
grains['biosreleasedate'] = obp_date.strip().replace("'", "")
for regex in fw_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
fw_rev, fw_date = res.groups()[0:2]
grains['systemfirmware'] = fw_rev.strip().replace("'", "")
grains['systemfirmwaredate'] = fw_date.strip().replace("'", "")
break
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['uuid'] = res.group(1).strip().replace("'", "")
break
for regex in manufacture_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['manufacture'] = res.group(1).strip().replace("'", "")
break
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
grains['product'] = t_productname
grains['productname'] = t_productname
break
elif osdata['kernel'] == 'AIX':
cmd = salt.utils.path.which('prtconf')
if cmd:
data = __salt__['cmd.run']('{0}'.format(cmd)) + os.linesep
for dest, regstring in (('serialnumber', r'(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)'),
('systemfirmware', r'(?im)^\s*Firmware\s+Version:\s+(.*)')):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", '')
product_regexes = [re.compile(r'(?im)^\s*System\s+Model:\s+(\S+)')]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['manufacturer'], grains['productname'] = res.group(1).strip().replace("'", "").split(",")
break
else:
log.error('The \'prtconf\' binary was not found in $PATH.')
return grains
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID
like this.
'''
# Provides:
# server_id
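    # Illustrative example: an id of 'web01.example.com' always maps to the same
    # integer in the range [0, 2**31); the exact value depends on the hash.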
if salt.utils.platform.is_proxy():
return {}
id_ = __opts__.get('id', '')
id_hash = None
py_ver = sys.version_info[:2]
if py_ver >= (3, 3):
# Python 3.3 enabled hash randomization, so we need to shell out to get
# a reliable hash.
id_hash = __salt__['cmd.run'](
[sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
env={'PYTHONHASHSEED': '0'}
)
try:
id_hash = int(id_hash)
except (TypeError, ValueError):
log.debug(
'Failed to hash the ID to get the server_id grain. Result of '
'hash command: %s', id_hash
)
id_hash = None
if id_hash is None:
# Python < 3.3 or error encountered above
id_hash = hash(id_)
return {'server_id': abs(id_hash % (2 ** 31))}
def get_master():
'''
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
'''
# Provides:
# master
return {'master': __opts__.get('master', '')}
def default_gateway():
'''
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
`default via <ip>` format for default gateways, it will also parse out the
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
If the `ip` command is unavailable, no grains will be populated.
Currently does not support multiple default gateways. The grains will be
set to the first default gateway found.
List of grains:
ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
'''
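    # Illustrative example of the `ip -4 route show` output this parser expects
    # (not taken from a real host):
    #   default via 192.168.1.1 dev eth0 proto dhcp metric 100
    #   192.168.1.0/24 dev eth0 proto kernel scope link src 192.168.1.42
    # which would set ip_gw=True and ip4_gw='192.168.1.1'.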
grains = {}
ip_bin = salt.utils.path.which('ip')
if not ip_bin:
return {}
grains['ip_gw'] = False
grains['ip4_gw'] = False
grains['ip6_gw'] = False
for ip_version in ('4', '6'):
try:
out = __salt__['cmd.run']([ip_bin, '-' + ip_version, 'route', 'show'])
for line in out.splitlines():
if line.startswith('default'):
grains['ip_gw'] = True
grains['ip{0}_gw'.format(ip_version)] = True
try:
via, gw_ip = line.split()[1:3]
except ValueError:
pass
else:
if via == 'via':
grains['ip{0}_gw'.format(ip_version)] = gw_ip
break
except Exception:
continue
return grains
|
the-stack_106_15389
|
#!/usr/bin/env python3
from gpiozero import Button
from signal import pause
print("""Scroll HAT Mini: buttons.py
Demonstrates the use of Scroll HAT Mini's buttons with gpiozero.
Press Ctrl+C to exit!
""")
def pressed(button):
button_name = button_map[button.pin.number]
print(f"Button {button_name} pressed!")
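# BCM pin numbers of the Scroll HAT Mini buttons, mapped to their labels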
button_map = {5: "A",
6: "B",
16: "X",
24: "Y"}
button_a = Button(5)
button_b = Button(6)
button_x = Button(16)
button_y = Button(24)
try:
button_a.when_pressed = pressed
button_b.when_pressed = pressed
button_x.when_pressed = pressed
button_y.when_pressed = pressed
pause()
except KeyboardInterrupt:
button_a.close()
button_b.close()
button_x.close()
button_y.close()
|
the-stack_106_15390
|
"""
An image renderer that uses ueberzug
"""
import os
import time
import urwid
import ueberzug.lib.v0 as ueberzug
import urwid_ueberzogen as uw_uz
import lookatme.config
from lookatme.exceptions import IgnoredByContrib
CANVAS = None
def root_urwid_widget(to_wrap):
global CANVAS
CANVAS = ueberzug.Canvas().__enter__()
return uw_uz.Container(CANVAS, urwid.Pile([to_wrap]), visibility=ueberzug.Visibility.VISIBLE)
def image(link_uri, title, text):
base_dir = lookatme.config.SLIDE_SOURCE_DIR
full_path = os.path.join(base_dir, link_uri)
if not os.path.exists(full_path):
raise Exception("Local files only for images! (for now) {!r}".format(link_uri))
placement = CANVAS.create_placement(
time.time(),
path=full_path,
scaler=ueberzug.ScalerOption.FIT_CONTAIN.value,
)
try:
height = int(text)
    except (TypeError, ValueError):
        # Fall back to the default height when the alt text is not an integer
        height = 30
blank_box = urwid.BoxAdapter(urwid.SolidFill(" "), height=height)
img = uw_uz.Image(placement, blank_box)
return [img]
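# Illustrative markdown usage (assuming lookatme passes the alt text as `text`):
#   
# would render ./assets/diagram.png above, using the alt text "20" as the
# height in terminal rows.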
def shutdown():
if CANVAS is None:
return
    # Pass the standard (exc_type, exc_value, traceback) arguments explicitly in
    # case the canvas implementation requires them (assumption; mirrors the
    # manual __enter__ call above).
    CANVAS.__exit__(None, None, None)
|
the-stack_106_15391
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.rnn_utils - rnn support
"""
from __future__ import unicode_literals
import logging
from enum import Enum
import numpy as np
from onnx import helper
from tf2onnx import utils
from tf2onnx.graph import Node
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher # pylint: disable=unused-import
# pylint: disable=invalid-name,unused-argument,missing-docstring
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("tf2onnx.rewriter.rnn_utils")
class REWRITER_RESULT(Enum):
SKIP = 1
OK = 2
FAIL = 3
class RnnWeight:
def __init__(self, node, np_val, np_dtype):
self.node = node
self.value = np_val
self.dtype = np_dtype
class RnnWeights:
def __init__(self, kernel, bias, forget_bias):
self.kernel = kernel
self.bias = bias
self.forget_bias = forget_bias
class RnnInitializers:
def __init__(self, c_init, h_init, c_h_shared_init):
self.c_init_input_id = None
self.h_init_input_id = None
self.share_init_node = None
self.share_init_input_id = None
if c_h_shared_init:
self.share_init_input_id = c_h_shared_init
self.share_init_node = True
else:
self.c_init_input_id = c_init
self.h_init_input_id = h_init
self.share_init_node = False
class RnnProperties:
def __init__(self):
# RNN input who are outside of rnn scope
self.input_node = None
self.input_id = None
self.var_initializers = {}
self.onnx_input_ids = {}
self.time_major = False
self.x_input_id = None # used to serve lstm's 1st input
self.input_size = None
self.hidden_size = None
self.batch_size_node = None # only for fill constant workaround
def is_valid(self):
if not self.input_node:
log.error("no input node found for current rnn, skip")
return False
log.debug("input node with port id %s", self.input_id)
return True
# TensorFlow LSTMCell/BasicLSTMCell computation graph matching
xc_pattern = OpTypePattern('Split', inputs=[
OpTypePattern("Const"), # axis for split
OpTypePattern("BiasAdd", name="bias_add", inputs=[
OpTypePattern("MatMul", inputs=[
OpTypePattern("ConcatV2|Concat", name="xh"),
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="cell_kernel"),
]),
]),
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="cell_bias"),
]),
]),
])
lstmcell_pattern = \
OpTypePattern('Mul', name='ht', inputs=[
OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern]),
OpTypePattern('Tanh', inputs=[
OpTypePattern("Add", name="ct", inputs=[
OpTypePattern("Mul", inputs=[
OpTypePattern("Sigmoid", name="ft", inputs=[
OpTypePattern("Add", inputs=[
xc_pattern,
OpTypePattern("*", name="ft_bias"),
]),
]),
OpTypePattern("*"),
]),
OpTypePattern("Mul", inputs=[
OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern]),
OpTypePattern("Tanh", name="gt", inputs=[xc_pattern]),
]),
]),
]),
])
# input sequence: top to down, left to right
# split into update gate and reset gate
gru_split_pattern = \
OpTypePattern("Split", inputs=[
OpTypePattern("Const"), # split dim, a constant
OpTypePattern("Sigmoid", inputs=[
OpTypePattern("BiasAdd", inputs=[
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="gate_bias")
]),
OpTypePattern("MatMul", name="update_reset_gate", inputs=[
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="gate_kernel")
]),
OpTypePattern("ConcatV2|Concat", name="cell_inputs")
])
])
])
])
grucell_pattern = \
OpTypePattern("Add", name="cell_output", inputs=[
OpTypePattern("Mul", inputs=[
gru_split_pattern,
OpTypePattern("Identity")
]),
OpTypePattern("Mul", inputs=[
OpTypePattern("Sub", inputs=[
OpTypePattern("Const"), # 1-u
gru_split_pattern
]),
OpTypePattern("*", name="optional_activation", inputs=[
OpTypePattern("BiasAdd", inputs=[
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="hidden_bias")
]),
OpTypePattern("MatMul", inputs=[
OpTypePattern("Enter", inputs=[
OpTypePattern("*", name="hidden_kernel")
]),
OpTypePattern("ConcatV2|Concat")
])
])
])
])
])
grublockcell_pattern = OpTypePattern("GRUBlockCell", name="GRUBlockCell")
class RNNUnitType(Enum):
LSTMCell = 0 # TF LSTMCell and BasicLSTMCell share the same pattern
GRUCell = 1
GRUBlockCell = 2
# describe the body graph's input and output node
class SubGraphMetadata(object):
def __init__(self, g, input_ids, output_ids, initial_input_ids):
self.g = g
self.input_ids = input_ids
self.output_ids = output_ids
self.initial_input_ids = initial_input_ids
# sub-graph boundary
self.other_enter_input_ids = []
class BodyGraphDict():
BODY_GRAPH_DICT = {}
def __init__(self, g):
self.g = g
@staticmethod
def add_body_graph_info(body_owner_name, body_graph):
if body_owner_name not in BodyGraphDict.BODY_GRAPH_DICT:
BodyGraphDict.BODY_GRAPH_DICT[body_owner_name] = body_graph
else:
raise ValueError("body_owner_name " + body_owner_name + " already exists as a key")
@staticmethod
def pop_body_graph_info(body_owner_name):
val = BodyGraphDict.BODY_GRAPH_DICT[body_owner_name]
del BodyGraphDict.BODY_GRAPH_DICT[body_owner_name]
return val
@staticmethod
def has_body_graph_info(body_owner_name):
return body_owner_name in BodyGraphDict.BODY_GRAPH_DICT
@staticmethod
def get_body_graph_output_names():
output_names = []
for k in BodyGraphDict.BODY_GRAPH_DICT:
_output_names = BodyGraphDict.BODY_GRAPH_DICT[k].output_ids
output_names.extend(_output_names)
return set(output_names)
rnn_cell_patterns = {
RNNUnitType.LSTMCell: lstmcell_pattern,
RNNUnitType.GRUCell: grucell_pattern,
RNNUnitType.GRUBlockCell: grublockcell_pattern
}
def get_pattern(cell_type_name):
return rnn_cell_patterns[cell_type_name]
def get_weights_from_const_node(node):
temp = node
val = None
dtype = None
# this would help ignore Identity in non-const_folded graph.
while temp.type == 'Identity':
temp = temp.inputs[0]
if temp and temp.type == 'Const':
val = temp.get_tensor_value()
dtype = utils.ONNX_TO_NUMPY_DTYPE[temp.dtype]
log.debug("found weights %s", temp.name)
else:
log.error("weight node seems not to be Const, skip, node name is %s", temp.name)
return None
return RnnWeight(node, val, dtype)
def check_is_timemajor_transpose(node):
# TensorFlow transpose node has perm as its second input
if node.type != "Transpose":
return False
perm_node = node.inputs[1]
if perm_node.is_const():
return list(node.inputs[1].get_tensor_value()) == [1, 0, 2]
if check_is_unfolded_perm(perm_node):
return True
raise ValueError("Not supported yet")
# todo: fix this
def check_is_unfolded_perm(perm_node):
    # For some cases (e.g. HallWay), the perm input is a ConcatV2 that should
    # have been computed during constant folding. TODO: investigate why it was
    # not folded. Current workaround: read the constituent constant values and
    # check them explicitly here.
if perm_node.type == "ConcatV2" and len(perm_node.inputs) == 3:
const_node_val = perm_node.inputs[0].get_tensor_value()
if list(const_node_val) != [1, 0]:
return False
range_node = perm_node.inputs[1]
range_start = range_node.inputs[0].get_tensor_value()
range_limit = range_node.inputs[1].get_tensor_value()
range_delta = range_node.inputs[2].get_tensor_value()
if range_node.type == "Range" and range_start == [2] and range_limit == [3] and range_delta == [1]:
# we just hard code this now
# todo: refine this
return True
return False
def make_onnx_node(g, op_type, inputs, attr=None, output_count=1, skip_conversion=True, op_name_scope=None):
if attr is None:
attr = {}
op_name_basis = op_type
if op_name_scope:
op_name_basis = "_".join([op_name_scope, op_type])
node_name = utils.make_name(op_name_basis)
outputs = [node_name + ":" + str(i) for i in np.arange(output_count)]
node = Node(
helper.make_node(op_type, inputs, outputs, name=node_name, **attr),
g, skip_conversion=skip_conversion)
return node
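# Illustrative usage (tensor names are hypothetical): wrap two tensor outputs
# into a single ONNX Concat node on graph g:
#   concat = make_onnx_node(g, "Concat", ["X:0", "Y:0"], attr={"axis": 0})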
def is_reverse_op(op):
return op.type in ("ReverseV2", "ReverseSequence")
def is_concat_op(op):
return op.type in ("ConcatV2", "ConcatV3")
def is_tensor_array_scatter_op(op):
return op.type in ("TensorArrayScatterV2", "TensorArrayScatterV3")
def is_tensor_array_gather_op(op):
return op.type in ("TensorArrayGatherV2", "TensorArrayGatherV3")
def is_tensor_array_read_op(op):
return op.type in ("TensorArrayReadV2", "TensorArrayReadV3")
def is_tensor_array_write_op(op):
return op.type in ("TensorArrayWriteV2", "TensorArrayWriteV3")
def is_tensor_array_op(op):
return op.type in ("TensorArrayV2", "TensorArrayV3")
def is_tensor_array_size_op(op):
return op.type in ("TensorArraySizeV2", "TensorArraySizeV3")
def is_placeholder_op(op):
return op.type == "Placeholder"
def is_loopcond_op(op):
return op.type == "LoopCond"
|
the-stack_106_15398
|
# from https://github.com/nasa-jpl/itslive
import pandas as pd
from shapely.geometry import box
import ipywidgets as widgets
import itertools
from ipyleaflet import projections, basemaps, DrawControl
import numpy as np
north_3413 = {
'name': 'EPSG:3413',
'custom': True,
'proj4def': '+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs',
'origin': [-4194304, 4194304],
'bounds': [
[-4194304, -4194304],
[4194304, 4194304]
],
'resolutions': [
16384.0,
8192.0,
4096.0,
2048.0,
1024.0,
512.0,
256.0
]
}
south_3031 = {
'name': 'EPSG:3031',
'custom': True,
'proj4def': '+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs',
'origin': [-4194304, 4194304],
'bounds': [
[-4194304, -4194304],
[4194304, 4194304]
],
'resolutions': [
16384.0,
8192.0,
4096.0,
2048.0,
1024.0,
512.0,
256.0
]
}
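# NOTE: the dict below shadows the `projections` name imported from ipyleaflet;
# the `projections.EPSG3857` lookup is evaluated while the dict literal is being
# built, i.e. before the name is rebound, so it still resolves to the import.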
projections = {
'global': {
'base_map': basemaps.NASAGIBS.BlueMarble,
'projection': projections.EPSG3857,
'center': (0,0)
},
'north': {
'base_map': basemaps.NASAGIBS.BlueMarble3413,
'projection': north_3413,
'center': (90, 0)
},
'south': {
'base_map': basemaps.NASAGIBS.BlueMarble3031,
'projection': south_3031,
'center': (-90, 0)
}
}
def dates_slider_control(properties):
slider_dates = [(date.strftime(' %Y-%m-%d '), date) for date in
pd.date_range(properties['start_date'],
properties['end_date'],
freq='D')]
slider_index = (0, len(slider_dates)-1)
date_slider_control = widgets.SelectionRangeSlider(
options=slider_dates,
index=slider_index,
description='Date Range',
orientation='horizontal',
layout={'width': '100%'})
return date_slider_control
def draw_control(properties):
control = DrawControl(circlemarker={
"shapeOptions": {
"fillColor": "#efed69",
"color": "#efed69",
"fillOpacity": 1.0
}
},
polygon={},
polyline={},
rectangle={}
)
return control
def pixels_control(properties):
valid_percentages = [str(p) for p in range(0, 100, 10)]
    valid_percentages[0] = '1'  # keep all options as strings for the Dropdown
pixels = widgets.Dropdown(
options=valid_percentages,
disabled=False,
layout={'width': 'max-content',
'display': 'flex',
'description_width': 'initial'}
)
return pixels
def time_delta_control(properties):
time_delta = widgets.Dropdown(
options=['any', '17', '33', '67', '135', '365'],
disabled=False,
layout={'width': 'max-content',
'display': 'flex',
'description_width': 'initial'}
)
return time_delta
def projection_control(properties):
control = widgets.Dropdown(
options=['global', 'south', 'north'],
description='Hemisphere:',
disabled=False,
value=properties['hemisphere']
)
return control
def format_polygon(geometry):
coords = [[str(float("{:.4f}".format(coord[0]))),str(float("{:.4f}".format(coord[1])))] for coord in geometry['coordinates'][0]]
coords = list(itertools.chain.from_iterable(coords))
polygonstr = ','.join(coords)
return polygonstr
def get_minimal_bbox(geometry):
"""
    A very rough approximation of a small bbox (less than 1 km across) around a
    given lon-lat point.
    params: geometry, a GeoJSON point geometry
"""
lon = geometry['coordinates'][0]
lat = geometry['coordinates'][1]
if lon < 0.0:
lon_offset = -0.001
else:
lon_offset = 0.001
if lat < 0.0:
lat_offset = -0.001
else:
lat_offset = 0.001
bbox = box(lon - lon_offset, lat - lat_offset, lon + lon_offset, lat + lat_offset)
coords = [[str(float("{:.4f}".format(coord[0]))),str(float("{:.4f}".format(coord[1])))] for coord in bbox.exterior.coords]
coords = list(itertools.chain.from_iterable(coords))
return ','.join(coords)
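# Illustrative usage:
#   get_minimal_bbox({'type': 'Point', 'coordinates': [-49.3, 69.2]})
# returns the corner coordinates of a roughly 0.002-degree box around the point
# as a single comma-separated string.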
|
the-stack_106_15399
|
from logging import warning
from os import path
from typing import Optional, List, Union
from requests import Response
import re
from lxml.html import HtmlElement
from manga_py.http import Http
from manga_py.http.flare_solver import Http as FS_Http
from .params import ProviderParams
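# Captures the scheme and host portion of a Cloudflare-proxy URL, e.g.
# 'https://cf.example.org/path' -> 'https://cf.example.org' (example value only)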
CF_PROXY_RE = re.compile(r'(https?://[^/]+)')
class Base(ProviderParams):
_storage = None
_params = None
_image_params = None
_http_kwargs = None
__http = None
__arguments = None
_use_flare_solver = False
__flare_solver_http = None
_flare_solver_url = None
chapter_id = 0
quiet = False
original_url = None
def __init__(self):
self._storage = {
'cookies': {},
'main_content': None,
'chapters': [],
'current_chapter': 0,
'proxies': {},
'domain_uri': None,
}
self._params = {
'destination': 'Manga',
'cf-protect': False,
'user_agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0',
}
self._image_params = {
'crop': (0, 0, 0, 0),
# 'crop': (left, upper, right, lower)
'auto_crop': False,
# 'auto_crop': True,
}
self._http_kwargs = {}
def _archive_type(self) -> str:
arc_type = 'zip'
if self._params['cbz']:
arc_type = 'cbz'
return arc_type
def get_url(self):
return self._params['url']
def _build_http_params(self, params):
if params is None:
params = {}
params.setdefault('allow_webp', not self._params.get('no_webp', False))
params.setdefault('referer', self._storage.get('referer', self.domain))
params.setdefault('user_agent', self._get_user_agent())
params.setdefault('proxies', self._storage.get('proxies', None))
params.setdefault('cookies', self._storage.get('cookies', None))
params.setdefault('kwargs', self._http_kwargs)
return params
def normalize_uri(self, uri, referer=None):
return self.http_normal().normalize_uri(uri=uri, referer=referer)
def http(self, new=False, params=None) -> Union[FS_Http, Http]:
if self._use_flare_solver:
return self.flare_solver_http(new, params)
else:
return self.http_normal(new, params)
def flare_solver_http(self, new=False, params=None) -> FS_Http:
        # Allow webp unless the caller explicitly disabled it via 'no_webp'
        allow_webp = not (params or {}).get('no_webp', False)
headers = {}
if allow_webp:
headers['Accept'] = Http.webp_header
if self.__flare_solver_http is None:
self.__flare_solver_http = FS_Http(self._flare_solver_url, self._get_user_agent())
self.__flare_solver_http.create_session()
if new:
http = FS_Http(self._flare_solver_url, self._get_user_agent())
http.create_session()
return http
return self.__flare_solver_http
def http_normal(self, new=False, params=None) -> Http:
http_params = self._build_http_params(params)
if new:
http = Http(**http_params)
return http
if self.__http is None:
self.__http = Http(**http_params)
return self.__http
def http_get(self, url: str, headers: dict = None, cookies: dict = None):
http = self.http()
with http.get(url=url, headers=headers, cookies=cookies) as resp:
if type(http) == Http:
return resp.text
else:
content = resp.json().get('solution', {}).get('response', b'')
try:
return content.decode()
except AttributeError:
return content
def http_post(self, url: str, headers: dict = None, cookies: dict = None, data=()):
http = self.http()
with http.post(url=url, headers=headers, cookies=cookies, data=data) as resp:
if type(http) == Http:
return resp.text
else:
return resp.json().get('solution', {}).get('response', b'').decode()
def _get_user_agent(self):
return self._params.get('user_agent', None)
@classmethod
def __normalize_chapters(cls, n, element):
if isinstance(element, HtmlElement):
return n(element.get('href'))
if isinstance(element, str):
return n(element)
return element
def _prepare_chapters(self, chapters):
n = self.normalize_uri
items = []
if chapters and len(chapters):
for i in chapters:
url = self.__normalize_chapters(n, i)
items.append(url)
else:
warning('Chapters list empty. Check %s' % self.get_url())
return items
def book_meta(self) -> dict:
return {}
def _image_name(self, idx, filename):
fn, extension = path.splitext(filename)
_path = '{:0>3}_{}'.format(idx, fn)
if self._params['rename_pages']:
_path = '{:0>3}'.format(idx)
return _path + extension
def chapter_for_json(self) -> str:
return self.chapter
def put_info_json(self, meta):
# manga_name, url, directory
pass
def _fill_arguments(self, arguments: List[str]):
know_args = [
'login',
'password',
'language',
'translator',
]
if self.__arguments is None:
self.__arguments = {}
for arg in arguments:
key, value = arg.split('=', 1) # type: str, str
if key in know_args:
self.__arguments[key] = value
def arg(self, key: str) -> Optional[str]:
if self.__arguments is None:
return None
return self.__arguments.get(key)
def allow_auto_change_url(self):
return True
def cookies(self, response: Response) -> dict:
if self._use_flare_solver:
return response.json().get('solution', {}).get('cookies')
return response.cookies.__dict__
@property
def cf_proxy(self) -> Optional[str]:
cf = self._params.get('cf_proxy')
if cf is not None:
cf = CF_PROXY_RE.search(cf)
return cf.group(1) if cf else None
def __del__(self):
if self.__flare_solver_http is not None:
self.flare_solver_http().destroy_session()
|
the-stack_106_15400
|
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.activity_jobs_one_off."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import activity_jobs_one_off
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import python_utils
gae_search_services = models.Registry.import_search_services()
(
collection_models, config_models,
exp_models, question_models,
skill_models, story_models,
topic_models) = (
models.Registry.import_models([
models.NAMES.collection, models.NAMES.config,
models.NAMES.exploration, models.NAMES.question,
models.NAMES.skill, models.NAMES.story,
models.NAMES.topic]))
class OneOffReindexActivitiesJobTests(test_utils.GenericTestBase):
def setUp(self):
super(OneOffReindexActivitiesJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
rights_manager.publish_exploration(self.owner, exp.id)
collections = [collection_domain.Collection.create_default_collection(
'%s' % i,
title='title %d' % i,
category='category%d' % i
) for i in python_utils.RANGE(3, 6)]
for collection in collections:
collection_services.save_new_collection(self.owner_id, collection)
rights_manager.publish_collection(self.owner, collection.id)
self.process_and_flush_pending_tasks()
def test_standard_operation(self):
job_id = (
activity_jobs_one_off.IndexAllActivitiesJobManager.create_new())
activity_jobs_one_off.IndexAllActivitiesJobManager.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
indexed_docs = []
def mock_add_documents_to_index(docs, index):
indexed_docs.extend(docs)
self.assertIn(index, (search_services.SEARCH_INDEX_EXPLORATIONS,
search_services.SEARCH_INDEX_COLLECTIONS))
add_docs_swap = self.swap(
gae_search_services, 'add_documents_to_index',
mock_add_documents_to_index)
with add_docs_swap:
self.process_and_flush_pending_tasks()
ids = [doc['id'] for doc in indexed_docs]
titles = [doc['title'] for doc in indexed_docs]
categories = [doc['category'] for doc in indexed_docs]
for index in python_utils.RANGE(5):
self.assertIn('%s' % index, ids)
self.assertIn('title %d' % index, titles)
self.assertIn('category%d' % index, categories)
self.assertIsNone(
activity_jobs_one_off.IndexAllActivitiesJobManager.reduce(
'key', 'value'))
|
the-stack_106_15401
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import re
import math
import logging
import os
from sklearn.externals import joblib
from .taggers import Tagger, extract_sequence_features
from .embeddings import WordSequenceEmbedding, CharacterSequenceEmbedding
from sklearn.preprocessing import LabelBinarizer
DEFAULT_ENTITY_TOKEN_SPAN_INDEX = 2
GAZ_PATTERN_MATCH = r'in-gaz\|type:(\w+)\|pos:(\w+)\|'
REGEX_TYPE_POSITIONAL_INDEX = 1
DEFAULT_LABEL = 'B|UNK'
DEFAULT_GAZ_LABEL = 'O'
RANDOM_SEED = 1
ZERO_INITIALIZER_VALUE = 0
logger = logging.getLogger(__name__)
class LstmModel(Tagger):
"""This class encapsulates the bi-directional LSTM model and provides
the correct interface for use by the tagger model"""
def fit(self, X, y):
examples_arr = np.asarray(X, dtype='float32')
labels_arr = np.asarray(y, dtype='int32')
self._fit(examples_arr, labels_arr)
return self
def predict(self, X, dynamic_resource=None):
encoded_examples_arr = np.asarray(X, dtype='float32')
tags_by_example_arr = self._predict(encoded_examples_arr)
resized_predicted_tags = []
for query, seq_len in zip(tags_by_example_arr, self.sequence_lengths):
resized_predicted_tags.append(query[:seq_len])
return resized_predicted_tags
def set_params(self, **parameters):
"""
Initialize params for the LSTM. The keys in the parameters dictionary
are as follows:
Args:
parameters (dict): The keys in the parameters dictionary are as follows:
number_of_epochs: The number of epochs to run (int)
batch_size: The batch size for mini-batch training (int)
token_lstm_hidden_state_dimension: The hidden state
dimension of the LSTM cell (int)
                learning_rate: The learning rate of the optimizer (float)
                optimizer: The optimizer used to train the network (str)
                display_epoch: The number of epochs after which the
                network displays common stats like accuracy (int)
                padding_length: The fixed length each query is padded or
                truncated to (int)
token_embedding_dimension: The embedding dimension of the word (int)
token_pretrained_embedding_filepath: The pretrained embedding file-path (str)
                dense_keep_prob: The dropout keep probability of the dense layers (float)
                lstm_input_keep_prob: The dropout keep probability of the inputs to the LSTM cell (float)
                lstm_output_keep_prob: The dropout keep probability of the outputs of the LSTM cell (float)
gaz_encoding_dimension: The gazetteer encoding dimension (int)
"""
self.number_of_epochs = parameters.get('number_of_epochs', 20)
self.batch_size = parameters.get('batch_size', 20)
self.token_lstm_hidden_state_dimension = \
parameters.get('token_lstm_hidden_state_dimension', 300)
self.learning_rate = parameters.get('learning_rate', 0.005)
self.optimizer_tf = parameters.get('optimizer', 'adam')
self.padding_length = parameters.get('padding_length', 20)
self.display_epoch = parameters.get('display_epoch', 20)
self.token_embedding_dimension = parameters.get('token_embedding_dimension', 300)
self.token_pretrained_embedding_filepath = \
parameters.get('token_pretrained_embedding_filepath')
self.dense_keep_probability = parameters.get('dense_keep_prob', 0.5)
self.lstm_input_keep_prob = parameters.get('lstm_input_keep_prob', 0.5)
self.lstm_output_keep_prob = parameters.get('lstm_output_keep_prob', 0.5)
self.gaz_encoding_dimension = parameters.get('gaz_encoding_dimension', 100)
self.use_crf_layer = parameters.get('use_crf_layer', True)
self.use_char_embeddings = parameters.get('use_character_embeddings', False)
self.char_window_sizes = parameters.get('char_window_sizes', [5])
self.max_char_per_word = parameters.get('maximum_characters_per_word', 20)
self.character_embedding_dimension = parameters.get('character_embedding_dimension', 10)
self.word_level_character_embedding_size = \
parameters.get('word_level_character_embedding_size', 40)
def get_params(self, deep=True):
return self.__dict__
def construct_tf_variables(self):
"""
Constructs the variables and operations in the TensorFlow session graph
"""
with self.graph.as_default():
self.dense_keep_prob_tf = tf.placeholder(tf.float32, name='dense_keep_prob_tf')
self.lstm_input_keep_prob_tf = \
tf.placeholder(tf.float32, name='lstm_input_keep_prob_tf')
self.lstm_output_keep_prob_tf = \
tf.placeholder(tf.float32, name='lstm_output_keep_prob_tf')
self.query_input_tf = tf.placeholder(tf.float32,
[None,
self.padding_length,
self.token_embedding_dimension],
name='query_input_tf')
self.gaz_input_tf = tf.placeholder(tf.float32,
[None,
self.padding_length,
self.gaz_dimension],
name='gaz_input_tf')
self.label_tf = tf.placeholder(tf.int32,
[None,
int(self.padding_length),
self.output_dimension],
name='label_tf')
self.batch_sequence_lengths_tf = tf.placeholder(tf.int32, shape=[None],
name='batch_sequence_lengths_tf')
self.batch_sequence_mask_tf = tf.placeholder(
tf.bool, shape=[None], name='batch_sequence_mask_tf')
if self.use_char_embeddings:
self.char_input_tf = tf.placeholder(tf.float32,
[None,
self.padding_length,
self.max_char_per_word,
self.character_embedding_dimension],
name='char_input_tf')
combined_embedding_tf = self._construct_embedding_network()
self.lstm_output_tf = self._construct_lstm_network(combined_embedding_tf)
self.lstm_output_softmax_tf = tf.nn.softmax(self.lstm_output_tf,
name='output_softmax_tensor')
self.optimizer_tf, self.cost_tf = self._define_optimizer_and_cost()
self.global_init = tf.global_variables_initializer()
self.local_init = tf.local_variables_initializer()
self.saver = tf.train.Saver()
def extract_features(self, examples, config, resources, y=None, fit=True):
"""Transforms a list of examples into features that are then used by the
deep learning model.
Args:
examples (list of mindmeld.core.Query): a list of queries
config (ModelConfig): The ModelConfig which may contain information used for feature
extraction
resources (dict): Resources which may be used for this model's feature extraction
y (list): A list of label sequences
Returns:
(sequence_embeddings, encoded_labels, groups): features for the LSTM network
"""
if y:
# Train time
self.resources = resources
padded_y = self._pad_labels(y, DEFAULT_LABEL)
y_flat = [item for sublist in padded_y for item in sublist]
encoded_labels_flat = self.label_encoder.fit_transform(y_flat)
encoded_labels = []
start_index = 0
for label_sequence in padded_y:
encoded_labels.append(
encoded_labels_flat[start_index: start_index + len(label_sequence)])
start_index += len(label_sequence)
gaz_entities = [k for k in self.resources.get('gazetteers', {}).keys()]
gaz_entities.append(DEFAULT_GAZ_LABEL)
self.gaz_encoder.fit(gaz_entities)
# The gaz dimension are the sum total of the gazetteer entities and
# the 'other' gaz entity, which is the entity for all non-gazetteer tokens
self.gaz_dimension = len(gaz_entities)
self.output_dimension = len(self.label_encoder.classes_)
else:
# Predict time
encoded_labels = None
# Extract features and classes
x_sequence_embeddings_arr, self.gaz_features_arr, self.char_features_arr = \
self._get_features(examples)
self.sequence_lengths = self._extract_seq_length(examples)
# There are no groups in this model
groups = None
return x_sequence_embeddings_arr, encoded_labels, groups
def setup_model(self, config):
self.set_params(**config.params)
self.label_encoder = LabelBinarizer()
self.gaz_encoder = LabelBinarizer()
self.graph = tf.Graph()
self.saver = None
self.example_type = config.example_type
self.features = config.features
self.query_encoder = WordSequenceEmbedding(
self.padding_length,
self.token_embedding_dimension,
self.token_pretrained_embedding_filepath)
if self.use_char_embeddings:
self.char_encoder = CharacterSequenceEmbedding(
self.padding_length,
self.character_embedding_dimension,
self.max_char_per_word)
def construct_feed_dictionary(self,
batch_examples,
batch_char,
batch_gaz,
batch_seq_len,
batch_labels=list()):
"""Constructs the feed dictionary that is used to feed data into the tensors
Args:
batch_examples (ndarray): A batch of examples
batch_char (ndarray): A batch of character features
batch_gaz (ndarray): A batch of gazetteer features
batch_seq_len (ndarray): A batch of sequence length of each query
batch_labels (ndarray): A batch of labels
Returns:
The feed dictionary
"""
return_dict = {
self.query_input_tf: batch_examples,
self.batch_sequence_lengths_tf: batch_seq_len,
self.gaz_input_tf: batch_gaz,
self.dense_keep_prob_tf: self.dense_keep_probability,
self.lstm_input_keep_prob_tf: self.lstm_input_keep_prob,
self.lstm_output_keep_prob_tf: self.lstm_output_keep_prob,
self.batch_sequence_mask_tf: self._generate_boolean_mask(batch_seq_len)
}
if len(batch_labels) > 0:
return_dict[self.label_tf] = batch_labels
if len(batch_char) > 0:
return_dict[self.char_input_tf] = batch_char
return return_dict
def _construct_embedding_network(self):
""" Constructs a network based on the word embedding and gazetteer
inputs and concatenates them together
Returns:
Combined embeddings of the word and gazetteer embeddings
"""
initializer = tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)
dense_gaz_embedding_tf = tf.contrib.layers.fully_connected(
inputs=self.gaz_input_tf,
num_outputs=self.gaz_encoding_dimension,
weights_initializer=initializer)
batch_size_dim = tf.shape(self.query_input_tf)[0]
if self.use_char_embeddings:
word_level_char_embeddings_list = []
for window_size in self.char_window_sizes:
word_level_char_embeddings_list.append(self.apply_convolution(
self.char_input_tf, batch_size_dim, window_size))
word_level_char_embedding = tf.concat(word_level_char_embeddings_list, 2)
# Combined the two embeddings
combined_embedding_tf = tf.concat(
[self.query_input_tf, word_level_char_embedding], axis=2)
else:
combined_embedding_tf = self.query_input_tf
combined_embedding_tf = tf.concat(
[combined_embedding_tf, dense_gaz_embedding_tf], axis=2)
return combined_embedding_tf
def apply_convolution(self, input_tensor, batch_size, char_window_size):
""" Constructs a convolution network of a specific window size
Args:
input_tensor (tensor): The input tensor to the network
batch_size (int): The batch size of the training data
char_window_size (int): The character window size of each stride
Returns:
(Tensor): Convolved output tensor
"""
convolution_reshaped_char_embedding = tf.reshape(input_tensor,
[-1, self.padding_length,
self.max_char_per_word,
self.character_embedding_dimension, 1])
# Index 0 dimension is 1 because we want to apply this to every word. Index 1 dimension is
# char_window_size since this is the convolution window size. Index 3 dimension is
# 1 since the input channel is 1 dimensional (the sequence string). Index 4 dimension is
# the output dimension which is a hyper-parameter.
char_convolution_filter = tf.Variable(tf.random_normal(
[1, char_window_size, self.character_embedding_dimension,
1, self.word_level_character_embedding_size], dtype=tf.float32))
# Strides is None because we want to advance one character at a time and one word at a time
conv_output = tf.nn.convolution(convolution_reshaped_char_embedding,
char_convolution_filter, padding='SAME')
# Max pool over each word, captured by the size of the filter corresponding to an entire
# single word
max_pool = tf.nn.pool(
conv_output,
window_shape=[1, self.max_char_per_word, self.character_embedding_dimension],
pooling_type='MAX', padding='VALID')
# Transpose because shape before is batch_size BY query_padding_length BY 1 BY 1
# BY num_filters. This transform rearranges the dimension of each rank such that
# the num_filters dimension comes after the query_padding_length, so the last index
# 4 is brought after the index 1.
max_pool = tf.transpose(max_pool, [0, 1, 4, 2, 3])
max_pool = tf.reshape(max_pool, [batch_size, self.padding_length,
self.word_level_character_embedding_size])
char_convolution_bias = tf.Variable(
tf.random_normal([self.word_level_character_embedding_size, ]))
char_convolution_bias = tf.tile(char_convolution_bias, [self.padding_length])
char_convolution_bias = tf.reshape(char_convolution_bias,
[self.padding_length,
self.word_level_character_embedding_size])
char_convolution_bias = tf.tile(char_convolution_bias, [batch_size, 1])
char_convolution_bias = tf.reshape(char_convolution_bias,
[batch_size, self.padding_length,
self.word_level_character_embedding_size])
word_level_char_embedding = tf.nn.relu(max_pool + char_convolution_bias)
return word_level_char_embedding
def _define_optimizer_and_cost(self):
""" This function defines the optimizer and cost function of the LSTM model
Returns:
AdamOptimizer, Tensor: The optimizer function to reduce loss and the loss values
"""
if self.use_crf_layer:
flattened_labels = tf.cast(tf.argmax(self.label_tf, axis=2), tf.int32)
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
self.lstm_output_tf, flattened_labels, self.batch_sequence_lengths_tf)
cost_tf = tf.reduce_mean(-log_likelihood, name='cost_tf')
else:
masked_logits = tf.boolean_mask(
tf.reshape(self.lstm_output_tf, [-1, self.output_dimension]),
self.batch_sequence_mask_tf)
masked_labels = tf.boolean_mask(
tf.reshape(self.label_tf, [-1, self.output_dimension]),
self.batch_sequence_mask_tf)
softmax_loss_tf = tf.nn.softmax_cross_entropy_with_logits(
logits=masked_logits, labels=masked_labels, name='softmax_loss_tf')
cost_tf = tf.reduce_mean(softmax_loss_tf, name='cost_tf')
optimizer_tf = tf.train.AdamOptimizer(
learning_rate=float(self.learning_rate)).minimize(cost_tf)
return optimizer_tf, cost_tf
def _calculate_score(self, output_arr, label_arr, seq_lengths_arr):
""" This function calculates the sequence score of all the queries,
that is, the total number of queries where all the tags are predicted
correctly.
Args:
output_arr (ndarray): Output array of the LSTM network
label_arr (ndarray): Label array of the true labels of the data
seq_lengths_arr (ndarray): A real sequence lengths of each example
Returns:
int: The number of queries where all the tags are correct
"""
reshaped_output_arr = np.reshape(
output_arr, [-1, int(self.padding_length), self.output_dimension])
reshaped_output_arr = np.argmax(reshaped_output_arr, 2)
reshaped_labels_arr = np.argmax(label_arr, 2)
score = 0
for idx, query in enumerate(reshaped_output_arr):
seq_len = seq_lengths_arr[idx]
predicted_tags = reshaped_output_arr[idx][:seq_len]
actual_tags = reshaped_labels_arr[idx][:seq_len]
if np.array_equal(predicted_tags, actual_tags):
score += 1
return score
def _pad_labels(self, list_of_sequences, default_token):
"""
Pads the label sequence
Args:
list_of_sequences (list): A list of label sequences
default_token (str): The default label token for padding purposes
Returns:
list: padded output
"""
padded_output = []
for sequence in list_of_sequences:
padded_seq = [default_token] * self.padding_length
for idx, token in enumerate(sequence):
if idx < self.padding_length:
padded_seq[idx] = sequence[idx]
padded_output.append(padded_seq)
return padded_output
def _generate_boolean_mask(self, seq_lengths):
"""
Generates boolean masks for each query in a query list
Args:
seq_lengths (list): A list of sequence lengths
Return:
list: A list of boolean masking values
"""
mask = [False] * (len(seq_lengths) * self.padding_length)
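        # Example: seq_lengths=[2, 1] with padding_length=3 yields
        # [True, True, False, True, False, False]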
for idx, seq_len in enumerate(seq_lengths):
start_index = idx * self.padding_length
for i in range(start_index, start_index + seq_len):
mask[i] = True
return mask
def _construct_lstm_state(self, initializer, hidden_dimension, batch_size, name):
"""Construct the LSTM initial state
Args:
initializer (tf.contrib.layers.xavier_initializer): initializer used
hidden_dimension: num dimensions of the hidden state variable
batch_size: the batch size of the data
name: suffix of the variable going to be used
Returns:
(LSTMStateTuple): LSTM state information
"""
initial_cell_state = tf.get_variable(
"initial_cell_state_{}".format(name),
shape=[1, hidden_dimension],
dtype=tf.float32,
initializer=initializer)
initial_output_state = tf.get_variable(
"initial_output_state_{}".format(name),
shape=[1, hidden_dimension],
dtype=tf.float32,
initializer=initializer)
c_states = tf.tile(initial_cell_state, tf.stack([batch_size, 1]))
h_states = tf.tile(initial_output_state, tf.stack([batch_size, 1]))
return tf.contrib.rnn.LSTMStateTuple(c_states, h_states)
def _construct_regularized_lstm_cell(self, hidden_dimensions, initializer):
"""Construct a regularized lstm cell based on a dropout layer
Args:
hidden_dimensions: num dimensions of the hidden state variable
initializer (tf.contrib.layers.xavier_initializer): initializer used
Returns:
(DropoutWrapper): regularized LSTM cell
"""
lstm_cell = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
hidden_dimensions, forget_bias=1.0, initializer=initializer, state_is_tuple=True)
lstm_cell = tf.contrib.rnn.DropoutWrapper(
lstm_cell, input_keep_prob=self.lstm_input_keep_prob_tf,
output_keep_prob=self.lstm_output_keep_prob_tf)
return lstm_cell
def _construct_lstm_network(self, input_tensor):
""" This function constructs the Bi-Directional LSTM network
Args:
input_tensor (Tensor): Input tensor to the LSTM network
Returns:
output_tensor (Tensor): The output layer of the LSTM network
"""
n_hidden = int(self.token_lstm_hidden_state_dimension)
# We cannot use the static batch size variable since for the last batch set
# of data, the data size could be less than the batch size
batch_size_dim = tf.shape(input_tensor)[0]
        # We use the xavier initializer for some of its gradient control properties
initializer = tf.contrib.layers.xavier_initializer(seed=RANDOM_SEED)
# Forward LSTM construction
lstm_cell_forward_tf = self._construct_regularized_lstm_cell(n_hidden, initializer)
initial_state_forward_tf = self._construct_lstm_state(
initializer, n_hidden, batch_size_dim, 'lstm_cell_forward_tf')
# Backward LSTM construction
lstm_cell_backward_tf = self._construct_regularized_lstm_cell(n_hidden, initializer)
initial_state_backward_tf = self._construct_lstm_state(
initializer, n_hidden, batch_size_dim, 'lstm_cell_backward_tf')
        # Combine the forward and backward LSTM networks
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=lstm_cell_forward_tf,
cell_bw=lstm_cell_backward_tf,
inputs=input_tensor,
sequence_length=self.batch_sequence_lengths_tf,
dtype=tf.float32,
initial_state_fw=initial_state_forward_tf,
initial_state_bw=initial_state_backward_tf)
        # Construct the output layer
output_tf = tf.concat([output_fw, output_bw], axis=-1)
output_tf = tf.nn.dropout(output_tf, self.dense_keep_prob_tf)
output_weights_tf = tf.get_variable(name='output_weights_tf',
shape=[2 * n_hidden, self.output_dimension],
dtype="float32", initializer=initializer)
output_weights_tf = tf.tile(output_weights_tf, [batch_size_dim, 1])
output_weights_tf = tf.reshape(output_weights_tf, [batch_size_dim, 2 * n_hidden,
self.output_dimension])
zero_initializer = tf.constant_initializer(ZERO_INITIALIZER_VALUE)
output_bias_tf = tf.get_variable(name='output_bias_tf', shape=[self.output_dimension],
dtype="float32", initializer=zero_initializer)
output_tf = tf.add(tf.matmul(output_tf, output_weights_tf), output_bias_tf,
name='output_tensor')
return output_tf
def _get_model_constructor(self):
return self
def _extract_seq_length(self, examples):
"""Extract sequence lengths from the input examples
Args:
examples (list of Query objects): List of input queries
Returns:
(list): List of seq lengths for each query
"""
seq_lengths = []
for example in examples:
if len(example.normalized_tokens) > self.padding_length:
seq_lengths.append(self.padding_length)
else:
seq_lengths.append(len(example.normalized_tokens))
return seq_lengths
def _get_features(self, examples):
"""Extracts the word and gazetteer embeddings from the input examples
Args:
examples (list of mindmeld.core.Query): a list of queries
Returns:
(tuple): Word embeddings and Gazetteer one-hot embeddings
"""
x_feats_array = []
gaz_feats_array = []
char_feats_array = []
for idx, example in enumerate(examples):
x_feat, gaz_feat, char_feat = self._extract_features(example)
x_feats_array.append(x_feat)
gaz_feats_array.append(gaz_feat)
char_feats_array.append(char_feat)
        # Save all the embeddings so they can be persisted along with the model
self.query_encoder.save_embeddings()
if self.use_char_embeddings:
self.char_encoder.save_embeddings()
x_feats_array = np.asarray(x_feats_array)
gaz_feats_array = np.asarray(gaz_feats_array)
char_feats_array = np.asarray(char_feats_array) if self.use_char_embeddings else []
return x_feats_array, gaz_feats_array, char_feats_array
def _gaz_transform(self, list_of_tokens_to_transform):
"""This function is used to handle special logic around SKLearn's LabelBinarizer
class which behaves in a non-standard way for 2 classes. In a 2 class system,
it encodes the classes as [0] and [1]. However, in a 3 class system, it encodes
the classes as [0,0,1], [0,1,0], [1,0,0] and sustains this behavior for num_class > 2.
We want to encode 2 class systems as [0,1] and [1,0]. This function does that.
Args:
list_of_tokens_to_transform (list): A sequence of class labels
Returns:
(array): corrected encoding from the binarizer
"""
output = self.gaz_encoder.transform(list_of_tokens_to_transform)
if len(self.gaz_encoder.classes_) == 2:
output = np.hstack((1 - output, output))
return output
def _extract_features(self, example):
"""Extracts feature dicts for each token in an example.
Args:
example (mindmeld.core.Query): an query
Returns:
(list of dict): features
"""
default_gaz_one_hot = self._gaz_transform([DEFAULT_GAZ_LABEL]).tolist()[0]
extracted_gaz_tokens = [default_gaz_one_hot] * self.padding_length
extracted_sequence_features = extract_sequence_features(
example, self.example_type, self.features, self.resources)
for index, extracted_gaz in enumerate(extracted_sequence_features):
if index >= self.padding_length:
break
if extracted_gaz == {}:
continue
combined_gaz_features = set()
for key in extracted_gaz.keys():
regex_match = re.match(GAZ_PATTERN_MATCH, key)
if regex_match:
# Examples of gaz features here are:
# in-gaz|type:city|pos:start|p_fe,
# in-gaz|type:city|pos:end|pct-char-len
                    # Many gaz features of the same type carried both start and
                    # end position tags for a given token, so we do not extract
                    # the positional information because of the noise
                    # associated with it.
combined_gaz_features.add(
regex_match.group(REGEX_TYPE_POSITIONAL_INDEX))
if len(combined_gaz_features) != 0:
total_encoding = np.zeros(self.gaz_dimension, dtype=np.int)
for encoding in self._gaz_transform(list(combined_gaz_features)):
total_encoding = np.add(total_encoding, encoding)
extracted_gaz_tokens[index] = total_encoding.tolist()
padded_query = self.query_encoder.encode_sequence_of_tokens(example.normalized_tokens)
if self.use_char_embeddings:
padded_char = self.char_encoder.encode_sequence_of_tokens(example.normalized_tokens)
else:
padded_char = None
return padded_query, extracted_gaz_tokens, padded_char
def _fit(self, X, y):
"""Trains a classifier without cross-validation. It iterates through
the data, feeds batches to the tensorflow session graph and fits the
model based on the feed forward and back propagation steps.
Args:
X (list of list of list of str): a list of queries to train on
y (list of list of str): a list of expected labels
"""
self.construct_tf_variables()
self.session = tf.Session(graph=self.graph)
self.session.run([self.global_init, self.local_init])
for epochs in range(int(self.number_of_epochs)):
logger.info("Training epoch : {}".format(epochs))
indices = [x for x in range(len(X))]
np.random.shuffle(indices)
gaz = self.gaz_features_arr[indices]
char = self.char_features_arr[indices] if self.use_char_embeddings else []
examples = X[indices]
labels = y[indices]
batch_size = int(self.batch_size)
num_batches = int(math.ceil(len(examples) / batch_size))
seq_len = np.array(self.sequence_lengths)[indices]
for batch in range(num_batches):
batch_start_index = batch * batch_size
batch_end_index = (batch * batch_size) + batch_size
batch_examples = examples[batch_start_index:batch_end_index]
batch_labels = labels[batch_start_index:batch_end_index]
batch_gaz = gaz[batch_start_index:batch_end_index]
batch_seq_len = seq_len[batch_start_index:batch_end_index]
batch_char = char[batch_start_index:batch_end_index]
if batch % int(self.display_epoch) == 0:
output, loss, _ = self.session.run([self.lstm_output_tf,
self.cost_tf,
self.optimizer_tf],
feed_dict=self.construct_feed_dictionary(
batch_examples,
batch_char,
batch_gaz,
batch_seq_len,
batch_labels))
score = self._calculate_score(output, batch_labels, batch_seq_len)
accuracy = score / (len(batch_examples) * 1.0)
logger.info("Trained batch from index {} to {}, "
"Mini-batch loss: {:.5f}, "
"Training sequence accuracy: {:.5f}".format(batch * batch_size,
(batch * batch_size) +
batch_size, loss,
accuracy))
else:
self.session.run(self.optimizer_tf,
feed_dict=self.construct_feed_dictionary(
batch_examples,
batch_char,
batch_gaz,
batch_seq_len,
batch_labels))
return self
def _predict(self, X):
"""Predicts tags for query sequence
Args:
X (list of list of list of str): a list of input representations
Returns:
            (list): A list of decoded labels predicted by the model
"""
seq_len_arr = np.array(self.sequence_lengths)
# During predict time, we make sure no nodes are dropped out
self.dense_keep_probability = 1.0
self.lstm_input_keep_prob = 1.0
self.lstm_output_keep_prob = 1.0
output = self.session.run(
[self.lstm_output_softmax_tf],
feed_dict=self.construct_feed_dictionary(
X, self.char_features_arr, self.gaz_features_arr, seq_len_arr))
output = np.reshape(output, [-1, int(self.padding_length), self.output_dimension])
output = np.argmax(output, 2)
decoded_queries = []
for idx, encoded_predict in enumerate(output):
decoded_query = []
for tag in encoded_predict[:self.sequence_lengths[idx]]:
decoded_query.append(self.label_encoder.classes_[tag])
decoded_queries.append(decoded_query)
return decoded_queries
def _predict_proba(self, X):
"""Predict tags for query sequence with their confidence scores
Args:
X (list of list of list of str): a list of input representations
Returns:
            (list): A list of decoded labels predicted by the model, with a confidence score for each tag
"""
seq_len_arr = np.array(self.sequence_lengths)
# During predict time, we make sure no nodes are dropped out
self.dense_keep_probability = 1.0
self.lstm_input_keep_prob = 1.0
self.lstm_output_keep_prob = 1.0
output = self.session.run(
[self.lstm_output_softmax_tf],
feed_dict=self.construct_feed_dictionary(
X, self.char_features_arr, self.gaz_features_arr, seq_len_arr))
output = np.reshape(output, [-1, int(self.padding_length), self.output_dimension])
class_output = np.argmax(output, 2)
decoded_queries = []
for idx, encoded_predict in enumerate(class_output):
decoded_query = []
for token_idx, tag in enumerate(encoded_predict[:self.sequence_lengths[idx]]):
decoded_query.append([self.label_encoder.classes_[tag],
output[idx][token_idx][tag]])
decoded_queries.append(decoded_query)
return decoded_queries
def dump(self, path, config):
"""
Saves the Tensorflow model
Args:
path (str): the folder path for the entity model folder
config (dict): The model config
"""
path = path.split('.pkl')[0] + '_model_files'
config['model'] = path
config['serializable'] = False
if not os.path.isdir(path):
os.makedirs(path)
if not self.saver:
            # This conditional happens when there are no entities for the associated
# model
return
self.saver.save(self.session, os.path.join(path, 'lstm_model'))
# Save feature extraction variables
variables_to_dump = {
'resources': self.resources,
'gaz_dimension': self.gaz_dimension,
'output_dimension': self.output_dimension,
'gaz_features': self.gaz_features_arr,
'sequence_lengths': self.sequence_lengths,
'gaz_encoder': self.gaz_encoder,
'label_encoder': self.label_encoder
}
joblib.dump(variables_to_dump, os.path.join(path, '.feature_extraction_vars'))
def load(self, path):
"""
Loads the Tensorflow model
Args:
path (str): the folder path for the entity model folder
"""
path = path.split('.pkl')[0] + '_model_files'
if not os.path.exists(os.path.join(path, 'lstm_model.meta')):
            # This conditional is for models with no labels, for which no TF
            # graph was built.
return
self.graph = tf.Graph()
self.session = tf.Session(graph=self.graph)
with self.graph.as_default():
saver = tf.train.import_meta_graph(os.path.join(path, 'lstm_model.meta'))
saver.restore(self.session, os.path.join(path, 'lstm_model'))
# Restore tensorflow graph variables
self.dense_keep_prob_tf = \
self.session.graph.get_tensor_by_name('dense_keep_prob_tf:0')
self.lstm_input_keep_prob_tf = \
self.session.graph.get_tensor_by_name('lstm_input_keep_prob_tf:0')
self.lstm_output_keep_prob_tf = \
self.session.graph.get_tensor_by_name('lstm_output_keep_prob_tf:0')
self.query_input_tf = self.session.graph.get_tensor_by_name('query_input_tf:0')
self.gaz_input_tf = self.session.graph.get_tensor_by_name('gaz_input_tf:0')
self.label_tf = self.session.graph.get_tensor_by_name('label_tf:0')
self.batch_sequence_lengths_tf = \
self.session.graph.get_tensor_by_name('batch_sequence_lengths_tf:0')
self.batch_sequence_mask_tf = \
self.session.graph.get_tensor_by_name('batch_sequence_mask_tf:0')
self.lstm_output_tf = self.session.graph.get_tensor_by_name('output_tensor:0')
self.lstm_output_softmax_tf = \
self.session.graph.get_tensor_by_name('output_softmax_tensor:0')
if self.use_char_embeddings:
self.char_input_tf = self.session.graph.get_tensor_by_name('char_input_tf:0')
# Load feature extraction variables
variables_to_load = joblib.load(os.path.join(path, '.feature_extraction_vars'))
self.resources = variables_to_load['resources']
self.gaz_dimension = variables_to_load['gaz_dimension']
self.output_dimension = variables_to_load['output_dimension']
self.gaz_features = variables_to_load['gaz_features']
self.sequence_lengths = variables_to_load['sequence_lengths']
self.gaz_encoder = variables_to_load['gaz_encoder']
self.label_encoder = variables_to_load['label_encoder']
|
the-stack_106_15404
|
#!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from setuptools import setup, find_packages
import os
import sys
"""
setup module for ehpc.
Created on 7/3/2015
@author: alex
"""
PACKAGE = "aliyunsdkehpc"
NAME = "aliyun-python-sdk-ehpc"
DESCRIPTION = "The ehpc module of Aliyun Python sdk."
AUTHOR = "Aliyun"
AUTHOR_EMAIL = "[email protected]"
URL = "http://develop.aliyun.com/sdk/python"
TOPDIR = os.path.dirname(__file__) or "."
VERSION = __import__(PACKAGE).__version__
desc_file = open("README.rst")
try:
LONG_DESCRIPTION = desc_file.read()
finally:
desc_file.close()
requires = []
if sys.version_info < (3, 3):
requires.append("aliyun-python-sdk-core>=2.0.2")
else:
requires.append("aliyun-python-sdk-core-v3>=2.3.5")
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="Apache",
url=URL,
keywords=["aliyun","sdk","ehpc"],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
platforms="any",
install_requires=requires,
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development",
)
)
|
the-stack_106_15409
|
# --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the dataframe (`path` to the CSV is assumed to be defined beforehand, e.g. by the exercise environment)
df = pd.read_csv(path)
# To calculate the joint probability it's very important that the conditions are independent of each other. Let's check whether the conditions 'fico credit score greater than 700' and purpose == 'debt_consolidation' are independent of each other.
p_a = len(df[df['fico'] > 700])/len(df)
p_b = len(df[df['purpose'] == 'debt_consolidation'])/len(df)
df1 = df[df['purpose'] == 'debt_consolidation']
print(df1.shape)
p_a_b = len(df1[df1['fico'] > 700])/len(df1)
result = False
if p_a_b == p_a:
result = True
# Insight: they are dependent on each other
# Calculating conditional probability is a very important step. Let's apply Bayes' theorem to get the probability that the loan is paid back given that the credit policy is 'Yes'.
prob_lp = len(df[df['paid.back.loan'] == 'Yes'])/ len(df)
prob_cs = len(df[df['credit.policy'] == 'Yes'])/len(df)
new_df = df[df['paid.back.loan'] == 'Yes']
prob_pd_cs = len(new_df[new_df['credit.policy'] == 'Yes'])/len(new_df)
bayes = prob_pd_cs * prob_lp / prob_cs
print(bayes)
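# Note: the value printed above is Bayes' theorem written out explicitly,
#   P(paid.back.loan = 'Yes' | credit.policy = 'Yes')
#       = P(credit.policy = 'Yes' | paid.back.loan = 'Yes') * P(paid.back.loan = 'Yes')
#         / P(credit.policy = 'Yes')
# i.e. the conditional probability measured on the paid-back subset is converted
# into the conditional probability of paying back given the credit policy.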
# Let's visualize the bar plot for the feature purpose, and again under the condition where paid.back.loan == 'No'.
#Instructions:
#Visualize the bar plot for the feature purpose.
#Calculate the paid.back.loan == No and the store the result in dataframe df1
#Visualize the bar plot for the feature purpose where paid.back.loan == No
df.purpose.value_counts().plot(kind = 'bar')
plt.title("Probability Distribution of Purpose")
plt.ylabel("Probability")
plt.xlabel("Number of Purpose")
plt.show()
df1 = df[df['paid.back.loan'] == 'No']
df1.purpose.value_counts().plot(kind = 'bar')
plt.title("Probability Distribution of Purpose")
plt.ylabel("Probability")
plt.xlabel("Number of Purpose")
plt.show()
# Let's plot histograms of the continuous variables to get a basic idea of what their distributions look like.
inst_median = df['installment'].median()
inst_mean = df['installment'].mean()
df['installment'].hist(density=True, bins=50)  # 'normed' was removed in newer matplotlib
plt.axvline(x=inst_median,color='r')
plt.axvline(x=inst_mean,color='g')
df['log.annual.inc'].hist(density=True, bins=50)
plt.show()
|
the-stack_106_15411
|
# -*- coding: utf-8 -*-
"""
Hydropy package
@author: Stijn Van Hoey
"""
from __future__ import absolute_import, print_function
import datetime
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset, Day, Week, Hour, Minute
from matplotlib.ticker import LinearLocator
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib as mpl
mpl.rcParams['mathtext.default'] = 'regular'
def selectstorms(flowserie, rainserie, number_of_storms=3,
min_period_in_between=7, search_period=7,
drywindow=96):
""" (pd.DataFrame, pd.DataFrame) -> List
Easy storm selection process, based on the maximum flows measured
in the given timeserie of flow measurements.
To define the startdate of the storm, 24h no rain before the Qmax is
searched for. The end date is found by checking the
flow at the startdate (Qbase) and searching the moment after Qmax with
the same flow within the first 2 weeks.
If none is found, relaxation (1.1*Qbase; 1.2*Qbase,...)
until a moment is found.
Parameters
----------
flowserie : pd.Series
Pandas Series with the date in the index
rainserie : pd.Series
Pandas Series with the date in the index
number_of_storms : int
Number of storms you want to select
min_period_in_between : int (days)
Minimum number of days in between to selected storms
search_period : int (days)
Period to look for the start of the storm, when rain started
drywindow : int
Number of timesteps to check for no-rain
"""
if not isinstance(flowserie, pd.Series):
        raise Exception('flowserie must be a single data Series')
if not isinstance(rainserie, pd.Series):
        raise Exception('rainserie must be a single data Series')
# fill na values with very low (negative) value
temp = flowserie.fillna(value=-777.).copy()
    # sort the whole array (a DataFrame needs column names, a Series does not)
    try:
        temp = temp.sort_values(temp.columns.tolist(), ascending=False)
    except AttributeError:
        temp = temp.sort_values(ascending=False)
# find in the index three periods which are at least given number
# of days from each other
# after three concurrences, save these dates
stormmax = [temp.index[0]] # first element is a selected storm
i = 1
while len(stormmax) < number_of_storms:
# check for each period
alldif = True
for stormdate in stormmax:
if abs(temp.index[i] - stormdate) \
< datetime.timedelta(days=min_period_in_between):
alldif = False
# if new stormperiod, select
if alldif:
stormmax.append(temp.index[i])
i += 1
selstorms = []
for storm in stormmax:
# FIND DRY DAY WEEK BEFORE
# select period before storm (1 week)
presearchperiod = datetime.timedelta(days=search_period)
temp1 = rainserie[storm - presearchperiod:storm]
        temp1 = temp1.rolling(window=drywindow, center=False).sum()
# zero value means the preceding 24hours no rain: so, closest zeros
# to the date itself -24h are selected
if rainserie.ndim == 2:
temp1 = temp1.min(axis=1)
tempdates = temp1[temp1 < 0.001].index.tolist()
if len(tempdates) == 0:
raise Exception('Decrease drywindow period containing no rain.')
date_arg = np.argmin([abs(times - storm) for times in tempdates])
startstormdate = tempdates[date_arg] - Day()
# Get the flow value of the storm and when it is found again + 1 Day
temp2a = flowserie[startstormdate:startstormdate + Week()*2]
# only if multiple columns
if flowserie.ndim == 2:
temp2 = temp2a.max(axis=1)
else:
temp2 = temp2a
        flowbase = temp2.loc[startstormdate]
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
if lowerafterstorm.size == 0:
print('Lower initial flow not found again...test with mean...')
if flowserie.ndim == 2:
temp2 = temp2a.mean(axis=1)
else:
temp2 = temp2a
            flowbase = temp2.loc[startstormdate]
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
cnt = 1
while lowerafterstorm.size == 0:
print('... still not working; relaxing conditions...',
cnt*10, '% of minimal after storm incorporated')
flowbase = flowbase + 0.1*flowbase
lowerafterstorm = temp2[temp2 < flowbase][storm + Day():]
cnt += 1
endstormdate = lowerafterstorm.index[0]
# add to selected storms
selstorms.append({'startdate': startstormdate,
'enddate': endstormdate})
return selstorms
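# Minimal usage sketch for selectstorms() (hypothetical series names; drywindow=96
# assumes 15-minute data so that the dry window spans 24 hours):
#   storms = selectstorms(flow_series, rain_series, number_of_storms=3,
#                         min_period_in_between=7, search_period=7, drywindow=96)
#   fig, axes = plotstorms(flow_series, rain_series, storms)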
def _control_dayhour(Timestamp):
"""pd.TimeStamp -> int
Help function for editing the date representation of the plots
"""
if Timestamp.hour == 0 and Timestamp.minute == 0:
return 0
else:
return 1
def _getsize(nrows):
"""int -> int
propose height of the figure based on number of rows
"""
size_dict = {1: 6, 2: 6, 3: 8, 4: 8, 5: 10, 6: 12}
return size_dict[nrows]
def _add_labels_above(ax0, fig, flowdim, raindim):
""" matplotlib.axes -> None
"""
bbox = ax0.get_position()
rainlabel = ax0.text(bbox.x0 + bbox.width,
bbox.y0 + bbox.height, r"Rain ($mm$)",
transform=fig.transFigure,
verticalalignment="bottom",
horizontalalignment="right")
flowlabel = ax0.text(bbox.x0, bbox.y0 + bbox.height,
r"Flow ($m^3s^{-1}$)",
transform=fig.transFigure,
verticalalignment="bottom",
horizontalalignment="left")
if flowdim == 1:
flowlabel.set_color('#08519c')
if raindim == 1:
rainlabel.set_color('#6baed6')
def _make_comparable(axes):
"""axes -> None
updates the y-bound of the subplot, giving them all the bounds of the
largest
only used for the rain-flow combined subplots configuration within a
gridspec environment
"""
# check the configuration
if axes[0].get_subplotspec().get_gridspec().get_height_ratios():
nplots = int(len(axes)/2.)
ymaxes = [max(axs.get_yticks()) for axs in axes]
rainmax = max(ymaxes[::2])
flowmax = max(ymaxes[1::2])
newmaxes = [rainmax, flowmax]*nplots
for axs, nmax in zip(axes, newmaxes):
axs = axs.set_ybound(upper=nmax)
else:
ymaxes = [max(axs.get_yticks()) for axs in axes[1:]]
flowmax = max(ymaxes)
for axs in axes[1:]:
axs = axs.set_ybound(upper=flowmax)
def plotstorms(flowserie, rainserie, selected_storm,
tsfreq=None, tsfrequnit=None,
make_comparable=False,
period_title=False):
"""
Plot Flow-Rain plots for every storm period selected,
optimal sizes and configuration done for 1 till 5 subplots (storms)
"""
if len(selected_storm) > 6:
raise Exception('Split plotting up in multiple figures')
fig = plt.figure(facecolor='white',
figsize=(12, _getsize(len(selected_storm))))
gs0 = gridspec.GridSpec(len(selected_storm), 1)
gs0.update(hspace=0.35)
for j, storm in enumerate(selected_storm):
gs00 = gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=gs0[j],
hspace=0.0,
height_ratios=[2, 4])
# RAIN PLOT
ax0 = fig.add_subplot(gs00[0])
ax0.plot(
rainserie[storm['startdate']: storm['enddate']].index.to_pydatetime(),
rainserie[storm['startdate']: storm['enddate']].values,
linestyle='steps')
# FLOW PLOT
stormflow = flowserie[storm['startdate']: storm['enddate']]
ax1 = fig.add_subplot(gs00[1], sharex=ax0)
ax1.plot(stormflow.index.to_pydatetime(), stormflow.values,
label=r" Measured Flow ($m^3s^{-1}$)")
# if single plots of flow/rain -> set specific color
if flowserie.ndim == 1:
ax1.lines[0].set_color('#08519c')
if rainserie.ndim == 1:
ax0.lines[0].set_color('#6baed6')
# ADAPT ticks for storm-conditions (less than a month timeseries)
ax0.yaxis.set_major_locator(LinearLocator(3))
ax1.yaxis.set_major_locator(LinearLocator(3))
ax1.xaxis.set_minor_locator(mpl.dates.DayLocator())
ax1.xaxis.set_minor_formatter(mpl.dates.DateFormatter('%d'))
ax1.xaxis.set_major_locator(mpl.dates.MonthLocator(bymonthday=
[1, storm['startdate'].day + \
_control_dayhour(storm['startdate'])]))
ax1.xaxis.set_major_formatter(
mpl.dates.DateFormatter('\n %b %Y'))
# Add the labels of the different flows
if j == 0:
_add_labels_above(ax0, fig, flowserie.ndim, rainserie.ndim)
# Print the start and end period as title above subplots
if period_title:
ax0.set_title(storm['startdate'].strftime("%d/%m/%y") + " - " +
storm['enddate'].strftime("%d/%m/%y"),
fontweight='bold', fontsize=12)
# Looks of the rainplot
ax0.set_xlabel('')
ax0.invert_yaxis()
ax0.yaxis.tick_right()
ax0.spines['bottom'].set_visible(False)
ax0.spines['top'].set_visible(False)
plt.setp(ax0.get_xminorticklabels(), visible=False)
plt.setp(ax0.get_xmajorticklabels(), visible=False)
# looks of the flowplot
ax1.spines['top'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.set_xlabel('')
plt.draw()
all_axes = fig.get_axes()
# Give all the subplots the same y-bounds
if make_comparable:
_make_comparable(all_axes)
return fig, all_axes
|
the-stack_106_15412
|
import unittest
from lattice_mc.transitions import Transitions
from lattice_mc.jump import Jump
from unittest.mock import Mock, patch
import numpy as np
class TransitionsTestCase( unittest.TestCase ):
"""Test for Transitions class"""
def setUp( self ):
self.jumps = [ Mock( spec=Jump, relative_probability=0.25 ) for i in range(4) ]
self.transitions = Transitions( self.jumps )
def test_transitions_is_initialised( self ):
self.assertEqual( self.transitions.jumps, self.jumps )
np.testing.assert_array_equal( self.transitions.p, np.array( [ 0.25, 0.25, 0.25, 0.25 ] ) )
def test_cumulative_probabilities( self ):
self.transitions.p = np.array( [ 0.1, 0.2, 0.3, 0.4 ] )
np.testing.assert_allclose( self.transitions.cumulative_probabilities(), np.array( [ 0.1, 0.3, 0.6, 1.0 ] ) )
@patch( 'random.random' )
def test_random( self, mock_random ):
        self.transitions.cumulative_probabilities = Mock( return_value = np.array( [ 0.1, 0.3, 0.6, 1.0 ] ) )
mock_random.return_value = 0.05
self.assertIs( self.transitions.random(), self.jumps[0] )
mock_random.return_value = 0.3
self.assertIs( self.transitions.random(), self.jumps[1] )
@patch( 'random.random' )
def test_time_to_jump( self, mock_random ):
self.transitions.p = np.array( [ 0.1, 0.2, 0.3, 0.4 ] )
mock_random.return_value = 0.15
self.assertAlmostEqual( self.transitions.time_to_jump(), 1.89711998489e-13 )
if __name__ == '__main__':
unittest.main()
|
the-stack_106_15413
|
# Copyright (c) 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Summarize the last ninja build, invoked with ninja's -C syntax.
This script is designed to be automatically run after each ninja build in
order to summarize the build's performance. Making build performance information
more visible should make it easier to notice anomalies and opportunities. To use
this script on Windows just set NINJA_SUMMARIZE_BUILD=1 and run autoninja.bat.
On Linux you can get autoninja to invoke this script using this syntax:
$ NINJA_SUMMARIZE_BUILD=1 autoninja -C out/Default/ chrome
You can also call this script directly using ninja's syntax to specify the
output directory of interest:
> python post_build_ninja_summary.py -C out/Default
Typical output looks like this:
>ninja -C out\debug_component base
ninja.exe -C out\debug_component base -j 960 -l 48 -d keeprsp
ninja: Entering directory `out\debug_component'
[1 processes, 1/1 @ 0.3/s : 3.092s ] Regenerating ninja files
Longest build steps:
0.1 weighted s to build obj/base/base/trace_log.obj (6.7 s elapsed time)
0.2 weighted s to build nasm.exe, nasm.exe.pdb (0.2 s elapsed time)
0.3 weighted s to build obj/base/base/win_util.obj (12.4 s elapsed time)
1.2 weighted s to build base.dll, base.dll.lib (1.2 s elapsed time)
Time by build-step type:
0.0 s weighted time to generate 6 .lib files (0.3 s elapsed time sum)
0.1 s weighted time to generate 25 .stamp files (1.2 s elapsed time sum)
0.2 s weighted time to generate 20 .o files (2.8 s elapsed time sum)
1.7 s weighted time to generate 4 PEFile (linking) files (2.0 s elapsed
time sum)
23.9 s weighted time to generate 770 .obj files (974.8 s elapsed time sum)
26.1 s weighted time (982.9 s elapsed time sum, 37.7x parallelism)
839 build steps completed, average of 32.17/s
If no gn clean has been done then results will be for the last non-NULL
invocation of ninja. Ideas for future statistics, and implementations are
appreciated.
The "weighted" time is the elapsed time of each build step divided by the number
of tasks that were running in parallel. This makes it an excellent approximation
of how "important" a slow step was. A link that is entirely or mostly serialized
will have a weighted time that is the same or similar to its elapsed time. A
compile that runs in parallel with 999 other compiles will have a weighted time
that is tiny."""
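# Worked example of the weighted-time idea (hypothetical numbers, not from a real
# build): a 10 s compile that overlaps entirely with four other running tasks
# contributes 10 / 5 = 2 s of weighted time, while a 10 s link that runs alone
# contributes the full 10 s. Summing the weighted times of all steps recovers the
# wall-clock length of the build.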
from __future__ import print_function
import argparse
import errno
import os
import sys
# The number of long build times to report:
long_count = 10
# The number of long times by extension to report
long_ext_count = 5
class Target:
"""Represents a single line read for a .ninja_log file."""
def __init__(self, start, end):
"""Creates a target object by passing in the start/end times in seconds
as a float."""
self.start = start
self.end = end
# A list of targets, appended to by the owner of this object.
self.targets = []
self.weighted_duration = 0.0
def Duration(self):
"""Returns the task duration in seconds as a float."""
return self.end - self.start
def SetWeightedDuration(self, weighted_duration):
"""Sets the duration, in seconds, passed in as a float."""
self.weighted_duration = weighted_duration
def WeightedDuration(self):
"""Returns the task's weighted duration in seconds as a float.
Weighted_duration takes the elapsed time of the task and divides it
by how many other tasks were running at the same time. Thus, it
represents the approximate impact of this task on the total build time,
with serialized or serializing steps typically ending up with much
longer weighted durations.
weighted_duration should always be the same or shorter than duration.
"""
# Allow for modest floating-point errors
epsilon = 0.000002
if (self.weighted_duration > self.Duration() + epsilon):
print('%s > %s?' % (self.weighted_duration, self.Duration()))
assert(self.weighted_duration <= self.Duration() + epsilon)
return self.weighted_duration
def DescribeTargets(self):
"""Returns a printable string that summarizes the targets."""
if len(self.targets) == 1:
return self.targets[0]
# Some build steps generate dozens of outputs - handle them sanely.
# It's a bit odd that if there are three targets we return all three
# but if there are more than three we just return two, but this works
# well in practice.
elif len(self.targets) > 3:
return '(%d items) ' % len(self.targets) + (
', '.join(self.targets[:2]) + ', ...')
else:
return ', '.join(self.targets)
# Copied with some modifications from ninjatracing
def ReadTargets(log, show_all):
"""Reads all targets from .ninja_log file |log_file|, sorted by duration.
The result is a list of Target objects."""
header = log.readline()
assert header == '# ninja log v5\n', \
'unrecognized ninja log version %r' % header
targets_dict = {}
last_end_seen = 0.0
for line in log:
parts = line.strip().split('\t')
if len(parts) != 5:
# If ninja.exe is rudely halted then the .ninja_log file may be
# corrupt. Silently continue.
continue
start, end, _, name, cmdhash = parts # Ignore restat.
# Convert from integral milliseconds to float seconds.
start = int(start) / 1000.0
end = int(end) / 1000.0
if not show_all and end < last_end_seen:
# An earlier time stamp means that this step is the first in a new
# build, possibly an incremental build. Throw away the previous
# data so that this new build will be displayed independently.
# This has to be done by comparing end times because records are
# written to the .ninja_log file when commands complete, so end
# times are guaranteed to be in order, but start times are not.
targets_dict = {}
target = None
if cmdhash in targets_dict:
target = targets_dict[cmdhash]
if not show_all and (target.start != start or target.end != end):
# If several builds in a row just run one or two build steps then
# the end times may not go backwards so the last build may not be
# detected as such. However in many cases there will be a build step
# repeated in the two builds and the changed start/stop points for
# that command, identified by the hash, can be used to detect and
# reset the target dictionary.
targets_dict = {}
target = None
if not target:
targets_dict[cmdhash] = target = Target(start, end)
last_end_seen = end
target.targets.append(name)
return targets_dict.values()
def GetExtension(target):
"""Return the file extension that best represents a target.
For targets that generate multiple outputs it is important to return a
consistent 'canonical' extension. Ultimately the goal is to group build steps
by type."""
for output in target.targets:
# Normalize all mojo related outputs to 'mojo'.
if output.count('.mojom') > 0:
extension = 'mojo'
break
# Not a true extension, but a good grouping.
if output.endswith('type_mappings'):
extension = 'type_mappings'
break
extension = os.path.splitext(output)[1]
if len(extension) == 0:
extension = '(no extension found)'
if extension in ['.pdb', '.dll', '.exe']:
extension = 'PEFile (linking)'
# Make sure that .dll and .exe are grouped together and that the
# .dll.lib files don't cause these to be listed as libraries
break
if extension in ['.so', '.TOC']:
extension = '.so (linking)'
# Attempt to identify linking, avoid identifying as '.TOC'
break
return extension
def SummarizeEntries(entries):
"""Print a summary of the passed in list of Target objects."""
# Create a list that is in order by time stamp and has entries for the
# beginning and ending of each build step (one time stamp may have multiple
# entries due to multiple steps starting/stopping at exactly the same time).
# Iterate through this list, keeping track of which tasks are running at all
# times. At each time step calculate a running total for weighted time so
# that when each task ends its own weighted time can easily be calculated.
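    # For instance (illustrative numbers only): tasks A spanning [0 s, 4 s] and
    # B spanning [2 s, 5 s] yield the events (0, start, A), (2, start, B),
    # (4, stop, A), (5, stop, B). A accumulates 2 s alone plus 2 s shared between
    # two tasks (2 / 2 = 1 s) for a weighted duration of 3 s, B accumulates
    # 1 s + 1 s = 2 s, and the 5 s total matches the wall-clock length.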
task_start_stop_times = []
earliest = -1
latest = 0
total_cpu_time = 0
for target in entries:
if earliest < 0 or target.start < earliest:
earliest = target.start
if target.end > latest:
latest = target.end
total_cpu_time += target.Duration()
task_start_stop_times.append((target.start, 'start', target))
task_start_stop_times.append((target.end, 'stop', target))
length = latest - earliest
weighted_total = 0.0
task_start_stop_times.sort()
# Now we have all task start/stop times sorted by when they happen. If a
# task starts and stops on the same time stamp then the start will come
# first because of the alphabet, which is important for making this work
# correctly.
# Track the tasks which are currently running.
running_tasks = {}
# Record the time we have processed up to so we know how to calculate time
# deltas.
last_time = task_start_stop_times[0][0]
# Track the accumulated weighted time so that it can efficiently be added
# to individual tasks.
last_weighted_time = 0.0
# Scan all start/stop events.
for event in task_start_stop_times:
time, action_name, target = event
# Accumulate weighted time up to now.
num_running = len(running_tasks)
if num_running > 0:
# Update the total weighted time up to this moment.
last_weighted_time += (time - last_time) / float(num_running)
if action_name == 'start':
# Record the total weighted task time when this task starts.
running_tasks[target] = last_weighted_time
if action_name == 'stop':
# Record the change in the total weighted task time while this task ran.
weighted_duration = last_weighted_time - running_tasks[target]
target.SetWeightedDuration(weighted_duration)
weighted_total += weighted_duration
del running_tasks[target]
last_time = time
assert(len(running_tasks) == 0)
# Warn if the sum of weighted times is off by more than half a second.
    if abs(length - weighted_total) > 0.5:
print('Discrepancy!!! Length = %.3f, weighted total = %.3f' % (
length, weighted_total))
# Print the slowest build steps (by weighted time).
print(' Longest build steps:')
entries.sort(key=lambda x: x.WeightedDuration())
for target in entries[-long_count:]:
print(' %8.1f weighted s to build %s (%.1f s elapsed time)' % (
target.WeightedDuration(),
target.DescribeTargets(), target.Duration()))
# Sum up the time by file extension/type of the output file
count_by_ext = {}
time_by_ext = {}
weighted_time_by_ext = {}
# Scan through all of the targets to build up per-extension statistics.
for target in entries:
extension = GetExtension(target)
time_by_ext[extension] = time_by_ext.get(extension, 0) + target.Duration()
weighted_time_by_ext[extension] = weighted_time_by_ext.get(extension,
0) + target.WeightedDuration()
count_by_ext[extension] = count_by_ext.get(extension, 0) + 1
print(' Time by build-step type:')
# Copy to a list with extension name and total time swapped, to (time, ext)
weighted_time_by_ext_sorted = sorted((y, x) for (x, y) in
weighted_time_by_ext.items())
# Print the slowest build target types (by weighted time):
for time, extension in weighted_time_by_ext_sorted[-long_ext_count:]:
print(' %8.1f s weighted time to generate %d %s files '
'(%1.1f s elapsed time sum)' % (time, count_by_ext[extension],
extension, time_by_ext[extension]))
print(' %.1f s weighted time (%.1f s elapsed time sum, %1.1fx '
'parallelism)' % (length, total_cpu_time,
total_cpu_time * 1.0 / length))
print(' %d build steps completed, average of %1.2f/s' % (
len(entries), len(entries) / (length)))
def main():
log_file = '.ninja_log'
parser = argparse.ArgumentParser()
parser.add_argument('-C', dest='build_directory',
help='Build directory.')
parser.add_argument('--log-file',
help="specific ninja log file to analyze.")
args, _extra_args = parser.parse_known_args()
if args.build_directory:
log_file = os.path.join(args.build_directory, log_file)
if args.log_file:
log_file = args.log_file
try:
with open(log_file, 'r') as log:
entries = ReadTargets(log, False)
SummarizeEntries(entries)
except IOError:
print('Log file %r not found, no build summary created.' % log_file)
return errno.ENOENT
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_15415
|
import numpy as np
import pandas as pd
import os
# set some initial paths
# path to the directory where this script lives
thisdir = os.path.abspath('')
# path to the main directory of the repository
maindir = os.path.split(os.path.split(thisdir)[0])[0]
# path to the analysis_results subdirectory
analysisdir = os.path.split(thisdir)[0]
# path to the data subdirectory
datadir = os.path.join(os.path.split(os.path.split(thisdir)[0])[0], 'data')
location_file = os.path.join(datadir, 'location_to_country.csv')
locations_df = pd.read_csv(location_file, delimiter=',')
popsizes_file = os.path.join(datadir, 'population_size.csv')
popsizes_df = pd.read_csv(popsizes_file, delimiter=',')
def get_country_name(df, location):
"""
Return country name of the location
Args:
df (pandas Dataframe) : dataframe containing locations and corresponding countries
location (str) : name of the location
Returns:
str: Name of the country the location is in.
"""
d = df[df.location == location]
return d.country.values[0]
def get_locations_by_country(df, country):
"""
Return locations in the country
Args:
df (pandas Dataframe) : dataframe containing locations and corresponding countries
country (str) : name of the country
Returns:
str: Name of the country the location is in.
"""
locations = list(df[df.country == country].location.values)
return locations
def get_popsize(popsizes_df, location):
"""
Return the population size for the location (according to the year of the original data - either 2010 or 2011).
Args:
popsizes_df (pandas Dataframe) : dataframe containing population sizes for each location, the year of the count, and the source
location (str) : name of the location
Returns:
int: An integer of the estimated population size for the location.
"""
return popsizes_df.loc[popsizes_df['location'] == location]['population_size'].values[0]
def get_ages(location, country, level, num_agebrackets=85):
"""
Get the age count for the synthetic population of the location.
Args:
location (str) : name of the location
country (str) : name of the country
level (str) : name of level (country or subnational)
num_agebrackets (int) : the number of age brackets
Returns:
dict: A dictionary of the age count.
"""
if country == location:
level = 'country'
if country == 'Europe':
country = location
level = 'country'
if level == 'country':
file_name = country + '_' + level + '_level_age_distribution_' + '%i' % num_agebrackets + '.csv'
else:
file_name = country + '_' + level + '_' + location + '_age_distribution_' + '%i' % num_agebrackets + '.csv'
file_path = os.path.join(datadir, 'age_distributions', file_name)
df = pd.read_csv(file_path, delimiter=',', header=None)
df.columns = ['age', 'age_count']
ages = dict(zip(df.age.values.astype(int), df.age_count.values))
return ages
def get_available_age_brackets_and_mapping():
"""
Create a mapping of the number of age brackets to the brackets and for each
set of brackets, a mapping of age to bracket or group.
"""
brackets_dic = {}
dict_of_age_by_brackets = {}
for num_agebrackets in [85, 18, 15, 12]:
brackets = []
if num_agebrackets == 85:
for i in range(84):
brackets.append([i])
brackets.append(np.arange(84, 101))
# # num_agebracket = 20 only works if you have an age distribution and
# # matrices that go in more detail than age 84+, so if you wanted
# # brackets of 85-89, 90-94, 95-100+, etc. it would be hard unless
# # you have those matrices (which we don't because of the European
# # matrices)
# if num_agebrackets == 20:
# for i in range(19):
# brackets.append(np.arange(5 * i, 5 * (i + 1)))
# brackets.append(np.arange(95, 101))
if num_agebrackets == 18:
for i in range(16):
brackets.append(np.arange(5 * i, 5 * (i + 1)))
brackets.append(np.arange(80, 84))
brackets.append(np.arange(84, 101))
if num_agebrackets == 15:
for i in range(14):
brackets.append(np.arange(5 * i, 5 * (i + 1)))
brackets.append(np.arange(70, 101))
if num_agebrackets == 12:
for i in range(11):
brackets.append(np.arange(5 * i, 5 * (i + 1)))
brackets.append(np.arange(55, 101))
age_by_brackets_dic = dict.fromkeys(np.arange(101), 0)
for n, b in enumerate(brackets):
for a in b:
age_by_brackets_dic[a] = n
brackets_dic[num_agebrackets] = brackets
dict_of_age_by_brackets[num_agebrackets] = age_by_brackets_dic
return brackets_dic, dict_of_age_by_brackets
def get_age_brackets(available_age_brackets, age_by_brackets_mapping, num_agebrackets):
"""
Return the age brackets and mapping of age to bracket for a specific mapping indexed by the number of brackets.
Args:
available_age_brackets (dict) : a dictionary of available mappings
age_by_brackets_mapping (dict) : a dictionary of mappings for single years of age to the bracket or bin they map to depending on the number of age brackets defined
num_agebrackets (int) : number of age brackets desired
Returns:
The age brackets and mapping of age to bracket for a specific mapping
indexed by the number of brackets.
"""
return available_age_brackets[num_agebrackets], age_by_brackets_mapping[num_agebrackets]
def get_aggregate_ages(ages, age_by_brackets_dic):
"""
Return an aggregated age count distribution.
Args:
ages (dict) : original age count distribution
age_by_brackets_dic (dict) : a dictionary mapping single years of age to the bracket or bin they map to
Returns:
An aggregated age count distribution from the mapping age_by_brackets_dic.
"""
num_agebrackets = len(set(age_by_brackets_dic.values()))
aggregate_ages = dict.fromkeys(np.arange(num_agebrackets), 0)
for a in ages:
b = age_by_brackets_dic[a]
aggregate_ages[b] += ages[a]
return aggregate_ages
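# Sketch of how the two helpers above combine (with a hypothetical `ages` dict of
# 85 single-year counts):
#   brackets, mapping = get_available_age_brackets_and_mapping()
#   aggregate_ages = get_aggregate_ages(ages, mapping[18])
# which collapses the single-year counts into 18 five-year bins.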
def get_rescaled_ages(location, popsize):
country = get_country_name(locations_df, location)
level = 'subnational'
num_agebrackets = 85
ages = get_ages(location, country, level, num_agebrackets)
age_dist = ages.copy()
for a in ages:
age_dist[a] = ages[a] / sum(ages.values())
original_popsize = sum(ages.values())
ratio = popsize / original_popsize
age_range = np.arange(len(ages))
rescaled_ages = dict()
for a in ages:
rescaled_ages[a] = np.round(ages[a] * ratio)
diff = int(popsize - sum(rescaled_ages.values()))
if diff > 0:
for n in range(diff):
a = np.random.choice(age_range, p=[age_dist[a] for a in age_range])
rescaled_ages[a] += 1
elif diff < 0:
for n in range(-diff):
a = np.random.choice(age_range, p=[age_dist[a] for a in age_range])
while rescaled_ages[a] <= 0:
a = np.random.choice(age_range, p=[age_dist[a] for a in age_range])
rescaled_ages[a] -= 1
assert sum(rescaled_ages.values()) == popsize, f'New age distribution does not match the size of popsize that was desired. The difference is {sum(rescaled_ages.values()) - popsize} (sum(new age distribution) - popsize).'
print(f'{location}, {country}, {ratio:0.3f}, {original_popsize}, {popsize}, {sum(rescaled_ages.values()) - popsize}')
return rescaled_ages
def write_rescaled_ages(location, rescaled_ages, datadir, overwrite=False):
num_agebrackets = len(rescaled_ages)
country = get_country_name(locations_df, location)
if country == 'Europe':
country = location
level = 'country'
if country == location:
level = 'country'
else:
level = 'subnational'
if level == 'country':
file_name = f"{country}_{level}_level_age_distribution_{num_agebrackets:.0f}.csv"
else:
file_name = f"{country}_{level}_{location}_age_distribution_{num_agebrackets:.0f}.csv"
file_path = os.path.join(datadir, 'population_rescaled_age_distributions', file_name)
e = os.path.exists(file_path)
if e:
print(f'{file_path} already exists.')
if overwrite:
print('Overwriting the file.')
f = open(file_path, 'w+')
for a in rescaled_ages:
f.write(f"{a:.16f},{rescaled_ages[a]:.16f}\n")
f.close()
else:
print('Not overwriting the existing file.')
else:
f = open(file_path, 'w+')
for a in rescaled_ages:
f.write(f"{a:.16f},{rescaled_ages[a]:.16f}\n")
f.close()
return
def write_rescaled_aggregated_ages(location, aggregate_ages, datadir, overwrite=False):
num_agebrackets = len(aggregate_ages)
country = get_country_name(locations_df, location)
if country == 'Europe':
country = location
level = 'country'
if country == location:
level = 'country'
else:
level = 'subnational'
if level == 'country':
file_name = f"{country}_{level}_level_age_distribution_{num_agebrackets:.0f}.csv"
else:
file_name = f"{country}_{level}_{location}_age_distribution_{num_agebrackets:.0f}.csv"
file_path = os.path.join(datadir, 'population_rescaled_age_distributions', file_name)
e = os.path.exists(file_path)
if e:
print(f'{file_path} already exists.')
if overwrite:
print('Overwriting the file.')
f = open(file_path, 'w+')
for a in range(len(aggregate_ages)):
f.write(f"{a:.16f},{aggregate_ages[a]:.16f}\n")
f.close()
else:
print('Not overwriting the existing file.')
else:
f = open(file_path, 'w+')
for a in range(len(aggregate_ages)):
f.write(f"{a:.16f},{aggregate_ages[a]:.16f}\n")
f.close()
return
if __name__ == '__main__':
# get all of the locations possible from the countries/regions we provide data for
countries = ['Australia', 'Canada', 'China', 'Europe', 'India', 'Israel', 'Japan', 'Russia', 'South_Africa', 'United_States']
locations = []
for country in countries:
locations += get_locations_by_country(locations_df, country)
if country not in locations and country not in ['Russia', 'Europe']:
locations.append(country)
# a few territories in India we did not model
if country == 'India':
locations.remove('Dadra_and_Nagar_Haveli')
locations.remove('Chandigarh')
locations.remove('Lakshadweep')
# find out what age brackets and mappings are available -- if you want to use different ones than available, look at the details of this method to add them
available_age_brackets, age_by_brackets_mapping = get_available_age_brackets_and_mapping()
# let's create contact matrices aggregated to 18 age brackets: 5 year bins, where the last bin is 84 years old and up
num_agebrackets = 18
age_brackets = available_age_brackets[num_agebrackets]
age_by_brackets_dic = age_by_brackets_mapping[num_agebrackets]
# create population size rescaled ages with sizes
write_flag = False # set to True to save matrices to disk
for location in locations:
popsize = get_popsize(popsizes_df, location)
rescaled_ages = get_rescaled_ages(location, popsize)
aggregate_ages = get_aggregate_ages(rescaled_ages, age_by_brackets_dic)
assert sum(rescaled_ages.values()) == sum(aggregate_ages.values())
# write to disk
if write_flag:
write_rescaled_ages(location, rescaled_ages, datadir)
write_rescaled_aggregated_ages(location, aggregate_ages, datadir)
|
the-stack_106_15416
|
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Database creation and schema-providing interfaces for backends."""
from functools import singledispatch
import logging
import bigchaindb
from bigchaindb.backend.connection import connect
from bigchaindb.common.exceptions import ValidationError
from bigchaindb.common.utils import validate_all_values_for_key
logger = logging.getLogger(__name__)
# Tables/collections that every backend database must create
TABLES = ('transactions', 'blocks', 'assets', 'metadata',
'validators', 'pre_commit', 'utxos')
VALID_LANGUAGES = ('danish', 'dutch', 'english', 'finnish', 'french', 'german',
'hungarian', 'italian', 'norwegian', 'portuguese', 'romanian',
'russian', 'spanish', 'swedish', 'turkish', 'none',
'da', 'nl', 'en', 'fi', 'fr', 'de', 'hu', 'it', 'nb', 'pt',
'ro', 'ru', 'es', 'sv', 'tr')
@singledispatch
def create_database(connection, dbname):
"""Create database to be used by BigchainDB.
Args:
dbname (str): the name of the database to create.
Raises:
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
exists as a database.
"""
raise NotImplementedError
@singledispatch
def create_tables(connection, dbname):
"""Create the tables to be used by BigchainDB.
Args:
dbname (str): the name of the database to create tables for.
"""
raise NotImplementedError
@singledispatch
def create_indexes(connection, dbname):
"""Create the indexes to be used by BigchainDB.
Args:
dbname (str): the name of the database to create indexes for.
"""
raise NotImplementedError
@singledispatch
def drop_database(connection, dbname):
"""Drop the database used by BigchainDB.
Args:
dbname (str): the name of the database to drop.
Raises:
:exc:`~DatabaseDoesNotExist`: If the given :attr:`dbname` does not
exist as a database.
"""
raise NotImplementedError
def init_database(connection=None, dbname=None):
"""Initialize the configured backend for use with BigchainDB.
Creates a database with :attr:`dbname` with any required tables
and supporting indexes.
Args:
connection (:class:`~bigchaindb.backend.connection.Connection`): an
existing connection to use to initialize the database.
Creates one if not given.
dbname (str): the name of the database to create.
Defaults to the database name given in the BigchainDB
configuration.
Raises:
:exc:`~DatabaseAlreadyExists`: If the given :attr:`dbname` already
exists as a database.
"""
connection = connection or connect()
dbname = dbname or bigchaindb.config['database']['name']
create_database(connection, dbname)
create_tables(connection, dbname)
create_indexes(connection, dbname)
def validate_language_key(obj, key):
"""Validate all nested "language" key in `obj`.
Args:
obj (dict): dictionary whose "language" key is to be validated.
Returns:
None: validation successful
Raises:
ValidationError: will raise exception in case language is not valid.
"""
backend = bigchaindb.config['database']['backend']
if backend == 'localmongodb':
data = obj.get(key, {})
if isinstance(data, dict):
validate_all_values_for_key(data, 'language', validate_language)
def validate_language(value):
"""Check if `value` is a valid language.
https://docs.mongodb.com/manual/reference/text-search-languages/
Args:
value (str): language to validated
Returns:
None: validation successful
Raises:
ValidationError: will raise exception in case language is not valid.
"""
if value not in VALID_LANGUAGES:
error_str = ('MongoDB does not support text search for the '
'language "{}". If you do not understand this error '
'message then please rename key/field "language" to '
'something else like "lang".').format(value)
raise ValidationError(error_str)
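# Illustrative use of the validators above (hypothetical payload): with the
# localmongodb backend, validate_language_key({'data': {'language': 'english'}},
# 'data') passes silently, while a value such as 'klingon' raises ValidationError.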
|
the-stack_106_15418
|
import abc
import importlib
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import pandas
import pyarrow
from tqdm import tqdm
from feast import errors
from feast.entity import Entity
from feast.feature_table import FeatureTable
from feast.feature_view import FeatureView
from feast.infra.offline_stores.offline_store import RetrievalJob
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.registry import Registry
from feast.repo_config import RepoConfig
from feast.type_map import python_value_to_proto_value
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL = "event_timestamp"
class Provider(abc.ABC):
@abc.abstractmethod
def update_infra(
self,
project: str,
tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
entities_to_delete: Sequence[Entity],
entities_to_keep: Sequence[Entity],
partial: bool,
):
"""
Reconcile cloud resources with the objects declared in the feature repo.
Args:
project: Project to which tables belong
tables_to_delete: Tables that were deleted from the feature repo, so provider needs to
clean up the corresponding cloud resources.
tables_to_keep: Tables that are still in the feature repo. Depending on implementation,
provider may or may not need to update the corresponding resources.
entities_to_delete: Entities that were deleted from the feature repo, so provider needs to
clean up the corresponding cloud resources.
entities_to_keep: Entities that are still in the feature repo. Depending on implementation,
provider may or may not need to update the corresponding resources.
partial: if true, then tables_to_delete and tables_to_keep are *not* exhaustive lists.
There may be other tables that are not touched by this update.
"""
...
@abc.abstractmethod
def teardown_infra(
self,
project: str,
tables: Sequence[Union[FeatureTable, FeatureView]],
entities: Sequence[Entity],
):
"""
Tear down all cloud resources for a repo.
Args:
project: Feast project to which tables belong
tables: Tables that are declared in the feature repo.
entities: Entities that are declared in the feature repo.
"""
...
@abc.abstractmethod
def online_write_batch(
self,
project: str,
table: Union[FeatureTable, FeatureView],
data: List[
Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
],
progress: Optional[Callable[[int], Any]],
) -> None:
"""
Write a batch of feature rows to the online store. This is a low level interface, not
expected to be used by the users directly.
If a tz-naive timestamp is passed to this method, it is assumed to be UTC.
Args:
project: Feast project name
table: Feast FeatureTable
data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,
a dict containing feature values, an event timestamp for the row, and
the created timestamp for the row if it exists.
progress: Optional function to be called once every mini-batch of rows is written to
the online store. Can be used to display progress.
"""
...
@abc.abstractmethod
def materialize_single_feature_view(
self,
feature_view: FeatureView,
start_date: datetime,
end_date: datetime,
registry: Registry,
project: str,
tqdm_builder: Callable[[int], tqdm],
) -> None:
pass
@staticmethod
@abc.abstractmethod
def get_historical_features(
config: RepoConfig,
feature_views: List[FeatureView],
feature_refs: List[str],
entity_df: Union[pandas.DataFrame, str],
registry: Registry,
project: str,
) -> RetrievalJob:
pass
@abc.abstractmethod
def online_read(
self,
project: str,
table: Union[FeatureTable, FeatureView],
entity_keys: List[EntityKeyProto],
) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
"""
Read feature values given an Entity Key. This is a low level interface, not
expected to be used by the users directly.
Returns:
Data is returned as a list, one item per entity key. Each item in the list is a tuple
of event_ts for the row, and the feature data as a dict from feature names to values.
Values are returned as Value proto message.
"""
...
def get_provider(config: RepoConfig, repo_path: Path) -> Provider:
if "." not in config.provider:
if config.provider == "gcp":
from feast.infra.gcp import GcpProvider
return GcpProvider(config)
elif config.provider == "local":
from feast.infra.local import LocalProvider
return LocalProvider(config, repo_path)
else:
raise errors.FeastProviderNotImplementedError(config.provider)
else:
# Split provider into module and class names by finding the right-most dot.
# For example, provider 'foo.bar.MyProvider' will be parsed into 'foo.bar' and 'MyProvider'
module_name, class_name = config.provider.rsplit(".", 1)
# Try importing the module that contains the custom provider
try:
module = importlib.import_module(module_name)
except Exception as e:
# The original exception can be anything - either module not found,
# or any other kind of error happening during the module import time.
# So we should include the original error as well in the stack trace.
raise errors.FeastProviderModuleImportError(module_name) from e
# Try getting the provider class definition
try:
ProviderCls = getattr(module, class_name)
except AttributeError:
# This can only be one type of error, when class_name attribute does not exist in the module
# So we don't have to include the original exception here
raise errors.FeastProviderClassImportError(
module_name, class_name
) from None
return ProviderCls(config, repo_path)
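# Illustrative sketch (not part of the original module; module and class names are
# hypothetical): a custom provider is referenced from the repo config by its dotted
# path, and get_provider() imports the module, looks up the class, and instantiates
# it with (config, repo_path), so a custom class must accept that constructor.
#
#     # feature_store.yaml
#     #   provider: my_pkg.providers.MyProvider
#     provider = get_provider(repo_config, repo_path)   # -> my_pkg.providers.MyProvider(...)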
def _get_requested_feature_views_to_features_dict(
feature_refs: List[str], feature_views: List[FeatureView]
) -> Dict[FeatureView, List[str]]:
"""Create a dict of FeatureView -> List[Feature] for all requested features"""
feature_views_to_feature_map = {} # type: Dict[FeatureView, List[str]]
for ref in feature_refs:
ref_parts = ref.split(":")
feature_view_from_ref = ref_parts[0]
feature_from_ref = ref_parts[1]
found = False
for feature_view_from_registry in feature_views:
if feature_view_from_registry.name == feature_view_from_ref:
found = True
if feature_view_from_registry in feature_views_to_feature_map:
feature_views_to_feature_map[feature_view_from_registry].append(
feature_from_ref
)
else:
feature_views_to_feature_map[feature_view_from_registry] = [
feature_from_ref
]
if not found:
raise ValueError(f"Could not find feature view from reference {ref}")
return feature_views_to_feature_map
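# Illustrative example (assumed names): feature_refs such as
# ["driver_stats:conv_rate", "driver_stats:acc_rate"] resolved against a registry
# containing a FeatureView named "driver_stats" yield
# {<driver_stats FeatureView>: ["conv_rate", "acc_rate"]}.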
def _get_column_names(
feature_view: FeatureView, entities: List[Entity]
) -> Tuple[List[str], List[str], str, Optional[str]]:
"""
If a field mapping exists, run it in reverse on the join keys,
feature names, event timestamp column, and created timestamp column
to get the names of the relevant columns in the offline feature store table.
Returns:
Tuple containing the list of reverse-mapped join_keys,
reverse-mapped feature names, reverse-mapped event timestamp column,
and reverse-mapped created timestamp column that will be passed into
the query to the offline store.
"""
# if we have mapped fields, use the original field names in the call to the offline store
event_timestamp_column = feature_view.input.event_timestamp_column
feature_names = [feature.name for feature in feature_view.features]
created_timestamp_column = feature_view.input.created_timestamp_column
join_keys = [entity.join_key for entity in entities]
if feature_view.input.field_mapping is not None:
reverse_field_mapping = {
v: k for k, v in feature_view.input.field_mapping.items()
}
event_timestamp_column = (
reverse_field_mapping[event_timestamp_column]
if event_timestamp_column in reverse_field_mapping.keys()
else event_timestamp_column
)
created_timestamp_column = (
reverse_field_mapping[created_timestamp_column]
if created_timestamp_column
and created_timestamp_column in reverse_field_mapping.keys()
else created_timestamp_column
)
join_keys = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in join_keys
]
feature_names = [
reverse_field_mapping[col] if col in reverse_field_mapping.keys() else col
for col in feature_names
]
return (
join_keys,
feature_names,
event_timestamp_column,
created_timestamp_column,
)
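# Illustrative example (assumed names): with field_mapping={"created": "created_ts"}
# on the FeatureView input, the reverse mapping is {"created_ts": "created"}, so a
# created_timestamp_column of "created_ts" is translated back to the offline table's
# physical column name "created" before the offline store query is built.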
def _run_field_mapping(
table: pyarrow.Table, field_mapping: Dict[str, str],
) -> pyarrow.Table:
# run field mapping in the forward direction
cols = table.column_names
mapped_cols = [
field_mapping[col] if col in field_mapping.keys() else col for col in cols
]
table = table.rename_columns(mapped_cols)
return table
def _convert_arrow_to_proto(
table: pyarrow.Table, feature_view: FeatureView, join_keys: List[str],
) -> List[Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]]:
rows_to_write = []
def _coerce_datetime(ts):
"""
Depending on underlying time resolution, arrow to_pydict() sometimes returns pandas
timestamp type (for nanosecond resolution), and sometimes you get standard python datetime
(for microsecond resolution).
While pandas timestamp class is a subclass of python datetime, it doesn't always behave the
same way. We convert it to normal datetime so that consumers downstream don't have to deal
with these quirks.
"""
if isinstance(ts, pandas.Timestamp):
return ts.to_pydatetime()
else:
return ts
for row in zip(*table.to_pydict().values()):
entity_key = EntityKeyProto()
for join_key in join_keys:
entity_key.join_keys.append(join_key)
idx = table.column_names.index(join_key)
value = python_value_to_proto_value(row[idx])
entity_key.entity_values.append(value)
feature_dict = {}
for feature in feature_view.features:
idx = table.column_names.index(feature.name)
value = python_value_to_proto_value(row[idx], feature.dtype)
feature_dict[feature.name] = value
event_timestamp_idx = table.column_names.index(
feature_view.input.event_timestamp_column
)
event_timestamp = _coerce_datetime(row[event_timestamp_idx])
if feature_view.input.created_timestamp_column:
created_timestamp_idx = table.column_names.index(
feature_view.input.created_timestamp_column
)
created_timestamp = _coerce_datetime(row[created_timestamp_idx])
else:
created_timestamp = None
rows_to_write.append(
(entity_key, feature_dict, event_timestamp, created_timestamp)
)
return rows_to_write
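# Illustrative sketch (not part of the original module; variable names are assumed):
# the helpers above are typically chained when materializing a feature view into the
# online store.
#
#     table = _run_field_mapping(arrow_table, feature_view.input.field_mapping or {})
#     join_keys = [entity.join_key for entity in entities]
#     rows = _convert_arrow_to_proto(table, feature_view, join_keys)
#     provider.online_write_batch(project, feature_view, rows, progress=None)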
|
the-stack_106_15421
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 11:52:14 2018
@author: eub_hmy
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
import os
tf.set_random_seed(1) # set random seed
# load the MNIST data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# hyperparameters
lr = 0.001 # learning rate
training_iters = 1000000 # upper limit on training steps
batch_size = 512
n_inputs = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # time steps
n_hidden_units = 128 # neurons in hidden layer
n_classes = 10 # MNIST classes (0-9 digits)
num_checkpoints=5
checkpoint_every=100
MODEL_DIR = "model/ckpt"
# filename used when saving the model
MODEL_NAME = "model.ckpt"
if not tf.gfile.Exists(MODEL_DIR):
tf.gfile.MakeDirs(MODEL_DIR)
# x y placeholder
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs],name='input')
y = tf.placeholder(tf.float32, [None, n_classes])
# initial values for the weights and biases
weights = {
# shape (28, 128)
'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
# shape (128, 10)
'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
# shape (128, )
'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
# shape (10, )
'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
def RNN(X, weights, biases):
    # the original X is 3-D; reshape it to 2-D so it can be matrix-multiplied with the weights
    # X ==> (batch_size * 28 steps, 28 inputs)
X = tf.reshape(X, [-1, n_inputs])
# X_in = W*X + b
X_in = tf.matmul(X, weights['in']) + biases['in']
    # X_in ==> (batch_size, 28 steps, 128 hidden): reshape back to 3-D
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
    init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)  # initialize with an all-zero state
outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, X_in, initial_state=init_state, time_major=False)
results = tf.matmul(final_state[1], weights['out']) + biases['out']
return results
pred = RNN(x, weights, biases)
prediction=tf.nn.softmax(pred,name='softmax')
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=num_checkpoints)
sess=tf.Session()
sess.run(init)
step = 1
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
if step % 20 == 0:
print(step,sess.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys,
}))
sess.run([train_op], feed_dict={
x: batch_xs,
y: batch_ys,
})
if step % checkpoint_every == 0:
saver.save(sess, os.path.join(MODEL_DIR, MODEL_NAME),global_step=step)
step += 1
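# Optional sketch (not part of the original script): freeze the trained graph using the
# graph_util import above. The output node name 'softmax' matches the tf.nn.softmax op
# defined earlier; the .pb filename is an assumption.
output_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), ['softmax'])
with tf.gfile.GFile(os.path.join(MODEL_DIR, 'frozen_model.pb'), 'wb') as f:
    f.write(output_graph_def.SerializeToString())
sess.close()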
|
the-stack_106_15422
|
import cv2
import os
import json
import pytest
from dataclasses import asdict
from d2r_image import processing
from d2r_image.data_models import GroundItemList
from common import ExpressionTest, pretty_dict
from functools import cache
from pick_item_test_cases import NIP_PICK_TESTS
from nip.transpile import generate_expression_object, transpile_nip_expression
import nip.actions as nip_actions
import screen
import utils.download_test_assets # downloads assets if they don't already exist, doesn't need to be called
PATH='test/assets/ground_loot'
screen.set_window_position(0, 0)
@cache
def load_ground_loot() -> dict:
test_objs = {}
for filename in os.listdir(PATH):
if filename.lower().endswith('.png'):
basename = filename[:-4]
image = cv2.imread(f"{PATH}/{basename}.png")
test_objs[basename] = processing.get_ground_loot(image)
return test_objs
@cache
def expressions_test_list() -> list[ExpressionTest]:
expressions = []
for key, value in NIP_PICK_TESTS.items(): # key = basename, value = list[dict]
for val in value: # for dict in list[dict]
items_json: GroundItemList = load_ground_loot()[key]
for ground_item in items_json.items:
if ground_item.Text == val["Text"] and ground_item.Color == val["Color"]:
for expr in val["expressions"]:
expressions.append(ExpressionTest(
basename=key,
read_json=ground_item.as_dict(),
expression=expr["expression"],
pick_expected=expr["should_pickup"],
transpiled=transpile_nip_expression(expr["expression"])
))
break
return expressions
# this test has essentially been made obsolete by the nip should_pick() tests
# @pytest.mark.parametrize('ground_items', load_ground_loot().items())
# def test_ground_loot(ground_items: list[str, dict]):
# basename = ground_items[0]
# result = ground_items[1]
# x = open(f"{PATH}/{basename}.json").read()
# expected_properties = GroundItemList.from_json(x)
# # print(f"expected_properties: {expected_properties}")
# assert result == expected_properties
@pytest.mark.parametrize('should_pick_expression', expressions_test_list())
def test_pick_item(should_pick_expression: ExpressionTest, mocker):
mocker.patch.object(nip_actions, 'nip_expressions', [
generate_expression_object(should_pick_expression.expression)
])
result, matching_expression = nip_actions.should_pickup(should_pick_expression.read_json)
if bool(result) != should_pick_expression.pick_expected:
print("\n")
print("nip_expressions object:")
print(pretty_dict(asdict(nip_actions.nip_expressions[0])))
print("test expression object:")
print(json.dumps(asdict(should_pick_expression), indent=4))
if matching_expression:
print(f"matching expression: {matching_expression}")
print(f"should_pickup() result: {result}; test pass/fail below")
print("\n")
assert bool(result) == should_pick_expression.pick_expected
|
the-stack_106_15427
|
import logging
import os
from typing import Optional, Tuple, List
from ray.autoscaler.sdk import rsync, configure_logging
from ray.util import get_node_ip_address
from ray.util.debug import log_once
from ray.tune.syncer import NodeSyncer
from ray.tune.sync_client import SyncClient
from ray.ray_constants import env_integer
logger = logging.getLogger(__name__)
class DockerSyncer(NodeSyncer):
"""DockerSyncer used for synchronization between Docker containers.
This syncer extends the node syncer, but is usually instantiated
without a custom sync client. The sync client defaults to
``DockerSyncClient`` instead.
Set the env var `TUNE_SYNCER_VERBOSITY` to increase verbosity
of syncing operations (0, 1, 2, 3). Defaults to 0.
.. note::
This syncer only works with the Ray cluster launcher.
If you use your own Docker setup, make sure the nodes can connect
to each other via SSH, and try the regular SSH-based syncer instead.
Example:
.. code-block:: python
from ray.tune.integration.docker import DockerSyncer
tune.run(train,
sync_config=tune.SyncConfig(
syncer=DockerSyncer))
"""
_cluster_config_file = os.path.expanduser("~/ray_bootstrap_config.yaml")
def __init__(
self, local_dir: str, remote_dir: str, sync_client: Optional[SyncClient] = None
):
configure_logging(
log_style="record", verbosity=env_integer("TUNE_SYNCER_VERBOSITY", 0)
)
self.local_ip = get_node_ip_address()
self.worker_ip = None
sync_client = sync_client or DockerSyncClient()
sync_client.configure(self._cluster_config_file)
super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
def set_worker_ip(self, worker_ip: str):
self.worker_ip = worker_ip
@property
def _remote_path(self) -> Tuple[str, str]:
return (self.worker_ip, self._remote_dir)
class DockerSyncClient(SyncClient):
"""DockerSyncClient to be used by DockerSyncer.
This client takes care of executing the synchronization
commands for Docker nodes. In its ``sync_down`` and
``sync_up`` commands, it expects tuples for the source
and target, respectively, for compatibility with docker.
Args:
should_bootstrap: Whether to bootstrap the autoscaler
            configuration. This may be useful when you are
running into authentication problems; i.e.:
https://github.com/ray-project/ray/issues/17756.
"""
def __init__(self, should_bootstrap: bool = True):
self._command_runners = {}
self._cluster_config = None
if os.environ.get("TUNE_SYNC_DISABLE_BOOTSTRAP") == "1":
should_bootstrap = False
logger.debug("Skipping bootstrap for docker sync client.")
self._should_bootstrap = should_bootstrap
def configure(self, cluster_config_file: str):
self._cluster_config_file = cluster_config_file
def sync_up(
self, source: str, target: Tuple[str, str], exclude: Optional[List] = None
) -> bool:
"""Here target is a tuple (target_node, target_dir)"""
target_node, target_dir = target
# Add trailing slashes for rsync
source = os.path.join(source, "")
target_dir = os.path.join(target_dir, "")
import click
try:
rsync(
cluster_config=self._cluster_config_file,
source=source,
target=target_dir,
down=False,
ip_address=target_node,
should_bootstrap=self._should_bootstrap,
use_internal_ip=True,
)
except click.ClickException:
if log_once("docker_rsync_up_fail"):
logger.warning(
"Rsync-up failed. Consider using a durable trainable "
"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var."
)
raise
return True
def sync_down(
self, source: Tuple[str, str], target: str, exclude: Optional[List] = None
) -> bool:
"""Here source is a tuple (source_node, source_dir)"""
source_node, source_dir = source
# Add trailing slashes for rsync
source_dir = os.path.join(source_dir, "")
target = os.path.join(target, "")
import click
try:
rsync(
cluster_config=self._cluster_config_file,
source=source_dir,
target=target,
down=True,
ip_address=source_node,
should_bootstrap=self._should_bootstrap,
use_internal_ip=True,
)
except click.ClickException:
if log_once("docker_rsync_down_fail"):
logger.warning(
"Rsync-down failed. Consider using a durable trainable "
"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var."
)
raise
return True
def delete(self, target: str) -> bool:
raise NotImplementedError
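# Illustrative usage sketch (not part of the original module; the IP address and paths
# are assumptions):
#
#     client = DockerSyncClient()
#     client.configure(os.path.expanduser("~/ray_bootstrap_config.yaml"))
#     client.sync_up("/tmp/ray_results/exp", ("10.0.0.2", "/tmp/ray_results/exp"))
#     client.sync_down(("10.0.0.2", "/tmp/ray_results/exp"), "/tmp/ray_results/exp")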
|
the-stack_106_15428
|
"""
A harvester for the Repository at St Cloud State for the SHARE project
More information at: https://github.com/CenterForOpenScience/SHARE/blob/master/providers/edu.stcloudstate.md
An example API call: http://repository.stcloudstate.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc&from=2014-09-26T00:00:00Z
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class StCloudHarvester(OAIHarvester):
short_name = 'stcloud'
long_name = 'The repository at St Cloud State'
url = 'http://stcloudstate.edu/'
base_url = 'http://repository.stcloudstate.edu/do/oai/'
property_list = [
'type', 'source', 'format', 'setSpec', 'date'
]
approved_sets = [
'ews_facpubs',
'ews_wps',
'hist_facwp',
'comm_facpubs',
'anth_facpubs',
'soc_facpubs',
'soc_ug_research',
'chem_facpubs',
'phys_present',
'lrs_facpubs',
'cfs_facpubs',
'hurl_facpubs',
'ed_facpubs',
'cpcf_gradresearch',
'econ_facpubs',
'econ_wps',
'econ_seminars',
'stcloud_ling'
]
|
the-stack_106_15429
|
import torch
from src.models.DenseNetwork.loss import nearest_neighbors, kl_div_add_mse_loss
from src.models.utils.distance import euclidean_softmax_similarity
def test_kl_mse_loss():
x = torch.tensor([[1.3653, -0.5120, -0.3876, 1.0540],
[-0.3208, -0.2595, -0.7641, 2.5738],
[1.0413, 0.9428, 0.4569, 0.2637]])
ground_min_dist_square, indices, _ = nearest_neighbors(x, top_k=2, device='cuda')
    assert torch.equal(indices, torch.tensor([[2, 1],
                                              [0, 2],
                                              [0, 1]]))
    assert torch.allclose(ground_min_dist_square, torch.tensor([[1.8866],
                                                                [2.3148],
                                                                [1.8866]]), atol=1e-4)
logit = torch.tensor([0.10, 0.40, 0.50])
target = torch.tensor([0.80, 0.15, 0.05])
    assert abs(kl_div_add_mse_loss(logit, target, 0).item() - 2.0907) < 1e-5
def test_nn_softmax_loss():
x = torch.tensor([[1., 2, 3,],
[5., 1, 7,],
[4., 2, 1,]])
anchor_idx = torch.tensor([[2, 1], [0, 2], [0, 1]])
y = x[anchor_idx]
assert torch.all(torch.eq(y, torch.tensor([[[4., 2, 1], [5., 1, 7,]],
[[1., 2, 3], [4., 2, 1,]],
[[1., 2, 3], [5., 1, 7,]]])))
    assert abs(euclidean_softmax_similarity(x, y, None, 13)[0, 0] - 0.82324) < 1e-4
test_nn_softmax_loss()
|
the-stack_106_15431
|
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class TestThread(BaseTestPyPyC):
def test_simple(self):
def main(n):
import thread
def f():
i = 0
while i < n:
i += 1
done.release()
done = thread.allocate_lock()
done.acquire()
thread.start_new_thread(f, ())
done.acquire()
return 0
log = self.run(main, [500])
assert round(log.result, 6) == round(main(500), 6)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i2 = int_lt(i0, i1)
guard_true(i2, descr=...)
i3 = int_add(i0, 1)
--THREAD-TICK--
jump(..., descr=...)
""")
def test_tls(self):
def main(n):
import thread
local = thread._local()
local.x = 1
i = 0
while i < n:
i += local.x
return 0
log = self.run(main, [500])
assert round(log.result, 6) == round(main(500), 6)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i53 = int_lt(i48, i27)
guard_true(i53, descr=...)
i54 = int_add_ovf(i48, i47)
guard_no_overflow(descr=...)
--TICK--
i58 = arraylen_gc(p43, descr=...)
jump(..., descr=...)
""")
def test_lock_acquire_release(self):
def main(n):
import threading
lock = threading.Lock()
while n > 0:
with lock:
n -= 1
log = self.run(main, [500])
assert log.result == main(500)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i56 = int_gt(i44, 0)
guard_true(i56, descr=...)
p57 = force_token()
setfield_gc(p0, p57, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token 8>)
i54 = call_release_gil_i(0, _, i37, 1, descr=<Calli 4 ii EF=7>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
i55 = int_ne(i54, 1) # sanity-check added in 90c5a06b0923
guard_false(i55, descr=...)
i58 = int_sub(i44, 1)
i59 = call_i(ConstClass(RPyThreadReleaseLock), i37, descr=<Calli . i EF=2>)
i60 = int_is_true(i59)
guard_false(i60, descr=...)
guard_not_invalidated(descr=...)
--TICK--
jump(..., descr=...)
""")
|