filename | text
---|---
the-stack_106_22716 | import numpy as np
import os
conf_file = './conf'
param_file = './param'
name = []
value = []
#for l in open(conf_file, 'r'):
with open(conf_file, 'r') as f1:
lines = f1.readlines()
cnt = len(lines)
for l in lines:
l = l.strip().split(' ')
if l[0] == 'des_var':
name.append(l[1])
min = float(l[2])
max = float(l[3])
num = min + np.random.rand()*(max-min)
value.append(num)
with open(param_file, 'w') as f:
    for i in range(len(name)):
f.write('.param ' + name[i] + ' = ' + str(value[i]) + '\n')
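# Added note: the parser above assumes a whitespace-separated conf file in which
# every design-variable line has the form
#
#   des_var <name> <min> <max>
#
# e.g. a hypothetical line "des_var w1 1.0 5.0" produces a SPICE-style parameter
# ".param w1 = <uniform random value in [1.0, 5.0]>" in the generated param file.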
|
the-stack_106_22717 | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
while True:
try:
assert len(node_dest.getblock(blockhash, False)) > 0
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert node_dest.submitblock(blockdata) in (None, 'inconclusive')
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
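# Note on the helpers above: unidirectional_node_sync_via_rpc walks back from
# node_src's tip via 'previousblockhash' until it hits a block node_dest already
# knows, then submits the missing blocks oldest-first; node_sync_via_rpc applies
# this in both directions for every node pair, so chains propagate between nodes
# without any p2p connections.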
class PreciousTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
gen_address = lambda i: self.nodes[i].get_deterministic_priv_key().address # A non-wallet address to mine to
self.nodes[0].generatetoaddress(1, gen_address(0))
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.nodes[1].generatetoaddress(2, gen_address(1))[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.nodes[0].generatetoaddress(3, gen_address(0))[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.nodes[1].generatetoaddress(3, gen_address(1))[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert hashC != hashG
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
self.connect_nodes(0, 1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
self.sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generatetoaddress(1, gen_address(0))
assert_equal(self.nodes[0].getblockcount(), 6)
self.sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generatetoaddress(4, gen_address(2))
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
|
the-stack_106_22718 | import os
import sys
import unittest
here = os.path.dirname(__file__)
sys.path.extend((
os.path.join(here, os.path.pardir),
os.path.join(here, 'versioned-libs')
))
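# Added note: the using_testlib_10 / using_testlib_20 modules imported below
# (presumably shipped in the 'versioned-libs' directory added to sys.path above)
# each require a different version of 'testlib' through the multiversion import
# hook; the mangled module names asserted in test_naming end in the hex-encoded
# version string ('1.0' -> '312e30', '2.0' -> '322e30').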
class SimpleTestCase(unittest.TestCase):
def test_basic_functionality(self):
import using_testlib_10 as v1
import using_testlib_20 as v2
self.assertEqual(v1.a_function(), 'from version 1.0')
self.assertEqual(v2.a_function(), 'from version 2.0')
        self.assertIn('testlib', sys.modules)
def test_naming(self):
import multiversion
import using_testlib_10 as v1
import using_testlib_20 as v2
prefix = multiversion.space.__name__ + '.'
self.assertEqual(v1.mod.__name__, prefix + 'testlib___312e30.testlib')
self.assertEqual(v2.mod.__name__, prefix + 'testlib___322e30.testlib')
def test_proxy(self):
# trigger proxy
import using_testlib_10 as v1
import multiversion
self.assertEqual(v1.a_function(), 'from version 1.0')
import testlib
try:
testlib.a_function
except AttributeError:
pass
else:
self.fail('failed')
multiversion.require_version('testlib', '1.0',
globals=globals())
self.assertEqual(testlib.a_function(), 'from version 1.0')
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22719 | import unittest2 as unittest
import simplejson
from celery.app import app_or_default
from celery.task import Task
from celery.task.sets import subtask, TaskSet
from celery.tests.utils import execute_context
from celery.tests.compat import catch_warnings
class MockTask(Task):
name = "tasks.add"
def run(self, x, y, **kwargs):
return x + y
@classmethod
def apply_async(cls, args, kwargs, **options):
return (args, kwargs, options)
@classmethod
def apply(cls, args, kwargs, **options):
return (args, kwargs, options)
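# Added note: MockTask overrides apply_async()/apply() to simply return the
# (args, kwargs, options) triple they receive instead of talking to a broker,
# so the tests below can assert on how subtask and TaskSet merge arguments
# and execution options.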
class test_subtask(unittest.TestCase):
def test_behaves_like_type(self):
s = subtask("tasks.add", (2, 2), {"cache": True},
{"routing_key": "CPU-bound"})
self.assertDictEqual(subtask(s), s)
def test_task_argument_can_be_task_cls(self):
s = subtask(MockTask, (2, 2))
self.assertEqual(s.task, MockTask.name)
def test_apply_async(self):
s = MockTask.subtask((2, 2), {"cache": True},
{"routing_key": "CPU-bound"})
args, kwargs, options = s.apply_async()
self.assertTupleEqual(args, (2, 2))
self.assertDictEqual(kwargs, {"cache": True})
self.assertDictEqual(options, {"routing_key": "CPU-bound"})
def test_delay_argmerge(self):
s = MockTask.subtask((2, ), {"cache": True},
{"routing_key": "CPU-bound"})
args, kwargs, options = s.delay(10, cache=False, other="foo")
self.assertTupleEqual(args, (10, 2))
self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
self.assertDictEqual(options, {"routing_key": "CPU-bound"})
def test_apply_async_argmerge(self):
s = MockTask.subtask((2, ), {"cache": True},
{"routing_key": "CPU-bound"})
args, kwargs, options = s.apply_async((10, ),
{"cache": False, "other": "foo"},
routing_key="IO-bound",
exchange="fast")
self.assertTupleEqual(args, (10, 2))
self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
self.assertDictEqual(options, {"routing_key": "IO-bound",
"exchange": "fast"})
def test_apply_argmerge(self):
s = MockTask.subtask((2, ), {"cache": True},
{"routing_key": "CPU-bound"})
args, kwargs, options = s.apply((10, ),
{"cache": False, "other": "foo"},
routing_key="IO-bound",
exchange="fast")
self.assertTupleEqual(args, (10, 2))
self.assertDictEqual(kwargs, {"cache": False, "other": "foo"})
self.assertDictEqual(options, {"routing_key": "IO-bound",
"exchange": "fast"})
def test_is_JSON_serializable(self):
s = MockTask.subtask((2, ), {"cache": True},
{"routing_key": "CPU-bound"})
s.args = list(s.args) # tuples are not preserved
# but this doesn't matter.
self.assertEqual(s,
subtask(simplejson.loads(simplejson.dumps(s))))
class test_TaskSet(unittest.TestCase):
def test_interface__compat(self):
def with_catch_warnings(log):
ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
self.assertTrue(log)
self.assertIn("Using this invocation of TaskSet is deprecated",
log[0].message.args[0])
self.assertListEqual(ts.tasks,
[MockTask.subtask((i, i))
for i in (2, 4, 8)])
return ts
context = catch_warnings(record=True)
execute_context(context, with_catch_warnings)
# TaskSet.task (deprecated)
def with_catch_warnings2(log):
ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
self.assertEqual(ts.task, MockTask)
self.assertTrue(log)
self.assertIn("TaskSet.task is deprecated",
log[0].message.args[0])
execute_context(catch_warnings(record=True), with_catch_warnings2)
# TaskSet.task_name (deprecated)
def with_catch_warnings3(log):
ts = TaskSet(MockTask, [[(2, 2)], [(4, 4)], [(8, 8)]])
self.assertEqual(ts.task_name, MockTask.name)
self.assertTrue(log)
self.assertIn("TaskSet.task_name is deprecated",
log[0].message.args[0])
execute_context(catch_warnings(record=True), with_catch_warnings3)
def test_task_arg_can_be_iterable__compat(self):
ts = TaskSet([MockTask.subtask((i, i))
for i in (2, 4, 8)])
self.assertEqual(len(ts), 3)
def test_respects_ALWAYS_EAGER(self):
app = app_or_default()
class MockTaskSet(TaskSet):
applied = 0
def apply(self, *args, **kwargs):
self.applied += 1
ts = MockTaskSet([MockTask.subtask((i, i))
for i in (2, 4, 8)])
app.conf.CELERY_ALWAYS_EAGER = True
try:
ts.apply_async()
finally:
app.conf.CELERY_ALWAYS_EAGER = False
self.assertEqual(ts.applied, 1)
def test_apply_async(self):
applied = [0]
class mocksubtask(subtask):
def apply_async(self, *args, **kwargs):
applied[0] += 1
ts = TaskSet([mocksubtask(MockTask, (i, i))
for i in (2, 4, 8)])
ts.apply_async()
self.assertEqual(applied[0], 3)
def test_apply(self):
applied = [0]
class mocksubtask(subtask):
def apply(self, *args, **kwargs):
applied[0] += 1
ts = TaskSet([mocksubtask(MockTask, (i, i))
for i in (2, 4, 8)])
ts.apply()
self.assertEqual(applied[0], 3)
|
the-stack_106_22722 | #!/usr/bin/env python3
import argparse
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
import yaml
import os
import subprocess
import re
import time
import threading
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--configpath",
type=str,
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../config/"),
help="Path to the configuration *.yaml files")
parser.add_argument(
"--stm32Fw",
type=str,
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../crazyflie-firmware/cf2.bin"),
help="Path to cf2.bin")
parser.add_argument(
"--nrf51Fw",
type=str,
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../crazyflie2-nrf-firmware/cf2_nrf.bin"),
help="Path to cf2_nrf.bin")
args = parser.parse_args()
if not os.path.exists(os.path.join(args.configpath, "all_crazyflies.yaml")) or \
not os.path.exists(os.path.join(args.configpath, "crazyflie_types.yaml")) or \
not os.path.exists(os.path.join(args.configpath, "crazyflies.yaml")):
print("ERROR: Could not find all yaml configuration files in configpath ({}).".format(args.configpath))
exit()
if not os.path.exists(args.stm32Fw):
print("WARNING: Could not find STM32 firmware ({}).".format(args.stm32Fw))
if not os.path.exists(args.nrf51Fw):
print("WARNING: Could not find NRF51 firmware ({}).".format(args.nrf51Fw))
# read a yaml file
def read_by_name(path):
with open(path, 'r') as ymlfile:
root = yaml.safe_load(ymlfile)
return root
def selected_cfs():
nodes = {name: node for name, node in allCrazyflies.items() if widgets[name].checked.get()}
return nodes
def save():
nodes = selected_cfs()
with open(os.path.join(args.configpath, "crazyflies.yaml"), 'w') as outfile:
yaml.dump(nodes, outfile)
allCrazyflies = read_by_name(os.path.join(args.configpath, "all_crazyflies.yaml"))
enabled = read_by_name(os.path.join(args.configpath, "crazyflies.yaml")).keys()
with open(os.path.join(args.configpath, "crazyflie_types.yaml"), 'r') as ymlfile:
data = yaml.safe_load(ymlfile)
cfTypes = data
# compute absolute pixel coordinates from the initial positions
positions = [node["initial_position"] for node in allCrazyflies.values()]
DOWN_DIR = [-1, 0]
RIGHT_DIR = [0, -1]
def dot(a, b):
return a[0] * b[0] + a[1] * b[1]
pixel_x = [120 * dot(pos, RIGHT_DIR) for pos in positions]
pixel_y = [120 * dot(pos, DOWN_DIR) for pos in positions]
xmin, ymin = min(pixel_x), min(pixel_y)
xmax, ymax = max(pixel_x), max(pixel_y)
# construct the main window
top = Tkinter.Tk()
top.title('Crazyflie Chooser')
# construct the frame containing the absolute-positioned checkboxes
width = xmax - xmin + 50 # account for checkbox + text width
height = ymax - ymin + 50 # account for checkbox + text height
frame = Tkinter.Frame(top, width=width, height=height)
class CFWidget(Tkinter.Frame):
def __init__(self, parent, name):
Tkinter.Frame.__init__(self, parent)
self.checked = Tkinter.BooleanVar()
checkbox = Tkinter.Checkbutton(self, variable=self.checked, command=save,
padx=0, pady=0)
checkbox.grid(row=0, column=0, sticky='E')
nameLabel = Tkinter.Label(self, text=name, padx=0, pady=0)
nameLabel.grid(row=0, column=1, sticky='W')
self.batteryLabel = Tkinter.Label(self, text="", fg="#999999", padx=0, pady=0)
self.batteryLabel.grid(row=1, column=0, columnspan=2, sticky='E')
self.versionLabel = Tkinter.Label(self, text="", fg="#999999", padx=0, pady=0)
self.versionLabel.grid(row=2, column=0, columnspan=2, sticky='E')
# construct all the checkboxes
widgets = {}
for (id, node), x, y in zip(allCrazyflies.items(), pixel_x, pixel_y):
w = CFWidget(frame, str(id))
w.place(x = x - xmin, y = y - ymin)
w.checked.set(id in enabled)
widgets[id] = w
# dragging functionality - TODO alt-drag to deselect
drag_start = None
drag_startstate = None
def minmax(a, b):
return min(a, b), max(a, b)
def mouseDown(event):
global drag_start, drag_startstate
drag_start = (event.x_root, event.y_root)
drag_startstate = [cf.checked.get() for cf in widgets.values()]
def mouseUp(event):
save()
def drag(event, select):
x, y = event.x_root, event.y_root
dragx0, dragx1 = minmax(drag_start[0], x)
dragy0, dragy1 = minmax(drag_start[1], y)
def dragcontains(widget):
x0 = widget.winfo_rootx()
y0 = widget.winfo_rooty()
x1 = x0 + widget.winfo_width()
y1 = y0 + widget.winfo_height()
return not (x0 > dragx1 or x1 < dragx0 or y0 > dragy1 or y1 < dragy0)
        # depends on iteration order over dicts being consistent
for initial, cf in zip(drag_startstate, widgets.values()):
if dragcontains(cf):
cf.checked.set(select)
else:
cf.checked.set(initial)
top.bind('<ButtonPress-1>', mouseDown)
top.bind('<ButtonPress-3>', mouseDown)
top.bind('<B1-Motion>', lambda event: drag(event, True))
top.bind('<B3-Motion>', lambda event: drag(event, False))
top.bind('<ButtonRelease-1>', mouseUp)
top.bind('<ButtonRelease-3>', mouseUp)
# buttons for clearing/filling all checkboxes
def clear():
for box in widgets.values():
box.checked.set(False)
save()
def fill():
for box in widgets.values():
box.checked.set(True)
save()
def mkbutton(parent, name, command):
button = Tkinter.Button(parent, text=name, command=command)
button.pack(side='left')
buttons = Tkinter.Frame(top)
mkbutton(buttons, "Clear", clear)
mkbutton(buttons, "Fill", fill)
# construct bottom buttons for utility scripts
def sysOff():
nodes = selected_cfs()
for name, crazyflie in nodes.items():
uri = crazyflie["uri"]
subprocess.call(["ros2 run crazyswarm2 reboot --uri " + uri + " --mode sysoff"], shell=True)
def reboot():
nodes = selected_cfs()
for name, crazyflie in nodes.items():
uri = crazyflie["uri"]
print(name)
subprocess.call(["ros2 run crazyswarm2 reboot --uri " + uri], shell=True)
def flashSTM():
nodes = selected_cfs()
for name, crazyflie in nodes.items():
uri = crazyflie["uri"]
print("Flash STM32 FW to {}".format(uri))
subprocess.call(["ros2 run crazyswarm2 flash --uri " + uri + " --target stm32 --filename " + args.stm32Fw], shell=True)
def flashNRF():
nodes = selected_cfs()
for name, crazyflie in nodes.items():
uri = crazyflie["uri"]
print("Flash NRF51 FW to {}".format(uri))
subprocess.call(["ros2 run crazyswarm2 flash --uri " + uri + " --target nrf51 --filename " + args.nrf51Fw], shell=True)
def checkBattery():
# reset color
for id, w in widgets.items():
w.batteryLabel.config(foreground='#999999')
# query each CF
nodes = selected_cfs()
for name, crazyflie in nodes.items():
uri = crazyflie["uri"]
cfType = crazyflie["type"]
bigQuad = cfTypes[cfType]["big_quad"]
try:
if not bigQuad:
voltage = subprocess.check_output(["ros2 run crazyswarm2 battery --uri " + uri], shell=True)
else:
voltage = subprocess.check_output(["ros2 run crazyswarm2 battery --uri " + uri + " --external 1"], shell=True)
except subprocess.CalledProcessError:
voltage = None # CF not available
color = '#000000'
if voltage is not None:
voltage = float(voltage)
if voltage < cfTypes[cfType]["battery"]["voltage_warning"]:
color = '#FF8800'
if voltage < cfTypes[cfType]["battery"]["voltage_critical"]:
color = '#FF0000'
widgetText = "{:.2f} v".format(voltage)
else:
widgetText = "Err"
widgets[name].batteryLabel.config(foreground=color, text=widgetText)
# def checkVersion():
# for id, w in widgets.items():
# w.versionLabel.config(foreground='#999999')
# proc = subprocess.Popen(
# ['python3', SCRIPTDIR + 'version.py'], stdout=subprocess.PIPE)
# versions = dict()
# versionsCount = dict()
# versionForMost = None
# versionForMostCount = 0
# for line in iter(proc.stdout.readline, ''):
# print(line)
# match = re.search("(\d+): ([0-9a-fA-F]+),(\d),([0-9a-fA-F]+)", line)
# if match:
# addr = int(match.group(1))
# v1 = match.group(2)
# modified = int(match.group(3)) == 1
# v2 = match.group(4)
# v = (v1,v2)
# versions[addr] = v
# if v in versionsCount:
# versionsCount[v] += 1
# else:
# versionsCount[v] = 1
# if versionsCount[v] > versionForMostCount:
# versionForMostCount = versionsCount[v]
# versionForMost = v
# for addr, v in versions.items():
# color = '#000000'
# if v != versionForMost:
# color = '#FF0000'
# widgets[addr].versionLabel.config(foreground=color, text=str(v[0])[0:3] + "," + str(v[1])[0:3])
scriptButtons = Tkinter.Frame(top)
mkbutton(scriptButtons, "battery", checkBattery)
# currently not supported
# mkbutton(scriptButtons, "version", checkVersion)
mkbutton(scriptButtons, "sysOff", sysOff)
mkbutton(scriptButtons, "reboot", reboot)
# mkbutton(scriptButtons, "flash (STM)", flashSTM)
# mkbutton(scriptButtons, "flash (NRF)", flashNRF)
# start background threads
def checkBatteryLoop():
while True:
# rely on GIL
checkBattery()
time.sleep(10.0) # seconds
# checkBatteryThread = threading.Thread(target=checkBatteryLoop)
# checkBatteryThread.daemon = True # so it exits when the main thread exit
# checkBatteryThread.start()
# place the widgets in the window and start
buttons.pack()
frame.pack(padx=10, pady=10)
scriptButtons.pack()
top.mainloop()
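# Added sketch (assumed layout, not taken from the repository): the script reads
# all_crazyflies.yaml / crazyflies.yaml as a mapping from name to an entry with at
# least 'initial_position', 'uri' and 'type', and crazyflie_types.yaml as a mapping
# from type name to its properties, roughly like:
#
#   cf1:
#     initial_position: [0.0, 0.0, 0.0]   # only the first two components are used here
#     uri: radio://0/80/2M/E7E7E7E701
#     type: default
#
#   default:
#     big_quad: false
#     battery: {voltage_warning: 3.8, voltage_critical: 3.7}
#
# Only the keys actually read above are shown; real files may contain more.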
|
the-stack_106_22725 | import logging
logger = logging.getLogger("pyfb")
logger.setLevel(logging.DEBUG)
from django.core.urlresolvers import reverse
from django.http import *
from django.shortcuts import *
from django.template.context import RequestContext
from pyfb import auth
import config
auth_denied_template = config.get("PYFB_AUTH_DENIED_TEMPLATE")
fb_base_url = config.get("PYFB_FB_BASE_URL") # base url where app is hosted
def auth_redirect(request):
''' After logging in user, FB redirects a GET request back to our app.
It includes a 'code' parameter that is an auth_token that can be exchanged for an
access_token but it seems like the same parameter appears on every subsequent
GET to the canvas page, so it seems like it can be just ignored at this point.
Just log whether the user authorized our app and then redirect back to the canvas
page.
'''
logger.info("auth_redirect")
context = RequestContext(request)
if request.method != "GET":
logger.error("Auth redirect callback with method %s, expected GET" % request.method)
return HttpResponseNotAllowed(["GET",])
if request.GET.has_key("error_reason"):
# user did not authorize our app
error_reason = request.GET["error_reason"]
error = error_desc = None
error = request.GET.get("error")
error_desc = request.GET.get("error_description")
msg = "User did not authorize the application. "
msg += "error_reason='%s'," % error_reason
msg += " error='%s'," % error
msg += " error_desc=%s" % error_desc
logger.info(msg)
return render_to_response(auth_denied_template, context)
elif request.GET.has_key("code"):
''' a auth token was included, means that the app was authorized. '''
logger.info("A user successfully authorized the application.")
# at this point we have a code that can be traded in for an access token. no need to do this yet, since we'll get the
# same code the next time the canvas page loads, so just redirect.
return HttpResponseRedirect(fb_base_url)
else:
# unknown scenario.
raise Exception("Bad request to auth_rediect:")
|
the-stack_106_22726 | # Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import json
from itertools import chain
from subprocess import CalledProcessError
from typing import List
from telepresence.proxy import RemoteInfo
from telepresence.runner import Runner
from telepresence.utilities import random_name
def _is_subnet_of(a: ipaddress.IPv4Network, b: ipaddress.IPv4Network) -> bool:
try:
return (
b.network_address <= a.network_address
and b.broadcast_address >= a.broadcast_address
)
except AttributeError:
raise TypeError(
"Unable to test subnet containment " +
"between {} and {}".format(a, b)
)
def covering_cidrs(ips: List[str]) -> List[str]:
"""
Given list of IPs, return a list of CIDRs that covers them all.
IPs that belong to a private network are presumed as a /24 and won't be
expanded into a global network. IPs that belong to a global network are
presumed as a /31.
"""
def collapse(ns):
return list(ipaddress.collapse_addresses(ns))
assert len(ips) > 0
ip_addresses = map(ipaddress.IPv4Address, ips)
networks = collapse([
ipaddress.IPv4Interface(str(ip) +
("/31" if ip.is_global else "/24")).network
for ip in ip_addresses
])
# Increase network size until it combines everything:
results = []
while len(networks) > 1:
network = networks[0]
rest = networks[1:]
supernet = network.supernet()
while not supernet.is_global:
collapsed_networks = collapse([supernet] + rest)
# Keep the supernet if it did collapse networks together.
if len(collapsed_networks) <= len(rest):
network = supernet
rest = [n for n in rest if not _is_subnet_of(n, network)]
supernet = supernet.supernet()
networks = rest
results.append(network)
return [n.with_prefixlen for n in chain(results, networks)]
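# Illustrative example (added, not part of the original module): two private
# addresses in adjacent /24s are already merged by the collapse step, e.g.
#   covering_cidrs(["10.0.0.3", "10.0.1.7"]) -> ["10.0.0.0/23"]
# while networks that cannot be collapsed directly are widened via supernet(),
# keeping each supernet only if it actually merges some of the remaining networks.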
# Script to dump resolved IPs to stdout as JSON list:
_GET_IPS_PY = """
import socket, sys, json
result = []
for host in sys.argv[1:]:
host_ips = []
for x in socket.getaddrinfo(host, None):
if x[:2] == (socket.AF_INET, socket.SOCK_STREAM):
host_ips.append(x[4][0])
result.append(host_ips)
sys.stdout.write(json.dumps(result))
sys.stdout.flush()
"""
def get_proxy_cidrs(
runner: Runner,
remote_info: RemoteInfo,
hosts_or_ips: List[str] = []
) -> List[str]:
"""
Figure out which IP ranges to route via sshuttle.
1. Given the IP address of a service, figure out IP ranges used by
Kubernetes services.
2. Extract pod ranges from API.
3. Any hostnames/IPs given by the user using --also-proxy.
See https://github.com/kubernetes/kubernetes/issues/25533 for eventual
long-term solution for service CIDR.
"""
span = runner.span()
# Run script to convert --also-proxy hostnames to IPs, doing name
# resolution inside Kubernetes, so we get cloud-local IP addresses for
# cloud resources:
result = set(k8s_resolve(runner, remote_info, hosts_or_ips))
context_cache = runner.cache.child(runner.kubectl.context)
result.update(context_cache.lookup("podCIDRs", lambda: podCIDRs(runner)))
result.update(
context_cache.lookup("serviceCIDRs", lambda: serviceCIDR(runner))
)
span.end()
return list(result)
def k8s_resolve(
runner: Runner, remote_info: RemoteInfo, hosts_or_ips: List[str]
) -> List[str]:
"""
Resolve a list of host and/or ip addresses inside the cluster
using the context, namespace, and remote_info supplied. Note that
if any hostname fails to resolve this will fail Telepresence.
"""
# Separate hostnames from IPs and IP ranges
hostnames = []
ip_ranges = []
ipcache = runner.cache.child(runner.kubectl.context).child("ip-list")
for proxy_target in hosts_or_ips:
try:
addr = ipaddress.ip_network(proxy_target)
except ValueError:
pass
else:
ip_ranges.append(str(addr))
continue
if proxy_target in ipcache:
ip_ranges += ipcache[proxy_target]
continue
hostnames.append(proxy_target)
if hostnames:
try:
hostname_ips = json.loads(
runner.get_output(
runner.kubectl(
"exec", "--container=" + remote_info.container_name,
remote_info.pod_name, "--", "python3", "-c",
_GET_IPS_PY, *hostnames
)
)
)
except CalledProcessError as e:
runner.write(str(e))
raise runner.fail(
"We failed to do a DNS lookup inside Kubernetes for the "
"hostname(s) you listed in "
"--also-proxy ({}). Maybe you mistyped one of them?".format(
", ".join(hosts_or_ips)
)
)
else:
hostname_ips = []
resolved_ips = [] # type: List[str]
for host, ips in zip(hostnames, hostname_ips):
ipcache[host] = ips
resolved_ips += ips
return resolved_ips + ip_ranges
def podCIDRs(runner: Runner):
"""
Get pod IPs from nodes if possible, otherwise use pod IPs as heuristic:
"""
cidrs = set()
try:
nodes = json.loads(
runner.get_output(runner.kubectl("get", "nodes", "-o", "json"))
)["items"]
except CalledProcessError as e:
runner.write("Failed to get nodes: {}".format(e))
else:
for node in nodes:
pod_cidr = node["spec"].get("podCIDR")
if pod_cidr is not None:
cidrs.add(pod_cidr)
if len(cidrs) == 0:
# Fallback to using pod IPs:
try:
pod_data = runner.get_output(
runner.kubectl(
"get", "pods", "--all-namespaces", "-o", "json"
)
)
except CalledProcessError as e:
runner.write("Failed to get pods for all namespaces: {}".format(e))
pod_data = runner.get_output(
runner.kubectl("get", "pods", "-o", "json")
)
pods = json.loads(pod_data)["items"]
pod_ips = []
for pod in pods:
try:
pod_ips.append(pod["status"]["podIP"])
except KeyError:
# Apparently a problem on OpenShift
pass
if pod_ips:
cidrs.update(covering_cidrs(pod_ips))
return list(cidrs)
def serviceCIDR(runner: Runner) -> List[str]:
"""
Get cluster service IP range.
"""
serviceCIDR = cluster_serviceCIDR(runner)
if not serviceCIDR:
return guess_serviceCIDR(runner)
return serviceCIDR
def cluster_serviceCIDR(runner: Runner) -> List[str]:
"""
Get cluster service IP range from apiserver.
"""
try:
pods = json.loads(
runner.get_output(
runner.kubectl(
"get", "pods", "-n", "kube-system", "-o", "json"
)
)
)["items"]
except CalledProcessError:
return []
for pod_data in pods:
for container in pod_data["spec"]["containers"]:
if container["name"] != "kube-apiserver":
continue
for param in container["command"]:
if param.startswith("--service-cluster-ip-range="):
return [param.split("=", 1)[1]]
return []
return []
def guess_serviceCIDR(runner: Runner) -> List[str]:
"""
Get service IP range, based on heuristic of constructing CIDR from
existing Service IPs. We create more services if there are less
than 8, to ensure some coverage of the IP range.
"""
def get_service_ips():
services = json.loads(
runner.get_output(runner.kubectl("get", "services", "-o", "json"))
)["items"]
# FIXME: Add test(s) here so we don't crash on, e.g., ExternalName
return [
svc["spec"]["clusterIP"] for svc in services
if svc["spec"].get("clusterIP", "None") != "None"
]
service_ips = get_service_ips()
new_services = [] # type: List[str]
# Ensure we have at least 8 ClusterIP Services:
while len(service_ips) + len(new_services) < 8:
new_service = random_name()
runner.check_call(
runner.kubectl(
"create", "service", "clusterip", new_service, "--tcp=3000"
)
)
new_services.append(new_service)
if new_services:
service_ips = get_service_ips()
# Store Service CIDR:
service_cidrs = covering_cidrs(service_ips)
# Delete new services:
for new_service in new_services:
runner.check_call(runner.kubectl("delete", "service", new_service))
if runner.chatty:
runner.show(
"Guessing that Services IP range is {}. Services started after"
" this point will be inaccessible if are outside this range;"
" restart telepresence if you can't access a "
"new Service.\n".format(service_cidrs)
)
return service_cidrs
|
the-stack_106_22727 | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
from ropper.loaders.loader import *
from ropper.common.error import LoaderError
from ropper.common.error import RopperError
from ropper.arch import Endianess
import filebytes.elf as elf
import os
class ELF(Loader):
def __init__(self, filename, bytes=None, arch=None):
self.__execSections = None
self.__dataSections = None
super(ELF, self).__init__(filename, bytes, arch)
@property
def entryPoint(self):
return self._binary.entryPoint
def _getImageBase(self):
return self._binary.imageBase
def _loadDefaultArch(self):
try:
machine = elf.EM[self._binary.elfHeader.header.e_machine]
cls = elf.ELFCLASS[self._binary.elfHeader.header.e_ident[elf.EI.CLASS]]
end = self._binary._bytes[elf.EI.DATA]
return getArch( (machine,cls, end ),self._binary.elfHeader.header.e_entry)
except BaseException as e:
raise RopperError(e)
@property
def executableSections(self):
if not self.__execSections:
self.__execSections = []
for phdr in self._binary.segments:
if phdr.header.p_flags & elf.PF.EXEC > 0:
self.__execSections.append(Section(name=str(elf.PT[phdr.header.p_type]), sectionbytes=phdr.raw, virtualAddress=phdr.header.p_vaddr, offset=phdr.header.p_offset))
return self.__execSections
@property
def dataSections(self):
if not self.__dataSections:
self.__dataSections = []
for shdr in self._binary.sections:
if shdr.header.sh_flags & elf.SHF.ALLOC and not (shdr.header.sh_flags & elf.SHF.EXECINSTR) and not(shdr.header.sh_type & elf.SHT.NOBITS):
self.__dataSections.append(Section(shdr.name, shdr.raw, shdr.header.sh_addr, shdr.header.sh_offset, shdr.header))
return self.__dataSections
@property
def codeVA(self):
        for phdr in self._binary.segments:
            if phdr.header.p_type == elf.PT.INTERP:
return phdr.header.p_vaddr
return 0
@property
def type(self):
return Type.ELF
def setASLR(self, enable):
raise LoaderError('Not available for elf files')
def setNX(self, enable):
perm = elf.PF.READ | elf.PF.WRITE if enable else elf.PF.READ | elf.PF.WRITE | elf.PF.EXEC
phdrs = self._binary.segments
for phdr in phdrs:
if phdr.header.p_type == elf.PT.GNU_STACK:
phdr.header.p_flags = perm
self.save()
def getSection(self, name):
for shdr in self._binary.sections:
if shdr.name == name:
return Section(shdr.name, shdr.raw, shdr.header.sh_addr, shdr.header.sh_addr - self._binary.imageBase)
raise RopperError('No such section: %s' % name)
def checksec(self):
return {}
def _loadFile(self, fileName, bytes=None):
return elf.ELF(fileName, bytes)
@classmethod
def isSupportedFile(cls, fileName, bytes=None):
if bytes:
return elf.ELF.isSupportedContent(bytes)
return elf.ELF.isSupportedFile(fileName)
def getArch(*params):
arch = ARCH[params[0]]
if arch==ARM and (params[1] & 1) == 1:
return ARMTHUMB
return arch
ARCH = {(elf.EM.INTEL_386 , elf.ELFCLASS.BITS_32, elf.ELFDATA.LSB): x86,
(elf.EM.INTEL_80860, elf.ELFCLASS.BITS_32, elf.ELFDATA.LSB): x86,
(elf.EM.IA_64, elf.ELFCLASS.BITS_64, elf.ELFDATA.LSB): x86_64,
(elf.EM.X86_64, elf.ELFCLASS.BITS_64, elf.ELFDATA.LSB): x86_64,
(elf.EM.MIPS, elf.ELFCLASS.BITS_32, elf.ELFDATA.MSB): MIPSBE,
(elf.EM.MIPS, elf.ELFCLASS.BITS_32, elf.ELFDATA.LSB): MIPS,
(elf.EM.MIPS, elf.ELFCLASS.BITS_64, elf.ELFDATA.MSB): MIPS64BE,
(elf.EM.MIPS, elf.ELFCLASS.BITS_64, elf.ELFDATA.LSB): MIPS64,
(elf.EM.ARM, elf.ELFCLASS.BITS_32, elf.ELFDATA.MSB) : ARMBE,
(elf.EM.ARM, elf.ELFCLASS.BITS_32, elf.ELFDATA.LSB) : ARM,
(elf.EM.ARM64, elf.ELFCLASS.BITS_64, elf.ELFDATA.LSB) : ARM64,
(elf.EM.PPC, elf.ELFCLASS.BITS_32, elf.ELFDATA.MSB) : PPC,
(elf.EM.PPC, elf.ELFCLASS.BITS_64, elf.ELFDATA.MSB) : PPC64}
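# Added note: getArch() keys into ARCH with the (machine, class, endianness) triple
# taken from the ELF header and additionally checks the entry point's low bit for
# 32-bit ARM, e.g. (hypothetical entry value)
#   getArch((elf.EM.ARM, elf.ELFCLASS.BITS_32, elf.ELFDATA.LSB), 0x10001) -> ARMTHUMB
# because an odd entry address marks a Thumb-mode binary.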
|
the-stack_106_22729 | """Structural data from FreeSurfer output."""
from itertools import product
import os
from ..utils import run_fs
def get_structural_data(subjects_dir, subject, out_dir):
"""Extract structural data.
    The data includes cortical thickness, cortical surface area,
and subcortical volumes from FreeSurfer processing output.
Parameters
----------
subjects_dir : str
A directory where subjects data are stored.
subject : str
The subject, for which data should be extracted.
out_dir : str
The output directory.
"""
out_files = get_cortex_data(subjects_dir, subject, out_dir)
out_files["aseg" + "_file"] = get_volumes_data(subjects_dir, subject,
out_dir)
return out_files
def get_volumes_data(subjects_dir, subject, out_dir):
"""Extract sub-cortical volumes information from aseg files.
Parameters
----------
subjects_dir : str
A directory where subjects data are stored.
subject : str
The subject, for which data should be extracted.
out_dir : str
The output directory.
Returns
-------
out_file : str
The path to generated data.
"""
subject_dir = os.path.join(out_dir, subject)
if not os.path.isdir(subject_dir):
os.makedirs(subject_dir)
out_file = os.path.join(subject_dir, 'aseg.csv')
cmd = 'python2 $FREESURFER_HOME/bin/asegstats2table'\
f' --subjects {subject} --tablefile {out_file}'\
' -d comma --meas volume'
print(subject, out_file)
print(cmd)
run_fs(cmd, env={'SUBJECTS_DIR': subjects_dir})
return out_file
def get_cortex_data(subjects_dir, subject, out_dir):
"""Extract cortical thickness and surface area.
Parameters
----------
subjects_dir : str
A directory where subjects data are stored.
subject : str
The subject, for which data should be extracted.
out_dir : str
The output directory.
Returns
-------
out_files : dict
A dictionary with the paths to generated data.
"""
out_files = {}
# surfmeasure
meas = ('thickness', 'area')
hemi = ('lh', 'rh')
for h, m in product(hemi, meas):
subject_dir = os.path.join(out_dir, subject)
if not os.path.isdir(subject_dir):
os.makedirs(subject_dir)
out_file = os.path.join(subject_dir, f'{h}.{m}.mgh')
out_files[h + '_' + m + '_file'] = out_file
cmd = f'mris_preproc --s {subject} --target fsaverage --hemi {h} '\
f'--meas {m} --out {out_file}'
run_fs(cmd, env={'SUBJECTS_DIR': subjects_dir})
return out_files
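# Hypothetical usage sketch (paths and subject name are placeholders, not from the
# original module):
#
#   files = get_structural_data('/data/fs_subjects', 'sub-01', '/data/out')
#   # -> {'lh_thickness_file': '/data/out/sub-01/lh.thickness.mgh',
#   #     'rh_area_file': '/data/out/sub-01/rh.area.mgh', ...,
#   #     'aseg_file': '/data/out/sub-01/aseg.csv'}
#
# Both helpers shell out to FreeSurfer (mris_preproc / asegstats2table) via run_fs
# with SUBJECTS_DIR set to subjects_dir, so a working FreeSurfer setup is assumed.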
|
the-stack_106_22733 | import wx
from book_manager.bookpanel import BookPanel
class BookFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, style=wx.MINIMIZE_BOX | wx.CAPTION | wx.CLOSE_BOX)
self.__width, self.__height = 555, 460
screen_size = wx.DisplaySize()
self.SetPosition(((screen_size[0] - self.__width) / 2, (screen_size[1] - self.__height) / 2))
        self.SetTitle('Leida Team Book Management System')
self.SetIcon(wx.Icon('bgi.ico', wx.BITMAP_TYPE_ICO))
BookPanel(self)
self.SetSize(self.__width, self.__height)
if __name__ == '__main__':
app = wx.App()
BookFrame().Show(True)
app.MainLoop()
|
the-stack_106_22735 |
import json
import os
from functools import partial
from PySide2 import QtWidgets, QtCore
import hotbox_designer
from hotbox_designer.commands import OPEN_COMMAND, CLOSE_COMMAND, SWITCH_COMMAND
from hotbox_designer.reader import HotboxReader
from hotbox_designer.designer.application import HotboxEditor
from hotbox_designer.applications import Nuke, Maya, Houdini
from hotbox_designer.widgets import BoolCombo, Title, CommandButton
from hotbox_designer.qtutils import icon
from hotbox_designer.dialog import (
import_hotbox, export_hotbox, import_hotbox_link, CreateHotboxDialog,
CommandDisplayDialog, HotkeySetter, warning)
from hotbox_designer.data import (
get_valid_name, TRIGGERING_TYPES, save_datas, load_hotboxes_datas,
hotbox_data_to_html, load_json, ensure_old_data_compatible)
hotboxes = {}
hotbox_manager = None
APPLICATIONS = {'maya': Maya, 'nuke': Nuke, 'houdini': Houdini}
def launch_manager(application):
global hotbox_manager
if hotbox_manager is None:
hotbox_manager = HotboxManager(APPLICATIONS[application]())
hotbox_manager.show()
def initialize(application):
if hotboxes:
return
load_hotboxes(application)
def load_hotboxes(application):
hotboxes_datas = load_hotboxes_datas(application.local_file)
file_ = application.shared_file
hotboxes_datas += [
ensure_old_data_compatible(load_json(f)) for f in load_json(file_)]
for hotboxes_data in hotboxes_datas:
name = hotboxes_data['general']['name']
reader = HotboxReader(hotboxes_data, parent=None)
reader.hideSubmenusRequested.connect(hide_submenus)
hotboxes[name] = reader
def clear_loaded_hotboxes():
global hotboxes
hotboxes = {}
def show(name):
hotboxes[name].show()
def hide(name):
hotboxes[name].hide()
def switch(name):
if hotboxes[name].isVisible():
return hide(name)
return show(name)
def hide_submenus():
for name in hotboxes:
if hotboxes[name].is_submenu:
hide(name)
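# Added note (illustrative, the hotbox name is hypothetical): the module-level
# functions above are what the generated OPEN/CLOSE/SWITCH commands call into,
# roughly:
#
#   initialize(Maya())      # or Nuke() / Houdini(); loads every saved hotbox once
#   show('my_hotbox')       # display a hotbox by name
#   hide('my_hotbox')       # hide it again
#   switch('my_hotbox')     # toggle its visibility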
class HotboxManager(QtWidgets.QWidget):
def __init__(self, application):
parent = application.main_window
super(HotboxManager, self).__init__(parent, QtCore.Qt.Tool)
self.setWindowTitle('Hotbox Designer')
self.application = application
self.hotbox_designer = None
hotboxes_data = load_hotboxes_datas(self.application.local_file)
self.personnal_model = HotboxPersonalTableModel(hotboxes_data)
self.personnal_view = HotboxTableView()
self.personnal_view.set_model(self.personnal_model)
method = self._personnal_selected_row_changed
self.personnal_view.selectedRowChanged.connect(method)
self.toolbar = HotboxManagerToolbar()
self.toolbar.link.setEnabled(False)
self.toolbar.unlink.setEnabled(False)
self.toolbar.newRequested.connect(self._call_create)
self.toolbar.linkRequested.connect(self._call_add_link)
self.toolbar.unlinkRequested.connect(self._call_unlink)
self.toolbar.editRequested.connect(self._call_edit)
self.toolbar.deleteRequested.connect(self._call_remove)
self.toolbar.importRequested.connect(self._call_import)
self.toolbar.exportRequested.connect(self._call_export)
self.toolbar.setHotkeyRequested.connect(self._call_set_hotkey)
setter_enabled = bool(application.available_set_hotkey_modes)
self.toolbar.hotkeyset.setEnabled(setter_enabled)
self.edit = HotboxGeneralSettingWidget()
self.edit.optionSet.connect(self._call_option_set)
self.edit.setEnabled(False)
self.edit.open_command.released.connect(self._call_open_command)
method = self._call_play_open_command
self.edit.open_command.playReleased.connect(method)
self.edit.close_command.released.connect(self._call_close_command)
method = self._call_play_close_command
self.edit.close_command.playReleased.connect(method)
self.edit.switch_command.released.connect(self._call_switch_command)
method = self._call_play_switch_command
self.edit.switch_command.playReleased.connect(method)
self.personnal = QtWidgets.QWidget()
self.hlayout = QtWidgets.QHBoxLayout(self.personnal)
self.hlayout.setContentsMargins(8, 0, 8, 8)
self.hlayout.setSpacing(4)
self.hlayout.addWidget(self.personnal_view)
self.hlayout.addWidget(self.edit)
links = load_json(application.shared_file, default=[])
self.shared_model = HotboxSharedTableModel(links)
self.shared_view = HotboxTableView()
self.shared_view.set_model(self.shared_model)
method = self._shared_selected_row_changed
self.shared_view.selectedRowChanged.connect(method)
self.infos = HotboxGeneralInfosWidget()
self.infos.setEnabled(False)
self.infos.open_command.released.connect(self._call_open_command)
method = self._call_play_open_command
self.infos.open_command.playReleased.connect(method)
self.infos.close_command.released.connect(self._call_close_command)
method = self._call_play_close_command
self.infos.close_command.playReleased.connect(method)
self.infos.switch_command.released.connect(self._call_switch_command)
method = self._call_play_switch_command
self.infos.switch_command.playReleased.connect(method)
self.shared = QtWidgets.QWidget()
self.hlayout2 = QtWidgets.QHBoxLayout(self.shared)
self.hlayout2.setContentsMargins(8, 0, 8, 8)
self.hlayout2.setSpacing(4)
self.hlayout2.addWidget(self.shared_view)
self.hlayout2.addWidget(self.infos)
self.tabwidget = QtWidgets.QTabWidget()
self.tabwidget.addTab(self.personnal, "Personal")
self.tabwidget.addTab(self.shared, "Shared")
self.tabwidget.currentChanged.connect(self.tab_index_changed)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.tabwidget)
def get_selected_hotbox(self):
index = self.tabwidget.currentIndex()
table = self.shared_view if index else self.personnal_view
model = self.shared_model if index else self.personnal_model
row = table.get_selected_row()
if row is None:
return
return model.hotboxes[row]
def save_hotboxes(self, *_):
save_datas(self.application.local_file, self.personnal_model.hotboxes)
datas = self.shared_model.hotboxes_links
save_datas(self.application.shared_file, datas)
def _personnal_selected_row_changed(self):
hotbox = self.get_selected_hotbox()
if hotbox is not None:
self.edit.set_hotbox_settings(hotbox['general'])
self.edit.setEnabled(True)
else:
self.edit.setEnabled(False)
def tab_index_changed(self):
index = self.tabwidget.currentIndex()
self.toolbar.edit.setEnabled(index == 0)
self.toolbar.delete.setEnabled(index == 0)
self.toolbar.link.setEnabled(index == 1)
self.toolbar.unlink.setEnabled(index == 1)
def hotbox_data_modified(self, hotbox_data):
row = self.personnal_view.get_selected_row()
self.personnal_model.set_hotbox(row, hotbox_data)
clear_loaded_hotboxes()
self.save_hotboxes()
def _shared_selected_row_changed(self):
index = self.shared_view.get_selected_row()
hotbox = self.shared_model.hotboxes[index]
if hotbox is not None:
self.infos.set_hotbox_data(hotbox)
self.infos.setEnabled(True)
else:
self.infos.setEnabled(False)
def _get_open_command(self):
hotbox = self.get_selected_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
return OPEN_COMMAND.format(
application=self.application.name,
name=hotbox['general']['name'])
def _call_open_command(self):
CommandDisplayDialog(self._get_open_command(), self).exec_()
def _call_play_open_command(self):
exec(self._get_open_command())
def _get_close_command(self):
hotbox = self.get_selected_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
return CLOSE_COMMAND.format(name=hotbox['general']['name'])
def _call_close_command(self):
CommandDisplayDialog(self._get_close_command(), self).exec_()
def _call_play_close_command(self):
exec(self._get_close_command())
def _get_switch_command(self):
hotbox = self.get_selected_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
return SWITCH_COMMAND.format(
application=self.application.name,
name=hotbox['general']['name'])
def _call_switch_command(self):
CommandDisplayDialog(self._get_switch_command(), self).exec_()
def _call_play_switch_command(self):
exec(self._get_switch_command())
def _call_edit(self):
if self.tabwidget.currentIndex():
return
hotbox_data = self.get_selected_hotbox()
if hotbox_data is None:
return warning('Hotbox designer', 'No hotbox selected')
if self.hotbox_designer is not None:
self.hotbox_designer.close()
self.hotbox_designer = HotboxEditor(
hotbox_data,
self.application,
parent=self.application.main_window)
method = self.hotbox_data_modified
self.hotbox_designer.hotboxDataModified.connect(method)
self.hotbox_designer.show()
def _call_create(self):
hotboxes_ = self.personnal_model.hotboxes + self.shared_model.hotboxes
dialog = CreateHotboxDialog(hotboxes_, self)
result = dialog.exec_()
if result == QtWidgets.QDialog.Rejected:
return
self.personnal_model.layoutAboutToBeChanged.emit()
self.personnal_model.hotboxes.append(dialog.hotbox())
self.personnal_model.layoutChanged.emit()
        # retrieve and select the last hotbox in the list (it is the new one)
hotbox_count = len(self.personnal_model.hotboxes) - 1
if hotbox_count > -1:
self.personnal_view.selectRow(hotbox_count)
self.save_hotboxes()
clear_loaded_hotboxes()
def _call_add_link(self):
filename = import_hotbox_link()
if not filename:
return
self.shared_model.add_link(filename)
        # retrieve and select the last hotbox in the list (it is the new one)
hotbox_count = len(self.shared_model.hotboxes) - 1
if hotbox_count > -1:
self.shared_view.selectRow(hotbox_count)
self.save_hotboxes()
clear_loaded_hotboxes()
def _call_unlink(self):
index = self.shared_view.get_selected_row()
if index is None:
return warning('Hotbox designer', 'No hotbox selected')
self.shared_model.remove_link(index)
self.save_hotboxes()
clear_loaded_hotboxes()
def _call_remove(self):
hotbox = self.get_selected_hotbox()
if hotbox is None:
return warning('Hotbox designer', 'No hotbox selected')
areyousure = QtWidgets.QMessageBox.question(
self,
'remove',
            'Removing a hotbox is permanent, are you sure you want to continue?',
buttons=QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
defaultButton=QtWidgets.QMessageBox.No)
if areyousure == QtWidgets.QMessageBox.No:
return
self.personnal_model.layoutAboutToBeChanged.emit()
self.personnal_model.hotboxes.remove(hotbox)
self.personnal_model.layoutChanged.emit()
self.save_hotboxes()
clear_loaded_hotboxes()
def _call_option_set(self, option, value):
self.personnal_model.layoutAboutToBeChanged.emit()
hotbox = self.get_selected_hotbox()
if option == 'name':
value = get_valid_name(self.personnal_model.hotboxes, value)
if hotbox is not None:
hotbox['general'][option] = value
self.personnal_model.layoutChanged.emit()
self.save_hotboxes()
clear_loaded_hotboxes()
def _call_set_hotkey(self):
hotbox = self.get_selected_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
modes = self.application.available_set_hotkey_modes
dialog = HotkeySetter(modes)
result = dialog.exec_()
name = hotbox['general']['name']
open_cmd = OPEN_COMMAND.format(
name=name,
application=self.application.name)
switch_cmd = SWITCH_COMMAND.format(
name=name,
application=self.application.name)
if result == QtWidgets.QDialog.Rejected:
return
self.application.set_hotkey(
name=name,
mode=dialog.mode(),
sequence=dialog.get_key_sequence(),
open_cmd=open_cmd,
close_cmd=CLOSE_COMMAND.format(name=name),
switch_cmd=switch_cmd)
def _call_export(self):
hotbox = self.get_selected_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
export_hotbox(hotbox)
def _call_import(self):
hotbox = import_hotbox()
if not hotbox:
return warning('Hotbox designer', 'No hotbox selected')
hotboxes = self.personnal_model.hotboxes
name = get_valid_name(hotboxes, hotbox['general']['name'])
hotbox['general']['name'] = name
self.personnal_model.layoutAboutToBeChanged.emit()
self.personnal_model.hotboxes.append(hotbox)
self.personnal_model.layoutChanged.emit()
self.save_hotboxes()
clear_loaded_hotboxes()
class HotboxManagerToolbar(QtWidgets.QToolBar):
newRequested = QtCore.Signal()
editRequested = QtCore.Signal()
deleteRequested = QtCore.Signal()
linkRequested = QtCore.Signal()
unlinkRequested = QtCore.Signal()
importRequested = QtCore.Signal()
exportRequested = QtCore.Signal()
setHotkeyRequested = QtCore.Signal()
def __init__(self, parent=None):
super(HotboxManagerToolbar, self).__init__(parent)
self.setIconSize(QtCore.QSize(16, 16))
self.new = QtWidgets.QAction(icon('manager-new.png'), '', self)
self.new.setToolTip('Create new hotbox')
self.new.triggered.connect(self.newRequested.emit)
self.edit = QtWidgets.QAction(icon('manager-edit.png'), '', self)
self.edit.setToolTip('Edit hotbox')
self.edit.triggered.connect(self.editRequested.emit)
self.delete = QtWidgets.QAction(icon('manager-delete.png'), '', self)
self.delete.setToolTip('Delete hotbox')
self.delete.triggered.connect(self.deleteRequested.emit)
self.link = QtWidgets.QAction(icon('link.png'), '', self)
self.link.setToolTip('Link to external hotbox file')
self.link.triggered.connect(self.linkRequested.emit)
self.unlink = QtWidgets.QAction(icon('unlink.png'), '', self)
self.unlink.setToolTip('Remove hotbox file link')
self.unlink.triggered.connect(self.unlinkRequested.emit)
self.import_ = QtWidgets.QAction(icon('manager-import.png'), '', self)
self.import_.setToolTip('Import hotbox')
self.import_.triggered.connect(self.importRequested.emit)
self.export = QtWidgets.QAction(icon('manager-export.png'), '', self)
self.export.setToolTip('Export hotbox')
self.export.triggered.connect(self.exportRequested.emit)
self.hotkeyset = QtWidgets.QAction(icon('touch.png'), '', self)
self.hotkeyset.setToolTip('Set hotkey')
self.hotkeyset.triggered.connect(self.setHotkeyRequested.emit)
self.addAction(self.new)
self.addAction(self.edit)
self.addAction(self.delete)
self.addSeparator()
self.addAction(self.link)
self.addAction(self.unlink)
self.addSeparator()
self.addAction(self.import_)
self.addAction(self.export)
self.addSeparator()
self.addAction(self.hotkeyset)
class HotboxTableView(QtWidgets.QTableView):
selectedRowChanged = QtCore.Signal()
def __init__(self, parent=None):
super(HotboxTableView, self).__init__(parent)
self.selection_model = None
vheader = self.verticalHeader()
vheader.hide()
vheader.setSectionResizeMode(vheader.ResizeToContents)
hheader = self.horizontalHeader()
hheader.setStretchLastSection(True)
hheader.hide()
self.setAlternatingRowColors(True)
self.setWordWrap(True)
self.setShowGrid(False)
self.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
def selection_changed(self, *_):
return self.selectedRowChanged.emit()
def set_model(self, model):
self.setModel(model)
self.selection_model = self.selectionModel()
self.selection_model.selectionChanged.connect(self.selection_changed)
def get_selected_row(self):
indexes = self.selection_model.selectedIndexes()
rows = list({index.row() for index in indexes})
if not rows:
return None
return rows[0]
class HotboxPersonalTableModel(QtCore.QAbstractTableModel):
def __init__(self, hotboxes, parent=None):
super(HotboxPersonalTableModel, self).__init__(parent=parent)
self.hotboxes = hotboxes
def columnCount(self, _):
return 1
def rowCount(self, _):
return len(self.hotboxes)
def set_hotbox(self, row, hotbox):
self.layoutAboutToBeChanged.emit()
self.hotboxes[row] = hotbox
self.layoutChanged.emit()
def data(self, index, role):
row, col = index.row(), index.column()
hotbox = self.hotboxes[row]
if role == QtCore.Qt.DisplayRole:
if col == 0:
return hotbox['general']['name']
class HotboxSharedTableModel(QtCore.QAbstractTableModel):
def __init__(self, hotboxes_links, parent=None):
super(HotboxSharedTableModel, self).__init__(parent=parent)
self.hotboxes_links = hotboxes_links
self.hotboxes = [load_json(l) for l in hotboxes_links]
def columnCount(self, _):
return 1
def rowCount(self, _):
return len(self.hotboxes_links)
def add_link(self, hotbox_link):
self.layoutAboutToBeChanged.emit()
self.hotboxes_links.append(hotbox_link)
self.hotboxes.append(load_json(hotbox_link))
self.layoutChanged.emit()
def remove_link(self, index):
self.layoutAboutToBeChanged.emit()
self.hotboxes_links.pop(index)
self.hotboxes.pop(index)
self.layoutChanged.emit()
def data(self, index, role):
row, col = index.row(), index.column()
hotbox = self.hotboxes_links[row]
if role == QtCore.Qt.DisplayRole:
if col == 0:
return hotbox
class HotboxGeneralInfosWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(HotboxGeneralInfosWidget, self).__init__(parent)
self.setFixedWidth(200)
self.label = QtWidgets.QLabel()
self.open_command = CommandButton('show')
self.close_command = CommandButton('hide')
self.switch_command = CommandButton('switch')
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.layout.addWidget(Title('Infos'))
self.layout.addSpacing(8)
self.layout.addWidget(self.label)
self.layout.addSpacing(8)
self.layout.addStretch(1)
self.layout.addWidget(Title('Commands'))
self.layout.addSpacing(8)
self.layout.addWidget(self.open_command)
self.layout.addWidget(self.close_command)
self.layout.addWidget(self.switch_command)
def set_hotbox_data(self, hotbox_data):
self.label.setText(hotbox_data_to_html(hotbox_data))
class HotboxGeneralSettingWidget(QtWidgets.QWidget):
optionSet = QtCore.Signal(str, object)
applyRequested = QtCore.Signal()
def __init__(self, parent=None):
super(HotboxGeneralSettingWidget, self).__init__(parent)
self.setFixedWidth(200)
self.name = QtWidgets.QLineEdit()
self.name.textEdited.connect(partial(self.optionSet.emit, 'name'))
self.submenu = BoolCombo(False)
self.submenu.valueSet.connect(partial(self.optionSet.emit, 'submenu'))
self.triggering = QtWidgets.QComboBox()
self.triggering.addItems(TRIGGERING_TYPES)
self.triggering.currentIndexChanged.connect(self._triggering_changed)
self.aiming = BoolCombo(False)
self.aiming.valueSet.connect(partial(self.optionSet.emit, 'aiming'))
self.leaveclose = BoolCombo(False)
method = partial(self.optionSet.emit, 'leaveclose')
self.leaveclose.valueSet.connect(method)
self.open_command = CommandButton('show')
self.close_command = CommandButton('hide')
self.switch_command = CommandButton('switch')
self.layout = QtWidgets.QFormLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.layout.setHorizontalSpacing(5)
self.layout.addRow(Title('Options'))
self.layout.addItem(QtWidgets.QSpacerItem(0, 8))
self.layout.addRow('name', self.name)
self.layout.addItem(QtWidgets.QSpacerItem(0, 8))
self.layout.addRow('is submenu', self.submenu)
self.layout.addRow('triggering', self.triggering)
self.layout.addRow('aiming', self.aiming)
self.layout.addRow('close on leave', self.leaveclose)
self.layout.addItem(QtWidgets.QSpacerItem(0, 8))
self.layout.addRow(Title('Commands'))
self.layout.addItem(QtWidgets.QSpacerItem(0, 8))
self.layout.addRow(self.open_command)
self.layout.addRow(self.close_command)
self.layout.addRow(self.switch_command)
def _triggering_changed(self, _):
self.optionSet.emit('triggering', self.triggering.currentText())
def _touch_changed(self, _):
self.optionSet.emit('touch', self.touch.text())
def set_hotbox_settings(self, hotbox_settings):
self.blockSignals(True)
self.submenu.setCurrentText(str(hotbox_settings['submenu']))
self.name.setText(hotbox_settings['name'])
self.triggering.setCurrentText(hotbox_settings['triggering'])
self.aiming.setCurrentText(str(hotbox_settings['aiming']))
self.leaveclose.setCurrentText(str(hotbox_settings['leaveclose']))
self.blockSignals(False)
|
the-stack_106_22736 | import cv2
import os
import numpy as np
import SimpleITK as sitk
segs = '/mnt/data9/independent_segs/lungs'
raw = '/mnt/data9/independent_data'
out_path = '/mnt/data9/independent_crop'
for category in os.listdir(segs):
    for volume in os.listdir(os.path.join(segs, category)):
        person = volume.split('_')[1]
        stage = volume.split('_')[2]
        # Load the raw CT volume and its lung segmentation as numpy arrays (z, y, x).
        R = sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(raw, category, person + '_' + stage)))
        M = sitk.GetArrayFromImage(sitk.ReadImage(os.path.join(segs, category, volume)))
        for i in range(M.shape[0]):
            m = M[i, :, :]
            I = R[i, :, :]
            # Window intensities from roughly [-1400, 100] HU into [0, 255],
            # clipping so the uint8 cast below cannot wrap around.
            I = np.clip((I + 1400) / 1500 * 255, 0, 255)
            # OpenCV writes BGR, so the slice goes into blue/green and the mask into red.
            IMG = np.stack([I, I, m * 255], -1).astype(np.uint8)
            # Earlier cropping experiment, left disabled:
            # yy, xx = np.where(I > 0)
            # try:
            #     I = I[yy.min():yy.max(), xx.min():xx.max()]
            # except ValueError:
            #     pass
            name = os.path.join(out_path, volume.split('.')[0] + '_' + str(i) + '.jpg')
            cv2.imwrite(name, IMG)
|
the-stack_106_22737 | # -*- coding: utf-8 -*-
'''
Query and modify an LDAP database (alternative interface)
=========================================================
.. versionadded:: 2016.3.0
This is an alternative to the ``ldap`` interface provided by the
:py:mod:`ldapmod <salt.modules.ldapmod>` execution module.
:depends: - ``ldap`` Python module
'''
from __future__ import absolute_import
available_backends = set()
try:
import ldap
import ldap.ldapobject
import ldap.modlist
import ldap.sasl
available_backends.add('ldap')
except ImportError:
pass
import logging
import salt.ext.six as six
import sys
log = logging.getLogger(__name__)
def __virtual__():
'''Only load this module if the Python ldap module is present'''
return bool(len(available_backends))
class LDAPError(Exception):
'''Base class of all LDAP exceptions raised by backends.
This is only used for errors encountered while interacting with
the LDAP server; usage errors (e.g., invalid backend name) will
have a different type.
:ivar cause: backend exception object, if applicable
'''
def __init__(self, message, cause=None):
super(LDAPError, self).__init__(message)
self.cause = cause
def _convert_exception(e):
'''Convert an ldap backend exception to an LDAPError and raise it.'''
args = ('exception in ldap backend: {0}'.format(repr(e)), e)
if six.PY2:
six.reraise(LDAPError, args, sys.exc_info()[2])
else:
six.raise_from(LDAPError(*args), e)
def _bind(l, bind=None):
'''Bind helper.'''
if bind is None:
return
method = bind.get('method', 'simple')
if method is None:
return
elif method == 'simple':
l.simple_bind_s(bind.get('dn', ''), bind.get('password', ''))
elif method == 'sasl':
sasl_class = getattr(ldap.sasl,
bind.get('mechanism', 'EXTERNAL').lower())
creds = bind.get('credentials', None)
if creds is None:
creds = {}
auth = sasl_class(*creds.get('args', []), **creds.get('kwargs', {}))
l.sasl_interactive_bind_s(bind.get('dn', ''), auth)
else:
raise ValueError('unsupported bind method "' + method
+ '"; supported bind methods: simple sasl')
class _connect_ctx(object):
def __init__(self, c):
self.c = c
def __enter__(self):
return self
def __exit__(self, *exc):
pass
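        # Leaving __exit__ as a no-op keeps the underlying connection open, which
        # is what makes the returned object safe to reuse and to nest in `with`
        # blocks (see the connect() docstring below).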
def connect(connect_spec=None):
'''Connect and optionally bind to an LDAP server.
:param connect_spec:
This can be an LDAP connection object returned by a previous
call to :py:func:`connect` (in which case the argument is
simply returned), ``None`` (in which case an empty dict is
used), or a dict with the following keys:
* ``'backend'``
Optional; default depends on which Python LDAP modules are
installed. Name of the Python LDAP module to use. Only
``'ldap'`` is supported at the moment.
* ``'url'``
Optional; defaults to ``'ldapi:///'``. URL to the LDAP
server.
* ``'bind'``
Optional; defaults to ``None``. Describes how to bind an
identity to the LDAP connection. If ``None``, an
anonymous connection is made. Valid keys:
* ``'method'``
Optional; defaults to ``None``. The authentication
method to use. Valid values include but are not
necessarily limited to ``'simple'``, ``'sasl'``, and
``None``. If ``None``, an anonymous connection is
made. Available methods depend on the chosen backend.
* ``'mechanism'``
Optional; defaults to ``'EXTERNAL'``. The SASL
mechanism to use. Ignored unless the method is
``'sasl'``. Available methods depend on the chosen
backend and the server's capabilities.
* ``'credentials'``
Optional; defaults to ``None``. An object specific to
the chosen SASL mechanism and backend that represents
the authentication credentials. Ignored unless the
method is ``'sasl'``.
For the ``'ldap'`` backend, this is a dictionary. If
``None``, an empty dict is used. Keys:
* ``'args'``
Optional; defaults to an empty list. A list of
arguments to pass to the SASL mechanism
constructor. See the SASL mechanism constructor
documentation in the ``ldap.sasl`` Python module.
* ``'kwargs'``
Optional; defaults to an empty dict. A dict of
keyword arguments to pass to the SASL mechanism
constructor. See the SASL mechanism constructor
documentation in the ``ldap.sasl`` Python module.
* ``'dn'``
Optional; defaults to an empty string. The
distinguished name to bind.
* ``'password'``
Optional; defaults to an empty string. Password for
binding. Ignored if the method is ``'sasl'``.
* ``'tls'``
Optional; defaults to ``None``. A backend-specific object
containing settings to override default TLS behavior.
For the ``'ldap'`` backend, this is a dictionary. Not all
settings in this dictionary are supported by all versions
of ``python-ldap`` or the underlying TLS library. If
``None``, an empty dict is used. Possible keys:
* ``'starttls'``
If present, initiate a TLS connection using StartTLS.
(The value associated with this key is ignored.)
* ``'cacertdir'``
Set the path of the directory containing CA
certificates.
* ``'cacertfile'``
Set the pathname of the CA certificate file.
* ``'certfile'``
Set the pathname of the certificate file.
* ``'cipher_suite'``
Set the allowed cipher suite.
* ``'crlcheck'``
Set the CRL evaluation strategy. Valid values are
``'none'``, ``'peer'``, and ``'all'``.
* ``'crlfile'``
Set the pathname of the CRL file.
* ``'dhfile'``
Set the pathname of the file containing the parameters
for Diffie-Hellman ephemeral key exchange.
* ``'keyfile'``
Set the pathname of the certificate key file.
* ``'newctx'``
If present, instruct the underlying TLS library to
create a new TLS context. (The value associated with
this key is ignored.)
* ``'protocol_min'``
Set the minimum protocol version.
* ``'random_file'``
Set the pathname of the random file when
``/dev/random`` and ``/dev/urandom`` are not
available.
* ``'require_cert'``
Set the certificate validation policy. Valid values
are ``'never'``, ``'hard'``, ``'demand'``,
``'allow'``, and ``'try'``.
* ``'opts'``
Optional; defaults to ``None``. A backend-specific object
containing options for the backend.
For the ``'ldap'`` backend, this is a dictionary of
OpenLDAP options to set. If ``None``, an empty dict is
          used. Each key is the name of an OpenLDAP option
constant without the ``'LDAP_OPT_'`` prefix, then
converted to lower case.
:returns:
an object representing an LDAP connection that can be used as
the ``connect_spec`` argument to any of the functions in this
module (to avoid the overhead of making and terminating
multiple connections).
This object should be used as a context manager. It is safe
to nest ``with`` statements.
CLI example:
.. code-block:: bash
salt '*' ldap3.connect "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'dn': 'cn=admin,dc=example,dc=com',
'password': 'secret'}
}"
'''
if isinstance(connect_spec, _connect_ctx):
return connect_spec
if connect_spec is None:
connect_spec = {}
backend_name = connect_spec.get('backend', 'ldap')
if backend_name not in available_backends:
raise ValueError('unsupported backend or required Python module'
+ ' unavailable: {0}'.format(backend_name))
url = connect_spec.get('url', 'ldapi:///')
try:
l = ldap.initialize(url)
l.protocol_version = ldap.VERSION3
# set up tls
tls = connect_spec.get('tls', None)
if tls is None:
tls = {}
vars = {}
for k, v in six.iteritems(tls):
if k in ('starttls', 'newctx'):
vars[k] = True
elif k in ('crlcheck', 'require_cert'):
l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()),
getattr(ldap, 'OPT_X_TLS_' + v.upper()))
else:
l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()), v)
if vars.get('starttls', False):
l.start_tls_s()
if vars.get('newctx', False):
l.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
# set up other options
l.set_option(ldap.OPT_REFERRALS, 0)
opts = connect_spec.get('opts', None)
if opts is None:
opts = {}
for k, v in six.iteritems(opts):
opt = getattr(ldap, 'OPT_' + k.upper())
l.set_option(opt, v)
_bind(l, connect_spec.get('bind', None))
except ldap.LDAPError as e:
_convert_exception(e)
return _connect_ctx(l)
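# Illustrative Python-side sketch (annotation added for clarity, not part of the
# upstream module): the object returned by connect() can itself be passed as
# ``connect_spec`` so several calls share one bound connection, e.g.
#
#     spec = {'url': 'ldaps://ldap.example.com/',
#             'bind': {'method': 'simple',
#                      'dn': 'cn=admin,dc=example,dc=com',
#                      'password': 'secret'}}
#     with connect(spec) as ctx:
#         entries = search(ctx, 'dc=example,dc=com')
#         add(ctx, 'cn=foo,dc=example,dc=com',
#             {'objectClass': ['device'], 'cn': ['foo']})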
def search(connect_spec, base, scope='subtree', filterstr='(objectClass=*)',
attrlist=None, attrsonly=0):
'''Search an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param base:
Distinguished name of the entry at which to start the search.
:param scope:
One of the following:
* ``'subtree'``
Search the base and all of its descendants.
* ``'base'``
Search only the base itself.
* ``'onelevel'``
Search only the base's immediate children.
:param filterstr:
String representation of the filter to apply in the search.
:param attrlist:
Limit the returned attributes to those in the specified list.
If ``None``, all attributes of each entry are returned.
:param attrsonly:
If non-zero, don't return any attribute values.
:returns:
a dict of results. The dict is empty if there are no results.
The dict maps each returned entry's distinguished name to a
dict that maps each of the matching attribute names to a list
of its values.
CLI example:
.. code-block:: bash
salt '*' ldap3.search "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'dn': 'cn=admin,dc=example,dc=com',
'password': 'secret',
},
}" "base='dc=example,dc=com'"
'''
l = connect(connect_spec)
scope = getattr(ldap, 'SCOPE_' + scope.upper())
try:
results = l.c.search_s(base, scope, filterstr, attrlist, attrsonly)
except ldap.NO_SUCH_OBJECT:
results = []
except ldap.LDAPError as e:
_convert_exception(e)
return dict(results)
def add(connect_spec, dn, attributes):
'''Add an entry to an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param attributes:
Non-empty dict mapping each of the new entry's attributes to a
non-empty iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.add "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret',
},
}" "dn='dc=example,dc=com'" "attributes={'example': 'values'}"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# addModlist() expects (also to ensure that the caller's objects
# are not modified)
attributes = dict(((attr, list(vals))
for attr, vals in six.iteritems(attributes)))
log.info('adding entry: dn: {0} attributes: {1}'.format(
repr(dn), repr(attributes)))
modlist = ldap.modlist.addModlist(attributes)
try:
l.c.add_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True
def delete(connect_spec, dn):
'''Delete an entry from an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.delete "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
'''
l = connect(connect_spec)
log.info('deleting entry: dn: {0}'.format(repr(dn)))
try:
l.c.delete_s(dn)
except ldap.LDAPError as e:
_convert_exception(e)
return True
def modify(connect_spec, dn, directives):
'''Modify an entry in an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param directives:
Iterable of directives that indicate how to modify the entry.
Each directive is a tuple of the form ``(op, attr, vals)``,
where:
* ``op`` identifies the modification operation to perform.
One of:
* ``'add'`` to add one or more values to the attribute
* ``'delete'`` to delete some or all of the values from the
attribute. If no values are specified with this
operation, all of the attribute's values are deleted.
Otherwise, only the named values are deleted.
* ``'replace'`` to replace all of the attribute's values
with zero or more new values
* ``attr`` names the attribute to modify
* ``vals`` is an iterable of values to add or delete
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.modify "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
directives="('add', 'example', ['example_val'])"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# modify_s() expects (also to ensure that the caller's objects are
# not modified)
modlist = [(getattr(ldap, 'MOD_' + op.upper()), attr, list(vals))
for op, attr, vals in directives]
try:
l.c.modify_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True
def change(connect_spec, dn, before, after):
'''Modify an entry in an LDAP database.
This does the same thing as :py:func:`modify`, but with a simpler
interface. Instead of taking a list of directives, it takes a
before and after view of an entry, determines the differences
between the two, computes the directives, and executes them.
Any attribute value present in ``before`` but missing in ``after``
is deleted. Any attribute value present in ``after`` but missing
in ``before`` is added. Any attribute value in the database that
is not mentioned in either ``before`` or ``after`` is not altered.
Any attribute value that is present in both ``before`` and
``after`` is ignored, regardless of whether that attribute value
exists in the database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param dn:
Distinguished name of the entry.
:param before:
The expected state of the entry before modification. This is
a dict mapping each attribute name to an iterable of values.
:param after:
The desired state of the entry after modification. This is a
dict mapping each attribute name to an iterable of values.
:returns:
``True`` if successful, raises an exception otherwise.
CLI example:
.. code-block:: bash
salt '*' ldap3.change "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'password': 'secret'}
}" dn='cn=admin,dc=example,dc=com'
before="{'example_value': 'before_val'}"
after="{'example_value': 'after_val'}"
'''
l = connect(connect_spec)
# convert the "iterable of values" to lists in case that's what
# modifyModlist() expects (also to ensure that the caller's dicts
# are not modified)
before = dict(((attr, list(vals))
for attr, vals in six.iteritems(before)))
after = dict(((attr, list(vals))
for attr, vals in six.iteritems(after)))
modlist = ldap.modlist.modifyModlist(before, after)
try:
l.c.modify_s(dn, modlist)
except ldap.LDAPError as e:
_convert_exception(e)
return True
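# Worked example (annotation added for clarity): with
#     before = {'mail': ['old@example.com'], 'cn': ['admin']}
#     after  = {'mail': ['new@example.com'], 'cn': ['admin']}
# the computed directives remove the old 'mail' value and install the new one,
# while 'cn' is left alone because it is identical on both sides.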
|
the-stack_106_22738 | import numpy as np
from sklearn.neighbors import KNeighborsClassifier as SKKNeighborsClassifier
from skopt.space import Integer
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.problem_types import ProblemTypes
class KNeighborsClassifier(Estimator):
"""
K-Nearest Neighbors Classifier.
Arguments:
n_neighbors (int): Number of neighbors to use by default. Defaults to 5.
weights ({‘uniform’, ‘distance’} or callable): Weight function used in prediction. Can be:
- ‘uniform’ : uniform weights. All points in each neighborhood are weighted equally.
            - ‘distance’ : weight points by the inverse of their distance. In this case, closer neighbors of a query point will have a greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights.
Defaults to "uniform".
algorithm ({‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}): Algorithm used to compute the nearest neighbors:
- ‘ball_tree’ will use BallTree
- ‘kd_tree’ will use KDTree
- ‘brute’ will use a brute-force search.
‘auto’ will attempt to decide the most appropriate algorithm based on the values passed to fit method. Defaults to "auto".
Note: fitting on sparse input will override the setting of this parameter, using brute force.
leaf_size (int): Leaf size passed to BallTree or KDTree.
This can affect the speed of the construction and query, as well as the memory required to store the tree.
The optimal value depends on the nature of the problem. Defaults to 30.
p (int): Power parameter for the Minkowski metric.
When p = 1, this is equivalent to using manhattan_distance (l1),
and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
Defaults to 2.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "KNN Classifier"
hyperparameter_ranges = {
"n_neighbors": Integer(2, 12),
"weights": ["uniform", "distance"],
"algorithm": ["auto", "ball_tree", "kd_tree", "brute"],
"leaf_size": Integer(10, 30),
"p": Integer(1, 5),
}
"""{
"n_neighbors": Integer(2, 12),
"weights": ["uniform", "distance"],
"algorithm": ["auto", "ball_tree", "kd_tree", "brute"],
"leaf_size": Integer(10, 30),
"p": Integer(1, 5),
}"""
model_family = ModelFamily.K_NEIGHBORS
"""ModelFamily.K_NEIGHBORS"""
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def __init__(
self,
n_neighbors=5,
weights="uniform",
algorithm="auto",
leaf_size=30,
p=2,
random_seed=0,
**kwargs
):
parameters = {
"n_neighbors": n_neighbors,
"weights": weights,
"algorithm": algorithm,
"leaf_size": leaf_size,
"p": p,
}
parameters.update(kwargs)
knn_classifier = SKKNeighborsClassifier(**parameters)
super().__init__(
parameters=parameters, component_obj=knn_classifier, random_seed=random_seed
)
@property
def feature_importance(self):
"""
Returns array of 0's matching the input number of features as feature_importance is
not defined for KNN classifiers.
"""
num_features = self._component_obj.n_features_in_
return np.zeros(num_features)
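# Minimal usage sketch (illustrative toy data, not part of the original module);
# it assumes the standard evalml component interface of fit/predict on pandas
# inputs.
if __name__ == "__main__":
    import pandas as pd
    X = pd.DataFrame({"a": [0, 1, 2, 3], "b": [1, 0, 1, 0]})
    y = pd.Series([0, 0, 1, 1])
    clf = KNeighborsClassifier(n_neighbors=2, weights="distance")
    clf.fit(X, y)
    print(clf.predict(X))
    print(clf.feature_importance)  # zero vector: KNN exposes no importances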
|
the-stack_106_22740 | """
ModelAccounts API
Allow clients to fetch ModelAccounts Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 3
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import json
import atexit
import mimetypes
from multiprocessing.pool import ThreadPool
import io
import os
import re
import typing
from typing import Tuple, Dict, Any, Callable, Optional
from urllib.parse import quote
from urllib3.fields import RequestField
from fds.sdk.Portfolio import rest
from fds.sdk.Portfolio.configuration import Configuration
from fds.sdk.Portfolio.exceptions import ApiTypeError, ApiValueError, ApiException
from fds.sdk.Portfolio.model_utils import (
ModelNormal,
ModelSimple,
ModelComposed,
check_allowed_values,
check_validations,
date,
datetime,
deserialize_file,
file_type,
model_to_dict,
none_type,
validate_and_convert_types
)
ResponseType = Tuple[Any]
"""
Types to serialize the API response into.
A tuple containing:
* valid classes
* a list containing valid classes (for list schemas)
* a dict containing a tuple of valid classes as the value
Example values:
- (str,)
- (Pet,)
- (float, none_type)
- ([int, none_type],)
- ({str: (bool, str, int, float, date, datetime, str, none_type)},)
"""
HttpStatusCode = int
ResponseTypeByStatusCode = Dict[HttpStatusCode, Optional[ResponseType]]
"""
Map specifying return types per HTTP status code.
Examples value:
- { 200: (ModelA,), 201: (None,) }
"""
ResponseWrapper = Callable[[HttpStatusCode, Any], Any]
ResponseTypeWithWrapper = Tuple[ResponseTypeByStatusCode, Optional[ResponseWrapper]]
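# Illustrative value (annotation added; the model name is hypothetical): an
# endpoint returning ModelAccountsResponse on 200 and an empty body on 202,
# with a post-processing hook, could declare
#     response_type = ({200: (ModelAccountsResponse,), 202: None},
#                      lambda status, data: data)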
class ApiClient(object):
"""Generic API client for OpenAPI client library builds.
OpenAPI generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the OpenAPI
templates.
NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
:param configuration: .Configuration object for this client
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to
the API.
:param cookie: a cookie to include in the header when making calls
to the API
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
_pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
if configuration is None:
configuration = Configuration.get_default_copy()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'fds-sdk/python/Portfolio/0.20.0'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
@property
def user_agent(self):
"""User agent for this API client"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[ResponseTypeWithWrapper] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None,
_content_type: typing.Optional[str] = None
):
config = self.configuration
# header parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
header_params = dict(self.parameters_to_tuples(header_params,
collection_formats))
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
path_params = self.parameters_to_tuples(path_params,
collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
resource_path = resource_path.replace(
'{%s}' % k,
quote(str(v), safe=config.safe_chars_for_path_param)
)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = self.parameters_to_tuples(query_params,
collection_formats)
# post parameters
if post_params or files:
post_params = post_params if post_params else []
post_params = self.sanitize_for_serialization(post_params)
post_params = self.parameters_to_tuples(post_params,
collection_formats)
post_params.extend(self.files_parameters(files))
if header_params['Content-Type'].startswith("multipart"):
post_params = self.parameters_to_multipart(post_params, dict)
# body
if body:
body = self.sanitize_for_serialization(body)
# auth setting
self.update_params_for_auth(header_params, query_params,
auth_settings, resource_path, method, body)
# request url
if _host is None:
url = self.configuration.host + resource_path
else:
# use server/host defined in path or operation instead
url = _host + resource_path
try:
# perform request and return response
response_data = self.request(
method, url, query_params=query_params, headers=header_params,
post_params=post_params, body=body,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
except ApiException as e:
e.body = e.body.decode('utf-8')
raise e
self.last_response = response_data
return_data = response_data
if not _preload_content:
return (return_data)
# deserialize response data with response code to type serialization mapping
if response_type is not None:
(status_code_map, response_wrapper) = response_type
else:
status_code_map = {}
response_wrapper = None
if response_data.status in status_code_map:
response_type = status_code_map[response_data.status]
if response_type != (file_type,):
encoding = "utf-8"
content_type = response_data.getheader('content-type')
if content_type is not None:
match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type)
if match:
encoding = match.group(1)
response_data.data = response_data.data.decode(encoding)
return_data = self.deserialize(
response_data,
response_type,
_check_type
)
else:
return_data = None
if response_wrapper is not None:
return_data = response_wrapper(response_data.status, return_data)
if _return_http_data_only:
return return_data
else:
return return_data, response_data.status, response_data.getheaders()
def parameters_to_multipart(self, params, collection_types):
"""Get parameters as list of tuples, formatting as json if value is collection_types
:param params: Parameters as list of two-tuples
:type params: list
:param collection_types: Parameter collection types
:type collection_types: Type(typing.Any)
:return: Parameters as list of tuple or urllib3.fields.RequestField
"""
new_params = []
if collection_types is None:
collection_types = dict
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if isinstance(v, collection_types): # v is instance of collection_type, formatting as application/json
v = json.dumps(v, ensure_ascii=False).encode("utf-8")
field = RequestField(k, v)
field.make_multipart(content_type="application/json; charset=utf-8")
new_params.append(field)
else:
new_params.append((k, v))
return new_params
@classmethod
def sanitize_for_serialization(cls, obj):
"""Prepares data for transmission before it is sent with the rest client
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
If obj is io.IOBase, return the bytes
:param obj: The data to serialize.
:type obj: Any
:raises ApiValueError: Unable to prepare type {} for serialization.
:return: The serialized form of data.
"""
if isinstance(obj, (ModelNormal, ModelComposed)):
return {
key: cls.sanitize_for_serialization(val) for key, val in model_to_dict(obj, serialize=True).items()
}
elif isinstance(obj, io.IOBase):
return cls.get_file_data_and_close_file(obj)
elif isinstance(obj, (str, int, float, none_type, bool)):
return obj
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
elif isinstance(obj, ModelSimple):
return cls.sanitize_for_serialization(obj.value)
elif isinstance(obj, (list, tuple)):
return [cls.sanitize_for_serialization(item) for item in obj]
if isinstance(obj, dict):
return {key: cls.sanitize_for_serialization(val) for key, val in obj.items()}
raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__))
def deserialize(self, response, response_type, _check_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:type response: Any
:param response_type: For the response, a tuple containing:
valid classes
a list containing valid classes (for list schemas)
a dict containing a tuple of valid classes as the value
Example values:
(str,)
(Pet,)
(float, none_type)
([int, none_type],)
({str: (bool, str, int, float, date, datetime, str, none_type)},)
:type response_type: Any
:raises ValueError: Unable to prepare type {} for serialization
:param _check_type: boolean, whether to check the types of the data
received from the server
:type _check_type: bool
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == (file_type,):
content_disposition = response.getheader("Content-Disposition")
return deserialize_file(response.data, self.configuration,
content_disposition=content_disposition)
# fetch data from response object
try:
received_data = json.loads(response.data)
except ValueError:
received_data = response.data
# store our data under the key of 'received_data' so users have some
# context if they are deserializing a string and the data type is wrong
deserialized_data = validate_and_convert_types(
received_data,
response_type,
['received_data'],
True,
_check_type,
configuration=self.configuration
)
return deserialized_data
def call_api(
self,
resource_path: str,
method: str,
path_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
query_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
header_params: typing.Optional[typing.Dict[str, typing.Any]] = None,
body: typing.Optional[typing.Any] = None,
post_params: typing.Optional[typing.List[typing.Tuple[str, typing.Any]]] = None,
files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None,
response_type: typing.Optional[ResponseTypeWithWrapper] = None,
auth_settings: typing.Optional[typing.List[str]] = None,
async_req: typing.Optional[bool] = None,
_return_http_data_only: typing.Optional[bool] = None,
collection_formats: typing.Optional[typing.Dict[str, str]] = None,
_preload_content: bool = True,
_request_timeout: typing.Optional[typing.Union[int, float, typing.Tuple]] = None,
_host: typing.Optional[str] = None,
_check_type: typing.Optional[bool] = None
):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:type resource_path: str
:param method: Method to call.
:type method: str
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:type post_params: dict
:param auth_settings: Auth Settings names for the request.
:type auth_settings: list
:param response_type: Determines the type of the deserialized response.
A tuple containing a map of response types per status code and an optional wrapper function.
Example values:
({200: ModelA, 201: None, 202: ModelB}, None)
({200: ModelA, 201: None, 202: ModelB}, MyWrapper)
:type response_type: ResponseTypeWithWrapper, optional
:raises ApiValueError: Unable to prepare type.
:param files: key -> field name, value -> a list of open file
objects for `multipart/form-data`.
:type files: dict
:param async_req: execute request asynchronously
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:type collection_formats: dict, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _host: host
:type _host: str, optional
:param _check_type: boolean describing if the data back from the server
should have its type checked.
:type _check_type: bool, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_check_type)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_type,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _check_type))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:type params: dict, list
:param collection_formats: Parameter collection formats
:type collection_formats: dict
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
@staticmethod
def get_file_data_and_close_file(file_instance: io.IOBase) -> bytes:
file_data = file_instance.read()
file_instance.close()
return file_data
def files_parameters(self, files: typing.Optional[typing.Dict[str, typing.List[io.IOBase]]] = None):
"""Builds form parameters.
:param files: None or a dict with key=param_name and
value is a list of open file objects
:type files: dict, None
:raises ApiValueError: Cannot read a closed file. The passed in file_type.
:return: List of tuples of form parameters with file data
"""
if files is None:
return []
params = []
for param_name, file_instances in files.items():
if file_instances is None:
# if the file field is nullable, skip None values
continue
for file_instance in file_instances:
if file_instance is None:
# if the file field is nullable, skip None values
continue
if file_instance.closed is True:
raise ApiValueError(
"Cannot read a closed file. The passed in file_type "
"for %s must be open." % param_name
)
filename = os.path.basename(file_instance.name)
filedata = self.get_file_data_and_close_file(file_instance)
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([param_name, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:type accepts: list
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = [x.lower() for x in accepts]
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types, method=None, body=None):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:type content_types: list
:param method: http method (e.g. POST, PATCH).
:type method: str
:param body: http body to send.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = [x.lower() for x in content_types]
if (method == 'PATCH' and
'application/json-patch+json' in content_types and
isinstance(body, list)):
return 'application/json-patch+json'
if 'application/json' in content_types or '*/*' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, queries, auth_settings,
resource_path, method, body):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:type headers: dict
:param queries: Query parameters tuple list to be updated.
:type queries: list
:param auth_settings: Authentication setting identifiers list.
        :type auth_settings: list
:param resource_path: A string representation of the HTTP request resource path.
:type resource_path: str
:param method: A string representation of the HTTP request method.
:type method: str
:param body: A object representing the body of the HTTP request.
The object type is the return value of _encoder.default().
"""
if not auth_settings:
return
auth_settings_dict = self.configuration.auth_settings()
for auth in auth_settings:
auth_setting = auth_settings_dict.get(auth)
if auth_setting:
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
if auth_setting['type'] != 'http-signature':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (ResponseTypeWithWrapper/None): response type map and wrapper function
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
'collection_format_map' (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type',
'_content_type',
'_spec_property_naming'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
extra_types = {
'async_req': (bool,),
'_host_index': (none_type, int),
'_preload_content': (bool,),
'_request_timeout': (none_type, float, (float,), [float], int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,),
'_spec_property_naming': (bool,),
'_content_type': (none_type, str)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param]
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration
)
if kwargs['_check_input_type'] is False:
return
for key, value in kwargs.items():
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
kwargs['_spec_property_naming'],
kwargs['_check_input_type'],
configuration=self.api_client.configuration
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
'body': None,
'collection_format': {},
'file': {},
'form': [],
'header': {},
'path': {},
'query': []
}
for param_name, param_value in kwargs.items():
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == 'body':
params['body'] = param_value
continue
base_name = self.attribute_map[param_name]
if (param_location == 'form' and
self.openapi_types[param_name] == (file_type,)):
params['file'][base_name] = [param_value]
elif (param_location == 'form' and
self.openapi_types[param_name] == ([file_type],)):
# param_value is already a list
params['file'][base_name] = param_value
elif param_location in {'form', 'query'}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {'form', 'query'}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params['collection_format'][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
api_instance = ModelAccountsApi()
api_instance.create_or_update_model_account # this is an instance of the class Endpoint
api_instance.create_or_update_model_account() # this invokes api_instance.create_or_update_model_account.__call__()
which then invokes the callable functions stored in that endpoint at
api_instance.create_or_update_model_account.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
index = self.api_client.configuration.server_operation_index.get(
self.settings['operation_id'], self.api_client.configuration.server_index
) if kwargs['_host_index'] is None else kwargs['_host_index']
server_variables = self.api_client.configuration.server_operation_variables.get(
self.settings['operation_id'], self.api_client.configuration.server_variables
)
_host = self.api_client.configuration.get_host_from_settings(
index, variables=server_variables, servers=self.settings['servers']
)
except IndexError:
if self.settings['servers']:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s" %
len(self.settings['servers'])
)
_host = None
for key, value in kwargs.items():
if key not in self.params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" %
(key, self.settings['operation_id'])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (key not in self.params_map['nullable'] and value is None
and kwargs['_check_input_type'] is False):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" %
(key, self.settings['operation_id'])
)
for key in self.params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings['operation_id'])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map['accept']
if accept_headers_list:
params['header']['Accept'] = self.api_client.select_header_accept(
accept_headers_list)
if kwargs.get('_content_type'):
params['header']['Content-Type'] = kwargs['_content_type']
else:
content_type_headers_list = self.headers_map['content_type']
if content_type_headers_list:
if params['body'] != "":
header_list = self.api_client.select_header_content_type(
content_type_headers_list, self.settings['http_method'],
params['body'])
params['header']['Content-Type'] = header_list
return self.api_client.call_api(
self.settings['endpoint_path'], self.settings['http_method'],
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=self.settings['response_type'],
auth_settings=self.settings['auth'],
async_req=kwargs['async_req'],
_check_type=kwargs['_check_return_type'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
_host=_host,
collection_formats=params['collection_format'])
|
the-stack_106_22742 | import unittest
import X4_parser as X4
import TI_parser as TI
class TestParser(unittest.TestCase):
def test_iq(self):
"""
        Check that the .dat binary file is converted to a .csv file with the
        in-phase and quadrature components stored together.
        """
        file_iq = X4.iq_data('X4data.dat')
        self.assertEqual(file_iq, 'converted')
def test_raw(self):
"""
        Check that the .dat binary file is converted to a .csv file with the
        in-phase and quadrature components stored separately.
        """
        file_raw = X4.raw_data('X4data.dat')
        self.assertEqual(file_raw, 'converted')
def test_TI(self):
"""
        Check that the .bin binary file is converted to a .csv file with the
        IQ data stored together.
"""
file_TI = TI.readTIdata('TIdata.bin')
self.assertEqual(file_TI,'converted')
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22743 | import copy
import json
import logging
import time
from pathlib import Path
from threading import RLock
from uuid import uuid4
from typing import Any, Dict, List
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentMode
from azure.mgmt.msi import ManagedServiceIdentityClient
from cloudtik.core._private.cli_logger import cli_logger
from cloudtik.core.node_provider import NodeProvider
from cloudtik.core.tags import CLOUDTIK_TAG_CLUSTER_NAME, CLOUDTIK_TAG_NODE_NAME
from cloudtik.providers._private._azure.config import (AZURE_MSI_NAME, get_azure_sdk_function,
verify_azure_cloud_storage, bootstrap_azure,
_extract_metadata_for_node, bootstrap_azure_for_read)
from cloudtik.providers._private._azure.utils import get_azure_config, _get_node_info
from cloudtik.providers._private.utils import validate_config_dict
VM_NAME_MAX_LEN = 64
VM_NAME_UUID_LEN = 8
RESOURCE_CHECK_TIME = 20
logger = logging.getLogger(__name__)
azure_logger = logging.getLogger(
"azure.core.pipeline.policies.http_logging_policy")
azure_logger.setLevel(logging.WARNING)
def synchronized(f):
def wrapper(self, *args, **kwargs):
self.lock.acquire()
try:
return f(self, *args, **kwargs)
finally:
self.lock.release()
return wrapper
def get_credential(provider_config):
managed_identity_client_id = provider_config.get("managed_identity_client_id")
if managed_identity_client_id is None:
# No managed identity
credential = DefaultAzureCredential(
exclude_managed_identity_credential=True,
exclude_shared_token_cache_credential=True)
else:
credential = DefaultAzureCredential(
exclude_shared_token_cache_credential=True,
managed_identity_client_id=managed_identity_client_id
)
return credential
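# Illustrative provider_config fragment (annotation; values are placeholders):
#     {"subscription_id": "...", "resource_group": "...",
#      "managed_identity_client_id": "11111111-2222-3333-4444-555555555555"}
# Leaving managed_identity_client_id out makes get_credential fall back to a
# DefaultAzureCredential that skips the managed identity credential entirely.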
class AzureNodeProvider(NodeProvider):
"""Node Provider for Azure
This provider assumes Azure credentials are set by running ``az login``
and the default subscription is configured through ``az account``
or set in the ``provider`` field of the scaler configuration.
Nodes may be in one of three states: {pending, running, terminated}. Nodes
appear immediately once started by ``create_node``, and transition
immediately to terminated when ``terminate_node`` is called.
"""
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
subscription_id = provider_config["subscription_id"]
self.credential = get_credential(provider_config)
self.compute_client = ComputeManagementClient(self.credential,
subscription_id)
self.network_client = NetworkManagementClient(self.credential,
subscription_id)
self.resource_client = ResourceManagementClient(
self.credential, subscription_id)
self.lock = RLock()
# cache node objects
self.cached_nodes = {}
def with_environment_variables(self, node_type_config: Dict[str, Any], node_id: str):
return get_azure_config(self.provider_config, node_type_config, node_id)
@synchronized
def _get_filtered_nodes(self, tag_filters):
def match_tags(vm):
for k, v in tag_filters.items():
if vm.tags is None or vm.tags.get(k) != v:
return False
return True
vms = self.compute_client.virtual_machines.list(
resource_group_name=self.provider_config["resource_group"])
nodes = [self._extract_metadata(vm) for vm in filter(match_tags, vms)]
self.cached_nodes = {node["name"]: node for node in nodes}
return self.cached_nodes
def _extract_metadata(self, vm):
return _extract_metadata_for_node(
vm,
resource_group=self.provider_config["resource_group"],
compute_client=self.compute_client,
network_client=self.network_client
)
def non_terminated_nodes(self, tag_filters):
"""Return a list of node ids filtered by the specified tags dict.
This list must not include terminated nodes. For performance reasons,
providers are allowed to cache the result of a call to nodes() to
serve single-node queries (e.g. is_running(node_id)). This means that
nodes() must be called again to refresh results.
Examples:
>>> provider.non_terminated_nodes({CLOUDTIK_TAG_NODE_KIND: "worker"})
["node-1", "node-2"]
"""
cluster_name_tag = {CLOUDTIK_TAG_CLUSTER_NAME: self.cluster_name}
tag_filters.update(cluster_name_tag)
nodes = self._get_filtered_nodes(tag_filters=tag_filters)
return [
k for k, v in nodes.items()
if not v["status"].startswith("deallocat")
]
def get_node_info(self, node_id):
node = self._get_cached_node(node_id)
return _get_node_info(node)
def is_running(self, node_id):
"""Return whether the specified node is running."""
# always get current status
node = self._get_node(node_id=node_id)
return node["status"] == "running"
def is_terminated(self, node_id):
"""Return whether the specified node is terminated."""
# always get current status
node = self._get_node(node_id=node_id)
return node["status"].startswith("deallocat")
def node_tags(self, node_id):
"""Returns the tags of the given node (string dict)."""
return self._get_cached_node(node_id=node_id)["tags"]
def external_ip(self, node_id):
"""Returns the external ip of the given node."""
ip = (self._get_cached_node(node_id=node_id)["external_ip"]
or self._get_node(node_id=node_id)["external_ip"])
return ip
def internal_ip(self, node_id):
"""Returns the internal ip of the given node."""
ip = (self._get_cached_node(node_id=node_id)["internal_ip"]
or self._get_node(node_id=node_id)["internal_ip"])
return ip
def create_node(self, node_config, tags, count):
"""Creates a number of nodes within the namespace."""
# TODO: restart deallocated nodes if possible
resource_group = self.provider_config["resource_group"]
# load the template file
current_path = Path(__file__).parent
template_path = current_path.joinpath("azure-vm-template.json")
with open(template_path, "r") as template_fp:
template = json.load(template_fp)
# get the tags
config_tags = node_config.get("tags", {}).copy()
config_tags.update(tags)
config_tags[CLOUDTIK_TAG_CLUSTER_NAME] = self.cluster_name
name_tag = config_tags.get(CLOUDTIK_TAG_NODE_NAME, "node")
unique_id = uuid4().hex[:VM_NAME_UUID_LEN]
vm_name = "{name}-{id}".format(name=name_tag, id=unique_id)
template_params = node_config["azure_arm_parameters"].copy()
template_params["vmName"] = vm_name
template_params["vmTags"] = config_tags
template_params["vmCount"] = count
parameters = {
"properties": {
"mode": DeploymentMode.incremental,
"template": template,
"parameters": {
key: {
"value": value
}
for key, value in template_params.items()
}
}
}
# TODO: we could get the private/public ips back directly
create_or_update = get_azure_sdk_function(
client=self.resource_client.deployments,
function_name="create_or_update")
create_or_update(
resource_group_name=resource_group,
deployment_name="cloudtik-vm-{}".format(name_tag),
parameters=parameters).wait()
@synchronized
def set_node_tags(self, node_id, tags):
"""Sets the tag values (string dict) for the specified node."""
node_tags = self._get_cached_node(node_id)["tags"]
node_tags.update(tags)
update = get_azure_sdk_function(
client=self.compute_client.virtual_machines,
function_name="update")
update(
resource_group_name=self.provider_config["resource_group"],
vm_name=node_id,
parameters={"tags": node_tags})
self.cached_nodes[node_id]["tags"] = node_tags
def terminate_node(self, node_id):
"""Terminates the specified node. This will delete the VM and
associated resources (NIC, IP, Storage) for the specified node."""
resource_group = self.provider_config["resource_group"]
try:
# get metadata for node
metadata = self._get_node(node_id)
except KeyError:
# node no longer exists
return
# TODO: deallocate instead of delete to allow possible reuse
# self.compute_client.virtual_machines.deallocate(
# resource_group_name=resource_group,
# vm_name=node_id)
# gather disks to delete later
vm = self.compute_client.virtual_machines.get(
resource_group_name=resource_group, vm_name=node_id)
disks = {d.name for d in vm.storage_profile.data_disks}
disks.add(vm.storage_profile.os_disk.name)
try:
# delete machine, must wait for this to complete
delete = get_azure_sdk_function(
client=self.compute_client.virtual_machines,
function_name="delete")
delete(resource_group_name=resource_group, vm_name=node_id).wait()
except Exception as e:
logger.warning("Failed to delete VM: {}".format(e))
try:
# delete nic
delete = get_azure_sdk_function(
client=self.network_client.network_interfaces,
function_name="delete")
delete(
resource_group_name=resource_group,
network_interface_name=metadata["nic_name"])
except Exception as e:
logger.warning("Failed to delete nic: {}".format(e))
# delete ip address
if "public_ip_name" in metadata:
retry_time = RESOURCE_CHECK_TIME
delete = get_azure_sdk_function(
client=self.network_client.public_ip_addresses,
function_name="delete")
cli_logger.print("Deleting public ip address...")
while retry_time > 0:
try:
delete(
resource_group_name=resource_group,
public_ip_address_name=metadata["public_ip_name"])
cli_logger.print("Successfully deleted public ip address.")
break
except Exception as e:
retry_time = retry_time - 1
if retry_time > 0:
cli_logger.warning(
"Failed to delete public ip address. "
"Remaining {} tries to delete public ip address...".format(retry_time))
time.sleep(1)
else:
cli_logger.error("Failed to delete public ip address. {}", str(e))
# delete disks
for disk in disks:
try:
delete = get_azure_sdk_function(
client=self.compute_client.disks, function_name="delete")
delete(resource_group_name=resource_group, disk_name=disk)
except Exception as e:
logger.warning("Failed to delete disk: {}".format(e))
def _get_node(self, node_id):
self._get_filtered_nodes({}) # Side effect: updates cache
return self.cached_nodes[node_id]
def _get_cached_node(self, node_id):
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id=node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_azure(cluster_config)
@staticmethod
def bootstrap_config_for_api(cluster_config: Dict[str, Any]) -> Dict[str, Any]:
return bootstrap_azure_for_read(cluster_config)
def prepare_for_head_node(
self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Returns a new cluster config with custom configs for head node."""
managed_identity_client_id = self._get_managed_identity_client_id(cluster_config)
if managed_identity_client_id:
cluster_config["provider"]["managed_identity_client_id"] = managed_identity_client_id
return cluster_config
@staticmethod
def fillout_available_node_types_resources(
cluster_config: Dict[str, Any]) -> Dict[str, Any]:
"""Fills out missing "resources" field for available_node_types."""
if "available_node_types" not in cluster_config:
return cluster_config
cluster_config = copy.deepcopy(cluster_config)
# Get instance information from cloud provider
provider_config = cluster_config["provider"]
subscription_id = provider_config["subscription_id"]
vm_location = provider_config["location"]
credential = get_credential(provider_config)
compute_client = ComputeManagementClient(credential, subscription_id)
vmsizes = compute_client.virtual_machine_sizes.list(vm_location)
instances_dict = {
instance.name: {"memory": instance.memory_in_mb, "cpu": instance.number_of_cores}
for instance in vmsizes
}
# Update the instance information to node type
available_node_types = cluster_config["available_node_types"]
for node_type in available_node_types:
instance_type = available_node_types[node_type]["node_config"]["azure_arm_parameters"]["vmSize"]
if instance_type in instances_dict:
cpus = instances_dict[instance_type]["cpu"]
detected_resources = {"CPU": cpus}
memory_total = instances_dict[instance_type]["memory"]
memory_total_in_bytes = int(memory_total) * 1024 * 1024
detected_resources["memory"] = memory_total_in_bytes
detected_resources.update(
available_node_types[node_type].get("resources", {}))
if detected_resources != \
available_node_types[node_type].get("resources", {}):
available_node_types[node_type][
"resources"] = detected_resources
logger.debug("Updating the resources of {} to {}.".format(
node_type, detected_resources))
else:
raise ValueError("Instance type " + instance_type +
" is not available in Azure location: " +
vm_location + ".")
return cluster_config
@staticmethod
def validate_config(
provider_config: Dict[str, Any]) -> None:
config_dict = {
"subscription_id": provider_config.get("subscription_id"),
"resource_group": provider_config.get("resource_group")}
validate_config_dict(provider_config["type"], config_dict)
if "azure_cloud_storage" in provider_config:
cloud_storage = provider_config["azure_cloud_storage"]
config_dict = {
"azure.storage.type": cloud_storage.get("azure.storage.type"),
"azure.storage.account": cloud_storage.get("azure.storage.account"),
"azure.container": cloud_storage.get("azure.container"),
# The account key is no longer a must since we have role access
# "azure.account.key": cloud_storage.get("azure.account.key")
}
validate_config_dict(provider_config["type"], config_dict)
@staticmethod
def verify_config(
provider_config: Dict[str, Any]) -> None:
verify_cloud_storage = provider_config.get("verify_cloud_storage", True)
if ("azure_cloud_storage" in provider_config) and verify_cloud_storage:
cli_logger.verbose("Verifying Azure cloud storage configurations...")
verify_azure_cloud_storage(provider_config)
cli_logger.verbose("Successfully verified Azure cloud storage configurations.")
def _get_managed_identity_client_id(self, cluster_config):
try:
# The latest version doesn't require credential wrapper any longer
# credential_adapter = AzureIdentityCredentialAdapter(self.credential)
msi_client = ManagedServiceIdentityClient(self.credential,
self.provider_config["subscription_id"])
user_assigned_identity_name = self.provider_config.get("userAssignedIdentity", AZURE_MSI_NAME)
user_assigned_identity = msi_client.user_assigned_identities.get(
self.provider_config["resource_group"],
user_assigned_identity_name)
return user_assigned_identity.client_id
except Exception as e:
logger.warning("Failed to get azure client id: {}".format(e))
return None
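# Note added for clarity: `get_azure_sdk_function`, used throughout this class, is
# imported from a helper module that is not shown here. Judging only from how it is
# called above (looking a function such as "delete" or "create_or_update" up on an
# SDK client and calling `.wait()` on the result), a minimal sketch could look like
# the hypothetical helper below; the `begin_` fallback is an assumption about newer
# Azure SDK method naming, not something taken from this file.
#
#   def get_azure_sdk_function(client, function_name):
#       """Return client.<function_name>, or client.begin_<function_name> if only that exists."""
#       return getattr(client, function_name, None) or getattr(client, "begin_" + function_name)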
the-stack_106_22744
import argparse
import json
import os
import random
import numpy as np
import torch
from ilqr_utils import (
backward,
compute_latent_traj,
forward,
get_x_data,
latent_cost,
random_actions_trajs,
refresh_actions_trajs,
save_traj,
seq_jacobian,
update_horizon_start,
)
from mdp.cartpole_mdp import CartPoleMDP
from mdp.pendulum_mdp import PendulumMDP
from mdp.plane_obstacles_mdp import PlanarObstaclesMDP
from mdp.three_pole_mdp import ThreePoleMDP
from pc3_model import PC3
seed = 2020
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_default_dtype(torch.float64)
config_path = {
"plane": "ilqr_config/plane.json",
"swing": "ilqr_config/swing.json",
"balance": "ilqr_config/balance.json",
"cartpole": "ilqr_config/cartpole.json",
"threepole": "ilqr_config/threepole.json",
}
env_data_dim = {
"planar": (1600, 2, 2),
"pendulum": ((2, 48, 48), 3, 1),
"cartpole": ((2, 80, 80), 8, 1),
"threepole": ((2, 80, 80), 8, 3),
}
def main(args):
task_name = args.task
assert task_name in ["planar", "balance", "swing", "cartpole", "threepole"]
env_name = "pendulum" if task_name in ["balance", "swing"] else task_name
setting_path = args.setting_path
setting = os.path.basename(os.path.normpath(setting_path))
noise = args.noise
epoch = args.epoch
x_dim, z_dim, u_dim = env_data_dim[env_name]
# non-convolution encoder
if env_name in ["planar", "pendulum"]:
x_dim = np.prod(x_dim)
ilqr_result_path = "iLQR_result/" + "_".join([task_name, str(setting), str(noise), str(epoch)])
if not os.path.exists(ilqr_result_path):
os.makedirs(ilqr_result_path)
with open(ilqr_result_path + "/settings", "w") as f:
json.dump(args.__dict__, f, indent=2)
# each trained model will perform 10 random tasks
all_task_configs = []
for task_counter in range(10):
# config for this task
with open(config_path[task_name]) as f:
config = json.load(f)
# sample random start and goal state
s_start_min, s_start_max = config["start_min"], config["start_max"]
config["s_start"] = np.random.uniform(low=s_start_min, high=s_start_max)
s_goal = config["goal"][np.random.choice(len(config["goal"]))]
config["s_goal"] = np.array(s_goal)
all_task_configs.append(config)
# the folder where all trained models are saved
log_folders = [
os.path.join(setting_path, dI)
for dI in os.listdir(setting_path)
if os.path.isdir(os.path.join(setting_path, dI))
]
log_folders.sort()
# statistics on all trained models
avg_model_percent = 0.0
best_model_percent = 0.0
for log in log_folders:
with open(log + "/settings", "r") as f:
settings = json.load(f)
armotized = settings["armotized"]
log_base = os.path.basename(os.path.normpath(log))
model_path = ilqr_result_path + "/" + log_base
if not os.path.exists(model_path):
os.makedirs(model_path)
print("iLQR for " + log_base)
# load the trained model
model = PC3(armotized, x_dim, z_dim, u_dim, env_name)
model.load_state_dict(torch.load(log + "/model_" + str(epoch), map_location="cpu"))
model.eval()
dynamics = model.dynamics
encoder = model.encoder
# run the task with 10 different start and goal states for a particular model
avg_percent = 0.0
for task_counter, config in enumerate(all_task_configs):
print("Performing task %d: " % (task_counter) + str(config["task"]))
# environment specification
horizon = config["horizon_prob"]
plan_len = config["plan_len"]
# ilqr specification
R_z = config["q_weight"] * np.eye(z_dim)
R_u = config["r_weight"] * np.eye(u_dim)
num_uniform = config["uniform_trajs"]
num_extreme = config["extreme_trajs"]
ilqr_iters = config["ilqr_iters"]
inv_regulator_init = config["pinv_init"]
inv_regulator_multi = config["pinv_mult"]
inv_regulator_max = config["pinv_max"]
alpha_init = config["alpha_init"]
alpha_mult = config["alpha_mult"]
alpha_min = config["alpha_min"]
s_start = config["s_start"]
s_goal = config["s_goal"]
# mdp
if env_name == "planar":
mdp = PlanarObstaclesMDP(goal=s_goal, goal_thres=config["distance_thresh"], noise=noise)
elif env_name == "pendulum":
mdp = PendulumMDP(frequency=config["frequency"], noise=noise, torque=config["torque"])
elif env_name == "cartpole":
mdp = CartPoleMDP(frequency=config["frequency"], noise=noise)
elif env_name == "threepole":
mdp = ThreePoleMDP(frequency=config["frequency"], noise=noise, torque=config["torque"])
# get z_start and z_goal
x_start = get_x_data(mdp, s_start, config)
x_goal = get_x_data(mdp, s_goal, config)
with torch.no_grad():
z_start = encoder(x_start)
z_goal = encoder(x_goal)
z_start = z_start.squeeze().numpy()
z_goal = z_goal.squeeze().numpy()
# initialize actions trajectories
all_actions_trajs = random_actions_trajs(mdp, num_uniform, num_extreme, plan_len)
            # perform receding horizon iLQR
            s_start_horizon = np.copy(s_start) # s_start and z_start are changed at each horizon
z_start_horizon = np.copy(z_start)
obs_traj = [mdp.render(s_start).squeeze()]
goal_counter = 0.0
for plan_iter in range(1, horizon + 1):
latent_cost_list = [None] * len(all_actions_trajs)
# iterate over all trajectories
for traj_id in range(len(all_actions_trajs)):
# initialize the inverse regulator
inv_regulator = inv_regulator_init
for iter in range(1, ilqr_iters + 1):
u_seq = all_actions_trajs[traj_id]
z_seq = compute_latent_traj(z_start_horizon, u_seq, dynamics)
# compute the linearization matrices
A_seq, B_seq = seq_jacobian(dynamics, z_seq, u_seq)
# run backward
k_small, K_big = backward(R_z, R_u, z_seq, u_seq, z_goal, A_seq, B_seq, inv_regulator)
current_cost = latent_cost(R_z, R_u, z_seq, z_goal, u_seq)
# forward using line search
alpha = alpha_init
accept = False # if any alpha is accepted
while alpha > alpha_min:
z_seq_cand, u_seq_cand = forward(
z_seq, all_actions_trajs[traj_id], k_small, K_big, dynamics, alpha
)
cost_cand = latent_cost(R_z, R_u, z_seq_cand, z_goal, u_seq_cand)
if cost_cand < current_cost: # accept the trajectory candidate
accept = True
all_actions_trajs[traj_id] = u_seq_cand
latent_cost_list[traj_id] = cost_cand
break
else:
alpha *= alpha_mult
if accept:
inv_regulator = inv_regulator_init
else:
inv_regulator *= inv_regulator_multi
if inv_regulator > inv_regulator_max:
break
for i in range(len(latent_cost_list)):
if latent_cost_list[i] is None:
latent_cost_list[i] = np.inf
traj_opt_id = np.argmin(latent_cost_list)
action_chosen = all_actions_trajs[traj_opt_id][0]
s_start_horizon, z_start_horizon = update_horizon_start(
mdp, s_start_horizon, action_chosen, encoder, config
)
obs_traj.append(mdp.render(s_start_horizon).squeeze())
goal_counter += mdp.reward_function(s_start_horizon)
all_actions_trajs = refresh_actions_trajs(
all_actions_trajs,
traj_opt_id,
mdp,
np.min([plan_len, horizon - plan_iter]),
num_uniform,
num_extreme,
)
# compute the percentage close to goal
success_rate = goal_counter / horizon
print("Success rate: %.2f" % (success_rate))
percent = success_rate
avg_percent += success_rate
with open(model_path + "/result.txt", "a+") as f:
f.write(config["task"] + ": " + str(percent) + "\n")
# save trajectory as gif file
gif_path = model_path + "/task_{:01d}.gif".format(task_counter + 1)
save_traj(obs_traj, mdp.render(s_goal).squeeze(), gif_path, config["task"])
avg_percent = avg_percent / 10
print("Average success rate: " + str(avg_percent))
print("====================================")
avg_model_percent += avg_percent
if avg_percent > best_model_percent:
best_model = log_base
best_model_percent = avg_percent
with open(model_path + "/result.txt", "a+") as f:
f.write("Average percentage: " + str(avg_percent))
avg_model_percent = avg_model_percent / len(log_folders)
with open(ilqr_result_path + "/result.txt", "w") as f:
f.write("Average percentage of all models: " + str(avg_model_percent) + "\n")
f.write("Best model: " + best_model + ", best percentage: " + str(best_model_percent))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="run iLQR")
parser.add_argument("--task", required=True, type=str, help="task to perform")
parser.add_argument("--setting_path", required=True, type=str, help="path to load trained models")
parser.add_argument("--noise", type=float, default=0.0, help="noise level for mdp")
parser.add_argument("--epoch", type=int, default=2000, help="number of epochs to load model")
args = parser.parse_args()
main(args)
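# Example invocation (illustrative only; assumes this script is saved as ilqr.py and
# that the setting path is a directory of trained PC3 runs, each containing a
# `settings` file and `model_<epoch>` weights):
#   python ilqr.py --task planar --setting_path result/planar --epoch 2000 --noise 0.0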
the-stack_106_22745
from typing import List, Dict, Optional, Callable, Awaitable, Union
from aiogram.types import InlineKeyboardButton, CallbackQuery
from aiogram_dialog.deprecation_utils import manager_deprecated
from aiogram_dialog.dialog import Dialog, ChatEvent
from aiogram_dialog.manager.protocols import DialogManager
from aiogram_dialog.widgets.widget_event import WidgetEventProcessor, ensure_event_processor
from .base import Keyboard
from .group import Group
from ..managed import ManagedWidgetAdapter
from ..when import WhenCondition
OnStateChanged = Callable[
[ChatEvent, "ManagedScrollingGroupAdapter", DialogManager],
Awaitable,
]
class ScrollingGroup(Group):
def __init__(self, *buttons: Keyboard, id: str, width: Optional[int] = None,
height: int = 0, when: WhenCondition = None,
on_page_changed: Union[OnStateChanged, WidgetEventProcessor, None] = None):
super().__init__(*buttons, id=id, width=width, when=when)
self.height = height
self.on_page_changed = ensure_event_processor(on_page_changed)
async def _render_keyboard(
self, data: Dict, manager: DialogManager
) -> List[List[InlineKeyboardButton]]:
kbd = await super()._render_keyboard(data, manager)
pages = len(kbd) // self.height + bool(len(kbd) % self.height)
last_page = pages - 1
if pages == 0:
return kbd
current_page = min(last_page, self.get_page(manager))
next_page = min(last_page, current_page + 1)
prev_page = max(0, current_page - 1)
pager = [[
InlineKeyboardButton(text="1", callback_data=f"{self.widget_id}:0"),
InlineKeyboardButton(text="<", callback_data=f"{self.widget_id}:{prev_page}"),
InlineKeyboardButton(text=str(current_page + 1),
callback_data=f"{self.widget_id}:{current_page}"),
InlineKeyboardButton(text=">", callback_data=f"{self.widget_id}:{next_page}"),
InlineKeyboardButton(text=str(last_page + 1),
callback_data=f"{self.widget_id}:{last_page}"),
]]
return kbd[current_page * self.height: (current_page + 1) * self.height] + pager
async def process_callback(self, c: CallbackQuery, dialog: Dialog,
manager: DialogManager) -> bool:
prefix = f"{self.widget_id}:"
if not c.data.startswith(prefix):
return await super().process_callback(c, dialog, manager)
new_page = int(c.data[len(prefix):])
await self.set_page(c, new_page, manager)
return True
def get_page(self, manager: DialogManager) -> int:
return manager.current_context().widget_data.get(self.widget_id, 0)
async def set_page(self, event: ChatEvent, page: int,
manager: DialogManager) -> None:
manager.current_context().widget_data[self.widget_id] = page
await self.on_page_changed.process_event(
event, self.managed(manager), manager,
)
def managed(self, manager: DialogManager):
return ManagedScrollingGroupAdapter(self, manager)
class ManagedScrollingGroupAdapter(ManagedWidgetAdapter[ScrollingGroup]):
def get_page(self, manager: Optional[DialogManager] = None) -> int:
manager_deprecated(manager)
return self.widget.get_page(self.manager)
async def set_page(self, event: ChatEvent, page: int,
manager: Optional[DialogManager] = None) -> None:
manager_deprecated(manager)
return await self.widget.set_page(
event, page, self.manager
)
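# Usage sketch (illustrative, not part of this module): a ScrollingGroup wraps
# ordinary keyboard widgets and shows `height` rows per page plus the pager row
# built in `_render_keyboard`. The widget ids and button texts below are made up
# for the example; `Button` and `Const` come from aiogram_dialog itself.
#
#   from aiogram_dialog.widgets.kbd import Button
#   from aiogram_dialog.widgets.text import Const
#
#   scroll = ScrollingGroup(
#       *(Button(Const(f"Item {i}"), id=f"item_{i}") for i in range(30)),
#       id="items_scroll",
#       width=1,
#       height=5,
#   )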
the-stack_106_22746
import gc
import os
import pickle
import subprocess
import sys
import pytest
from shapely.errors import ShapelyDeprecationWarning
from shapely.geometry import Point, Polygon
from shapely.geos import geos_version
from shapely import strtree
from shapely.strtree import STRtree
from .conftest import requires_geos_342
@requires_geos_342
@pytest.mark.parametrize("geoms", [[Point(i, i) for i in range(10)]])
@pytest.mark.parametrize(
"query_geom,num_results",
[(Point(2, 2).buffer(0.99), 1), (Point(2, 2).buffer(1.0), 3)],
)
def test_query(geoms, query_geom, num_results):
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms)
results = tree.query(query_geom)
assert len(results) == num_results
@requires_geos_342
@pytest.mark.parametrize("geoms", [[Point(i, i) for i in range(10)]])
@pytest.mark.parametrize(
"query_geom,expected",
[(Point(2, 2).buffer(0.99), [2]), (Point(2, 2).buffer(1.0), [1, 2, 3])],
)
def test_query_enumeration_idx(geoms, query_geom, expected):
"""Store enumeration idx"""
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree((g, i) for i, g in enumerate(geoms))
results = tree.query_items(query_geom)
assert sorted(results) == sorted(expected)
@requires_geos_342
def test_insert_empty_geometry():
"""
Passing nothing but empty geometries results in an empty strtree.
The query segfaults if the empty geometry was actually inserted.
"""
empty = Polygon()
geoms = [empty]
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms)
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 0
@requires_geos_342
def test_query_empty_geometry():
"""
Empty geometries should be filtered out.
The query segfaults if the empty geometry was actually inserted.
"""
empty = Polygon()
point = Point(1, 0.5)
geoms = [empty, point]
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms)
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 1
assert results[0] == point
@requires_geos_342
def test_references():
"""Don't crash due to dangling references"""
empty = Polygon()
point = Point(1, 0.5)
geoms = [empty, point]
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms)
empty = None
point = None
gc.collect()
query = Polygon([(0, 0), (1, 1), (2, 0), (0, 0)])
results = tree.query(query)
assert len(results) == 1
assert results[0] == Point(1, 0.5)
@requires_geos_342
def test_safe_delete():
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree([])
_lgeos = strtree.lgeos
strtree.lgeos = None
del tree
strtree.lgeos = _lgeos
@requires_geos_342
def test_pickle_persistence():
"""
Don't crash trying to use unpickled GEOS handle.
"""
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree([(Point(i, i).buffer(0.1), i) for i in range(3)])
pickled_strtree = pickle.dumps(tree)
unpickle_script_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "unpickle-strtree.py")
proc = subprocess.Popen(
[sys.executable, str(unpickle_script_file_path)],
stdin=subprocess.PIPE,
)
proc.communicate(input=pickled_strtree)
proc.wait()
assert proc.returncode == 0
@pytest.mark.skipif(geos_version < (3, 6, 0), reason="GEOS 3.6.0 required")
@pytest.mark.parametrize(
"geoms",
[
[
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
Polygon([(0, 2), (1, 2), (1, 3), (0, 3)]),
Point(0, 0.5),
]
],
)
@pytest.mark.parametrize("query_geom", [Point(0, 0.4)])
def test_nearest_geom(geoms, query_geom):
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms)
result = tree.nearest(query_geom)
assert result.geom_type == "Point"
assert result.x == 0.0
assert result.y == 0.5
@pytest.mark.skipif(geos_version < (3, 6, 0), reason="GEOS 3.6.0 required")
@pytest.mark.parametrize(
"geoms",
[
[
Point(0, 0.5),
Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
Polygon([(0, 2), (1, 2), (1, 3), (0, 3)]),
]
],
)
@pytest.mark.parametrize("items", [list(range(1, 4)), list("abc")])
@pytest.mark.parametrize("query_geom", [Point(0, 0.4)])
def test_nearest_item(geoms, items, query_geom):
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms, items)
assert tree.nearest_item(query_geom) == items[0]
@pytest.mark.parametrize(["geoms", "items"], [(None, None), ([], None)])
def test_nearest_empty(geoms, items):
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms, items)
assert tree.nearest_item(None) is None
@pytest.mark.parametrize(["geoms", "items"], [(None, None), ([], None)])
def test_nearest_items(geoms, items):
with pytest.warns(ShapelyDeprecationWarning):
tree = STRtree(geoms, items)
assert tree.nearest_item(None) is None
the-stack_106_22748
import torch
import torch.nn as nn
from glasses.interpretability import GradCam
from glasses.models.base import Freezable, Interpretable
def test_Freezable():
class MyModel(nn.Sequential, Freezable, Interpretable):
def __init__(self):
super().__init__(nn.Conv2d(3, 32, kernel_size=3, bias=False), nn.BatchNorm2d(32), nn.ReLU(),
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(), nn.Linear(32, 10))
model = MyModel()
model.freeze()
for param in model.parameters():
assert not param.requires_grad
model.unfreeze()
for param in model.parameters():
assert param.requires_grad
model.freeze(model[0])
for param in model[0].parameters():
assert not param.requires_grad
for param in model[1].parameters():
assert param.requires_grad
x = torch.randn((1, 3, 224, 224))
model.interpret(x, using=GradCam())
the-stack_106_22753
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from data_structures.graphs.utils import *
import queue
def bfs_list(g, s):
"""
Breadth First Search algorithm for adjacency lists.
:param g: the graph to visit. It is assumed the list only contains int data type.
:type g: list(list(int))
:param s: the vertex to start from.
:type s: int
:return: (V,D,P), where V is the list of visited vertices in order,
D is the list of the distance of each vertex from the start and
P is the list of the predecessors of each vertex
"""
# This list contains the distances from the start vertex.
distances = [INFINITY for k in range(len(g))]
distances[s] = 0
# This list contains the predecessor of each vertex.
predecessors = [-1 for k in range(len(g))]
    # Queue of vertices to visit
    q = queue.Queue()
    q.put(s) # Start with the vertex given as argument
    # Mark vertices as discovered when they are enqueued so a vertex is never
    # enqueued twice and its distance/predecessor are only set once.
    discovered = [False for k in range(len(g))]
    discovered[s] = True
    visited_vertices = [] # List of visited vertices, in dequeue order.
    while not q.empty():
        v = q.get() # Vertex being visited
        for u in g[v]:
            if not discovered[u]:
                discovered[u] = True
                distances[u] = distances[v] + 1 # Update the distance
                predecessors[u] = v # Update the predecessor
                q.put(u)
        visited_vertices.append(v)
    return visited_vertices, distances, predecessors
def dfs_rec_list(g, s):
"""
Recursive Depth First Search for adjacency lists
:param g: the graph to visit. It is assumed the list only contains int data type.
:type g: list(list(int))
:param s: the vertex to start from.
:type s: int
:return: list of visited vertices
"""
def visit(v):
visited_vertices.append(v)
for u in g[v]:
if u not in visited_vertices:
visit(u)
visited_vertices = []
visit(s)
return visited_vertices
def dfs_imp_list(g, s):
"""
Imperative Depth First Search for adjacency list
:param g: the graph to visit. It is assumed the list only contains int data type.
:type g: list(list(int))
:param s: the vertex to start from.
:type s: int
:return: list of visited vertices
"""
visited_vertices = []
stack = [s]
while not stack == []:
v = stack.pop()
if v not in visited_vertices:
visited_vertices.append(v)
for u in g[v]:
stack.append(u)
return visited_vertices
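if __name__ == "__main__":
    # Small demonstration added for clarity (not part of the original module):
    # a 4-vertex cycle 0-1-3-2-0 given as an adjacency list.
    demo_graph = [[1, 2], [0, 3], [0, 3], [1, 2]]
    print(bfs_list(demo_graph, 0))      # ([0, 1, 2, 3], [0, 1, 1, 2], [-1, 0, 0, 1])
    print(dfs_rec_list(demo_graph, 0))  # [0, 1, 3, 2]
    print(dfs_imp_list(demo_graph, 0))  # [0, 2, 3, 1]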
the-stack_106_22754
"""Display a "Pastebin" menu that allows you to pastebin files easily.
If a part of the file is selected when you click something in the "Pastebin"
menu, then only the selected part of the file is shared.
"""
# TODO: make this work with pythonprompt plugin?
from __future__ import annotations
import logging
import socket
import ssl
import textwrap
import tkinter
import webbrowser
from functools import partial
from http.client import HTTPConnection, HTTPSConnection
from tkinter import messagebox, ttk
from typing import Any, ClassVar, Type, cast
from urllib.parse import urlencode
from urllib.request import HTTPSHandler, Request, build_opener
from pygments.lexer import LexerMeta
from porcupine import get_main_window, menubar, tabs, utils
from porcupine.settings import global_settings
log = logging.getLogger(__name__)
DPASTE_URL = "https://dpaste.com/api/v2/"
TERMBIN_HOST_AND_PORT = ("termbin.com", 9999)
class Paste:
name: ClassVar[str]
def __init__(self) -> None:
self.canceled = False
def get_socket(self) -> socket.socket | ssl.SSLSocket | None:
raise NotImplementedError
# runs in a new thread
def run(self, code: str, lexer_class: LexerMeta) -> str:
raise NotImplementedError
def cancel(self) -> bool:
sock = self.get_socket()
if sock is None:
log.info("can't cancel yet")
return False
log.debug("canceling (shutting down socket)")
sock.shutdown(socket.SHUT_RDWR)
log.debug("canceling done")
self.canceled = True
return True
class Termbin(Paste):
name = "termbin.com"
def __init__(self) -> None:
super().__init__()
self._socket: socket.socket | None = None
def get_socket(self) -> socket.socket | None:
return self._socket
def run(self, code: str, lexer_class: LexerMeta) -> str:
with socket.socket() as self._socket:
self._socket.connect(TERMBIN_HOST_AND_PORT)
self._socket.sendall(code.encode("utf-8"))
url = self._socket.recv(1024)
            # today termbin adds zero bytes to my URLs 0_o it hasn't done it before
# i've never seen it add \r but i'm not surprised if it adds it
return url.rstrip(b"\n\r\0").decode("ascii")
# Hello there, random person reading my code. You are probably wondering why in
# the world I am using urllib instead of requests.
#
# It doesn't seem to be possible to access the underlying socket that requests
# uses without relying on _methods_named_like_this. We need that socket for
# canceling the pastebinning. For example, https://stackoverflow.com/a/32311849
# is useless because it gives the socket after it's connected, and most of the
# pastebinning time is spent connecting the socket (on my system).
class MyHTTPConnection(HTTPConnection):
def connect(self) -> None:
# Unlike HTTPConnection.connect, this creates the socket so that it is
        # assigned to self.sock before it's connected.
self.sock: socket.socket | ssl.SSLSocket = socket.socket()
self.sock.connect((self.host, self.port))
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# HTTPSConnection does super().connect(), which calls MyHTTPConnection.connect,
# and then it SSL-wraps the socket created by MyHTTPConnection.
class MyHTTPSConnection(HTTPSConnection, MyHTTPConnection):
def __init__(self, *args: Any, dpaste: DPaste, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._dpaste = dpaste
# https://github.com/python/mypy/issues/10049
@property # type: ignore
def sock(self) -> socket.socket | ssl.SSLSocket: # type: ignore
return self.__sock
@sock.setter
def sock(self, new_sock: socket.socket | ssl.SSLSocket) -> None:
# Canceling with the non-SSL socket fails because making the SSL socket
# closes the non-SSL socket. So, don't tell the dpaste object about
# being able to cancel until self.sock is set to SSL socket.
self.__sock = new_sock
if isinstance(new_sock, ssl.SSLSocket):
self._dpaste.connection = self
class DPaste(Paste):
name = "dpaste.com"
def __init__(self) -> None:
super().__init__()
self.connection: MyHTTPSConnection | None = None
def get_socket(self) -> ssl.SSLSocket | None:
if self.connection is None:
return None
return cast(ssl.SSLSocket, self.connection.sock)
def run(self, code: str, lexer_class: LexerMeta) -> str:
# kwargs of do_open() go to MyHTTPSConnection
handler = HTTPSHandler()
handler.https_open = partial(handler.do_open, MyHTTPSConnection, dpaste=self) # type: ignore
# docs: https://dpaste.com/api/v2/
# dpaste.com's syntax highlighting choices correspond with pygments lexers (see tests)
request = Request(
DPASTE_URL,
data=urlencode({"syntax": lexer_class.aliases[0], "content": code}).encode("utf-8"),
)
with build_opener(handler).open(request) as response:
return response.read().decode().strip()
class SuccessDialog(tkinter.Toplevel):
def __init__(self, url: str):
super().__init__()
self.url = url # accessed in tests
content = ttk.Frame(self, padding=10)
content.pack(fill="both", expand=True)
content.columnconfigure(0, weight=1)
label = ttk.Label(content, text="Here's your link:")
label.grid(row=0, column=0)
self._entry = ttk.Entry(content, justify="center")
self._entry.grid(row=1, column=0, sticky="we", pady=(10, 30))
self._entry.insert(0, url)
self._entry.config(state="readonly") # must be after the insert
self.bind("<FocusIn>", self._select_all, add=True)
self._select_all()
button_info = [
("Open in browser", self.open_in_browser),
("Copy to clipboard", self.copy_to_clipboard),
("Close this dialog", self.destroy),
]
buttonframe = ttk.Frame(content)
buttonframe.grid(row=2, column=0, sticky="we")
for (text, callback), padx in zip(button_info, [(0, 5), (5, 5), (5, 0)]):
ttk.Button(buttonframe, text=text, command=callback).pack(
side="left", expand=True, fill="x", padx=padx
)
def _select_all(self, event: tkinter.Event[tkinter.Misc] | None = None) -> None:
# toplevels annoyingly get notified of child events
if event is None or event.widget is self:
self._entry.selection_range(0, "end")
self._entry.focus()
def open_in_browser(self) -> None:
webbrowser.open(self.url)
self.destroy()
def copy_to_clipboard(self) -> None:
self.clipboard_clear()
self.clipboard_append(self.url)
def make_please_wait_window(paste: Paste) -> tkinter.Toplevel:
window = tkinter.Toplevel()
window.transient(get_main_window())
window.title("Pasting...")
window.geometry("350x150")
window.resizable(False, False)
window.protocol("WM_DELETE_WINDOW", paste.cancel)
content = ttk.Frame(window)
content.pack(fill="both", expand=True)
label = ttk.Label(
content, font=("", 12, ()), text=f"Pasting to {type(paste).name}, please wait..."
)
label.pack(expand=True)
progressbar = ttk.Progressbar(content, mode="indeterminate")
progressbar.pack(fill="x", padx=15, pady=15)
progressbar.start()
ttk.Button(content, text="Cancel", command=paste.cancel).pack(pady=15)
get_main_window().tk.call("tk", "busy", "hold", get_main_window())
return window
def pasting_done_callback(
paste: Paste, please_wait_window: tkinter.Toplevel, success: bool, result: str
) -> None:
get_main_window().tk.call("tk", "busy", "forget", get_main_window())
please_wait_window.destroy()
if success:
if result.startswith(("http://", "https://")):
log.info("pasting succeeded")
dialog = SuccessDialog(url=result)
dialog.title("Pasting Succeeded")
dialog.resizable(False, False)
dialog.transient(get_main_window())
dialog.wait_window()
else:
log.error(f"pastebin {paste.name!r} returned invalid url: {result!r}")
messagebox.showerror(
"Pasting failed", f"Instead of a valid URL, {type(paste).name} returned {result!r}."
)
elif paste.canceled:
# Log error with less dramatic log level and don't show in GUI
log.debug("Pasting failed and was cancelled. Here is the error.", exc_info=True)
else:
# result is the traceback as a string
log.error(f"pasting failed\n{result}")
messagebox.showerror(
"Pasting failed", "Check your internet connection or try a different pastebin."
)
def ask_are_you_sure(filename: str | None, paste_class: type[Paste], selection_only: bool) -> bool:
window = tkinter.Toplevel()
window.title(f"Pastebin {filename}")
window.transient(get_main_window())
content = ttk.Frame(window, name="content", padding=10)
content.pack(fill="both", expand=True)
content.columnconfigure(0, weight=1)
if selection_only:
question = f"Do you want to send the selected code to {paste_class.name}?"
else:
question = f"Do you want to send the content of {filename} to {paste_class.name}?"
label1 = ttk.Label(
content,
name="label1",
text=question,
wraplength=300,
justify="center",
font="TkHeadingFont",
)
label1.pack(pady=5)
label2 = ttk.Label(
content,
name="label2",
text="This is a bad idea if your code is not meant to be publicly available.",
wraplength=300,
justify="center",
font="TkTextFont",
)
label2.pack(pady=5)
var = tkinter.BooleanVar(value=True)
checkbox = ttk.Checkbutton(
content, text="Show this dialog when I try to pastebin something", variable=var
)
checkbox.pack(pady=25)
result = False
def yes() -> None:
nonlocal result
result = True
window.destroy()
def no() -> None:
window.destroy()
button_frame = ttk.Frame(content)
button_frame.pack(fill="x")
ttk.Button(button_frame, text="Yes", command=yes).pack(
side="left", expand=True, fill="x", padx=(0, 10)
)
ttk.Button(button_frame, text="No", command=no).pack(
side="left", expand=True, fill="x", padx=(10, 0)
)
window.wait_window()
global_settings.set("ask_to_pastebin", var.get())
return result
def start_pasting(paste_class: Type[Paste], tab: tabs.FileTab) -> None:
try:
code = tab.textwidget.get("sel.first", "sel.last")
selection_only = True
except tkinter.TclError:
code = tab.textwidget.get("1.0", "end - 1 char")
selection_only = False
if global_settings.get("ask_to_pastebin", bool):
filename = "this file" if tab.path is None else tab.path.name
if not ask_are_you_sure(filename, paste_class, selection_only=selection_only):
return
lexer_class = tab.settings.get("pygments_lexer", LexerMeta)
code = textwrap.dedent(code)
paste = paste_class()
plz_wait = make_please_wait_window(paste)
utils.run_in_thread(
partial(paste.run, code, lexer_class), partial(pasting_done_callback, paste, plz_wait)
)
def setup() -> None:
global_settings.add_option("ask_to_pastebin", default=True)
for klass in [DPaste, Termbin]:
assert "/" not in klass.name
menubar.add_filetab_command(f"Pastebin/{klass.name}", partial(start_pasting, klass))
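# With the two Paste subclasses defined above, setup() registers the filetab menu
# entries "Pastebin/dpaste.com" and "Pastebin/termbin.com", each invoking
# start_pasting() with the corresponding backend class.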
the-stack_106_22756
import traceback
import logging
from datetime import datetime, time, date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.template.loader import get_template, render_to_string
from django.template import Template, TemplateDoesNotExist, RequestContext
from django.core.mail import send_mail, EmailMessage, EmailMultiAlternatives
from django.urls import reverse
from django.utils.timezone import localtime, now
from comlink import mailgun
from nadine.models.membership import Membership
from nadine.utils.slack_api import SlackAPI
logger = logging.getLogger(__name__)
def valid_message_keys():
return ["all", "introduction", "newsletter", "new_membership", "first_day_checkin",
"exit_survey", "member_survey", "no_return", "checkin", "invalid_billing", "new_key",
"no_signin", "no_device", "edit_profile", "slack_invite"]
def default_context():
return {
'today': localtime(now()),
'site_name': settings.SITE_NAME,
'site_url': settings.SITE_URL,
}
def send_manual(user, message):
message = message.lower()
if not message in valid_message_keys():
return False
if message == "introduction" or message == "all":
send_introduction(user)
if message == "newsletter" or message == "all":
subscribe_to_newsletter(user)
if message == "new_member" or message == "all":
send_new_membership(user)
if message == "first_day_checkin" or message == "all":
send_first_day_checkin(user)
if message == "exit_survey" or message == "all":
send_exit_survey(user)
if message == "member_survey" or message == "all":
send_member_survey(user)
if message == "no_return_checkin" or message == "all":
send_no_return_checkin(user)
if message == "invalid_billing" or message == "all":
send_invalid_billing(user)
if message == "no_signin" or message == "all":
send_no_signin(user)
if message == "no_device" or message == "all":
send_no_device(user)
if message == "new_key" or message == "all":
send_new_key(user)
if message == "edit_profile" or message == "all":
send_edit_profile(user)
if message == "slack_invite":
SlackAPI().invite_user(user)
return True
#####################################################################
# Email Verification
#####################################################################
def send_verification(emailObj):
"""Send email verification link for this EmailAddress object.
Raises smtplib.SMTPException, and NoRouteToHost.
"""
# Build our context
verif_key = emailObj.get_verif_key()
context_dict = {
'site_name': settings.SITE_NAME,
'site_url': settings.SITE_URL,
'user': emailObj.user,
'verif_key': verif_key,
}
context_dict['verify_link'] = emailObj.get_verify_link()
subject = "Please Verify Your Email Address"
text_template = get_template('email/verification_email.txt')
text_msg = text_template.render(context=context_dict)
html_template = get_template('email/verification_email.html')
html_msg = html_template.render(context=context_dict)
send_email(emailObj.email, subject, text_msg, html_msg)
#####################################################################
# User Alerts
#####################################################################
#
# These emails go out to users.
#
#####################################################################
def send_introduction(user):
subject = "Introduction to Nadine"
context = default_context()
context['user'] = user
message = render_to_string('email/introduction.txt', context=context)
send_quietly(user.email, subject, message)
def subscribe_to_newsletter(user):
if settings.MAILCHIMP_NEWSLETTER_KEY:
from mailchimp3 import MailChimp
try:
client = MailChimp(mc_api=settings.MAILCHIMP_API_KEY)
client.lists.members.create(settings.MAILCHIMP_NEWSLETTER_KEY, {
'email_address': user.email,
'status': 'subscribed',
'merge_fields': {
'FNAME': user.first_name,
'LNAME': user.last_name,
},
})
except Exception as error:
try:
if error.args[0]['title'] == 'Member Exists':
logger.info("%s already subscribed to newsletter" % user.email)
return
except Exception:
pass
raise error
def send_new_membership(user):
membership = Membership.objects.for_user(user)
package_name = membership.package_name(include_future=True)
if package_name:
subject = "New %s Membership" % package_name
else:
subject = "New Membership"
context = default_context()
context['user'] = user
context['membership'] = membership
message = render_to_string('email/new_membership.txt', context=context)
send(user.email, subject, message)
def send_first_day_checkin(user):
subject = "How was your first day?"
context = default_context()
context['user'] = user
message = render_to_string('email/first_day.txt', context=context)
send(user.email, subject, message)
def send_exit_survey(user):
subject = "Exit Survey"
context = default_context()
context['user'] = user
message = render_to_string('email/exit_survey.txt', context=context)
send(user.email, subject, message)
def send_member_survey(user):
subject = "Coworking Survey"
context = default_context()
context['user'] = user
message = render_to_string('email/member_survey.txt', context=context)
send(user.email, subject, message)
def send_no_return_checkin(user):
subject = "Checking In"
context = default_context()
context['user'] = user
message = render_to_string('email/no_return.txt', context=context)
send(user.email, subject, message)
def send_invalid_billing(user):
subject = "Billing Problem"
context = default_context()
context['user'] = user
message = render_to_string('email/invalid_billing.txt', context=context)
send(user.email, subject, message)
def send_no_signin(user):
subject = "Forget to sign in?"
context = default_context()
context['user'] = user
message = render_to_string('email/no_signin.txt', context=context)
send(user.email, subject, message)
def send_no_device(user):
subject = "Device Registration"
context = default_context()
context['user'] = user
message = render_to_string('email/no_device.txt', context=context)
send(user.email, subject, message)
def send_new_key(user):
subject = "Key Holding Details"
context = default_context()
context['user'] = user
message = render_to_string('email/new_key.txt', context=context)
send(user.email, subject, message)
def send_user_notifications(user, target):
subject = "%s is here!" % target.get_full_name()
context = default_context()
context['user'] = user
context['target'] = target
message = render_to_string('email/user_notification.txt', context=context)
send(user.email, subject, message)
def send_contact_request(user, target):
subject = "%s wants to connect!" % user.get_full_name()
context = default_context()
context['user'] = user
context['target'] = target
message = render_to_string('email/contact_request.txt', context=context)
send(target.email, subject, message)
def send_edit_profile(user):
subject = "Please update your Nadine profile"
context = default_context()
context['user'] = user
message = render_to_string('email/edit_profile.txt', context=context)
send(user.email, subject, message)
#####################################################################
# System Alerts
#####################################################################
#
# These emails go out to the team.
#
#####################################################################
def announce_new_user(user):
subject = "New User - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s just signed in for the first time! %s" % (user.get_full_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_free_trial(user):
subject = "Free Trial - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s just signed in for the first time! %s" % (user.get_full_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_new_membership(user):
membership = Membership.objects.for_user(user)
package_name = membership.package_name(include_future=True)
subject = "New %s: %s" % (package_name, user.get_full_name())
message = "Team,\r\n\r\n \t%s has a new %s membership! %s" % (user.get_full_name(), package_name, team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_member_checkin(user):
membership = Membership.objects.for_user(user)
subject = "Member Check-in - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s has been a %s member for almost a month! Someone go see how they are doing. %s" % (user.get_full_name(), membership.package_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_need_photo(user):
subject = "Photo Opportunity - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s just signed in and we don't have a photo of them yet. %s" % (user.get_full_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_billing_disable(user):
subject = "Disabled Automatic Billing - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s just disabled their automatic billing through Nadine. %s" % (user.get_full_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_bad_email(user):
subject = "Email Problem - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \tWe had a problem sending the introduction email to '%s'. %s" % (user.email, team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_anniversary(user):
subject = "Anniversary - %s" % (user.get_full_name())
duration = user.profile.duration_str()
message = "Team,\r\n\r\n \t%s has been with us now for %s! %s" % (user.get_full_name(), duration, team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_new_key(user):
subject = "New Key - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \t%s has been assigned a new key! %s" % (user.get_full_name(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
def announce_special_day(user, special_day):
subject = "Special Day - %s" % (user.get_full_name())
message = "Team,\r\n\r\n \tToday is a special day for %s. Today is their %s! %s" % (user.get_full_name(), special_day.description.lower(), team_signature(user))
send_quietly(settings.TEAM_EMAIL_ADDRESS, subject, message)
#####################################################################
# Manage Member Email
#####################################################################
def get_manage_member_content(user):
return render_templates({'user': user}, "manage_member")
def send_manage_member(user, subject=None):
if subject == None:
subject = "Incomplete Tasks"
subject = "%s - %s" % (subject, user.get_full_name())
if hasattr(settings, "EMAIL_SUBJECT_PREFIX"):
# Adjust the subject if we have a prefix
subject = settings.EMAIL_SUBJECT_PREFIX.strip() + " " + subject.strip()
text_content, html_content = get_manage_member_content(user)
mailgun_data = {
"from": settings.DEFAULT_FROM_EMAIL,
"to": [settings.TEAM_EMAIL_ADDRESS, ],
"subject": subject,
"text": text_content,
"html": html_content,
}
if hasattr(settings, 'MAILGUN_DOMAIN'):
return mailgun.mailgun_send(mailgun_data, inject_list_id=False)
#####################################################################
# Utilities
#####################################################################
def render_templates(context, email_key):
text_content = None
html_content = None
# inject some specific context
context['today'] = localtime(now())
context['site_name'] = settings.SITE_NAME
context['site_url'] = settings.SITE_URL
try:
text_template = get_template("email/%s.txt" % email_key)
if text_template:
text_content = text_template.render(context)
html_template = get_template("email/%s.html" % email_key)
if html_template:
html_content = html_template.render(context)
except TemplateDoesNotExist:
pass
#logger.debug("text_context: %s" % text_content)
#logger.debug("html_content: %s" % html_content)
return (text_content, html_content)
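# For example, render_templates({'user': user}, "introduction") renders the
# templates "email/introduction.txt" and "email/introduction.html" and returns
# (text_content, html_content); if a template is missing, TemplateDoesNotExist
# is swallowed and the remaining values stay None.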
def team_signature(user):
context = default_context()
context['user'] = user
return render_to_string('email/team_email_signature.txt', context=context)
def send(recipient, subject, text_message, html_message=None):
send_email(recipient, subject, text_message, html_message=html_message, fail_silently=False)
def send_quietly(recipient, subject, text_message, html_message=None):
send_email(recipient, subject, text_message, html_message=html_message, fail_silently=True)
def send_email(recipient, subject, text_message, html_message=None, fail_silently=False):
# Pull the user from the email address
user = User.objects.filter(email=recipient).first()
# A little safety net when debugging
if settings.DEBUG:
recipient = settings.DEFAULT_FROM_EMAIL
# Adjust the subject if we have a prefix
if hasattr(settings, "EMAIL_SUBJECT_PREFIX"):
subject = settings.EMAIL_SUBJECT_PREFIX.strip() + " " + subject.strip()
note = None
success = False
try:
msg = EmailMultiAlternatives(subject, text_message, settings.DEFAULT_FROM_EMAIL, [recipient])
if html_message:
msg.attach_alternative(html_message, 'text/html')
msg.send(fail_silently=False)
success = True
except:
note = traceback.format_exc()
        if not fail_silently:
            raise
finally:
if user:
try:
from nadine.models.profile import SentEmailLog
log = SentEmailLog(user=user, recipient=recipient, subject=subject, success=success)
if note:
log.note = note
log.save()
except Exception as e:
logger.error(e)
# Copyright 2020 Office Nomads LLC (https://officenomads.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://opensource.org/licenses/Apache-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
the-stack_106_22757
import gym
from collections import deque
from agent import DQNAGENT
import numpy as np
import random
import os
random.seed(2212)
np.random.seed(2212)
EPISODES = 10000
REPLAY_MEMORY_SIZE = 100000
MINIMUM_REPLAY_MEMORY = 1000
MINIBATCH_SIZE = 12
EPSILON = 1
EPSILON_DECAY = 0.99
MINIMUM_EPSILON = 0.001
DISCOUNT = 0.99
VISUALIZATION = True
ENV_NAME = "MountainCar-v0"
env = gym.make(ENV_NAME)
action_dim = env.action_space.n
observation_dim = env.observation_space.shape
replay_memory = deque(maxlen = REPLAY_MEMORY_SIZE)
agent = DQNAGENT(action_dim , observation_dim)
def train_dqn_agent():
minibatch = random.sample(replay_memory , MINIBATCH_SIZE) # [12 , 5]
X_cur_states = []
X_next_states = []
for index , sample in enumerate(minibatch):
cur_state , action , reward , next_state , done = sample
X_cur_states.append(cur_state)
X_next_states.append(next_state)
X_cur_states = np.array(X_cur_states) # [12 , 1 , 2] s
X_next_states = np.array(X_next_states) # [12 , 1 , 2] s'
cur_action_values = agent.model.predict(X_cur_states) # a [12 , 3]
next_action_values = agent.model.predict(X_next_states) # a' [12 , 3]
for index , sample in enumerate(minibatch):
cur_state , action , reward , next_state , done = sample # _ , 1 , -1 , _2 , False
if not done:
cur_action_values[index][action] = reward + DISCOUNT * np.amax(next_action_values[index])
# cur_action_values[0][1] = -1 + 0.99 * max(next_action_values[0])
else:
cur_action_values[index][action] = reward # cur_action_values[0][1] = reward
agent.model.fit(X_cur_states , cur_action_values , verbose = 0) # input = X_cur_states , output = cur_action_values
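# Note added for clarity: the loop above implements the standard one-step
# Q-learning regression target
#   y = r                                   if the transition ended the episode
#   y = r + DISCOUNT * max_a' Q(s', a')     otherwise
# e.g. with reward -1, DISCOUNT 0.99 and next-state action values [0.2, 0.5, 0.1],
# the target written for the taken action is -1 + 0.99 * 0.5 = -0.505.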
max_rewards = -99999
for episode in range(EPISODES):
cur_state = env.reset()
done = False
episode_reward = 0
episode_length = 0
render = True if (episode > 200) else False
while not done:
if render:
env.render()
episode_length += 1
        if np.random.uniform(low = 0 , high = 1) < EPSILON:
action = np.random.randint(0 , action_dim)
else:
action = np.argmax(agent.model.predict(np.expand_dims(cur_state , axis = 0)) [0])
next_state , reward , done , _ = env.step(action)
episode_reward += reward
if done and episode_length < 200:
reward = 250 + episode_reward
if episode_reward > max_rewards:
agent.model.save_weights(str(episode_reward) + "_agent_.h5")
else:
reward = 5 * abs(next_state[0] - cur_state[0]) + 3 * abs(cur_state[1])
replay_memory.append((cur_state , action , reward , next_state , done))
cur_state = next_state
if(len(replay_memory) < MINIMUM_REPLAY_MEMORY):
continue
train_dqn_agent()
    if EPSILON > MINIMUM_EPSILON:
        EPSILON *= EPSILON_DECAY
    max_rewards = max(episode_reward , max_rewards)
    print("EPISODE " , episode , "Reward " , episode_reward , "Maximum Reward " , max_rewards , "EPSILON " , EPSILON)
the-stack_106_22758
import re
class Conjugator:
def calculate_lemma_model(self, lemma):
if lemma == 'être':
return 'être'
elif lemma == 'avoir':
return 'avoir'
elif lemma == 'aller':
return 'aller'
elif lemma.endswith('er'):
appeler_exceptions = ['agneler', 'celer', 'ciseler', 'congeler', 'déceler', 'décongeler', 'dégeler', 'démanteler', 'écarteler', 'geler', 'harceler', 'marteler', 'modeler', 'peler', 'receler', 'regeler', 'remodeler', 'surgeler']
jeter_exceptions = ['acheter', 'bégueter', 'caqueter', 'corseter', 'crocheter', 'duveter', 'fileter', 'fureter', 'haleter', 'racheter']
peser_regex = re.compile('.+e[nsv]er$')
céder_regex = re.compile('.+é(gl|tr|[dlrt])er$')
if lemma.endswith('guer'):
return 'fatiguer'
elif lemma.endswith('quer'):
return 'fabriquer'
elif lemma.endswith('ier'):
return 'prier'
elif lemma.endswith('cer') and not lemma.endswith('écer') and not lemma.endswith('ecer'):
return 'lancer'
elif lemma.endswith('ger') and not lemma.endswith('éger'):
return 'manger'
elif lemma.endswith('eler') and lemma not in appeler_exceptions:
return 'appeler'
elif lemma.endswith('eter') and lemma not in jeter_exceptions:
return 'jeter'
elif peser_regex.match(lemma) is not None or lemma in appeler_exceptions or lemma in jeter_exceptions:
return 'peser'
elif céder_regex.match(lemma) is not None:
return 'céder'
elif lemma in ['dépecer', 'clamecer']:
return 'dépecer'
elif lemma == 'rapiécer':
return 'rapiécer'
elif lemma.endswith('éger'):
return 'protéger'
elif (lemma.endswith('oyer') and lemma not in ['envoyer', 'renvoyer']) or lemma.endswith('uyer'):
return 'employer'
elif lemma.endswith('ayer'):
return 'payer'
elif lemma in ['envoyer', 'renvoyer']:
return 'envoyer'
else:
return 'parler'
elif lemma.endswith('oir'):
if lemma in ['voir', 'entrevoir', 'revoir']:
return 'voir'
elif lemma == 'prévoir':
return 'prévoir'
elif lemma == 'pourvoir':
return 'pourvoir'
elif lemma in ['devoir', 'redevoir']:
return 'devoir'
elif lemma.endswith('cevoir'):
return 'recevoir'
elif lemma == 'mouvoir':
return 'mouvoir'
elif lemma in ['promouvoir', 'émouvoir']:
return 'promouvoir'
elif lemma in ['pleuvoir', 'repleuvoir']:
return 'pleuvoir'
elif lemma in ['valoir', 'équivaloir', 'revaloir']:
return 'valoir'
elif lemma == 'prévaloir':
return 'prévaloir'
elif lemma == 'falloir':
return 'falloir'
elif lemma == 'pouvoir':
return 'pouvoir'
elif lemma == 'savoir':
return 'savoir'
elif lemma in ['vouloir', 'revouloir']:
return 'vouloir'
elif lemma in ['asseoir', 'rasseoir']:
return 'asseoir'
elif lemma == 'surseoir':
return 'surseoir'
elif lemma.endswith('ir') or lemma.endswith('ïr'):
if lemma == 'haïr':
return 'haïr'
elif lemma.endswith('ouvrir') or lemma in ['offrir', 'souffrir']:
return 'couvrir'
elif lemma in ['assaillir', 'défaillir', 'saillir', 'tressaillir']:
return 'assaillir'
elif lemma in ['cueillir', 'accueillir', 'recueillir']:
return 'cueillir'
elif lemma == 'bouillir':
return 'bouillir'
            elif lemma in ['partir', 'départir', 'repartir', 'sortir', 'ressortir', 'servir', 'desservir', 'resservir', 'dormir', 'endormir', 'rendormir'] or (lemma.endswith('entir') and lemma not in ['ralentir', 'retentir']):
return 'partir'
elif lemma in ['fuir', 'enfuir']:
return 'fuir'
elif lemma.endswith('quérir'):
return 'acquérir'
elif lemma.endswith('courir'):
return 'courir'
elif lemma == 'mourir':
return 'mourir'
elif lemma in ['vêtir', 'dévêtir', 'revêtir']:
return 'vêtir'
elif lemma.endswith('venir') or lemma.endswith('tenir'):
return 'venir'
else:
return 'finir'
elif lemma.endswith('re'):
if lemma == 'maudire':
return 'maudire'
elif (lemma.endswith('endre') and not lemma.endswith('prendre')) or lemma.endswith('ondre') or lemma in ['répandre', 'épandre', 'perdre', 'reperdre'] or lemma.endswith('ordre'):
return 'rendre'
elif lemma in ['rompre', 'corrompre', 'interrompre']:
return 'rompre'
elif lemma.endswith('prendre'):
return 'prendre'
elif lemma.endswith('battre'):
return 'battre'
elif lemma.endswith('mettre'):
return 'mettre'
elif lemma in ['suivre', 'poursuivre', 'ensuivre']:
return 'suivre'
elif lemma in ['vivre', 'revivre', 'survivre']:
return 'vivre'
elif lemma.endswith('écrire') or lemma.endswith('scrire'):
return 'écrire'
elif lemma in ['dire', 'redire']:
return 'dire'
elif lemma in ['prédire', 'dédire', 'interdire', 'médire']:
return 'prédire'
elif lemma == 'suffire':
return 'suffire'
elif lemma == 'circoncire':
return 'circoncire'
elif lemma.endswith('duire') or lemma.endswith('truire') or lemma.endswith('cuire'):
return 'conduire'
elif lemma.endswith('nuire') or lemma.endswith('luire'):
return 'nuire'
            elif lemma in ['lire', 'élire', 'réélire', 'relire']:
return 'lire'
elif lemma in ['rire', 'sourire']:
return 'rire'
elif lemma.endswith('aindre') or lemma.endswith('eindre') or lemma.endswith('oindre'):
return 'plaindre'
elif lemma in ['absoudre', 'dissoudre']:
return 'absoudre'
elif lemma == 'résoudre':
return 'résoudre'
elif lemma in ['coudre', 'découdre', 'recoudre']:
return 'coudre'
elif lemma == 'moudre':
return 'moudre'
elif lemma in ['exclure', 'conclure']:
return 'exclure'
elif lemma in ['inclure', 'occlure']:
return 'inclure'
elif lemma == 'boire':
return 'boire'
elif lemma == 'croire':
return 'croire'
elif lemma == 'croître':
return 'croître'
elif lemma in ['accroître', 'décroître']:
return 'accroître'
elif lemma.endswith('aître') and lemma not in ['naître', 'renaître']:
return 'connaître'
elif lemma in ['naître', 'renaître']:
return 'naître'
elif lemma in ['plaire', 'complaire', 'déplaire']:
return 'plaire'
elif lemma == 'taire':
return 'taire'
elif lemma.endswith('faire'):
return 'faire'
elif lemma.endswith('raire'):
return 'traire'
elif lemma in ['vaincre', 'convaincre']:
return 'vaincre'
def calculate_conjugation_group(self, model):
if model.endswith('er') and model != 'aller':
return 1
elif model == 'finir' or model == 'haïr':
return 2
else:
return 3
def calculate_infinitive_stem(self, lemma):
if lemma.endswith('oir'):
return lemma[:-3]
elif lemma.endswith('er') or lemma.endswith('ir') or lemma.endswith('ïr') or lemma.endswith('re'):
return lemma[:-2]
def calculate_présent_stem(self, lemma, model, subject_group):
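        # subject_group encodes person and number: 'S1'/'S2'/'S3' for je/tu/il-elle,
        # 'P1'/'P2'/'P3' for nous/vous/ils-elles (see calculate_subj_group further down).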
stem = self.calculate_infinitive_stem(lemma)
if stem is None:
return None
if model in ['employer', 'envoyer']:
if subject_group in ['S1', 'S2', 'S3', 'P3']:
stem = stem[:-1] + 'i'
elif model == 'finir' or model == 'haïr':
if subject_group in ['S1', 'S2', 'S3']:
stem += 'i'
elif model == 'haïr':
stem += 'ïss'
else:
stem += 'iss'
elif model == 'maudire' and subject_group in ['P1', 'P2', 'P3']:
# despite being a group 3 verb, maudire conjugates like finir
stem += 'ss'
elif model == 'bouillir' and subject_group in ['S1', 'S2', 'S3']:
            # singular conjugations lose the 'ill' at the end of the stem
stem = stem[:-3]
elif model in ['voir', 'prévoir', 'pourvoir']:
if subject_group in ['S1', 'S2', 'S3', 'P3']:
stem += 'oi'
else:
stem += 'oy'
elif model in ['devoir', 'recevoir']:
if subject_group in ['S1', 'S2', 'S3']:
# ev -> oi
stem = stem[:-2] + 'oi'
elif subject_group == 'P3':
# ev -> oiv
stem = stem[:-2] + 'oiv'
elif model in ['mouvoir', 'promouvoir', 'pleuvoir', 'pouvoir', 'vouloir']:
if subject_group in ['S1', 'S2', 'S3']:
stem = stem[:-1]
if stem.endswith('ou'):
# ou -> eu
stem = stem[:-2] + 'eu'
elif subject_group == 'P3':
# replace ou one letter in from the end if it is there
# eg mouv -> meuv
if stem[-3:-1] == 'ou':
stem = stem[:-3] + 'eu' + stem[-1]
elif model in ['valoir', 'prévaloir', 'falloir']:
if subject_group in ['S1', 'S2', 'S3']:
if stem.endswith('all'):
# all -> au
stem = stem[:-3] + 'au'
elif stem.endswith('al'):
# al -> au
stem = stem[:-2] + 'au'
elif model in ['savoir']:
if subject_group in ['S1', 'S2', 'S3']:
# v -> i
stem = stem[:-1] + 'i'
elif model in ['asseoir']:
if subject_group in ['S1', 'S2', 'S3']:
# e -> ied
stem = stem[:-1] + 'ied'
else:
stem += 'y'
elif model in ['surseoir']:
#TODO: 'asseoir' can also conjugate like this, how do we deal with multiple valid conjugations
if subject_group in ['S1', 'S2', 'S3', 'P3']:
# e -> oi
stem = stem[:-1] + 'oi'
else:
# e -> oy
stem = stem[:-1] + 'oy'
elif model in ['croire', 'traire']:
if subject_group in ['P1', 'P2']:
# i -> y
stem = stem[:-1] + 'y'
elif model in ['battre', 'mettre']:
if subject_group in ['S1', 'S2', 'S3']:
# remove a 't'
stem = stem[:-1]
elif model in ['suivre', 'vivre']:
if subject_group in ['S1', 'S2', 'S3']:
# remove a 'v'
stem = stem[:-1]
elif model == 'écrire':
if subject_group in ['P1', 'P2', 'P3']:
                # add a 'v'
stem = stem + 'v'
elif model == 'plaindre':
if subject_group in ['S1', 'S2', 'S3']:
# remove a 'd'
stem = stem[:-1]
else:
# nd -> gn
stem = stem[:-2] + 'gn'
elif model in ['absoudre', 'résoudre']:
if subject_group in ['S1', 'S2', 'S3']:
# remove a 'd'
stem = stem[:-1]
else:
# ud -> lv
stem = stem[:-2] + 'lv'
elif model == 'coudre':
if subject_group in ['P1', 'P2', 'P3']:
# d -> s
stem = stem[:-1] + 's'
elif model == 'moudre':
if subject_group in ['P1', 'P2', 'P3']:
# d -> l
stem = stem[:-1] + 'l'
elif model in ['dire', 'prédire', 'suffire', 'circoncire', 'conduire', 'nuire', 'lire']:
if subject_group in ['P1', 'P2', 'P3']:
# add an 's'
stem += 's'
        elif model in ['plaire', 'taire', 'faire']:
            if subject_group in ['P1', 'P2', 'P3']:
                # add an 's'
                stem += 's'
            elif model == 'plaire' and subject_group == 'S3':
                # i -> î (il plaît)
                stem = stem[:-1] + 'î'
elif model in ['croître', 'accroître', 'connaître', 'naître']:
if subject_group in ['S1', 'S2']:
if model == 'croître':
# remove the 't'
stem = stem[:-1]
else:
# ît -> i
stem = stem[:-2] + 'i'
elif subject_group == 'S3':
# remove the 't'
stem = stem[:-1]
else:
# ît -> iss
stem = stem[:-2] + 'iss'
elif model == 'vaincre':
if subject_group in ['P1', 'P2', 'P3']:
# c -> qu
stem = stem[:-1] + 'qu'
elif model == 'prendre':
if subject_group in ['P1', 'P2']:
# remove the d
stem = stem[:-1]
elif subject_group == 'P3':
# d -> n
stem = stem[:-1] + 'n'
elif model == 'boire':
if subject_group in ['P1', 'P2']:
# oi -> uv
stem = stem[:-2] + 'uv'
elif subject_group == 'P3':
# add a 'v'
stem += 'v'
elif model == 'partir':
if subject_group in ['S1', 'S2', 'S3']:
# remove last consonant
stem = stem[:-1]
elif model == 'fuir':
if subject_group in ['S1', 'S2', 'S3', 'P3']:
# add 'i'
stem += 'i'
else:
# add 'y'
stem += 'y'
elif model == 'mourir':
if subject_group in ['S1', 'S2', 'S3', 'P3']:
# our -> eur
stem = stem[:-3] + 'eur'
elif model == 'venir':
if subject_group in ['S1', 'S2', 'S3']:
# en -> ien
stem = stem[:-2] + 'ien'
elif subject_group == 'P3':
# en -> ienn
stem = stem[:-2] + 'ienn'
elif model == 'acquérir':
if subject_group in ['S1', 'S2', 'S3']:
                # ér -> ier
stem = stem[:-2] + 'ier'
elif subject_group == 'P3':
                # ér -> ièr
stem = stem[:-2] + 'ièr'
return stem
def calculate_présent_suffix(self, model, subject_group, stem):
suffix = ''
conjugation_group = self.calculate_conjugation_group(model)
if conjugation_group == 1 or model in ['couvrir', 'assaillir', 'cueillir']:
if subject_group == 'S1':
suffix = 'e'
elif subject_group == 'S2':
suffix = 'es'
elif subject_group == 'S3':
suffix = 'e'
else:
if subject_group == 'S1':
suffix = 's'
elif subject_group == 'S2':
suffix = 's'
elif subject_group == 'S3':
suffix = 't'
if subject_group == 'P1':
suffix = 'ons'
elif subject_group == 'P2':
suffix = 'ez'
elif subject_group == 'P3':
suffix = 'ent'
# EXCEPTIONS
        # vouloir, valoir, prévaloir and pouvoir take 'x' instead of 's' for S1/S2
if model in ['vouloir', 'valoir', 'prévaloir', 'pouvoir'] and subject_group in ['S1', 'S2']:
suffix = 'x'
# don't add 't' if stem ends in 'c', 'd', or 't'
if suffix == 't' and stem[-1] in ['c', 'd', 't']:
suffix = ''
return suffix
def calculate_présent(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
if model == 'être':
if subject_group == 'S1':
return 'suis'
elif subject_group == 'S2':
return 'es'
elif subject_group == 'S3':
return 'est'
if subject_group == 'P1':
return 'sommes'
elif subject_group == 'P2':
return 'êtes'
elif subject_group == 'P3':
return 'sont'
elif model == 'avoir':
if subject_group == 'S1':
return 'ai'
elif subject_group == 'S2':
return 'as'
elif subject_group == 'S3':
return 'a'
elif subject_group == 'P3':
return 'ont'
elif model == 'aller':
if subject_group == 'S1':
return 'vais'
elif subject_group == 'S2':
return 'vas'
elif subject_group == 'S3':
return 'va'
elif subject_group == 'P3':
return 'vont'
        elif model == 'faire':
            if subject_group == 'P2':
                # -faire -> -faites (also covers derived verbs such as refaire)
                return lemma[:-4] + 'aites'
            elif subject_group == 'P3':
                # -faire -> -font
                return lemma[:-4] + 'ont'
elif model == 'dire':
if subject_group == 'P2':
# -dire -> -dites
return lemma[:-2] + 'tes'
stem = self.calculate_présent_stem(lemma, model, subject_group)
if stem is None:
return None
suffix = self.calculate_présent_suffix(model, subject_group, stem)
if suffix is None:
return None
# apply consonant doubling if required
if model in ['appeler', 'jeter'] and suffix in ['e', 'es', 'ent']:
stem += stem[-1]
# do e -> è replacement if required
if model in ['peser', 'dépecer'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('e')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
# do é -> è replacement if required
if model in ['céder', 'rapiécer', 'protéger'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('é')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
result = stem + suffix
# do c -> ç replacement if required
if model in ['lancer', 'dépecer', 'rapiécer', 'recevoir']:
index = result.rfind('c')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'o', 'u']:
result = result[:index] + 'ç' + result[index + 1:]
# do g -> ge replacement if required
if model in ['manger', 'protéger']:
index = result.rfind('g')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'o']:
result = result[:index] + 'ge' + result[index + 1:]
return result
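    # A few illustrative results (assuming calculate_lemma_model maps these lemmas
    # to the models referenced above):
    #   calculate_présent('finir', 'S3')  -> 'finit'
    #   calculate_présent('manger', 'P1') -> 'mangeons'
    #   calculate_présent('être', 'P2')   -> 'êtes'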
def calculate_past_participle_stem(self, lemma, model):
stem = self.calculate_infinitive_stem(lemma)
if model == 'couvrir':
# r -> er
stem = stem[:-1] + 'er'
elif model in ['devoir', 'recevoir']:
# remove 'ev'
stem = stem[:-2]
elif model == 'savoir':
# remove 'av'
stem = stem[:-2]
elif model in ['mouvoir', 'promouvoir', 'pleuvoir', 'pouvoir']:
# remove 'ouv' or 'euv'
stem = stem[:-3]
elif model in ['asseoir', 'surseoir']:
# remove 'e'
stem = stem[:-1]
elif model == 'prendre':
# remove 'end'
stem = stem[:-3]
elif model == 'mettre':
# remove 'ett'
stem = stem[:-3]
elif model == 'vivre':
# iv -> éc
stem = stem[:-2] + 'éc'
elif model == 'plaindre':
# remove 'd'
stem = stem[:-1]
elif model in ['boire', 'croire']:
# remove 'oi'
stem = stem[:-2]
elif model in ['maudire', 'conduire', 'dire', 'prédire', 'écrire', 'faire', 'traire', ]:
# remove 'i'
stem = stem[:-1]
elif model in ['exclure', 'inclure']:
# remove 'u'
stem = stem[:-1]
elif model in ['croître', 'accroître']:
# remove 'oît'
stem = stem[:-3]
elif model in ['connaître', 'naître']:
# remove 'aît'
stem = stem[:-3]
elif model in ['plaire', 'taire']:
# remove 'aî'
stem = stem[:-2]
        elif model in ['nuire', 'lire', 'rire', 'suffire', 'circoncire']:
# remove 'i'
stem = stem[:-1]
elif model == 'absoudre':
# remove 'd'
stem = stem[:-1]
elif model == 'résoudre':
# ud -> l
stem = stem[:-2] + 'l'
elif model == 'coudre':
# d -> s
stem = stem[:-1] + 's'
elif model == 'moudre':
# d -> l
stem = stem[:-1] + 'l'
elif model == 'acquérir':
# remove 'ér'
stem = stem[:-2]
elif model == 'mourir':
# our -> or
stem = stem[:-3] + 'or'
elif model == 'être':
stem = 'ét'
elif model == 'avoir':
stem = 'e'
return stem
def calculate_past_participle_suffix(self, model, masculine):
conjugation_group = self.calculate_conjugation_group(model)
suffix = None
if conjugation_group == 1 or model in ['naître', 'être', 'aller']:
suffix = 'é'
elif model in ['finir', 'assaillir', 'cueillir', 'bouillir', 'nuire', 'suivre', 'rire', 'suffire', 'partir', 'fuir']:
suffix = 'i'
elif model == 'haïr':
suffix = 'ï'
elif model in ['asseoir', 'surseoir', 'prendre', 'mettre', 'circoncire', 'acquérir']:
suffix = 'is'
elif model in ['couvrir', 'plaindre', 'mourir']:
suffix = 't'
elif model in ['maudire', 'conduire', 'dire', 'prédire', 'écrire', 'faire', 'traire']:
suffix = 'it'
        elif model in ['voir', 'prévoir', 'pourvoir', 'promouvoir', 'pleuvoir', 'recevoir', 'valoir', 'prévaloir', 'falloir', 'pouvoir', 'savoir', 'vouloir', 'avoir', 'rendre', 'rompre', 'battre', 'vivre', 'lire', 'exclure', 'boire', 'croire', 'accroître', 'connaître', 'plaire', 'taire', 'coudre', 'moudre', 'résoudre', 'vaincre', 'courir', 'vêtir', 'venir']:
suffix = 'u'
elif model in ['devoir', 'mouvoir', 'croître']:
suffix = 'û'
elif model == 'inclure':
suffix = 'us'
elif model == 'absoudre':
if masculine:
suffix = 's'
else:
suffix = 't'
return suffix
def calculate_past_participle(self, lemma, masculine, plural):
model = self.calculate_lemma_model(lemma)
        stem = self.calculate_past_participle_stem(lemma, model)
if stem is None:
return None
        suffix = self.calculate_past_participle_suffix(model, masculine)
if suffix is None:
return None
if stem.endswith('d') and suffix == 't':
# drop last 'd' to avoid 'dt'
stem = stem[:-1]
if stem.endswith('i') and suffix == 'i':
# drop last 'i' to avoid duplicated letter
stem = stem[:-1]
if stem.endswith('u') and suffix == 'u':
# drop last 'u' to avoid duplicated letter
stem = stem[:-1]
if stem.endswith('ec') and suffix == 'u':
# c -> ç
stem = stem[:-1] + 'ç'
result = stem + suffix
# some past participles do not decline
if model in ['suffire', 'nuire', 'rire', 'plaire', 'taire', 'pouvoir', 'être']:
return result
if model in ['devoir', 'mouvoir', 'croître'] and (plural or not masculine):
# these past participles lose the circumflex on their 'u' when declining
# û -> u
result = result[:-1] + 'u'
# perform past participle declension
if not masculine:
result += 'e'
if plural and not result.endswith('s'):
result += 's'
return result
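    # For example (on an instance): calculate_past_participle('prendre', False, True)
    # gives 'prises' and calculate_past_participle('recevoir', False, False) gives 'reçue'.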
def calculate_imparfait_stem(self, lemma, model):
if lemma == 'être':
return 'ét'
return self.calculate_présent_stem(lemma, model, 'P1')
def calculate_imparfait_suffix(self, subject_group):
suffix = ''
if subject_group == 'S1':
suffix = 'ais'
elif subject_group == 'S2':
suffix = 'ais'
elif subject_group == 'S3':
suffix = 'ait'
elif subject_group == 'P1':
suffix = 'ions'
elif subject_group == 'P2':
suffix = 'iez'
elif subject_group == 'P3':
suffix = 'aient'
return suffix
def calculate_imparfait(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
stem = self.calculate_imparfait_stem(lemma, model)
if stem is None:
return None
suffix = self.calculate_imparfait_suffix(subject_group)
if suffix is None:
return None
result = stem + suffix
# do c -> ç replacement if required
if model in ['lancer', 'dépecer', 'rapiécer', 'recevoir']:
index = result.rfind('c')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'o', 'u']:
result = result[:index] + 'ç' + result[index + 1:]
# do g -> ge replacement if required
if model in ['manger', 'protéger']:
index = result.rfind('g')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'o']:
result = result[:index] + 'ge' + result[index + 1:]
return result
def calculate_futur_stem(self, lemma, model):
stem = lemma
if model in ['appeler', 'jeter']:
stem = stem[:-2] + stem[-3] + 'er'
elif model in ['peser', 'dépecer']:
index = stem[:-2].rfind('e')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
elif model == 'employer':
stem = stem[:-3] + 'ier'
elif model == 'envoyer':
stem = stem[:-4] + 'err'
elif model == 'maudire':
stem = stem[:-1]
elif model == 'cueillir':
stem = stem[:-2] + 'er'
elif model == 'voir':
stem = stem[:-3] + 'err'
        elif model in ['devoir', 'recevoir', 'mouvoir', 'promouvoir', 'pleuvoir']:
stem = stem[:-3] + 'r'
elif model in ['valoir', 'prévaloir']:
stem = stem[:-5] + 'audr'
elif model == 'falloir':
stem = stem[:-6] + 'audr'
elif model == 'pouvoir':
stem = stem[:-6] + 'ourr'
elif model in ['savoir', 'avoir']:
stem = stem[:-5] + 'aur'
elif model == 'vouloir':
stem = stem[:-4] + 'dr'
elif model == 'asseoir':
stem = stem[:-4] + 'iér'
elif model == 'faire':
stem = stem[:-4] + 'er'
elif model == 'acquérir':
            stem = stem[:-4] + 'err'
elif model in ['courir', 'mourir']:
stem = stem[:-2] + 'r'
elif model == 'venir':
stem = stem[:-4] + 'iendr'
elif model == 'être':
stem = 'ser'
elif model == 'aller':
stem = 'ir'
elif model.endswith('re'):
stem = stem[:-1]
return stem
def calculate_futur_suffix(self, subject_group):
suffix = ''
if subject_group == 'S1':
suffix = 'ai'
elif subject_group == 'S2':
suffix = 'as'
elif subject_group == 'S3':
suffix = 'a'
elif subject_group == 'P1':
suffix = 'ons'
elif subject_group == 'P2':
suffix = 'ez'
elif subject_group == 'P3':
suffix = 'ont'
return suffix
def calculate_futur(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
stem = self.calculate_futur_stem(lemma, model)
if stem is None:
return None
suffix = self.calculate_futur_suffix(subject_group)
if suffix is None:
return None
# apply consonant doubling if required
if model in ['appeler', 'jeter'] and suffix in ['e', 'es', 'ent']:
stem += stem[-1]
# do e -> è replacement if required
if model in ['peser', 'dépecer'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('e')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
# do é -> è replacement if required
if model in ['céder', 'rapiécer', 'protéger'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('é')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
result = stem + suffix
return result
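    # For example (on an instance): calculate_futur('être', 'S1') gives 'serai' and
    # calculate_futur('appeler', 'P3') gives 'appelleront'.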
def calculate_conditionnel_suffix(self, subject_group):
suffix = ''
if subject_group == 'S1':
suffix = 'ais'
elif subject_group == 'S2':
suffix = 'ais'
elif subject_group == 'S3':
suffix = 'ait'
elif subject_group == 'P1':
suffix = 'ions'
elif subject_group == 'P2':
suffix = 'iez'
elif subject_group == 'P3':
suffix = 'aient'
return suffix
def calculate_conditionnel(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
stem = self.calculate_futur_stem(lemma, model)
if stem is None:
return None
suffix = self.calculate_conditionnel_suffix(subject_group)
if suffix is None:
return None
result = stem + suffix
return result
def calculate_passé_simple_stem(self, lemma, model):
stem = ""
if model == 'être':
stem = lemma[:-4] + 'fu'
elif model == 'naître':
stem = lemma[:-4] + 'qui'
elif model in ['couvrir', 'mourir']:
stem = self.calculate_infinitive_stem(lemma)
if model == 'couvrir':
stem += 'i'
else:
stem += 'u'
elif model == 'plaindre':
stem = self.calculate_présent_stem(lemma, model, 'P1') + 'i'
else:
pp_stem = self.calculate_past_participle_stem(lemma, model)
pp_suffix = self.calculate_past_participle_suffix(model, True)
if lemma.endswith('er'):
stem = pp_stem + 'a'
elif pp_suffix in ['i', 'ï', 'is', 'it']:
stem = pp_stem
if model == 'faire':
stem = stem[:-1]
elif model == 'conduire':
stem += 'is'
elif model == 'écrire':
stem += 'iv'
elif model == 'nuire':
stem += 'is'
if pp_suffix == 'ï':
stem += 'ï'
else:
stem += 'i'
elif pp_suffix in ['u', 'û', 'us']:
stem = pp_stem
if model in ['voir', 'prévoir', 'rendre', 'rompre', 'battre', 'coudre', 'vaincre', 'vêtir']:
if model == 'vaincre':
# vaincre changes -c -> -qu
stem = stem[:-1] + 'qu'
stem += 'i'
elif model == 'venir':
# venir/tenir changes -enu -> -in
stem = stem[:-2] + 'in'
elif model == 'croître':
stem += 'û'
else:
stem += 'u'
return stem
def calculate_passé_simple_suffix(self, subject_group, stem):
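        # A leading '^' in the returned suffix marks that the last vowel of the stem
        # takes a circumflex before the ending is appended (handled by the caller,
        # calculate_passé_simple), e.g. 'fu' + '^mes' -> 'fûmes'.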
suffix = ''
if stem.endswith('a'):
if subject_group == 'S1':
suffix = 'i'
elif subject_group == 'S2':
suffix = 's'
elif subject_group == 'S3':
suffix = ''
else:
if subject_group == 'S1':
suffix = 's'
elif subject_group == 'S2':
suffix = 's'
elif subject_group == 'S3':
suffix = 't'
if subject_group == 'P1':
suffix = '^mes'
elif subject_group == 'P2':
suffix = '^tes'
elif subject_group == 'P3':
suffix = 'rent'
return suffix
def calculate_passé_simple(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
stem = self.calculate_passé_simple_stem(lemma, model)
if stem is None:
return None
suffix = self.calculate_passé_simple_suffix(subject_group, stem)
if suffix is None:
return None
result = None
if suffix.startswith("^"):
last_vowel = ''
index = len(stem) - 1
while 0 <= index < len(stem) and stem[index] not in 'aâeéèêhiîïoôu':
index -= 1
if 0 <= index < len(stem) and stem[index] in 'aiu':
replacement = None
if stem[index] == 'a':
replacement = 'â'
elif stem[index] == 'i':
replacement = 'î'
elif stem[index] == 'u':
replacement = 'û'
result = stem[0:index] + replacement + stem[index + 1:] + suffix[1:]
else:
result = stem + suffix[1:]
elif stem.endswith('a') and suffix == 'rent':
result = stem[:-1] + 'è' + suffix
else:
result = stem + suffix
# do c -> ç replacement if required
if model in ['lancer', 'dépecer', 'rapiécer', 'recevoir']:
index = result.rfind('c')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'â', 'u', 'û']:
result = result[:index] + 'ç' + result[index + 1:]
# do g -> ge replacement if required
if model in ['manger', 'protéger']:
index = result.rfind('g')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'â']:
result = result[:index] + 'ge' + result[index + 1:]
return result
def calculate_subjonctif_présent_stem(self, lemma, model, subject_group):
if lemma == 'être':
if subject_group in ['S1', 'S2', 'S3', 'P3']:
return 'soi'
else:
return 'soy'
elif lemma == 'avoir':
if subject_group in ['S1', 'S2', 'S3', 'P3']:
return 'ai'
else:
return 'ay'
elif lemma == 'savoir':
return 'sach'
elif lemma == 'pouvoir':
return 'puiss'
elif lemma == 'aller' and subject_group in ['S1', 'S2', 'S3', 'P3']:
return 'aill'
elif lemma == 'falloir':
return 'faill'
elif lemma == 'vouloir' and subject_group in ['S1', 'S2', 'S3', 'P3']:
return 'veuill'
elif model == 'faire':
return lemma[:-3] + 'ss'
elif model == 'valoir' and subject_group in ['S1', 'S2', 'S3', 'P3']:
return lemma[:-4] + 'ill'
elif subject_group in ['S1', 'S2', 'S3', 'P3']:
return self.calculate_présent_stem(lemma, model, 'P3')
else:
return self.calculate_présent_stem(lemma, model, 'P1')
def calculate_subjonctif_présent_suffix(self, model, subject_group):
suffix = ''
if model == 'être':
if subject_group == 'S1':
return 's'
elif subject_group == 'S2':
return 's'
elif subject_group == 'S3':
return 't'
elif subject_group == 'P1':
return 'ons'
elif subject_group == 'P2':
return 'ez'
elif model == 'avoir':
if subject_group == 'S3':
return 't'
elif subject_group == 'P1':
return 'ons'
elif subject_group == 'P2':
return 'ez'
if subject_group == 'S1':
suffix = 'e'
elif subject_group == 'S2':
suffix = 'es'
elif subject_group == 'S3':
suffix = 'e'
elif subject_group == 'P1':
suffix = 'ions'
elif subject_group == 'P2':
suffix = 'iez'
elif subject_group == 'P3':
suffix = 'ent'
return suffix
def calculate_subjonctif_présent(self, lemma, subject_group):
model = self.calculate_lemma_model(lemma)
stem = self.calculate_subjonctif_présent_stem(lemma, model, subject_group)
if stem is None:
return None
suffix = self.calculate_subjonctif_présent_suffix(model, subject_group)
if suffix is None:
return None
# apply consonant doubling if required
if model in ['appeler', 'jeter'] and suffix in ['e', 'es', 'ent']:
stem += stem[-1]
# do e -> è replacement if required
if model in ['peser', 'dépecer'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('e')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
# do é -> è replacement if required
if model in ['céder', 'rapiécer', 'protéger'] and suffix in ['e', 'es', 'ent']:
index = stem.rfind('é')
if 0 <= index < len(stem):
stem = stem[:index] + 'è' + stem[index + 1:]
result = stem + suffix
# do c -> ç replacement if required
if model in ['lancer', 'dépecer', 'rapiécer', 'recevoir']:
index = result.rfind('c')
if 0 <= index and index + 1 < len(result) and result[index + 1] in ['a', 'o', 'u']:
result = result[:index] + 'ç' + result[index + 1:]
return result
def calculate_subj_group(self, sentence, verb):
subject = None
if verb.has_tag("agrees_with"):
subject = sentence.words[verb.get_tag_value("agrees_with")]
        subj_group = None
        if subject is None:
            return subj_group
        if subject.lemma == "je":
subj_group = "S1"
elif subject.lemma == "tu":
subj_group = "S2"
elif subject.lemma == "nous":
subj_group = "P1"
elif subject.lemma == "vous":
subj_group = "P2"
elif subject.has_tag("is_plural"):
if subject.get_tag_value("is_plural"):
subj_group = "P3"
else:
subj_group = "S3"
return subj_group
def word_gender(self, sentence, word, save_parameters=True):
if word.has_tag("gender"):
return word.get_tag_value("gender")
if word.has_tag("agrees_with"):
agrees_with = sentence.words[word.get_tag_value("agrees_with")]
return self.word_gender(sentence, agrees_with, save_parameters)
return "masc"
def word_is_plural(self, sentence, word, save_parameters=True):
if word.has_tag("is_plural"):
return word.get_tag_value("is_plural")
if word.has_tag("agrees_with"):
agrees_with = sentence.words[word.get_tag_value("agrees_with")]
return self.word_is_plural(sentence, agrees_with, save_parameters)
return False
def conjugate(self, sentence, verb, save_parameters=True):
if verb.has_tag("finite_verb"):
subj_group = self.calculate_subj_group(sentence, verb)
if subj_group is not None:
verb.set_tag("conj_subj_group", value=subj_group)
conj_mood = verb.get_tag_value("conj_mood")
conj_tense = verb.get_tag_value("conj_tense")
if conj_mood == "indicatif":
if conj_tense == "présent":
verb.inflection = self.calculate_présent(verb.lemma, subj_group)
elif conj_tense == "imparfait":
verb.inflection = self.calculate_imparfait(verb.lemma, subj_group)
elif conj_tense == "futur":
verb.inflection = self.calculate_futur(verb.lemma, subj_group)
elif conj_tense == "passé simple":
verb.inflection = self.calculate_passé_simple(verb.lemma, subj_group)
elif conj_mood == "conditionnel":
if conj_tense == "présent":
verb.inflection = self.calculate_conditionnel(verb.lemma, subj_group)
elif conj_mood == "subjonctif":
if conj_tense == "présent":
verb.inflection = self.calculate_subjonctif_présent(verb.lemma, subj_group)
elif verb.has_tag("past_participle"):
gender = self.word_gender(sentence, verb, save_parameters)
is_plural = self.word_is_plural(sentence, verb, save_parameters)
verb.inflection = self.calculate_past_participle(verb.lemma, gender == "masc", is_plural)
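    # Usage sketch: the class these methods belong to is defined earlier in the
    # module (its name and constructor are not shown here), so 'conjugator' below
    # is only a placeholder for an instance of it.
    #   conjugator.calculate_imparfait('être', 'S1')             # -> 'étais'
    #   conjugator.calculate_passé_simple('être', 'P1')          # -> 'fûmes'
    #   conjugator.calculate_subjonctif_présent('pouvoir', 'S3') # -> 'puisse'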
|
the-stack_106_22759 | from .blur import BoxBlur, box_blur
from .blur_pool import BlurPool2D, MaxBlurPool2D, blur_pool2d, max_blur_pool2d
from .canny import Canny, canny
from .filter import filter2d, filter2D, filter2d_separable, filter3d, filter3D
from .gaussian import GaussianBlur2d, gaussian_blur2d
from .kernels import (
gaussian,
get_binary_kernel2d,
get_box_kernel2d,
get_diff_kernel2d,
get_gaussian_discrete_kernel1d,
get_gaussian_erf_kernel1d,
get_gaussian_kernel1d,
get_gaussian_kernel2d,
get_hanning_kernel1d,
get_hanning_kernel2d,
get_laplacian_kernel1d,
get_laplacian_kernel2d,
get_sobel_kernel2d,
get_spatial_gradient_kernel2d,
get_spatial_gradient_kernel3d,
laplacian_1d,
)
from .kernels_geometry import get_motion_kernel2d, get_motion_kernel3d
from .laplacian import Laplacian, laplacian
from .median import MedianBlur, median_blur
from .motion import MotionBlur, MotionBlur3D, motion_blur, motion_blur3d
from .sobel import Sobel, SpatialGradient, SpatialGradient3d, sobel, spatial_gradient, spatial_gradient3d
from .unsharp import UnsharpMask, unsharp_mask
__all__ = [
"gaussian",
"get_binary_kernel2d",
"get_box_kernel2d",
"get_gaussian_kernel1d",
"get_gaussian_discrete_kernel1d",
"get_gaussian_erf_kernel1d",
"get_gaussian_kernel2d",
"get_hanning_kernel1d",
"get_hanning_kernel2d",
"get_laplacian_kernel1d",
"get_laplacian_kernel2d",
"get_motion_kernel2d",
"get_motion_kernel3d",
"get_spatial_gradient_kernel2d",
"get_spatial_gradient_kernel3d",
"get_sobel_kernel2d",
"get_diff_kernel2d",
"gaussian_blur2d",
"laplacian",
"laplacian_1d",
"unsharp_mask",
"sobel",
"spatial_gradient",
"canny",
"box_blur",
"blur_pool2d",
"max_blur_pool2d",
"median_blur",
"motion_blur",
"motion_blur3d",
"filter2d",
"filter2d_separable",
"filter3d",
"filter2D",
"filter3D",
"GaussianBlur2d",
"Laplacian",
"SpatialGradient",
"Sobel",
"Canny",
"BoxBlur",
"BlurPool2D",
"MaxBlurPool2D",
"MedianBlur",
"MotionBlur",
"MotionBlur3D",
"SpatialGradient3d",
"spatial_gradient3d",
"UnsharpMask",
]
|
the-stack_106_22762 | """ --- Day 3: Toboggan Trajectory ---
With the toboggan login problems resolved, you set off toward the airport. While travel by toboggan might be easy, it's certainly not safe: there's very minimal steering and the area is covered in trees. You'll need to see which angles will take you near the fewest trees.
Due to the local geology, trees in this area only grow on exact integer coordinates in a grid. You make a map (your puzzle input) of the open squares (.) and trees (#) you can see. For example:
..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#
These aren't the only trees, though; due to something you read about once involving arboreal genetics and biome stability, the same pattern repeats to the right many times:
..##.........##.........##.........##.........##.........##....... --->
#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..
.#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.
..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#
.#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.
..#.##.......#.##.......#.##.......#.##.......#.##.......#.##..... --->
.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#
.#........#.#........#.#........#.#........#.#........#.#........#
#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...
#...##....##...##....##...##....##...##....##...##....##...##....#
.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.# --->
You start on the open square (.) in the top-left corner and need to reach the bottom (below the bottom-most row on your map).
The toboggan can only follow a few specific slopes (you opted for a cheaper model that prefers rational numbers); start by counting all the trees you would encounter for the slope right 3, down 1:
From your starting position at the top-left, check the position that is right 3 and down 1. Then, check the position that is right 3 and down 1 from there, and so on until you go past the bottom of the map.
The locations you'd check in the above example are marked here with O where there was an open square and X where there was a tree:
..##.........##.........##.........##.........##.........##....... --->
#..O#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..
.#....X..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.
..#.#...#O#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#
.#...##..#..X...##..#..#...##..#..#...##..#..#...##..#..#...##..#.
..#.##.......#.X#.......#.##.......#.##.......#.##.......#.##..... --->
.#.#.#....#.#.#.#.O..#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#
.#........#.#........X.#........#.#........#.#........#.#........#
#.##...#...#.##...#...#.X#...#...#.##...#...#.##...#...#.##...#...
#...##....##...##....##...#X....##...##....##...##....##...##....#
.#..#...#.#.#..#...#.#.#..#...X.#.#..#...#.#.#..#...#.#.#..#...#.# --->
In this example, traversing the map using this slope would cause you to encounter 7 trees.
Starting at the top-left corner of your map and following a slope of right 3 and down 1, how many trees would you encounter? """
""" --- Part Two ---
Time to check the rest of the slopes - you need to minimize the probability of a sudden arboreal stop, after all.
Determine the number of trees you would encounter if, for each of the following slopes, you start at the top-left corner and traverse the map all the way to the bottom:
Right 1, down 1.
Right 3, down 1. (This is the slope you already checked.)
Right 5, down 1.
Right 7, down 1.
Right 1, down 2.
In the above example, these slopes would find 2, 7, 3, 4, and 2 tree(s) respectively; multiplied together, these produce the answer 336.
What do you get if you multiply together the number of trees encountered on each of the listed slopes? """
# Initialize variables
d03_input = []
# Read the input, stripping newlines from the end of each line and saving each line as an item in a list
with open('d03/d03_input.txt') as f:
d03_input = [(line.rstrip()) for line in f]
print(d03_input)
path = d03_input
num_trees = 0
i = 0
j = 0
num_trees_1_1 = 0
i_1_1 = 0
j_1_1 = 0
num_trees_1_5 = 0
i_1_5 = 0
j_1_5 = 0
num_trees_1_7 = 0
i_1_7 = 0
j_1_7 = 0
num_trees_2_1 = 0
i_2_1 = 0
j_2_1 = 0
for line in path:
if i < len(path):
if j >= len(line):
j-=(len(line))
if line[j] == '#':
num_trees+=1
print(i, len(line), j, line[j], num_trees)
i+=1
j+=3
if i_1_1 < len(path):
if j_1_1 >= len(line):
j_1_1-=(len(line))
if line[j_1_1] == '#':
num_trees_1_1+=1
print(i_1_1, len(line), j_1_1, line[j_1_1], num_trees_1_1)
i_1_1+=1
j_1_1+=1
if i_1_5 < len(path):
if j_1_5 >= len(line):
j_1_5-=(len(line))
if line[j_1_5] == '#':
num_trees_1_5+=1
print(i_1_5, len(line), j_1_5, line[j_1_5], num_trees_1_5)
i_1_5+=1
j_1_5+=5
if i_1_7 < len(path):
if j_1_7 >= len(line):
j_1_7-=(len(line))
if line[j_1_7] == '#':
num_trees_1_7+=1
print(i_1_7, len(line), j_1_7, line[j_1_7], num_trees_1_7)
i_1_7+=1
j_1_7+=7
if i_2_1 < len(path):
if (i_2_1) % 2 == 0:
if j_2_1 >= len(line):
j_2_1-=(len(line))
if line[j_2_1] == '#':
num_trees_2_1+=1
print(i_2_1, len(line), j_2_1, line[j_2_1], num_trees_2_1)
            j_2_1+=1
        i_2_1+=1
print(num_trees, num_trees_1_1, num_trees_1_5, num_trees_1_7, num_trees_2_1)
print(num_trees * num_trees_1_1 * num_trees_1_5 * num_trees_1_7 * num_trees_2_1)
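# Sketch of a more general helper expressing the same slope walk as the blocks
# above (not wired into the code; the names are illustrative only).
def count_trees(grid, right, down):
    trees = 0
    col = 0
    for row in range(0, len(grid), down):
        line = grid[row]
        if line[col % len(line)] == '#':
            trees += 1
        col += right
    return trees
# Example use with the slopes from part two:
# product = 1
# for right, down in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:
#     product *= count_trees(d03_input, right, down)
# print(product)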
|
the-stack_106_22764 | import __builtin__
__builtin__.process = 'ai'
__builtin__.__dict__.update(__import__('panda3d.core', fromlist = ['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from toontown.toonbase import ToonPythonUtil as PythonUtil
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--base-channel', help='The base channel that the server may use.')
parser.add_argument('--max-channels', help='The number of channels the server may use.')
parser.add_argument('--stateserver', help="The control channel of this AI's designated State Server.")
parser.add_argument('--district-name', help="What this AI Server's district will be named.")
parser.add_argument('--astron-ip', help="The IP address of the Astron Message Director to connect to.")
parser.add_argument('--eventlogger-ip', help="The IP address of the Astron Event Logger to log to.")
parser.add_argument('--start-time', help="The time of day to start at")
parser.add_argument('config', nargs='*', default=['config/general.prc', 'config/release/dev.prc'], help="PRC file(s) to load.")
args = parser.parse_args()
for prc in args.config:
loadPrcFile(prc)
localconfig = ''
if args.base_channel: localconfig += 'air-base-channel %s\n' % args.base_channel
if args.max_channels: localconfig += 'air-channel-allocation %s\n' % args.max_channels
if args.stateserver: localconfig += 'air-stateserver %s\n' % args.stateserver
if args.district_name: localconfig += 'district-name %s\n' % args.district_name
if args.astron_ip: localconfig += 'air-connect %s\n' % args.astron_ip
if args.eventlogger_ip: localconfig += 'eventlog-host %s\n' % args.eventlogger_ip
if args.start_time: localconfig += 'start-time %s\n' % args.start_time
loadPrcFileData('Command-line', localconfig)
from otp.ai.AIBaseGlobal import *
from toontown.ai.ToontownAIRepository import ToontownAIRepository
simbase.air = ToontownAIRepository(config.GetInt('air-base-channel', 401000000),
config.GetInt('air-stateserver', 4002),
config.GetString('district-name', 'Devhaven'),
config.GetInt('start-time', 6))
host = config.GetString('air-connect', '127.0.0.1')
port = 7100
if ':' in host:
host, port = host.split(':', 1)
port = int(port)
simbase.air.connect(host, port)
try:
run()
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
info = PythonUtil.describeException()
simbase.air.writeServerEvent('ai-exception', simbase.air.getAvatarIdFromSender(), simbase.air.getAccountIdFromSender(), info)
raise
|
the-stack_106_22767 | import unittest
import time
from drone_inner_comms.rabbit.rabbit_inner_comms import RabbitInnerComms, Endpoint
class RabbitCommsCase(unittest.TestCase):
def setUp(self) -> None:
self.rabbitComms = RabbitInnerComms()
self.message = None
def _message_handler(self, msg):
self.message = msg
def test_comms(self):
message = 'test'
endpoint = Endpoint.ENGINE
print('On message')
self.rabbitComms.on_message(endpoint, self._message_handler)
print('After on message')
self.rabbitComms.send_message(endpoint, message)
time.sleep(1)
self.assertEqual(self.message, message)
self.rabbitComms.stop_listening(endpoint)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22769 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import mock
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.web.models import (SourceControl, HostNameBinding, Site, SiteConfig,
HostNameSslState, SslState, Certificate,
AddressResponse, HostingEnvironmentProfile)
from azure.mgmt.web import WebSiteManagementClient
from azure.cli.core.adal_authentication import AdalAuthentication
from knack.util import CLIError
from azure.cli.command_modules.appservice.custom import (set_deployment_user,
update_git_token, add_hostname,
update_site_configs,
get_external_ip,
view_in_browser,
sync_site_repo,
_match_host_names_from_cert,
bind_ssl_cert,
list_publish_profiles,
config_source_control,
show_webapp,
get_streaming_log,
download_historical_logs)
# pylint: disable=line-too-long
from vsts_cd_manager.continuous_delivery_manager import ContinuousDeliveryResult
class TestWebappMocked(unittest.TestCase):
def setUp(self):
self.client = WebSiteManagementClient(AdalAuthentication(lambda: ('bearer', 'secretToken')), '123455678')
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
def test_set_deployment_user_creds(self, client_factory_mock):
self.client._client = mock.MagicMock()
client_factory_mock.return_value = self.client
mock_response = mock.MagicMock()
mock_response.status_code = 200
self.client._client.send.return_value = mock_response
# action
result = set_deployment_user(mock.MagicMock(), 'admin', 'verySecret1')
# assert things get wired up with a result returned
self.assertIsNotNone(result)
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
def test_set_source_control_token(self, client_factory_mock):
client_factory_mock.return_value = self.client
self.client._client = mock.MagicMock()
sc = SourceControl('not-really-needed', source_control_name='GitHub', token='veryNiceToken')
self.client._client.send.return_value = FakedResponse(200)
self.client._deserialize = mock.MagicMock()
self.client._deserialize.return_value = sc
# action
result = update_git_token(mock.MagicMock(), 'veryNiceToken')
# assert things gets wired up
self.assertEqual(result.token, 'veryNiceToken')
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
def test_set_domain_name(self, client_factory_mock):
client_factory_mock.return_value = self.client
# set up the return value for getting a webapp
webapp = Site('westus')
webapp.name = 'veryNiceWebApp'
self.client.web_apps.get = lambda _, _1: webapp
# set up the result value of putting a domain name
domain = 'veryNiceDomain'
binding = HostNameBinding(webapp.location,
domain_id=domain,
custom_host_name_dns_record_type='A',
host_name_type='Managed')
self.client.web_apps._client = mock.MagicMock()
self.client.web_apps._client.send.return_value = FakedResponse(200)
self.client.web_apps._deserialize = mock.MagicMock()
self.client.web_apps._deserialize.return_value = binding
# action
result = add_hostname(mock.MagicMock(), 'g1', webapp.name, domain)
# assert
self.assertEqual(result.domain_id, domain)
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
def test_get_external_ip_from_ase(self, client_factory_mock):
client = mock.Mock()
client_factory_mock.return_value = client
cmd_mock = mock.MagicMock()
# set up the web inside a ASE, with an ip based ssl binding
host_env = HostingEnvironmentProfile('id11')
host_env.name = 'ase1'
host_env.resource_group = 'myRg'
host_ssl_state = HostNameSslState(ssl_state=SslState.ip_based_enabled, virtual_ip='1.2.3.4')
client.web_apps.get.return_value = Site('antarctica', hosting_environment_profile=host_env,
host_name_ssl_states=[host_ssl_state])
client.app_service_environments.list_vips.return_value = AddressResponse()
# action
result = get_external_ip(cmd_mock, 'myRg', 'myWeb')
# assert, we return the virtual ip from the ip based ssl binding
self.assertEqual('1.2.3.4', result['ip'])
# tweak to have no ip based ssl binding, but it is in an internal load balancer
host_ssl_state2 = HostNameSslState(ssl_state=SslState.sni_enabled)
client.web_apps.get.return_value = Site('antarctica', hosting_environment_profile=host_env,
host_name_ssl_states=[host_ssl_state2])
client.app_service_environments.list_vips.return_value = AddressResponse(internal_ip_address='4.3.2.1')
# action
result = get_external_ip(cmd_mock, 'myRg', 'myWeb')
# assert, we take the ILB address
self.assertEqual('4.3.2.1', result['ip'])
# tweak to have no ip based ssl binding, and not in internal load balancer
host_ssl_state2 = HostNameSslState(ssl_state=SslState.sni_enabled)
client.web_apps.get.return_value = Site('antarctica', hosting_environment_profile=host_env,
host_name_ssl_states=[host_ssl_state2])
client.app_service_environments.list_vips.return_value = AddressResponse(service_ip_address='1.1.1.1')
# action
result = get_external_ip(cmd_mock, 'myRg', 'myWeb')
# assert, we take service ip
self.assertEqual('1.1.1.1', result['ip'])
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._resolve_hostname_through_dns', autospec=True)
def test_get_external_ip_from_dns(self, resolve_hostname_mock, client_factory_mock):
client = mock.Mock()
client_factory_mock.return_value = client
# set up the web inside a ASE, with an ip based ssl binding
site = Site('antarctica')
site.default_host_name = 'myweb.com'
client.web_apps.get.return_value = site
# action
get_external_ip(mock.MagicMock(), 'myRg', 'myWeb')
# assert, we return the virtual ip from the ip based ssl binding
resolve_hostname_mock.assert_called_with('myweb.com')
@mock.patch('azure.cli.command_modules.appservice.custom.web_client_factory', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.vsts_cd_provider.ContinuousDeliveryManager', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.vsts_cd_provider.Profile', autospec=True)
def test_config_source_control_vsts(self, profile_mock, cd_manager_mock, client_factory_mock):
# Mock the result of get auth token (avoiding REST call)
profile = mock.Mock()
profile.get_subscription.return_value = {'id': 'id1', 'name': 'sub1', 'tenantId': 'tenant1'}
profile.get_current_account_user.return_value = None
profile.get_login_credentials.return_value = None, None, None
profile.get_access_token_for_resource.return_value = None
profile_mock.return_value = profile
# Mock the cd manager class so no REST calls are made
cd_manager = mock.Mock()
status = ContinuousDeliveryResult(None, None, None, None, None, None, "message1", None,
None, None)
cd_manager.setup_continuous_delivery.return_value = status
cd_manager_mock.return_value = cd_manager
# Mock the client and set the location
client = mock.Mock()
client_factory_mock.return_value = client
site = Site('antarctica')
site.default_host_name = 'myweb.com'
client.web_apps.get.return_value = site
config_source_control(mock.MagicMock(), 'group1', 'myweb', 'http://github.com/repo1', None, None, None,
None, None, 'ASPNet', 'working_directory', 'Gulp', 'Django',
'Python 2.7.12 x64', True, 'https://account1.visualstudio.com',
None, 'slot1', None, None)
cd_app_type_details = {
'cd_app_type': 'ASPNet',
'app_working_dir': 'working_directory',
'nodejs_task_runner': 'Gulp',
'python_framework': 'Django',
'python_version': 'Python 2.7.12 x64'
}
cd_manager.setup_continuous_delivery.assert_called_with('slot1', cd_app_type_details,
'https://account1.visualstudio.com',
True, None, None, None)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
def test_update_site_config(self, site_op_mock):
site_config = SiteConfig('antarctica')
site_op_mock.side_effect = [site_config, None]
cmd = mock.MagicMock()
# action
update_site_configs(cmd, 'myRG', 'myweb', java_version='1.8')
# assert
config_for_set = site_op_mock.call_args_list[1][0][5]
self.assertEqual(config_for_set.java_version, '1.8')
# point check some unrelated properties should stay at None
self.assertEqual(config_for_set.use32_bit_worker_process, None)
self.assertEqual(config_for_set.java_container, None)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
def test_list_publish_profiles_on_slots(self, site_op_mock):
site_op_mock.return_value = [b'<publishData><publishProfile publishUrl="ftp://123"/><publishProfile publishUrl="ftp://1234"/></publishData>']
# action
result = list_publish_profiles(mock.MagicMock(), 'myRG', 'myweb', 'slot1')
# assert
site_op_mock.assert_called_with(mock.ANY, 'myRG', 'myweb', 'list_publishing_profile_xml_with_secrets', 'slot1')
self.assertTrue(result[0]['publishUrl'].startswith('ftp://123'))
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom.get_streaming_log', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._open_page_in_browser', autospec=True)
def test_browse_with_trace(self, webbrowser_mock, log_mock, site_op_mock):
site = Site('antarctica')
site.default_host_name = 'haha.com'
site.enabled_host_names = [site.default_host_name]
site.host_name_ssl_states = [HostNameSslState('does not matter',
ssl_state=SslState.ip_based_enabled)]
site_op_mock.return_value = site
# action
view_in_browser(mock.MagicMock(), 'myRG', 'myweb', logs=True)
# assert
webbrowser_mock.assert_called_with('https://haha.com')
log_mock.assert_called_with(mock.ANY, 'myRG', 'myweb', None, None)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._rename_server_farm_props', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._fill_ftp_publishing_url', autospec=True)
def test_show_webapp(self, file_ftp_mock, rename_mock, site_op_mock):
faked_web = mock.MagicMock()
site_op_mock.return_value = faked_web
# action
result = show_webapp(mock.MagicMock(), 'myRG', 'myweb', slot=None, app_instance=None)
# assert (we invoke the site op)
self.assertEqual(faked_web, result)
self.assertTrue(rename_mock.called)
self.assertTrue(file_ftp_mock.called)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
def test_sync_repository_skip_bad_error(self, site_op_mock):
resp = FakedResponse(200) # because of bad spec, sdk throws on 200.
setattr(resp, 'text', '{"Message": ""}')
site_op_mock.side_effect = CloudError(resp, error="bad error")
# action
sync_site_repo(mock.MagicMock(), 'myRG', 'myweb')
# assert
pass # if we are here, it means CLI has captured the bogus exception
def test_match_host_names_from_cert(self):
result = _match_host_names_from_cert(['*.mysite.com'], ['admin.mysite.com', 'log.mysite.com', 'mysite.com'])
self.assertEqual(set(['admin.mysite.com', 'log.mysite.com']), result)
result = _match_host_names_from_cert(['*.mysite.com', 'mysite.com'], ['admin.mysite.com', 'log.mysite.com', 'mysite.com'])
self.assertEqual(set(['admin.mysite.com', 'log.mysite.com', 'mysite.com']), result)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', autospec=True)
@mock.patch('threading.Thread', autospec=True)
def test_log_stream_supply_cli_ctx(self, threading_mock, get_scm_url_mock, site_op_mock):
# test exception to exit the streaming loop
class ErrorToExitInfiniteLoop(Exception):
pass
threading_mock.side_effect = ErrorToExitInfiniteLoop('Expected error to exit early')
get_scm_url_mock.return_value = 'http://great_url'
cmd_mock = mock.MagicMock()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
try:
# action
get_streaming_log(cmd_mock, 'rg', 'web1')
self.fail('test exception was not thrown')
except ErrorToExitInfiniteLoop:
# assert
site_op_mock.assert_called_with(cli_ctx_mock, 'rg', 'web1', 'list_publishing_credentials', None)
@mock.patch('azure.cli.command_modules.appservice.custom._generic_site_operation', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._get_scm_url', autospec=True)
@mock.patch('azure.cli.command_modules.appservice.custom._get_log', autospec=True)
def test_download_log_supply_cli_ctx(self, get_log_mock, get_scm_url_mock, site_op_mock):
def test_result():
res = mock.MagicMock()
res.publishing_user_name, res.publishing_password = 'great_user', 'secret_password'
return res
test_scm_url = 'http://great_url'
get_scm_url_mock.return_value = test_scm_url
publish_cred_mock = mock.MagicMock()
publish_cred_mock.result = test_result
site_op_mock.return_value = publish_cred_mock
cmd_mock = mock.MagicMock()
cli_ctx_mock = mock.MagicMock()
cmd_mock.cli_ctx = cli_ctx_mock
# action
download_historical_logs(cmd_mock, 'rg', 'web1')
# assert
site_op_mock.assert_called_with(cli_ctx_mock, 'rg', 'web1', 'list_publishing_credentials', None)
get_log_mock.assert_called_with(test_scm_url + '/dump', 'great_user', 'secret_password', None)
class FakedResponse(object): # pylint: disable=too-few-public-methods
def __init__(self, status_code):
self.status_code = status_code
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22770 | import responses
import json
from .helpers import mock_file, ClientTestCase
class TestCapturePayment(ClientTestCase):
def setUp(self):
super(TestCapturePayment, self).setUp()
self.base_url = '{}/payments/capture'.format(self.base_url)
@responses.activate
def test_capture_payment(self):
"""Test capture payment."""
init = mock_file('capture_payment_payload')
result = mock_file('capture_payment_response')
url = self.base_url
responses.add(responses.POST, url, status=200, body=json.dumps(result),
match_querystring=True)
self.assertEqual(self.client.Payment.capture_payment(init), result)
|
the-stack_106_22771 | from django.urls import path
from . import views
app_name = "user"
urlpatterns = [
path('create/', views.CreateUserView.as_view(), name="create"),
path('token/', views.CreateTokenView.as_view(), name='token'),
path('me/', views.ManageUserView.as_view(), name='me'),
]
|
the-stack_106_22774 |
import cPickle
from kiko.constants import SERIALIZATION
from kiko.exceptions import (KikoDeserializeException,
InvalidDeserializerException)
from kiko.io.basedeserializer import BaseDeserializer
from . import v1
class DeserializerManager:
_deserializers = {}
@classmethod
def get_deserializer(cls, version, facade):
d = cls._deserializers.get(version)
if d is None:
raise InvalidDeserializerException('Can not find kiko deserializer '
'version %d' % version)
return d(facade)
@classmethod
def register_deserializer(cls, deserializer):
if not issubclass(deserializer, BaseDeserializer):
raise InvalidDeserializerException("Invalid deserializer")
cls._deserializers[deserializer.version()] = deserializer
DeserializerManager.register_deserializer(v1.DeserializerV1)
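# Usage sketch: a caller holding a kiko facade object (not shown in this file)
# could look up the registered deserializer; 'facade' below is only a placeholder.
#   deserializer = DeserializerManager.get_deserializer(v1.DeserializerV1.version(), facade)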
|
the-stack_106_22775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dario Zanzico ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: docker_stack
author: "Dario Zanzico (@dariko)"
short_description: docker stack module
description:
- Manage docker stacks using the 'docker stack' command
on the target node (see examples).
options:
name:
description:
- Stack name
type: str
required: yes
state:
description:
- Service state.
type: str
default: "present"
choices:
- present
- absent
compose:
description:
- List of compose definitions. Any element may be a string
referring to the path of the compose file on the target host
or the YAML contents of a compose file nested as dictionary.
type: list
# elements: raw
default: []
prune:
description:
- If true will add the C(--prune) option to the C(docker stack deploy) command.
This will have docker remove the services not present in the
current stack definition.
type: bool
default: no
with_registry_auth:
description:
- If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
This will have docker send registry authentication details to Swarm agents.
type: bool
default: no
resolve_image:
description:
- If set will add the C(--resolve-image) option to the C(docker stack deploy) command.
This will have docker query the registry to resolve image digest and
supported platforms. If not set, docker use "always" by default.
type: str
choices: ["always", "changed", "never"]
absent_retries:
description:
- If C(>0) and I(state) is C(absent) the module will retry up to
I(absent_retries) times to delete the stack until all the
resources have been effectively deleted.
If the last try still reports the stack as not completely
removed the module will fail.
type: int
default: 0
absent_retries_interval:
description:
- Interval in seconds between consecutive I(absent_retries).
type: int
default: 1
requirements:
- jsondiff
- pyyaml
notes:
- Return values I(out) and I(err) have been deprecated and will be removed in community.general 3.0.0. Use I(stdout) and I(stderr) instead.
'''
RETURN = '''
stack_spec_diff:
description: |
dictionary containing the differences between the 'Spec' field
of the stack services before and after applying the new stack
definition.
sample: >
"stack_spec_diff":
{'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
returned: on change
type: dict
'''
EXAMPLES = '''
- name: Deploy stack from a compose file
community.general.docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- name: Deploy stack from base compose file and override the web service
community.general.docker_stack:
state: present
name: mystack
compose:
- /opt/docker-compose.yml
- version: '3'
services:
web:
image: nginx:latest
environment:
ENVVAR: envvar
- name: Remove stack
community.general.docker_stack:
name: mystack
state: absent
'''
import json
import tempfile
from ansible.module_utils.six import string_types
from time import sleep
try:
from jsondiff import diff as json_diff
HAS_JSONDIFF = True
except ImportError:
HAS_JSONDIFF = False
try:
from yaml import dump as yaml_dump
HAS_YAML = True
except ImportError:
HAS_YAML = False
from ansible.module_utils.basic import AnsibleModule, os
def docker_stack_services(module, stack_name):
docker_bin = module.get_bin_path('docker', required=True)
rc, out, err = module.run_command([docker_bin,
"stack",
"services",
stack_name,
"--format",
"{{.Name}}"])
if err == "Nothing found in stack: %s\n" % stack_name:
return []
return out.strip().split('\n')
def docker_service_inspect(module, service_name):
docker_bin = module.get_bin_path('docker', required=True)
rc, out, err = module.run_command([docker_bin,
"service",
"inspect",
service_name])
if rc != 0:
return None
else:
ret = json.loads(out)[0]['Spec']
return ret
def docker_stack_deploy(module, stack_name, compose_files):
docker_bin = module.get_bin_path('docker', required=True)
command = [docker_bin, "stack", "deploy"]
if module.params["prune"]:
command += ["--prune"]
if module.params["with_registry_auth"]:
command += ["--with-registry-auth"]
if module.params["resolve_image"]:
command += ["--resolve-image",
module.params["resolve_image"]]
for compose_file in compose_files:
command += ["--compose-file",
compose_file]
command += [stack_name]
return module.run_command(command)
def docker_stack_inspect(module, stack_name):
ret = {}
for service_name in docker_stack_services(module, stack_name):
ret[service_name] = docker_service_inspect(module, service_name)
return ret
def docker_stack_rm(module, stack_name, retries, interval):
docker_bin = module.get_bin_path('docker', required=True)
command = [docker_bin, "stack", "rm", stack_name]
rc, out, err = module.run_command(command)
while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
sleep(interval)
retries = retries - 1
rc, out, err = module.run_command(command)
return rc, out, err
def main():
module = AnsibleModule(
argument_spec={
'name': dict(type='str', required=True),
'compose': dict(type='list', elements='raw', default=[]),
'prune': dict(type='bool', default=False),
'with_registry_auth': dict(type='bool', default=False),
'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
'state': dict(type='str', default='present', choices=['present', 'absent']),
'absent_retries': dict(type='int', default=0),
'absent_retries_interval': dict(type='int', default=1)
},
supports_check_mode=False
)
if not HAS_JSONDIFF:
return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
if not HAS_YAML:
return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
state = module.params['state']
compose = module.params['compose']
name = module.params['name']
absent_retries = module.params['absent_retries']
absent_retries_interval = module.params['absent_retries_interval']
if state == 'present':
if not compose:
module.fail_json(msg=("compose parameter must be a list "
"containing at least one element"))
compose_files = []
for i, compose_def in enumerate(compose):
if isinstance(compose_def, dict):
compose_file_fd, compose_file = tempfile.mkstemp()
module.add_cleanup_file(compose_file)
with os.fdopen(compose_file_fd, 'w') as stack_file:
compose_files.append(compose_file)
stack_file.write(yaml_dump(compose_def))
elif isinstance(compose_def, string_types):
compose_files.append(compose_def)
else:
module.fail_json(msg="compose element '%s' must be a " +
"string or a dictionary" % compose_def)
before_stack_services = docker_stack_inspect(module, name)
rc, out, err = docker_stack_deploy(module, name, compose_files)
after_stack_services = docker_stack_inspect(module, name)
if rc != 0:
module.fail_json(msg="docker stack up deploy command failed",
rc=rc,
out=out, err=err, # Deprecated
stdout=out, stderr=err)
before_after_differences = json_diff(before_stack_services,
after_stack_services)
for k in before_after_differences.keys():
if isinstance(before_after_differences[k], dict):
before_after_differences[k].pop('UpdatedAt', None)
before_after_differences[k].pop('Version', None)
if not list(before_after_differences[k].keys()):
before_after_differences.pop(k)
if not before_after_differences:
module.exit_json(
changed=False,
rc=rc,
stdout=out,
stderr=err)
else:
module.exit_json(
changed=True,
rc=rc,
stdout=out,
stderr=err,
stack_spec_diff=json_diff(before_stack_services,
after_stack_services,
dump=True))
else:
if docker_stack_services(module, name):
rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
if rc != 0:
module.fail_json(msg="'docker stack down' command failed",
rc=rc,
out=out, err=err, # Deprecated
stdout=out, stderr=err)
else:
module.exit_json(changed=True,
msg=out, rc=rc,
err=err, # Deprecated
stdout=out, stderr=err)
module.exit_json(changed=False)
if __name__ == "__main__":
main()
|
the-stack_106_22778 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Fermionic Transformation """
import unittest
from test.chemistry import QiskitChemistryTestCase
from qiskit.aqua.operators import OperatorBase, I, Z
from qiskit.chemistry import QiskitChemistryError, FermionicOperator
from qiskit.chemistry.core import TransformationType, QubitMappingType
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.transformations import FermionicTransformation
class TestFermionicTransformation(QiskitChemistryTestCase):
"""Fermionic Transformation tests."""
def setUp(self):
super().setUp()
try:
driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735',
unit=UnitsType.ANGSTROM,
charge=0,
spin=0,
basis='sto3g')
except QiskitChemistryError:
self.skipTest('PYSCF driver does not appear to be installed')
self.driver = driver
def _validate_vars(self, fermionic_transformation, energy_shift=0.0, ph_energy_shift=0.0):
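        """ validate hf energy and the energy shifts of the transformation """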
self.assertAlmostEqual(fermionic_transformation._hf_energy, -1.117, places=3)
self.assertAlmostEqual(fermionic_transformation._energy_shift, energy_shift)
self.assertAlmostEqual(fermionic_transformation._ph_energy_shift, ph_energy_shift)
def _validate_info(self, fermionic_transformation, num_particles=None,
num_orbitals=4, actual_two_qubit_reduction=False):
num_particles = num_particles if num_particles is not None else (1, 1)
z2symmetries = fermionic_transformation.molecule_info.pop('z2_symmetries')
self.assertEqual(z2symmetries.is_empty(), True)
self.assertEqual(fermionic_transformation.molecule_info,
{'num_particles': num_particles,
'num_orbitals': num_orbitals,
'two_qubit_reduction': actual_two_qubit_reduction})
def _validate_input_object(self, qubit_op, num_qubits=4, num_paulis=15):
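        """ validate type, qubit count and pauli count of the qubit operator """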
self.assertTrue(isinstance(qubit_op, OperatorBase))
self.assertIsNotNone(qubit_op)
self.assertEqual(qubit_op.num_qubits, num_qubits)
self.assertEqual(len(qubit_op), num_paulis)
def test_output(self):
""" output test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.PARITY,
two_qubit_reduction=True,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation, actual_two_qubit_reduction=True)
self._validate_input_object(qubit_op, num_qubits=2, num_paulis=5)
def test_jordan_wigner(self):
""" jordan wigner test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.JORDAN_WIGNER,
two_qubit_reduction=False,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation)
self._validate_input_object(qubit_op)
def test_jordan_wigner_2q(self):
""" jordan wigner 2q test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.JORDAN_WIGNER,
two_qubit_reduction=True,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
# Reported effective 2 qubit reduction should be false
self._validate_info(fermionic_transformation, actual_two_qubit_reduction=False)
self._validate_input_object(qubit_op)
def test_parity(self):
""" parity test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.PARITY,
two_qubit_reduction=False,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation)
self._validate_input_object(qubit_op)
def test_bravyi_kitaev(self):
""" bravyi kitaev test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.BRAVYI_KITAEV,
two_qubit_reduction=False,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation)
self._validate_input_object(qubit_op)
def test_particle_hole(self):
""" particle hole test """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.PARTICLE_HOLE,
qubit_mapping=QubitMappingType.JORDAN_WIGNER,
two_qubit_reduction=False,
freeze_core=False,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation, ph_energy_shift=-1.83696799)
self._validate_info(fermionic_transformation)
self._validate_input_object(qubit_op)
def test_freeze_core(self):
""" freeze core test -- Should be in effect a no-op for H2 """
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.JORDAN_WIGNER,
two_qubit_reduction=False,
freeze_core=True,
orbital_reduction=[])
qubit_op, _ = fermionic_transformation.transform(self.driver)
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation)
self._validate_input_object(qubit_op)
def test_orbital_reduction(self):
""" orbital reduction test --- Remove virtual orbital just
for test purposes (not sensible!)
"""
fermionic_transformation = FermionicTransformation(
transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.JORDAN_WIGNER,
two_qubit_reduction=False,
freeze_core=False,
orbital_reduction=[-1])
# get dummy aux operator
qmolecule = self.driver.run()
fer_op = FermionicOperator(h1=qmolecule.one_body_integrals, h2=qmolecule.two_body_integrals)
dummy = fer_op.total_particle_number()
expected = (I ^ I) - 0.5 * (I ^ Z) - 0.5 * (Z ^ I)
qubit_op, aux_ops = fermionic_transformation.transform(self.driver, [dummy])
self._validate_vars(fermionic_transformation)
self._validate_info(fermionic_transformation, num_orbitals=2)
self._validate_input_object(qubit_op, num_qubits=2, num_paulis=4)
# the first six aux_ops are added automatically, ours is the 7th one
self.assertEqual(aux_ops[6], expected)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22780 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import contextlib
import copy
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import versionutils
import six
from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova import image
from nova import manager
from nova import network
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova import profiler
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.volume import cinder
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def targets_cell(fn):
"""Wrap a method and automatically target the instance's cell.
This decorates a method with signature func(self, context, instance, ...)
and automatically targets the context with the instance's cell
mapping. It does this by looking up the InstanceMapping.
"""
@functools.wraps(fn)
def wrapper(self, context, *args, **kwargs):
instance = kwargs.get('instance') or args[0]
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
LOG.error('InstanceMapping not found, unable to target cell',
instance=instance)
im = None
else:
LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
{'cell': im.cell_mapping.identity,
'meth': fn.__name__})
# NOTE(danms): Target our context to the cell for the rest of
# this request, so that none of the subsequent code needs to
# care about it.
nova_context.set_target_cell(context, im.cell_mapping)
return fn(self, context, *args, **kwargs)
return wrapper
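# Illustrative usage sketch (not part of the original module); the method name
# below is hypothetical, but the pattern matches migrate_server and
# live_migrate_instance further down:
#
#     @targets_cell
#     def some_conductor_method(self, context, instance):
#         # context is now targeted at the instance's cell for the request
#         ...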
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
# NOTE(hanlind): Do not convert older than requested objects,
# see bug #1596119.
if isinstance(result, nova_object.NovaObject):
target_version = object_versions[objname]
requested_version = versionutils.convert_version_to_tuple(
target_version)
actual_version = versionutils.convert_version_to_tuple(
result.VERSION)
do_backport = requested_version < actual_version
other_major_version = requested_version[0] != actual_version[0]
if do_backport or other_major_version:
result = result.obj_to_primitive(
target_version=target_version,
version_manifest=object_versions)
return result
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
objects.Service.clear_min_version_cache()
@contextlib.contextmanager
def try_target_cell(context, cell):
"""If cell is not None call func with context.target_cell.
This is a method to help during the transition period. Currently
various mappings may not exist if a deployment has not migrated to
cellsv2. If there is no mapping call the func as normal, otherwise
call it in a target_cell context.
"""
if cell:
with nova_context.target_cell(context, cell) as cell_context:
yield cell_context
else:
yield context
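# Illustrative usage sketch (not part of the original module); obj_target_cell
# below wraps try_target_cell in exactly this way. When cell is None the
# original context is yielded unchanged:
#
#     with try_target_cell(context, cell_mapping_or_none) as cctxt:
#         do_database_work(cctxt)   # hypothetical helper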
@contextlib.contextmanager
def obj_target_cell(obj, cell):
"""Run with object's context set to a specific cell"""
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
yield target
@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.20')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.volume_api = cinder.API()
self.image_api = image.API()
self.network_api = network.API()
self.servicegroup_api = servicegroup.API()
self.scheduler_client = scheduler_client.SchedulerClient()
self.report_client = self.scheduler_client.reportclient
self.notifier = rpc.get_notifier('compute', CONF.host)
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
# TODO(danms): remove the `reservations` parameter here on compute task api
# RPC version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.UnsupportedPolicyException)
@targets_cell
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None, host_list=None):
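        """Migrate a server to another host.

        Dispatches to a live migration when ``live`` is set, or to a cold
        migration/resize when a new ``flavor`` is given; other combinations
        are not implemented.
        """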
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
clean_shutdown, request_spec,
host_list)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
clean_shutdown, request_spec, host_list):
image = utils.get_image_from_system_metadata(
instance.system_metadata)
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload.
if not request_spec:
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone,
project_id=instance.project_id)
else:
            # NOTE(sbauza): A resize means a new flavor, so we need to update
            # the original RequestSpec object to make sure the scheduler
            # verifies the right one and not the original flavor
request_spec.flavor = flavor
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec, clean_shutdown, host_list)
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
LOG.exception('Failed to deallocate networks', instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@targets_cell
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = 'live-migration'
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationPreCheckClientException,
exception.LiveMigrationWithOldNovaNotSupported,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.',
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
# Reset the task state to None to indicate completion of
# the operation as it is done in case of known exceptions.
_set_vm_state(context, instance, ex, vm_states.ERROR,
task_state=None)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.scheduler_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor, request_spec,
clean_shutdown, host_list):
return migrate.MigrationTask(context, instance, flavor,
request_spec,
clean_shutdown,
self.compute_rpcapi,
self.scheduler_client, host_list)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
build_request = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host.service_host)
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
def _validate_existing_attachment_ids(self, context, instance, bdms):
"""Ensure any attachment ids referenced by the bdms exist.
New attachments will only be created if the attachment ids referenced
by the bdms no longer exist. This can happen when an instance is
rescheduled after a failure to spawn as cleanup code on the previous
host will delete attachments before rescheduling.
"""
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id:
try:
self.volume_api.attachment_get(context, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:
attachment = self.volume_api.attachment_create(
context, bdm.volume_id, instance.uuid)
bdm.attachment_id = attachment['id']
bdm.save()
def _cleanup_when_reschedule_fails(
self, context, instance, exception, request_spec,
requested_networks):
"""Set the instance state and clean up.
It is only used in case build_instance fails while rescheduling the
instance
"""
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates, exception,
request_spec)
self._cleanup_allocated_networks(
context, instance, requested_networks)
# NOTE(danms): This is never cell-targeted because it is only used for
# cellsv1 (which does not target cells directly) and n-cpu reschedules
# (which go to the cell conductor and thus are always cell-specific).
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True,
request_spec=None, host_lists=None):
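        """Schedule the given instances (or reschedule them with the supplied
        alternate host lists) and ask the selected compute hosts to build and
        run them.
        """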
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
# Older computes will not send a request_spec during reschedules, nor
# will the API send the request_spec if using cells v1, so we need
# to check and build our own if one is not provided.
if request_spec is None:
request_spec = scheduler_utils.build_request_spec(
image, instances)
else:
# TODO(mriedem): This is annoying but to populate the local
# request spec below using the filter_properties, we have to pass
# in a primitive version of the request spec. Yes it's inefficient
# and we can remove it once the populate_retry and
# populate_filter_properties utility methods are converted to
# work on a RequestSpec object rather than filter_properties.
request_spec = request_spec.to_legacy_request_spec_dict()
# 'host_lists' will be None in one of two cases: when running cellsv1,
# or during a reschedule from a pre-Queens compute. In all other cases,
# it will be a list of lists, though the lists may be empty if there
# are no more hosts left in a rescheduling situation.
is_reschedule = host_lists is not None
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_retry() accepts it
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
LOG.debug("Rescheduling: %s", is_reschedule)
if is_reschedule:
# Make sure that we have a host, as we may have exhausted all
# our alternates
if not host_lists[0]:
# We have an empty list of hosts, so this instance has
# failed to build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instances[0].uuid})
raise exception.MaxRetriesExceeded(reason=msg)
else:
# This is not a reschedule, so we need to call the scheduler to
# get appropriate hosts for the request.
host_lists = self._schedule_instances(context, spec_obj,
instance_uuids, return_alternates=True)
except Exception as exc:
num_attempts = filter_properties.get(
'retry', {}).get('num_attempts', 1)
for instance in instances:
# If num_attempts > 1, we're in a reschedule and probably
# either hit NoValidHost or MaxRetriesExceeded. Either way,
# the build request should already be gone and we probably
# can't reach the API DB from the cell conductor.
if num_attempts <= 1:
try:
# If the BuildRequest stays around then instance
# show/lists will pull from it rather than the errored
# instance.
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
pass
self._cleanup_when_reschedule_fails(
context, instance, exc, request_spec,
requested_networks)
return
elevated = context.elevated()
for (instance, host_list) in six.moves.zip(instances, host_lists):
host = host_list.pop(0)
if is_reschedule:
# If this runs in the superconductor, the first instance will
# already have its resources claimed in placement. If this is a
# retry, though, this is running in the cell conductor, and we
# need to claim first to ensure that the alternate host still
# has its resources available. Note that there are schedulers
                # that don't support Placement, so we must assume that the host
# still available.
host_available = False
while host and not host_available:
if host.allocation_request:
alloc_req = jsonutils.loads(host.allocation_request)
else:
alloc_req = None
if alloc_req:
host_available = scheduler_utils.claim_resources(
elevated, self.report_client, spec_obj,
instance.uuid, alloc_req,
host.allocation_request_version)
else:
# Some deployments use different schedulers that do not
# use Placement, so they will not have an
# allocation_request to claim with. For those cases,
# there is no concept of claiming, so just assume that
# the host is valid.
host_available = True
if not host_available:
# Insufficient resources remain on that host, so
# discard it and try the next.
host = host_list.pop(0) if host_list else None
if not host_available:
# No more available hosts for retrying the build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instance.uuid})
exc = exception.MaxRetriesExceeded(reason=msg)
self._cleanup_when_reschedule_fails(
context, instance, exc, request_spec,
requested_networks)
return
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host.service_host))
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
instance.save()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# Populate the request_spec with the local_filter_props information
# like retries and limits. Note that at this point the request_spec
# could have come from a compute via reschedule and it would
# already have some things set, like scheduler_hints.
local_reqspec = objects.RequestSpec.from_primitives(
context, request_spec, local_filter_props)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# This is populated in scheduler_utils.populate_retry
num_attempts = local_filter_props.get('retry',
{}).get('num_attempts', 1)
if num_attempts <= 1:
# If this is a reschedule the instance is already mapped to
# this cell and the BuildRequest is already deleted so ignore
# the logic below.
inst_mapping = self._populate_instance_mapping(context,
instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in
# the API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
else:
# NOTE(lyarwood): If this is a reschedule then recreate any
# attachments that were previously removed when cleaning up
# after failures to spawn etc.
self._validate_existing_attachment_ids(context, instance, bdms)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host.service_host, image=image,
request_spec=local_reqspec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host.nodename,
limits=host.limits, host_list=host_list)
def _schedule_instances(self, context, request_spec,
instance_uuids=None, return_alternates=False):
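        """Return a list of host lists (Selection objects) from the scheduler."""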
scheduler_utils.setup_instance_group(context, request_spec)
host_lists = self.scheduler_client.select_destinations(context,
request_spec, instance_uuids, return_objects=True,
return_alternates=return_alternates)
return host_lists
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound as error:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
compute_utils.add_instance_fault_from_exc(
context, instance, error, sys.exc_info(),
fault_message=reason)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
if not request_spec:
# NOTE(sbauza): We were unable to find an original
# RequestSpec object - probably because the instance is
# old. We need to mock that the old way
filter_properties = {}
request_spec = scheduler_utils.build_request_spec(
image, [instance])
else:
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_filter_properties and populate_retry()
# accept it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
request_spec = request_spec.\
to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
instance_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Requesting cell %(cell)s while unshelving',
{'cell': instance_mapping.cell_mapping.identity},
instance=instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = (
instance_mapping.cell_mapping)
else:
request_spec.requested_destination = (
objects.Destination(
cell=instance_mapping.cell_mapping))
request_spec.ensure_project_id(instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
scheduler_utils.populate_filter_properties(
filter_properties, selection)
(host, node) = (selection.service_host, selection.nodename)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.unshelve_instance(
context, instance, host, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning("No valid host found for unshelve instance",
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error("Unshelve attempted but an error "
"has occurred", instance=instance)
else:
LOG.error('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED', instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
def _allocate_for_evacuate_dest_host(self, context, instance, host,
request_spec=None):
# The user is forcing the destination host and bypassing the
# scheduler. We need to copy the source compute node
# allocations in Placement to the destination compute node.
# Normally select_destinations() in the scheduler would do this
# for us, but when forcing the target host we don't call the
# scheduler.
source_node = None # This is used for error handling below.
try:
source_node = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
dest_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host, use_slave=True))
except exception.ComputeHostNotFound as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
if source_node:
LOG.warning('Specified host %s for evacuate was not '
'found.', host, instance=instance)
else:
LOG.warning('Source host %s and node %s for evacuate was '
'not found.', instance.host, instance.node,
instance=instance)
# TODO(mriedem): In Queens, call select_destinations() with a
# skip_filters=True flag so the scheduler does the work of
# claiming resources on the destination in Placement but still
# bypass the scheduler filters, which honors the 'force' flag
# in the API.
try:
scheduler_utils.claim_resources_on_destination(
context, self.report_client, instance, source_node, dest_node)
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning('Specified host %s for evacuate is '
'invalid.', host, instance=instance)
@targets_cell
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
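        """Rebuild an instance in place, or evacuate it to another host,
        consulting the scheduler unless a destination host was passed in.
        """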
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
# The host variable is passed in two cases:
# 1. rebuild - the instance.host is passed to rebuild on the
# same host and bypass the scheduler *unless* a new image
# was specified
# 2. evacuate with specified host and force=True - the specified
# host is passed and is meant to bypass the scheduler.
# NOTE(mriedem): This could be a lot more straight-forward if we
# had separate methods for rebuild and evacuate...
if host:
# We only create a new allocation on the specified host if
# we're doing an evacuate since that is a move operation.
if host != instance.host:
# If a destination host is forced for evacuate, create
# allocations against it in Placement.
self._allocate_for_evacuate_dest_host(
context, instance, host, request_spec)
else:
# At this point, the user is either:
#
# 1. Doing a rebuild on the same host (not evacuate) and
# specified a new image.
# 2. Evacuating and specified a host but are not forcing it.
#
# In either case, the API passes host=None but sets up the
# RequestSpec.requested_destination field for the specified
# host.
if not request_spec:
# NOTE(sbauza): We were unable to find an original
                    # RequestSpec object - probably because the instance is
                    # old. We need to mock that the old way
filter_properties = {'ignore_hosts': [instance.host]}
# build_request_spec expects a primitive image dict
image_meta = nova_object.obj_to_primitive(
instance.image_meta)
request_spec = scheduler_utils.build_request_spec(
image_meta, [instance])
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
elif recreate:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
request_spec.ignore_hosts = [instance.host]
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
try:
request_spec.ensure_project_id(instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
host, node, limits = (selection.service_host,
selection.nodename, selection.limits)
except (exception.NoValidHost,
exception.UnsupportedPolicyException) as ex:
if migration:
migration.status = 'error'
migration.save()
# Rollback the image_ref if a new one was provided (this
# only happens in the rebuild case, not evacuate).
if orig_image_ref and orig_image_ref != image_ref:
instance.image_ref = orig_image_ref
instance.save()
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': vm_states.ERROR,
'task_state': None}, ex, request_spec)
LOG.warning('Rebuild failed: %s',
six.text_type(ex), instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, ex, sys.exc_info())
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits,
request_spec=request_spec)
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
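    # Worked example (illustrative, not part of the original module): a blank,
    # locally-backed BDM with guest_format 'swap' and no explicit volume_size
    # inherits instance_type['swap']; any other blank local BDM inherits
    # instance_type['ephemeral_gb'].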
def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
return instance_block_device_mapping
def _create_tags(self, context, instance_uuid, tags):
"""Create the Tags objects in the db."""
if tags:
tag_list = [tag.tag for tag in tags]
instance_tags = objects.TagList.create(
context, instance_uuid, tag_list)
return instance_tags
else:
return tags
def _bury_in_cell0(self, context, request_spec, exc,
build_requests=None, instances=None,
block_device_mapping=None,
tags=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
schedule them somewhere real. Requests that don't yet have instances
will get a new instance, created in cell0. Instances that have not yet
been created will be created in cell0. All build requests are destroyed
after we're done. Failure to delete a build request will trigger the
instance deletion, just like the happy path in
schedule_and_build_instances() below.
"""
try:
cell0 = objects.CellMapping.get_by_uuid(
context, objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
LOG.error('No cell mapping found for cell0 while '
'trying to record scheduling failure. '
'Setup is incomplete.')
return
build_requests = build_requests or []
instances = instances or []
instances_by_uuid = {inst.uuid: inst for inst in instances}
for build_request in build_requests:
if build_request.instance_uuid not in instances_by_uuid:
# This is an instance object with no matching db entry.
instance = build_request.get_new_instance(context)
instances_by_uuid[instance.uuid] = instance
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances_by_uuid.values():
with obj_target_cell(instance, cell0) as cctxt:
instance.create()
# NOTE(mnaser): In order to properly clean-up volumes after
# being buried in cell0, we need to store BDMs.
if block_device_mapping:
self._create_block_device_mapping(
cell0, instance.flavor, instance.uuid,
block_device_mapping)
self._create_tags(cctxt, instance.uuid, tags)
# Use the context targeted to cell0 here since the instance is
# now in cell0.
self._set_vm_state_and_notify(
cctxt, instance.uuid, 'build_instances', updates,
exc, request_spec)
try:
# We don't need the cell0-targeted context here because the
# instance mapping is in the API DB.
inst_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell0
inst_mapping.save()
except exception.InstanceMappingNotFound:
pass
for build_request in build_requests:
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# Instance was deleted before we finished scheduling
inst = instances_by_uuid[build_request.instance_uuid]
with obj_target_cell(inst, cell0):
inst.destroy()
def schedule_and_build_instances(self, context, build_requests,
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping,
tags=None):
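        """Schedule the build requests, create the instances in their mapped
        cells and ask the selected hosts to build and run them; scheduling
        failures are buried in cell0.
        """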
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
host_lists = self._schedule_instances(context, request_specs[0],
instance_uuids, return_alternates=True)
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests,
block_device_mapping=block_device_mapping,
tags=tags)
return
host_mapping_cache = {}
cell_mapping_cache = {}
instances = []
for (build_request, request_spec, host_list) in six.moves.zip(
build_requests, request_specs, host_lists):
instance = build_request.get_new_instance(context)
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list[0]
# Convert host from the scheduler into a cell record
if host.service_host not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host.service_host)
host_mapping_cache[host.service_host] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host.service_host})
self._bury_in_cell0(
context, request_spec, exc,
build_requests=[build_request], instances=[instance],
block_device_mapping=block_device_mapping,
tags=tags)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
else:
host_mapping = host_mapping_cache[host.service_host]
cell = host_mapping.cell_mapping
# Before we create the instance, let's make one final check that
# the build request is still around and wasn't deleted by the user
# already.
try:
objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
except exception.BuildRequestNotFound:
# the build request is gone so we're done for this instance
LOG.debug('While scheduling instance, the build request '
'was already deleted.', instance=instance)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
rc = self.scheduler_client.reportclient
rc.delete_allocation_for_instance(context, instance.uuid)
continue
else:
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host.service_host))
with obj_target_cell(instance, cell):
instance.create()
instances.append(instance)
cell_mapping_cache[instance.uuid] = cell
# NOTE(melwitt): We recheck the quota after creating the
# objects to prevent users from allocating more resources
# than their allowed quota in the event of a race. This is
# configurable because it can be expensive if strict quota
# limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance.flavor, 0, 0,
orig_num_req=len(build_requests))
except exception.TooManyInstances as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(context, exc, instances,
build_requests,
request_specs,
block_device_mapping, tags,
cell_mapping_cache)
zipped = six.moves.zip(build_requests, request_specs, host_lists,
instances)
for (build_request, request_spec, host_list, instance) in zipped:
if instance is None:
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
continue
cell = cell_mapping_cache[instance.uuid]
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list.pop(0)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
filter_props = request_spec.to_legacy_filter_properties_dict()
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
host)
# TODO(melwitt): Maybe we should set_target_cell on the contexts
# once we map to a cell, and remove these separate with statements.
with obj_target_cell(instance, cell) as cctxt:
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
# This can lazy-load attributes on instance.
notifications.send_update_with_states(cctxt, instance, None,
vm_states.BUILDING, None, None, service="conductor")
objects.InstanceAction.action_start(
cctxt, instance.uuid, instance_actions.CREATE,
want_result=False)
instance_bdms = self._create_block_device_mapping(
cell, instance.flavor, instance.uuid, block_device_mapping)
instance_tags = self._create_tags(cctxt, instance.uuid, tags)
# TODO(Kevin Zheng): clean this up once instance.create() handles
# tags; we do this so the instance.create notification in
# build_and_run_instance in nova-compute doesn't lazy-load tags
instance.tags = instance_tags if instance_tags \
else objects.TagList()
# Update mapping for instance. Normally this check is guarded by
# a try/except but if we're here we know that a newer nova-api
# handled the build process and would have created the mapping
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
if not self._delete_build_request(
context, build_request, instance, cell, instance_bdms,
instance_tags):
# The build request was deleted before/during scheduling so
# the instance is gone and we don't have anything to build for
# this one.
continue
# NOTE(danms): Compute RPC expects security group names or ids
# not objects, so convert this to a list of names until we can
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell) as cctxt:
self.compute_rpcapi.build_and_run_instance(
cctxt, instance=instance, image=image,
request_spec=request_spec,
filter_properties=filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host.service_host, node=host.nodename,
limits=host.limits, host_list=host_list)
def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
request_specs, block_device_mappings, tags,
cell_mapping_cache):
for (instance, build_request, request_spec) in six.moves.zip(
instances, build_requests, request_specs):
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
if instance is None:
continue
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
cell = cell_mapping_cache[instance.uuid]
with try_target_cell(context, cell) as cctxt:
self._set_vm_state_and_notify(cctxt, instance.uuid,
'build_instances', updates, exc,
request_spec)
# In order to properly clean-up volumes when deleting a server in
# ERROR status with no host, we need to store BDMs in the same
# cell.
if block_device_mappings:
self._create_block_device_mapping(
cell, instance.flavor, instance.uuid,
block_device_mappings)
# Like BDMs, the server tags provided by the user when creating the
# server should be persisted in the same cell so they can be shown
# from the API.
if tags:
with nova_context.target_cell(context, cell) as cctxt:
self._create_tags(cctxt, instance.uuid, tags)
# NOTE(mdbooth): To avoid an incomplete instance record being
# returned by the API, the instance mapping must be
# created after the instance record is complete in
# the cell, and before the build request is
# destroyed.
# TODO(mnaser): The cell mapping should already be populated by
# this point to avoid setting it below here.
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
# Be paranoid about artifacts being deleted underneath us.
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
request_spec.destroy()
except exception.RequestSpecNotFound:
pass
def _delete_build_request(self, context, build_request, instance, cell,
instance_bdms, instance_tags):
"""Delete a build request after creating the instance in the cell.
This method handles cleaning up the instance in case the build request
is already deleted by the time we try to delete it.
:param context: the context of the request being handled
:type context: nova.context.RequestContext
:param build_request: the build request to delete
:type build_request: nova.objects.BuildRequest
:param instance: the instance created from the build_request
:type instance: nova.objects.Instance
:param cell: the cell in which the instance was created
:type cell: nova.objects.CellMapping
:param instance_bdms: list of block device mappings for the instance
:type instance_bdms: nova.objects.BlockDeviceMappingList
:param instance_tags: list of tags for the instance
:type instance_tags: nova.objects.TagList
:returns: True if the build request was successfully deleted, False if
the build request was already deleted and the instance is now gone.
"""
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# This indicates an instance deletion request has been
# processed, and the build should halt here. Clean up the
# bdm, tags and instance record.
with obj_target_cell(instance, cell) as cctxt:
with compute_utils.notify_about_instance_delete(
self.notifier, cctxt, instance):
try:
instance.destroy()
except exception.InstanceNotFound:
pass
except exception.ObjectActionError:
# NOTE(melwitt): Instance became scheduled during
# the destroy, "host changed". Refresh and re-destroy.
try:
instance.refresh()
instance.destroy()
except exception.InstanceNotFound:
pass
for bdm in instance_bdms:
with obj_target_cell(bdm, cell):
try:
bdm.destroy()
except exception.ObjectActionError:
pass
if instance_tags:
with try_target_cell(context, cell) as target_ctxt:
try:
objects.TagList.destroy(target_ctxt, instance.uuid)
except exception.InstanceNotFound:
pass
return False
return True
|
the-stack_106_22781 | from parser_class import parse_all
from gen_sent_event import gen_sent
from mem import MemoryGraph
from manipState import *
from storyenv import *
import random
import time, pickle
from datetime import datetime
#########################
# TO BE IMPLEMENTED #
#########################
local_story = storyEnv()
memory = MemoryGraph()
sentence_state_dict = dict()
# def add_event(frame, pred):
# local_story.step_placeholder(pred)
# return None
def filter_event(utterance, orig_state, found, frame_dict, preps):
valid, dict = local_story.examine(utterance, orig_state, found, frame_dict, preps)
print(dict)
sentence_state_dict.update(dict)
return valid
def append_sentence(utterance, orig_state):
print("WITHIN APPEND ---", utterance)
global first_time
try:
if (utterance.split(" "))[0] == "START":
utterance = " ".join(utterance.split(" ")[1:])
print("NEW UTTERANCE", utterance)
# Obtain NER
# utterance = local_story.fill_sentence(utterance)
found, frame_dict, preps = parse_all(utterance)
print("FOUND", found, frame_dict, preps)
for v in frame_dict:
if 'SYNTAX' in frame_dict[v]:
print("------------ SYNTAX --------------")
print(v, frame_dict[v]['SYNTAX'])
print("------------ SYNTAX --------------")
if first_time:
NERs = frame_dict['NER']
for type, name in NERs:
p_name = type + str(len(local_story.ner_memory))
local_story.ner_memory.update({p_name: name})
local_story.rev_ner.update({name: p_name})
#######################################
# FILL THE DICT OF HUMAN NAMES ######
######################################
    except Exception:
return False
if not found:
return False
# print("Found", found)
# print("Frame Dict", frame_dict)
# input()
correct = filter_event(utterance, orig_state, found, frame_dict, preps)
if not correct:
print("Not Correct!! --->", frame_dict, utterance)
return correct
def select_utterance(utterances):
valid = []
invalid = []
orig_state = local_story.state
for utterance in utterances:
utterance = utterance.strip("\n")
utterance = utterance.strip()
flag = True
if "." in utterance:
utterance = utterance[: utterance.index(".") + 1]
print(utterance)
ms = set(re.findall('PERSON[0-9]+', utterance))
ks = set(local_story.ner_memory.keys())
len_over = len(ms.intersection(ks))
# Compare with every sentence and check repetition
        percents = [
            float(len(set(sent.lower().split(" ")).intersection(utterance.lower().split(" "))))
            / len(utterance.lower().split(" "))
            for sent in local_story.sentence_history
        ]
        if len_over == 0 or len(ms) == 0 or max(percents, default=0.0) > 0.6:
flag = True
else:
flag = False
if "." not in utterance or utterance in local_story.sentence_history or flag:
correct = False
else:
correct = append_sentence(utterance, orig_state)
print("Correctness in Selection:", correct)
print("Current Utterance:", utterance)
if correct:
valid.append(utterance)
else:
invalid.append(utterance)
########################################
# FILTER THE SENTENCES ###########
#################################
########################################
# UPDATE THE NER DICT AND REPLACE ######
########################################
    log_file.write("VALID SENTENCES: " + str(valid) + "\n")
    log_file.write("INVALID SENTENCES: " + str(invalid) + "\n")
    # Guard against an empty candidate list so random.choice() cannot raise.
    if not valid:
        return False, ""
    choice = random.choice(valid)
    local_story.state = sentence_state_dict[choice]
    print("BIG DICT", sentence_state_dict[choice].returnDictionary())
    choice = local_story.fill_sentence(choice)
    return True, choice
# while not correct and count <= len(utterances):
# utterance = random.choice(utterances)
# utterance = utterance.strip("\n")
# if "." not in utterance or len(utterance) < 10 or utterance in local_story.sentence_history:
# correct = False
# else:
# utterance = utterance[: utterance.index(".") + 1]
# correct = append_sentence(utterance, turn)
# count += 1
# return count < len(utterances) and correct, utterance
def append_to_file(utterance, turn, code=0):
if code == 0:
log_file.write(turn + " >> " + utterance + "\n")
log_file.write(str(local_story.state.returnDictionary()) + "\n")
else:
log_file.write(utterance + "\n")
LOG_PATH = 'logs/'
log_file = open(LOG_PATH + str(datetime.fromtimestamp(time.time()).date()) + ".txt", "w+")
log_file.write("=======================================" + "\n")
start_sents = [x.strip() for x in open("data/start_sents.txt", 'r').readlines()]
pick = open("data/names-percentage.pkl", 'rb')
gender_list = pickle.load(pick) # gender_list[name] = set(genders)
pick.close()
del (pick)
# memory = MemoryGraph(gender_list)
memory = MemoryGraph()
sentence_memory = []
first_time = True
print("Let's write a Sci-Fi story together!")
utterance = ""
starter = input("Would you like me to start? (Y/N) >> ").lower() in ['y', 'yes']
correct = False
while not correct:
if not starter:
if first_time:
start_sent = input("Please start our Sci-Fi story >> ").strip("\n")
turn = "User"
else:
start_sent = input("Please try again >> ").strip("\n")
turn = "User"
else:
# Load from the starter sentences
rando = random.choice(start_sents)
# Generalized event, event, picked sentence
gen_event, event, picked_sent = rando.split("|||")
start_sent = picked_sent
turn = "AI"
utterance = "START " + start_sent
# Add sentence to event memory
correct = append_sentence(utterance, local_story.state)
utterance = local_story.rep_sentence(utterance)
print("UTTERS", utterance)
local_story.add_to_mem(utterance)
append_to_file(utterance, turn)
first_time = False
# If the start was picked, let the user go next
# if starter:
# turn = "User"
# else:
# turn = "AI"
turn = "AI"
while utterance != "q" and utterance != "quit":
# if turn == "User":
# utterance = input("Please input your sentence >> ").strip("\n")
# append_sentence(utterance, turn)
# print(turn + " >> " + utterance)
# turn = "AI"
# else:
print("Generating Sentence...")
# The list of sentences
found = False
sent = ""
while not found:
ut_hist = local_story.most_recent_mem()
print("SENTENCE HISTORY =====", ut_hist)
utterances = gen_sent(ut_hist)
found, sent = select_utterance(utterances)
utterance = sent
if turn == "User" or (turn == "AI" and found):
local_story.add_to_mem(utterance)
append_to_file(utterance, turn)
print(turn + " >> " + utterance)
print("TURN IS DONE")
input()
# turn = "User"
# Eventify the utterance
print("I hope you enjoyed our story! Bye!")
log_file.close()
|
the-stack_106_22785 | import nltk
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import pickle
import numpy as np
import pandas as pd
lemmatizer = WordNetLemmatizer()
'''
Sentiment140 CSV columns:
    polarity (0 = negative, 2 = neutral, 4 = positive)
    id
    date
    query
    user
    tweet
'''
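# Worked example (editor illustration; the id/date values below are made up):
# a raw training row such as
#   "0","1467810369","Mon Apr 06 22:19:45 PDT 2009","NO_QUERY","some_user","is upset about the weather"
# is rewritten by init_process below as
#   [1, 0]:::is upset about the weather
# i.e. a one-hot polarity label joined to the tweet text by ':::'.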
def init_process(fin,fout):
outfile = open(fout,'a')
with open(fin, buffering=200000, encoding='latin-1') as f:
try:
for line in f:
line = line.replace('"','')
initial_polarity = line.split(',')[0]
if initial_polarity == '0':
initial_polarity = [1,0]
elif initial_polarity == '4':
initial_polarity = [0,1]
tweet = line.split(',')[-1]
outline = str(initial_polarity)+':::'+tweet
outfile.write(outline)
except Exception as e:
print(str(e))
outfile.close()
init_process('training.1600000.processed.noemoticon.csv','train_set.csv')
init_process('testdata.manual.2009.06.14.csv','test_set.csv')
def create_lexicon(fin):
lexicon = []
with open(fin, 'r', buffering=100000, encoding='latin-1') as f:
try:
counter = 1
content = ''
for line in f:
counter += 1
if (counter/2500.0).is_integer():
tweet = line.split(':::')[1]
content += ' '+tweet
words = word_tokenize(content)
words = [lemmatizer.lemmatize(i) for i in words]
lexicon = list(set(lexicon + words))
print(counter, len(lexicon))
except Exception as e:
print(str(e))
with open('lexicon-2500-2638.pickle','wb') as f:
pickle.dump(lexicon,f)
create_lexicon('train_set.csv')
def convert_to_vec(fin,fout,lexicon_pickle):
with open(lexicon_pickle,'rb') as f:
lexicon = pickle.load(f)
outfile = open(fout,'a')
with open(fin, buffering=20000, encoding='latin-1') as f:
counter = 0
for line in f:
counter +=1
label = line.split(':::')[0]
tweet = line.split(':::')[1]
current_words = word_tokenize(tweet.lower())
current_words = [lemmatizer.lemmatize(i) for i in current_words]
features = np.zeros(len(lexicon))
for word in current_words:
if word.lower() in lexicon:
index_value = lexicon.index(word.lower())
# OR DO +=1, test both
features[index_value] += 1
features = list(features)
outline = str(features)+'::'+str(label)+'\n'
outfile.write(outline)
print(counter)
convert_to_vec('test_set.csv','processed-test-set.csv','lexicon-2500-2638.pickle')
def shuffle_data(fin):
df = pd.read_csv(fin, error_bad_lines=False)
df = df.iloc[np.random.permutation(len(df))]
print(df.head())
df.to_csv('train_set_shuffled.csv', index=False)
shuffle_data('train_set.csv')
def create_test_data_pickle(fin):
feature_sets = []
labels = []
counter = 0
with open(fin, buffering=20000) as f:
for line in f:
try:
features = list(eval(line.split('::')[0]))
label = list(eval(line.split('::')[1]))
feature_sets.append(features)
labels.append(label)
counter += 1
except:
pass
print(counter)
feature_sets = np.array(feature_sets)
labels = np.array(labels)
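    # Editor assumption: the function name suggests the arrays should be persisted,
    # but the original ends without saving them. A minimal sketch of that step:
    with open('processed-test-set.pickle', 'wb') as f:
        pickle.dump((feature_sets, labels), f)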
create_test_data_pickle('processed-test-set.csv') |
the-stack_106_22787 | """
Contains sync flow implementation for Auto Dependency Layer
"""
import hashlib
import logging
import os
import tempfile
import uuid
from typing import List, TYPE_CHECKING, Dict, cast, Optional
from samcli.lib.bootstrap.nested_stack.nested_stack_builder import NestedStackBuilder
from samcli.lib.bootstrap.nested_stack.nested_stack_manager import NestedStackManager
from samcli.lib.build.build_graph import BuildGraph
from samcli.lib.package.utils import make_zip
from samcli.lib.providers.provider import Function, Stack
from samcli.lib.providers.sam_function_provider import SamFunctionProvider
from samcli.lib.sync.exceptions import (
MissingFunctionBuildDefinition,
InvalidRuntimeDefinitionForFunction,
NoLayerVersionsFoundError,
)
from samcli.lib.sync.flows.layer_sync_flow import AbstractLayerSyncFlow
from samcli.lib.sync.flows.zip_function_sync_flow import ZipFunctionSyncFlow
from samcli.lib.sync.sync_flow import SyncFlow
from samcli.lib.utils.hash import file_checksum
if TYPE_CHECKING: # pragma: no cover
from samcli.commands.deploy.deploy_context import DeployContext
from samcli.commands.build.build_context import BuildContext
LOG = logging.getLogger(__name__)
class AutoDependencyLayerSyncFlow(AbstractLayerSyncFlow):
"""
Auto Dependency Layer, Layer Sync flow.
It creates auto dependency layer files out of function dependencies, and syncs layer code and then updates
the function configuration with new layer version
This flow is not instantiated from factory method, please see AutoDependencyLayerParentSyncFlow
"""
_function_identifier: str
_build_graph: Optional[BuildGraph]
def __init__(
self,
function_identifier: str,
build_graph: BuildGraph,
build_context: "BuildContext",
deploy_context: "DeployContext",
physical_id_mapping: Dict[str, str],
stacks: List[Stack],
):
super().__init__(
NestedStackBuilder.get_layer_logical_id(function_identifier),
build_context,
deploy_context,
physical_id_mapping,
stacks,
)
self._function_identifier = function_identifier
self._build_graph = build_graph
def set_up(self) -> None:
super().set_up()
# find layer's physical id
layer_name = NestedStackBuilder.get_layer_name(self._deploy_context.stack_name, self._function_identifier)
layer_versions = self._lambda_client.list_layer_versions(LayerName=layer_name).get("LayerVersions", [])
if not layer_versions:
raise NoLayerVersionsFoundError(layer_name)
self._layer_arn = layer_versions[0].get("LayerVersionArn").rsplit(":", 1)[0]
def gather_resources(self) -> None:
function_build_definitions = cast(BuildGraph, self._build_graph).get_function_build_definitions()
if not function_build_definitions:
raise MissingFunctionBuildDefinition(self._function_identifier)
self._artifact_folder = NestedStackManager.update_layer_folder(
self._build_context.build_dir,
function_build_definitions[0].dependencies_dir,
self._layer_identifier,
self._function_identifier,
self._get_compatible_runtimes()[0],
)
zip_file_path = os.path.join(tempfile.gettempdir(), "data-" + uuid.uuid4().hex)
self._zip_file = make_zip(zip_file_path, self._artifact_folder)
self._local_sha = file_checksum(cast(str, self._zip_file), hashlib.sha256())
def _get_dependent_functions(self) -> List[Function]:
function = SamFunctionProvider(cast(List[Stack], self._stacks)).get(self._function_identifier)
return [function] if function else []
def _get_compatible_runtimes(self) -> List[str]:
function = SamFunctionProvider(cast(List[Stack], self._stacks)).get(self._function_identifier)
if not function or not function.runtime:
raise InvalidRuntimeDefinitionForFunction(self._function_identifier)
return [function.runtime]
class AutoDependencyLayerParentSyncFlow(ZipFunctionSyncFlow):
"""
Parent sync flow for auto dependency layer
It builds function with regular ZipFunctionSyncFlow, and then adds _AutoDependencyLayerSyncFlow to start syncing
dependency layer.
"""
def gather_dependencies(self) -> List[SyncFlow]:
"""
Return auto dependency layer sync flow along with parent dependencies
"""
parent_dependencies = super().gather_dependencies()
function_build_definitions = cast(BuildGraph, self._build_graph).get_function_build_definitions()
if not function_build_definitions:
raise MissingFunctionBuildDefinition(self._function.name)
# don't queue up auto dependency layer, if dependencies are not changes
need_dependency_layer_sync = function_build_definitions[0].download_dependencies
if need_dependency_layer_sync:
parent_dependencies.append(
AutoDependencyLayerSyncFlow(
self._function_identifier,
cast(BuildGraph, self._build_graph),
self._build_context,
self._deploy_context,
self._physical_id_mapping,
cast(List[Stack], self._stacks),
)
)
return parent_dependencies
@staticmethod
def _combine_dependencies() -> bool:
return False
|
the-stack_106_22788 | """
This module implements sums and products containing the Kronecker Delta function.
References
==========
- http://mathworld.wolfram.com/KroneckerDelta.html
"""
from __future__ import print_function, division
from sympy.core import Add, Mul, S, Dummy
from sympy.core.cache import cacheit
from sympy.core.compatibility import default_sort_key
from sympy.functions import KroneckerDelta, Piecewise, piecewise_fold
from sympy.sets import Interval
@cacheit
def _expand_delta(expr, index):
"""
Expand the first Add containing a simple KroneckerDelta.
"""
if not expr.is_Mul:
return expr
delta = None
func = Add
terms = [S.One]
for h in expr.args:
if delta is None and h.is_Add and _has_simple_delta(h, index):
delta = True
func = h.func
terms = [terms[0]*t for t in h.args]
else:
terms = [t*h for t in terms]
return func(*terms)
@cacheit
def _extract_delta(expr, index):
"""
Extract a simple KroneckerDelta from the expression.
Returns the tuple ``(delta, newexpr)`` where:
- ``delta`` is a simple KroneckerDelta expression if one was found,
or ``None`` if no simple KroneckerDelta expression was found.
- ``newexpr`` is a Mul containing the remaining terms; ``expr`` is
returned unchanged if no simple KroneckerDelta expression was found.
Examples
========
>>> from sympy import KroneckerDelta
>>> from sympy.concrete.delta import _extract_delta
>>> from sympy.abc import x, y, i, j, k
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), i)
(KroneckerDelta(i, j), 4*x*y)
>>> _extract_delta(4*x*y*KroneckerDelta(i, j), k)
(None, 4*x*y*KroneckerDelta(i, j))
See Also
========
sympy.functions.special.tensor_functions.KroneckerDelta
deltaproduct
deltasummation
"""
if not _has_simple_delta(expr, index):
return (None, expr)
if isinstance(expr, KroneckerDelta):
return (expr, S.One)
if not expr.is_Mul:
raise ValueError("Incorrect expr")
delta = None
terms = []
for arg in expr.args:
if delta is None and _is_simple_delta(arg, index):
delta = arg
else:
terms.append(arg)
return (delta, expr.func(*terms))
@cacheit
def _has_simple_delta(expr, index):
"""
Returns True if ``expr`` is an expression that contains a KroneckerDelta
that is simple in the index ``index``, meaning that this KroneckerDelta
is nonzero for a single value of the index ``index``.
"""
if expr.has(KroneckerDelta):
if _is_simple_delta(expr, index):
return True
if expr.is_Add or expr.is_Mul:
for arg in expr.args:
if _has_simple_delta(arg, index):
return True
return False
@cacheit
def _is_simple_delta(delta, index):
"""
Returns True if ``delta`` is a KroneckerDelta and is nonzero for a single
value of the index ``index``.
"""
if isinstance(delta, KroneckerDelta) and delta.has(index):
p = (delta.args[0] - delta.args[1]).as_poly(index)
if p:
return p.degree() == 1
return False
@cacheit
def _remove_multiple_delta(expr):
"""
Evaluate products of KroneckerDelta's.
"""
from sympy.solvers import solve
if expr.is_Add:
return expr.func(*list(map(_remove_multiple_delta, expr.args)))
if not expr.is_Mul:
return expr
eqs = []
newargs = []
for arg in expr.args:
if isinstance(arg, KroneckerDelta):
eqs.append(arg.args[0] - arg.args[1])
else:
newargs.append(arg)
if not eqs:
return expr
solns = solve(eqs, dict=True)
if len(solns) == 0:
return S.Zero
elif len(solns) == 1:
for key in solns[0].keys():
newargs.append(KroneckerDelta(key, solns[0][key]))
expr2 = expr.func(*newargs)
if expr != expr2:
return _remove_multiple_delta(expr2)
return expr
@cacheit
def _simplify_delta(expr):
"""
Rewrite a KroneckerDelta's indices in its simplest form.
"""
from sympy.solvers import solve
if isinstance(expr, KroneckerDelta):
try:
slns = solve(expr.args[0] - expr.args[1], dict=True)
if slns and len(slns) == 1:
return Mul(*[KroneckerDelta(*(key, value))
for key, value in slns[0].items()])
except NotImplementedError:
pass
return expr
@cacheit
def deltaproduct(f, limit):
"""
Handle products containing a KroneckerDelta.
See Also
========
deltasummation
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.products.product
"""
from sympy.concrete.products import product
if ((limit[2] - limit[1]) < 0) == True:
return S.One
if not f.has(KroneckerDelta):
return product(f, limit)
if f.is_Add:
# Identify the term in the Add that has a simple KroneckerDelta
delta = None
terms = []
for arg in sorted(f.args, key=default_sort_key):
if delta is None and _has_simple_delta(arg, limit[0]):
delta = arg
else:
terms.append(arg)
newexpr = f.func(*terms)
k = Dummy("kprime", integer=True)
if isinstance(limit[1], int) and isinstance(limit[2], int):
result = deltaproduct(newexpr, limit) + sum([
deltaproduct(newexpr, (limit[0], limit[1], ik - 1)) *
delta.subs(limit[0], ik) *
deltaproduct(newexpr, (limit[0], ik + 1, limit[2])) for ik in range(int(limit[1]), int(limit[2] + 1))]
)
else:
result = deltaproduct(newexpr, limit) + deltasummation(
deltaproduct(newexpr, (limit[0], limit[1], k - 1)) *
delta.subs(limit[0], k) *
deltaproduct(newexpr, (limit[0], k + 1, limit[2])),
(k, limit[1], limit[2]),
no_piecewise=_has_simple_delta(newexpr, limit[0])
)
return _remove_multiple_delta(result)
delta, _ = _extract_delta(f, limit[0])
if not delta:
g = _expand_delta(f, limit[0])
if f != g:
from sympy import factor
try:
return factor(deltaproduct(g, limit))
except AssertionError:
return deltaproduct(g, limit)
return product(f, limit)
return _remove_multiple_delta(f.subs(limit[0], limit[1])*KroneckerDelta(limit[2], limit[1])) + \
S.One*_simplify_delta(KroneckerDelta(limit[2], limit[1] - 1))
@cacheit
def deltasummation(f, limit, no_piecewise=False):
"""
Handle summations containing a KroneckerDelta.
The idea for summation is the following:
- If we are dealing with a KroneckerDelta expression, i.e. KroneckerDelta(g(x), j),
we try to simplify it.
If we could simplify it, then we sum the resulting expression.
We already know we can sum a simplified expression, because only
simple KroneckerDelta expressions are involved.
If we couldn't simplify it, there are two cases:
1) The expression is a simple expression: we return the summation,
taking care if we are dealing with a Derivative or with a proper
KroneckerDelta.
2) The expression is not simple (i.e. KroneckerDelta(cos(x))): we can do
nothing at all.
- If the expr is a multiplication expr having a KroneckerDelta term:
First we expand it.
If the expansion did work, then we try to sum the expansion.
If not, we try to extract a simple KroneckerDelta term, then we have two
cases:
1) We have a simple KroneckerDelta term, so we return the summation.
2) We didn't have a simple term, but we do have an expression with
simplified KroneckerDelta terms, so we sum this expression.
Examples
========
>>> from sympy import oo, symbols
>>> from sympy.abc import k
>>> i, j = symbols('i, j', integer=True, finite=True)
>>> from sympy.concrete.delta import deltasummation
>>> from sympy import KroneckerDelta, Piecewise
>>> deltasummation(KroneckerDelta(i, k), (k, -oo, oo))
1
>>> deltasummation(KroneckerDelta(i, k), (k, 0, oo))
Piecewise((1, i >= 0), (0, True))
>>> deltasummation(KroneckerDelta(i, k), (k, 1, 3))
Piecewise((1, (i >= 1) & (i <= 3)), (0, True))
>>> deltasummation(k*KroneckerDelta(i, j)*KroneckerDelta(j, k), (k, -oo, oo))
j*KroneckerDelta(i, j)
>>> deltasummation(j*KroneckerDelta(i, j), (j, -oo, oo))
i
>>> deltasummation(i*KroneckerDelta(i, j), (i, -oo, oo))
j
See Also
========
deltaproduct
sympy.functions.special.tensor_functions.KroneckerDelta
sympy.concrete.sums.summation
"""
from sympy.concrete.summations import summation
from sympy.solvers import solve
if ((limit[2] - limit[1]) < 0) == True:
return S.Zero
if not f.has(KroneckerDelta):
return summation(f, limit)
x = limit[0]
g = _expand_delta(f, x)
if g.is_Add:
return piecewise_fold(
g.func(*[deltasummation(h, limit, no_piecewise) for h in g.args]))
# try to extract a simple KroneckerDelta term
delta, expr = _extract_delta(g, x)
if (delta is not None) and (delta.delta_range is not None):
dinf, dsup = delta.delta_range
if (limit[1] - dinf <= 0) == True and (limit[2] - dsup >= 0) == True:
no_piecewise = True
if not delta:
return summation(f, limit)
solns = solve(delta.args[0] - delta.args[1], x)
if len(solns) == 0:
return S.Zero
elif len(solns) != 1:
from sympy.concrete.summations import Sum
return Sum(f, limit)
value = solns[0]
if no_piecewise:
return expr.subs(x, value)
return Piecewise(
(expr.subs(x, value), Interval(*limit[1:3]).as_relational(value)),
(S.Zero, True)
)
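# Editorial usage sketch (not part of the upstream SymPy module). ``deltaproduct``
# has no doctest above, so a minimal example of its behaviour is given here:
#
#   >>> from sympy import KroneckerDelta, symbols
#   >>> from sympy.concrete.delta import deltaproduct
#   >>> x = symbols('x')
#   >>> i, k = symbols('i k', integer=True)
#   >>> deltaproduct(x, (k, 1, 3))  # no delta, falls back to an ordinary product
#   x**3
#   >>> deltaproduct(KroneckerDelta(i, k), (k, 1, 3))  # the delta cannot hold for every k
#   0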
|
the-stack_106_22789 | import datetime
import random
from faker.utils.checksums import calculate_luhn
from .. import Provider as SsnProvider
class Provider(SsnProvider):
@staticmethod
def _org_to_vat(org_id):
org_id = org_id.replace('-', '')
if len(org_id) == 10:
org_id = '16' + org_id
return 'SE{}01'.format(org_id)
def ssn(self, min_age=18, max_age=90, long=False, dash=True):
"""
Returns a 10 or 12 (long=True) digit Swedish SSN, "Personnummer".
It consists of 10 digits in the form (CC)YYMMDD-SSSQ, where
YYMMDD is the date of birth, SSS is a serial number
and Q is a control character (Luhn checksum).
Specifying dash=False will give a purely numeric string, suitable
for writing direct to databases.
http://en.wikipedia.org/wiki/Personal_identity_number_(Sweden)
"""
age = datetime.timedelta(
days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.datetime.now() - age
yr_fmt = '%Y' if long else '%y'
pnr_date = birthday.strftime('{}%m%d'.format(yr_fmt))
chk_date = pnr_date[2:] if long else pnr_date
suffix = str(self.generator.random.randrange(0, 999)).zfill(3)
luhn_checksum = str(calculate_luhn(chk_date + suffix))
hyphen = '-' if dash else ''
pnr = '{}{}{}{}'.format(pnr_date, hyphen, suffix, luhn_checksum)
return pnr
ORG_ID_DIGIT_1 = (2, 5, 7, 8, 9)
def org_id(self, long=False, dash=True):
"""
Returns a 10 or 12 digit Organisation ID for a Swedish
company.
(In Swedish) https://sv.wikipedia.org/wiki/Organisationsnummer
"""
first_digits = list(self.ORG_ID_DIGIT_1)
random.shuffle(first_digits)
onr_one = str(first_digits.pop())
onr_one += str(self.generator.random.randrange(0, 9)).zfill(1)
onr_one += str(self.generator.random.randrange(20, 99))
onr_one += str(self.generator.random.randrange(0, 99)).zfill(2)
onr_two = str(self.generator.random.randrange(0, 999)).zfill(3)
luhn_checksum = str(calculate_luhn(onr_one + onr_two))
prefix = '16' if long else ''
hyphen = '-' if dash else ''
org_id = '{}{}{}{}{}'.format(prefix, onr_one, hyphen, onr_two, luhn_checksum)
return org_id
def vat_id(self):
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Swedish VAT ID, based on a valid Org ID
"""
oid = self.org_id(long=True, dash=False)
vid = Provider._org_to_vat(oid)
return vid
def org_and_vat_id(self, long=False, dash=True):
"""Returns matching Org ID and VAT number"""
oid = self.org_id(long=long, dash=dash)
vid = Provider._org_to_vat(oid)
return oid, vid
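# Editorial usage sketch (assumption: the provider is accessed through the
# standard Faker entry point rather than by instantiating it directly):
#
#   from faker import Faker
#   fake = Faker('sv_SE')
#   fake.ssn()              # e.g. '670919-9532' in YYMMDD-SSSQ form, Luhn-checked
#   fake.org_id(long=True)  # 12-digit organisation id with a '16' prefix
#   fake.org_and_vat_id()   # matching ('XXXXXX-XXXX', 'SE16XXXXXXXXXX01') pair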
|
the-stack_106_22790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of restful-distributed-lock-manager released under the MIT license.
# See the LICENSE file for more information.
from rdlm.request_handler import RequestHandler, admin_authenticated
from rdlm.lock import LOCK_MANAGER_INSTANCE
from rdlm.hal import Resource
class ResourceHandler(RequestHandler):
"""Class which handles the /resources/[resource] URL"""
SUPPORTED_METHODS = ['GET', 'DELETE']
@admin_authenticated
def delete(self, name):
'''
@summary: deals with DELETE request (deleting the given resource)
@param name: name of the resource
'''
res = LOCK_MANAGER_INSTANCE.remove_resource(name)
if res:
self.send_status(204)
else:
self.send_error(404, message="no resource (with locks) found")
@admin_authenticated
def get(self, name):
'''
@summary: deals with GET request (getting a JSON HAL of the resource)
@param name: name of the resource
'''
tmp = LOCK_MANAGER_INSTANCE.get_resource_as_dict(name)
resource = Resource(self.reverse_url("resource", name), {"name": name})
if tmp:
for lock_dict in tmp['locks']:
lock = Resource(self.reverse_url("lock", name, lock_dict['uid']), lock_dict)
resource.add_embedded_resource("locks", lock)
self.set_header("Content-Type", "application/hal+json")
self.finish(resource.to_json())
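# Editorial usage sketch (assumptions: the handlers are mounted under
# /resources/<name> as described in the class docstring, the service listens on
# localhost:8888, and admin credentials are passed via HTTP basic auth):
#
#   # Inspect a resource and its locks as application/hal+json
#   curl -u admin:password http://localhost:8888/resources/my_resource
#
#   # Delete the resource and every lock held on it (204 on success, 404 otherwise)
#   curl -u admin:password -X DELETE http://localhost:8888/resources/my_resource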
|
the-stack_106_22791 | # __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
from django.conf.urls.defaults import url, patterns
urlpatterns = patterns(
'mapFastenApp.views',
url(r'^$', 'home',
{}, 'mapFasten_home'),
)
|
the-stack_106_22795 | import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform
def apply_sequence_kernel(seq_emb, kernel_radius=1, epsilon=0.01):
    '''
    Applies fuzzy graph kernels to sequence vectors. This is used to naively propagate
    local information from neighbors within the specified kernel radius. Each kernel
    row sums to 1.
    '''
    # Generate sequence distance matrix
    kernels = squareform(pdist(np.expand_dims(np.arange(len(seq_emb)),axis=-1)))
    # Calculate graph kernels with cutoff of epsilon at the kernel_radius.
    kernels = np.exp((kernels*np.log(epsilon))/kernel_radius)
    kernels[kernels<epsilon] = 0
# Normalize kernels by dividing by row sums.
kernels = kernels / np.expand_dims(np.sum(kernels, axis=-1), axis=-1)
# Updated sequence embeddings using kernel
seq_emb_prime = np.dot(kernels, seq_emb)
return seq_emb_prime
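# Editorial sketch (toy data, purely illustrative): each output row is a
# kernel-weighted average of its sequence neighbours, so the shape is preserved.
#
#   emb = np.random.rand(10, 32)  # 10-step sequence of 32-d vectors
#   smoothed = apply_sequence_kernel(emb, kernel_radius=2)
#   smoothed.shape                # -> (10, 32)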
def distance_matrix(s1, s2):
'''
    Calculates the pairwise cosine similarity matrix (1 - cosine distance) between
    two sequences, with values ranging from -1.0 to 1.0.
'''
a = 1 - cdist(s1,s2,'cosine')
return a
def scoring_matrix(a, wi=1.0, wj=1.0, epsilon=0.01):
'''
Method generates scoring matrix used to align sequences. This algorithm is
inspired by the Smith-Waterman local sequence-alignment algorithm used
in bioinformatics. Source: https://en.wikipedia.org/wiki/Smith–Waterman_algorithm
    The gap weights are adaptively assigned according to fuzzy graph kernels defined
    by wi, wj and epsilon. Gap weights vary from (0.0, 0.0) to (wi, wj) where
small gaps are closer to 0.0.
'''
# Pad distance matrix
sa = np.pad(a, ((1,0),(1,0)), 'constant', constant_values=0)
# Calculate gap weight kernels
dims = a.shape
wi_ = [wi*np.exp((i*np.log(epsilon))/dims[0]) for i in reversed(range(dims[0]+1))]
wj_ = [wj*np.exp((j*np.log(epsilon))/dims[1]) for j in reversed(range(dims[1]+1))]
# Updated scoring matrix according to policy
for i in range(1,dims[0]+1):
for j in range(1,dims[1]+1):
inputs = [(sa[i,j]+sa[i-1,j-1]), # Top Left + Bottom Right
np.max(sa[:i,j])-wi_[i-np.argmax(sa[:i,j])], # Max of all previous values in column - column gap weight
np.max(sa[i,:j])-wj_[j-np.argmax(sa[i,:j])], # Max of all previous values in row - row gap weight
0] # Zero
sa[i,j] = np.max(inputs)
return sa
def traceback(sa, k=100):
'''
    Performs traceback path finding on the scoring matrix to find the first k alignments
of length greater than 1.
'''
# Sort scoring matrix values in descending order; Save coordinates in look up table.
sorted_args = np.argsort(sa.flatten())[::-1]
coords = [(i,j) for i in range(sa.shape[0]) for j in range(sa.shape[1])]
# Perform traceback until all coords have been visted
tracebacks = []
seen = []
route = []
for ind in sorted_args:
i, j = coords[ind]
flag = True
score = sa[i,j]
while(flag):
# Route connects to other traceback
if (i,j) in seen:
tracebacks.append([route,(i,j)])
route = []
break
route.append((i,j))
seen.append((i,j))
# Route terminates at zero
if sa[i,j] == 0:
tracebacks.append([route,[]])
route = []
break
# Select path direction
kernel = [sa[i-1,j],sa[i,j-1],sa[i-1,j-1],sa[i,j]]
m = np.argmax(kernel)
# Move to next gap
if m == 0:
# Terminate route if score is less than gap value
if score > sa[i-1,j]:
i -= 1
score += sa[i,j]
else:
tracebacks.append([route,[]])
route = []
break
elif m==1:
# Terminate route if score is less than gap value
if score > sa[i,j-1]:
j -= 1
score += sa[i,j]
else:
tracebacks.append([route,[]])
route = []
break
# Move to next hit
elif m==2:
i -= 1
j -= 1
score += sa[i,j]
elif m==3:
i -= 1
j -= 1
score += sa[i,j]
# Return alignments with length greater than 1 in order as discovered
if k == None: k = len(tracebacks)
alignments = []
for _ in tracebacks:
if len(_[0]) > 1:
r = [(i-1,j-1) for i,j in _[0]]
alignments.append(r[:-1])
if len(alignments) == k: break
return alignments
def score_alignment(alignment, s1, s2, k):
'''
    This method is used to calculate a global score for alignments, to sort
    alignments from multiple search queries of the same topic. This is still
    a work in progress, but has shown good preliminary results on the note example.
'''
# Find gaps and hits, and gather feature vectors
temp_i = []
temp_j = []
i = -1
j = -1
s1_ = []
s2_ = []
for _ in alignment:
if _[0] != i:
temp_i.append(1)
i = _[0]
else: temp_i.append(0.0)
if _[1] != j:
temp_j.append(1)
j = _[1]
else: temp_j.append(0.0)
s1_.append(s1[_[0]])
s2_.append(s2[_[1]])
# Calculate similarity score
mask = np.array(temp_i) * np.array(temp_j)
similarity = 2 - cdist(s1_,s2_,'cosine').diagonal()
score = (similarity*mask)/(2*len(alignment)) * (np.sum(mask)/len(s2)) * k * len(s2)
return score[0]
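# Editorial end-to-end sketch (assumption: s1/s2 are row-wise embedding matrices
# for two sequences; the random data below is purely illustrative).
if __name__ == "__main__":
    s1 = np.random.rand(12, 64)
    s2 = np.random.rand(9, 64)
    # Optionally smooth each sequence with its neighbours first
    s1s, s2s = apply_sequence_kernel(s1), apply_sequence_kernel(s2)
    a = distance_matrix(s1s, s2s)           # cosine similarities in [-1, 1]
    sa = scoring_matrix(a, wi=1.0, wj=1.0)  # Smith-Waterman style scoring matrix
    alignments = traceback(sa, k=5)         # up to 5 local alignments
    for aln in alignments:
        print(len(aln), score_alignment(aln, s1s, s2s, k=1.0))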
|
the-stack_106_22798 | from __future__ import annotations
import json
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterable, List, Set, Tuple, Union, cast
from packaging.version import Version as PackageVersion
from pip._vendor.packaging.specifiers import SpecifierSet
from pdm.exceptions import InvalidPyVersion
from pdm.models.versions import Version
MAX_VERSIONS_FILE = Path(__file__).with_name("python_max_versions.json")
@lru_cache()
def get_specifier(version_str: Union[SpecifierSet, str]) -> SpecifierSet:
if isinstance(version_str, SpecifierSet):
return version_str
if not version_str or version_str == "*":
return SpecifierSet()
return SpecifierSet(version_str)
def _normalize_op_specifier(op: str, version_str: str) -> Tuple[str, Version]:
version = Version(version_str)
if version.is_wildcard:
if op == "==":
op = "~="
version[-1] = 0
elif op == ">": # >X.Y.* => >=X.Y+1.0
op = ">="
version = version.bump(-2)
elif op in ("<", ">=", "<="):
# <X.Y.* => <X.Y.0
# >=X.Y.* => >=X.Y.0
# <=X.Y.* => <X.Y.0
version[-1] = 0
if op == "<=":
op = "<"
elif op != "!=":
raise InvalidPyVersion(f"Unsupported version specifier: {op}{version}")
if op != "~=" and not (op == "!=" and version.is_wildcard):
# Don't complete with .0 for ~=3.5 and !=3.4.*:
version = version.complete()
return op, version
class PySpecSet(SpecifierSet):
"""A custom SpecifierSet that supports merging with logic operators (&, |)."""
PY_MAX_MINOR_VERSION = {
Version(key): value
for key, value in json.loads(MAX_VERSIONS_FILE.read_text()).items()
}
MAX_MAJOR_VERSION = max(PY_MAX_MINOR_VERSION)[:1].bump()
def __init__(self, version_str: str = "", analyze: bool = True) -> None:
if version_str == "*":
version_str = ""
super().__init__(version_str)
self._lower_bound = Version.MIN
self._upper_bound = Version.MAX
self._excludes: List[Version] = []
if version_str and analyze:
self._analyze_specifiers()
def _analyze_specifiers(self) -> None:
# XXX: Prerelease or postrelease specifiers will fail here, but I guess we can
# just ignore them for now.
lower_bound, upper_bound = Version.MIN, Version.MAX
excludes: Set[Version] = set()
for spec in self:
op, version = _normalize_op_specifier(spec.operator, spec.version)
if op in ("==", "==="):
lower_bound = version
upper_bound = version.bump()
break
if op == "!=":
excludes.add(version)
elif op[0] == ">":
lower_bound = max(
lower_bound, version if op == ">=" else version.bump()
)
elif op[0] == "<":
upper_bound = min(
upper_bound, version.bump() if op == "<=" else version
)
elif op == "~=":
new_lower = version.complete()
new_upper = version.bump(-2)
if new_upper < upper_bound:
upper_bound = new_upper
if new_lower > lower_bound:
lower_bound = new_lower
else:
raise InvalidPyVersion(f"Unsupported version specifier: {op}{version}")
self._rearrange(lower_bound, upper_bound, excludes)
@classmethod
def equal_to(cls, version: PackageVersion) -> "PySpecSet":
"""Create a specifierset that is equal to the given version."""
if not version.is_prerelease:
return cls(f"=={version}")
spec = cls(f"=={version}", analyze=False)
spec._upper_bound = Version((version.major, version.minor, 0))
lower_bound = Version((version.major, version.minor - 1))
spec._lower_bound = lower_bound.complete(
cls.PY_MAX_MINOR_VERSION[lower_bound] + 1
)
return spec
@classmethod
def _merge_bounds_and_excludes(
cls,
lower: Version,
upper: Version,
excludes: Iterable[Version],
) -> Tuple[Version, Version, List[Version]]:
sorted_excludes = sorted(excludes)
wildcard_excludes = {
version[:-1] for version in sorted_excludes if version.is_wildcard
}
# Remove versions that are already excluded by another wildcard exclude.
sorted_excludes = [
version
for version in sorted_excludes
if version.is_wildcard
or not any(version.startswith(wv) for wv in wildcard_excludes)
]
if lower == Version.MIN and upper == Version.MAX:
# Nothing we can do here, it is a non-constraint.
return lower, upper, sorted_excludes
for version in list(sorted_excludes): # from to low to high
if version >= upper:
sorted_excludes[:] = []
break
if version.is_wildcard:
valid_length = len(version._version) - 1
valid_version = version[:valid_length]
if valid_version < lower[:valid_length]:
# Useless excludes
sorted_excludes.remove(version)
elif lower.startswith(valid_version):
# The lower bound is excluded, e.g: >=3.7.3,!=3.7.*
# bump the lower version in the last common bit: >=3.8.0
lower = version.bump(-2)
sorted_excludes.remove(version)
else:
break
else:
if version < lower:
sorted_excludes.remove(version)
elif version == lower:
lower = version.bump()
sorted_excludes.remove(version)
else:
break
for version in reversed(sorted_excludes): # from high to low
if version >= upper:
sorted_excludes.remove(version)
continue
if not version.is_wildcard:
break
valid_length = len(version._version) - 1
valid_version = version[:valid_length]
if upper.startswith(valid_version) or version.bump(-2) == upper:
# Case 1: The upper bound is excluded, e.g: <3.7.3,!=3.7.*
# set the upper to the zero version: <3.7.0
# Case 2: The upper bound is adjacent to the excluded one,
# e.g: <3.7.0,!=3.6.*
# Move the upper bound to below the excluded: <3.6.0
upper = valid_version.complete()
sorted_excludes.remove(version)
else:
break
return lower, upper, sorted_excludes
def _rearrange(
self, lower_bound: Version, upper_bound: Version, excludes: Iterable[Version]
) -> None:
"""Rearrange the version bounds with the given inputs."""
(
self._lower_bound,
self._upper_bound,
self._excludes,
) = self._merge_bounds_and_excludes(lower_bound, upper_bound, excludes)
if not self.is_impossible:
super().__init__(str(self))
def _comp_key(self) -> Tuple[Version, Version, Tuple[Version, ...]]:
return (self._lower_bound, self._upper_bound, tuple(self._excludes))
def __hash__(self) -> int:
return hash(self._comp_key())
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PySpecSet):
return False
return self._comp_key() == other._comp_key()
@property
def is_impossible(self) -> bool:
"""Check whether the specifierset contains any valid versions."""
if self._lower_bound == Version.MIN or self._upper_bound == Version.MAX:
return False
return self._lower_bound >= self._upper_bound
@property
def is_allow_all(self) -> bool:
"""Return True if the specifierset accepts all versions."""
if self.is_impossible:
return False
return (
self._lower_bound == Version.MIN
and self._upper_bound == Version.MAX
and not self._excludes
)
def __bool__(self) -> bool:
return not self.is_allow_all
def __str__(self) -> str:
if self.is_impossible:
return "impossible"
if self.is_allow_all:
return ""
lower = self._lower_bound
upper = self._upper_bound
if lower[-1] == 0:
lower = lower[:-1]
if upper[-1] == 0:
upper = upper[:-1]
lower_str = "" if lower == Version.MIN else f">={lower}"
upper_str = "" if upper == Version.MAX else f"<{upper}"
excludes_str = ",".join(f"!={version}" for version in self._excludes)
return ",".join(filter(None, [lower_str, upper_str, excludes_str]))
def __repr__(self) -> str:
return f"<PySpecSet {self}>"
def copy(self) -> "PySpecSet":
"""Create a new specifierset that is same as the original one."""
if self.is_impossible:
return ImpossiblePySpecSet()
instance = self.__class__(str(self), False)
instance._lower_bound = self._lower_bound
instance._upper_bound = self._upper_bound
instance._excludes = self._excludes[:]
return instance
@lru_cache()
def __and__(self, other: "PySpecSet") -> "PySpecSet":
if any(s.is_impossible for s in (self, other)):
return ImpossiblePySpecSet()
if self.is_allow_all:
return other.copy()
elif other.is_allow_all:
return self.copy()
rv = self.copy()
excludes = set(rv._excludes) | set(other._excludes)
lower = max(rv._lower_bound, other._lower_bound)
upper = min(rv._upper_bound, other._upper_bound)
rv._rearrange(lower, upper, excludes)
return rv
@lru_cache()
def __or__(self, other: "PySpecSet") -> "PySpecSet":
if self.is_impossible:
return other.copy()
elif other.is_impossible:
return self.copy()
if self.is_allow_all:
return self.copy()
elif other.is_allow_all:
return other.copy()
rv = self.copy()
left, right = sorted([rv, other], key=lambda x: x._lower_bound)
excludes = set(left._excludes) & set(right._excludes)
lower = left._lower_bound
upper = max(left._upper_bound, right._upper_bound)
if right._lower_bound > left._upper_bound: # two ranges has no overlap
excludes.update(
self._populate_version_range(left._upper_bound, right._lower_bound)
)
rv._rearrange(lower, upper, excludes)
return rv
def _populate_version_range(
self, lower: Version, upper: Version
) -> Iterable[Version]:
"""Expand the version range to a collection of versions to exclude,
taking the released python versions into consideration.
"""
assert lower < upper
prev = lower
while prev < upper:
if prev[-2:] == Version((0, 0)): # X.0.0
cur = prev.bump(0) # X+1.0.0
if cur <= upper: # It is still within the range
yield Version((prev[0], "*")) # Exclude the whole major series: X.*
prev = cur
continue
if prev[-1] == 0: # X.Y.0
cur = prev.bump(1) # X.Y+1.0
if cur <= upper: # It is still within the range
yield prev[:2].complete("*") # Exclude X.Y.*
prev = (
prev.bump(0)
if cur.is_py2
and cast(int, cur[1]) > self.PY_MAX_MINOR_VERSION[cur[:1]]
else cur
) # If prev is 2.7, next is 3.0, otherwise next is X.Y+1.0
continue
while prev < upper:
# Iterate each version from X.Y.0(prev) to X.Y.Z(upper)
yield prev
prev = prev.bump()
break
# Can't produce any wildcard versions
cur = prev.bump(1)
if cur <= upper: # X.Y+1.0 is still within the range
current_max = self.PY_MAX_MINOR_VERSION[prev[:2]]
for z in range(cast(int, prev[2]), current_max + 1):
yield prev[:2].complete(z)
prev = (
prev.bump(0)
if cur.is_py2
and cast(int, cur[1]) > self.PY_MAX_MINOR_VERSION[cur[:1]]
else cur
)
else: # Produce each version from X.Y.Z to X.Y.W
while prev < upper:
yield prev
prev = prev.bump()
break
@lru_cache()
def is_superset(self, other: Union[str, SpecifierSet]) -> bool:
if self.is_impossible:
return False
if self.is_allow_all:
return True
other = type(self)(str(other))
if other._upper_bound >= self.MAX_MAJOR_VERSION:
# XXX: narrow down the upper bound to ``MAX_MAJOR_VERSION``
# So that `>=3.6,<4.0` is considered a superset of `>=3.7`, see issues/66
other._upper_bound = self.MAX_MAJOR_VERSION
lower, upper, excludes = self._merge_bounds_and_excludes(
other._lower_bound, other._upper_bound, self._excludes
)
if (
self._lower_bound > other._lower_bound
or self._upper_bound < other._upper_bound
):
return False
return (
lower <= other._lower_bound
and upper >= other._upper_bound
and set(excludes) <= set(other._excludes)
)
@lru_cache()
def is_subset(self, other: Union[str, SpecifierSet]) -> bool:
if self.is_impossible:
return False
other = type(self)(str(other))
if other._upper_bound >= self.MAX_MAJOR_VERSION:
# Relax the upper bound to max version
other._upper_bound = Version.MAX
if other.is_allow_all:
return True
lower, upper, excludes = self._merge_bounds_and_excludes(
self._lower_bound, self._upper_bound, other._excludes
)
if (
self._lower_bound < other._lower_bound
or self._upper_bound > other._upper_bound
):
return False
return (
lower <= self._lower_bound
and upper >= self._upper_bound
and set(self._excludes) >= set(excludes)
)
def as_marker_string(self) -> str:
if self.is_allow_all:
return ""
result = []
excludes = []
full_excludes = []
for spec in sorted(self, key=lambda spec: spec.version):
op, version = spec.operator, spec.version
if len(version.split(".")) < 3:
key = "python_version"
else:
key = "python_full_version"
if version[-2:] == ".*":
version = version[:-2]
key = "python_version"
if op == "!=":
if key == "python_version":
excludes.append(version)
else:
full_excludes.append(version)
else:
result.append(f"{key}{op}{version!r}")
if excludes:
result.append(
"python_version not in {!r}".format(", ".join(sorted(excludes)))
)
if full_excludes:
result.append(
"python_full_version not in {!r}".format(
", ".join(sorted(full_excludes))
)
)
return " and ".join(result)
def supports_py2(self) -> bool:
return self._lower_bound.is_py2
class ImpossiblePySpecSet(PySpecSet):
"""
A special subclass of PySpecSet that references to an impossible specifier set.
"""
def __init__(self, version_str: str = "", analyze: bool = True) -> None:
super().__init__(version_str=version_str, analyze=False)
# Make sure the spec set is impossible
self._lower_bound = Version.MAX
self._upper_bound = Version.MIN
@property
def is_impossible(self) -> bool:
return True
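# Editorial usage sketch (not part of the pdm module; results follow the merge
# semantics implemented above):
#
#   >>> PySpecSet(">=3.6") & PySpecSet("<3.10,!=3.7.*")
#   <PySpecSet >=3.6,<3.10,!=3.7.*>
#   >>> PySpecSet(">=2.7,<3.0") | PySpecSet(">=3.6")
#   <PySpecSet >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*>
#   >>> PySpecSet(">=3.7").is_subset(">=3.6")
#   True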
|
the-stack_106_22800 | import time
import torch
from reinvent_scoring import ScoringFuncionParameters
from reinvent_models.reinvent_core.models.model import Model
from reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter
from running_modes.automated_curriculum_learning.logging.base_auto_cl_logger import BaseAutoCLLogger
from running_modes.automated_curriculum_learning.production_strategy.base_production_strategy import \
BaseProductionStrategy
from running_modes.configurations.automated_curriculum_learning.production_strategy_configuration import \
ProductionStrategyConfiguration
from running_modes.enums.special_parameters_enum import SpecialParametersEnum
from running_modes.automated_curriculum_learning.scoring_table import ScoringTable
class ProductionStrategyOverSpecificComponent(BaseProductionStrategy):
def __init__(self, prior: Model, scoring_function_params: ScoringFuncionParameters,
configuration: ProductionStrategyConfiguration, diversity_filter: BaseDiversityFilter,
logger: BaseAutoCLLogger, scoring_table: ScoringTable):
super().__init__(prior=prior, configuration=configuration,
scoring_function_params=scoring_function_params, diversity_filter=diversity_filter,
logger=logger, scoring_table=scoring_table)
self._special_parameters_enum = SpecialParametersEnum()
def run(self, cl_agent: Model, steps_so_far: int):
self._diversity_filter.flush_memory()
selected_components = self._parameters.special_parameters.get(self._special_parameters_enum.SELECTED_COMPONENTS)
sf_parameters = self._scoring_table.get_sf_components_by_name(names=selected_components)
scoring_function = self.setup_scoring_function(name=self.sf_name, parameter_list=sf_parameters)
start_time = time.time()
self._disable_prior_gradients()
optimizer = torch.optim.Adam(cl_agent.network.parameters(), lr=self._parameters.lr)
for step in range(steps_so_far, steps_so_far + self._parameters.n_steps):
self._take_step(agent=cl_agent, optimizer=optimizer, scoring_function_current=scoring_function,
step=step, start_time=start_time)
self._logger.log_text_to_file(f"Production finished at step {step}")
self._logger.save_final_state(cl_agent, self._diversity_filter)
self._logger.log_out_input_configuration()
|
the-stack_106_22802 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import multiprocessing
import os
import psutil
import signal
import six
import sys
import threading
import time
import datetime
from collections import defaultdict
from past.builtins import basestring
from sqlalchemy import (
Column, Integer, String, func, Index, or_, and_, not_)
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from sqlalchemy_utc import UtcDateTime
from tabulate import tabulate
from time import sleep
from airflow import configuration as conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.settings import Stats
from airflow.task.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorManager,
SimpleDag,
SimpleDagBag,
list_py_file_paths)
from airflow.utils.db import create_session, provide_session
from airflow.utils.email import send_email
from airflow.utils.log.logging_mixin import LoggingMixin, set_context, StreamLogWriter
from airflow.utils.state import State
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.net import get_hostname
Base = models.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
Index('idx_job_state_heartbeat', state, latest_heartbeat),
)
def __init__(
self,
executor=executors.GetDefaultExecutor(),
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = get_hostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
self.heartrate = heartrate
self.unixname = getpass.getuser()
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
            (timezone.utcnow() - self.latest_heartbeat).total_seconds() <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
@provide_session
def kill(self, session=None):
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error('on_kill() method failed: {}'.format(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""
Will be called when an external kill command is received
"""
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (
timezone.utcnow() - job.latest_heartbeat).total_seconds())
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
try:
self._execute()
# In case of max runs or max duration
self.state = State.SUCCESS
except SystemExit as e:
# In case of ^C or SIGTERM
self.state = State.SUCCESS
except Exception as e:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
@provide_session
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
"""
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
The batch option is for performance reasons as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: List(TaskInstance)
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
# Can't use an update here since it doesn't support joins
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
["{}".format(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process."""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list):
"""
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG IDs
:type dag_id_white_list: list[unicode]
"""
self._file_path = file_path
# Queue that's used to pass results from the child process.
self._result_queue = multiprocessing.Queue()
# The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
# The result of SchedulerJob.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
"""
def helper():
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id))
self._start_time = timezone.utcnow()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=-1,
file_process_interval=conf.getint('scheduler',
'min_file_process_interval'),
min_file_parsing_loop_time=conf.getint('scheduler',
'min_file_parsing_loop_time'),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: the number of times to try to schedule each DAG file.
-1 for unlimited within the run_duration.
:type num_runs: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
self.using_sqlite = True
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Parse and schedule each file no faster than this interval. Default
# to 3 minutes.
self.file_process_interval = file_process_interval
# Wait until at least this many seconds have passed before parsing files once all
# files have finished parsing.
self.min_file_parsing_loop_time = min_file_parsing_loop_time
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
@provide_session
def manage_slas(self, dag, session=None):
"""
Find all tasks that have SLAs defined and send alert emails
where needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([ti.sla for ti in dag.tasks]):
self.log.info(
"Skipping SLA check for %s because no tasks in DAG have SLAs",
dag
)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False)
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
l = [t.email]
elif isinstance(t.email, (list, tuple)):
l = t.email
for email in l:
if email not in emails:
emails.append(email)
if emails and len(slas):
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
@provide_session
def clear_nonexistent_import_errors(session, known_file_paths):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param known_file_paths: The list of existing files that are parsed for DAGs
:type known_file_paths: list[unicode]
"""
query = session.query(models.ImportError)
if known_file_paths:
query = query.filter(
~models.ImportError.filename.in_(known_file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on its schedule interval.
Returns the DagRun if one is scheduled, otherwise returns None.
"""
if dag.schedule_interval:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
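# Illustrative example (hypothetical dates): for a daily schedule with
# catchup=False and now == 2019-01-10T12:00, last_start is 2019-01-10T00:00 and
# next_start is 2019-01-11T00:00 (> now), so new_start becomes
# previous_schedule(2019-01-10T00:00) == 2019-01-09T00:00 and only the most
# recent complete period gets scheduled.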
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future
if next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Active dag runs > max_active_run.")
continue
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
# this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[State]
:param new_state: set TaskInstances to this state
:type new_state: State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
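# Two execution paths for the same update: on sqlite the matching rows are
# locked with SELECT ... FOR UPDATE and updated one TaskInstance at a time,
# while other backends get a single bulk UPDATE driven by the subquery with
# synchronize_session disabled (sqlite is assumed not to handle that bulk
# update pattern well, hence the using_sqlite flag set in __init__).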
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
@provide_session
def __get_task_concurrency_map(self, states, session=None):
"""
Returns a map from (dag_id, task_id) to the number of task instances in the given states.
:param states: List of states to query for
:type states: List[State]
:return: A map from (dag_id, task_id) to count of tasks in states
:rtype: Dict[Tuple[String, String], Int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
task_map[(dag_id, task_id)] = count
return task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: List[TaskInstance]
"""
executable_tis = []
# Get all the task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and whose DAG is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa E711
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa E711
not_(DM.is_paused)))
)
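# The outer joins above keep task instances whose DagRun is either absent or
# not a backfill run (run_id not starting with BackfillJob.ID_PREFIX), and
# whose DagModel row is either absent or not marked as paused.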
if None in states:
ti_query = ti_query.filter(or_(TI.state == None, TI.state.in_(states))) # noqa E711
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.info("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.log.info("Tasks up for execution:\n\t%s", task_instance_str)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
task_concurrency_map = self.__get_task_concurrency_map(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name={pool}) with {open_slots} "
"open slots and {num_queued} task instances in queue".format(
**locals()
)
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Map of DAG ID -> count of its possibly running task instances, used to
# enforce the DAG's concurrency limit
dag_id_to_possibly_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
if dag_id not in dag_id_to_possibly_running_task_count:
dag_id_to_possibly_running_task_count[dag_id] = \
DAG.get_num_task_instances(
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids,
states=states_to_count_as_running,
session=session)
current_task_concurrency = dag_id_to_possibly_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_task_concurrency, task_concurrency_limit
)
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued from DAG %s"
" is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, task_concurrency_limit
)
continue
task_concurrency = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency is not None:
num_running = task_concurrency_map[
((task_instance.dag_id, task_instance.task_id))
]
if num_running >= task_concurrency:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
else:
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_id_to_possibly_running_task_count[dag_id] += 1
task_instance_str = "\n\t".join(
["{}".format(x) for x in executable_tis])
self.log.info(
"Setting the follow tasks to queued state:\n\t%s", task_instance_str)
# so these dont expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed.
:param task_instances: TaskInstances to change the state of
:type task_instances: List[TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:return: List[TaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa E711
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# save which TIs we set before session expires them
filter_for_ti_enqueue = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in tis_to_set_to_queued])
session.commit()
# re-query in batches since the objects above were expired by the commit
def query(result, items):
tis_to_be_queued = (
session
.query(TI)
.filter(or_(*items))
.all())
task_instance_str = "\n\t".join(
["{}".format(x) for x in tis_to_be_queued])
self.log.info("Setting the follow tasks to queued state:\n\t%s",
task_instance_str)
return result + tis_to_be_queued
tis_to_be_queued = helpers.reduce_in_chunks(query,
filter_for_ti_enqueue,
[],
self.max_tis_per_query)
return tis_to_be_queued
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag, task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: List[TaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for task_instance in task_instances:
simple_dag = simple_dag_bag.get_dag(task_instance.dag_id)
command = " ".join(TI.generate_command(
task_instance.dag_id,
task_instance.task_id,
task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id))
priority = task_instance.priority_weight
queue = task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
task_instance.key, priority, queue
)
# save attributes so sqlalchemy doesn't expire them
copy_dag_id = task_instance.dag_id
copy_task_id = task_instance.task_id
copy_execution_date = task_instance.execution_date
make_transient(task_instance)
task_instance.dag_id = copy_dag_id
task_instance.task_id = copy_task_id
task_instance.execution_date = copy_execution_date
self.executor.queue_command(
task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: None
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
tis_with_state_changed = self._change_state_for_executable_task_instances(
items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
tis_with_state_changed)
session.commit()
return result + len(tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: list[DAG]
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:return: None
"""
for dag in dags:
dag_id = dag.dag_id
dag = dagbag.get_dag(dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
models.DagStat.update([d.dag_id for d in dags])
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date = key
self.log.info(
"Executor reports %s.%s execution_date=%s as %s",
dag_id, task_id, execution_date, state
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _log_file_processing_stats(self,
known_file_paths,
processor_manager):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:param processor_manager: manager for the file processors
:type processor_manager: DagFileProcessorManager
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = processor_manager.get_last_runtime(file_path)
processor_pid = processor_manager.get_pid(file_path)
processor_start_time = processor_manager.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = processor_manager.get_last_finish_time(file_path)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
# Use multiple processes to parse and generate tasks for the
# DAGs in parallel. By processing them in separate processes,
# we can get parallelism and isolation from potentially harmful
# user code.
self.log.info("Processing files using up to %s processes at a time",
self.max_threads)
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
self.log.info("Process each file at most once every %s seconds",
self.file_process_interval)
self.log.info("Wait until at least %s seconds have passed between file parsing "
"loops", self.min_file_parsing_loop_time)
self.log.info("Checking for new files in %s every %s seconds",
self.subdir, self.dag_dir_list_interval)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids)
processor_manager = DagFileProcessorManager(self.subdir,
known_file_paths,
self.max_threads,
self.file_process_interval,
self.min_file_parsing_loop_time,
self.num_runs,
processor_factory)
try:
self._execute_helper(processor_manager)
finally:
self.log.info("Exited execute loop")
# Kill all child processes on exit since we don't want to leave
# them as orphaned.
pids_to_kill = processor_manager.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info(
"Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def _execute_helper(self, processor_manager):
"""
:param processor_manager: manager to use
:type processor_manager: DagFileProcessorManager
:return: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
execute_start_time = timezone.utcnow()
# Last time stats were printed
last_stat_print_time = datetime.datetime(2000, 1, 1, tzinfo=timezone.utc)
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# Last time that the DAG dir was traversed to look for files
last_dag_dir_refresh_time = timezone.utcnow()
# Use this value initially
known_file_paths = processor_manager.file_paths
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
# Traverse the DAG directory for Python files containing DAGs
# periodically
elapsed_time_since_refresh = (timezone.utcnow() -
last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
last_dag_dir_refresh_time = timezone.utcnow()
self.log.info(
"There are %s files in %s", len(known_file_paths), self.subdir)
processor_manager.set_file_paths(known_file_paths)
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors(known_file_paths=known_file_paths)
# Kick off new processes and collect results from finished ones
self.log.debug("Heartbeating the process manager")
simple_dags = processor_manager.heartbeat()
if self.using_sqlite:
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
processor_manager.wait_until_finished()
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued, but the corresponding
# DAG run isn't running, set the state to NONE so we don't try to
# re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
# Occasionally print out stats about how fast the files are getting processed
if ((timezone.utcnow() - last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(known_file_paths) > 0:
self._log_file_processing_stats(known_file_paths,
processor_manager)
last_stat_print_time = timezone.utcnow()
loop_end_time = time.time()
self.log.debug("Ran scheduling loop in %.2f seconds",
loop_end_time - loop_start_time)
# Exit early for a test mode
if processor_manager.max_runs_reached():
self.log.info("Exiting loop as all files have been processed %s times",
self.num_runs)
break
# Stop any processors
processor_manager.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
all_files_processed = True
for file_path in known_file_paths:
if processor_manager.get_last_finish_time(file_path) is None:
all_files_processed = False
break
if all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[SimpleDag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer the task dependency checks to the workers themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies()
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instances to be completed.
"""
ID_PREFIX = 'backfill_'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
class _DagRunTaskStatus(object):
"""
Internal status of the backfill job. This class is intended to be instantiated
only within a BackfillJob instance and will track the execution of tasks,
e.g. running, skipped, succeeded, failed, etc. Information about the dag runs
related to the backfill job is also tracked in this structure,
e.g. finished runs, etc. Any other status information related to the
execution of dag runs / tasks can be included in this structure since it makes
it easier to pass it around.
"""
# TODO(edgarRd): AIRFLOW-1444: Add consistency check on counts
def __init__(self,
to_run=None,
running=None,
skipped=None,
succeeded=None,
failed=None,
not_ready=None,
deadlocked=None,
active_runs=None,
executed_dag_run_dates=None,
finished_runs=0,
total_runs=0,
):
"""
:param to_run: Tasks to run in the backfill
:type to_run: dict[Tuple[String, String, DateTime], TaskInstance]
:param running: Maps running task instance key to task instance object
:type running: dict[Tuple[String, String, DateTime], TaskInstance]
:param skipped: Tasks that have been skipped
:type skipped: set[Tuple[String, String, DateTime]]
:param succeeded: Tasks that have succeeded so far
:type succeeded: set[Tuple[String, String, DateTime]]
:param failed: Tasks that have failed
:type failed: set[Tuple[String, String, DateTime]]
:param not_ready: Tasks not ready for execution
:type not_ready: set[Tuple[String, String, DateTime]]
:param deadlocked: Deadlocked tasks
:type deadlocked: set[Tuple[String, String, DateTime]]
:param active_runs: Active dag runs at a certain point in time
:type active_runs: list[DagRun]
:param executed_dag_run_dates: Datetime objects for the executed dag runs
:type executed_dag_run_dates: set[Datetime]
:param finished_runs: Number of finished runs so far
:type finished_runs: int
:param total_runs: Number of total dag runs able to run
:type total_runs: int
"""
self.to_run = to_run or dict()
self.running = running or dict()
self.skipped = skipped or set()
self.succeeded = succeeded or set()
self.failed = failed or set()
self.not_ready = not_ready or set()
self.deadlocked = deadlocked or set()
self.active_runs = active_runs or list()
self.executed_dag_run_dates = executed_dag_run_dates or set()
self.finished_runs = finished_runs
self.total_runs = total_runs
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
*args, **kwargs):
"""
:param dag: DAG object.
:type dag: `class DAG`.
:param start_date: start date for the backfill date range.
:type start_date: datetime.
:param end_date: end date for the backfill date range.
:type end_date: datetime
:param mark_success: flag whether to mark the task auto success.
:type mark_success: bool
:param donot_pickle: whether to skip pickling the DAG
:type donot_pickle: bool
:param ignore_first_depends_on_past: whether to ignore depends_on_past for
the first dag run
:type ignore_first_depends_on_past: bool
:param ignore_task_deps: whether to ignore the task dependencies
:type ignore_task_deps: bool
:param pool: pool to use for the backfilled task instances
:type pool: unicode
:param delay_on_limit_secs: time in seconds to wait before the next attempt to
run a dag run when max_active_runs has been reached
:param verbose: flag whether to display verbose messages to the backfill console
:type verbose: bool
:param conf: a dictionary of key-value pairs the user can pass to the backfill run
:type conf: dict
:param rerun_failed_tasks: flag whether to automatically rerun
failed tasks in the backfill
:type rerun_failed_tasks: bool
:param args:
:param kwargs:
"""
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
self.delay_on_limit_secs = delay_on_limit_secs
self.verbose = verbose
self.conf = conf
self.rerun_failed_tasks = rerun_failed_tasks
super(BackfillJob, self).__init__(*args, **kwargs)
def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
tasks to the list of tasks to run when required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
is changed externally, e.g. by clearing tasks from the UI. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date, session=None):
"""
Returns a dag run for the given run date, which will be matched to an existing
dag run if available or created otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime
:param session: the database session object
:type session: Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
@provide_session
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: models.DagRun
:param session: the database session object
:type session: Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run
def _log_progress(self, ti_status):
msg = ' | '.join([
"[backfill progress]",
"finished run {0} of {1}",
"tasks waiting: {2}",
"succeeded: {3}",
"running: {4}",
"failed: {5}",
"skipped: {6}",
"deadlocked: {7}",
"not ready: {8}"
]).format(
ti_status.finished_runs,
ti_status.total_runs,
len(ti_status.to_run),
len(ti_status.succeeded),
len(ti_status.running),
len(ti_status.failed),
len(ti_status.skipped),
len(ti_status.deadlocked),
len(ti_status.not_ready))
self.log.info(msg)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime
:param session: the current session object
:type session: Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state == State.SCHEDULED or ti.state == State.UP_FOR_RETRY:
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
continue
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
continue
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
if run.dag.is_paused:
models.DagStat.update([run.dag_id], session=session)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
@provide_session
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track the progress of task instances
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime
:param session: the current session object
:type session: Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
finally:
executor.end()
session.commit()
self.log.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
# terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
raise AirflowException("LocalTaskJob received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance._check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
last_heartbeat_time = time.time()
heartbeat_time_limit = conf.getint('scheduler',
'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done
return_code = self.task_runner.return_code()
if return_code is not None:
self.log.info("Task exited with return code %s", return_code)
return
# Periodically heartbeat so that the scheduler doesn't think this
# is a zombie
try:
self.heartbeat()
last_heartbeat_time = time.time()
except OperationalError:
Stats.incr('local_task_job_heartbeat_failure', 1, 1)
self.log.exception(
"Exception while trying to heartbeat! Sleeping for %s seconds",
self.heartrate
)
time.sleep(self.heartrate)
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
time_since_last_heartbeat = time.time() - last_heartbeat_time
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limited exceeded!")
raise AirflowException("Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s)."
.format(time_since_last_heartbeat,
heartbeat_time_limit))
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
same_process = ti.pid == os.getpid()
if ti.state == State.RUNNING:
if not same_hostname:
self.log.warning("The recorded hostname {ti.hostname} "
"does not match this instance's hostname "
"{fqdn}".format(**locals()))
raise AirflowException("Hostname of job runner does not match")
elif not same_process:
current_pid = os.getpid()
self.log.warning("Recorded pid {ti.pid} does not match "
"the current pid "
"{current_pid}".format(**locals()))
raise AirflowException("PID of job runner does not match")
elif (
self.task_runner.return_code() is None and
hasattr(self.task_runner, 'process')
):
self.log.warning(
"State of this instance has been externally set to %s. "
"Taking the poison pill.",
ti.state
)
self.task_runner.terminate()
self.terminating = True
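# Standalone sketch (illustrative helper, not used by the classes above): the
# zombie check performed in LocalTaskJob._execute boils down to comparing the
# time since the last successful heartbeat against the configured
# scheduler_zombie_task_threshold.
def _heartbeat_limit_exceeded(last_heartbeat_time, heartbeat_time_limit):
    return (time.time() - last_heartbeat_time) > heartbeat_time_limit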
|
the-stack_106_22803 | """The interface between scenes and ffmpeg."""
__all__ = ["SceneFileWriter"]
import datetime
import os
import shutil
import subprocess
from pathlib import Path
from time import sleep
import numpy as np
from PIL import Image
from pydub import AudioSegment
from manim import __version__
from .. import config, logger
from ..constants import FFMPEG_BIN, GIF_FILE_EXTENSION
from ..utils.file_ops import (
add_extension_if_not_present,
add_version_before_extension,
guarantee_existence,
is_gif_format,
is_png_format,
modify_atime,
write_to_movie,
)
from ..utils.sounds import get_full_sound_file_path
class SceneFileWriter(object):
"""
SceneFileWriter is the object that actually writes the animations
played, into video files, using FFMPEG.
This is mostly for Manim's internal use. You will rarely, if ever,
have to use the methods for this class, unless tinkering with the very
fabric of Manim's reality.
Some useful attributes are:
"write_to_movie" (bool=False)
Whether or not to write the animations into a video file.
"png_mode" (str="RGBA")
The PIL image mode to use when outputting PNGs
"movie_file_extension" (str=".mp4")
The file-type extension of the outputted video.
"partial_movie_files"
List of all the partial-movie files.
"""
def __init__(self, renderer, scene_name, **kwargs):
self.renderer = renderer
self.stream_lock = False
self.init_output_directories(scene_name)
self.init_audio()
self.frame_count = 0
self.partial_movie_files = []
def init_output_directories(self, scene_name):
"""Initialise output directories.
Notes
-----
The directories are read from ``config``, for example
``config['media_dir']``. If the target directories don't already
exist, they will be created.
"""
if config["dry_run"]: # in dry-run mode there is no output
return
if config["input_file"]:
module_name = config.get_dir("input_file").stem
else:
module_name = ""
if config["output_file"] and not config["write_all"]:
default_name = config.get_dir("output_file")
else:
default_name = Path(scene_name)
if config["media_dir"]:
image_dir = guarantee_existence(
config.get_dir("images_dir", module_name=module_name)
)
self.image_file_path = os.path.join(
image_dir, add_extension_if_not_present(default_name, ".png")
)
if write_to_movie():
movie_dir = guarantee_existence(
config.get_dir("video_dir", module_name=module_name)
)
self.movie_file_path = os.path.join(
movie_dir,
add_extension_if_not_present(
default_name, config["movie_file_extension"]
),
)
if is_gif_format():
self.gif_file_path = os.path.join(
movie_dir,
add_extension_if_not_present(default_name, GIF_FILE_EXTENSION),
)
self.partial_movie_directory = guarantee_existence(
config.get_dir(
"partial_movie_dir",
scene_name=scene_name,
module_name=module_name,
)
)
def add_partial_movie_file(self, hash_animation):
"""Adds a new partial movie file path to scene.partial_movie_files from an hash. This method will compute the path from the hash.
Parameters
----------
hash_animation : str
Hash of the animation.
"""
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return
# None has to be added to partial_movie_files to keep the right index with scene.num_plays.
# i.e. if an animation is skipped, scene.num_plays is still incremented and we add an element to partial_movie_files to stay even with num_plays.
if hash_animation is None:
self.partial_movie_files.append(None)
return
new_partial_movie_file = os.path.join(
self.partial_movie_directory,
f"{hash_animation}{config['movie_file_extension']}",
)
self.partial_movie_files.append(new_partial_movie_file)
def get_resolution_directory(self):
"""Get the name of the resolution directory directly containing
the video file.
This method gets the name of the directory that immediately contains the
video file. This name is ``<height_in_pixels_of_video>p<frame_rate>``.
For example, if you are rendering an 854x480 px animation at 15fps,
the name of the directory that immediately contains the video, file
will be ``480p15``.
The file structure should look something like::
MEDIA_DIR
|--Tex
|--texts
|--videos
|--<name_of_file_containing_scene>
|--<height_in_pixels_of_video>p<frame_rate>
|--<scene_name>.mp4
Returns
-------
:class:`str`
The name of the directory.
"""
pixel_height = config["pixel_height"]
frame_rate = config["frame_rate"]
return f"{pixel_height}p{frame_rate}"
# Sound
def init_audio(self):
"""
Preps the writer for adding audio to the movie.
"""
self.includes_sound = False
def create_audio_segment(self):
"""
Creates an empty, silent, Audio Segment.
"""
self.audio_segment = AudioSegment.silent()
def add_audio_segment(self, new_segment, time=None, gain_to_background=None):
"""
This method adds an audio segment from an
AudioSegment type object and suitable parameters.
Parameters
----------
new_segment : AudioSegment
The audio segment to add
time : int, float, optional
the timestamp at which the
sound should be added.
gain_to_background : optional
The gain of the segment from the background.
"""
if not self.includes_sound:
self.includes_sound = True
self.create_audio_segment()
segment = self.audio_segment
curr_end = segment.duration_seconds
if time is None:
time = curr_end
if time < 0:
raise ValueError("Adding sound at timestamp < 0")
new_end = time + new_segment.duration_seconds
diff = new_end - curr_end
if diff > 0:
segment = segment.append(
AudioSegment.silent(int(np.ceil(diff * 1000))),
crossfade=0,
)
self.audio_segment = segment.overlay(
new_segment,
position=int(1000 * time),
gain_during_overlay=gain_to_background,
)
def add_sound(self, sound_file, time=None, gain=None, **kwargs):
"""
This method adds an audio segment from a sound file.
Parameters
----------
sound_file : str
The path to the sound file.
time : float or int, optional
The timestamp at which the audio should be added.
gain : optional
The gain of the given audio segment.
**kwargs
This method uses add_audio_segment, so any keyword arguments
used there can be referenced here.
"""
file_path = get_full_sound_file_path(sound_file)
new_segment = AudioSegment.from_file(file_path)
if gain:
new_segment = new_segment.apply_gain(gain)
self.add_audio_segment(new_segment, time, **kwargs)
# Writers
def begin_animation(self, allow_write=False, file_path=None):
"""
Used internally by manim to stream the animation to FFMPEG for
displaying or writing to a file.
Parameters
----------
allow_write : bool, optional
Whether or not to write to a video file.
"""
if write_to_movie() and allow_write:
self.open_movie_pipe(file_path=file_path)
def end_animation(self, allow_write=False):
"""
Internally used by Manim to stop streaming to
FFMPEG gracefully.
Parameters
----------
allow_write : bool, optional
Whether or not to write to a video file.
"""
if write_to_movie() and allow_write:
self.close_movie_pipe()
def write_frame(self, frame_or_renderer):
"""
Used internally by Manim to write a frame to
the FFMPEG input buffer.
Parameters
----------
frame_or_renderer : np.array or renderer
    Pixel array of the frame, or the renderer itself when the OpenGL renderer is used.
"""
if config.renderer == "opengl":
renderer = frame_or_renderer
self.writing_process.stdin.write(
renderer.get_raw_frame_buffer_object_data()
)
else:
frame = frame_or_renderer
if write_to_movie():
self.writing_process.stdin.write(frame.tobytes())
if is_png_format():
target_dir, extension = os.path.splitext(self.image_file_path)
Image.fromarray(frame).save(
f"{target_dir}{self.frame_count}{extension}"
)
self.frame_count += 1
def save_final_image(self, image):
"""
The name is a misnomer. This method saves the image
passed to it as an image in the default image directory.
Parameters
----------
image : np.array
The pixel array of the image to save.
"""
if not config["output_file"]:
self.image_file_path = add_version_before_extension(self.image_file_path)
image.save(self.image_file_path)
self.print_file_ready_message(self.image_file_path)
def idle_stream(self):
"""
Doesn't write anything to the FFMPEG frame buffer.
"""
while self.stream_lock:
a = datetime.datetime.now()
# self.update_frame()
self.renderer.update_frame()
n_frames = 1
# frame = self.get_frame()
frame = self.renderer.get_frame()
# self.add_frame(*[frame] * n_frames)
self.renderer.add_frame(*[frame] * n_frames)
b = datetime.datetime.now()
time_diff = (b - a).total_seconds()
frame_duration = 1 / config["frame_rate"]
if time_diff < frame_duration:
sleep(frame_duration - time_diff)
def finish(self, partial_movie_files=None):
"""
Finishes writing to the FFMPEG buffer or writing images
to output directory.
Combines the partial movie files into the
whole scene.
If save_last_frame is True, saves the last
frame in the default image directory.
"""
if write_to_movie():
if hasattr(self, "writing_process"):
self.writing_process.terminate()
self.combine_movie_files(partial_movie_files=partial_movie_files)
if config["flush_cache"]:
self.flush_cache_directory()
else:
self.clean_cache()
elif is_png_format():
target_dir, _ = os.path.splitext(self.image_file_path)
logger.info("\n%i images ready at %s\n", self.frame_count, target_dir)
def open_movie_pipe(self, file_path=None):
"""
Used internally by Manim to initialise
FFMPEG and begin writing to FFMPEG's input
buffer.
"""
if file_path is None:
file_path = self.partial_movie_files[self.renderer.num_plays]
self.partial_movie_file_path = file_path
fps = config["frame_rate"]
if fps == int(fps): # fps is integer
fps = int(fps)
if config.renderer == "opengl":
width, height = self.renderer.get_pixel_shape()
else:
height = config["pixel_height"]
width = config["pixel_width"]
command = [
FFMPEG_BIN,
"-y", # overwrite output file if it exists
"-f",
"rawvideo",
"-s",
"%dx%d" % (width, height), # size of one frame
"-pix_fmt",
"rgba",
"-r",
str(fps), # frames per second
"-i",
"-", # The input comes from a pipe
"-an", # Tells FFMPEG not to expect any audio
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
]
if config.renderer == "opengl":
command += ["-vf", "vflip"]
if config["transparent"]:
command += ["-vcodec", "qtrle"]
else:
command += ["-vcodec", "libx264", "-pix_fmt", "yuv420p"]
command += [file_path]
self.writing_process = subprocess.Popen(command, stdin=subprocess.PIPE)
def close_movie_pipe(self):
"""
Used internally by Manim to gracefully stop writing to FFMPEG's input buffer
"""
self.writing_process.stdin.close()
self.writing_process.wait()
logger.info(
f"Animation {self.renderer.num_plays} : Partial movie file written in %(path)s",
{"path": {self.partial_movie_file_path}},
)
def is_already_cached(self, hash_invocation):
"""Will check if a file named with `hash_invocation` exists.
Parameters
----------
hash_invocation : :class:`str`
The hash corresponding to an invocation to either `scene.play` or `scene.wait`.
Returns
-------
:class:`bool`
Whether the file exists.
"""
if not hasattr(self, "partial_movie_directory") or not write_to_movie():
return False
path = os.path.join(
self.partial_movie_directory,
f"{hash_invocation}{config['movie_file_extension']}",
)
return os.path.exists(path)
def combine_movie_files(self, partial_movie_files=None):
"""
Used internally by Manim to combine the separate
partial movie files that make up a Scene into a single
video file for that Scene.
"""
# Manim renders the scene as many smaller movie files which are then
# concatenated to a larger one. The reason for this is that sometimes
# video-editing is made easier when one works with the broken up scene,
# which effectively has cuts at all the places you might want. But for
# viewing the scene as a whole, one of course wants to see it as a
# single piece.
partial_movie_files = [el for el in self.partial_movie_files if el is not None]
# NOTE : Here we should do a check and raise an exception if partial
# movie file is empty. We can't, as a lot of stuff (in particular, in
# tests) use scene initialization, and this error would be raised as
# it's just an empty scene initialized.
# Write a file partial_file_list.txt containing all partial movie
# files. This is used by FFMPEG.
file_list = os.path.join(
self.partial_movie_directory, "partial_movie_file_list.txt"
)
logger.debug(
f"Partial movie files to combine ({len(partial_movie_files)} files): %(p)s",
{"p": partial_movie_files[:5]},
)
with open(file_list, "w") as fp:
fp.write("# This file is used internally by FFMPEG.\n")
for pf_path in partial_movie_files:
if os.name == "nt":
pf_path = pf_path.replace("\\", "/")
fp.write(f"file 'file:{pf_path}'\n")
movie_file_path = self.movie_file_path
commands = [
FFMPEG_BIN,
"-y", # overwrite output file if it exists
"-f",
"concat",
"-safe",
"0",
"-i",
file_list,
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
"-nostdin",
]
if write_to_movie() and not is_gif_format():
commands += ["-c", "copy", movie_file_path]
if is_gif_format():
if not config["output_file"]:
self.gif_file_path = str(
add_version_before_extension(self.gif_file_path)
)
commands += [
"-vf",
f"fps={np.clip(config['frame_rate'], 1, 50)},split[s0][s1];[s0]palettegen=stats_mode=diff[p];[s1][p]paletteuse=dither=bayer:bayer_scale=5:diff_mode=rectangle",
self.gif_file_path,
]
if not self.includes_sound:
commands.insert(-1, "-an")
combine_process = subprocess.Popen(commands)
combine_process.wait()
if self.includes_sound:
extension = config["movie_file_extension"]
sound_file_path = movie_file_path.replace(extension, ".wav")
# Makes sure sound file length will match video file
self.add_audio_segment(AudioSegment.silent(0))
self.audio_segment.export(
sound_file_path,
bitrate="312k",
)
temp_file_path = movie_file_path.replace(extension, f"_temp{extension}")
commands = [
FFMPEG_BIN,
"-i",
movie_file_path,
"-i",
sound_file_path,
"-y", # overwrite output file if it exists
"-c:v",
"copy",
"-c:a",
"aac",
"-b:a",
"320k",
# select video stream from first file
"-map",
"0:v:0",
# select audio stream from second file
"-map",
"1:a:0",
"-loglevel",
config["ffmpeg_loglevel"].lower(),
"-metadata",
f"comment=Rendered with Manim Community v{__version__}",
# "-shortest",
temp_file_path,
]
subprocess.call(commands)
shutil.move(temp_file_path, movie_file_path)
os.remove(sound_file_path)
self.print_file_ready_message(
self.gif_file_path if is_gif_format() else movie_file_path
)
if write_to_movie():
for file_path in partial_movie_files:
# We have to modify the accessed time so that, if we have to clean the cache, we remove the files that were used longest ago.
modify_atime(file_path)
def clean_cache(self):
"""Will clean the cache by removing the partial_movie_files used by manim the longest ago."""
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
if len(cached_partial_movies) > config["max_files_cached"]:
number_files_to_delete = (
len(cached_partial_movies) - config["max_files_cached"]
)
oldest_files_to_delete = sorted(
cached_partial_movies,
key=os.path.getatime,
)[:number_files_to_delete]
# oldest_file_path = min(cached_partial_movies, key=os.path.getatime)
for file_to_delete in oldest_files_to_delete:
os.remove(file_to_delete)
logger.info(
f"The partial movie directory is full (> {config['max_files_cached']} files). Therefore, manim has removed {number_files_to_delete} file(s) used by it the longest ago."
+ "You can change this behaviour by changing max_files_cached in config."
)
def flush_cache_directory(self):
"""Delete all the cached partial movie files"""
cached_partial_movies = [
os.path.join(self.partial_movie_directory, file_name)
for file_name in os.listdir(self.partial_movie_directory)
if file_name != "partial_movie_file_list.txt"
]
for f in cached_partial_movies:
os.remove(f)
logger.info(
f"Cache flushed. {len(cached_partial_movies)} file(s) deleted in %(par_dir)s.",
{"par_dir": self.partial_movie_directory},
)
def print_file_ready_message(self, file_path):
"""Prints the "File Ready" message to STDOUT."""
config["output_file"] = file_path
logger.info("\nFile ready at %(file_path)s\n", {"file_path": file_path})
|
the-stack_106_22804 | # Copyright Clayton Brown 2019. See LICENSE file.
from ..internal import GameObject, Box
from ..utilities import recursiveSplit
from .textLine import TextLine
from typing import List
from dataclasses import dataclass
@dataclass
class TextLineData:
'''
Handles the meta information necessary for the text line data
'''
# TextLine object
textLine: TextLine = None
# Which position in the original textBox text this textLineData starts at
textIndex: int = 0
class TextBox(GameObject):
'''
A TextBox is a GameObject which displays text at a given position on a canvas.
Parameters
----------
box: Position and size of the text box on the screen
text: The text the TextBox is displaying on the screen
textColor, backgroundColor: Colors used for the displayed text and the box background
justify: "L", "R", or "C". (Left, Right, or Center). Justifies the text display in the textBox. If a value is omitted or incorrectly given, will left justify
'''
def __init__(self, box: Box, text: str, textColor: tuple, backgroundColor: tuple, justify: str = "L", **kwargs):
GameObject.__init__(self, box, **kwargs)
# Set the initialization paramters
self.text: str = text
self.textColor: tuple = textColor
self.backgroundColor: tuple = backgroundColor
self.justify: str = justify
# A List of text lines for each line in the textBox
self.textLineDataList: List[TextLineData] = []
for i in range(self.h):
textLineBox = Box(0, i, self.w, 1)
textLine = TextLine(textLineBox, "", self.textColor, self.backgroundColor, justify = self.justify)
self.textLineDataList.append(TextLineData(textLine, 0))
self.clearText()
def clearText(self):
for textLineData in self.textLineDataList:
textLineData.textLine.text = ""
textLineData.textLine._render()
textLineData.textIndex = 0
def render(self):
'''
Splits self.text to fit the box width and renders each line into its TextLine.
'''
# Get all the text to split
lines = recursiveSplit(self.text, self.w)
# Keep track of the start index for each line
lineStart = 0
for i in range(len(self.textLineDataList)):
textLineData = self.textLineDataList[i]
if i < len(lines):
textLineData.textIndex = lineStart
textLineData.textLine.text = lines[i]
lineStart += len(lines[i])
else:
textLineData.textLine.text = ""
textLineData.textIndex = (i - len(lines)) * self.w + lineStart
textLineData.textLine._render()
textLineData.textLine.draw(self.bufferCanvas, (self.xOffset, self.yOffset))
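# Minimal usage sketch (hypothetical values; assumes the usual GameObject canvas
# setup from the rest of the package, which is not shown here):
#
#   box = Box(0, 0, 20, 4)                     # x, y, width, height
#   textBox = TextBox(box, "Hello, world!", (255, 255, 255), (0, 0, 0), justify="C")
#   textBox.render()                           # wraps the text across the four TextLines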
|
the-stack_106_22805 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import io
import os
import tempfile
import unittest
import zipfile
from datetime import datetime
from xml.dom import minidom
from trac.admin.api import console_datetime_format
from trac.admin.console import TracAdmin
from trac.admin.test import TracAdminTestCaseBase
from trac.attachment import Attachment, AttachmentModule, \
IAttachmentChangeListener, LegacyAttachmentPolicy
from trac.core import Component, ComponentMeta, implements, TracError
from trac.perm import IPermissionPolicy, PermissionCache
from trac.resource import IResourceManager, Resource, resource_exists
from trac.test import EnvironmentStub, Mock, MockRequest, mkdtemp
from trac.util.datefmt import format_datetime, to_utimestamp, utc
from trac.web.api import HTTPBadRequest, RequestDone
from trac.web.chrome import Chrome
hashes = {
'42': '92cfceb39d57d914ed8b14d0e37643de0797ae56',
'Foo.Mp3': '95797b6eb253337ff2c54e0881e2b747ec394f51',
'SomePage': 'd7e80bae461ca8568e794792f5520b603f540e06',
'Teh bar.jpg': 'ed9102c4aa099e92baf1073f824d21c5e4be5944',
'Teh foo.txt': 'ab97ba98d98fcf72b92e33a66b07077010171f70',
'bar.7z': '6c9600ad4d59ac864e6f0d2030c1fc76b4b406cb',
'bar.jpg': 'ae0faa593abf2b6f8871f6f32fe5b28d1c6572be',
'foo.$$$': 'eefc6aa745dbe129e8067a4a57637883edd83a8a',
'foo.2.txt': 'a8fcfcc2ef4e400ee09ae53c1aabd7f5a5fda0c7',
'foo.txt': '9206ac42b532ef8e983470c251f4e1a365fd636c',
u'bar.aäc': '70d0e3b813fdc756602d82748719a3ceb85cbf29',
u'ÜberSicht': 'a16c6837f6d3d2cc3addd68976db1c55deb694c8',
}
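# The values above appear to be SHA-1 digests of the parent id / filename: the
# Attachment.path assertions below rebuild each path from hashes[parent_id] and
# hashes[filename]. Illustrative check (assumption, not part of the test suite):
#
#   import hashlib
#   hashlib.sha1(b'42').hexdigest()   # expected to equal hashes['42']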
class TicketOnlyViewsTicket(Component):
implements(IPermissionPolicy)
def check_permission(self, action, username, resource, perm):
if action.startswith('TICKET_'):
return resource.realm == 'ticket'
else:
return None
class ResourceManagerStub(Component):
"""Fake implementation of IResourceManager."""
implements(IResourceManager)
def get_resource_realms(self):
yield 'parent_realm'
def get_resource_url(self, resource, href, **kwargs):
return href(resource.realm, resource.id, version=resource.version)
def get_resource_description(self, resource, format='default',
context=None, **kwargs):
pass
def resource_exists(self, resource):
return resource.id == 'parent_id'
class AttachmentTestCase(unittest.TestCase):
attachment_change_listeners = []
@classmethod
def setUpClass(cls):
class AttachmentChangeListener(Component):
implements(IAttachmentChangeListener)
def __init__(self):
self.added_call_count = 0
self.deleted_call_count = 0
self.moved_call_count = 0
self.reparented_call_count = 0
self.moved_old_parent_realm = None
self.moved_old_parent_id = None
self.moved_old_filename = None
self.reparented_old_parent_realm = None
self.reparented_old_parent_id = None
def attachment_added(self, attachment):
self.added_call_count += 1
def attachment_deleted(self, attachment):
self.deleted_call_count += 1
def attachment_moved(self, attachment, old_parent_realm,
old_parent_id, old_filename):
self.moved_call_count += 1
self.moved_old_parent_realm = old_parent_realm
self.moved_old_parent_id = old_parent_id
self.moved_old_filename = old_filename
class LegacyChangeListener(Component):
implements(IAttachmentChangeListener)
def __init__(self):
self.added_called = 0
self.deleted_called = 0
def attachment_added(self, attachment):
self.added_called += 1
def attachment_deleted(self, attachment):
self.deleted_called += 1
cls.attachment_change_listeners = [AttachmentChangeListener,
LegacyChangeListener]
@classmethod
def tearDownClass(cls):
for listener in cls.attachment_change_listeners:
ComponentMeta.deregister(listener)
def setUp(self):
self.env = EnvironmentStub(enable=('trac.*', TicketOnlyViewsTicket))
self.env.path = mkdtemp()
self.env.config.set('trac', 'permission_policies',
'TicketOnlyViewsTicket, LegacyAttachmentPolicy')
self.env.config.set('attachment', 'max_size', 512)
self.perm = PermissionCache(self.env)
self.datetime = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
with self.env.db_transaction as db:
db("INSERT INTO wiki (name,version) VALUES ('WikiStart',1)")
db("INSERT INTO wiki (name,version) VALUES ('SomePage',1)")
db("INSERT INTO ticket (id) VALUES (42)")
db("INSERT INTO ticket (id) VALUES (43)")
db("INSERT INTO attachment VALUES (%s,%s,%s,%s,%s,%s,%s)",
('ticket', '43', 'foo.txt', 8, to_utimestamp(self.datetime),
'A comment', 'joe'))
def tearDown(self):
self.env.reset_db_and_disk()
def test_new_attachment(self):
attachment = Attachment(self.env, 'ticket', 42)
self.assertIsNone(attachment.filename)
self.assertIsNone(attachment.description)
self.assertIsNone(attachment.size)
self.assertIsNone(attachment.date)
self.assertIsNone(attachment.author)
self.assertEqual('<Attachment None>', repr(attachment))
def test_existing_attachment(self):
attachment = Attachment(self.env, 'ticket', 43, 'foo.txt')
self.assertEqual('foo.txt', attachment.filename)
self.assertEqual('A comment', attachment.description)
self.assertEqual(8, attachment.size)
self.assertEqual(self.datetime, attachment.date)
self.assertEqual('joe', attachment.author)
self.assertEqual("<Attachment u'foo.txt'>", repr(attachment))
def test_existing_attachment_from_resource(self):
resource = Resource('ticket', 43).child('attachment', 'foo.txt')
attachment = Attachment(self.env, resource)
self.assertEqual('foo.txt', attachment.filename)
self.assertEqual('A comment', attachment.description)
self.assertEqual(8, attachment.size)
self.assertEqual(self.datetime, attachment.date)
self.assertEqual('joe', attachment.author)
self.assertEqual("<Attachment u'foo.txt'>", repr(attachment))
def test_get_path(self):
attachment = Attachment(self.env, 'ticket', 42)
attachment.filename = 'foo.txt'
self.assertEqual(os.path.join(self.env.attachments_dir, 'ticket',
hashes['42'][0:3], hashes['42'],
hashes['foo.txt'] + '.txt'),
attachment.path)
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.filename = 'bar.jpg'
self.assertEqual(os.path.join(self.env.attachments_dir, 'wiki',
hashes['SomePage'][0:3],
hashes['SomePage'],
hashes['bar.jpg'] + '.jpg'),
attachment.path)
def test_path_extension(self):
attachment = Attachment(self.env, 'ticket', 42)
attachment.filename = 'Foo.Mp3'
self.assertEqual(os.path.join(self.env.attachments_dir, 'ticket',
hashes['42'][0:3], hashes['42'],
hashes['Foo.Mp3'] + '.Mp3'),
attachment.path)
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.filename = 'bar.7z'
self.assertEqual(os.path.join(self.env.attachments_dir, 'wiki',
hashes['SomePage'][0:3],
hashes['SomePage'],
hashes['bar.7z'] + '.7z'),
attachment.path)
attachment = Attachment(self.env, 'ticket', 42)
attachment.filename = 'foo.$$$'
self.assertEqual(os.path.join(self.env.attachments_dir, 'ticket',
hashes['42'][0:3], hashes['42'],
hashes['foo.$$$']),
attachment.path)
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.filename = u'bar.aäc'
self.assertEqual(os.path.join(self.env.attachments_dir, 'wiki',
hashes['SomePage'][0:3],
hashes['SomePage'],
hashes[u'bar.aäc']),
attachment.path)
def test_get_path_encoded(self):
attachment = Attachment(self.env, 'ticket', 42)
attachment.filename = 'Teh foo.txt'
self.assertEqual(os.path.join(self.env.attachments_dir, 'ticket',
hashes['42'][0:3], hashes['42'],
hashes['Teh foo.txt'] + '.txt'),
attachment.path)
attachment = Attachment(self.env, 'wiki', u'ÜberSicht')
attachment.filename = 'Teh bar.jpg'
self.assertEqual(os.path.join(self.env.attachments_dir, 'wiki',
hashes[u'ÜberSicht'][0:3],
hashes[u'ÜberSicht'],
hashes['Teh bar.jpg'] + '.jpg'),
attachment.path)
def test_select_empty(self):
with self.assertRaises(StopIteration):
next(Attachment.select(self.env, 'ticket', 42))
with self.assertRaises(StopIteration):
next(Attachment.select(self.env, 'wiki', 'SomePage'))
def test_insert(self):
attachment = Attachment(self.env, 'ticket', 42)
attachment.insert('foo.txt', io.BytesIO(), 0, 1)
attachment = Attachment(self.env, 'ticket', 42)
attachment.insert('bar.jpg', io.BytesIO(), 0, 2)
attachments = Attachment.select(self.env, 'ticket', 42)
self.assertEqual('foo.txt', next(attachments).filename)
self.assertEqual('bar.jpg', next(attachments).filename)
with self.assertRaises(StopIteration):
next(attachments)
def test_insert_unique(self):
attachment = Attachment(self.env, 'ticket', 42)
attachment.insert('foo.txt', io.BytesIO(), 0)
self.assertEqual('foo.txt', attachment.filename)
attachment = Attachment(self.env, 'ticket', 42)
attachment.insert('foo.txt', io.BytesIO(), 0)
self.assertEqual('foo.2.txt', attachment.filename)
self.assertEqual(os.path.join(self.env.attachments_dir, 'ticket',
hashes['42'][0:3], hashes['42'],
hashes['foo.2.txt'] + '.txt'),
attachment.path)
self.assertTrue(os.path.exists(attachment.path))
def test_insert_outside_attachments_dir(self):
attachment = Attachment(self.env, '../../../../../sth/private', 42)
with self.assertRaises(TracError):
attachment.insert('foo.txt', io.BytesIO(), 0)
def test_delete(self):
attachment1 = Attachment(self.env, 'wiki', 'SomePage')
attachment1.insert('foo.txt', io.BytesIO(), 0)
attachment2 = Attachment(self.env, 'wiki', 'SomePage')
attachment2.insert('bar.jpg', io.BytesIO(), 0)
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(2, len(list(attachments)))
attachment1.delete()
attachment2.delete()
self.assertFalse(os.path.exists(attachment1.path))
self.assertFalse(os.path.exists(attachment2.path))
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(0, len(list(attachments)))
def test_delete_file_gone(self):
"""
Verify that deleting an attachment works even if the referenced file
doesn't exist for some reason.
"""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
os.unlink(attachment.path)
attachment.delete()
def test_rename(self):
"""Rename an attachment."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
original_path = attachment.path
self.assertTrue(os.path.exists(original_path))
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(1, len(list(attachments)))
attachment.move(new_filename='bar.txt')
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(1, len(list(attachments)))
self.assertEqual('wiki', attachment.parent_realm)
self.assertEqual('SomePage', attachment.parent_id)
self.assertEqual('bar.txt', attachment.filename)
self.assertFalse(os.path.exists(original_path))
self.assertTrue(os.path.exists(attachment.path))
def test_move_nonexistent_attachment_raises(self):
"""TracError is raised when moving a non-existent attachment."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
with self.assertRaises(TracError) as cm:
attachment.move(attachment.parent_realm, attachment.parent_id,
attachment.filename)
self.assertEqual("Cannot rename non-existent attachment",
unicode(cm.exception))
def test_move_attachment_not_modified_raises(self):
"""TracError is raised when attachment not modified on move."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
with self.assertRaises(TracError) as cm:
attachment.move(attachment.parent_realm, attachment.parent_id,
attachment.filename)
self.assertEqual("Attachment not modified", unicode(cm.exception))
def test_move_attachment_to_nonexistent_resource_raises(self):
"""TracError is raised moving an attachment to nonexistent resource
"""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
with self.assertRaises(TracError) as cm:
attachment.move('wiki', 'NonExistentPage')
self.assertEqual("NonExistentPage doesn't exist, can't move attachment",
unicode(cm.exception))
def test_move_attachment_to_existing_path_raises(self):
"""TracError is raised if target already exists"""
attachment1 = Attachment(self.env, 'wiki', 'SomePage')
attachment1.insert('foo.txt', io.BytesIO(), 0)
attachment2 = Attachment(self.env, 'wiki', 'SomePage')
attachment2.insert('bar.txt', io.BytesIO(), 0)
with self.assertRaises(TracError) as cm:
attachment1.move(new_filename=attachment2.filename)
self.assertEqual('Cannot move attachment "foo.txt" to "wiki:SomePage: '
'bar.txt" as it already exists', unicode(cm.exception))
def test_attachment_change_listeners_called(self):
"""The move method calls attachment change listeners"""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
attachment.move(new_realm='ticket', new_id=42)
attachment.delete()
modern_listener = self.attachment_change_listeners[0](self.env)
self.assertEqual(1, modern_listener.added_call_count)
self.assertEqual(1, modern_listener.deleted_call_count)
self.assertEqual(1, modern_listener.moved_call_count)
self.assertEqual('wiki', modern_listener.moved_old_parent_realm)
self.assertEqual('SomePage', modern_listener.moved_old_parent_id)
self.assertEqual('foo.txt', modern_listener.moved_old_filename)
legacy_listener = self.attachment_change_listeners[0](self.env)
self.assertEqual(1, legacy_listener.added_call_count)
self.assertEqual(1, legacy_listener.deleted_call_count)
def test_attachment_reparented_not_called_on_rename(self):
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
attachment.move(new_filename='bar.txt')
modern_listener = self.attachment_change_listeners[0](self.env)
self.assertEqual(1, modern_listener.moved_call_count)
self.assertEqual(0, modern_listener.reparented_call_count)
def test_reparent(self):
"""Change the parent realm and parent id of an attachment
"""
attachment1 = Attachment(self.env, 'wiki', 'SomePage')
attachment1.insert('foo.txt', io.BytesIO(), 0)
path1 = attachment1.path
attachment2 = Attachment(self.env, 'wiki', 'SomePage')
attachment2.insert('bar.jpg', io.BytesIO(), 0)
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(2, len(list(attachments)))
attachments = Attachment.select(self.env, 'ticket', 42)
self.assertEqual(0, len(list(attachments)))
self.assertTrue(os.path.exists(path1) and os.path.exists(attachment2.path))
attachment1.move('ticket', 42)
self.assertEqual('ticket', attachment1.parent_realm)
self.assertEqual('ticket', attachment1.resource.parent.realm)
self.assertEqual('42', attachment1.parent_id)
self.assertEqual('42', attachment1.resource.parent.id)
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(1, len(list(attachments)))
attachments = Attachment.select(self.env, 'ticket', 42)
self.assertEqual(1, len(list(attachments)))
self.assertFalse(os.path.exists(path1) and os.path.exists(attachment1.path))
self.assertTrue(os.path.exists(attachment2.path))
def test_reparent_all_to_unknown_realm(self):
"""TracError is raised when reparenting an attachment unknown realm
"""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('bar.txt', io.BytesIO(), 0)
with self.assertRaises(TracError) as cm:
Attachment.reparent_all(self.env, 'wiki', 'SomePage',
'unknown_realm', 'UnknownId')
self.assertEqual("unknown_realm doesn't exist, can't move attachment",
unicode(cm.exception))
def test_reparent_all(self):
"""Change the parent realm and parent id of multiple attachments.
"""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('bar.txt', io.BytesIO(), 0)
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(2, len(list(attachments)))
attachments = Attachment.select(self.env, 'wiki', 'WikiStart')
self.assertEqual(0, len(list(attachments)))
Attachment.reparent_all(self.env, 'wiki', 'SomePage',
'wiki', 'WikiStart')
attachments = Attachment.select(self.env, 'wiki', 'SomePage')
self.assertEqual(0, len(list(attachments)))
attachments = Attachment.select(self.env, 'wiki', 'WikiStart')
self.assertEqual(2, len(list(attachments)))
def test_legacy_permission_on_parent(self):
"""Ensure that legacy action tests are done on parent. As
`ATTACHMENT_VIEW` maps to `TICKET_VIEW`, the `TICKET_VIEW` is tested
against the ticket's resource."""
attachment = Attachment(self.env, 'ticket', 42)
self.assertIn('ATTACHMENT_VIEW', self.perm(attachment.resource))
def test_resource_exists(self):
att = Attachment(self.env, 'wiki', 'WikiStart')
att.insert('file.txt', io.BytesIO(), 1)
self.assertTrue(resource_exists(self.env, att.resource))
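# Typical Attachment lifecycle exercised by the tests above (sketch; `env` stands
# for a configured Environment such as the EnvironmentStub built in setUp):
#
#   att = Attachment(env, 'ticket', 42)
#   att.insert('foo.txt', io.BytesIO(), 0)           # stores the file on disk
#   list(Attachment.select(env, 'ticket', 42))       # -> [<Attachment u'foo.txt'>]
#   att.move(new_realm='wiki', new_id='WikiStart')   # reparent and/or rename
#   att.delete()                                     # removes the row and the file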
class AttachmentModuleTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=('trac.*', ResourceManagerStub,))
self.env.path = mkdtemp()
def tearDown(self):
self.env.reset_db_and_disk()
def test_invalid_post_request_raises_exception(self):
path_info = '/attachment/parent_realm/parent_id/attachment_id'
attachment = Attachment(self.env, 'parent_realm', 'parent_id')
attachment.insert('attachment_id', io.BytesIO(), 0, 1)
req = MockRequest(self.env, method='POST', action=None,
path_info=path_info)
module = AttachmentModule(self.env)
self.assertTrue(module.match_request(req))
with self.assertRaises(HTTPBadRequest):
module.process_request(req)
def test_post_request_without_attachment_raises_exception(self):
"""TracError is raised for POST request with no file."""
path_info = '/attachment/parent_realm/parent_id'
req = MockRequest(self.env, path_info=path_info, method='POST',
args={'action': 'new'})
module = AttachmentModule(self.env)
self.assertTrue(module.match_request(req))
with self.assertRaises(TracError) as cm:
module.process_request(req)
self.assertEqual("No file uploaded", unicode(cm.exception))
def test_post_request_with_empty_attachment_raises_exception(self):
"""TracError is raised for POST request with empty file."""
module = AttachmentModule(self.env)
path_info = '/attachment/parent_realm/parent_id'
with tempfile.NamedTemporaryFile('rb', dir=self.env.path) as file_:
upload = Mock(filename=file_.name, file=file_)
req = MockRequest(self.env, path_info=path_info, method='POST',
args={'action': 'new', 'attachment': upload})
self.assertTrue(module.match_request(req))
with self.assertRaises(TracError) as cm:
module.process_request(req)
self.assertEqual("Can't upload empty file", unicode(cm.exception))
def test_post_request_exceeding_max_size_raises_exception(self):
"""TracError is raised for file exceeding max size"""
self.env.config.set('attachment', 'max_size', 10)
module = AttachmentModule(self.env)
path_info = '/attachment/parent_realm/parent_id'
with tempfile.NamedTemporaryFile('w+b', dir=self.env.path) as file_:
file_.write(b' ' * (module.max_size + 1))
file_.flush()
upload = Mock(filename=file_.name, file=file_)
req = MockRequest(self.env, path_info=path_info, method='POST',
args={'action': 'new', 'attachment': upload})
self.assertTrue(module.match_request(req))
with self.assertRaises(TracError) as cm:
module.process_request(req)
self.assertEqual("Maximum attachment size: 10 bytes",
unicode(cm.exception))
def test_attachment_parent_realm_raises_exception(self):
"""TracError is raised when 'attachment' is the resource parent
realm.
"""
path_info = '/attachment/attachment/parent_id/attachment_id'
req = MockRequest(self.env, path_info=path_info)
module = AttachmentModule(self.env)
self.assertTrue(module.match_request(req))
with self.assertRaises(TracError):
module.process_request(req)
def test_resource_doesnt_exist(self):
"""Non-existent resource returns False from resource_exists."""
parent = Resource('parent_realm', 'parent_id')
self.assertTrue(resource_exists(self.env, parent))
r = parent.child('attachment', 'file.txt')
self.assertFalse(resource_exists(self.env, r))
def test_download_zip(self):
att = Attachment(self.env, 'parent_realm', 'parent_id')
att.description = 'Blah blah'
att.insert('foo.txt', io.BytesIO('foo'), 3,
datetime(2016, 9, 23, 12, 34, 56, tzinfo=utc))
att = Attachment(self.env, 'parent_realm', 'parent_id')
att.insert('bar.jpg', io.BytesIO('bar'), 3,
datetime(2016, 12, 14, 23, 56, 30, tzinfo=utc))
module = AttachmentModule(self.env)
req = MockRequest(self.env, args={'format': 'zip'},
path_info='/attachment/parent_realm/parent_id/')
self.assertTrue(module.match_request(req))
self.assertRaises(RequestDone, module.process_request, req)
z = zipfile.ZipFile(req.response_sent, 'r')
self.assertEqual(['bar.jpg', 'foo.txt'],
sorted(i.filename for i in z.infolist()))
zinfo = z.getinfo('foo.txt')
self.assertEqual('foo', z.read('foo.txt'))
self.assertEqual(3, zinfo.file_size)
self.assertEqual((2016, 9, 23, 12, 34, 56), zinfo.date_time)
self.assertEqual('Blah blah', zinfo.comment)
zinfo = z.getinfo('bar.jpg')
self.assertEqual('bar', z.read('bar.jpg'))
self.assertEqual(3, zinfo.file_size)
self.assertEqual((2016, 12, 14, 23, 56, 30), zinfo.date_time)
self.assertEqual('', zinfo.comment)
def test_preview_valid_xhtml(self):
chrome = Chrome(self.env)
module = AttachmentModule(self.env)
def render(attachment):
path_info = '/attachment/%s/%s/%s' % (attachment.parent_realm,
attachment.parent_id,
attachment.filename)
req = MockRequest(self.env, path_info=path_info)
self.assertTrue(module.match_request(req))
template, data = module.process_request(req)
return chrome.render_template(req, template, data,
{'fragment': True})
# empty file
attachment = Attachment(self.env, 'parent_realm', 'parent_id')
attachment.insert('empty', io.BytesIO(), 0, 1)
result = render(attachment)
self.assertIn('<strong>(The file is empty)</strong>', result)
xml = minidom.parseString(result)
# text file
attachment = Attachment(self.env, 'parent_realm', 'parent_id')
attachment.insert('foo.txt', io.BytesIO(b'text'), 4, 1)
result = render(attachment)
self.assertIn('<tr><th id="L1"><a href="#L1">1</a></th>'
'<td>text</td></tr>', result)
xml = minidom.parseString(result)
# preview unavailable
attachment = Attachment(self.env, 'parent_realm', 'parent_id')
attachment.insert('foo.dat', io.BytesIO(b'\x00\x00\x01\xb3'), 4, 1)
result = render(attachment)
self.assertIn('<strong>HTML preview not available</strong>', result)
xml = minidom.parseString(result)
class LegacyAttachmentPolicyTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(enable=('trac.attachment.*', 'trac.perm.*',
ResourceManagerStub),
path=mkdtemp())
self.env.config.set('trac', 'permission_policies',
'DefaultPermissionPolicy,LegacyAttachmentPolicy')
self.policy = LegacyAttachmentPolicy(self.env)
def tearDown(self):
self.env.reset_db_and_disk()
def _insert_attachment(self, author):
parent_resource = Resource('parent_realm', 'parent_id')
att = Attachment(self.env, 'parent_realm', 'parent_id')
att.author = author
att.insert('file.txt', io.BytesIO(), 1)
return Resource('attachment', 'file.txt', parent=parent_resource)
def test_authenticated_can_delete_own_attachments(self):
"""Authenticated user can delete their own attachments."""
resource = self._insert_attachment(author='user1')
perm_cache = PermissionCache(self.env, 'user1', resource)
action = 'ATTACHMENT_DELETE'
self.assertIn(action, perm_cache)
self.assertTrue(self.policy.check_permission(
action, perm_cache.username, resource, perm_cache))
def test_authenticated_cannot_delete_other_attachments(self):
"""Authenticated user cannot delete other attachments."""
resource = self._insert_attachment(author='user1')
perm_cache = PermissionCache(self.env, 'user2', resource)
action = 'ATTACHMENT_DELETE'
self.assertNotIn(action, perm_cache)
self.assertIsNone(self.policy.check_permission(
action, perm_cache.username, resource, perm_cache))
def test_anonymous_cannot_delete_attachments(self):
"""Anonymous user cannot delete attachments."""
resource = self._insert_attachment(author='anonymous')
perm_cache = PermissionCache(self.env, 'anonymous', resource)
action = 'ATTACHMENT_DELETE'
self.assertNotIn(action, perm_cache)
self.assertIsNone(self.policy.check_permission(
action, perm_cache.username, resource, perm_cache))
class TracAdminTestCase(TracAdminTestCaseBase):
"""
Tests the output of trac-admin and is meant to be used with
.../trac/tests.py.
"""
expected_results_filename = 'attachment-console-tests.txt'
def setUp(self):
self.env = EnvironmentStub(default_data=True, enable=('trac.*',),
disable=('trac.tests.*',))
self.env.path = mkdtemp()
self.admin = TracAdmin()
self.admin.env_set('', self.env)
self.datetime = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
with self.env.db_transaction as db:
db("INSERT INTO wiki (name,version) VALUES ('WikiStart',1)")
db("INSERT INTO wiki (name,version) VALUES ('SomePage',1)")
db("INSERT INTO ticket (id) VALUES (42)")
db("INSERT INTO ticket (id) VALUES (43)")
db("INSERT INTO attachment VALUES (%s,%s,%s,%s,%s,%s,%s)",
('ticket', '43', 'foo.txt', 8, to_utimestamp(self.datetime),
'A comment', 'joe'))
def tearDown(self):
self.env.reset_db_and_disk()
def test_attachment_list(self):
"""Attachment list command."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
rv, output = self.execute('attachment list wiki:SomePage')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'date': format_datetime(attachment.date, console_datetime_format)
})
def test_attachment_list_empty(self):
"""Attachment list command with no output."""
rv, output = self.execute('attachment list wiki:WikiStart')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output)
def test_attachment_add_nonexistent_resource(self):
"""Error raised when adding an attachment to a non-existent resource.
"""
rv, output = self.execute('attachment add wiki:NonExistentPage "%s"'
% __file__)
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_attachment_rename(self):
"""Rename attachment."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
rv, output = self.execute('attachment move wiki:SomePage foo.txt '
'wiki:SomePage bar.txt')
self.assertEqual(0, rv, output)
self.assertEqual('', output)
rv, output = self.execute('attachment list wiki:SomePage')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'date': format_datetime(attachment.date, console_datetime_format)
})
def test_attachment_reparent(self):
"""Reparent attachment to another resource."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
rv, output = self.execute('attachment move wiki:SomePage foo.txt '
'wiki:WikiStart foo.txt')
self.assertEqual(0, rv, output)
self.assertEqual('', output)
rv, output = self.execute('attachment list wiki:SomePage')
self.assertEqual(0, rv, output)
rv, output = self.execute('attachment list wiki:WikiStart')
self.assertEqual(0, rv, output)
self.assertExpectedResult(output, {
'date': format_datetime(attachment.date, console_datetime_format)
})
def test_attachment_move_nonexistent_resource(self):
"""Error raised when reparenting attachment to another resource."""
attachment = Attachment(self.env, 'wiki', 'SomePage')
attachment.insert('foo.txt', io.BytesIO(), 0)
rv, output = self.execute('attachment move wiki:SomePage foo.txt '
'wiki:NonExistentPage foo.txt')
self.assertEqual(2, rv, output)
self.assertExpectedResult(output)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AttachmentTestCase))
suite.addTest(unittest.makeSuite(AttachmentModuleTestCase))
suite.addTest(unittest.makeSuite(LegacyAttachmentPolicyTestCase))
suite.addTest(unittest.makeSuite(TracAdminTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
the-stack_106_22807 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sentiment', '0004_auto_20160130_2357'),
]
operations = [
migrations.AlterField(
model_name='tweetgroup',
name='sentiment',
field=models.DecimalField(default=0.0, max_digits=5, decimal_places=5),
),
]
|
the-stack_106_22808 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from nova import context
from nova import objects
from nova.tests.unit import fake_flavor
INSTANCE_NUMA_TOPOLOGY = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=512)])
INSTANCE_NUMA_TOPOLOGY.obj_reset_changes(recursive=True)
IMAGE_META = objects.ImageMeta.from_dict(
{'status': 'active',
'container_format': 'bare',
'min_ram': 0,
'updated_at': '2014-12-12T11:16:36.000000',
'min_disk': 0,
'owner': '2d8b9502858c406ebee60f0849486222',
'protected': 'yes',
'properties': {
'os_type': 'Linux',
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
'hw_scsi_model': 'virtio-scsi',
},
'size': 213581824,
'name': 'f16-x86_64-openstack-sda',
'checksum': '755122332caeb9f661d5c978adb8b45f',
'created_at': '2014-12-10T16:23:14.000000',
'disk_format': 'qcow2',
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
}
)
IMAGE_META.obj_reset_changes(recursive=True)
PCI_REQUESTS = objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(count=1),
objects.InstancePCIRequest(count=2)])
PCI_REQUESTS.obj_reset_changes(recursive=True)
def fake_db_spec():
req_obj = fake_spec_obj()
db_request_spec = {
'id': 1,
'instance_uuid': req_obj.instance_uuid,
'spec': jsonutils.dumps(req_obj.obj_to_primitive()),
}
return db_request_spec
def fake_spec_obj(remove_id=False):
ctxt = context.RequestContext('fake', 'fake')
req_obj = objects.RequestSpec(ctxt)
if not remove_id:
req_obj.id = 42
req_obj.instance_uuid = uuidutils.generate_uuid()
req_obj.image = IMAGE_META
req_obj.numa_topology = INSTANCE_NUMA_TOPOLOGY
req_obj.pci_requests = PCI_REQUESTS
req_obj.flavor = fake_flavor.fake_flavor_obj(ctxt)
req_obj.retry = objects.SchedulerRetries()
req_obj.limits = objects.SchedulerLimits()
req_obj.instance_group = objects.InstanceGroup()
req_obj.project_id = 'fake'
req_obj.num_instances = 1
req_obj.availability_zone = None
req_obj.ignore_hosts = ['host2', 'host4']
req_obj.force_hosts = ['host1', 'host3']
req_obj.force_nodes = ['node1', 'node2']
req_obj.scheduler_hints = {'hint': ['over-there']}
req_obj.requested_destination = None
# This should never be a changed field
req_obj.obj_reset_changes(['id'])
return req_obj
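# A minimal usage sketch (the assertions below are illustrative, not part of this
# module): fake_spec_obj() feeds code that expects a RequestSpec object, while
# fake_db_spec() returns the same spec serialized the way the DB layer stores it.
#
#   spec = fake_spec_obj()
#   assert spec.id == 42 and spec.num_instances == 1
#   db_row = fake_db_spec()   # dict with 'id', 'instance_uuid' and a JSON 'spec' blob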
|
the-stack_106_22810 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test defid shutdown."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(DefiTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
the-stack_106_22811 | load("@fbcode_macros//build_defs/lib:visibility.bzl", "get_visibility")
load("@fbsource//tools/build_defs:fb_native_wrapper.bzl", "fb_native")
def _java_protoc_compile_srcs(
name,
srcs,
generated_src_zip_rule_name):
cmd = (
"include=; for s in `dirname $SRCS|sort|uniq`; do include=\"$include -I$s\"; done; " +
"java -cp $(classpath //third-party-java/com.github.os72:protoc-jar) " +
"com.github.os72.protocjar.Protoc -v3.5.0 --include_std_types --java_out=$OUT $include $SRCS"
)
fb_native.genrule(
name = generated_src_zip_rule_name,
srcs = srcs,
out = name + ".src.zip",
cmd = cmd,
)
def _java_protoc_create_library(
name,
generated_src_zip_rule_name,
visibility):
fb_native.java_library(
name = name,
srcs = [":" + generated_src_zip_rule_name],
exported_deps = ["//third-party-java/com.google.protobuf:protobuf-java"],
visibility = get_visibility(visibility, name),
)
def java_protoc_library(
name,
srcs,
visibility = None):
"""
Creates a java jar from protoc sources
Args:
name: The name of the main java_library rule to be created
srcs: A list of protoc sources
visibility: Visibility for the main java_ibrary rule
"""
generated_src_zip_rule_name = name + "_src_zip"
_java_protoc_compile_srcs(name, srcs, generated_src_zip_rule_name)
_java_protoc_create_library(name, generated_src_zip_rule_name, visibility)
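# A minimal usage sketch (the target name and proto files are hypothetical):
#
#   java_protoc_library(
#       name = "events_java_proto",
#       srcs = ["events.proto", "common.proto"],
#       visibility = ["PUBLIC"],
#   )
#
# This defines an "events_java_proto_src_zip" genrule holding the generated Java
# sources and an "events_java_proto" java_library that exports protobuf-java.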
|
the-stack_106_22813 | import datetime
import uuid
from unittest.mock import Mock, patch
import pytest
from flask import get_flashed_messages, url_for
from tests.factories import *
from tests.mock_azure import mock_azure
from tests.utils import captured_templates
from werkzeug.datastructures import ImmutableMultiDict
from atst.database import db
from atst.domain.application_roles import ApplicationRoles
from atst.domain.applications import Applications
from atst.domain.common import Paginator
from atst.domain.csp.cloud.azure_cloud_provider import AzureCloudProvider
from atst.domain.csp.cloud.exceptions import GeneralCSPException
from atst.domain.csp.cloud.models import SubscriptionCreationCSPResult
from atst.domain.environment_roles import EnvironmentRoles
from atst.domain.invitations import ApplicationInvitations
from atst.domain.permission_sets import PermissionSets
from atst.forms.application import EditEnvironmentForm
from atst.forms.application_member import UpdateMemberForm
from atst.forms.data import ENV_ROLE_NO_ACCESS as NO_ACCESS
from atst.models.application_role import Status as ApplicationRoleStatus
from atst.models.environment_role import CSPRole, EnvironmentRole
from atst.models.permissions import Permissions
from atst.routes.applications.settings import (
filter_env_roles_data,
filter_env_roles_form_data,
get_environments_obj_for_app,
handle_create_member,
handle_update_member,
)
def test_updating_application_environments_success(client, user_session):
portfolio = PortfolioFactory.create()
application = ApplicationFactory.create(portfolio=portfolio)
environment = EnvironmentFactory.create(application=application)
user_session(portfolio.owner)
form_data = {"name": "new name a"}
response = client.post(
url_for("applications.update_environment", environment_id=environment.id),
data=form_data,
)
assert response.status_code == 302
assert response.location == url_for(
"applications.settings",
application_id=application.id,
_external=True,
fragment="application-environments",
_anchor="application-environments",
)
assert environment.name == "new name a"
def test_update_environment_failure(client, user_session):
portfolio = PortfolioFactory.create()
application = ApplicationFactory.create(portfolio=portfolio)
environment = EnvironmentFactory.create(
application=application, name="original name"
)
user_session(portfolio.owner)
form_data = {"name": ""}
response = client.post(
url_for("applications.update_environment", environment_id=environment.id),
data=form_data,
)
assert response.status_code == 400
assert environment.name == "original name"
def test_enforces_unique_env_name(client, user_session, session):
application = ApplicationFactory.create()
user = application.portfolio.owner
name = "New Environment"
environment = EnvironmentFactory.create(application=application, name=name)
form_data = {"name": name}
user_session(user)
session.begin_nested()
response = client.post(
url_for("applications.new_environment", application_id=application.id),
data=form_data,
)
session.rollback()
assert response.status_code == 400
def test_application_settings(client, user_session):
portfolio = PortfolioFactory.create()
application = Applications.create(
portfolio.owner,
portfolio,
"Snazzy Application",
"A new application for me and my friends",
{"env1", "env2"},
)
user_session(portfolio.owner)
response = client.get(
url_for("applications.settings", application_id=application.id)
)
assert response.status_code == 200
# the assertion below is a quick check to prevent regressions -- this ensures that
# the correct URL for creating a member for an application is _somewhere_ in
# the settings page.
assert (
url_for("applications.create_member", application_id=application.id)
in response.data.decode()
)
def test_edit_application_environments_obj(app, client, user_session):
portfolio = PortfolioFactory.create()
application = Applications.create(
portfolio.owner,
portfolio,
"Snazzy Application",
"A new application for me and my friends",
{"env"},
)
env = application.environments[0]
app_role1 = ApplicationRoleFactory.create(application=application)
env_role1 = EnvironmentRoleFactory.create(
application_role=app_role1, environment=env, role=CSPRole.ADMIN
)
app_role2 = ApplicationRoleFactory.create(application=application, user=None)
env_role2 = EnvironmentRoleFactory.create(
application_role=app_role2, environment=env, role=CSPRole.CONTRIBUTOR
)
user_session(portfolio.owner)
with captured_templates(app) as templates:
response = app.test_client().get(
url_for("applications.settings", application_id=application.id)
)
assert response.status_code == 200
_, context = templates[-1]
env_obj = context["environments_obj"][0]
assert env_obj["name"] == env.name
assert env_obj["id"] == env.id
assert isinstance(env_obj["edit_form"], EditEnvironmentForm)
assert {
"user_name": app_role1.user_name,
"status": env_role1.status.value,
} in env_obj["members"]
assert {
"user_name": app_role2.user_name,
"status": env_role2.status.value,
} in env_obj["members"]
assert isinstance(context["audit_events"], Paginator)
def test_get_environments_obj_for_app(app, client, user_session):
application = ApplicationFactory.create(
environments=[{"name": "Naboo"}, {"name": "Endor"}, {"name": "Hoth"}]
)
environments_obj = get_environments_obj_for_app(application)
assert [environment["name"] for environment in environments_obj] == [
"Endor",
"Hoth",
"Naboo",
]
def test_get_members_data(app, client, user_session):
user = UserFactory.create()
application = ApplicationFactory.create(
environments=[
{
"name": "testing",
"members": [{"user": user, "role_name": CSPRole.ADMIN}],
}
],
)
environment = application.environments[0]
app_role = ApplicationRoles.get(user_id=user.id, application_id=application.id)
env_role = EnvironmentRoles.get(
application_role_id=app_role.id, environment_id=environment.id
)
user_session(application.portfolio.owner)
with captured_templates(app) as templates:
response = app.test_client().get(
url_for("applications.settings", application_id=application.id)
)
assert response.status_code == 200
_, context = templates[-1]
member = context["members"][0]
assert member["role_id"] == app_role.id
assert member["user_name"] == user.full_name
assert member["permission_sets"] == {
"perms_team_mgmt": False,
"perms_env_mgmt": False,
}
assert member["environment_roles"] == [
{
"environment_id": str(environment.id),
"environment_name": environment.name,
"role": env_role.role.value,
}
]
assert member["role_status"]
assert isinstance(member["form"], UpdateMemberForm)
def test_user_with_permission_can_update_application(client, user_session):
owner = UserFactory.create()
portfolio = PortfolioFactory.create(
owner=owner,
applications=[
{
"name": "Awesome Application",
"description": "It's really awesome!",
"environments": [{"name": "dev"}, {"name": "prod"}],
}
],
)
application = portfolio.applications[0]
user_session(owner)
response = client.post(
url_for("applications.update", application_id=application.id),
data={
"name": "Really Cool Application",
"description": "A very cool application.",
},
follow_redirects=True,
)
assert response.status_code == 200
assert application.name == "Really Cool Application"
assert application.description == "A very cool application."
def test_user_without_permission_cannot_update_application(client, user_session):
dev = UserFactory.create()
owner = UserFactory.create()
portfolio = PortfolioFactory.create(
owner=owner,
members=[{"user": dev, "role_name": "developer"}],
applications=[
{
"name": "Great Application",
"description": "Cool stuff happening here!",
"environments": [{"name": "dev"}, {"name": "prod"}],
}
],
)
application = portfolio.applications[0]
user_session(dev)
response = client.post(
url_for("applications.update", application_id=application.id),
data={"name": "New Name", "description": "A new description."},
follow_redirects=True,
)
assert response.status_code == 404
assert application.name == "Great Application"
assert application.description == "Cool stuff happening here!"
def test_update_application_enforces_unique_name(client, user_session, session):
portfolio = PortfolioFactory.create()
name = "Test Application"
application = ApplicationFactory.create(portfolio=portfolio, name=name)
dupe_application = ApplicationFactory.create(portfolio=portfolio)
user_session(portfolio.owner)
session.begin_nested()
response = client.post(
url_for("applications.update", application_id=dupe_application.id),
data={"name": name, "description": dupe_application.description},
)
session.rollback()
assert response.status_code == 400
def test_user_can_only_access_apps_in_their_portfolio(client, user_session):
portfolio = PortfolioFactory.create()
other_portfolio = PortfolioFactory.create(
applications=[
{
"name": "Awesome Application",
"description": "More cool stuff happening here!",
"environments": [{"name": "dev"}],
}
]
)
other_application = other_portfolio.applications[0]
user_session(portfolio.owner)
# user can't view application edit form
response = client.get(
url_for("applications.settings", application_id=other_application.id)
)
assert response.status_code == 404
# user can't post update application form
time_updated = other_application.time_updated
response = client.post(
url_for("applications.update", application_id=other_application.id),
data={"name": "New Name", "description": "A new description."},
)
assert response.status_code == 404
assert time_updated == other_application.time_updated
def test_new_environment(client, user_session):
user = UserFactory.create()
portfolio = PortfolioFactory(owner=user)
application = ApplicationFactory.create(portfolio=portfolio)
num_envs = len(application.environments)
user_session(user)
response = client.post(
url_for("applications.new_environment", application_id=application.id),
data={"name": "dabea"},
)
assert response.status_code == 302
assert len(application.environments) == num_envs + 1
def test_new_environment_with_bad_data(client, user_session):
user = UserFactory.create()
portfolio = PortfolioFactory(owner=user)
application = ApplicationFactory.create(portfolio=portfolio)
num_envs = len(application.environments)
user_session(user)
response = client.post(
url_for("applications.new_environment", application_id=application.id),
data={"name": None},
)
assert response.status_code == 400
assert len(application.environments) == num_envs
def test_delete_environment(client, user_session):
user = UserFactory.create()
portfolio = PortfolioFactory(owner=user)
application = ApplicationFactory.create(portfolio=portfolio)
environment = EnvironmentFactory.create(application=application)
user_session(user)
response = client.post(
url_for("applications.delete_environment", environment_id=environment.id)
)
# appropriate response and redirect
assert response.status_code == 302
assert response.location == url_for(
"applications.settings",
application_id=application.id,
_anchor="application-environments",
_external=True,
fragment="application-environments",
)
# appropriate flash message
message = get_flashed_messages()[0]
assert "deleted" in message["message"]
assert environment.name in message["message"]
# deletes environment
assert len(application.environments) == 0
def test_create_member(monkeypatch, client, user_session, session):
job_mock = Mock()
monkeypatch.setattr("atst.jobs.send_mail.delay", job_mock)
user = UserFactory.create()
application = ApplicationFactory.create(
environments=[{"name": "Naboo"}, {"name": "Endor"}]
)
env = application.environments[0]
env_1 = application.environments[1]
user_session(application.portfolio.owner)
response = client.post(
url_for("applications.create_member", application_id=application.id),
data={
"user_data-first_name": user.first_name,
"user_data-last_name": user.last_name,
"user_data-dod_id": user.dod_id,
"user_data-email": user.email,
"environment_roles-0-environment_id": str(env.id),
"environment_roles-0-role": "ADMIN",
"environment_roles-0-environment_name": env.name,
"environment_roles-1-environment_id": str(env_1.id),
"environment_roles-1-role": NO_ACCESS,
"environment_roles-1-environment_name": env_1.name,
"perms_env_mgmt": True,
"perms_team_mgmt": True,
},
)
assert response.status_code == 302
expected_url = url_for(
"applications.settings",
application_id=application.id,
fragment="application-members",
_anchor="application-members",
_external=True,
)
assert response.location == expected_url
assert len(application.roles) == 1
environment_roles = application.roles[0].environment_roles
assert len(environment_roles) == 1
assert environment_roles[0].environment == env
invitation = (
session.query(ApplicationInvitation).filter_by(dod_id=user.dod_id).one()
)
assert invitation.role.application == application
assert job_mock.called
def test_remove_member_success(client, user_session):
user = UserFactory.create()
application = ApplicationFactory.create()
application_role = ApplicationRoleFactory.create(application=application, user=user)
user_session(application.portfolio.owner)
response = client.post(
url_for(
"applications.remove_member",
application_id=application.id,
application_role_id=application_role.id,
)
)
assert response.status_code == 302
assert response.location == url_for(
"applications.settings",
_anchor="application-members",
_external=True,
application_id=application.id,
fragment="application-members",
)
def test_remove_new_member_success(client, user_session):
application = ApplicationFactory.create()
application_role = ApplicationRoleFactory.create(application=application, user=None)
user_session(application.portfolio.owner)
response = client.post(
url_for(
"applications.remove_member",
application_id=application.id,
application_role_id=application_role.id,
)
)
assert response.status_code == 302
assert response.location == url_for(
"applications.settings",
_anchor="application-members",
_external=True,
application_id=application.id,
fragment="application-members",
)
def test_remove_member_failure(client, user_session):
user = UserFactory.create()
application = ApplicationFactory.create()
user_session(application.portfolio.owner)
response = client.post(
url_for(
"applications.remove_member",
application_id=application.id,
application_role_id=uuid.uuid4(),
)
)
assert response.status_code == 404
def test_update_member(client, user_session, session):
role = PermissionSets.get(PermissionSets.EDIT_APPLICATION_TEAM)
# create an app role with only edit team perms
app_role = ApplicationRoleFactory.create(permission_sets=[role])
application = app_role.application
env = EnvironmentFactory.create(application=application)
env_1 = EnvironmentFactory.create(application=application)
env_2 = EnvironmentFactory.create(application=application)
# add user to two of the environments: env and env_1
updated_role = EnvironmentRoleFactory.create(
environment=env, application_role=app_role, role=CSPRole.ADMIN
)
suspended_role = EnvironmentRoleFactory.create(
environment=env_1, application_role=app_role, role=CSPRole.ADMIN
)
user_session(application.portfolio.owner)
# update the user's app permissions to have edit team and env perms
# update user's role in env, remove user from env_1, and add user to env_2
response = client.post(
url_for(
"applications.update_member",
application_id=application.id,
application_role_id=app_role.id,
),
data={
"environment_roles-0-environment_id": str(env.id),
"environment_roles-0-role": "CONTRIBUTOR",
"environment_roles-0-environment_name": env.name,
"environment_roles-1-environment_id": str(env_1.id),
"environment_roles-1-environment_name": env_1.name,
"environment_roles-1-disabled": "True",
"environment_roles-2-environment_id": str(env_2.id),
"environment_roles-2-role": "BILLING_READ",
"environment_roles-2-environment_name": env_2.name,
"perms_env_mgmt": True,
"perms_team_mgmt": True,
},
)
assert response.status_code == 302
expected_url = url_for(
"applications.settings",
application_id=application.id,
fragment="application-members",
_anchor="application-members",
_external=True,
)
assert response.location == expected_url
# make sure new application role was not created
assert len(application.roles) == 1
# check that new app perms were added
assert bool(app_role.has_permission_set(PermissionSets.EDIT_APPLICATION_TEAM))
assert bool(
app_role.has_permission_set(PermissionSets.EDIT_APPLICATION_ENVIRONMENTS)
)
environment_roles = application.roles[0].environment_roles
# check that the user has roles in the correct envs
assert len(environment_roles) == 3
assert updated_role.role == CSPRole.CONTRIBUTOR
assert suspended_role.disabled
def test_revoke_invite(client, user_session):
invite = ApplicationInvitationFactory.create()
app_role = invite.role
application = app_role.application
user_session(application.portfolio.owner)
response = client.post(
url_for(
"applications.revoke_invite",
application_id=application.id,
application_role_id=app_role.id,
)
)
assert invite.is_revoked
assert app_role.status == ApplicationRoleStatus.DISABLED
assert app_role.deleted
def test_filter_environment_roles():
application_role = ApplicationRoleFactory.create(user=None)
application_role2 = ApplicationRoleFactory.create(
user=None, application=application_role.application
)
application_role3 = ApplicationRoleFactory.create(
user=None, application=application_role.application
)
environment = EnvironmentFactory.create(application=application_role.application)
EnvironmentRoleFactory.create(
environment=environment, application_role=application_role
)
EnvironmentRoleFactory.create(
environment=environment, application_role=application_role2
)
environment_data = filter_env_roles_form_data(application_role, [environment])
assert environment_data[0]["role"] != "No Access"
environment_data = filter_env_roles_form_data(application_role3, [environment])
assert environment_data[0]["role"] == "No Access"
def test_resend_invite(client, user_session, session):
user = UserFactory.create()
# need to set the time created to yesterday, otherwise the original invite and resent
# invite have the same time_created and then we can't rely on time to order the invites
yesterday = datetime.date.today() - datetime.timedelta(days=1)
invite = ApplicationInvitationFactory.create(
user=user, time_created=yesterday, email="[email protected]"
)
app_role = invite.role
application = app_role.application
user_session(application.portfolio.owner)
response = client.post(
url_for(
"applications.resend_invite",
application_id=application.id,
application_role_id=app_role.id,
),
data={
"first_name": user.first_name,
"last_name": user.last_name,
"dod_id": user.dod_id,
"email": "[email protected]",
},
)
session.refresh(app_role)
assert response.status_code == 302
assert invite.is_revoked
assert app_role.status == ApplicationRoleStatus.PENDING
assert app_role.latest_invitation.email == "[email protected]"
def test_filter_env_roles_data():
env_a = EnvironmentFactory.create(name="a")
env_b = EnvironmentFactory.create(name="b")
env_c = EnvironmentFactory.create(name="c")
env_role_a = EnvironmentRoleFactory.create(environment=env_a)
env_role_b = EnvironmentRoleFactory.create(environment=env_b)
env_role_c = EnvironmentRoleFactory.create(environment=env_c)
env_role_data = filter_env_roles_data([env_role_b, env_role_c, env_role_a])
# test that the environments are sorted in alphabetical order by name. Since
# we're just testing if the names are sorted, in this case we don't need to
# ensure that the environment roles and environments are associated with the
# same application.
assert [env["environment_name"] for env in env_role_data] == ["a", "b", "c"]
@pytest.fixture
def set_g(monkeypatch):
_g = Mock()
monkeypatch.setattr("atst.app.g", _g)
monkeypatch.setattr("atst.routes.applications.settings.g", _g)
def _set_g(attr, val):
setattr(_g, attr, val)
yield _set_g
def test_handle_create_member(monkeypatch, set_g, session):
user = UserFactory.create()
application = ApplicationFactory.create(
environments=[{"name": "Naboo"}, {"name": "Endor"}]
)
(env, env_1) = application.environments
job_mock = Mock()
monkeypatch.setattr("atst.jobs.send_mail.delay", job_mock)
set_g("current_user", application.portfolio.owner)
set_g("portfolio", application.portfolio)
set_g("application", application)
form_data = ImmutableMultiDict(
{
"user_data-first_name": user.first_name,
"user_data-last_name": user.last_name,
"user_data-dod_id": user.dod_id,
"user_data-email": user.email,
"environment_roles-0-environment_id": str(env.id),
"environment_roles-0-role": "ADMIN",
"environment_roles-0-environment_name": env.name,
"environment_roles-1-environment_id": str(env_1.id),
"environment_roles-1-role": NO_ACCESS,
"environment_roles-1-environment_name": env_1.name,
"perms_env_mgmt": True,
"perms_team_mgmt": True,
}
)
handle_create_member(application.id, form_data)
assert len(application.roles) == 1
environment_roles = application.roles[0].environment_roles
assert len(environment_roles) == 1
assert environment_roles[0].environment == env
invitation = (
session.query(ApplicationInvitation).filter_by(dod_id=user.dod_id).one()
)
assert invitation.role.application == application
assert job_mock.called
def test_handle_update_member_success(set_g):
user = UserFactory.create()
application = ApplicationFactory.create(
environments=[{"name": "Naboo"}, {"name": "Endor"}]
)
(env, env_1) = application.environments
app_role = ApplicationRoleFactory(application=application)
set_g("current_user", application.portfolio.owner)
set_g("portfolio", application.portfolio)
set_g("application", application)
form_data = ImmutableMultiDict(
{
"environment_roles-0-environment_id": str(env.id),
"environment_roles-0-role": "ADMIN",
"environment_roles-0-environment_name": env.name,
"environment_roles-1-environment_id": str(env_1.id),
"environment_roles-1-role": NO_ACCESS,
"environment_roles-1-environment_name": env_1.name,
"perms_env_mgmt": True,
"perms_team_mgmt": True,
}
)
handle_update_member(application.id, app_role.id, form_data)
assert len(application.roles) == 1
assert len(app_role.environment_roles) == 1
assert app_role.environment_roles[0].environment == env
def test_handle_update_member_with_error(set_g, monkeypatch, mock_logger):
exception = "An error occurred."
def _raise_csp_exception(*args, **kwargs):
raise GeneralCSPException(exception)
monkeypatch.setattr(
"atst.domain.environments.Environments.update_env_role", _raise_csp_exception
)
user = UserFactory.create()
application = ApplicationFactory.create(
environments=[{"name": "Naboo"}, {"name": "Endor"}]
)
(env, env_1) = application.environments
app_role = ApplicationRoleFactory(application=application)
set_g("current_user", application.portfolio.owner)
set_g("portfolio", application.portfolio)
set_g("application", application)
form_data = ImmutableMultiDict(
{
"environment_roles-0-environment_id": str(env.id),
"environment_roles-0-role": "ADMIN",
"environment_roles-0-environment_name": env.name,
"environment_roles-1-environment_id": str(env_1.id),
"environment_roles-1-role": NO_ACCESS,
"environment_roles-1-environment_name": env_1.name,
"perms_env_mgmt": True,
"perms_team_mgmt": True,
}
)
handle_update_member(application.id, app_role.id, form_data)
assert mock_logger.messages[-1] == exception
def test_create_subscription_success(
client, user_session, mock_azure: AzureCloudProvider
):
environment = EnvironmentFactory.create()
user_session(environment.portfolio.owner)
environment.cloud_id = "management/group/id"
environment.application.portfolio.csp_data = {
"billing_account_name": "xxxx-xxxx-xxx-xxx",
"billing_profile_name": "xxxxxxxxxxx:xxxxxxxxxxxxx_xxxxxx",
"tenant_id": "xxxxxxxxxxx-xxxxxxxxxx-xxxxxxx-xxxxx",
"billing_profile_properties": {
"invoice_sections": [{"invoice_section_name": "xxxx-xxxx-xxx-xxx"}]
},
}
with patch.object(
AzureCloudProvider, "create_subscription", wraps=mock_azure.create_subscription,
) as create_subscription:
create_subscription.return_value = SubscriptionCreationCSPResult(
subscription_verify_url="https://zombo.com", subscription_retry_after=10
)
response = client.post(
url_for("applications.create_subscription", environment_id=environment.id),
)
assert response.status_code == 302
assert response.location == url_for(
"applications.settings",
application_id=environment.application.id,
_external=True,
fragment="application-environments",
_anchor="application-environments",
)
def test_create_subscription_failure(client, user_session, monkeypatch):
environment = EnvironmentFactory.create()
def _raise_csp_exception(*args, **kwargs):
raise GeneralCSPException("An error occurred.")
monkeypatch.setattr(
"atst.domain.csp.cloud.MockCloudProvider.create_subscription",
_raise_csp_exception,
)
user_session(environment.portfolio.owner)
environment.cloud_id = "management/group/id"
environment.application.portfolio.csp_data = {
"billing_account_name": "xxxx-xxxx-xxx-xxx",
"billing_profile_name": "xxxxxxxxxxx:xxxxxxxxxxxxx_xxxxxx",
"tenant_id": "xxxxxxxxxxx-xxxxxxxxxx-xxxxxxx-xxxxx",
"billing_profile_properties": {
"invoice_sections": [{"invoice_section_name": "xxxx-xxxx-xxx-xxx"}]
},
}
response = client.post(
url_for("applications.create_subscription", environment_id=environment.id),
)
assert response.status_code == 400
|
the-stack_106_22815 | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import argparse
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.control.proto import control_cmd_pb2
from modules.planning.proto import planning_pb2
from modules.canbus.proto import chassis_pb2
from modules.drivers.proto import pointcloud_pb2
from module_control_analyzer import ControlAnalyzer
from module_planning_analyzer import PlannigAnalyzer
from modules.perception.proto import perception_obstacle_pb2
from modules.prediction.proto import prediction_obstacle_pb2
from lidar_endtoend_analyzer import LidarEndToEndAnalyzer
def process(control_analyzer, planning_analyzer, lidar_endtoend_analyzer,
is_simulation, plot_planning_path, plot_planning_refpath, all_data):
is_auto_drive = False
for msg in reader.read_messages():
if msg.topic == "/apollo/canbus/chassis":
chassis = chassis_pb2.Chassis()
chassis.ParseFromString(msg.message)
if chassis.driving_mode == \
chassis_pb2.Chassis.COMPLETE_AUTO_DRIVE:
is_auto_drive = True
else:
is_auto_drive = False
if msg.topic == "/apollo/control":
if (not is_auto_drive and not all_data) or \
is_simulation or plot_planning_path or plot_planning_refpath:
continue
control_cmd = control_cmd_pb2.ControlCommand()
control_cmd.ParseFromString(msg.message)
control_analyzer.put(control_cmd)
lidar_endtoend_analyzer.put_pb('control', control_cmd)
if msg.topic == "/apollo/planning":
if (not is_auto_drive) and (not all_data):
continue
adc_trajectory = planning_pb2.ADCTrajectory()
adc_trajectory.ParseFromString(msg.message)
planning_analyzer.put(adc_trajectory)
lidar_endtoend_analyzer.put_pb('planning', adc_trajectory)
if plot_planning_path:
planning_analyzer.plot_path(plt, adc_trajectory)
if plot_planning_refpath:
planning_analyzer.plot_refpath(plt, adc_trajectory)
if msg.topic == "/apollo/sensor/velodyne64/compensator/PointCloud2" or \
msg.topic == "/apollo/sensor/lidar128/compensator/PointCloud2":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
point_cloud = pointcloud_pb2.PointCloud()
point_cloud.ParseFromString(msg.message)
lidar_endtoend_analyzer.put_lidar(point_cloud)
if msg.topic == "/apollo/perception/obstacles":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
perception = perception_obstacle_pb2.PerceptionObstacles()
perception.ParseFromString(msg.message)
lidar_endtoend_analyzer.put_pb('perception', perception)
if msg.topic == "/apollo/prediction":
if ((not is_auto_drive) and (not all_data)) or is_simulation or \
plot_planning_path or plot_planning_refpath:
continue
prediction = prediction_obstacle_pb2.PredictionObstacles()
prediction.ParseFromString(msg.message)
lidar_endtoend_analyzer.put_pb('prediction', prediction)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: python main.py record_file")
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the record file for analysis.")
parser.add_argument(
"-sim", "--simulation", action="store_const", const=True,
help="For dreamland API call")
parser.add_argument(
"-path", "--planningpath", action="store_const", const=True,
help="plot planing paths in cartesian coordinate.")
parser.add_argument(
"-refpath", "--planningrefpath", action="store_const", const=True,
help="plot planing reference paths in cartesian coordinate.")
parser.add_argument(
"-a", "--alldata", action="store_const", const=True,
help="Analyze all data (both auto and manual), otherwise auto data only without this option.")
parser.add_argument(
"-acc", "--showacc", action="store_const", const=True,
help="Analyze all data (both auto and manual), otherwise auto data only without this option.")
args = parser.parse_args()
record_file = args.file
reader = RecordReader(record_file)
control_analyzer = ControlAnalyzer()
planning_analyzer = PlannigAnalyzer(args)
lidar_endtoend_analyzer = LidarEndToEndAnalyzer()
process(control_analyzer, planning_analyzer,
lidar_endtoend_analyzer, args.simulation, args.planningpath,
args.planningrefpath, args.alldata)
if args.simulation:
planning_analyzer.print_sim_results()
elif args.planningpath or args.planningrefpath:
plt.axis('equal')
plt.show()
else:
control_analyzer.print_latency_statistics()
planning_analyzer.print_latency_statistics()
lidar_endtoend_analyzer.print_endtoend_latency()
|
the-stack_106_22818 | try:
from userbot.modules.sql_helper import BASE, SESSION
except ImportError:
raise AttributeError
from sqlalchemy import Column, String
class Mute(BASE):
__tablename__ = "muted"
chat_id = Column(String(14), primary_key=True)
sender = Column(String(14), primary_key=True)
def __init__(self, chat_id, sender):
self.chat_id = str(chat_id) # ensure string
self.sender = str(sender)
Mute.__table__.create(checkfirst=True)
def is_muted(chat_id):
try:
return SESSION.query(Mute).filter(Mute.chat_id == str(chat_id)).all()
except BaseException:
return None
finally:
SESSION.close()
def mute(chat_id, sender):
adder = Mute(str(chat_id), str(sender))
SESSION.add(adder)
SESSION.commit()
def unmute(chat_id, sender):
rem = SESSION.query(Mute).get(((str(chat_id)), (str(sender))))
if rem:
SESSION.delete(rem)
SESSION.commit()
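# A minimal usage sketch (the chat and sender ids below are placeholders):
#
#   mute("-1001234567890", "123456789")      # persist a (chat, sender) pair
#   rows = is_muted("-1001234567890")        # Mute rows for the chat, or None on error
#   unmute("-1001234567890", "123456789")    # delete the pair again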
|
the-stack_106_22819 | import numpy as np
from gym.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerFaucetOpenEnv(SawyerXYZEnv):
def __init__(self):
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.8, 0.05)
obj_high = (0.05, 0.85, 0.05)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.8, 0.05]),
'hand_init_pos': np.array([0., .6, .2]),
}
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self.goal = np.array([0.1, 0.8, 0.115])
goal_low = self.hand_low
goal_high = self.hand_high
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v1_path_for('sawyer_xyz/sawyer_faucet.xml')
@_assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
self.curr_path_length += 1
info = {
'reachDist': reachDist,
'goalDist': pullDist,
'epRew': reward,
'pickRew': None,
'success': float(pullDist <= 0.05)
}
return ob, reward, False, info
@property
def _target_site_config(self):
return [
('goal_open', self._target_pos),
('goal_close', np.array([10., 10., 10.]))
]
def _get_pos_objects(self):
return self._get_site_pos('handleStartOpen')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy()
final_pos += np.array([0.1, -0.015, 0.065])
self._target_pos = final_pos
self.sim.model.body_pos[self.model.body_name2id('faucet')] = self.obj_init_pos
self.sim.model.body_pos[self.model.body_name2id('faucetBase')] = self.obj_init_pos
self.maxPullDist = np.linalg.norm(self._target_pos - self.obj_init_pos)
return self._get_obs()
def _reset_hand(self):
super()._reset_hand(10)
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.reachCompleted = False
def compute_reward(self, actions, obs):
del actions
objPos = obs[3:6]
rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
pullGoal = self._target_pos
pullDist = np.linalg.norm(objPos - pullGoal)
reachDist = np.linalg.norm(objPos - fingerCOM)
reachRew = -reachDist
self.reachCompleted = reachDist < 0.05
def pullReward():
            c1 = 1000   # scale of the exponential bonus terms
            c2 = 0.01   # wider Gaussian: rewards getting near the goal
            c3 = 0.001  # narrower Gaussian: rewards precise alignment with the goal
if self.reachCompleted:
pullRew = 1000*(self.maxPullDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))
pullRew = max(pullRew,0)
return pullRew
else:
return 0
pullRew = pullReward()
reward = reachRew + pullRew
return [reward, reachDist, pullDist]
|
the-stack_106_22821 | #!/usr/bin/env python
import os
import sys
sys.path = ["lib", "pytests", "pysystests"] + sys.path
import time
from xunit import XUnitTestResult
import glob
import xml.dom.minidom
import logging
log = logging.getLogger(__name__)
logging.info(__name__)
logging.getLogger().setLevel(logging.INFO)
import argparse
def filter_fields(testname, run_params=""):
testwords = testname.split(",")
line = ""
filter_test_params = ['logs_folder', 'conf_file',
'cluster_name:', 'ini:', 'case_number:',
'num_nodes:', 'spec:', 'is_container:']
filter_test_params.extend([param.split("=")[0] for param in
run_params.split(',')])
for fw in testwords:
filter_word = False
for filter_words in filter_test_params:
if fw.startswith(filter_words):
filter_word = True
if not filter_word:
line = line + fw.replace(":", "=", 1)
if fw != testwords[-1]:
line = line + ","
return line.rstrip(',')
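# Worked example (the test name below is made up): with run_params="GROUP=P0",
#   filter_fields("test_simple,nodes_init:2,GROUP=P0,logs_folder:/tmp/logs", "GROUP=P0")
# drops the GROUP and logs_folder fields and returns "test_simple,nodes_init=2",
# rewriting the first ':' of each remaining field as '='.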
def compare_with_sort(tests_dict, key):
    key_split = key.split(',')
    key = '%s,%s' % (key_split[0], ','.join(sorted(key_split[1:])))
    for k in tests_dict.keys():
test_case_split = k.split(',')
test_case = "%s,%s" % (test_case_split[0],
",".join(sorted(test_case_split[1:])))
if key == test_case:
return True, k
return False, None
def merge_reports(filespath, run_params=""):
log.info("Merging of report files from "+str(filespath))
testsuites = {}
if not isinstance(filespath, list):
filespaths = filespath.split(",")
else:
filespaths = filespath
for filepath in filespaths:
xml_files = glob.glob(filepath)
if not isinstance(filespath, list) and filespath.find("*"):
xml_files.sort(key=os.path.getmtime)
for xml_file in xml_files:
log.info("-- " + xml_file + " --")
doc = xml.dom.minidom.parse(xml_file)
testsuitelem = doc.getElementsByTagName("testsuite")
for ts in testsuitelem:
tsname = ts.getAttribute("name")
tserros = ts.getAttribute("errors")
tsfailures = ts.getAttribute("failures")
tsskips = ts.getAttribute("skips")
tstime = ts.getAttribute("time")
tstests = ts.getAttribute("tests")
issuite_existed = False
tests = {}
testsuite = {}
# fill testsuite details
if tsname in testsuites.keys():
testsuite = testsuites[tsname]
tests = testsuite['tests']
else:
testsuite['name'] = tsname
testsuite['errors'] = tserros
testsuite['failures'] = tsfailures
testsuite['skips'] = tsskips
testsuite['time'] = tstime
testsuite['testcount'] = tstests
issuite_existed = False
testcaseelem = ts.getElementsByTagName("testcase")
# fill test case details
for tc in testcaseelem:
testcase = {}
tcname = tc.getAttribute("name")
tctime = tc.getAttribute("time")
tcerror = tc.getElementsByTagName("error")
tcname_filtered = filter_fields(tcname, run_params)
test_case_present, key = compare_with_sort(tests,
tcname_filtered)
if test_case_present:
testcase = tests[key]
testcase['name'] = tcname
else:
testcase['name'] = tcname
testcase['time'] = tctime
testcase['error'] = ""
if tcerror:
testcase['error'] = str(tcerror[0].firstChild.nodeValue)
if test_case_present:
tests[key] = testcase
else:
tests[tcname_filtered] = testcase
testsuite['tests'] = tests
testsuites[tsname] = testsuite
try:
abs_path = os.path.dirname(os.path.abspath(sys.argv[0]))
abs_path = abs_path.rstrip("scripts")
logs_directory = os.path.join(abs_path, "logs")
move_logs_directory = os.path.join(abs_path, "job_logs")
os.rename(logs_directory, move_logs_directory)
os.mkdir(logs_directory)
except Exception as e:
log.info(e)
return {}
log.info("\nNumber of TestSuites="+str(len(testsuites)))
tsindex = 0
for tskey in testsuites.keys():
tsindex = tsindex+1
log.info("\nTestSuite#"+str(tsindex)+") "+str(tskey)+", Number of Tests="+str(len(testsuites[tskey]['tests'])))
pass_count = 0
fail_count = 0
tests = testsuites[tskey]['tests']
xunit = XUnitTestResult()
for testname in tests.keys():
testcase = tests[testname]
tname = testcase['name']
ttime = testcase['time']
inttime = float(ttime)
terrors = testcase['error']
tparams = ""
if "," in tname:
tparams = tname[tname.find(","):]
tname = tname[:tname.find(",")]
if terrors:
failed = True
fail_count = fail_count + 1
xunit.add_test(name=tname, status='fail', time=inttime,
errorType='membase.error', errorMessage=str(terrors), params=tparams
)
else:
passed = True
pass_count = pass_count + 1
xunit.add_test(name=tname, time=inttime, params=tparams
)
str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime())
root_log_dir = os.path.join(logs_directory, "testrunner-{0}".format(
str_time))
if not os.path.exists(root_log_dir):
os.makedirs(root_log_dir)
logs_folder = os.path.join(root_log_dir, "merged_summary")
try:
os.mkdir(logs_folder)
except:
pass
output_filepath="{0}{2}mergedreport-{1}".format(logs_folder, str_time, os.sep).strip()
xunit.write(output_filepath)
xunit.print_summary()
log.info("Summary file is at " + output_filepath+"-"+tsname+".xml")
return testsuites
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Merge testrunner XML report files into a single summary.')
parser.add_argument('run_params', type=str, default="")
parser.add_argument('files', metavar='<xml file1> <xml file2> ...', type=str, nargs='+',
help='Accept all input xml files')
args = parser.parse_args()
print(args.files)
merge_reports(args.files, args.run_params) |
the-stack_106_22822 | from datetime import date, datetime
import csv
import numpy as np
import pandas as pd
from utils.helpers import is_two_digit_year, is_multiple_date_data, add_years, year_in_front
def convert_raw_data_to_matrix(fixed_df, current_gauge_column_index, start_date):
"""Summary Function
"""
current_gauge_class, current_gauge_number, raw_date_column, raw_flow_column = extract_current_data_at_index(fixed_df, current_gauge_column_index)
date_column, flow_column = remove_nan_from_date_and_flow_columns(raw_date_column, raw_flow_column)
years, julian_dates, number_of_years = extract_info_from_date(date_column)
year_ranges = get_year_ranges_from_julian_dates(julian_dates, years, start_date)
flow_matrix = get_flow_matrix(years, julian_dates, flow_column, year_ranges, start_date)
return current_gauge_class, current_gauge_number, year_ranges, flow_matrix, julian_dates
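# Typical call (the column index and start date below are illustrative only):
#
#   gauge_class, gauge_number, year_ranges, flow_matrix, julian_dates = \
#       convert_raw_data_to_matrix(fixed_df, 1, "10/1")
#
# flow_matrix then has 366 rows (one per day of the year) and one column per
# entry in year_ranges, with each column's year starting at the given start date.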
def extract_current_data_at_index(fixed_df, current_gauge_column_index):
current_gauge_number = fixed_df.iloc[1, current_gauge_column_index]
current_gauge_class = fixed_df.iloc[0, current_gauge_column_index]
    print('Gauge Class: {}'.format(int(current_gauge_class)))
print('Gauge Number: {}'.format(int(current_gauge_number)))
if is_multiple_date_data(fixed_df):
raw_date_column = fixed_df.iloc[:, current_gauge_column_index - 1]
else:
raw_date_column = fixed_df.iloc[:, 0]
raw_flow_column = fixed_df.iloc[:, current_gauge_column_index]
return current_gauge_class, current_gauge_number, raw_date_column, raw_flow_column
def remove_nan_from_date_and_flow_columns(raw_date, raw_flow):
"""Loop through the date and remove all date with NA values.
The purpose is to clean the data before creating the final matrix.
"""
date_column = []
flow_column = []
index = 0
for data in raw_date:
if not pd.isnull(data) and index > 1:
date_column.append(raw_date[index])
flow_column.append(raw_flow[index])
index = index + 1
return date_column, flow_column
def extract_info_from_date(date):
years=[]
julian_dates=[]
number_of_years=0
current_year = 0
for single_date in date:
if is_two_digit_year(single_date):
dt = datetime.strptime(single_date, "%m/%d/%y")
elif year_in_front(single_date):
dt = datetime.strptime(single_date, "%Y-%m-%d")
else:
dt = datetime.strptime(single_date, "%m/%d/%Y")
if dt.year > 2019:
parsed_year = dt.year - 100
else:
parsed_year = dt.year
years.append(parsed_year)
julian_dates.append(dt.timetuple().tm_yday)
if parsed_year != current_year:
current_year = parsed_year;
number_of_years = number_of_years + 1
return years, julian_dates, number_of_years
def get_year_ranges_from_julian_dates(julian_dates, years, start_date):
julian_start_date_first_year = datetime.strptime("{}/{}".format(start_date, years[0]), "%m/%d/%Y").timetuple().tm_yday
julian_start_date_last_year = datetime.strptime("{}/{}".format(start_date, years[-1]), "%m/%d/%Y").timetuple().tm_yday
if (julian_dates[0] < julian_start_date_first_year):
first_year = years[0] - 1
else:
first_year = years[0]
if(julian_dates[-1] >= julian_start_date_last_year):
last_year = years[-1] + 1
else:
last_year = years[-1]
year_ranges = list(range(first_year, last_year))
return year_ranges
def get_flow_matrix(years, julian_dates, flow, year_ranges, start_date):
"""Return one matrix containing flow data for raw dataset based on start date
"""
number_of_columns = len(year_ranges)
flow_matrix = np.zeros((366, number_of_columns))
flow_matrix.fill(None)
for index, julian_date in enumerate(julian_dates):
if (years[index] % 4 == 0):
days_in_year = 366
else:
days_in_year = 365
julian_start_date = datetime.strptime("{}/{}".format(start_date, years[index]), "%m/%d/%Y").timetuple().tm_yday
row, column = get_position(years[index], julian_date, year_ranges, julian_start_date, days_in_year)
flow_matrix[row][column] = flow[index]
return flow_matrix
def import_and_parse_csv(path):
"""Return 3 arrays for year, julian_date, and flow, and calculate
number of years given in each dataset. Parameter: path for csv file path
"""
year = []
julian_date = []
flow = []
number_of_years = 0
flow_index = 0
with open(path) as csvfile:
file = csv.reader(csvfile, delimiter=',')
current_year = 0
for row in file:
if row[0] == 'Date':
for column in row:
if 'Flow' in column:
break
flow_index = flow_index + 1
continue
current_date = datetime.strptime(row[0], "%m/%d/%y")
"""reduce by 100 when '88' is interpreted as 2088"""
if current_date.year > 2015:
current_date = add_years(current_date, -100)
year.append(current_date.year)
julian_date.append(current_date.timetuple().tm_yday)
if row[flow_index] == "" or row[flow_index] == "NA":
flow.append(None)
else:
flow.append(row[flow_index])
if current_date.year != current_year:
current_year = current_date.year
number_of_years = number_of_years + 1
return year, julian_date, flow, number_of_years
def get_position(year, julian_date, year_ranges, julian_start_date, days_in_year):
row = julian_date - julian_start_date
if (row < 0):
row = row + days_in_year
if(year > year_ranges[-1]):
column = -1
else:
column = year_ranges.index(year)
if (julian_date < julian_start_date):
column = column - 1
return row, column
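# Worked example (assuming a "10/1" start date, so julian_start_date is 275 in the
# leap year 1996 with days_in_year=366): a record from 1996-02-01 has julian_date=32,
# so row = 32 - 275 + 366 = 123, and because the date falls before the start date the
# record is shifted into the previous water-year column:
#
#   get_position(1996, 32, [1995, 1996, 1997], 275, 366)  # -> (123, 0)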
def sort_matrix(matrix, index):
row = len(matrix)
column = len(matrix[0])
index_array = np.argsort(matrix[index])
sorted_matrix = np.zeros((row, column))
counter = 0
for index in index_array:
for rowIndex, value in enumerate(sorted_matrix[:,counter]):
sorted_matrix[rowIndex,counter] = matrix[rowIndex][index]
counter = counter + 1
return sorted_matrix.tolist()
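# Worked example (made-up values): sorting the columns of a 2x3 matrix by row 0,
#
#   sort_matrix([[3, 1, 2], [30, 10, 20]], 0)  # -> [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]]
#
# keeps each column intact while ordering the columns by the values in that row.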
def insert_column_header(matrix, column_header):
for index, name in enumerate(column_header):
matrix[index].insert(0, name)
return matrix
|
the-stack_106_22823 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Editor Plugin"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import logging
import os
import os.path as osp
import re
import sys
import time
# Third party imports
from qtpy.compat import from_qvariant, getopenfilenames, to_qvariant
from qtpy.QtCore import QByteArray, Qt, Signal, Slot, QDir
from qtpy.QtPrintSupport import QAbstractPrintDialog, QPrintDialog, QPrinter
from qtpy.QtWidgets import (QAction, QActionGroup, QApplication, QDialog,
QFileDialog, QInputDialog, QMenu, QSplitter,
QToolBar, QVBoxLayout, QWidget)
# Local imports
from spyder.api.panel import Panel
from spyder.api.plugins import Plugins, SpyderPluginWidget
from spyder.config.base import _, get_conf_path, running_under_pytest
from spyder.config.manager import CONF
from spyder.config.utils import (get_edit_filetypes, get_edit_filters,
get_filter)
from spyder.py3compat import PY2, qbytearray_to_str, to_text_string
from spyder.utils import encoding, programs, sourcecode
from spyder.utils.icon_manager import ima
from spyder.utils.qthelpers import create_action, add_actions, MENU_SEPARATOR
from spyder.utils.misc import getcwd_or_home
from spyder.widgets.findreplace import FindReplace
from spyder.plugins.editor.confpage import EditorConfigPage
from spyder.plugins.editor.utils.autosave import AutosaveForPlugin
from spyder.plugins.editor.utils.switcher import EditorSwitcherManager
from spyder.plugins.editor.widgets.codeeditor_widgets import Printer
from spyder.plugins.editor.widgets.editor import (EditorMainWindow,
EditorSplitter,
EditorStack,)
from spyder.plugins.editor.widgets.codeeditor import CodeEditor
from spyder.plugins.editor.utils.bookmarks import (load_bookmarks,
save_bookmarks)
from spyder.plugins.editor.utils.debugger import (clear_all_breakpoints,
clear_breakpoint)
from spyder.plugins.editor.widgets.status import (CursorPositionStatus,
EncodingStatus, EOLStatus,
ReadWriteStatus, VCSStatus)
from spyder.plugins.run.widgets import (ALWAYS_OPEN_FIRST_RUN_OPTION,
get_run_configuration,
RunConfigDialog, RunConfigOneDialog)
from spyder.plugins.mainmenu.api import ApplicationMenus
logger = logging.getLogger(__name__)
WINPDB_PATH = programs.find_program('winpdb')
class Editor(SpyderPluginWidget):
"""
Multi-file Editor widget
"""
CONF_SECTION = 'editor'
CONFIGWIDGET_CLASS = EditorConfigPage
CONF_FILE = False
TEMPFILE_PATH = get_conf_path('temp.py')
TEMPLATE_PATH = get_conf_path('template.py')
DISABLE_ACTIONS_WHEN_HIDDEN = False # SpyderPluginWidget class attribute
# This is required for the new API
NAME = 'editor'
REQUIRES = [Plugins.Console]
OPTIONAL = [Plugins.Completions, Plugins.OutlineExplorer]
# Signals
run_in_current_ipyclient = Signal(str, str, str,
bool, bool, bool, bool, bool)
run_cell_in_ipyclient = Signal(str, object, str, bool)
debug_cell_in_ipyclient = Signal(str, object, str, bool)
exec_in_extconsole = Signal(str, bool)
redirect_stdio = Signal(bool)
sig_dir_opened = Signal(str)
"""
This signal is emitted when the editor changes the current directory.
Parameters
----------
new_working_directory: str
The new working directory path.
Notes
-----
This option is available on the options menu of the editor plugin
"""
breakpoints_saved = Signal()
sig_file_opened_closed_or_updated = Signal(str, str)
"""
This signal is emitted when a file is opened, closed or updated,
including switching among files.
Parameters
----------
filename: str
Name of the file that was opened, closed or updated.
language: str
Name of the programming language of the file that was opened,
closed or updated.
"""
sig_file_debug_message_requested = Signal()
# This signal is fired for any focus change among all editor stacks
sig_editor_focus_changed = Signal()
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Dictionary required by the Help pane to render a docstring.
Examples
--------
>>> help_data = {
'obj_text': str,
'name': str,
'argspec': str,
'note': str,
'docstring': str,
'force_refresh': bool,
'path': str,
}
See Also
--------
:py:meth:spyder.plugins.editor.widgets.editor.EditorStack.send_to_help
"""
def __init__(self, parent, ignore_last_opened_files=False):
SpyderPluginWidget.__init__(self, parent)
self.__set_eol_chars = True
# Creating template if it doesn't already exist
if not osp.isfile(self.TEMPLATE_PATH):
if os.name == "nt":
shebang = []
else:
shebang = ['#!/usr/bin/env python' + ('2' if PY2 else '3')]
header = shebang + [
'# -*- coding: utf-8 -*-',
'"""', 'Created on %(date)s', '',
'@author: %(username)s', '"""', '', '']
try:
encoding.write(os.linesep.join(header), self.TEMPLATE_PATH,
'utf-8')
except EnvironmentError:
pass
self.projects = None
self.outlineexplorer = None
self.file_dependent_actions = []
self.pythonfile_dependent_actions = []
self.dock_toolbar_actions = None
self.edit_menu_actions = None #XXX: find another way to notify Spyder
self.stack_menu_actions = None
self.checkable_actions = {}
self.__first_open_files_setup = True
self.editorstacks = []
self.last_focused_editorstack = {}
self.editorwindows = []
self.editorwindows_to_be_created = []
self.toolbar_list = None
self.menu_list = None
# We need to call this here to create self.dock_toolbar_actions,
# which is used below.
self._setup()
self.options_button.hide()
# Configuration dialog size
self.dialog_size = None
self.vcs_status = VCSStatus(self)
self.cursorpos_status = CursorPositionStatus(self)
self.encoding_status = EncodingStatus(self)
self.eol_status = EOLStatus(self)
self.readwrite_status = ReadWriteStatus(self)
        # TODO: temporary fix while the editor uses the new API
statusbar = self.main.statusbar
statusbar.add_status_widget(self.readwrite_status)
statusbar.add_status_widget(self.eol_status)
statusbar.add_status_widget(self.encoding_status)
statusbar.add_status_widget(self.cursorpos_status)
statusbar.add_status_widget(self.vcs_status)
layout = QVBoxLayout()
self.dock_toolbar = QToolBar(self)
add_actions(self.dock_toolbar, self.dock_toolbar_actions)
layout.addWidget(self.dock_toolbar)
self.last_edit_cursor_pos = None
self.cursor_pos_history = []
self.cursor_pos_index = None
self.__ignore_cursor_position = True
# Completions setup
self.completion_capabilities = {}
# Setup new windows:
self.main.all_actions_defined.connect(self.setup_other_windows)
# Change module completions when PYTHONPATH changes
self.main.sig_pythonpath_changed.connect(self.set_path)
# Find widget
self.find_widget = FindReplace(self, enable_replace=True)
self.find_widget.hide()
self.register_widget_shortcuts(self.find_widget)
# Start autosave component
# (needs to be done before EditorSplitter)
self.autosave = AutosaveForPlugin(self)
self.autosave.try_recover_from_autosave()
# Multiply by 1000 to convert seconds to milliseconds
self.autosave.interval = self.get_option('autosave_interval') * 1000
self.autosave.enabled = self.get_option('autosave_enabled')
# Tabbed editor widget + Find/Replace widget
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
self.editorsplitter = EditorSplitter(self, self,
self.stack_menu_actions, first=True)
editor_layout.addWidget(self.editorsplitter)
editor_layout.addWidget(self.find_widget)
# Splitter: editor widgets (see above) + outline explorer
self.splitter = QSplitter(self)
self.splitter.setContentsMargins(0, 0, 0, 0)
self.splitter.addWidget(editor_widgets)
self.splitter.setStretchFactor(0, 5)
self.splitter.setStretchFactor(1, 1)
layout.addWidget(self.splitter)
self.setLayout(layout)
self.setFocusPolicy(Qt.ClickFocus)
# Editor's splitter state
state = self.get_option('splitter_state', None)
if state is not None:
self.splitter.restoreState( QByteArray().fromHex(
str(state).encode('utf-8')) )
self.recent_files = self.get_option('recent_files', [])
self.untitled_num = 0
# Parameters of last file execution:
self.__last_ic_exec = None # internal console
self.__last_ec_exec = None # external console
# File types and filters used by the Open dialog
self.edit_filetypes = None
self.edit_filters = None
self.__ignore_cursor_position = False
current_editor = self.get_current_editor()
if current_editor is not None:
filename = self.get_current_filename()
position = current_editor.get_position('cursor')
line, column = current_editor.get_cursor_line_column()
self.add_cursor_position_to_history(filename, position, line,
column)
self.update_cursorpos_actions()
self.set_path()
def set_projects(self, projects):
self.projects = projects
@Slot()
def show_hide_projects(self):
if self.projects is not None:
dw = self.projects.dockwidget
if dw.isVisible():
dw.hide()
else:
dw.show()
dw.raise_()
self.switch_to_plugin()
def set_outlineexplorer(self, outlineexplorer):
self.outlineexplorer = outlineexplorer
for editorstack in self.editorstacks:
# Pass the OutlineExplorer widget to the stacks because they
# don't need the plugin
editorstack.set_outlineexplorer(self.outlineexplorer.get_widget())
self.outlineexplorer.get_widget().edit_goto.connect(
lambda filenames, goto, word:
self.load(filenames=filenames, goto=goto, word=word,
editorwindow=self))
self.outlineexplorer.get_widget().edit.connect(
lambda filenames:
self.load(filenames=filenames, editorwindow=self))
#------ Private API --------------------------------------------------------
def restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
        # The widget is now visible, so we can center the cursor on the
        # top-level editor:
try:
self.get_current_editor().centerCursor()
except AttributeError:
pass
@Slot(dict)
def report_open_file(self, options):
"""Report that a file was opened to the completion manager."""
filename = options['filename']
language = options['language']
codeeditor = options['codeeditor']
status = None
if self.main.get_plugin(Plugins.Completions, error=False):
status = (
self.main.completions.start_completion_services_for_language(
language.lower()))
self.main.completions.register_file(
language.lower(), filename, codeeditor)
if status:
if language.lower() in self.completion_capabilities:
# When this condition is True, it means there's a server
# that can provide completion services for this file.
codeeditor.register_completion_capabilities(
self.completion_capabilities[language.lower()])
codeeditor.start_completion_services()
elif self.main.completions.is_fallback_only(language.lower()):
# This is required to use fallback completions for files
# without a language server.
codeeditor.start_completion_services()
else:
if codeeditor.language == language.lower():
logger.debug('Setting {0} completions off'.format(filename))
codeeditor.completions_available = False
@Slot(dict, str)
def register_completion_capabilities(self, capabilities, language):
"""
Register completion server capabilities in all editorstacks.
Parameters
----------
capabilities: dict
Capabilities supported by a language server.
language: str
Programming language for the language server (it has to be
in small caps).
"""
logger.debug(
'Completion server capabilities for {!s} are: {!r}'.format(
language, capabilities)
)
# This is required to start workspace before completion
# services when Spyder starts with an open project.
# TODO: Find a better solution for it in the future!!
self.main.projects.start_workspace_services()
self.completion_capabilities[language] = dict(capabilities)
for editorstack in self.editorstacks:
editorstack.register_completion_capabilities(
capabilities, language)
self.start_completion_services(language)
def start_completion_services(self, language):
"""Notify all editorstacks about LSP server availability."""
for editorstack in self.editorstacks:
editorstack.start_completion_services(language)
def stop_completion_services(self, language):
"""Notify all editorstacks about LSP server unavailability."""
for editorstack in self.editorstacks:
editorstack.stop_completion_services(language)
def send_completion_request(self, language, request, params):
logger.debug("Perform request {0} for: {1}".format(
request, params['file']))
self.main.completions.send_request(language, request, params)
@Slot(str, tuple, dict)
def _rpc_call(self, method, args, kwargs):
meth = getattr(self, method)
meth(*args, **kwargs)
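    # Illustrative note (not in the original source): the completions plugin
    # drives public Editor methods through this slot; for example, emitting
    # sig_editor_rpc('load', ('/tmp/example.py',), {'goto': 10}) ends up
    # calling self.load('/tmp/example.py', goto=10) here.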
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
title = _('Editor')
return title
def get_plugin_icon(self):
"""Return widget icon."""
return ima.icon('edit')
def get_focus_widget(self):
"""
Return the widget to give focus to.
This happens when plugin's dockwidget is raised on top-level.
"""
return self.get_current_editor()
def _visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget._visibility_changed(self, enable)
if self.dockwidget is None:
return
if self.dockwidget.isWindow():
self.dock_toolbar.show()
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin()
self.sig_update_plugin_title.emit()
def refresh_plugin(self):
"""Refresh editor plugin"""
editorstack = self.get_current_editorstack()
editorstack.refresh()
self.refresh_save_all_action()
def closing_plugin(self, cancelable=False):
"""Perform actions before parent main window is closed"""
state = self.splitter.saveState()
self.set_option('splitter_state', qbytearray_to_str(state))
editorstack = self.editorstacks[0]
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if not active_project_path:
self.set_open_filenames()
else:
self.projects.set_project_filenames(
[finfo.filename for finfo in editorstack.data])
self.set_option('layout_settings',
self.editorsplitter.get_layout_settings())
self.set_option('windows_layout_settings',
[win.get_layout_settings() for win in self.editorwindows])
# self.set_option('filenames', filenames)
self.set_option('recent_files', self.recent_files)
# Stop autosave timer before closing windows
self.autosave.stop_autosave_timer()
try:
if not editorstack.save_if_changed(cancelable) and cancelable:
return False
else:
for win in self.editorwindows[:]:
win.close()
return True
except IndexError:
return True
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
# ---- File menu and toolbar ----
self.new_action = create_action(
self,
_("&New file..."),
icon=ima.icon('filenew'), tip=_("New file"),
triggered=self.new,
context=Qt.WidgetShortcut
)
self.register_shortcut(self.new_action, context="Editor",
name="New file", add_shortcut_to_tip=True)
self.open_last_closed_action = create_action(
self,
_("O&pen last closed"),
tip=_("Open last closed"),
triggered=self.open_last_closed
)
self.register_shortcut(self.open_last_closed_action, context="Editor",
name="Open last closed")
self.open_action = create_action(self, _("&Open..."),
icon=ima.icon('fileopen'), tip=_("Open file"),
triggered=self.load,
context=Qt.WidgetShortcut)
self.register_shortcut(self.open_action, context="Editor",
name="Open file", add_shortcut_to_tip=True)
self.revert_action = create_action(self, _("&Revert"),
icon=ima.icon('revert'), tip=_("Revert file from disk"),
triggered=self.revert)
self.save_action = create_action(self, _("&Save"),
icon=ima.icon('filesave'), tip=_("Save file"),
triggered=self.save,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_action, context="Editor",
name="Save file", add_shortcut_to_tip=True)
self.save_all_action = create_action(self, _("Sav&e all"),
icon=ima.icon('save_all'), tip=_("Save all files"),
triggered=self.save_all,
context=Qt.WidgetShortcut)
self.register_shortcut(self.save_all_action, context="Editor",
name="Save all", add_shortcut_to_tip=True)
save_as_action = create_action(self, _("Save &as..."), None,
ima.icon('filesaveas'), tip=_("Save current file as..."),
triggered=self.save_as,
context=Qt.WidgetShortcut)
self.register_shortcut(save_as_action, "Editor", "Save As")
save_copy_as_action = create_action(self, _("Save copy as..."), None,
ima.icon('filesaveas'), _("Save copy of current file as..."),
triggered=self.save_copy_as)
print_preview_action = create_action(self, _("Print preview..."),
tip=_("Print preview..."), triggered=self.print_preview)
self.print_action = create_action(self, _("&Print..."),
icon=ima.icon('print'), tip=_("Print current file..."),
triggered=self.print_file)
# Shortcut for close_action is defined in widgets/editor.py
self.close_action = create_action(self, _("&Close"),
icon=ima.icon('fileclose'), tip=_("Close current file"),
triggered=self.close_file)
self.close_all_action = create_action(self, _("C&lose all"),
icon=ima.icon('filecloseall'), tip=_("Close all opened files"),
triggered=self.close_all_files,
context=Qt.WidgetShortcut)
self.register_shortcut(self.close_all_action, context="Editor",
name="Close all")
# ---- Find menu and toolbar ----
_text = _("&Find text")
find_action = create_action(self, _text, icon=ima.icon('find'),
tip=_text, triggered=self.find,
context=Qt.WidgetShortcut)
self.register_shortcut(find_action, context="find_replace",
name="Find text", add_shortcut_to_tip=True)
find_next_action = create_action(self, _("Find &next"),
icon=ima.icon('findnext'),
triggered=self.find_next,
context=Qt.WidgetShortcut)
self.register_shortcut(find_next_action, context="find_replace",
name="Find next")
find_previous_action = create_action(self, _("Find &previous"),
icon=ima.icon('findprevious'),
triggered=self.find_previous,
context=Qt.WidgetShortcut)
self.register_shortcut(find_previous_action, context="find_replace",
name="Find previous")
_text = _("&Replace text")
replace_action = create_action(self, _text, icon=ima.icon('replace'),
tip=_text, triggered=self.replace,
context=Qt.WidgetShortcut)
self.register_shortcut(replace_action, context="find_replace",
name="Replace text")
# ---- Debug menu and toolbar ----
set_clear_breakpoint_action = create_action(self,
_("Set/Clear breakpoint"),
icon=ima.icon('breakpoint_big'),
triggered=self.set_or_clear_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_clear_breakpoint_action, context="Editor",
name="Breakpoint")
set_cond_breakpoint_action = create_action(self,
_("Set/Edit conditional breakpoint"),
icon=ima.icon('breakpoint_cond_big'),
triggered=self.set_or_edit_conditional_breakpoint,
context=Qt.WidgetShortcut)
self.register_shortcut(set_cond_breakpoint_action, context="Editor",
name="Conditional breakpoint")
clear_all_breakpoints_action = create_action(self,
_('Clear breakpoints in all files'),
triggered=self.clear_all_breakpoints)
self.winpdb_action = create_action(self, _("Debug with winpdb"),
triggered=self.run_winpdb)
self.winpdb_action.setEnabled(WINPDB_PATH is not None and PY2)
# --- Debug toolbar ---
self.debug_action = create_action(
self, _("&Debug"),
icon=ima.icon('debug'),
tip=_("Debug file"),
triggered=self.debug_file)
self.register_shortcut(self.debug_action, context="_", name="Debug",
add_shortcut_to_tip=True)
self.debug_next_action = create_action(
self, _("Step"),
icon=ima.icon('arrow-step-over'), tip=_("Run current line"),
triggered=lambda: self.debug_command("next"))
self.register_shortcut(self.debug_next_action, "_", "Debug Step Over",
add_shortcut_to_tip=True)
self.debug_continue_action = create_action(
self, _("Continue"),
icon=ima.icon('arrow-continue'),
tip=_("Continue execution until next breakpoint"),
triggered=lambda: self.debug_command("continue"))
self.register_shortcut(
self.debug_continue_action, "_", "Debug Continue",
add_shortcut_to_tip=True)
self.debug_step_action = create_action(
self, _("Step Into"),
icon=ima.icon('arrow-step-in'),
tip=_("Step into function or method of current line"),
triggered=lambda: self.debug_command("step"))
self.register_shortcut(self.debug_step_action, "_", "Debug Step Into",
add_shortcut_to_tip=True)
self.debug_return_action = create_action(
self, _("Step Return"),
icon=ima.icon('arrow-step-out'),
tip=_("Run until current function or method returns"),
triggered=lambda: self.debug_command("return"))
self.register_shortcut(
self.debug_return_action, "_", "Debug Step Return",
add_shortcut_to_tip=True)
self.debug_exit_action = create_action(
self, _("Stop"),
icon=ima.icon('stop_debug'), tip=_("Stop debugging"),
triggered=self.stop_debugging)
self.register_shortcut(self.debug_exit_action, "_", "Debug Exit",
add_shortcut_to_tip=True)
# --- Run toolbar ---
run_action = create_action(self, _("&Run"), icon=ima.icon('run'),
tip=_("Run file"),
triggered=self.run_file)
self.register_shortcut(run_action, context="_", name="Run",
add_shortcut_to_tip=True)
configure_action = create_action(
self,
_("&Configuration per file..."),
icon=ima.icon('run_settings'),
tip=_("Run settings"),
menurole=QAction.NoRole,
triggered=self.edit_run_configurations)
self.register_shortcut(configure_action, context="_",
name="Configure", add_shortcut_to_tip=True)
re_run_action = create_action(self, _("Re-run &last script"),
icon=ima.icon('run_again'),
tip=_("Run again last file"),
triggered=self.re_run_file)
self.register_shortcut(re_run_action, context="_",
name="Re-run last script",
add_shortcut_to_tip=True)
run_selected_action = create_action(self, _("Run &selection or "
"current line"),
icon=ima.icon('run_selection'),
tip=_("Run selection or "
"current line"),
triggered=self.run_selection,
context=Qt.WidgetShortcut)
self.register_shortcut(run_selected_action, context="Editor",
name="Run selection", add_shortcut_to_tip=True)
run_cell_action = create_action(self,
_("Run cell"),
icon=ima.icon('run_cell'),
tip=_("Run current cell \n"
"[Use #%% to create cells]"),
triggered=self.run_cell,
context=Qt.WidgetShortcut)
self.register_shortcut(run_cell_action, context="Editor",
name="Run cell", add_shortcut_to_tip=True)
run_cell_advance_action = create_action(
self,
_("Run cell and advance"),
icon=ima.icon('run_cell_advance'),
tip=_("Run current cell and go to the next one "),
triggered=self.run_cell_and_advance,
context=Qt.WidgetShortcut)
self.register_shortcut(run_cell_advance_action, context="Editor",
name="Run cell and advance",
add_shortcut_to_tip=True)
self.debug_cell_action = create_action(
self,
_("Debug cell"),
icon=ima.icon('debug_cell'),
tip=_("Debug current cell "
"(Alt+Shift+Enter)"),
triggered=self.debug_cell,
context=Qt.WidgetShortcut)
self.register_shortcut(self.debug_cell_action, context="Editor",
name="Debug cell",
add_shortcut_to_tip=True)
re_run_last_cell_action = create_action(self,
_("Re-run last cell"),
tip=_("Re run last cell "),
triggered=self.re_run_last_cell,
context=Qt.WidgetShortcut)
self.register_shortcut(re_run_last_cell_action,
context="Editor",
name='re-run last cell',
add_shortcut_to_tip=True)
# --- Source code Toolbar ---
self.todo_list_action = create_action(self,
_("Show todo list"), icon=ima.icon('todo_list'),
tip=_("Show comments list (TODO/FIXME/XXX/HINT/TIP/@todo/"
"HACK/BUG/OPTIMIZE/!!!/???)"),
triggered=self.go_to_next_todo)
self.todo_menu = QMenu(self)
self.todo_menu.setStyleSheet("QMenu {menu-scrollable: 1;}")
self.todo_list_action.setMenu(self.todo_menu)
self.todo_menu.aboutToShow.connect(self.update_todo_menu)
self.warning_list_action = create_action(self,
_("Show warning/error list"), icon=ima.icon('wng_list'),
tip=_("Show code analysis warnings/errors"),
triggered=self.go_to_next_warning)
self.warning_menu = QMenu(self)
self.warning_menu.setStyleSheet("QMenu {menu-scrollable: 1;}")
self.warning_list_action.setMenu(self.warning_menu)
self.warning_menu.aboutToShow.connect(self.update_warning_menu)
self.previous_warning_action = create_action(self,
_("Previous warning/error"), icon=ima.icon('prev_wng'),
tip=_("Go to previous code analysis warning/error"),
triggered=self.go_to_previous_warning,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_warning_action,
context="Editor",
name="Previous warning",
add_shortcut_to_tip=True)
self.next_warning_action = create_action(self,
_("Next warning/error"), icon=ima.icon('next_wng'),
tip=_("Go to next code analysis warning/error"),
triggered=self.go_to_next_warning,
context=Qt.WidgetShortcut)
self.register_shortcut(self.next_warning_action,
context="Editor",
name="Next warning",
add_shortcut_to_tip=True)
self.previous_edit_cursor_action = create_action(self,
_("Last edit location"), icon=ima.icon('last_edit_location'),
tip=_("Go to last edit location"),
triggered=self.go_to_last_edit_location,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_edit_cursor_action,
context="Editor",
name="Last edit location",
add_shortcut_to_tip=True)
self.previous_cursor_action = create_action(self,
_("Previous cursor position"), icon=ima.icon('prev_cursor'),
tip=_("Go to previous cursor position"),
triggered=self.go_to_previous_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.previous_cursor_action,
context="Editor",
name="Previous cursor position",
add_shortcut_to_tip=True)
self.next_cursor_action = create_action(self,
_("Next cursor position"), icon=ima.icon('next_cursor'),
tip=_("Go to next cursor position"),
triggered=self.go_to_next_cursor_position,
context=Qt.WidgetShortcut)
self.register_shortcut(self.next_cursor_action,
context="Editor",
name="Next cursor position",
add_shortcut_to_tip=True)
# --- Edit Toolbar ---
self.toggle_comment_action = create_action(self,
_("Comment")+"/"+_("Uncomment"), icon=ima.icon('comment'),
tip=_("Comment current line or selection"),
triggered=self.toggle_comment, context=Qt.WidgetShortcut)
self.register_shortcut(self.toggle_comment_action, context="Editor",
name="Toggle comment")
blockcomment_action = create_action(self, _("Add &block comment"),
tip=_("Add block comment around "
"current line or selection"),
triggered=self.blockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(blockcomment_action, context="Editor",
name="Blockcomment")
unblockcomment_action = create_action(self,
_("R&emove block comment"),
tip = _("Remove comment block around "
"current line or selection"),
triggered=self.unblockcomment, context=Qt.WidgetShortcut)
self.register_shortcut(unblockcomment_action, context="Editor",
name="Unblockcomment")
# ----------------------------------------------------------------------
# The following action shortcuts are hard-coded in CodeEditor
# keyPressEvent handler (the shortcut is here only to inform user):
# (context=Qt.WidgetShortcut -> disable shortcut for other widgets)
self.indent_action = create_action(self,
_("Indent"), "Tab", icon=ima.icon('indent'),
tip=_("Indent current line or selection"),
triggered=self.indent, context=Qt.WidgetShortcut)
self.unindent_action = create_action(self,
_("Unindent"), "Shift+Tab", icon=ima.icon('unindent'),
tip=_("Unindent current line or selection"),
triggered=self.unindent, context=Qt.WidgetShortcut)
self.text_uppercase_action = create_action(self,
_("Toggle Uppercase"), icon=ima.icon('toggle_uppercase'),
tip=_("Change to uppercase current line or selection"),
triggered=self.text_uppercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_uppercase_action, context="Editor",
name="transform to uppercase")
self.text_lowercase_action = create_action(self,
_("Toggle Lowercase"), icon=ima.icon('toggle_lowercase'),
tip=_("Change to lowercase current line or selection"),
triggered=self.text_lowercase, context=Qt.WidgetShortcut)
self.register_shortcut(self.text_lowercase_action, context="Editor",
name="transform to lowercase")
# ----------------------------------------------------------------------
self.win_eol_action = create_action(self,
_("Carriage return and line feed (Windows)"),
toggled=lambda checked: self.toggle_eol_chars('nt', checked))
self.linux_eol_action = create_action(self,
_("Line feed (UNIX)"),
toggled=lambda checked: self.toggle_eol_chars('posix', checked))
self.mac_eol_action = create_action(self,
_("Carriage return (Mac)"),
toggled=lambda checked: self.toggle_eol_chars('mac', checked))
eol_action_group = QActionGroup(self)
eol_actions = (self.win_eol_action, self.linux_eol_action,
self.mac_eol_action)
add_actions(eol_action_group, eol_actions)
eol_menu = QMenu(_("Convert end-of-line characters"), self)
add_actions(eol_menu, eol_actions)
trailingspaces_action = create_action(
self,
_("Remove trailing spaces"),
triggered=self.remove_trailing_spaces)
formatter = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', 'formatting'),
'')
self.formatting_action = create_action(
self,
_('Format file or selection with {0}').format(
formatter.capitalize()),
shortcut=CONF.get_shortcut('editor', 'autoformatting'),
context=Qt.WidgetShortcut,
triggered=self.format_document_or_selection)
self.formatting_action.setEnabled(False)
# Checkable actions
showblanks_action = self._create_checkable_action(
_("Show blank spaces"), 'blank_spaces', 'set_blanks_enabled')
scrollpastend_action = self._create_checkable_action(
_("Scroll past the end"), 'scroll_past_end',
'set_scrollpastend_enabled')
showindentguides_action = self._create_checkable_action(
_("Show indent guides"), 'indent_guides', 'set_indent_guides')
showcodefolding_action = self._create_checkable_action(
_("Show code folding"), 'code_folding', 'set_code_folding_enabled')
show_classfunc_dropdown_action = self._create_checkable_action(
_("Show selector for classes and functions"),
'show_class_func_dropdown', 'set_classfunc_dropdown_visible')
show_codestyle_warnings_action = self._create_checkable_action(
_("Show code style warnings"), 'pycodestyle',)
show_docstring_warnings_action = self._create_checkable_action(
_("Show docstring style warnings"), 'pydocstyle')
underline_errors = self._create_checkable_action(
_("Underline errors and warnings"),
'underline_errors', 'set_underline_errors_enabled')
self.checkable_actions = {
'blank_spaces': showblanks_action,
'scroll_past_end': scrollpastend_action,
'indent_guides': showindentguides_action,
'code_folding': showcodefolding_action,
'show_class_func_dropdown': show_classfunc_dropdown_action,
'pycodestyle': show_codestyle_warnings_action,
'pydocstyle': show_docstring_warnings_action,
'underline_errors': underline_errors}
fixindentation_action = create_action(self, _("Fix indentation"),
tip=_("Replace tab characters by space characters"),
triggered=self.fix_indentation)
gotoline_action = create_action(self, _("Go to line..."),
icon=ima.icon('gotoline'),
triggered=self.go_to_line,
context=Qt.WidgetShortcut)
self.register_shortcut(gotoline_action, context="Editor",
name="Go to line")
workdir_action = create_action(self,
_("Set console working directory"),
icon=ima.icon('DirOpenIcon'),
tip=_("Set current console (and file explorer) working "
"directory to current script directory"),
triggered=self.__set_workdir)
self.max_recent_action = create_action(self,
_("Maximum number of recent files..."),
triggered=self.change_max_recent_files)
self.clear_recent_action = create_action(self,
_("Clear this list"), tip=_("Clear recent files list"),
triggered=self.clear_recent_files)
# Fixes spyder-ide/spyder#6055.
# See: https://bugreports.qt.io/browse/QTBUG-8596
self.tab_navigation_actions = []
if sys.platform == 'darwin':
self.go_to_next_file_action = create_action(
self,
_("Go to next file"),
shortcut=CONF.get_shortcut('editor', 'go to previous file'),
triggered=self.go_to_next_file,
)
self.go_to_previous_file_action = create_action(
self,
_("Go to previous file"),
shortcut=CONF.get_shortcut('editor', 'go to next file'),
triggered=self.go_to_previous_file,
)
self.register_shortcut(
self.go_to_next_file_action,
context="Editor",
name="Go to next file",
)
self.register_shortcut(
self.go_to_previous_file_action,
context="Editor",
name="Go to previous file",
)
self.tab_navigation_actions = [
MENU_SEPARATOR,
self.go_to_previous_file_action,
self.go_to_next_file_action,
]
# ---- File menu/toolbar construction ----
self.recent_file_menu = QMenu(_("Open &recent"), self)
self.recent_file_menu.aboutToShow.connect(self.update_recent_file_menu)
from spyder.plugins.mainmenu.api import (
ApplicationMenus, FileMenuSections)
# New Section
self.main.mainmenu.add_item_to_application_menu(
self.new_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.New,
before_section=FileMenuSections.Restart)
# Open section
open_actions = [
self.open_action,
self.open_last_closed_action,
self.recent_file_menu,
]
for open_action in open_actions:
self.main.mainmenu.add_item_to_application_menu(
open_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Open,
before_section=FileMenuSections.Restart)
# Save section
save_actions = [
self.save_action,
self.save_all_action,
save_as_action,
save_copy_as_action,
self.revert_action,
]
for save_action in save_actions:
self.main.mainmenu.add_item_to_application_menu(
save_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Save,
before_section=FileMenuSections.Restart)
# Print
print_actions = [
print_preview_action,
self.print_action,
]
for print_action in print_actions:
self.main.mainmenu.add_item_to_application_menu(
print_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Print,
before_section=FileMenuSections.Restart)
# Close
close_actions = [
self.close_action,
self.close_all_action
]
for close_action in close_actions:
self.main.mainmenu.add_item_to_application_menu(
close_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Close,
before_section=FileMenuSections.Restart)
# Navigation
if sys.platform == 'darwin':
self.main.mainmenu.add_item_to_application_menu(
self.tab_navigation_actions,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Navigation,
before_section=FileMenuSections.Restart)
file_toolbar_actions = ([self.new_action, self.open_action,
self.save_action, self.save_all_action] +
self.main.file_toolbar_actions)
self.main.file_toolbar_actions += file_toolbar_actions
# ---- Find menu/toolbar construction ----
self.main.search_menu_actions = [find_action,
find_next_action,
find_previous_action,
replace_action]
self.main.search_toolbar_actions = [find_action,
find_next_action,
replace_action]
# ---- Edit menu/toolbar construction ----
self.edit_menu_actions = [self.toggle_comment_action,
blockcomment_action, unblockcomment_action,
self.indent_action, self.unindent_action,
self.text_uppercase_action,
self.text_lowercase_action]
# ---- Search menu/toolbar construction ----
self.main.search_menu_actions += [gotoline_action]
# ---- Run menu/toolbar construction ----
run_menu_actions = [run_action, run_cell_action,
run_cell_advance_action,
re_run_last_cell_action, MENU_SEPARATOR,
run_selected_action, re_run_action,
configure_action, MENU_SEPARATOR]
self.main.run_menu_actions += run_menu_actions
run_toolbar_actions = [run_action, run_cell_action,
run_cell_advance_action, run_selected_action]
self.main.run_toolbar_actions += run_toolbar_actions
# ---- Debug menu/toolbar construction ----
# NOTE: 'list_breakpoints' is used by the breakpoints
# plugin to add its "List breakpoints" action to this
# menu
debug_menu_actions = [
self.debug_action,
self.debug_cell_action,
self.debug_next_action,
self.debug_step_action,
self.debug_return_action,
self.debug_continue_action,
self.debug_exit_action,
MENU_SEPARATOR,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
clear_all_breakpoints_action,
'list_breakpoints',
MENU_SEPARATOR,
self.winpdb_action
]
self.main.debug_menu_actions += debug_menu_actions
debug_toolbar_actions = [
self.debug_action,
self.debug_next_action,
self.debug_step_action,
self.debug_return_action,
self.debug_continue_action,
self.debug_exit_action
]
self.main.debug_toolbar_actions += debug_toolbar_actions
# ---- Source menu/toolbar construction ----
source_menu_actions = [
showblanks_action,
scrollpastend_action,
showindentguides_action,
showcodefolding_action,
show_classfunc_dropdown_action,
show_codestyle_warnings_action,
show_docstring_warnings_action,
underline_errors,
MENU_SEPARATOR,
self.todo_list_action,
self.warning_list_action,
self.previous_warning_action,
self.next_warning_action,
MENU_SEPARATOR,
self.previous_edit_cursor_action,
self.previous_cursor_action,
self.next_cursor_action,
MENU_SEPARATOR,
eol_menu,
trailingspaces_action,
fixindentation_action,
self.formatting_action
]
self.main.source_menu_actions += source_menu_actions
# ---- Dock widget and file dependent actions ----
self.dock_toolbar_actions = (
file_toolbar_actions +
[MENU_SEPARATOR] +
run_toolbar_actions +
[MENU_SEPARATOR] +
debug_toolbar_actions
)
self.pythonfile_dependent_actions = [
run_action,
configure_action,
set_clear_breakpoint_action,
set_cond_breakpoint_action,
self.debug_action,
self.debug_cell_action,
run_selected_action,
run_cell_action,
run_cell_advance_action,
re_run_last_cell_action,
blockcomment_action,
unblockcomment_action,
self.winpdb_action
]
self.cythonfile_compatible_actions = [run_action, configure_action]
self.file_dependent_actions = (
self.pythonfile_dependent_actions +
[
self.save_action,
save_as_action,
save_copy_as_action,
print_preview_action,
self.print_action,
self.save_all_action,
gotoline_action,
workdir_action,
self.close_action,
self.close_all_action,
self.toggle_comment_action,
self.revert_action,
self.indent_action,
self.unindent_action
]
)
self.stack_menu_actions = [gotoline_action, workdir_action]
return self.file_dependent_actions
def update_pdb_state(self, state, last_step):
"""
Enable/disable debugging actions and handle pdb state change.
Some examples depending on the debugging state:
self.debug_action.setEnabled(not state)
self.debug_cell_action.setEnabled(not state)
self.debug_next_action.setEnabled(state)
self.debug_step_action.setEnabled(state)
self.debug_return_action.setEnabled(state)
self.debug_continue_action.setEnabled(state)
self.debug_exit_action.setEnabled(state)
"""
current_editor = self.get_current_editor()
if current_editor:
current_editor.update_debugger_panel_state(state, last_step)
def register_plugin(self):
"""Register plugin in Spyder's main window"""
completions = self.main.get_plugin(Plugins.Completions, error=False)
outlineexplorer = self.main.get_plugin(
Plugins.OutlineExplorer, error=False)
self.main.restore_scrollbar_position.connect(
self.restore_scrollbar_position)
self.main.console.sig_edit_goto_requested.connect(self.load)
self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)
if completions:
self.main.completions.sig_language_completions_available.connect(
self.register_completion_capabilities)
self.main.completions.sig_open_file.connect(self.load)
self.main.completions.sig_editor_rpc.connect(self._rpc_call)
self.main.completions.sig_stop_completions.connect(
self.stop_completion_services)
self.sig_file_opened_closed_or_updated.connect(
self.main.completions.file_opened_closed_or_updated)
if outlineexplorer:
self.set_outlineexplorer(self.main.outlineexplorer)
self.add_dockwidget()
self.update_pdb_state(False, {})
# Add modes to switcher
self.switcher_manager = EditorSwitcherManager(
self,
self.main.switcher,
lambda: self.get_current_editor(),
lambda: self.get_current_editorstack(),
section=self.get_plugin_title())
def update_source_menu(self, options, **kwargs):
option_names = [opt[-1] if isinstance(opt, tuple) else opt
for opt in options]
named_options = dict(zip(option_names, options))
for name, action in self.checkable_actions.items():
if name in named_options:
section = 'completions'
if name == 'underline_errors':
section = 'editor'
opt = named_options[name]
state = self.get_option(opt, section=section)
# Avoid triggering the action when this action changes state
# See: spyder-ide/spyder#9915
action.blockSignals(True)
action.setChecked(state)
action.blockSignals(False)
def update_font(self):
"""Update font from Preferences"""
font = self.get_font()
color_scheme = self.get_color_scheme()
for editorstack in self.editorstacks:
editorstack.set_default_font(font, color_scheme)
completion_size = CONF.get('main', 'completion/size')
for finfo in editorstack.data:
comp_widget = finfo.editor.completion_widget
kite_call_to_action = finfo.editor.kite_call_to_action
comp_widget.setup_appearance(completion_size, font)
kite_call_to_action.setFont(font)
def set_ancestor(self, ancestor):
"""
Set ancestor of child widgets like the CompletionWidget.
Needed to properly set position of the widget based on the correct
parent/ancestor.
See spyder-ide/spyder#11076
"""
for editorstack in self.editorstacks:
for finfo in editorstack.data:
comp_widget = finfo.editor.completion_widget
kite_call_to_action = finfo.editor.kite_call_to_action
comp_widget.setParent(ancestor)
kite_call_to_action.setParent(ancestor)
def _create_checkable_action(self, text, conf_name, method=''):
"""Helper function to create a checkable action.
Args:
text (str): Text to be displayed in the action.
conf_name (str): configuration setting associated with the
action
method (str): name of EditorStack class that will be used
to update the changes in each editorstack.
"""
        def toggle(checked):
            self.switch_to_plugin()
            self._toggle_checkable_action(checked, method, conf_name)
        action = create_action(self, text, toggled=toggle)
action.blockSignals(True)
if conf_name not in ['pycodestyle', 'pydocstyle']:
action.setChecked(self.get_option(conf_name))
else:
opt = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', conf_name),
False
)
action.setChecked(opt)
action.blockSignals(False)
return action
@Slot(bool, str, str)
def _toggle_checkable_action(self, checked, method_name, conf_name):
"""
        Handle the toggle of a checkable action.
Update editorstacks, PyLS and CONF.
Args:
checked (bool): State of the action.
method_name (str): name of EditorStack class that will be used
to update the changes in each editorstack.
conf_name (str): configuration setting associated with the
action.
"""
if method_name:
if self.editorstacks:
for editorstack in self.editorstacks:
try:
method = getattr(editorstack, method_name)
method(checked)
except AttributeError as e:
logger.error(e, exc_info=True)
self.set_option(conf_name, checked)
else:
if conf_name in ('pycodestyle', 'pydocstyle'):
CONF.set(
'completions',
('provider_configuration', 'lsp', 'values', conf_name),
checked)
if self.main.get_plugin(Plugins.Completions, error=False):
completions = self.main.completions
completions.after_configuration_update([])
#------ Focus tabwidget
def __get_focused_editorstack(self):
fwidget = QApplication.focusWidget()
if isinstance(fwidget, EditorStack):
return fwidget
else:
for editorstack in self.editorstacks:
if editorstack.isAncestorOf(fwidget):
return editorstack
def set_last_focused_editorstack(self, editorwindow, editorstack):
self.last_focused_editorstack[editorwindow] = editorstack
# very last editorstack
self.last_focused_editorstack[None] = editorstack
def get_last_focused_editorstack(self, editorwindow=None):
return self.last_focused_editorstack[editorwindow]
def remove_last_focused_editorstack(self, editorstack):
for editorwindow, widget in list(
self.last_focused_editorstack.items()):
if widget is editorstack:
self.last_focused_editorstack[editorwindow] = None
def save_focused_editorstack(self):
editorstack = self.__get_focused_editorstack()
if editorstack is not None:
for win in [self]+self.editorwindows:
if win.isAncestorOf(editorstack):
self.set_last_focused_editorstack(win, editorstack)
# ------ Handling editorstacks
def register_editorstack(self, editorstack):
self.editorstacks.append(editorstack)
self.register_widget_shortcuts(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the Editor plugin
self.set_last_focused_editorstack(self, editorstack)
editorstack.set_closable(len(self.editorstacks) > 1)
if self.outlineexplorer is not None:
editorstack.set_outlineexplorer(
self.outlineexplorer.get_widget())
editorstack.set_find_widget(self.find_widget)
editorstack.reset_statusbar.connect(self.readwrite_status.hide)
editorstack.reset_statusbar.connect(self.encoding_status.hide)
editorstack.reset_statusbar.connect(self.cursorpos_status.hide)
editorstack.readonly_changed.connect(
self.readwrite_status.update_readonly)
editorstack.encoding_changed.connect(
self.encoding_status.update_encoding)
editorstack.sig_editor_cursor_position_changed.connect(
self.cursorpos_status.update_cursor_position)
editorstack.sig_editor_cursor_position_changed.connect(
self.current_editor_cursor_changed)
editorstack.sig_refresh_eol_chars.connect(
self.eol_status.update_eol)
editorstack.current_file_changed.connect(
self.vcs_status.update_vcs)
editorstack.file_saved.connect(
self.vcs_status.update_vcs_state)
editorstack.set_io_actions(self.new_action, self.open_action,
self.save_action, self.revert_action)
editorstack.set_tempfile_path(self.TEMPFILE_PATH)
settings = (
('set_todolist_enabled', 'todo_list'),
('set_blanks_enabled', 'blank_spaces'),
('set_underline_errors_enabled', 'underline_errors'),
('set_scrollpastend_enabled', 'scroll_past_end'),
('set_linenumbers_enabled', 'line_numbers'),
('set_edgeline_enabled', 'edge_line'),
('set_edgeline_columns', 'edge_line_columns'),
('set_indent_guides', 'indent_guides'),
('set_code_folding_enabled', 'code_folding'),
('set_focus_to_editor', 'focus_to_editor'),
('set_run_cell_copy', 'run_cell_copy'),
('set_close_parentheses_enabled', 'close_parentheses'),
('set_close_quotes_enabled', 'close_quotes'),
('set_add_colons_enabled', 'add_colons'),
('set_auto_unindent_enabled', 'auto_unindent'),
('set_indent_chars', 'indent_chars'),
('set_tab_stop_width_spaces', 'tab_stop_width_spaces'),
('set_wrap_enabled', 'wrap'),
('set_tabmode_enabled', 'tab_always_indent'),
('set_stripmode_enabled', 'strip_trailing_spaces_on_modify'),
('set_intelligent_backspace_enabled', 'intelligent_backspace'),
('set_automatic_completions_enabled', 'automatic_completions'),
('set_automatic_completions_after_chars',
'automatic_completions_after_chars'),
('set_automatic_completions_after_ms',
'automatic_completions_after_ms'),
('set_completions_hint_enabled', 'completions_hint'),
('set_completions_hint_after_ms',
'completions_hint_after_ms'),
('set_highlight_current_line_enabled', 'highlight_current_line'),
('set_highlight_current_cell_enabled', 'highlight_current_cell'),
('set_occurrence_highlighting_enabled', 'occurrence_highlighting'),
('set_occurrence_highlighting_timeout', 'occurrence_highlighting/timeout'),
('set_checkeolchars_enabled', 'check_eol_chars'),
('set_tabbar_visible', 'show_tab_bar'),
('set_classfunc_dropdown_visible', 'show_class_func_dropdown'),
('set_always_remove_trailing_spaces', 'always_remove_trailing_spaces'),
('set_remove_trailing_newlines', 'always_remove_trailing_newlines'),
('set_add_newline', 'add_newline'),
('set_convert_eol_on_save', 'convert_eol_on_save'),
('set_convert_eol_on_save_to', 'convert_eol_on_save_to'),
)
for method, setting in settings:
getattr(editorstack, method)(self.get_option(setting))
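        # For example (illustration only), the ('set_blanks_enabled',
        # 'blank_spaces') entry above expands to:
        #     editorstack.set_blanks_enabled(self.get_option('blank_spaces'))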
editorstack.set_help_enabled(CONF.get('help', 'connect/editor'))
hover_hints = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values',
'enable_hover_hints'),
True
)
format_on_save = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', 'format_on_save'),
False
)
editorstack.set_hover_hints_enabled(hover_hints)
editorstack.set_format_on_save(format_on_save)
color_scheme = self.get_color_scheme()
editorstack.set_default_font(self.get_font(), color_scheme)
editorstack.starting_long_process.connect(self.starting_long_process)
editorstack.ending_long_process.connect(self.ending_long_process)
# Redirect signals
editorstack.sig_option_changed.connect(self.sig_option_changed)
editorstack.redirect_stdio.connect(
lambda state: self.redirect_stdio.emit(state))
editorstack.exec_in_extconsole.connect(
lambda text, option:
self.exec_in_extconsole.emit(text, option))
editorstack.run_cell_in_ipyclient.connect(
lambda code, cell_name, filename, run_cell_copy:
self.run_cell_in_ipyclient.emit(code, cell_name, filename,
run_cell_copy))
editorstack.debug_cell_in_ipyclient.connect(
lambda code, cell_name, filename, run_cell_copy:
self.debug_cell_in_ipyclient.emit(code, cell_name, filename,
run_cell_copy))
editorstack.update_plugin_title.connect(
lambda: self.sig_update_plugin_title.emit())
editorstack.editor_focus_changed.connect(self.save_focused_editorstack)
editorstack.editor_focus_changed.connect(self.main.plugin_focus_changed)
editorstack.editor_focus_changed.connect(self.sig_editor_focus_changed)
editorstack.zoom_in.connect(lambda: self.zoom(1))
editorstack.zoom_out.connect(lambda: self.zoom(-1))
editorstack.zoom_reset.connect(lambda: self.zoom(0))
editorstack.sig_open_file.connect(self.report_open_file)
editorstack.sig_new_file.connect(lambda s: self.new(text=s))
editorstack.sig_new_file[()].connect(self.new)
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.sig_close_file.connect(self.remove_file_cursor_history)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.opened_files_list_changed.connect(
self.opened_files_list_changed)
editorstack.active_languages_stats.connect(
self.update_active_languages)
editorstack.sig_go_to_definition.connect(
lambda fname, line, col: self.load(
fname, line, start_column=col))
editorstack.sig_perform_completion_request.connect(
self.send_completion_request)
editorstack.todo_results_changed.connect(self.todo_results_changed)
editorstack.update_code_analysis_actions.connect(
self.update_code_analysis_actions)
editorstack.update_code_analysis_actions.connect(
self.update_todo_actions)
editorstack.refresh_file_dependent_actions.connect(
self.refresh_file_dependent_actions)
editorstack.refresh_save_all_action.connect(self.refresh_save_all_action)
editorstack.sig_refresh_eol_chars.connect(self.refresh_eol_chars)
editorstack.sig_refresh_formatting.connect(self.refresh_formatting)
editorstack.sig_breakpoints_saved.connect(self.breakpoints_saved)
editorstack.text_changed_at.connect(self.text_changed_at)
editorstack.current_file_changed.connect(self.current_file_changed)
editorstack.plugin_load.connect(self.load)
editorstack.plugin_load[()].connect(self.load)
editorstack.edit_goto.connect(self.load)
editorstack.sig_save_as.connect(self.save_as)
editorstack.sig_prev_edit_pos.connect(self.go_to_last_edit_location)
editorstack.sig_prev_cursor.connect(self.go_to_previous_cursor_position)
editorstack.sig_next_cursor.connect(self.go_to_next_cursor_position)
editorstack.sig_prev_warning.connect(self.go_to_previous_warning)
editorstack.sig_next_warning.connect(self.go_to_next_warning)
editorstack.sig_save_bookmark.connect(self.save_bookmark)
editorstack.sig_load_bookmark.connect(self.load_bookmark)
editorstack.sig_save_bookmarks.connect(self.save_bookmarks)
editorstack.sig_help_requested.connect(self.sig_help_requested)
# Register editorstack's autosave component with plugin's autosave
# component
self.autosave.register_autosave_for_stack(editorstack.autosave)
def unregister_editorstack(self, editorstack):
"""Removing editorstack only if it's not the last remaining"""
self.remove_last_focused_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True
else:
# editorstack was not removed!
return False
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
for finfo in editorstack.data:
self.register_widget_shortcuts(finfo.editor)
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def call_all_editorstacks(self, method, args, **kwargs):
"""Call a method with arguments on all editorstacks."""
for editorstack in self.editorstacks:
method = getattr(editorstack, method)
method(*args, **kwargs)
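    # Hypothetical usage (not in the original source): another component could
    # broadcast a change to every stack with, e.g.,
    #     editor.call_all_editorstacks('set_blanks_enabled', (True,))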
#------ Handling editor windows
def setup_other_windows(self):
"""Setup toolbars and menus for 'New window' instances"""
# TODO: All the actions here should be taken from
# the MainMenus plugin
file_menu_actions = self.main.mainmenu.get_application_menu(
ApplicationMenus.File).get_actions()
tools_menu_actions = self.main.mainmenu.get_application_menu(
ApplicationMenus.Tools).get_actions()
help_menu_actions = self.main.mainmenu.get_application_menu(
ApplicationMenus.Help).get_actions()
self.toolbar_list = ((_("File toolbar"), "file_toolbar",
self.main.file_toolbar_actions),
(_("Run toolbar"), "run_toolbar",
self.main.run_toolbar_actions),
(_("Debug toolbar"), "debug_toolbar",
self.main.debug_toolbar_actions))
self.menu_list = ((_("&File"), file_menu_actions),
(_("&Edit"), self.main.edit_menu_actions),
(_("&Search"), self.main.search_menu_actions),
(_("Sour&ce"), self.main.source_menu_actions),
(_("&Run"), self.main.run_menu_actions),
(_("&Tools"), tools_menu_actions),
(_("&View"), []),
(_("&Help"), help_menu_actions))
# Create pending new windows:
for layout_settings in self.editorwindows_to_be_created:
win = self.create_new_window()
win.set_layout_settings(layout_settings)
def switch_to_plugin(self):
"""
Reimplemented method to deactivate shortcut when
opening a new window.
"""
if not self.editorwindows:
super(Editor, self).switch_to_plugin()
def create_new_window(self):
window = EditorMainWindow(
self, self.stack_menu_actions, self.toolbar_list, self.menu_list)
window.add_toolbars_to_menu("&View", window.get_toolbars())
window.load_toolbars()
window.resize(self.size())
window.show()
window.editorwidget.editorsplitter.editorstack.new_window = True
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
return window
def register_editorwindow(self, window):
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
self.editorwindows.pop(self.editorwindows.index(window))
#------ Accessors
def get_filenames(self):
return [finfo.filename for finfo in self.editorstacks[0].data]
def get_filename_index(self, filename):
return self.editorstacks[0].has_filename(filename)
def get_current_editorstack(self, editorwindow=None):
if self.editorstacks is not None:
if len(self.editorstacks) == 1:
editorstack = self.editorstacks[0]
else:
editorstack = self.__get_focused_editorstack()
if editorstack is None or editorwindow is not None:
editorstack = self.get_last_focused_editorstack(
editorwindow)
if editorstack is None:
editorstack = self.editorstacks[0]
return editorstack
def get_current_editor(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_editor()
def get_current_finfo(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_finfo()
def get_current_filename(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_filename()
def get_current_language(self):
editorstack = self.get_current_editorstack()
if editorstack is not None:
return editorstack.get_current_language()
def is_file_opened(self, filename=None):
return self.editorstacks[0].is_file_opened(filename)
def set_current_filename(self, filename, editorwindow=None, focus=True):
"""Set focus to *filename* if this file has been opened.
        Return the editor instance associated with *filename*.
"""
editorstack = self.get_current_editorstack(editorwindow)
return editorstack.set_current_filename(filename, focus)
def set_path(self):
for finfo in self.editorstacks[0].data:
finfo.path = self.main.get_spyder_pythonpath()
#------ Refresh methods
def refresh_file_dependent_actions(self):
"""Enable/disable file dependent actions
(only if dockwidget is visible)"""
if self.dockwidget and self.dockwidget.isVisible():
enable = self.get_current_editor() is not None
for action in self.file_dependent_actions:
action.setEnabled(enable)
def refresh_save_all_action(self):
"""Enable 'Save All' if there are files to be saved"""
editorstack = self.get_current_editorstack()
if editorstack:
state = any(finfo.editor.document().isModified() or finfo.newly_created
for finfo in editorstack.data)
self.save_all_action.setEnabled(state)
def update_warning_menu(self):
"""Update warning list menu"""
editor = self.get_current_editor()
check_results = editor.get_current_warnings()
self.warning_menu.clear()
filename = self.get_current_filename()
for message, line_number in check_results:
error = 'syntax' in message
text = message[:1].upper() + message[1:]
icon = ima.icon('error') if error else ima.icon('warning')
slot = lambda _checked, _l=line_number: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.warning_menu.addAction(action)
def update_todo_menu(self):
"""Update todo list menu"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
self.todo_menu.clear()
filename = self.get_current_filename()
for text, line0 in results:
icon = ima.icon('todo')
slot = lambda _checked, _l=line0: self.load(filename, goto=_l)
action = create_action(self, text=text, icon=icon, triggered=slot)
self.todo_menu.addAction(action)
self.update_todo_actions()
def todo_results_changed(self):
"""
Synchronize todo results between editorstacks
Refresh todo list navigation buttons
"""
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
index = editorstack.get_stack_index()
if index != -1:
filename = editorstack.data[index].filename
for other_editorstack in self.editorstacks:
if other_editorstack is not editorstack:
other_editorstack.set_todo_results(filename, results)
self.update_todo_actions()
def refresh_eol_chars(self, os_name):
os_name = to_text_string(os_name)
self.__set_eol_chars = False
if os_name == 'nt':
self.win_eol_action.setChecked(True)
elif os_name == 'posix':
self.linux_eol_action.setChecked(True)
else:
self.mac_eol_action.setChecked(True)
self.__set_eol_chars = True
def refresh_formatting(self, status):
self.formatting_action.setEnabled(status)
def refresh_formatter_name(self):
formatter = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', 'formatting'),
'')
self.formatting_action.setText(
_('Format file or selection with {0}').format(
formatter.capitalize()))
#------ Slots
def opened_files_list_changed(self):
"""
Opened files list has changed:
--> open/close file action
--> modification ('*' added to title)
--> current edited file has changed
"""
# Refresh Python file dependent actions:
editor = self.get_current_editor()
if editor:
python_enable = editor.is_python_or_ipython()
cython_enable = python_enable or (
programs.is_module_installed('Cython') and editor.is_cython())
for action in self.pythonfile_dependent_actions:
if action in self.cythonfile_compatible_actions:
enable = cython_enable
else:
enable = python_enable
if action is self.winpdb_action:
action.setEnabled(enable and WINPDB_PATH is not None)
else:
action.setEnabled(enable)
self.sig_file_opened_closed_or_updated.emit(
self.get_current_filename(), self.get_current_language())
def update_code_analysis_actions(self):
"""Update actions in the warnings menu."""
editor = self.get_current_editor()
# To fix an error at startup
if editor is None:
return
results = editor.get_current_warnings()
# Update code analysis actions
state = results is not None and len(results)
for action in (self.warning_list_action, self.previous_warning_action,
self.next_warning_action):
if state is not None:
action.setEnabled(state)
def update_todo_actions(self):
editorstack = self.get_current_editorstack()
results = editorstack.get_todo_results()
state = (self.get_option('todo_list') and
results is not None and len(results))
if state is not None:
self.todo_list_action.setEnabled(state)
@Slot(set)
def update_active_languages(self, languages):
if self.main.get_plugin(Plugins.Completions, error=False):
self.main.completions.update_client_status(languages)
# ------ Bookmarks
def save_bookmarks(self, filename, bookmarks):
"""Receive bookmark changes and save them."""
filename = to_text_string(filename)
bookmarks = to_text_string(bookmarks)
filename = osp.normpath(osp.abspath(filename))
bookmarks = eval(bookmarks)
save_bookmarks(filename, bookmarks)
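    # Note: *bookmarks* arrives as text (the repr of a bookmark data
    # structure), so it is eval()'d back into a Python object before being
    # persisted with save_bookmarks().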
#------ File I/O
def __load_temp_file(self):
"""Load temporary file from a text file in user home directory"""
if not osp.isfile(self.TEMPFILE_PATH):
# Creating temporary file
default = ['# -*- coding: utf-8 -*-',
'"""', _("Spyder Editor"), '',
_("This is a temporary script file."),
'"""', '', '']
text = os.linesep.join([encoding.to_unicode(qstr)
for qstr in default])
try:
encoding.write(to_text_string(text), self.TEMPFILE_PATH,
'utf-8')
except EnvironmentError:
self.new()
return
self.load(self.TEMPFILE_PATH)
@Slot()
def __set_workdir(self):
"""Set current script directory as working directory"""
fname = self.get_current_filename()
if fname is not None:
directory = osp.dirname(osp.abspath(fname))
self.sig_dir_opened.emit(directory)
def __add_recent_file(self, fname):
"""Add to recent file list"""
if fname is None:
return
if fname in self.recent_files:
self.recent_files.remove(fname)
self.recent_files.insert(0, fname)
if len(self.recent_files) > self.get_option('max_recent_files'):
self.recent_files.pop(-1)
def _clone_file_everywhere(self, finfo):
"""Clone file (*src_editor* widget) in all editorstacks
Cloning from the first editorstack in which every single new editor
is created (when loading or creating a new file)"""
for editorstack in self.editorstacks[1:]:
editor = editorstack.clone_editor_from(finfo, set_current=False)
self.register_widget_shortcuts(editor)
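    # The first editorstack acts as the reference copy: new editors are always
    # created there first (see new() and load() below) and then cloned into
    # every other editorstack, so split views and detached editor windows stay
    # in sync.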
@Slot()
@Slot(str)
def new(self, fname=None, editorstack=None, text=None):
"""
Create a new file - Untitled
fname=None --> fname will be 'untitledXX.py' but do not create file
fname=<basestring> --> create file
"""
# If no text is provided, create default content
empty = False
try:
if text is None:
default_content = True
text, enc = encoding.read(self.TEMPLATE_PATH)
enc_match = re.search(r'-*- coding: ?([a-z0-9A-Z\-]*) -*-',
text)
if enc_match:
enc = enc_match.group(1)
# Initialize template variables
# Windows
username = encoding.to_unicode_from_fs(
os.environ.get('USERNAME', ''))
# Linux, Mac OS X
if not username:
username = encoding.to_unicode_from_fs(
os.environ.get('USER', '-'))
VARS = {
'date': time.ctime(),
'username': username,
}
try:
text = text % VARS
except Exception:
pass
else:
default_content = False
enc = encoding.read(self.TEMPLATE_PATH)[1]
except (IOError, OSError):
text = ''
enc = 'utf-8'
default_content = True
create_fname = lambda n: to_text_string(_("untitled")) + ("%d.py" % n)
# Creating editor widget
if editorstack is None:
current_es = self.get_current_editorstack()
else:
current_es = editorstack
created_from_here = fname is None
if created_from_here:
if self.untitled_num == 0:
for finfo in current_es.data:
current_filename = finfo.editor.filename
if _("untitled") in current_filename:
# Start the counter of the untitled_num with respect
# to this number if there's other untitled file in
# spyder. Please see spyder-ide/spyder#7831
fname_data = osp.splitext(current_filename)
try:
act_num = int(
fname_data[0].split(_("untitled"))[-1])
self.untitled_num = act_num + 1
except ValueError:
# Catch the error in case the user has something
# different from a number after the untitled
# part.
# Please see spyder-ide/spyder#12892
self.untitled_num = 0
while True:
fname = create_fname(self.untitled_num)
self.untitled_num += 1
if not osp.isfile(fname):
break
basedir = getcwd_or_home()
if self.main.projects.get_active_project() is not None:
basedir = self.main.projects.get_active_project_path()
else:
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
fname = osp.abspath(osp.join(basedir, fname))
else:
# QString when triggered by a Qt signal
fname = osp.abspath(to_text_string(fname))
index = current_es.has_filename(fname)
if index is not None and not current_es.close_file(index):
return
# Creating the editor widget in the first editorstack (the one that
# can't be destroyed), then cloning this editor widget in all other
# editorstacks:
# Setting empty to True by default to avoid the additional space
# created at the end of the templates.
# See: spyder-ide/spyder#12596
finfo = self.editorstacks[0].new(fname, enc, text, default_content,
empty=True)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(finfo.filename)
self.register_widget_shortcuts(current_editor)
if not created_from_here:
self.save(force=True)
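    # Illustrative example: with untitled0.py and untitled1.py already open,
    # the loop above picks 'untitled2.py' and places it in the active project
    # directory, or next to the current file when no project is active.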
def edit_template(self):
"""Edit new file template"""
self.load(self.TEMPLATE_PATH)
def update_recent_file_menu(self):
"""Update recent file menu"""
recent_files = []
for fname in self.recent_files:
if osp.isfile(fname):
recent_files.append(fname)
self.recent_file_menu.clear()
if recent_files:
for fname in recent_files:
action = create_action(
self, fname,
icon=ima.get_icon_by_extension_or_type(
fname, scale_factor=1.0),
triggered=self.load)
action.setData(to_qvariant(fname))
self.recent_file_menu.addAction(action)
self.clear_recent_action.setEnabled(len(recent_files) > 0)
add_actions(self.recent_file_menu, (None, self.max_recent_action,
self.clear_recent_action))
@Slot()
def clear_recent_files(self):
"""Clear recent files list"""
self.recent_files = []
@Slot()
def change_max_recent_files(self):
"Change max recent files entries"""
editorstack = self.get_current_editorstack()
mrf, valid = QInputDialog.getInt(editorstack, _('Editor'),
_('Maximum number of recent files'),
self.get_option('max_recent_files'), 1, 35)
if valid:
self.set_option('max_recent_files', mrf)
@Slot()
@Slot(str)
@Slot(str, int, str)
@Slot(str, int, str, object)
def load(self, filenames=None, goto=None, word='',
editorwindow=None, processevents=True, start_column=None,
end_column=None, set_focus=True, add_where='end'):
"""
Load a text file
editorwindow: load in this editorwindow (useful when clicking on
outline explorer with multiple editor windows)
processevents: determines if processEvents() should be called at the
end of this method (set to False to prevent keyboard events from
creeping through to the editor during debugging)
        If goto is not None it represents a line to go to. start_column is
        the start position in this line and end_column the length
        (so that the end position is start_column + end_column).
        Alternatively, the first match of word is used as a position.
"""
# Switch to editor before trying to load a file
try:
self.switch_to_plugin()
except AttributeError:
pass
editor0 = self.get_current_editor()
if editor0 is not None:
filename0 = self.get_current_filename()
else:
filename0 = None
if not filenames:
# Recent files action
action = self.sender()
if isinstance(action, QAction):
filenames = from_qvariant(action.data(), to_text_string)
if not filenames:
basedir = getcwd_or_home()
if self.edit_filetypes is None:
self.edit_filetypes = get_edit_filetypes()
if self.edit_filters is None:
self.edit_filters = get_edit_filters()
c_fname = self.get_current_filename()
if c_fname is not None and c_fname != self.TEMPFILE_PATH:
basedir = osp.dirname(c_fname)
self.redirect_stdio.emit(False)
parent_widget = self.get_current_editorstack()
if filename0 is not None:
selectedfilter = get_filter(self.edit_filetypes,
osp.splitext(filename0)[1])
else:
selectedfilter = ''
if not running_under_pytest():
# See: spyder-ide/spyder#3291
if sys.platform == 'darwin':
dialog = QFileDialog(
parent=parent_widget,
caption=_("Open file"),
directory=basedir,
)
dialog.setNameFilters(self.edit_filters.split(';;'))
dialog.setOption(QFileDialog.HideNameFilterDetails, True)
dialog.setFilter(QDir.AllDirs | QDir.Files | QDir.Drives
| QDir.Hidden)
dialog.setFileMode(QFileDialog.ExistingFiles)
if dialog.exec_():
filenames = dialog.selectedFiles()
else:
filenames, _sf = getopenfilenames(
parent_widget,
_("Open file"),
basedir,
self.edit_filters,
selectedfilter=selectedfilter,
options=QFileDialog.HideNameFilterDetails,
)
else:
# Use a Qt (i.e. scriptable) dialog for pytest
dialog = QFileDialog(parent_widget, _("Open file"),
options=QFileDialog.DontUseNativeDialog)
if dialog.exec_():
filenames = dialog.selectedFiles()
self.redirect_stdio.emit(True)
if filenames:
filenames = [osp.normpath(fname) for fname in filenames]
else:
return
focus_widget = QApplication.focusWidget()
if self.editorwindows and not self.dockwidget.isVisible():
# We override the editorwindow variable to force a focus on
# the editor window instead of the hidden editor dockwidget.
# See spyder-ide/spyder#5742.
if editorwindow not in self.editorwindows:
editorwindow = self.editorwindows[0]
editorwindow.setFocus()
editorwindow.raise_()
elif (self.dockwidget and not self._ismaximized
and not self.dockwidget.isAncestorOf(focus_widget)
and not isinstance(focus_widget, CodeEditor)):
self.switch_to_plugin()
def _convert(fname):
fname = osp.abspath(encoding.to_unicode_from_fs(fname))
if os.name == 'nt' and len(fname) >= 2 and fname[1] == ':':
fname = fname[0].upper()+fname[1:]
return fname
if hasattr(filenames, 'replaceInStrings'):
# This is a QStringList instance (PyQt API #1), converting to list:
filenames = list(filenames)
if not isinstance(filenames, list):
filenames = [_convert(filenames)]
else:
filenames = [_convert(fname) for fname in list(filenames)]
if isinstance(goto, int):
goto = [goto]
elif goto is not None and len(goto) != len(filenames):
goto = None
for index, filename in enumerate(filenames):
# -- Do not open an already opened file
focus = set_focus and index == 0
current_editor = self.set_current_filename(filename,
editorwindow,
focus=focus)
if current_editor is None:
# -- Not a valid filename:
if not osp.isfile(filename):
continue
# --
current_es = self.get_current_editorstack(editorwindow)
# Creating the editor widget in the first editorstack
# (the one that can't be destroyed), then cloning this
# editor widget in all other editorstacks:
finfo = self.editorstacks[0].load(
filename, set_current=False, add_where=add_where,
processevents=processevents)
finfo.path = self.main.get_spyder_pythonpath()
self._clone_file_everywhere(finfo)
current_editor = current_es.set_current_filename(filename,
focus=focus)
current_editor.debugger.load_breakpoints()
current_editor.set_bookmarks(load_bookmarks(filename))
self.register_widget_shortcuts(current_editor)
current_es.analyze_script()
self.__add_recent_file(filename)
if goto is not None: # 'word' is assumed to be None as well
current_editor.go_to_line(goto[index], word=word,
start_column=start_column,
end_column=end_column)
current_editor.clearFocus()
current_editor.setFocus()
current_editor.window().raise_()
if processevents:
QApplication.processEvents()
else:
# processevents is false only when calling from debugging
current_editor.sig_debug_stop.emit(goto[index])
current_sw = self.main.ipyconsole.get_current_shellwidget()
current_sw.sig_prompt_ready.connect(
current_editor.sig_debug_stop[()].emit)
current_pdb_state = self.main.ipyconsole.get_pdb_state()
pdb_last_step = self.main.ipyconsole.get_pdb_last_step()
self.update_pdb_state(current_pdb_state, pdb_last_step)
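    # Illustrative call sites (file names are hypothetical):
    #     self.load('script.py')                     # open a single file
    #     self.load('script.py', goto=42)            # open and jump to line 42
    #     self.load(['a.py', 'b.py'], goto=[1, 10])  # open several files at once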
@Slot()
def print_file(self):
"""Print current file"""
editor = self.get_current_editor()
filename = self.get_current_filename()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_font())
printDialog = QPrintDialog(printer, editor)
if editor.has_selected_text():
printDialog.setOption(QAbstractPrintDialog.PrintSelection, True)
self.redirect_stdio.emit(False)
answer = printDialog.exec_()
self.redirect_stdio.emit(True)
if answer == QDialog.Accepted:
self.starting_long_process(_("Printing..."))
printer.setDocName(filename)
editor.print_(printer)
self.ending_long_process()
@Slot()
def print_preview(self):
"""Print preview for current file"""
from qtpy.QtPrintSupport import QPrintPreviewDialog
editor = self.get_current_editor()
printer = Printer(mode=QPrinter.HighResolution,
header_font=self.get_font())
preview = QPrintPreviewDialog(printer, self)
preview.setWindowFlags(Qt.Window)
preview.paintRequested.connect(lambda printer: editor.print_(printer))
self.redirect_stdio.emit(False)
preview.exec_()
self.redirect_stdio.emit(True)
def can_close_file(self, filename=None):
"""
Check if a file can be closed taking into account debugging state.
"""
if not CONF.get('ipython_console', 'pdb_prevent_closing'):
return True
debugging = self.main.ipyconsole.get_pdb_state()
last_pdb_step = self.main.ipyconsole.get_pdb_last_step()
can_close = True
if debugging and 'fname' in last_pdb_step and filename:
if osp.normcase(last_pdb_step['fname']) == osp.normcase(filename):
can_close = False
self.sig_file_debug_message_requested.emit()
elif debugging:
can_close = False
self.sig_file_debug_message_requested.emit()
return can_close
@Slot()
def close_file(self):
"""Close current file"""
filename = self.get_current_filename()
if self.can_close_file(filename=filename):
editorstack = self.get_current_editorstack()
editorstack.close_file()
@Slot()
def close_all_files(self):
"""Close all opened scripts"""
self.editorstacks[0].close_all_files()
@Slot()
def save(self, index=None, force=False):
"""Save file"""
editorstack = self.get_current_editorstack()
return editorstack.save(index=index, force=force)
@Slot()
def save_as(self):
"""Save *as* the currently edited file"""
editorstack = self.get_current_editorstack()
if editorstack.save_as():
fname = editorstack.get_current_filename()
self.__add_recent_file(fname)
@Slot()
def save_copy_as(self):
"""Save *copy as* the currently edited file"""
editorstack = self.get_current_editorstack()
editorstack.save_copy_as()
@Slot()
def save_all(self, save_new_files=True):
"""Save all opened files"""
self.get_current_editorstack().save_all(save_new_files=save_new_files)
@Slot()
def revert(self):
"""Revert the currently edited file from disk"""
editorstack = self.get_current_editorstack()
editorstack.revert()
@Slot()
def find(self):
"""Find slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show()
editorstack.find_widget.search_text.setFocus()
@Slot()
def find_next(self):
"""Fnd next slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_next()
@Slot()
def find_previous(self):
"""Find previous slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.find_previous()
@Slot()
def replace(self):
"""Replace slot"""
editorstack = self.get_current_editorstack()
editorstack.find_widget.show_replace()
def open_last_closed(self):
""" Reopens the last closed tab."""
editorstack = self.get_current_editorstack()
last_closed_files = editorstack.get_last_closed_files()
if (len(last_closed_files) > 0):
file_to_open = last_closed_files[0]
last_closed_files.remove(file_to_open)
editorstack.set_last_closed_files(last_closed_files)
self.load(file_to_open)
#------ Explorer widget
def close_file_from_name(self, filename):
"""Close file from its name"""
filename = osp.abspath(to_text_string(filename))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
self.editorstacks[0].close_file(index)
def removed(self, filename):
"""File was removed in file explorer widget or in project explorer"""
self.close_file_from_name(filename)
def removed_tree(self, dirname):
"""Directory was removed in project explorer widget"""
dirname = osp.abspath(to_text_string(dirname))
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
self.close_file_from_name(fname)
def renamed(self, source, dest):
"""
Propagate file rename to editor stacks and autosave component.
This function is called when a file is renamed in the file explorer
widget or the project explorer. The file may not be opened in the
editor.
"""
filename = osp.abspath(to_text_string(source))
index = self.editorstacks[0].has_filename(filename)
if index is not None:
for editorstack in self.editorstacks:
editorstack.rename_in_data(filename,
new_filename=to_text_string(dest))
self.editorstacks[0].autosave.file_renamed(
filename, to_text_string(dest))
def renamed_tree(self, source, dest):
"""Directory was renamed in file explorer or in project explorer."""
dirname = osp.abspath(to_text_string(source))
tofile = to_text_string(dest)
for fname in self.get_filenames():
if osp.abspath(fname).startswith(dirname):
new_filename = fname.replace(dirname, tofile)
self.renamed(source=fname, dest=new_filename)
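    # Illustrative example: renaming /project/old to /project/new calls
    # renamed() once per open file under it, e.g.
    # /project/old/utils.py -> /project/new/utils.py.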
#------ Source code
@Slot()
def indent(self):
"""Indent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.indent()
@Slot()
def unindent(self):
"""Unindent current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unindent()
@Slot()
def text_uppercase(self):
"""Change current line or selection to uppercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_uppercase()
@Slot()
def text_lowercase(self):
"""Change current line or selection to lowercase."""
editor = self.get_current_editor()
if editor is not None:
editor.transform_to_lowercase()
@Slot()
def toggle_comment(self):
"""Comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.toggle_comment()
@Slot()
def blockcomment(self):
"""Block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.blockcomment()
@Slot()
def unblockcomment(self):
"""Un-block comment current line or selection"""
editor = self.get_current_editor()
if editor is not None:
editor.unblockcomment()
@Slot()
def go_to_next_todo(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_next_todo()
filename = self.get_current_filename()
line, column = editor.get_cursor_line_column()
self.add_cursor_position_to_history(filename, position, line, column)
@Slot()
def go_to_next_warning(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_next_warning()
filename = self.get_current_filename()
line, column = editor.get_cursor_line_column()
self.add_cursor_position_to_history(filename, position, line, column)
@Slot()
def go_to_previous_warning(self):
self.switch_to_plugin()
editor = self.get_current_editor()
position = editor.go_to_previous_warning()
filename = self.get_current_filename()
line, column = editor.get_cursor_line_column()
self.add_cursor_position_to_history(filename, position, line, column)
@Slot()
def run_winpdb(self):
"""Run winpdb to debug current file"""
if self.save():
fname = self.get_current_filename()
runconf = get_run_configuration(fname)
if runconf is None:
args = []
wdir = None
else:
args = runconf.get_arguments().split()
wdir = runconf.get_working_directory()
# Handle the case where wdir comes back as an empty string
# when the working directory dialog checkbox is unchecked.
# (subprocess "cwd" default is None, so empty str
# must be changed to None in this case.)
programs.run_program(WINPDB_PATH, [fname] + args, cwd=wdir or None)
def toggle_eol_chars(self, os_name, checked):
if checked:
editor = self.get_current_editor()
if self.__set_eol_chars:
self.switch_to_plugin()
editor.set_eol_chars(sourcecode.get_eol_chars_from_os_name(os_name))
@Slot()
def remove_trailing_spaces(self):
self.switch_to_plugin()
editorstack = self.get_current_editorstack()
editorstack.remove_trailing_spaces()
@Slot()
def format_document_or_selection(self):
self.switch_to_plugin()
editorstack = self.get_current_editorstack()
editorstack.format_document_or_selection()
@Slot()
def fix_indentation(self):
self.switch_to_plugin()
editorstack = self.get_current_editorstack()
editorstack.fix_indentation()
#------ Cursor position history management
def update_cursorpos_actions(self):
self.previous_edit_cursor_action.setEnabled(
self.last_edit_cursor_pos is not None)
self.previous_cursor_action.setEnabled(
self.cursor_pos_index is not None and self.cursor_pos_index > 0)
self.next_cursor_action.setEnabled(self.cursor_pos_index is not None \
and self.cursor_pos_index < len(self.cursor_pos_history)-1)
def add_cursor_position_to_history(self, filename, position, line, column,
fc=False):
if self.__ignore_cursor_position:
return
for index, (fname, pos, c_line, c_col) in enumerate(
self.cursor_pos_history):
if fname == filename:
if pos == position or pos == 0 or line == c_line:
if fc:
self.cursor_pos_history[index] = (filename, position,
line, column)
self.cursor_pos_index = index
self.update_cursorpos_actions()
return
else:
if self.cursor_pos_index >= index:
self.cursor_pos_index -= 1
self.cursor_pos_history.pop(index)
break
if self.cursor_pos_index is not None:
self.cursor_pos_history = \
self.cursor_pos_history[:self.cursor_pos_index+1]
self.cursor_pos_history.append((filename, position, line, column))
self.cursor_pos_index = len(self.cursor_pos_history)-1
self.update_cursorpos_actions()
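    # Any history entries beyond the current index are dropped before the new
    # position is appended, so the list behaves like a browser's back/forward
    # history.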
def text_changed_at(self, filename, position):
self.last_edit_cursor_pos = (to_text_string(filename), position)
def current_file_changed(self, filename, position, line, column):
self.add_cursor_position_to_history(to_text_string(filename), position,
line, column, fc=True)
# Hide any open tooltips
current_stack = self.get_current_editorstack()
if current_stack is not None:
current_stack.hide_tooltip()
# Update debugging state
if self.main.ipyconsole is not None:
pdb_state = self.main.ipyconsole.get_pdb_state()
pdb_last_step = self.main.ipyconsole.get_pdb_last_step()
self.update_pdb_state(pdb_state, pdb_last_step)
def current_editor_cursor_changed(self, line, column):
"""Handles the change of the cursor inside the current editor."""
code_editor = self.get_current_editor()
filename = code_editor.filename
position = code_editor.get_position('cursor')
line, column = code_editor.get_cursor_line_column()
self.add_cursor_position_to_history(
to_text_string(filename), position, line, column, fc=True)
def remove_file_cursor_history(self, id, filename):
"""Remove the cursor history of a file if the file is closed."""
new_history = []
for i, (cur_filename, pos, line, column) in enumerate(
self.cursor_pos_history):
if cur_filename == filename:
if i < self.cursor_pos_index:
self.cursor_pos_index = self.cursor_pos_index - 1
else:
new_history.append((cur_filename, pos, line, column))
self.cursor_pos_history = new_history
@Slot()
def go_to_last_edit_location(self):
if self.last_edit_cursor_pos is not None:
filename, position = self.last_edit_cursor_pos
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
def __move_cursor_position(self, index_move):
"""
Move the cursor position forward or backward in the cursor
position history by the specified index increment.
"""
if self.cursor_pos_index is None:
return
filename, _position, _line, _column = (
self.cursor_pos_history[self.cursor_pos_index])
cur_line, cur_col = self.get_current_editor().get_cursor_line_column()
self.cursor_pos_history[self.cursor_pos_index] = (
filename, self.get_current_editor().get_position('cursor'),
cur_line, cur_col)
self.__ignore_cursor_position = True
old_index = self.cursor_pos_index
self.cursor_pos_index = min(len(self.cursor_pos_history) - 1,
max(0, self.cursor_pos_index + index_move))
filename, position, line, col = (
self.cursor_pos_history[self.cursor_pos_index])
filenames = self.get_current_editorstack().get_filenames()
if not osp.isfile(filename) and filename not in filenames:
self.cursor_pos_history.pop(self.cursor_pos_index)
if self.cursor_pos_index <= old_index:
old_index -= 1
self.cursor_pos_index = old_index
else:
self.load(filename)
editor = self.get_current_editor()
if position < editor.document().characterCount():
editor.set_cursor_position(position)
self.__ignore_cursor_position = False
self.update_cursorpos_actions()
@Slot()
def go_to_previous_cursor_position(self):
self.switch_to_plugin()
self.__move_cursor_position(-1)
@Slot()
def go_to_next_cursor_position(self):
self.switch_to_plugin()
self.__move_cursor_position(1)
@Slot()
def go_to_line(self, line=None):
"""Open 'go to line' dialog"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
editorstack.go_to_line(line)
@Slot()
def set_or_clear_breakpoint(self):
"""Set/Clear breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_clear_breakpoint()
@Slot()
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_edit_conditional_breakpoint()
@Slot()
def clear_all_breakpoints(self):
"""Clear breakpoints in all files"""
self.switch_to_plugin()
clear_all_breakpoints()
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
for data in editorstack.data:
data.editor.debugger.clear_breakpoints()
self.refresh_plugin()
def clear_breakpoint(self, filename, lineno):
"""Remove a single breakpoint"""
clear_breakpoint(filename, lineno)
self.breakpoints_saved.emit()
editorstack = self.get_current_editorstack()
if editorstack is not None:
index = self.is_file_opened(filename)
if index is not None:
editorstack.data[index].editor.debugger.toogle_breakpoint(
lineno)
def stop_debugging(self):
"""Stop debugging"""
self.main.ipyconsole.stop_debugging()
def debug_command(self, command):
"""Debug actions"""
self.switch_to_plugin()
self.main.ipyconsole.pdb_execute_command(command)
focus_widget = self.main.ipyconsole.get_focus_widget()
if focus_widget:
focus_widget.setFocus()
#------ Run Python script
@Slot()
def edit_run_configurations(self):
dialog = RunConfigDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
fname = osp.abspath(self.get_current_filename())
dialog.setup(fname)
if dialog.exec_():
fname = dialog.file_to_run
if fname is not None:
self.load(fname)
self.run_file()
@Slot()
def run_file(self, debug=False):
"""Run script inside current interpreter or in a new one"""
editorstack = self.get_current_editorstack()
editor = self.get_current_editor()
fname = osp.abspath(self.get_current_filename())
# Get fname's dirname before we escape the single and double
# quotes. Fixes spyder-ide/spyder#6771.
dirname = osp.dirname(fname)
# Escape single and double quotes in fname and dirname.
# Fixes spyder-ide/spyder#2158.
fname = fname.replace("'", r"\'").replace('"', r'\"')
dirname = dirname.replace("'", r"\'").replace('"', r'\"')
runconf = get_run_configuration(fname)
if runconf is None:
dialog = RunConfigOneDialog(self)
dialog.size_change.connect(lambda s: self.set_dialog_size(s))
if self.dialog_size is not None:
dialog.resize(self.dialog_size)
dialog.setup(fname)
if CONF.get('run', 'open_at_least_once',
not running_under_pytest()):
# Open Run Config dialog at least once: the first time
# a script is ever run in Spyder, so that the user may
# see it at least once and be conscious that it exists
show_dlg = True
CONF.set('run', 'open_at_least_once', False)
else:
# Open Run Config dialog only
# if ALWAYS_OPEN_FIRST_RUN_OPTION option is enabled
show_dlg = CONF.get('run', ALWAYS_OPEN_FIRST_RUN_OPTION)
if show_dlg and not dialog.exec_():
return
runconf = dialog.get_configuration()
args = runconf.get_arguments()
python_args = runconf.get_python_arguments()
interact = runconf.interact
post_mortem = runconf.post_mortem
current = runconf.current
systerm = runconf.systerm
clear_namespace = runconf.clear_namespace
console_namespace = runconf.console_namespace
if runconf.file_dir:
wdir = dirname
elif runconf.cw_dir:
wdir = ''
elif osp.isdir(runconf.dir):
wdir = runconf.dir
else:
wdir = ''
python = True # Note: in the future, it may be useful to run
# something in a terminal instead of a Python interp.
self.__last_ec_exec = (fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace,
console_namespace)
self.re_run_file(save_new_files=False)
if not interact and not debug:
# If external console dockwidget is hidden, it will be
# raised in top-level and so focus will be given to the
# current external shell automatically
# (see SpyderPluginWidget.visibility_changed method)
editor.setFocus()
def set_dialog_size(self, size):
self.dialog_size = size
@Slot()
def debug_file(self):
"""Debug current script"""
self.switch_to_plugin()
current_editor = self.get_current_editor()
if current_editor is not None:
current_editor.sig_debug_start.emit()
self.run_file(debug=True)
@Slot()
def re_run_file(self, save_new_files=True):
"""Re-run last script"""
if self.get_option('save_all_before_run'):
all_saved = self.save_all(save_new_files=save_new_files)
if all_saved is not None and not all_saved:
return
if self.__last_ec_exec is None:
return
(fname, wdir, args, interact, debug,
python, python_args, current, systerm,
post_mortem, clear_namespace,
console_namespace) = self.__last_ec_exec
if not systerm:
self.run_in_current_ipyclient.emit(fname, wdir, args,
debug, post_mortem,
current, clear_namespace,
console_namespace)
else:
self.main.open_external_console(fname, wdir, args, interact,
debug, python, python_args,
systerm, post_mortem)
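    # __last_ec_exec caches the full run configuration tuple built in
    # run_file(), so re-running repeats the last execution with identical
    # arguments, working directory and console options.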
@Slot()
def run_selection(self):
"""Run selection or current line in external console"""
editorstack = self.get_current_editorstack()
editorstack.run_selection()
@Slot()
def run_cell(self):
"""Run current cell"""
editorstack = self.get_current_editorstack()
editorstack.run_cell()
@Slot()
def run_cell_and_advance(self):
"""Run current cell and advance to the next one"""
editorstack = self.get_current_editorstack()
editorstack.run_cell_and_advance()
@Slot()
def debug_cell(self):
'''Debug Current cell.'''
editorstack = self.get_current_editorstack()
editorstack.debug_cell()
@Slot()
def re_run_last_cell(self):
"""Run last executed cell."""
editorstack = self.get_current_editorstack()
editorstack.re_run_last_cell()
# ------ Code bookmarks
@Slot(int)
def save_bookmark(self, slot_num):
"""Save current line and position as bookmark."""
bookmarks = CONF.get('editor', 'bookmarks')
editorstack = self.get_current_editorstack()
if slot_num in bookmarks:
filename, line_num, column = bookmarks[slot_num]
if osp.isfile(filename):
index = editorstack.has_filename(filename)
if index is not None:
block = (editorstack.tabs.widget(index).document()
.findBlockByNumber(line_num))
block.userData().bookmarks.remove((slot_num, column))
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_bookmark(slot_num)
@Slot(int)
def load_bookmark(self, slot_num):
"""Set cursor to bookmarked file and position."""
bookmarks = CONF.get('editor', 'bookmarks')
if slot_num in bookmarks:
filename, line_num, column = bookmarks[slot_num]
else:
return
if not osp.isfile(filename):
self.last_edit_cursor_pos = None
return
self.load(filename)
editor = self.get_current_editor()
if line_num < editor.document().lineCount():
linelength = len(editor.document()
.findBlockByNumber(line_num).text())
if column <= linelength:
editor.go_to_line(line_num + 1, column)
else:
# Last column
editor.go_to_line(line_num + 1, linelength)
#------ Zoom in/out/reset
def zoom(self, factor):
"""Zoom in/out/reset"""
editor = self.get_current_editorstack().get_current_editor()
if factor == 0:
font = self.get_font()
editor.set_font(font)
else:
font = editor.font()
size = font.pointSize() + factor
if size > 0:
font.setPointSize(size)
editor.set_font(font)
editor.update_tab_stop_width_spaces()
#------ Options
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
if self.editorstacks is not None:
# --- syntax highlight and text rendering settings
color_scheme_n = 'color_scheme_name'
color_scheme_o = self.get_color_scheme()
currentline_n = 'highlight_current_line'
currentline_o = self.get_option(currentline_n)
currentcell_n = 'highlight_current_cell'
currentcell_o = self.get_option(currentcell_n)
occurrence_n = 'occurrence_highlighting'
occurrence_o = self.get_option(occurrence_n)
occurrence_timeout_n = 'occurrence_highlighting/timeout'
occurrence_timeout_o = self.get_option(occurrence_timeout_n)
focus_to_editor_n = 'focus_to_editor'
focus_to_editor_o = self.get_option(focus_to_editor_n)
for editorstack in self.editorstacks:
if color_scheme_n in options:
editorstack.set_color_scheme(color_scheme_o)
if currentline_n in options:
editorstack.set_highlight_current_line_enabled(
currentline_o)
if currentcell_n in options:
editorstack.set_highlight_current_cell_enabled(
currentcell_o)
if occurrence_n in options:
editorstack.set_occurrence_highlighting_enabled(occurrence_o)
if occurrence_timeout_n in options:
editorstack.set_occurrence_highlighting_timeout(
occurrence_timeout_o)
if focus_to_editor_n in options:
editorstack.set_focus_to_editor(focus_to_editor_o)
# --- everything else
tabbar_n = 'show_tab_bar'
tabbar_o = self.get_option(tabbar_n)
classfuncdropdown_n = 'show_class_func_dropdown'
classfuncdropdown_o = self.get_option(classfuncdropdown_n)
linenb_n = 'line_numbers'
linenb_o = self.get_option(linenb_n)
blanks_n = 'blank_spaces'
blanks_o = self.get_option(blanks_n)
scrollpastend_n = 'scroll_past_end'
scrollpastend_o = self.get_option(scrollpastend_n)
edgeline_n = 'edge_line'
edgeline_o = self.get_option(edgeline_n)
edgelinecols_n = 'edge_line_columns'
edgelinecols_o = self.get_option(edgelinecols_n)
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
indentguides_n = 'indent_guides'
indentguides_o = self.get_option(indentguides_n)
codefolding_n = 'code_folding'
codefolding_o = self.get_option(codefolding_n)
tabindent_n = 'tab_always_indent'
tabindent_o = self.get_option(tabindent_n)
stripindent_n = 'strip_trailing_spaces_on_modify'
stripindent_o = self.get_option(stripindent_n)
ibackspace_n = 'intelligent_backspace'
ibackspace_o = self.get_option(ibackspace_n)
autocompletions_n = 'automatic_completions'
autocompletions_o = self.get_option(autocompletions_n)
completionshint_n = 'completions_hint'
completionshint_o = self.get_option(completionshint_n)
removetrail_n = 'always_remove_trailing_spaces'
removetrail_o = self.get_option(removetrail_n)
add_newline_n = 'add_newline'
add_newline_o = self.get_option(add_newline_n)
removetrail_newlines_n = 'always_remove_trailing_newlines'
removetrail_newlines_o = self.get_option(removetrail_newlines_n)
converteol_n = 'convert_eol_on_save'
converteol_o = self.get_option(converteol_n)
converteolto_n = 'convert_eol_on_save_to'
converteolto_o = self.get_option(converteolto_n)
runcellcopy_n = 'run_cell_copy'
runcellcopy_o = self.get_option(runcellcopy_n)
closepar_n = 'close_parentheses'
closepar_o = self.get_option(closepar_n)
close_quotes_n = 'close_quotes'
close_quotes_o = self.get_option(close_quotes_n)
add_colons_n = 'add_colons'
add_colons_o = self.get_option(add_colons_n)
autounindent_n = 'auto_unindent'
autounindent_o = self.get_option(autounindent_n)
indent_chars_n = 'indent_chars'
indent_chars_o = self.get_option(indent_chars_n)
tab_stop_width_spaces_n = 'tab_stop_width_spaces'
tab_stop_width_spaces_o = self.get_option(tab_stop_width_spaces_n)
help_n = 'connect_to_oi'
help_o = CONF.get('help', 'connect/editor')
todo_n = 'todo_list'
todo_o = self.get_option(todo_n)
finfo = self.get_current_finfo()
for editorstack in self.editorstacks:
# Checkable options
if blanks_n in options:
editorstack.set_blanks_enabled(blanks_o)
if scrollpastend_n in options:
editorstack.set_scrollpastend_enabled(scrollpastend_o)
if indentguides_n in options:
editorstack.set_indent_guides(indentguides_o)
if codefolding_n in options:
editorstack.set_code_folding_enabled(codefolding_o)
if classfuncdropdown_n in options:
editorstack.set_classfunc_dropdown_visible(
classfuncdropdown_o)
if tabbar_n in options:
editorstack.set_tabbar_visible(tabbar_o)
if linenb_n in options:
editorstack.set_linenumbers_enabled(linenb_o,
current_finfo=finfo)
if autocompletions_n in options:
editorstack.set_automatic_completions_enabled(
autocompletions_o)
if completionshint_n in options:
editorstack.set_completions_hint_enabled(completionshint_o)
if edgeline_n in options:
editorstack.set_edgeline_enabled(edgeline_o)
if edgelinecols_n in options:
editorstack.set_edgeline_columns(edgelinecols_o)
if wrap_n in options:
editorstack.set_wrap_enabled(wrap_o)
if tabindent_n in options:
editorstack.set_tabmode_enabled(tabindent_o)
if stripindent_n in options:
editorstack.set_stripmode_enabled(stripindent_o)
if ibackspace_n in options:
editorstack.set_intelligent_backspace_enabled(ibackspace_o)
if removetrail_n in options:
editorstack.set_always_remove_trailing_spaces(removetrail_o)
if add_newline_n in options:
editorstack.set_add_newline(add_newline_o)
if removetrail_newlines_n in options:
editorstack.set_remove_trailing_newlines(
removetrail_newlines_o)
if converteol_n in options:
editorstack.set_convert_eol_on_save(converteol_o)
if converteolto_n in options:
editorstack.set_convert_eol_on_save_to(converteolto_o)
if runcellcopy_n in options:
editorstack.set_run_cell_copy(runcellcopy_o)
if closepar_n in options:
editorstack.set_close_parentheses_enabled(closepar_o)
if close_quotes_n in options:
editorstack.set_close_quotes_enabled(close_quotes_o)
if add_colons_n in options:
editorstack.set_add_colons_enabled(add_colons_o)
if autounindent_n in options:
editorstack.set_auto_unindent_enabled(autounindent_o)
if indent_chars_n in options:
editorstack.set_indent_chars(indent_chars_o)
if tab_stop_width_spaces_n in options:
editorstack.set_tab_stop_width_spaces(tab_stop_width_spaces_o)
if help_n in options:
editorstack.set_help_enabled(help_o)
if todo_n in options:
editorstack.set_todolist_enabled(todo_o,
current_finfo=finfo)
for name, action in self.checkable_actions.items():
if name in options:
# Avoid triggering the action when this action changes state
action.blockSignals(True)
state = self.get_option(name)
action.setChecked(state)
action.blockSignals(False)
# See: spyder-ide/spyder#9915
# Multiply by 1000 to convert seconds to milliseconds
self.autosave.interval = (
self.get_option('autosave_interval') * 1000)
self.autosave.enabled = self.get_option('autosave_enabled')
# We must update the current editor after the others:
# (otherwise, code analysis buttons state would correspond to the
# last editor instead of showing the one of the current editor)
if finfo is not None:
if todo_n in options and todo_o:
finfo.run_todo_finder()
# --- Open files
def get_open_filenames(self):
"""Get the list of open files in the current stack"""
editorstack = self.editorstacks[0]
filenames = []
filenames += [finfo.filename for finfo in editorstack.data]
return filenames
def set_open_filenames(self):
"""
Set the recent opened files on editor based on active project.
If no project is active, then editor filenames are saved, otherwise
the opened filenames are stored in the project config info.
"""
if self.projects is not None:
if not self.projects.get_active_project():
filenames = self.get_open_filenames()
self.set_option('filenames', filenames)
def setup_open_files(self, close_previous_files=True):
"""
Open the list of saved files per project.
Also open any files that the user selected in the recovery dialog.
"""
self.set_create_new_file_if_empty(False)
active_project_path = None
if self.projects is not None:
active_project_path = self.projects.get_active_project_path()
if active_project_path:
filenames = self.projects.get_project_filenames()
else:
filenames = self.get_option('filenames', default=[])
if close_previous_files:
self.close_all_files()
all_filenames = self.autosave.recover_files_to_open + filenames
if all_filenames and any([osp.isfile(f) for f in all_filenames]):
layout = self.get_option('layout_settings', None)
# Check if no saved layout settings exist, e.g. clean prefs file.
# If not, load with default focus/layout, to fix
# spyder-ide/spyder#8458.
if layout:
is_vertical, cfname, clines = layout.get('splitsettings')[0]
# Check that a value for current line exist for each filename
# in the available settings. See spyder-ide/spyder#12201
if cfname in filenames and len(filenames) == len(clines):
index = filenames.index(cfname)
# First we load the last focused file.
self.load(filenames[index], goto=clines[index], set_focus=True)
# Then we load the files located to the left of the last
# focused file in the tabbar, while keeping the focus on
# the last focused file.
if index > 0:
self.load(filenames[index::-1], goto=clines[index::-1],
set_focus=False, add_where='start')
# Then we load the files located to the right of the last
# focused file in the tabbar, while keeping the focus on
# the last focused file.
if index < (len(filenames) - 1):
self.load(filenames[index+1:], goto=clines[index:],
set_focus=False, add_where='end')
# Finally we load any recovered files at the end of the tabbar,
# while keeping focus on the last focused file.
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open,
set_focus=False, add_where='end')
else:
if filenames:
self.load(filenames, goto=clines)
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open)
else:
if filenames:
self.load(filenames)
if self.autosave.recover_files_to_open:
self.load(self.autosave.recover_files_to_open)
if self.__first_open_files_setup:
self.__first_open_files_setup = False
if layout is not None:
self.editorsplitter.set_layout_settings(
layout,
dont_goto=filenames[0])
win_layout = self.get_option('windows_layout_settings', [])
if win_layout:
for layout_settings in win_layout:
self.editorwindows_to_be_created.append(
layout_settings)
self.set_last_focused_editorstack(self, self.editorstacks[0])
else:
self.__load_temp_file()
self.set_create_new_file_if_empty(True)
def save_open_files(self):
"""Save the list of open files"""
self.set_option('filenames', self.get_open_filenames())
def set_create_new_file_if_empty(self, value):
"""Change the value of create_new_file_if_empty"""
for editorstack in self.editorstacks:
editorstack.create_new_file_if_empty = value
# --- File Menu actions (Mac only)
@Slot()
def go_to_next_file(self):
"""Switch to next file tab on the current editor stack."""
editorstack = self.get_current_editorstack()
editorstack.tabs.tab_navigate(+1)
@Slot()
def go_to_previous_file(self):
"""Switch to previous file tab on the current editor stack."""
editorstack = self.get_current_editorstack()
editorstack.tabs.tab_navigate(-1)
def set_current_project_path(self, root_path=None):
"""
Set the current active project root path.
Parameters
----------
root_path: str or None, optional
Path to current project root path. Default is None.
"""
for editorstack in self.editorstacks:
editorstack.set_current_project_path(root_path)
def register_panel(self, panel_class, *args, position=Panel.Position.LEFT,
**kwargs):
"""Register a panel in all the editorstacks in the given position."""
for editorstack in self.editorstacks:
editorstack.register_panel(
panel_class, *args, position=position, **kwargs)
|
the-stack_106_22824 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import threading
from webkitpy.common.config.committers import CommitterList, Reviewer
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.checkout.scm import CommitMessage
from webkitpy.common.net.bugzilla import Bug, Attachment
from webkitpy.common.net.rietveld import Rietveld
from webkitpy.thirdparty.mock import Mock
from webkitpy.common.system.deprecated_logging import log
def _id_to_object_dictionary(*objects):
dictionary = {}
for thing in objects:
dictionary[thing["id"]] = thing
return dictionary
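# Illustrative example:
#     _id_to_object_dictionary({"id": 1, "name": "a"}, {"id": 2, "name": "b"})
#     => {1: {"id": 1, "name": "a"}, 2: {"id": 2, "name": "b"}}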
# Testing
# FIXME: The ids should be 1, 2, 3 instead of crazy numbers.
_patch1 = {
"id": 197,
"bug_id": 42,
"url": "http://example.com/197",
"name": "Patch1",
"is_obsolete": False,
"is_patch": True,
"review": "+",
"reviewer_email": "[email protected]",
"commit-queue": "+",
"committer_email": "[email protected]",
"attacher_email": "Contributer1",
}
_patch2 = {
"id": 128,
"bug_id": 42,
"url": "http://example.com/128",
"name": "Patch2",
"is_obsolete": False,
"is_patch": True,
"review": "+",
"reviewer_email": "[email protected]",
"commit-queue": "+",
"committer_email": "[email protected]",
"attacher_email": "[email protected]",
}
_patch3 = {
"id": 103,
"bug_id": 75,
"url": "http://example.com/103",
"name": "Patch3",
"is_obsolete": False,
"is_patch": True,
"review": "?",
"attacher_email": "[email protected]",
}
_patch4 = {
"id": 104,
"bug_id": 77,
"url": "http://example.com/103",
"name": "Patch3",
"is_obsolete": False,
"is_patch": True,
"review": "+",
"commit-queue": "?",
"reviewer_email": "[email protected]",
"attacher_email": "Contributer2",
}
_patch5 = {
"id": 105,
"bug_id": 77,
"url": "http://example.com/103",
"name": "Patch5",
"is_obsolete": False,
"is_patch": True,
"review": "+",
"reviewer_email": "[email protected]",
"attacher_email": "[email protected]",
}
_patch6 = { # Valid committer, but no reviewer.
"id": 106,
"bug_id": 77,
"url": "http://example.com/103",
"name": "ROLLOUT of r3489",
"is_obsolete": False,
"is_patch": True,
"commit-queue": "+",
"committer_email": "[email protected]",
"attacher_email": "[email protected]",
}
_patch7 = { # Valid review, patch is marked obsolete.
"id": 107,
"bug_id": 76,
"url": "http://example.com/103",
"name": "Patch7",
"is_obsolete": True,
"is_patch": True,
"review": "+",
"reviewer_email": "[email protected]",
"attacher_email": "[email protected]",
}
# This matches one of Bug.unassigned_emails
_unassigned_email = "[email protected]"
# FIXME: The ids should be 1, 2, 3 instead of crazy numbers.
_bug1 = {
"id": 42,
"title": "Bug with two r+'d and cq+'d patches, one of which has an "
"invalid commit-queue setter.",
"assigned_to_email": _unassigned_email,
"attachments": [_patch1, _patch2],
}
_bug2 = {
"id": 75,
"title": "Bug with a patch needing review.",
"assigned_to_email": "[email protected]",
"attachments": [_patch3],
}
_bug3 = {
"id": 76,
"title": "The third bug",
"assigned_to_email": _unassigned_email,
"attachments": [_patch7],
}
_bug4 = {
"id": 77,
"title": "The fourth bug",
"assigned_to_email": "[email protected]",
"attachments": [_patch4, _patch5, _patch6],
}
class MockBugzillaQueries(Mock):
def __init__(self, bugzilla):
Mock.__init__(self)
self._bugzilla = bugzilla
def _all_bugs(self):
return map(lambda bug_dictionary: Bug(bug_dictionary, self._bugzilla),
self._bugzilla.bug_cache.values())
def fetch_bug_ids_from_commit_queue(self):
bugs_with_commit_queued_patches = filter(
lambda bug: bug.commit_queued_patches(),
self._all_bugs())
return map(lambda bug: bug.id(), bugs_with_commit_queued_patches)
def fetch_attachment_ids_from_review_queue(self):
unreviewed_patches = sum([bug.unreviewed_patches()
for bug in self._all_bugs()], [])
return map(lambda patch: patch.id(), unreviewed_patches)
def fetch_patches_from_commit_queue(self):
return sum([bug.commit_queued_patches()
for bug in self._all_bugs()], [])
def fetch_bug_ids_from_pending_commit_list(self):
bugs_with_reviewed_patches = filter(lambda bug: bug.reviewed_patches(),
self._all_bugs())
bug_ids = map(lambda bug: bug.id(), bugs_with_reviewed_patches)
        # NOTE: This manual hack is here to allow testing logging in
        # test_assign_to_committer; the real pending-commit query on bugzilla
        # will return bugs whose patches have r+ but are also obsolete.
return bug_ids + [76]
def fetch_patches_from_pending_commit_list(self):
return sum([bug.reviewed_patches() for bug in self._all_bugs()], [])
# FIXME: Bugzilla is the wrong Mock-point. Once we have a BugzillaNetwork
# class we should mock that instead.
# Most of this class is just copy/paste from Bugzilla.
class MockBugzilla(Mock):
bug_server_url = "http://example.com"
bug_cache = _id_to_object_dictionary(_bug1, _bug2, _bug3, _bug4)
attachment_cache = _id_to_object_dictionary(_patch1,
_patch2,
_patch3,
_patch4,
_patch5,
_patch6,
_patch7)
def __init__(self):
Mock.__init__(self)
self.queries = MockBugzillaQueries(self)
self.committers = CommitterList(reviewers=[Reviewer("Foo Bar",
"[email protected]")])
def create_bug(self,
bug_title,
bug_description,
component=None,
diff=None,
patch_description=None,
cc=None,
blocked=None,
mark_for_review=False,
mark_for_commit_queue=False):
log("MOCK create_bug")
log("bug_title: %s" % bug_title)
log("bug_description: %s" % bug_description)
def quips(self):
return ["Good artists copy. Great artists steal. - Pablo Picasso"]
def fetch_bug(self, bug_id):
return Bug(self.bug_cache.get(bug_id), self)
def fetch_attachment(self, attachment_id):
# This could be changed to .get() if we wish to allow failed lookups.
attachment_dictionary = self.attachment_cache[attachment_id]
bug = self.fetch_bug(attachment_dictionary["bug_id"])
for attachment in bug.attachments(include_obsolete=True):
if attachment.id() == int(attachment_id):
return attachment
def bug_url_for_bug_id(self, bug_id):
return "%s/%s" % (self.bug_server_url, bug_id)
def fetch_bug_dictionary(self, bug_id):
return self.bug_cache.get(bug_id)
def attachment_url_for_id(self, attachment_id, action="view"):
action_param = ""
if action and action != "view":
action_param = "&action=%s" % action
return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param)
def post_comment_to_bug(self, bug_id, comment_text, cc=None):
log("MOCK bug comment: bug_id=%s, cc=%s\n--- Begin comment ---\%s\n--- End comment ---\n" % (
bug_id, cc, comment_text))
def add_patch_to_bug(self,
bug_id,
diff,
description,
comment_text=None,
mark_for_review=False,
mark_for_commit_queue=False,
mark_for_landing=False):
log("MOCK add_patch_to_bug: bug_id=%s, description=%s, mark_for_review=%s, mark_for_commit_queue=%s, mark_for_landing=%s" %
(bug_id, description, mark_for_review, mark_for_commit_queue, mark_for_landing))
log("-- Begin comment --")
log(comment_text)
log("-- End comment --")
class MockBuilder(object):
def __init__(self, name):
self._name = name
def name(self):
return self._name
def force_build(self, username, comments):
log("MOCK: force_build: name=%s, username=%s, comments=%s" % (
self._name, username, comments))
class MockBuildBot(object):
def __init__(self):
self._mock_builder1_status = {
"name": "Builder1",
"is_green": True,
"activity": "building",
}
self._mock_builder2_status = {
"name": "Builder2",
"is_green": True,
"activity": "idle",
}
def builder_with_name(self, name):
return MockBuilder(name)
def builder_statuses(self):
return [
self._mock_builder1_status,
self._mock_builder2_status,
]
def red_core_builders_names(self):
if not self._mock_builder2_status["is_green"]:
return [self._mock_builder2_status["name"]]
return []
def red_core_builders(self):
if not self._mock_builder2_status["is_green"]:
return [self._mock_builder2_status]
return []
def idle_red_core_builders(self):
if not self._mock_builder2_status["is_green"]:
return [self._mock_builder2_status]
return []
def last_green_revision(self):
return 9479
def light_tree_on_fire(self):
self._mock_builder2_status["is_green"] = False
def revisions_causing_failures(self):
return {
"29837": [self.builder_with_name("Builder1")],
}
class MockSCM(Mock):
fake_checkout_root = os.path.realpath("/tmp") # realpath is needed to allow for Mac OS X's /private/tmp
def __init__(self):
Mock.__init__(self)
# FIXME: We should probably use real checkout-root detection logic here.
# os.getcwd() can't work here because other parts of the code assume that "checkout_root"
# will actually be the root. Since getcwd() is wrong, use a globally fake root for now.
self.checkout_root = self.fake_checkout_root
def create_patch(self, git_commit, squash):
return "Patch1"
def commit_ids_from_commitish_arguments(self, args):
return ["Commitish1", "Commitish2"]
def commit_message_for_local_commit(self, commit_id):
if commit_id == "Commitish1":
return CommitMessage("CommitMessage1\n" \
"https://bugs.example.org/show_bug.cgi?id=42\n")
if commit_id == "Commitish2":
return CommitMessage("CommitMessage2\n" \
"https://bugs.example.org/show_bug.cgi?id=75\n")
raise Exception("Bogus commit_id in commit_message_for_local_commit.")
def diff_for_revision(self, revision):
return "DiffForRevision%s\n" \
"http://bugs.webkit.org/show_bug.cgi?id=12345" % revision
def svn_revision_from_commit_text(self, commit_text):
return "49824"
class MockCheckout(object):
_committer_list = CommitterList()
def commit_info_for_revision(self, svn_revision):
return CommitInfo(svn_revision, "[email protected]", {
"bug_id": 42,
"author_name": "Adam Barth",
"author_email": "[email protected]",
"author": self._committer_list.committer_by_email("[email protected]"),
"reviewer_text": "Darin Adler",
"reviewer": self._committer_list.committer_by_name("Darin Adler"),
})
def bug_id_for_revision(self, svn_revision):
return 12345
def modified_changelogs(self, git_commit, squash):
# Ideally we'd return something more interesting here. The problem is
# that LandDiff will try to actually read the patch from disk!
return []
def commit_message_for_this_commit(self, git_commit, squash):
commit_message = Mock()
commit_message.message = lambda:"This is a fake commit message that is at least 50 characters."
return commit_message
def apply_patch(self, patch, force=False):
pass
def apply_reverse_diff(self, revision):
pass
class MockUser(object):
@staticmethod
def prompt(message, repeat=1, raw_input=raw_input):
return "Mock user response"
def edit(self, files):
pass
def page(self, message):
pass
def confirm(self, message=None):
return True
def open_url(self, url):
if url.startswith("file://"):
log("MOCK: user.open_url: file://...")
return
log("MOCK: user.open_url: %s" % url)
class MockIRC(object):
def post(self, message):
log("MOCK: irc.post: %s" % message)
def disconnect(self):
log("MOCK: irc.disconnect")
class MockStatusServer(object):
def __init__(self):
self.host = "example.com"
def patch_status(self, queue_name, patch_id):
return None
def svn_revision(self, svn_revision):
return None
def update_status(self, queue_name, status, patch=None, results_file=None):
log("MOCK: update_status: %s %s" % (queue_name, status))
return 187
def update_svn_revision(self, svn_revision, broken_bot):
return 191
class MockExecute(Mock):
def run_and_throw_if_fail(self, args, quiet=False):
return "MOCK output of child process"
def run_command(self,
args,
cwd=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=False):
return "MOCK output of child process"
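# MockTool aggregates the mocks above into a single tool-like object that unit tests can use in place of the real tool.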
class MockTool():
def __init__(self, log_executive=False):
self.wakeup_event = threading.Event()
self.bugs = MockBugzilla()
self.buildbot = MockBuildBot()
self.executive = MockExecute()
if log_executive:
self.executive.run_and_throw_if_fail = lambda args: log("MOCK run_and_throw_if_fail: %s" % args)
self._irc = None
self.user = MockUser()
self._scm = MockSCM()
self._checkout = MockCheckout()
self.status_server = MockStatusServer()
self.irc_password = "MOCK irc password"
self.codereview = Rietveld(self.executive)
def scm(self):
return self._scm
def checkout(self):
return self._checkout
def ensure_irc_connected(self, delegate):
if not self._irc:
self._irc = MockIRC()
def irc(self):
return self._irc
def path(self):
return "echo"
|
the-stack_106_22827 | from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
batch_size = 64
epochs = 500
latent_dim = 256
num_samples = 10000
data_path = 'reverse_train_data'
# prepare datasets
# -------------------------------------------------
input_melodies = []
target_melodies = []
input_notes = set()
target_notes = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
#reverse as target
input_melody = line
target_melody = input_melody[::-1]
target_melody = '\t' + target_melody + '\n'
input_melodies.append(input_melody)
target_melodies.append(target_melody)
for char in input_melody:
if char not in input_notes:
input_notes.add(char)
for char in target_melody:
if char not in target_notes:
target_notes.add(char)
input_notes = sorted(list(input_notes))
target_notes = sorted(list(target_notes))
num_encoder_tokens = len(input_notes)
num_decoder_tokens = len(target_notes)
max_encoder_seq_length = max([len(txt) for txt in input_melodies])
max_decoder_seq_length = max([len(txt) for txt in target_melodies])
print('Number of samples:', len(input_melodies))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
[(char, i) for i, char in enumerate(input_notes)])
target_token_index = dict(
[(char, i) for i, char in enumerate(target_notes)])
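# One-hot encode every sequence; decoder_target_data is decoder_input_data shifted one timestep ahead (the teacher-forcing target).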
encoder_input_data = np.zeros(
(len(input_melodies), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
decoder_input_data = np.zeros(
(len(input_melodies), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
decoder_target_data = np.zeros(
(len(input_melodies), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
for i, (input_melody, target_melody) in enumerate(zip(input_melodies, target_melodies)):
for t, char in enumerate(input_melody):
encoder_input_data[i, t, input_token_index[char]] = 1.
for t, char in enumerate(target_melody):
decoder_input_data[i, t, target_token_index[char]] = 1.
if t > 0:
decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# build the network
# ------------------------------------------------------------------
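# Encoder: an LSTM reads the one-hot input melody and only its final hidden and cell states are kept.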
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
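# Decoder: initialised with the encoder states; during training it receives the full target sequence (teacher forcing).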
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
model.save('seq2seq.h5')
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
reverse_input_char_index = dict(
(i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
(i, char) for char, i in target_token_index.items())
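# Inference: encode the input once, then run the decoder one step at a time, feeding back its own previous prediction until '\n' or the maximum target length is reached.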
def decode_sequence(input_seq):
states_value = encoder_model.predict(input_seq)
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, target_token_index['\t']] = 1.
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict(
[target_seq] + states_value)
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = reverse_target_char_index[sampled_token_index]
decoded_sentence += sampled_char
if (sampled_char == '\n' or
len(decoded_sentence) > max_decoder_seq_length):
stop_condition = True
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.
states_value = [h, c]
return decoded_sentence
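# Decode the first 100 training sequences and write the input/output pairs to result.txt for manual inspection.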
with open('result.txt','w') as fout:
for seq_index in range(100):
input_seq = encoder_input_data[seq_index: seq_index + 1]
decoded_sentence = decode_sequence(input_seq)
fout.write('-\n')
fout.write('Input sentence:'+ input_melodies[seq_index]+'\n')
fout.write('Decoded sentence:'+decoded_sentence+'\n')
|
the-stack_106_22828 | from datetime import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.mail import mail_admins
from django.db import models
from dennis.translator import Translator
from statsd import statsd
import waffle
from .gengo_utils import (
FjordGengo,
GengoAPIFailure,
GengoMachineTranslationFailure,
GengoUnknownLanguage,
GengoUnsupportedLanguage,
)
from .utils import locale_equals_language
from fjord.base.models import ModelBase
from fjord.base.utils import instance_to_key, wrap_with_paragraphs
from fjord.journal.models import Record
from fjord.journal.utils import j_error, j_info
class SuperModel(models.Model):
"""Model used for unit tests
It's really difficult to define a model in the test suite used
just for testing without a lot of shenanigans with South and the
    db, so instead we define a "real" model, but only use it for
testing.
"""
locale = models.CharField(max_length=5)
desc = models.CharField(blank=True, default=u'', max_length=100)
trans_desc = models.CharField(blank=True, default=u'', max_length=100)
def generate_translation_jobs(self, system=None):
"""This always returns a fake translation job"""
return [
(instance_to_key(self), u'fake', self.locale, u'desc',
u'en', u'trans_desc')
]
_translation_systems = {}
def get_translation_systems():
"""Returns translation systems map
"""
return _translation_systems
def get_translation_system_choices():
"""Returns a tuple of (value, display-name) tuples for Choices field
This inserts a "no choice" choice at the beginning, too, the value of
which is the empty string.
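    e.g. ((u'', u'None'), (u'fake', u'fake'), (u'dennis', u'dennis'), ...), depending on which systems are registered.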
"""
choices = [(key, key) for key in _translation_systems.keys()]
choices.insert(0, (u'', u'None'))
return tuple(choices)
class TranslationSystemMeta(type):
"""Metaclass to register TranslationSystem subclasses"""
def __new__(cls, name, bases, attrs):
new_cls = super(TranslationSystemMeta, cls).__new__(
cls, name, bases, attrs)
if new_cls.name:
_translation_systems[new_cls.name] = new_cls
return new_cls
class TranslationSystem(object):
"""Translation system base class
All translation system plugins should subclass this. They should
additionally do the following:
1. set the name property to something unique
2. implement translate method
See FakeTranslator and DennisTranslator for sample
implementations.
"""
__metaclass__ = TranslationSystemMeta
# Name of this translation system
name = ''
# Whether or not this system uses push and pull translations
use_push_and_pull = False
# Whether or not this system has daily activities
use_daily = False
    def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
        """Implement this to translate fields on an instance
This translates in-place.
If this is an asynchronous system, then this can either push
the text to be translated now or queue the text to be pushed
later in a batch of things to be translated.
"""
raise NotImplementedError()
def push_translations(self):
"""Implement this to do any work required to push translations
This is for asynchronous systems that take a batch of translations,
perform some work, and then return results some time later.
Print any status text to stdout.
"""
raise NotImplementedError()
def pull_translations(self):
"""Implement this to do any work required to pull translations
This is for asynchronous systems that take a batch of translations,
perform some work, and then return results some time later.
Print any status text to stdout.
"""
raise NotImplementedError()
def run_daily_activities(self):
"""Implement this to do any work that needs to happen once per day
Examples:
1. sending out daily reminders
2. sending out a warning about low balance
Print any status text to stdout.
"""
raise NotImplementedError()
def log_info(self, instance, action='translate', msg=u'', metadata=None):
metadata = metadata or {}
j_info(
app='translations',
src=self.name,
action=action,
msg=msg,
instance=instance,
metadata=metadata
)
def log_error(self, instance, action='translate', msg=u'', metadata=None):
metadata = metadata or {}
j_error(
app='translations',
src=self.name,
action=action,
msg=msg,
instance=instance,
metadata=metadata
)
# ---------------------------------------------------------
# Fake translation system
# ---------------------------------------------------------
class FakeTranslator(TranslationSystem):
"""Translates by uppercasing text"""
name = 'fake'
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
setattr(instance, dst_field, getattr(instance, src_field).upper())
instance.save()
self.log_info(instance=instance, action='translate', msg='success')
# ---------------------------------------------------------
# Dennis translation system
# ---------------------------------------------------------
class DennisTranslator(TranslationSystem):
"""Translates using shouty and anglequote"""
name = 'dennis'
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
text = getattr(instance, src_field)
if text:
pipeline = ['shouty', 'anglequote']
translated = Translator([], pipeline).translate_string(text)
setattr(instance, dst_field, translated)
instance.save()
# ---------------------------------------------------------
# Gengo machine translator system AI 9000 of doom
# ---------------------------------------------------------
class GengoMachineTranslator(TranslationSystem):
"""Translates using Gengo machine translation"""
name = 'gengo_machine'
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
# If gengosystem is disabled, we just return immediately. We
# can backfill later.
if not waffle.switch_is_active('gengosystem'):
return
text = getattr(instance, src_field)
metadata = {
'locale': instance.locale,
'length': len(text),
'body': text[:50].encode('utf-8')
}
gengo_api = FjordGengo()
try:
lc_src = gengo_api.guess_language(text)
if lc_src not in gengo_api.get_languages():
raise GengoUnsupportedLanguage(
'unsupported language: {0}'.format(lc_src))
if not locale_equals_language(instance.locale, lc_src):
# Log this for metrics-purposes
self.log_error(
instance,
action='guess-language',
msg='locale "{0}" != guessed language "{1}"'.format(
instance.locale, lc_src),
metadata=metadata)
if locale_equals_language(dst_lang, lc_src):
# If the source language is english, we just copy it over.
setattr(instance, dst_field, text)
instance.save()
self.log_info(
instance, action='translate',
msg=u'lc_src == dst_lang, so we copy src to dst',
metadata=metadata)
return
translated = gengo_api.machine_translate(
instance.id, lc_src, dst_lang, text)
if translated:
setattr(instance, dst_field, translated)
instance.save()
self.log_info(instance, action='translate', msg='success',
metadata=metadata)
statsd.incr('translation.gengo_machine.success')
else:
self.log_error(instance, action='translate',
msg='did not translate', metadata=metadata)
statsd.incr('translation.gengo_machine.failure')
except GengoUnknownLanguage as exc:
# FIXME: This might be an indicator that this response is
            # spam. At some point, we can write code to account for
# that.
self.log_error(instance, action='guess-language', msg=unicode(exc),
metadata=metadata)
statsd.incr('translation.gengo_machine.unknown')
except GengoUnsupportedLanguage as exc:
# FIXME: This is a similar boat to GengoUnknownLanguage
# where for now, we're just going to ignore it because I'm
# not sure what to do about it and I'd like more data.
self.log_error(instance, action='translate', msg=unicode(exc),
metadata=metadata)
statsd.incr('translation.gengo_machine.unsupported')
        except (GengoAPIFailure, GengoMachineTranslationFailure) as exc:
# FIXME: For now, if we have a machine translation
# failure, we're just going to ignore it and move on.
self.log_error(instance, action='translate', msg=unicode(exc),
metadata=metadata)
statsd.incr('translation.gengo_machine.failure')
# ---------------------------------------------------------
# Gengo human translator system
# ---------------------------------------------------------
STATUS_CREATED = 'created'
STATUS_IN_PROGRESS = 'in-progress'
STATUS_COMPLETE = 'complete'
STATUS_CHOICES = (
(STATUS_CREATED, STATUS_CREATED),
(STATUS_IN_PROGRESS, STATUS_IN_PROGRESS),
(STATUS_COMPLETE, STATUS_COMPLETE)
)
class GengoJob(ModelBase):
"""Represents a job for the Gengo human translation system"""
# Generic foreign key to the instance this record is about
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
# Source and destination fields for the translation
src_field = models.CharField(max_length=50)
dst_field = models.CharField(max_length=50)
# Source and destination languages
src_lang = models.CharField(default=u'', blank=True, max_length=10)
dst_lang = models.CharField(default=u'', blank=True, max_length=10)
# Status of the job and the order it's tied to
status = models.CharField(
choices=STATUS_CHOICES, default=STATUS_CREATED, max_length=12)
order = models.ForeignKey('translations.GengoOrder', null=True)
# When this job instance was created
created = models.DateTimeField(default=datetime.now)
# When this job instance was completed
completed = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return u'<GengoJob {0}>'.format(self.id)
def save(self, *args, **kwargs):
        is_new = self.pk is None
        super(GengoJob, self).save(*args, **kwargs)
        if is_new:
            self.log('create GengoJob', {})
@classmethod
    def unique_id_to_id(cls, unique_id):
parts = unique_id.split('||')
return int(parts[-1])
@property
def unique_id(self):
"""Returns a unique id for this job for this host
When we create a job with Gengo, we need to tie that job
uniquely back to a GengoJob row, but that could be created on
a variety of systems. This (attempts to) create a unique
identifier for a specific GengoJob in a specific environment
by (ab)using the SITE_URL.
FIXME: It's possible we don't need to do this because jobs are
tied to orders and order numbers are generated by Gengo and
should be unique.
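        e.g. 'https://example.com||GengoJob||42' for the job with pk 42 (the prefix depends on SITE_URL).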
"""
return '||'.join([
getattr(settings, 'SITE_URL', 'localhost'),
'GengoJob',
str(self.pk)
])
def assign_to_order(self, order):
"""Assigns the job to an order which makes the job in progress"""
self.order = order
self.status = STATUS_IN_PROGRESS
self.save()
def mark_complete(self):
"""Marks a job as complete"""
self.status = STATUS_COMPLETE
self.completed = datetime.now()
self.save()
self.log('completed', {})
def log(self, action, metadata):
j_info(
app='translations',
src='gengo_human',
action=action,
msg='job event',
instance=self,
metadata=metadata
)
@property
def records(self):
return Record.objects.records(self)
class GengoOrder(ModelBase):
"""Represents a Gengo translation order which contains multiple jobs"""
order_id = models.CharField(max_length=100)
status = models.CharField(
choices=STATUS_CHOICES, default=STATUS_IN_PROGRESS, max_length=12)
# When this instance was created which should also line up with
# the time the order was submitted to Gengo
created = models.DateTimeField(default=datetime.now)
# When this order was completed
completed = models.DateTimeField(blank=True, null=True)
def __unicode__(self):
return u'<GengoOrder {0}>'.format(self.id)
def save(self, *args, **kwargs):
        is_new = self.pk is None
        super(GengoOrder, self).save(*args, **kwargs)
        if is_new:
            self.log('create GengoOrder', {})
def mark_complete(self):
"""Marks an order as complete"""
self.status = STATUS_COMPLETE
self.completed = datetime.now()
self.save()
self.log('completed', {})
def completed_jobs(self):
return self.gengojob_set.filter(status=STATUS_COMPLETE)
def outstanding_jobs(self):
return self.gengojob_set.exclude(status=STATUS_COMPLETE)
def log(self, action, metadata):
j_info(
app='translations',
src='gengo_human',
action=action,
msg='order event',
instance=self,
metadata=metadata
)
@property
def records(self):
return Record.objects.records(self)
class GengoHumanTranslator(TranslationSystem):
"""Translates using Gengo human translation
Note: This costs real money!
"""
name = 'gengo_human'
use_push_and_pull = True
use_daily = True
def translate(self, instance, src_lang, src_field, dst_lang, dst_field):
# If gengosystem is disabled, we just return immediately. We
# can backfill later.
if not waffle.switch_is_active('gengosystem'):
return
text = getattr(instance, src_field)
metadata = {
'locale': instance.locale,
'length': len(text),
'body': text[:50].encode('utf-8')
}
gengo_api = FjordGengo()
# Guess the language. If we can't guess the language, then we
# don't create a GengoJob.
try:
lc_src = gengo_api.guess_language(text)
if not locale_equals_language(instance.locale, lc_src):
# Log this for metrics purposes
self.log_error(
instance,
action='guess-language',
msg='locale "{0}" != guessed language "{1}"'.format(
instance.locale, lc_src),
metadata=metadata)
except GengoUnknownLanguage as exc:
# FIXME: This might be an indicator that this response is
            # spam. At some point, we can write code to account for
# that.
self.log_error(instance, action='guess-language', msg=unicode(exc),
metadata=metadata)
statsd.incr('translation.gengo_machine.unknown')
return
except GengoUnsupportedLanguage as exc:
# FIXME: This is a similar boat to GengoUnknownLanguage
# where for now, we're just going to ignore it because I'm
# not sure what to do about it and I'd like more data.
self.log_error(instance, action='translate', msg=unicode(exc),
metadata=metadata)
statsd.incr('translation.gengo_machine.unsupported')
return
# If the source language is english, we just copy it over.
if locale_equals_language(dst_lang, lc_src):
setattr(instance, dst_field, text)
instance.save()
self.log_info(
instance, action='translate',
msg=u'lc_src == dst_lang, so we copy src to dst',
metadata=metadata)
return
# If the src -> dst isn't a supported pair, log an issue for
# metrics purposes and move on.
if (lc_src, dst_lang) not in gengo_api.get_language_pairs():
self.log_error(
instance, action='translate',
msg=u'(lc_src {0}, dst_lang {1}) not supported'.format(
lc_src, dst_lang),
metadata=metadata)
return
job = GengoJob(
content_object=instance,
src_lang=lc_src,
src_field=src_field,
dst_lang=dst_lang,
dst_field=dst_field
)
job.save()
def balance_good_to_continue(self, balance, threshold):
"""Checks whether balance is good to continue
If it's not, this sends some mail and returns False.
We check against a threshold that's high enough that we're
pretty sure the next job we create will not exceed the credits
in the account. Pretty sure if we exceed the credits in the
account, it'll return a non-ok opstat and that'll throw an
exception and everything will be ok data-consistency-wise.
"""
# FIXME: This should email a different group than admin,
# but I'm (ab)using the admin group for now because I know
# they're set up right.
if balance < threshold:
mail_admins(
subject='Gengo account balance {0} < {1}'.format(
balance, threshold),
message=wrap_with_paragraphs(
'Dagnabit! Send more money or the translations get it! '
'Don\'t try no funny business, neither!'
'\n\n'
'Love,'
'\n\n'
'Fjord McGengo'
)
)
return False
return True
def push_translations(self):
# If gengosystem is disabled, we just return immediately. We
# can backfill later.
if not waffle.switch_is_active('gengosystem'):
return
gengo_api = FjordGengo()
if not gengo_api.is_configured():
# If Gengo isn't configured, then we drop out here rather
# than raise a GengoConfig error.
return
balance = gengo_api.get_balance()
threshold = settings.GENGO_ACCOUNT_BALANCE_THRESHOLD
# statsd the balance so we can track it with graphite
statsd.gauge('translation.gengo.balance', balance)
if not self.balance_good_to_continue(balance, threshold):
# If we don't have enough balance, stop.
return
# Create language buckets for the jobs
jobs = GengoJob.objects.filter(status=STATUS_CREATED)
lang_buckets = {}
for job in jobs:
lang_buckets.setdefault(job.src_lang, []).append(job)
        # For each bucket, assemble an order and post it.
for lang, jobs in lang_buckets.items():
batch = []
for job in jobs:
batch.append({
'id': job.id,
'lc_src': job.src_lang,
'lc_dst': job.dst_lang,
'text': getattr(job.content_object, job.src_field),
'unique_id': job.unique_id
})
# This will kick up a GengoAPIFailure which has the
# complete response in the exception message. We want that
# to propagate that and end processing in cases where
# something bad happened because then we can learn more
# about the state things are in. Thus we don't catch
# exceptions here.
resp = gengo_api.human_translate_bulk(batch)
# We should have an order_id at this point, so we create a
# GengoOrder with it.
order = GengoOrder(order_id=resp['order_id'])
order.save()
order.log('created', metadata={'response': resp})
# Persist the order on all the jobs and change their
# status.
for job in jobs:
job.assign_to_order(order)
# Update the balance and see if we're below the threshold.
balance = balance - float(resp['credits_used'])
if not self.balance_good_to_continue(balance, threshold):
# If we don't have enough balance, stop.
return
def pull_translations(self):
# If gengosystem is disabled, we just return immediately. We
# can backfill later.
if not waffle.switch_is_active('gengosystem'):
return
gengo_api = FjordGengo()
if not gengo_api.is_configured():
# If Gengo isn't configured, then we drop out here rather
# than raise a GengoConfig error.
return
# Get all the orders that are in progress
orders = GengoOrder.objects.filter(status=STATUS_IN_PROGRESS)
for order in orders:
# Get the list of all completed jobs
completed = gengo_api.completed_jobs_for_order(order.order_id)
# If there are no completed jobs, then we don't need to
# bother doing any additional processing for this order
if not completed:
continue
# For each complete job we haven't seen before, pull it
# from the db, save the translated text and update all the
# bookkeeping.
for comp in completed:
id_ = GengoJob.unique_id_to_id(comp['custom_data'])
job = GengoJob.objects.get(pk=id_)
if job.status == STATUS_COMPLETE:
continue
instance = job.content_object
setattr(instance, job.dst_field, comp['body_tgt'])
instance.save()
job.mark_complete()
# Check to see if there are still outstanding jobs for
# this order. If there aren't, close the order out.
outstanding = (GengoJob.objects
.filter(order=order, status=STATUS_IN_PROGRESS)
.count())
if outstanding == 0:
order.mark_complete()
def run_daily_activities(self):
# If gengosystem is disabled, we don't want to do anything.
if not waffle.switch_is_active('gengosystem'):
return
gengo_api = FjordGengo()
if not gengo_api.is_configured():
# If Gengo isn't configured, then we drop out here rather
# than raise a GengoConfig error.
return
balance = gengo_api.get_balance()
threshold = settings.GENGO_ACCOUNT_BALANCE_THRESHOLD
if threshold < balance < (2 * threshold):
mail_admins(
subject='Warning: Gengo account balance {0} < {1}'.format(
balance, 2 * threshold),
message=wrap_with_paragraphs(
'Dear mom,'
'\n\n'
'Translations are the fab. Running low on funds. Send '
'more money when you get a chance.'
'\n\n'
'Love,'
'\n\n'
'Fjord McGengo'
)
)
|
the-stack_106_22829 | import bpy
import struct
import mathutils
import math
def write_indices(f, o, mesh, face, index, offset):
f.write(struct.pack("I", face.vertices[index] + offset))
def write_some_data(context, filepath, use_some_setting):
print(filepath)
f = open(filepath, 'wb')
meshes = []
vertex_count = 0
face_count = 0
for o in context.selected_objects:
if o.type == 'MESH':
m = o.to_mesh(context.scene, False, 'PREVIEW')
meshes.append(m)
vertex_count += len(m.vertices)
for face in m.tessfaces:
if len(face.vertices) == 3:
face_count += 1;
if len(face.vertices) == 4:
face_count += 2;
m = None
f.write(struct.pack("I", vertex_count));
offsets = [0]
for m in meshes:
for v in m.vertices:
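            # Blender is Z-up; write each vertex as (x, z, -y) to convert to a Y-up convention.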
f.write(struct.pack("f", v.co.x))
f.write(struct.pack("f", v.co.z))
f.write(struct.pack("f", -v.co.y))
offsets.append(offsets[len(offsets) - 1] + len(m.vertices))
f.write(struct.pack("I", face_count * 3))
i = 0
for m in meshes:
offset = offsets[i]
for face in m.tessfaces:
if len(face.vertices) == 3:
write_indices(f, o, m, face, 0, offset);
write_indices(f, o, m, face, 1, offset);
write_indices(f, o, m, face, 2, offset);
if len(face.vertices) == 4:
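                # Split the quad into two triangles: (0, 2, 1) and (0, 3, 2).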
write_indices(f, o, m, face, 0, offset);
write_indices(f, o, m, face, 2, offset);
write_indices(f, o, m, face, 1, offset);
write_indices(f, o, m, face, 0, offset);
write_indices(f, o, m, face, 3, offset);
write_indices(f, o, m, face, 2, offset);
        i += 1
f.close()
return {'FINISHED'}
# ExportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class ExportSomeData(Operator, ExportHelper):
"""This appears in the tooltip of the operator and in the generated docs"""
    bl_idname = "export_test.some_data"  # important since it's how bpy.ops.export_test.some_data is constructed
bl_label = "Export Some Data"
# ExportHelper mixin class uses this
filename_ext = ".pda"
filter_glob = StringProperty(
default="*.txt",
options={'HIDDEN'},
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_setting = BoolProperty(
name="Example Boolean",
description="Example Tooltip",
default=True,
)
type = EnumProperty(
name="Example Enum",
description="Choose between two items",
items=(('OPT_A', "First Option", "Description one"),
('OPT_B', "Second Option", "Description two")),
default='OPT_A',
)
def execute(self, context):
return write_some_data(context, self.filepath, self.use_setting)
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
self.layout.operator(ExportSomeData.bl_idname, text="Text Export Operator")
def register():
bpy.utils.register_class(ExportSomeData)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(ExportSomeData)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
if __name__ == "__main__":
register()
# test call
bpy.ops.export_test.some_data('INVOKE_DEFAULT') |
the-stack_106_22830 | from typing import Any
from ply.lex import LexToken
from windyquery.ctx import Ctx
from windyquery.validator import ValidationError
from ._base import Base
TOKEN = 'JOIN'
class JoinToken(LexToken):
def __init__(self, value):
self.type = TOKEN
self.value = value
self.lineno = 0
self.lexpos = 0
class Join(Base):
def join(self, tbl: str, *cond: Any):
if len(cond) == 0:
raise UserWarning(f'JOIN cannot be empty')
if len(cond) == 1 or '?' in cond[0]:
_join = cond[0]
params = cond[1:]
elif len(cond) == 3:
_join = f'{cond[0]} {cond[1]} {cond[2]}'
params = []
else:
raise UserWarning(f"Invalid JOIN: {tbl} {cond}")
try:
ctx = Ctx(self.paramOffset, params)
sql = self.validator.validate_join(tbl, _join, ctx)
except ValidationError as err:
raise UserWarning(f'invalid JOIN: {err}') from None
value = {'sql': sql, 'params': ctx.args}
self.paramOffset += len(value['params'])
self.append(JoinToken(value))
|
the-stack_106_22833 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import time
from pathlib import Path
import ezdxf
from ezdxf.addons import iterdxf
BIGFILE = Path(ezdxf.EZDXF_TEST_FILES) / "GKB-R2010.dxf"
# BIGFILE = Path(ezdxf.EZDXF_TEST_FILES) / 'ACAD_R2000.dxf'
OUTDIR = Path("~/Desktop/Outbox").expanduser()
t0 = time.perf_counter()
doc = iterdxf.opendxf(BIGFILE)
line_exporter = doc.export(OUTDIR / "lines.dxf")
text_exporter = doc.export(OUTDIR / "text.dxf")
polyline_exporter = doc.export(OUTDIR / "polyline.dxf")
lwpolyline_exporter = doc.export(OUTDIR / "lwpolyline.dxf")
try:
for entity in doc.modelspace():
if entity.dxftype() == "LINE":
line_exporter.write(entity)
elif entity.dxftype() == "TEXT":
text_exporter.write(entity)
elif entity.dxftype() == "POLYLINE":
polyline_exporter.write(entity)
elif entity.dxftype() == "LWPOLYLINE":
lwpolyline_exporter.write(entity)
finally:
line_exporter.close()
text_exporter.close()
polyline_exporter.close()
lwpolyline_exporter.close()
doc.close()
print(f"Processing time: {time.perf_counter()-t0:.2f}s")
|
the-stack_106_22835 | from fabric import task
from benchmark.local import LocalBench
from benchmark.logs import ParseError, LogParser
from benchmark.utils import Print, BenchError
from benchmark.plot import Ploter, PlotError
from benchmark.instance import InstanceManager
from benchmark.remote import Bench
@task
def local(ctx, debug=True):
''' Run benchmarks on localhost '''
bench_params = {
'faults': 0,
'nodes': 4,
'batch_size': 100,
'rate': 1000,
'duration': 20,
'witness-only': False
}
try:
ret = LocalBench(bench_params).run(debug)
print(ret.result())
except BenchError as e:
Print.error(e)
@task
def create(ctx, nodes=4):
''' Create a testbed'''
try:
InstanceManager.make().create_instances(nodes)
except BenchError as e:
Print.error(e)
@task
def destroy(ctx):
''' Destroy the testbed '''
try:
InstanceManager.make().terminate_instances()
except BenchError as e:
Print.error(e)
@task
def start(ctx, max=2):
''' Start at most `max` machines per data center '''
try:
InstanceManager.make().start_instances(max)
except BenchError as e:
Print.error(e)
@task
def stop(ctx):
''' Stop all machines '''
try:
InstanceManager.make().stop_instances()
except BenchError as e:
Print.error(e)
@task
def info(ctx):
''' Display connect information about all the available machines '''
try:
InstanceManager.make().print_info()
except BenchError as e:
Print.error(e)
@task
def install(ctx):
''' Install the codebase on all machines '''
try:
Bench(ctx).install()
except BenchError as e:
Print.error(e)
@task
def remote(ctx, debug=False):
''' Run benchmarks on AWS '''
bench_params = {
'faults': 1,
'nodes': [10],
'batch_size': 1_024,
'rate': [50, 100, 150],
'duration': 300,
'runs': 2,
}
try:
Bench(ctx).run(bench_params, debug)
except BenchError as e:
Print.error(e)
@task
def plot(ctx):
''' Plot performance using the logs generated by "fab remote" '''
plot_params = {
'faults': [0, 1, 3],
'nodes': [10],
'batch_size': 1024,
'shards': [1],
'collocate': True,
'max_latency': [1000, 30_000]
}
try:
Ploter.plot(plot_params)
except PlotError as e:
Print.error(BenchError('Failed to plot performance', e))
@task
def kill(ctx):
''' Stop execution on all machines '''
try:
Bench(ctx).kill()
except BenchError as e:
Print.error(e)
@task
def logs(ctx):
''' Print a summary of the logs '''
try:
print(LogParser.process('./logs', num_nodes='?', faults='?').result())
except ParseError as e:
Print.error(BenchError('Failed to parse logs', e))
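# These tasks are meant to be invoked through Fabric's CLI, e.g. "fab local", "fab create", "fab remote", "fab plot" (exact flags depend on the installed Fabric version).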
|
the-stack_106_22836 | import ast
import csv
import os
import pandas as pd
import pickle
import tarfile
import xml.etree.ElementTree as ET
import korbinian
import korbinian.utils as utils
import sys
import zipfile
import xml
from multiprocessing import Pool
# import debugging tools
from korbinian.utils import pr, pc, pn, aaa
def run_parse_simap_to_csv(pathdict, s, logging):
"""For a dataframe containing a list of proteins, for each protein parses the SIMAP XML file to a csv file.
Parameters
----------
pathdict : dict
Dictionary of the key paths and files associated with that List number.
s : dict
Settings dictionary extracted from excel settings file.
logging : logging.Logger
Logger for printing to console and logfile.
Saved Files and Figures
-----------------------
acc_not_in_homol_db_txt : txt
        List of uniprot accessions that are not in the homologue database (e.g. SIMAP)
Any XML files with "Query failed: could not find the query sequence (check your query parameters)"
will be added to this list
For each protein (see parse_SIMAP_to_csv() below):
homol_df_orig_zip : zipfile
Zipfile containing the following:
SIMAP_align_pretty_csv : csv
CSV file containing the hit_number protein description and the pretty alignment for each homologue
homol_df_orig_pickle : pickled pd.DataFrame
Dataframe containing all sequence extracted from the XML file.
This can be large, as it contains the full query, markup and match sequences
"""
logging.info('~~~~~~~~~~~~ starting parse_SIMAP_to_csv ~~~~~~~~~~~~')
acc_not_in_homol_db = []
if os.path.isfile(pathdict["acc_not_in_homol_db_txt"]):
# Extracts accession numbers out of file
with open(pathdict["acc_not_in_homol_db_txt"], "r") as source:
for line in source:
line = line.strip()
acc_not_in_homol_db.append(line)
# if multiprocessing is used, log only to the console
p_dict_logging = logging if s["use_multiprocessing"] != True else utils.Log_Only_To_Console()
# create list of protein dictionaries to process
list_p = korbinian.utils.convert_summary_csv_to_input_list(s, pathdict, p_dict_logging, list_excluded_acc=acc_not_in_homol_db)
    # the number of processes is the value in the settings, or the number of proteins, whichever is smaller
n_processes = s["multiprocessing_cores"] if s["multiprocessing_cores"] < len(list_p) else len(list_p)
if s["use_multiprocessing"]:
with Pool(processes=n_processes) as pool:
parse_simap_list = pool.map(parse_SIMAP_to_csv, list_p)
# log the list of protein results to the actual logfile, not just the console
logging.info(parse_simap_list)
try:
# remove all the None values from the list
# note that we don't know exactly how they get there, as all return statements should give a tuple
parse_simap_list = list(filter(None.__ne__, parse_simap_list))
df_parsed = pd.DataFrame(parse_simap_list)
df_parsed.set_index(0, inplace=True)
df_parsed.index.name = "acc"
df_parsed.columns = ["finished", "result"]
not_finished_df = df_parsed.loc[df_parsed.finished == False]
finished_df = df_parsed.loc[df_parsed.finished == True]
if not not_finished_df.empty:
logging.info("\nparse_SIMAP_to_csv proteins not finished :\n\n{}\n".format(df_parsed.loc[df_parsed.finished == False]))
if not finished_df.empty:
logging.info("\nparse_SIMAP_to_csv proteins finished correctly:\n\n{}\n".format(df_parsed.loc[df_parsed.finished == True]))
df_parsed["not_in_database"] = df_parsed.result.str.contains("not in simap database")
new_acc_not_in_db_list = list(df_parsed.loc[df_parsed["not_in_database"]].index)
new_acc_not_in_db_nr_set = set(new_acc_not_in_db_list) - set(acc_not_in_homol_db)
# add accession number to the list of failed downloads
with open(pathdict["acc_not_in_homol_db_txt"], "a") as source:
for acc in new_acc_not_in_db_nr_set:
source.write("\n{}".format(acc))
except (TypeError, IndexError, ValueError):
logging.info(parse_simap_list)
sys.stdout.write("TypeError, IndexError, parse_simap_list is not a list of 3-item tuples for some reason.")
else:
for p in list_p:
parse_SIMAP_to_csv(p)
logging.info('\n~~~~~~~~~~~~ finished parse_SIMAP_to_csv ~~~~~~~~~~~~')
def parse_SIMAP_to_csv(p):
""" Parses the SIMAP XML file to csv for a single protein.
Designed for use in multiprocessing, where logging.info will only print to the console, and the logfile will
contain the messages in the return statements, telling if that protein was successful.
Notes:
- sdict is the dictionary with all the simap header info. It's not actually used anywhere further in the pipeline at the moment.
Parameters
----------
p : dict
Protein Dictionary. Contains all input settings, sequences and filepaths related to a single protein.
        Protein-specific data is extracted from one row of the list summary, e.g. List05_summary.csv, which is read as df.
p also contains the GENERAL korbinian settings and filepaths for that list (pathdict, s, logging)
Components
----------
pathdict : dict
Dictionary of the key paths and files associated with that List number.
s : dict
Settings dictionary extracted from excel settings file.
logging : logging.Logger
Logger for printing to console and/or logfile.
If multiprocessing == True, logging.info etc will only print to console.
p : protein-specific dictionary components
acc, list_of_TMDs, description, TM01_seq, etc
Saved Files and Figures
-----------------------
homol_df_orig_zip : zipfile
Zipfile containing the following:
SIMAP_align_pretty_csv : csv
CSV file containing the hit_number protein description and the pretty alignment for each homologue
homol_df_orig_pickle : pickled pd.DataFrame
Dataframe containing all sequence extracted from the XML file.
This can be large, as it contains the full query, markup and match sequences
Returns
-------
In all cases, a tuple (str, bool, str) is returned.
    if successful:
return acc, True, "0"
if not successful:
return acc, False, "specific warning or reason why protein failed"
"""
pathdict, s, logging = p["pathdict"], p["s"], p["logging"]
acc = p["acc"]
sys.stdout.write("{}, ".format(acc))
sys.stdout.flush()
protein_name = p['protein_name']
# if overwrite_simap_parsed_to_csv is False, skip proteins where the homol_df_orig_zip file seems good
if s["overwrite_simap_parsed_to_csv"] == False:
if os.path.isfile(p['homol_df_orig_zip']):
try:
# open up the csv as a dataframe. Delete the zip file if a csv is not found.
dfh_test = utils.open_df_from_pickle_zip(p['homol_df_orig_zip'], filename=os.path.basename(p['homol_df_orig_pickle']), delete_corrupt=True)
description_of_first_hit = dfh_test.loc[1, 'description']
logging.info('Protein %s: homologues already converted to csv. (%s)' % (p["acc"], description_of_first_hit))
# The file seems fine. Skip to next protein.
warning = "{} skipped, homologues already parsed to csv".format(p['protein_name'])
logging.info(warning)
return acc, False, warning
except (EOFError, KeyError):
# file may be corrupted, if script stopped unexpectedly before compression was finished
logging.info('%s seems to be corrupted. File will be deleted and parsing from xml to csv repeated.' % p['homol_df_orig_zip'])
os.remove(p['homol_df_orig_zip'])
#set up counters
number_of_hits_missing_protein_node = 0
num_hits_with_SW_align_node = 0
number_of_hits_missing_smithWatermanAlignment_node = 0
ft_xml_path = p['SIMAP_feature_table_XML_path']
homol_xml_path = p['SIMAP_homol_XML_path']
SIMAP_tar = p['SIMAP_tar']
homol_xml_filename = os.path.basename(homol_xml_path)
#check which files exist
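    # only the last value returned by check_SIMAP_tarfile (whether the homologue XML is present inside the tarball) is used here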
homol_in_tar = utils.check_SIMAP_tarfile(SIMAP_tar, ft_xml_path, homol_xml_path, acc, logging, delete_corrupt=True)[-1]
# NEW: XML is parsed if only the homol_in_tar (feature tables are not necessary)
if not homol_in_tar:
warning = "{} skipped (no homologues)".format(p['protein_name'])
logging.info(warning)
return acc, False, warning
# create subfolders, if they don't exist
subfolder = os.path.dirname(p['homol_df_orig_zip'])
utils.make_sure_path_exists(subfolder)
#extract the tarfile so that it can be read as xml
tar = tarfile.open(p['SIMAP_tar'], 'r:gz')
SIMAP_homologues_XML_file_extracted = tar.extractfile(homol_xml_filename)
try:
        # parse the XML file with elementtree, define the 'root' of the XML file
simap_homologue_tree = ET.parse(SIMAP_homologues_XML_file_extracted)
simap_homologue_root = simap_homologue_tree.getroot()
except xml.etree.ElementTree.ParseError:
# returns a tuple
message = "{} contains xml file that gives a ParseError. " \
"In the future, file may be automatically deleted.".format(p['homol_df_orig_zip'])
logging.info(message)
return acc, False, message
try:
error = simap_homologue_root[0][0][1][0].text
if "could not find the query sequence" in error:
# returns a tuple
message = "{} not in simap database".format(acc)
logging.info(message)
return acc, False, message
except IndexError:
# file is probably normal, as it doesn't contain the message saying that the protein is not found in the database
pass
# the sdict is the dictionary of info at top of SIMAP XML, before the matches start
# it will be saved in a separate csv
sdict = {}
try:
sdict['SIMAP_created'] = simap_homologue_root[0][0][0][0][2][1][0].attrib["created"]
for parameters in simap_homologue_root[0][0][0][0].iter('parameters'):
sdict['SIMAP_input_seq_details_dict'] = str(parameters[0][0].attrib)
for SIMAP_filter in parameters.iter('filter'):
SIMAP_filter_string = SIMAP_filter.text
sdict['SIMAP_filter_string'] = str(SIMAP_filter_string)
for resultSpecification in parameters.iter('resultSpecification'):
SIMAP_resultSpecification_dict = resultSpecification.attrib
sdict['SIMAP_resultSpecification_dict'] = '"%s"' % SIMAP_resultSpecification_dict
for databases in parameters.iter('databases'):
database_details_dict = databases[0].attrib
sdict['database_details_dict'] = '"%s"' % database_details_dict
sdict['simap_version'] = simap_homologue_root[0][0][0][0][0].attrib['version']
sdict['SIMAP_total_hits'] = int(simap_homologue_root[0][0][0][1][0].attrib['total'])
if sdict['simap_version'] != '4.0':
logging.warning('WARNING! Your XML file is simap version %s,'
                            ' however this SIMAP parser was developed for SIMAP version 4.0.' %
sdict['simap_version'])
query_sequence_node = simap_homologue_root[0][0][0][0][2][0][0]
''' xxxx CURRENTLY THE df is filled with nan values,
but that doesn't make sense as the script seems to work
'''
sdict['query_md5'] = query_sequence_node.attrib['md5']
sdict['seqlen'] = int(query_sequence_node.attrib['length'])
sdict['query_selfscore'] = query_sequence_node.attrib['selfscore']
sdict['query_sequenceid'] = query_sequence_node.attrib['sequenceid']
sdict['total_number_of_simap_hits'] = query_sequence_node[0].attrib['number_hits']
sdict['query_sequence_from_homologue_XML_file'] = query_sequence_node[0][0].text
sdict['number_of_hits_in_homologue_XML_file'] = int(simap_homologue_root[0][0][0][1][0].attrib['total'])
except (IndexError, KeyError):
warning = "{} skipped, homologue XML seems to be damaged. Error in reading general query details.".format(protein_name)
        logging.warning(warning)
# skip to the next protein
return acc, False, warning
if p['full_seq'].upper() != sdict['query_sequence_from_homologue_XML_file'].upper():
logging.warning("...............................\n"
"{} WARNING: Mismatch between full_seq and SIMAP seq from XML file. Tarball with SIMAP XML is probably old and should be deleted.\n"
"full_seq : {}\n"
"XML_seq : {}\n"
"Tarball : {}\n"
"acc has been added to mismatch_full_seq_with_simap_txt\n"
"...............................\n".format(acc, p['full_seq'].upper(),sdict['query_sequence_from_homologue_XML_file'].upper(), p['SIMAP_tar']))
# add accession number to the list of acc with a sequence mismatch
mismatch_full_seq_with_simap_list = utils.get_acc_list_from_txt(pathdict["mismatch_full_seq_with_simap_txt"])
if acc not in mismatch_full_seq_with_simap_list:
with open(pathdict["mismatch_full_seq_with_simap_txt"], "a") as source:
source.write("\n{}".format(acc))
#for each hit, save all the relevant data in the form of a dictionary,
# so it can be added to a csv file or used in other calculations
simap_homologue_hits = simap_homologue_root[0][0][0][1][0]
#see if there are any hits at all
try:
test2 = simap_homologue_root[0][0][0][1][0][0]
except IndexError:
warning = "{} skipped, homologue XML has no hits.".format(protein_name)
logging.warning(warning)
# skip to the next protein
return acc, False, warning
"""OLD AMINO ACID SUBSTITUTION CODE. THIS IS SLOW, AND GIVES NO SIGNIFICANT DIFFERENCE TO
AAIMON OR AASMON WITH THE SIMAP SMITH-WATERMAN MATRIX"""
#load the amino acid substitution matrices from the settings file
#list_of_aa_sub_matrices = s['aa_sub_matrices']
#import the amino acid substitution matrices
#utils.import_amino_acid_substitution_matrices()
#add the similarity ratios to the csv_header_for_SIMAP_homologue_file.
# These will depend on the individual settings
# if s['["mp_calculate_TMD_conservation_with_aa_matrices']:
# for j in range(s["gap_open_penalty_min"],
# s["gap_open_penalty_max"],
# s["gap_open_penalty_increment"]):
# gap_open_penalty = j
# gap_extension_penalty = j
# for matrix_name in list_of_aa_sub_matrices:
# column_name = 'sim_ratio_%s_gapo%i' % (matrix_name.replace("'", "")[0:-7], j)
# csv_header_for_SIMAP_homologue_file.append(column_name)
#import the necessary matrices
#for matrix_name in list_of_aa_sub_matrices:
#matrix = matrix_name[0:-7]
#from Bio.SubsMat.MatrixInfo import matrix as matrix_name
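    # hits are first streamed to a temporary uncompressed csv; it is later re-read into a dataframe, pickled into the zip archive, and the temporary files are deleted at the end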
SIMAP_orig_csv = p['homol_df_orig_zip'][:-4] + ".csv"
#fasta_file_path = p['fasta_file_path']
#create an empty file
open(SIMAP_orig_csv, 'w').close()
#reopen to add match details iteratively from dictionary
csvfile = open(SIMAP_orig_csv, 'a')
#set up a bool to catch those files where not a single hit actually gives data
at_least_one_hit_contains_SW_node = False
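    # walk every hit in the XML and flatten the relevant attributes into match_details_dict; each dict becomes one csv row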
for hit in simap_homologue_hits:
match_details_dict = {}
#add desired hit information to the dictionary for transfer to csv
hit_num = int(hit.attrib['number'])
match_details_dict['hit_num'] = hit_num
match_details_dict['md5'] = hit[1].attrib['md5']
#define the major nodes in the XML-file
try:
protein_node = hit[1][1]
hit_contains_protein_node = True
except IndexError:
hit_contains_protein_node = False
number_of_hits_missing_protein_node += 1
logging.warning('%s hit %s contains no protein node' % (protein_name, match_details_dict['md5']))
if not hit_contains_protein_node:
#skip to next hit
continue
try:
smithWatermanAlignment_node = hit[0][0][14]
hit_contains_SW_node = True
num_hits_with_SW_align_node += 1
except IndexError:
hit_contains_SW_node = False
match_details_dict['hit_contains_SW_node'] = hit_contains_SW_node
#add the description. Add a custom name if it is the first (query) hit
if hit_num == 1:
description = '%s_SIMAP_query_sequence' % protein_name
else:
description = protein_node.attrib['description']
match_details_dict['description'] = description
try:
databaseId = int(protein_node[1].attrib['databaseId'])
match_details_dict['databaseId'] = int(protein_node[1].attrib['databaseId'])
except KeyError:
databaseId = 0
#match_details_dict['databaseId'] = int(0)
#databaseId = int(protein_node[1].attrib['databaseId'])
databasenode = protein_node[1]
match_details_dict['database'] = databasenode.attrib['name']
try:
taxonomyNode = protein_node[2]
match_details_dict['organism'] = taxonomyNode.attrib['name']
match_details_dict['taxonomy_node_id'] = taxonomyNode.attrib['node_id']
match_details_dict['taxonomy_rank'] = taxonomyNode.attrib['rank']
except IndexError:
#sequence is from an unknown organism, as it has no database node
match_details_dict['description'] += ', no_database_node'
match_details_dict['organism'] = 'no_database_node'
match_details_dict['taxonomy_node_id'] = 'no_database_node'
match_details_dict['taxonomy_rank'] = 'no_database_node'
match_details_dict['len_full_match_seq'] = len(hit[1][0][0].text)
#len_full_match_seq = len(full_match_seq)
alignment_node = hit[0][0]
#E-value for hit
match_details_dict['FASTA_expectation'] = float(alignment_node[1].text)
#convert identity from e.g. 80 (80%) to 0.8
match_details_dict['FASTA_identity'] = float(alignment_node[3].text) / 100
#strangely, I think gappedIdentity is the identity EXCLUDING gaps, which is a better value to base judgements on. convert identity from e.g. 80 (80%) to 0.8
match_details_dict['FASTA_gapped_identity'] = float(alignment_node[4].text) / 100
# creating the real observed changes from FASTA_gapped_identity - this is a percentage value now!!!
match_details_dict['obs_changes'] = 100 - float(alignment_node[4].text)
'''xxx notes on the gapped identity
N.B The FASTA_gapped_identity data here is from the FASTA algorithm, that precedes the SW algorithm.
Occasionally they don’t match!!!
I calculate the TMD identity manually from the SW alignment, BUT
currently for the calculation of membranous/nonmembranous I use the gappedIdentity from the FASTA output
        (the SW output only has identity including gaps)
- if I counted the gaps from the SW alignment, I COULD recalculate the gappedIdentity for the SW alignment
- OR: I could simply remove the data where the FASTA and SW don’t match.
'''
#FASTA overlap should be the length of the aligned region after running the FASTA algorithm (alignment is not shown by SIMAP)
match_details_dict['FASTA_overlap'] = int(alignment_node[5].text)
match_details_dict['FASTA_query_coverage'] = float(alignment_node[11].text)
match_details_dict['FASTA_match_coverage'] = float(alignment_node[12].text)
#find the start and the stop of the hsp
querySeq = alignment_node[6]
match_details_dict['FASTA_query_start'] = int(querySeq.attrib['start'])
match_details_dict['FASTA_query_end'] = int(querySeq.attrib['end'])
matchSeq = alignment_node[7]
match_details_dict['FASTA_match_start'] = int(matchSeq.attrib['start'])
match_details_dict['FASTA_match_end'] = int(matchSeq.attrib['end'])
"""OLD CALCULATIONS THAT ARE NOW CONVERTED TO PANDAS ARRAY-WISE FUNCTIONS"""
#some parameters that are needed for identity calculations later
#FASTA_num_ident_res = FASTA_identity / 100.0 * FASTA_overlap
#is_start_of_TMD_in_FASTA = True if FASTA_query_start <= TMDstart else False
#is_end_of_TMD_in_FASTA = True if TMDend <= FASTA_query_end else False
#is_TMD_in_FASTA_alignment = True if all([is_start_of_TMD_in_FASTA, is_end_of_TMD_in_FASTA]) else False
'''***********************if the TMD region is actually covered by the hsp, then conduct some further analyses of the match TMD region*************************'''
if hit_contains_SW_node:
query_align_seq = ''
'''For the moment, there is no need to put the whole match hsp sequence into the csv file'''
#for smithWatermanAlignment in alignment_node.iter('smithWatermanAlignment'):
match_details_dict['SW_query_score_ratio'] = smithWatermanAlignment_node[0].text
match_details_dict['SW_match_score_ratio'] = smithWatermanAlignment_node[1].text
match_details_dict['SW_query_coverage'] = smithWatermanAlignment_node[2].text
match_details_dict['SW_match_coverage'] = smithWatermanAlignment_node[3].text
match_details_dict['SW_coverage_ratio'] = smithWatermanAlignment_node[4].text
match_details_dict['align_pretty'] = smithWatermanAlignment_node[8].text
match_details_dict['SW_alignment_seq1offset'] = int(smithWatermanAlignment_node.attrib['alignment-seq1offset'])
match_details_dict['SW_alignment_seq2offset'] = int(smithWatermanAlignment_node.attrib['alignment-seq2offset'])
match_details_dict['SW_identity'] = float(smithWatermanAlignment_node.attrib['identity'])
match_details_dict['SW_similarity'] = float(smithWatermanAlignment_node.attrib['similarity'])
#Get the full sequences. Note that they greatly increase the size of the csv file.
match_details_dict['query_align_seq'] = smithWatermanAlignment_node[5].text
match_details_dict['align_markup_seq'] = smithWatermanAlignment_node[6].text
match_details_dict['match_align_seq'] = smithWatermanAlignment_node[7].text
else:
number_of_hits_missing_smithWatermanAlignment_node += 1
if hit_num == 1:
#sort
csv_header_for_SIMAP_homologue_file = sorted(list(match_details_dict.keys()))
#save the csv header to the csv file
writer = csv.writer(csvfile, delimiter=',', quotechar='"', lineterminator='\n',quoting=csv.QUOTE_NONNUMERIC, doublequote=True)
writer.writerow(csv_header_for_SIMAP_homologue_file)
#save the match_details_dict as a line in the csv file
writer = csv.DictWriter(csvfile, fieldnames=csv_header_for_SIMAP_homologue_file,
extrasaction='ignore', delimiter=',', quotechar='"',
lineterminator='\n', quoting=csv.QUOTE_NONNUMERIC,
doublequote=True)
writer.writerow(match_details_dict)
# close tar and csv
csvfile.close()
tar.close()
# open csv as a dataframe,
df_homol = pd.read_csv(SIMAP_orig_csv, sep=",", quoting=csv.QUOTE_NONNUMERIC, index_col="hit_num")
if "query_align_seq" not in df_homol.columns:
# this is a serious error in the XML file. None of the hits had a protein node. The file should probably be downloaded.
warning = 'The homologue XML file likely has a serious error, "query_align_seq" is not in dataframe. ' \
'XML should probably be re-downloaded.\n' \
'df_homol["hit_contains_SW_node"].value_counts()\n{}'.format(df_homol["hit_contains_SW_node"].value_counts())
logging.warning(warning)
# skip this protein
return acc, False, warning
# get length of seq. Previously this was a lambda function that needed more filtering
df_homol['len_query_align_seq'] = df_homol['query_align_seq'].str.len()
# conduct the text searching for disallowed words
words_not_allowed_in_description = ast.literal_eval(s["words_not_allowed_in_description"])
# collect disallowed words in hit protein description (patent, synthetic, etc)
df_homol['list_disallowed_words_in_descr'] = df_homol['description'].dropna().apply(utils.find_disallowed_words, args=(words_not_allowed_in_description,))
# create a boolean column to select hits that do not contain these words in the description
df_homol['disallowed_words_not_in_descr'] = df_homol['list_disallowed_words_in_descr'] == '[]'
# check if there are non-IUPAC amino acids in the sequence (frequently large gaps from NG sequencing data)
df_homol['X_in_match_seq'] = df_homol['match_align_seq'].str.contains("X")
# restrict to just a few columns including the align_pretty that might be useful to check manually
df_pretty = df_homol[["FASTA_gapped_identity", "obs_changes", "organism", "description", "align_pretty"]]
# save the align_pretty to csv
df_pretty.to_csv(p['SIMAP_align_pretty_csv'], sep=',', quoting=csv.QUOTE_NONNUMERIC)
# drop the align_pretty column from the orig dataframe
df_homol.drop('align_pretty', axis=1, inplace=True)
# save the whole dataframe as a pickle for faster opening later
with open(p['homol_df_orig_pickle'], "wb") as pick:
pickle.dump(df_homol, pick, protocol=pickle.HIGHEST_PROTOCOL)
simap_header_info_ser = pd.Series(sdict)
simap_header_info_ser.to_csv(p['simap_header_info_csv'])
# either create new zip and add ("w"), or open existing zip and add "a"
with zipfile.ZipFile(p['homol_df_orig_zip'], mode="w", compression=zipfile.ZIP_DEFLATED) as zipout:
#zipout.write(SIMAP_orig_csv, arcname=os.path.basename(SIMAP_orig_csv))
zipout.write(p['SIMAP_align_pretty_csv'], arcname=os.path.basename(p['SIMAP_align_pretty_csv']))
zipout.write(p['homol_df_orig_pickle'], arcname=os.path.basename(p['homol_df_orig_pickle']))
zipout.write(p['simap_header_info_csv'], arcname=os.path.basename(p['simap_header_info_csv']))
# delete temporary uncompressed files
os.remove(SIMAP_orig_csv)
os.remove(p['SIMAP_align_pretty_csv'])
os.remove(p['homol_df_orig_pickle'])
os.remove(p['simap_header_info_csv'])
return acc, True, "0"
def get_phobius_TMD_region(feature_table_root):
"""Old function, no longer in use."""
for feature in feature_table_root[0]:
if 'PHOBIUS' and 'TMHelix' in feature.attrib.values():
for begin in feature.iter('begin'):
TMD_start = int(begin.attrib['position']) #same as feature[0][0].attrib['position'], but more resistant to parser breaking
for end in feature.iter('end'):
TMD_end = int(end.attrib['position'])
TMD_length = TMD_end - TMD_start + 1
#logging.info('phobius prediction: TMD start = %s, TMD end = %s' % (TMD_start, TMD_end))
#logging.info(begin.attrib['position']) #same as feature[0][0].attrib['position'], but more resistant to parser breaking
else:
TMD_start, TMD_end, TMD_length = 0, 0, 0
return TMD_start, TMD_end, TMD_length
def get_TMHMM_TMD_region(root):
"""Old function, no longer in use."""
for feature in root[0]:
for subfeature in feature:
            if 'TMHMM' in subfeature.attrib.values() and 'TMHelix' in subfeature.attrib.values():
for begin in subfeature.iter('begin'):
TMD_start = begin.attrib['position'] #same as feature[0][0].attrib['position'], but more resistant to parser breaking
for end in subfeature.iter('end'):
TMD_end = end.attrib['position']
#logging.info('TMHMM prediction: TMD start = %s, TMD end = %s' % (TMD_start, TMD_end))
#logging.info(begin.attrib['position']) #same as feature[0][0].attrib['position'], but more resistant to parser breaking
else:
TMD_start, TMD_end = 0, 0
return TMD_start, TMD_end |
the-stack_106_22840 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="brazilpinpoint",
version="1.0.7",
author="Newton Galiza",
author_email="[email protected]",
description="Using a latitude and longitude a pin point is marked on Brazil map",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NewtonGaliza/BrazilPinPoint-PyPi-",
packages=setuptools.find_packages(),
install_requires = ['folium'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
the-stack_106_22841 | import pytest
from django.urls import resolve, reverse
from peer_lending.users.models import User
pytestmark = pytest.mark.django_db
def test_user_detail(user: User):
assert (
reverse("api:user-detail", kwargs={"username": user.username})
== f"/api/users/{user.username}/"
)
assert resolve(f"/api/users/{user.username}/").view_name == "api:user-detail"
def test_user_list():
assert reverse("api:user-list") == "/api/users/"
assert resolve("/api/users/").view_name == "api:user-list"
def test_user_me():
assert reverse("api:user-me") == "/api/users/me/"
assert resolve("/api/users/me/").view_name == "api:user-me"
|
the-stack_106_22842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
from botocore import UNSIGNED
from botocore.client import Config
import botocore.exceptions
from urllib.parse import urlparse
from typing import List, Mapping, Optional, Tuple, Union
import os
import shutil
import logging
from . import FetcherException
from ..common import AbsPath, AnyURI, ContentKind, SecurityContextConfig
from ..common import URIType, URIWithMetadata
from ..common import ProtocolFetcher, ProtocolFetcherReturn
# Logger of this module
logger = logging.getLogger(__name__)
def downloadContentFrom_s3(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> ProtocolFetcherReturn:
urlParse = urlparse(remote_file)
bucket = urlParse.netloc
prefix = urlParse.path
prefix = prefix[1:]
local_path = cachedFilename
if(isinstance(secContext, dict)):
access_key = secContext.get('access_key')
secret_key = secContext.get('secret_key')
else:
access_key = None
secret_key = None
try:
        if access_key is None and secret_key is None:
            s3cli = boto3.client('s3', config=Config(signature_version=UNSIGNED))
        else:
            s3cli = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
except botocore.exceptions.ClientError as error:
errmsg = f'S3 client authentication error on {remote_file}'
logger.exception(errmsg)
raise FetcherException(errmsg) from error
metadata_payload : List[Mapping] = []
metadata = {
'fetched': remote_file,
'payload': metadata_payload
}
try:
s3_obj = s3cli.get_object(Bucket=bucket, Key=prefix)
with open(cachedFilename, mode='wb') as dH:
shutil.copyfileobj(s3_obj['Body'], dH)
s3_obj.pop('Body')
metadata_payload.append(s3_obj)
kind = ContentKind.File
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] != 'NoSuchKey':
raise error
# This happens when the object is not a file
blob_prefix = prefix
if blob_prefix[-1] != '/':
blob_prefix += '/'
# Check whether there is something
response = s3cli.list_objects_v2(Bucket=bucket, Prefix=blob_prefix, MaxKeys=1)
# Nothing leads to an exception
if response["KeyCount"] == 0:
errmsg = f'Path prefix {blob_prefix} from {remote_file} matches no blob'
logger.error(errmsg)
raise FetcherException(errmsg)
# Let's use the paginator
try:
paginator = s3cli.get_paginator('list_objects_v2')
for result in paginator.paginate(Bucket=bucket, Prefix=prefix):
for key in result['Contents']:
local_blob_filename = os.path.join(local_path, key['Key'][len(blob_prefix):])
try:
os.makedirs(os.path.dirname(local_blob_filename), exist_ok=True)
s3cli.download_file(bucket, key['Key'], local_blob_filename)
metadata_payload.append(key)
except Exception as e:
errmsg = f'Error downloading {key["Key"]} from {remote_file} to {local_blob_filename}'
logger.exception(errmsg)
raise FetcherException(errmsg) from e
except FetcherException as fe:
raise fe
except Exception as e:
errmsg = f'Error paginating {prefix} from {remote_file} to {local_path}'
logger.exception(errmsg)
raise FetcherException(errmsg) from e
kind = ContentKind.Directory
return kind, [ URIWithMetadata(remote_file, metadata) ], None
S3_SCHEME_HANDLERS : Mapping[str, ProtocolFetcher] = {
's3': downloadContentFrom_s3
}
|
the-stack_106_22843 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SketchRNN data loading and image manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
def get_bounds(data, factor=10):
"""Return bounds of data."""
min_x = 0
max_x = 0
min_y = 0
max_y = 0
abs_x = 0
abs_y = 0
for i in range(len(data)):
x = float(data[i, 0]) / factor
y = float(data[i, 1]) / factor
abs_x += x
abs_y += y
min_x = min(min_x, abs_x)
min_y = min(min_y, abs_y)
max_x = max(max_x, abs_x)
max_y = max(max_y, abs_y)
return (min_x, max_x, min_y, max_y)
def slerp(p0, p1, t):
"""Spherical interpolation."""
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1
def lerp(p0, p1, t):
"""Linear interpolation."""
return (1.0 - t) * p0 + t * p1
# A note on formats:
# Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are
# different stroke encodings.
# stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary
# variable which is 1 if the pen is lifted between this position and
# the next, and 0 otherwise.
# stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary
# one-hot vector of 3 possible pen states: pen down, pen up, end of sketch.
# See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail.
# Sketch-RNN takes input in stroke-5 format, with sketches padded to a common
# maximum length and prefixed by the special start token [0, 0, 1, 0, 0]
# The QuickDraw dataset is stored using stroke-3.
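# Worked example (added for clarity, not part of the original module): a
# two-point stroke-3 sequence and its stroke-5 equivalent as produced by
# to_big_strokes below (remaining padded rows are all [0, 0, 0, 0, 1]):
#   stroke-3: [[5, 0, 0], [0, 5, 1]]
#   stroke-5: [[5, 0, 1, 0, 0], [0, 5, 0, 1, 0], [0, 0, 0, 0, 1], ...]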
def strokes_to_lines(strokes):
"""Convert stroke-3 format to polyline format."""
x = 0
y = 0
lines = []
line = []
for i in range(len(strokes)):
if strokes[i, 2] == 1:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
lines.append(line)
line = []
else:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
return lines
def lines_to_strokes(lines):
"""Convert polyline format to stroke-3 format."""
eos = 0
strokes = [[0, 0, 0]]
for line in lines:
linelen = len(line)
for i in range(linelen):
eos = 0 if i < linelen - 1 else 1
strokes.append([line[i][0], line[i][1], eos])
strokes = np.array(strokes)
strokes[1:, 0:2] -= strokes[:-1, 0:2]
return strokes[1:, :]
def augment_strokes(strokes, prob=0.0):
"""Perform data augmentation by randomly dropping out strokes."""
# drop each point within a line segments with a probability of prob
# note that the logic in the loop prevents points at the ends to be dropped.
result = []
prev_stroke = [0, 0, 1]
count = 0
stroke = [0, 0, 1] # Added to be safe.
for i in range(len(strokes)):
candidate = [strokes[i][0], strokes[i][1], strokes[i][2]]
if candidate[2] == 1 or prev_stroke[2] == 1:
count = 0
else:
count += 1
urnd = np.random.rand() # uniform random variable
if candidate[2] == 0 and prev_stroke[2] == 0 and count > 2 and urnd < prob:
stroke[0] += candidate[0]
stroke[1] += candidate[1]
else:
stroke = candidate
prev_stroke = stroke
result.append(stroke)
return np.array(result)
def scale_bound(stroke, average_dimension=10.0):
"""Scale an entire image to be less than a certain size."""
# stroke is a numpy array of [dx, dy, pstate], average_dimension is a float.
# modifies stroke directly.
bounds = get_bounds(stroke, 1)
max_dimension = max(bounds[1] - bounds[0], bounds[3] - bounds[2])
stroke[:, 0:2] /= (max_dimension / average_dimension)
def to_normal_strokes(big_stroke):
"""Convert from stroke-5 format (from sketch-rnn paper) back to stroke-3."""
l = 0
for i in range(len(big_stroke)):
if big_stroke[i, 4] > 0:
l = i
break
if l == 0:
l = len(big_stroke)
result = np.zeros((l, 3))
result[:, 0:2] = big_stroke[0:l, 0:2]
result[:, 2] = big_stroke[0:l, 3]
return result
def clean_strokes(sample_strokes, factor=100):
"""Cut irrelevant end points, scale to pixel space and store as integer."""
# Useful function for exporting data to .json format.
copy_stroke = []
added_final = False
for j in range(len(sample_strokes)):
finish_flag = int(sample_strokes[j][4])
if finish_flag == 0:
copy_stroke.append([
int(round(sample_strokes[j][0] * factor)),
int(round(sample_strokes[j][1] * factor)),
int(sample_strokes[j][2]),
int(sample_strokes[j][3]), finish_flag
])
else:
copy_stroke.append([0, 0, 0, 0, 1])
added_final = True
break
if not added_final:
copy_stroke.append([0, 0, 0, 0, 1])
return copy_stroke
def to_big_strokes(stroke, max_len=250):
"""Converts from stroke-3 to stroke-5 format and pads to given length."""
# (But does not insert special start token).
result = np.zeros((max_len, 5), dtype=float)
l = len(stroke)
assert l <= max_len
result[0:l, 0:2] = stroke[:, 0:2]
result[0:l, 3] = stroke[:, 2]
result[0:l, 2] = 1 - result[0:l, 3]
result[l:, 4] = 1
return result
def get_max_len(strokes):
"""Return the maximum length of an array of strokes."""
max_len = 0
for stroke in strokes:
ml = len(stroke)
if ml > max_len:
max_len = ml
return max_len
class DataLoader(object):
"""Class for loading data."""
def __init__(self,
strokes,
batch_size=100,
max_seq_length=250,
scale_factor=1.0,
random_scale_factor=0.0,
augment_stroke_prob=0.0,
limit=1000):
self.batch_size = batch_size # minibatch size
self.max_seq_length = max_seq_length # N_max in sketch-rnn paper
self.scale_factor = scale_factor # divide offsets by this factor
self.random_scale_factor = random_scale_factor # data augmentation method
# Removes large gaps in the data. x and y offsets are clamped to have
# absolute value no greater than this limit.
self.limit = limit
self.augment_stroke_prob = augment_stroke_prob # data augmentation method
self.start_stroke_token = [0, 0, 1, 0, 0] # S_0 in sketch-rnn paper
# sets self.strokes (list of ndarrays, one per sketch, in stroke-3 format,
# sorted by size)
self.preprocess(strokes)
def preprocess(self, strokes):
"""Remove entries from strokes having > max_seq_length points."""
raw_data = []
seq_len = []
count_data = 0
for i in range(len(strokes)):
data = strokes[i]
if len(data) <= (self.max_seq_length):
count_data += 1
# removes large gaps from the data
data = np.minimum(data, self.limit)
data = np.maximum(data, -self.limit)
data = np.array(data, dtype=np.float32)
data[:, 0:2] /= self.scale_factor
raw_data.append(data)
seq_len.append(len(data))
seq_len = np.array(seq_len) # nstrokes for each sketch
idx = np.argsort(seq_len)
self.strokes = []
for i in range(len(seq_len)):
self.strokes.append(raw_data[idx[i]])
print("total images <= max_seq_len is %d" % count_data)
self.num_batches = int(count_data / self.batch_size)
def random_sample(self):
"""Return a random sample, in stroke-3 format as used by draw_strokes."""
sample = np.copy(random.choice(self.strokes))
return sample
def random_scale(self, data):
"""Augment data by stretching x and y axis randomly [1-e, 1+e]."""
x_scale_factor = (
np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0
y_scale_factor = (
np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0
result = np.copy(data)
result[:, 0] *= x_scale_factor
result[:, 1] *= y_scale_factor
return result
def calculate_normalizing_scale_factor(self):
"""Calculate the normalizing factor explained in appendix of sketch-rnn."""
data = []
for i in range(len(self.strokes)):
if len(self.strokes[i]) > self.max_seq_length:
continue
for j in range(len(self.strokes[i])):
data.append(self.strokes[i][j, 0])
data.append(self.strokes[i][j, 1])
data = np.array(data)
return np.std(data)
def normalize(self, scale_factor=None):
"""Normalize entire dataset (delta_x, delta_y) by the scaling factor."""
if scale_factor is None:
scale_factor = self.calculate_normalizing_scale_factor()
self.scale_factor = scale_factor
for i in range(len(self.strokes)):
self.strokes[i][:, 0:2] /= self.scale_factor
def _get_batch_from_indices(self, indices):
"""Given a list of indices, return the potentially augmented batch."""
x_batch = []
seq_len = []
for idx in range(len(indices)):
i = indices[idx]
data = self.random_scale(self.strokes[i])
data_copy = np.copy(data)
if self.augment_stroke_prob > 0:
data_copy = augment_strokes(data_copy, self.augment_stroke_prob)
x_batch.append(data_copy)
length = len(data_copy)
seq_len.append(length)
seq_len = np.array(seq_len, dtype=int)
# We return three things: stroke-3 format, stroke-5 format, list of seq_len.
return x_batch, self.pad_batch(x_batch, self.max_seq_length), seq_len
def random_batch(self):
"""Return a randomised portion of the training data."""
idx = np.random.permutation(range(0, len(self.strokes)))[0:self.batch_size]
return self._get_batch_from_indices(idx)
def get_batch(self, idx):
"""Get the idx'th batch from the dataset."""
assert idx >= 0, "idx must be non negative"
assert idx < self.num_batches, "idx must be less than the number of batches"
start_idx = idx * self.batch_size
indices = range(start_idx, start_idx + self.batch_size)
return self._get_batch_from_indices(indices)
def pad_batch(self, batch, max_len):
"""Pad the batch to be stroke-5 bigger format as described in paper."""
result = np.zeros((self.batch_size, max_len + 1, 5), dtype=float)
assert len(batch) == self.batch_size
for i in range(self.batch_size):
l = len(batch[i])
assert l <= max_len
result[i, 0:l, 0:2] = batch[i][:, 0:2]
result[i, 0:l, 3] = batch[i][:, 2]
result[i, 0:l, 2] = 1 - result[i, 0:l, 3]
result[i, l:, 4] = 1
# put in the first token, as described in sketch-rnn methodology
result[i, 1:, :] = result[i, :-1, :]
result[i, 0, :] = 0
result[i, 0, 2] = self.start_stroke_token[2] # setting S_0 from paper.
result[i, 0, 3] = self.start_stroke_token[3]
result[i, 0, 4] = self.start_stroke_token[4]
return result
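# Minimal usage sketch (illustrative; `strokes` is assumed to be a list of
# stroke-3 numpy arrays, e.g. loaded from a QuickDraw .npz file):
#
#   loader = DataLoader(strokes, batch_size=100, max_seq_length=250)
#   loader.normalize()  # divide offsets by the dataset's standard deviation
#   _, batch_stroke5, seq_len = loader.random_batch()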
|
the-stack_106_22844 | import random
from ..exceptions import DriverNotFound
from .BaseConnection import BaseConnection
from ..query.grammars import MSSQLGrammar
from ..schema.platforms import MSSQLPlatform
from ..query.processors import MSSQLPostProcessor
from ..exceptions import QueryException
CONNECTION_POOL = []
class MSSQLConnection(BaseConnection):
"""MSSQL Connection class."""
name = "mssql"
def __init__(
self,
host=None,
database=None,
user=None,
port=None,
password=None,
prefix=None,
options={},
full_details={},
name=None,
):
self.host = host
if port:
self.port = int(port)
else:
self.port = port
self.database = database
self.user = user
self.password = password
self.prefix = prefix
self.full_details = full_details
self.options = options
self._cursor = None
self.transaction_level = 0
self.open = 0
if name:
self.name = name
def make_connection(self):
"""This sets the connection on the connection class"""
try:
import pyodbc
except ModuleNotFoundError:
raise DriverNotFound(
"You must have the 'pyodbc' package installed to make a connection to Microsoft SQL Server. Please install it using 'pip install pyodbc'"
)
if self.has_global_connection():
return self.get_global_connection()
mssql_driver = self.options.get("driver", "ODBC Driver 17 for SQL Server")
self._connection = pyodbc.connect(
f"DRIVER={mssql_driver};SERVER={self.host},{self.port};DATABASE={self.database};UID={self.user};PWD={self.password}",
autocommit=True,
)
self.open = 1
return self
def get_database_name(self):
return self.database
@classmethod
def get_default_query_grammar(cls):
return MSSQLGrammar
@classmethod
def get_default_platform(cls):
return MSSQLPlatform
@classmethod
def get_default_post_processor(cls):
return MSSQLPostProcessor
def reconnect(self):
pass
def commit(self):
"""Transaction"""
if self.get_transaction_level() == 1:
self._connection.commit()
self._connection.autocommit = True
self.transaction_level -= 1
def begin(self):
"""MSSQL Transaction"""
self._connection.autocommit = False
self.transaction_level += 1
return self
def rollback(self):
"""Transaction"""
if self.get_transaction_level() == 1:
self._connection.rollback()
self._connection.autocommit = True
self.transaction_level -= 1
def get_transaction_level(self):
"""Transaction"""
return self.transaction_level
def get_cursor(self):
return self._cursor
def query(self, query, bindings=(), results="*"):
"""Make the actual query that will reach the database and come back with a result.
Arguments:
query {string} -- A string query. This could be a qmarked string or a regular query.
bindings {tuple} -- A tuple of bindings
Keyword Arguments:
results {str|1} -- If the results is equal to an asterisks it will call 'fetchAll'
else it will return 'fetchOne' and return a single record. (default: {"*"})
Returns:
dict|None -- Returns a dictionary of results or None
"""
try:
if not self.open:
self.make_connection()
self._cursor = self._connection.cursor()
with self._cursor as cursor:
if isinstance(query, list) and not self._dry:
for q in query:
self.statement(q, ())
return
query = query.replace("'?'", "?")
self.statement(query, bindings)
if results == 1:
if not cursor.description:
return {}
columnNames = [column[0] for column in cursor.description]
result = cursor.fetchone()
return dict(zip(columnNames, result))
else:
if not cursor.description:
return {}
return self.format_cursor_results(cursor.fetchall())
return {}
except Exception as e:
raise QueryException(str(e)) from e
finally:
if self.get_transaction_level() <= 0:
self._connection.close()
def format_cursor_results(self, cursor_result):
columnNames = [column[0] for column in self.get_cursor().description]
results = []
for record in cursor_result:
results.append(dict(zip(columnNames, record)))
return results
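# Minimal usage sketch (illustrative only; host and credentials are placeholders):
#
#   conn = MSSQLConnection(host="localhost", port=1433, database="masonite",
#                          user="sa", password="secret").make_connection()
#   rows = conn.query("SELECT * FROM users WHERE id = '?'", (1,))  # list of dicts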
|
the-stack_106_22845 | #########
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from flask_login import current_user
from manager_rest import manager_exceptions
from manager_rest.test import base_test
from manager_rest.storage import models
from manager_rest.test.attribute import attr
from cloudify_rest_client.exceptions import CloudifyClientError
@attr(client_min_version=3.1, client_max_version=base_test.LATEST_API_VERSION)
class ManagerConfigTestCase(base_test.BaseServerTestCase):
def _put_config(self, **kwargs):
config = {
'name': 'x',
'value': 5,
'_updater_id': current_user.id,
'scope': 'rest'
}
config.update(kwargs)
instance = self.sm.put(models.Config(**config))
self.addCleanup(self.sm.delete, instance)
def test_get_config(self):
self._put_config()
result = self.client.manager.get_config()
expected = {
'name': 'x',
'value': 5,
'updater_name': current_user.username,
'is_editable': True,
'scope': 'rest'
}
actual = [item for item in result if item['name'] == 'x'][0]
actual = {k: actual[k] for k in expected}
self.assertEqual(actual, expected)
def test_get_by_name(self):
self._put_config()
result = self.client.manager.get_config(name='x')
self.assertEqual(result.name, 'x')
self.assertEqual(result.value, 5)
def test_get_by_scope(self):
self._put_config(scope='rest')
self._put_config(name='y', value=6, scope='new-scope')
result = self.client.manager.get_config(scope='new-scope')
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'y')
self.assertEqual(result[0].value, 6)
def test_get_by_name_missing(self):
with self.assertRaises(CloudifyClientError) as cm:
self.client.manager.get_config(name='x')
self.assertEqual(cm.exception.status_code, 404)
def test_put_config(self):
self._put_config()
result = self.client.manager.put_config('x', 6)
expected = {
'name': 'x',
'value': 6,
'updater_name': current_user.username,
'is_editable': True
}
actual = {k: result[k] for k in expected}
self.assertEqual(actual, expected)
def test_put_missing(self):
with self.assertRaises(CloudifyClientError) as cm:
self.client.manager.put_config('x', 6)
self.assertEqual(cm.exception.status_code, 404)
def test_put_config_schema(self):
self._put_config(schema={'type': 'number', 'maximum': 10})
self.client.manager.put_config('x', 6)
def test_put_config_schema_invalid(self):
self._put_config(schema={'type': 'number', 'maximum': 5})
with self.assertRaises(CloudifyClientError) as cm:
self.client.manager.put_config('x', 6)
self.assertEqual(cm.exception.status_code, 409)
def test_put_config_noneditable(self):
self._put_config(is_editable=False)
with self.assertRaises(CloudifyClientError) as cm:
self.client.manager.put_config('x', 6)
self.assertEqual(cm.exception.status_code, 409)
self.assertIn('is not editable', cm.exception.args[0])
def test_put_config_noneditable_force(self):
self._put_config(is_editable=False)
self.client.manager.put_config('x', 6, force=True)
def test_get_ambiguous_name(self):
self._put_config(name='x', scope='rest', value=5)
self._put_config(name='x', scope='agent', value=6)
with self.assertRaises(CloudifyClientError) as cm:
self.client.manager.get_config(name='x')
self.assertEqual(
cm.exception.error_code,
manager_exceptions.AmbiguousName.AMBIGUOUS_NAME_CODE)
self.assertIn('Expected 1 value, but found 2', str(cm.exception))
result = self.client.manager.get_config(name='rest.x')
self.assertEqual(result.value, 5)
result = self.client.manager.get_config(name='agent.x')
self.assertEqual(result.value, 6)
def test_update_ambiguous_name(self):
self._put_config(name='x', scope='rest', value=5)
self._put_config(name='x', scope='agent', value=6)
with self.assertRaises(CloudifyClientError):
self.client.manager.put_config('x', 7)
self.client.manager.put_config('agent.x', 7)
self.assertEqual(
self.client.manager.get_config(name='rest.x').value, 5)
self.assertEqual(
self.client.manager.get_config(name='agent.x').value, 7)
|
the-stack_106_22847 | # shelve.py - save/restore working directory state
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""save and restore changes to the working directory
The "hg shelve" command saves changes made to the working directory
and reverts those changes, resetting the working directory to a clean
state.
Later on, the "hg unshelve" command restores the changes saved by "hg
shelve". Changes can be restored even after updating to a different
parent, in which case Mercurial's merge machinery will resolve any
conflicts if necessary.
You can have more than one shelved change outstanding at a time; each
shelved change has a distinct name. For details, see the help for "hg
shelve".
"""
from __future__ import absolute_import
import collections
import errno
import itertools
from mercurial.i18n import _
from mercurial import (
bundle2,
bundlerepo,
changegroup,
cmdutil,
commands,
error,
exchange,
hg,
lock as lockmod,
mdiff,
merge,
node as nodemod,
patch,
phases,
repair,
scmutil,
templatefilters,
util,
)
from . import (
rebase,
)
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
backupdir = 'shelve-backup'
shelvedir = 'shelved'
class shelvedfile(object):
"""Helper for the file storing a single shelve
Handles common functions on shelve files (.hg/.patch) using
the vfs layer"""
def __init__(self, repo, name, filetype=None):
self.repo = repo
self.name = name
self.vfs = scmutil.vfs(repo.join(shelvedir))
self.backupvfs = scmutil.vfs(repo.join(backupdir))
self.ui = self.repo.ui
if filetype:
self.fname = name + '.' + filetype
else:
self.fname = name
def exists(self):
return self.vfs.exists(self.fname)
def filename(self):
return self.vfs.join(self.fname)
def backupfilename(self):
def gennames(base):
yield base
base, ext = base.rsplit('.', 1)
for i in itertools.count(1):
yield '%s-%d.%s' % (base, i, ext)
name = self.backupvfs.join(self.fname)
for n in gennames(name):
if not self.backupvfs.exists(n):
return n
def movetobackup(self):
if not self.backupvfs.isdir():
self.backupvfs.makedir()
util.rename(self.filename(), self.backupfilename())
def stat(self):
return self.vfs.stat(self.fname)
def opener(self, mode='rb'):
try:
return self.vfs(self.fname, mode)
except IOError as err:
if err.errno != errno.ENOENT:
raise
raise error.Abort(_("shelved change '%s' not found") % self.name)
def applybundle(self):
fp = self.opener()
try:
gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
if not isinstance(gen, bundle2.unbundle20):
gen.apply(self.repo, 'unshelve',
'bundle:' + self.vfs.join(self.fname),
targetphase=phases.secret)
if isinstance(gen, bundle2.unbundle20):
bundle2.applybundle(self.repo, gen,
self.repo.currenttransaction(),
source='unshelve',
url='bundle:' + self.vfs.join(self.fname))
finally:
fp.close()
def bundlerepo(self):
return bundlerepo.bundlerepository(self.repo.baseui, self.repo.root,
self.vfs.join(self.fname))
def writebundle(self, bases, node):
cgversion = changegroup.safeversion(self.repo)
if cgversion == '01':
btype = 'HG10BZ'
compression = None
else:
btype = 'HG20'
compression = 'BZ'
cg = changegroup.changegroupsubset(self.repo, bases, [node], 'shelve',
version=cgversion)
bundle2.writebundle(self.ui, cg, self.fname, btype, self.vfs,
compression=compression)
class shelvedstate(object):
"""Handle persistence during unshelving operations.
Handles saving and restoring a shelved state. Ensures that different
versions of a shelved state are possible and handles them appropriately.
"""
_version = 1
_filename = 'shelvedstate'
@classmethod
def load(cls, repo):
fp = repo.vfs(cls._filename)
try:
version = int(fp.readline().strip())
if version != cls._version:
raise error.Abort(_('this version of shelve is incompatible '
'with the version used in this repo'))
name = fp.readline().strip()
wctx = nodemod.bin(fp.readline().strip())
pendingctx = nodemod.bin(fp.readline().strip())
parents = [nodemod.bin(h) for h in fp.readline().split()]
stripnodes = [nodemod.bin(h) for h in fp.readline().split()]
branchtorestore = fp.readline().strip()
except (ValueError, TypeError) as err:
raise error.CorruptedState(str(err))
finally:
fp.close()
try:
obj = cls()
obj.name = name
obj.wctx = repo[wctx]
obj.pendingctx = repo[pendingctx]
obj.parents = parents
obj.stripnodes = stripnodes
obj.branchtorestore = branchtorestore
except error.RepoLookupError as err:
raise error.CorruptedState(str(err))
return obj
@classmethod
def save(cls, repo, name, originalwctx, pendingctx, stripnodes,
branchtorestore):
fp = repo.vfs(cls._filename, 'wb')
fp.write('%i\n' % cls._version)
fp.write('%s\n' % name)
fp.write('%s\n' % nodemod.hex(originalwctx.node()))
fp.write('%s\n' % nodemod.hex(pendingctx.node()))
fp.write('%s\n' %
' '.join([nodemod.hex(p) for p in repo.dirstate.parents()]))
fp.write('%s\n' %
' '.join([nodemod.hex(n) for n in stripnodes]))
fp.write('%s\n' % branchtorestore)
fp.close()
@classmethod
def clear(cls, repo):
util.unlinkpath(repo.join(cls._filename), ignoremissing=True)
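# Illustrative layout of the .hg/shelvedstate file written by save() above,
# one field per line (hashes abbreviated here for readability):
#
#   1                                  <- _version
#   default-01                         <- shelve name
#   <hex of original working ctx>
#   <hex of pending ctx>
#   <space-separated dirstate parent hashes>
#   <space-separated strip node hashes>
#   <branch to restore, possibly empty>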
def cleanupoldbackups(repo):
vfs = scmutil.vfs(repo.join(backupdir))
maxbackups = repo.ui.configint('shelve', 'maxbackups', 10)
hgfiles = [f for f in vfs.listdir() if f.endswith('.hg')]
hgfiles = sorted([(vfs.stat(f).st_mtime, f) for f in hgfiles])
if 0 < maxbackups and maxbackups < len(hgfiles):
bordermtime = hgfiles[-maxbackups][0]
else:
bordermtime = None
for mtime, f in hgfiles[:len(hgfiles) - maxbackups]:
if mtime == bordermtime:
# keep it, because timestamp can't decide exact order of backups
continue
base = f[:-3]
for ext in 'hg patch'.split():
try:
vfs.unlink(base + '.' + ext)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def _aborttransaction(repo):
'''Abort current transaction for shelve/unshelve, but keep dirstate
'''
tr = repo.currenttransaction()
repo.dirstate.savebackup(tr, suffix='.shelve')
tr.abort()
repo.dirstate.restorebackup(None, suffix='.shelve')
def createcmd(ui, repo, pats, opts):
"""subcommand that creates a new shelve"""
with repo.wlock():
cmdutil.checkunfinished(repo)
return _docreatecmd(ui, repo, pats, opts)
def _docreatecmd(ui, repo, pats, opts):
def mutableancestors(ctx):
"""return all mutable ancestors for ctx (included)
Much faster than the revset ancestors(ctx) & draft()"""
seen = set([nodemod.nullrev])
visit = collections.deque()
visit.append(ctx)
while visit:
ctx = visit.popleft()
yield ctx.node()
for parent in ctx.parents():
rev = parent.rev()
if rev not in seen:
seen.add(rev)
if parent.mutable():
visit.append(parent)
wctx = repo[None]
parents = wctx.parents()
if len(parents) > 1:
raise error.Abort(_('cannot shelve while merging'))
parent = parents[0]
origbranch = wctx.branch()
# we never need the user, so we use a generic user for all shelve operations
user = 'shelve@localhost'
label = repo._activebookmark or parent.branch() or 'default'
# slashes aren't allowed in filenames, therefore we rename it
label = label.replace('/', '_')
def gennames():
yield label
for i in xrange(1, 100):
yield '%s-%02d' % (label, i)
if parent.node() != nodemod.nullid:
desc = "changes to: %s" % parent.description().split('\n', 1)[0]
else:
desc = '(changes in empty repository)'
if not opts.get('message'):
opts['message'] = desc
name = opts.get('name')
lock = tr = None
try:
lock = repo.lock()
# use an uncommitted transaction to generate the bundle to avoid
# pull races. ensure we don't print the abort message to stderr.
tr = repo.transaction('commit', report=lambda x: None)
if name:
if shelvedfile(repo, name, 'hg').exists():
raise error.Abort(_("a shelved change named '%s' already exists"
) % name)
else:
for n in gennames():
if not shelvedfile(repo, n, 'hg').exists():
name = n
break
else:
raise error.Abort(_("too many shelved changes named '%s'") %
label)
# ensure we are not creating a subdirectory or a hidden file
if '/' in name or '\\' in name:
raise error.Abort(_('shelved change names may not contain slashes'))
if name.startswith('.'):
raise error.Abort(_("shelved change names may not start with '.'"))
interactive = opts.get('interactive', False)
includeunknown = (opts.get('unknown', False) and
not opts.get('addremove', False))
    extra = {}
if includeunknown:
s = repo.status(match=scmutil.match(repo[None], pats, opts),
unknown=True)
if s.unknown:
extra['shelve_unknown'] = '\0'.join(s.unknown)
repo[None].add(s.unknown)
if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
# In non-bare shelve we don't store newly created branch
# at bundled commit
repo.dirstate.setbranch(repo['.'].branch())
def commitfunc(ui, repo, message, match, opts):
hasmq = util.safehasattr(repo, 'mq')
if hasmq:
saved, repo.mq.checkapplied = repo.mq.checkapplied, False
backup = repo.ui.backupconfig('phases', 'new-commit')
try:
            repo.ui.setconfig('phases', 'new-commit', phases.secret)
editor = cmdutil.getcommiteditor(editform='shelve.shelve',
**opts)
return repo.commit(message, user, opts.get('date'), match,
editor=editor, extra=extra)
finally:
repo.ui.restoreconfig(backup)
if hasmq:
repo.mq.checkapplied = saved
def interactivecommitfunc(ui, repo, *pats, **opts):
match = scmutil.match(repo['.'], pats, {})
message = opts['message']
return commitfunc(ui, repo, message, match, opts)
if not interactive:
node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
else:
node = cmdutil.dorecord(ui, repo, interactivecommitfunc, None,
False, cmdutil.recordfilter, *pats, **opts)
if not node:
stat = repo.status(match=scmutil.match(repo[None], pats, opts))
if stat.deleted:
ui.status(_("nothing changed (%d missing files, see "
"'hg status')\n") % len(stat.deleted))
else:
ui.status(_("nothing changed\n"))
return 1
bases = list(mutableancestors(repo[node]))
shelvedfile(repo, name, 'hg').writebundle(bases, node)
cmdutil.export(repo, [node],
fp=shelvedfile(repo, name, 'patch').opener('wb'),
opts=mdiff.diffopts(git=True))
if ui.formatted():
desc = util.ellipsis(desc, ui.termwidth())
ui.status(_('shelved as %s\n') % name)
hg.update(repo, parent.node())
if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
repo.dirstate.setbranch(origbranch)
_aborttransaction(repo)
finally:
lockmod.release(tr, lock)
def _isbareshelve(pats, opts):
return (not pats
and not opts.get('interactive', False)
and not opts.get('include', False)
and not opts.get('exclude', False))
def _iswctxonnewbranch(repo):
return repo[None].branch() != repo['.'].branch()
def cleanupcmd(ui, repo):
"""subcommand that deletes all shelves"""
with repo.wlock():
for (name, _type) in repo.vfs.readdir(shelvedir):
suffix = name.rsplit('.', 1)[-1]
if suffix in ('hg', 'patch'):
shelvedfile(repo, name).movetobackup()
cleanupoldbackups(repo)
def deletecmd(ui, repo, pats):
"""subcommand that deletes a specific shelve"""
if not pats:
raise error.Abort(_('no shelved changes specified!'))
with repo.wlock():
try:
for name in pats:
for suffix in 'hg patch'.split():
shelvedfile(repo, name, suffix).movetobackup()
cleanupoldbackups(repo)
except OSError as err:
if err.errno != errno.ENOENT:
raise
raise error.Abort(_("shelved change '%s' not found") % name)
def listshelves(repo):
"""return all shelves in repo as list of (time, filename)"""
try:
names = repo.vfs.readdir(shelvedir)
except OSError as err:
if err.errno != errno.ENOENT:
raise
return []
info = []
for (name, _type) in names:
pfx, sfx = name.rsplit('.', 1)
if not pfx or sfx != 'patch':
continue
st = shelvedfile(repo, name).stat()
info.append((st.st_mtime, shelvedfile(repo, pfx).filename()))
return sorted(info, reverse=True)
def listcmd(ui, repo, pats, opts):
"""subcommand that displays the list of shelves"""
pats = set(pats)
width = 80
if not ui.plain():
width = ui.termwidth()
namelabel = 'shelve.newest'
for mtime, name in listshelves(repo):
sname = util.split(name)[1]
if pats and sname not in pats:
continue
ui.write(sname, label=namelabel)
namelabel = 'shelve.name'
if ui.quiet:
ui.write('\n')
continue
ui.write(' ' * (16 - len(sname)))
used = 16
age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True)
ui.write(age, label='shelve.age')
ui.write(' ' * (12 - len(age)))
used += 12
with open(name + '.patch', 'rb') as fp:
while True:
line = fp.readline()
if not line:
break
if not line.startswith('#'):
desc = line.rstrip()
if ui.formatted():
desc = util.ellipsis(desc, width - used)
ui.write(desc)
break
ui.write('\n')
if not (opts['patch'] or opts['stat']):
continue
difflines = fp.readlines()
if opts['patch']:
for chunk, label in patch.difflabel(iter, difflines):
ui.write(chunk, label=label)
if opts['stat']:
for chunk, label in patch.diffstatui(difflines, width=width,
git=True):
ui.write(chunk, label=label)
def singlepatchcmds(ui, repo, pats, opts, subcommand):
"""subcommand that displays a single shelf"""
if len(pats) != 1:
raise error.Abort(_("--%s expects a single shelf") % subcommand)
shelfname = pats[0]
if not shelvedfile(repo, shelfname, 'patch').exists():
raise error.Abort(_("cannot find shelf %s") % shelfname)
listcmd(ui, repo, pats, opts)
def checkparents(repo, state):
"""check parent while resuming an unshelve"""
if state.parents != repo.dirstate.parents():
raise error.Abort(_('working directory parents do not match unshelve '
'state'))
def pathtofiles(repo, files):
cwd = repo.getcwd()
return [repo.pathto(f, cwd) for f in files]
def unshelveabort(ui, repo, state, opts):
"""subcommand that abort an in-progress unshelve"""
with repo.lock():
try:
checkparents(repo, state)
util.rename(repo.join('unshelverebasestate'),
repo.join('rebasestate'))
try:
rebase.rebase(ui, repo, **{
'abort' : True
})
except Exception:
util.rename(repo.join('rebasestate'),
repo.join('unshelverebasestate'))
raise
mergefiles(ui, repo, state.wctx, state.pendingctx)
repair.strip(ui, repo, state.stripnodes, backup=False,
topic='shelve')
finally:
shelvedstate.clear(repo)
ui.warn(_("unshelve of '%s' aborted\n") % state.name)
def mergefiles(ui, repo, wctx, shelvectx):
"""updates to wctx and merges the changes from shelvectx into the
dirstate."""
oldquiet = ui.quiet
try:
ui.quiet = True
hg.update(repo, wctx.node())
files = []
files.extend(shelvectx.files())
files.extend(shelvectx.parents()[0].files())
# revert will overwrite unknown files, so move them out of the way
for file in repo.status(unknown=True).unknown:
if file in files:
util.rename(file, scmutil.origpath(ui, repo, file))
ui.pushbuffer(True)
cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
*pathtofiles(repo, files),
**{'no_backup': True})
ui.popbuffer()
finally:
ui.quiet = oldquiet
def restorebranch(ui, repo, branchtorestore):
if branchtorestore and branchtorestore != repo.dirstate.branch():
repo.dirstate.setbranch(branchtorestore)
ui.status(_('marked working directory as branch %s\n')
% branchtorestore)
def unshelvecleanup(ui, repo, name, opts):
"""remove related files after an unshelve"""
if not opts.get('keep'):
for filetype in 'hg patch'.split():
shelvedfile(repo, name, filetype).movetobackup()
cleanupoldbackups(repo)
def unshelvecontinue(ui, repo, state, opts):
"""subcommand to continue an in-progress unshelve"""
# We're finishing off a merge. First parent is our original
# parent, second is the temporary "fake" commit we're unshelving.
with repo.lock():
checkparents(repo, state)
ms = merge.mergestate.read(repo)
if [f for f in ms if ms[f] == 'u']:
raise error.Abort(
_("unresolved conflicts, can't continue"),
hint=_("see 'hg resolve', then 'hg unshelve --continue'"))
util.rename(repo.join('unshelverebasestate'),
repo.join('rebasestate'))
try:
rebase.rebase(ui, repo, **{
'continue' : True
})
except Exception:
util.rename(repo.join('rebasestate'),
repo.join('unshelverebasestate'))
raise
shelvectx = repo['tip']
        if shelvectx not in state.pendingctx.children():
# rebase was a no-op, so it produced no child commit
shelvectx = state.pendingctx
else:
# only strip the shelvectx if the rebase produced it
state.stripnodes.append(shelvectx.node())
mergefiles(ui, repo, state.wctx, shelvectx)
restorebranch(ui, repo, state.branchtorestore)
repair.strip(ui, repo, state.stripnodes, backup=False, topic='shelve')
shelvedstate.clear(repo)
unshelvecleanup(ui, repo, state.name, opts)
ui.status(_("unshelve of '%s' complete\n") % state.name)
@command('unshelve',
[('a', 'abort', None,
_('abort an incomplete unshelve operation')),
('c', 'continue', None,
_('continue an incomplete unshelve operation')),
('k', 'keep', None,
_('keep shelve after unshelving')),
('t', 'tool', '', _('specify merge tool')),
('', 'date', '',
_('set date for temporary commits (DEPRECATED)'), _('DATE'))],
_('hg unshelve [SHELVED]'))
def unshelve(ui, repo, *shelved, **opts):
"""restore a shelved change to the working directory
This command accepts an optional name of a shelved change to
restore. If none is given, the most recent shelved change is used.
If a shelved change is applied successfully, the bundle that
contains the shelved changes is moved to a backup location
(.hg/shelve-backup).
Since you can restore a shelved change on top of an arbitrary
commit, it is possible that unshelving will result in a conflict
between your changes and the commits you are unshelving onto. If
this occurs, you must resolve the conflict, then use
``--continue`` to complete the unshelve operation. (The bundle
will not be moved until you successfully complete the unshelve.)
(Alternatively, you can use ``--abort`` to abandon an unshelve
that causes a conflict. This reverts the unshelved changes, and
leaves the bundle in place.)
    If a bare shelved change (when no files are specified, without the
    interactive, include and exclude options) was made on a newly created
    branch, unshelving restores that branch information to the working
    directory.
After a successful unshelve, the shelved changes are stored in a
backup directory. Only the N most recent backups are kept. N
defaults to 10 but can be overridden using the ``shelve.maxbackups``
configuration option.
.. container:: verbose
       The timestamp in seconds is used to decide the order of backups. More
       than ``maxbackups`` backups may be kept if identical timestamps
       prevent deciding their exact order, for safety.
"""
with repo.wlock():
return _dounshelve(ui, repo, *shelved, **opts)
def _dounshelve(ui, repo, *shelved, **opts):
abortf = opts.get('abort')
continuef = opts.get('continue')
if not abortf and not continuef:
cmdutil.checkunfinished(repo)
if abortf or continuef:
if abortf and continuef:
raise error.Abort(_('cannot use both abort and continue'))
if shelved:
raise error.Abort(_('cannot combine abort/continue with '
'naming a shelved change'))
if abortf and opts.get('tool', False):
ui.warn(_('tool option will be ignored\n'))
try:
state = shelvedstate.load(repo)
except IOError as err:
if err.errno != errno.ENOENT:
raise
cmdutil.wrongtooltocontinue(repo, _('unshelve'))
except error.CorruptedState as err:
ui.debug(str(err) + '\n')
if continuef:
msg = _('corrupted shelved state file')
hint = _('please run hg unshelve --abort to abort unshelve '
'operation')
raise error.Abort(msg, hint=hint)
elif abortf:
msg = _('could not read shelved state file, your working copy '
'may be in an unexpected state\nplease update to some '
'commit\n')
ui.warn(msg)
shelvedstate.clear(repo)
return
if abortf:
return unshelveabort(ui, repo, state, opts)
elif continuef:
return unshelvecontinue(ui, repo, state, opts)
elif len(shelved) > 1:
raise error.Abort(_('can only unshelve one change at a time'))
elif not shelved:
shelved = listshelves(repo)
if not shelved:
raise error.Abort(_('no shelved changes to apply!'))
basename = util.split(shelved[0][1])[1]
ui.status(_("unshelving change '%s'\n") % basename)
else:
basename = shelved[0]
if not shelvedfile(repo, basename, 'patch').exists():
raise error.Abort(_("shelved change '%s' not found") % basename)
oldquiet = ui.quiet
lock = tr = None
forcemerge = ui.backupconfig('ui', 'forcemerge')
try:
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'unshelve')
lock = repo.lock()
tr = repo.transaction('unshelve', report=lambda x: None)
oldtiprev = len(repo)
pctx = repo['.']
tmpwctx = pctx
# The goal is to have a commit structure like so:
# ...-> pctx -> tmpwctx -> shelvectx
# where tmpwctx is an optional commit with the user's pending changes
# and shelvectx is the unshelved changes. Then we merge it all down
# to the original pctx.
# Store pending changes in a commit and remember added in case a shelve
# contains unknown files that are part of the pending change
s = repo.status()
addedbefore = frozenset(s.added)
if s.modified or s.added or s.removed or s.deleted:
ui.status(_("temporarily committing pending changes "
"(restore with 'hg unshelve --abort')\n"))
def commitfunc(ui, repo, message, match, opts):
hasmq = util.safehasattr(repo, 'mq')
if hasmq:
saved, repo.mq.checkapplied = repo.mq.checkapplied, False
backup = repo.ui.backupconfig('phases', 'new-commit')
try:
repo.ui.setconfig('phases', 'new-commit', phases.secret)
return repo.commit(message, 'shelve@localhost',
opts.get('date'), match)
finally:
repo.ui.restoreconfig(backup)
if hasmq:
repo.mq.checkapplied = saved
tempopts = {}
tempopts['message'] = "pending changes temporary commit"
tempopts['date'] = opts.get('date')
ui.quiet = True
node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
tmpwctx = repo[node]
ui.quiet = True
shelvedfile(repo, basename, 'hg').applybundle()
ui.quiet = oldquiet
shelvectx = repo['tip']
branchtorestore = ''
if shelvectx.branch() != shelvectx.p1().branch():
branchtorestore = shelvectx.branch()
# If the shelve is not immediately on top of the commit
# we'll be merging with, rebase it to be on top.
if tmpwctx.node() != shelvectx.parents()[0].node():
ui.status(_('rebasing shelved changes\n'))
try:
rebase.rebase(ui, repo, **{
'rev' : [shelvectx.rev()],
'dest' : str(tmpwctx.rev()),
'keep' : True,
'tool' : opts.get('tool', ''),
})
except error.InterventionRequired:
tr.close()
stripnodes = [repo.changelog.node(rev)
for rev in xrange(oldtiprev, len(repo))]
shelvedstate.save(repo, basename, pctx, tmpwctx, stripnodes,
branchtorestore)
util.rename(repo.join('rebasestate'),
repo.join('unshelverebasestate'))
raise error.InterventionRequired(
_("unresolved conflicts (see 'hg resolve', then "
"'hg unshelve --continue')"))
# refresh ctx after rebase completes
shelvectx = repo['tip']
        if shelvectx not in tmpwctx.children():
# rebase was a no-op, so it produced no child commit
shelvectx = tmpwctx
mergefiles(ui, repo, pctx, shelvectx)
restorebranch(ui, repo, branchtorestore)
# Forget any files that were unknown before the shelve, unknown before
# unshelve started, but are now added.
shelveunknown = shelvectx.extra().get('shelve_unknown')
if shelveunknown:
shelveunknown = frozenset(shelveunknown.split('\0'))
addedafter = frozenset(repo.status().added)
toforget = (addedafter & shelveunknown) - addedbefore
repo[None].forget(toforget)
shelvedstate.clear(repo)
# The transaction aborting will strip all the commits for us,
# but it doesn't update the inmemory structures, so addchangegroup
# hooks still fire and try to operate on the missing commits.
# Clean up manually to prevent this.
repo.unfiltered().changelog.strip(oldtiprev, tr)
unshelvecleanup(ui, repo, basename, opts)
_aborttransaction(repo)
finally:
ui.quiet = oldquiet
if tr:
tr.release()
lockmod.release(lock)
ui.restoreconfig(forcemerge)
@command('shelve',
[('A', 'addremove', None,
_('mark new/missing files as added/removed before shelving')),
('u', 'unknown', None,
_('store unknown files in the shelve')),
('', 'cleanup', None,
_('delete all shelved changes')),
('', 'date', '',
_('shelve with the specified commit date'), _('DATE')),
('d', 'delete', None,
_('delete the named shelved change(s)')),
('e', 'edit', False,
_('invoke editor on commit messages')),
('l', 'list', None,
_('list current shelves')),
('m', 'message', '',
_('use text as shelve message'), _('TEXT')),
('n', 'name', '',
_('use the given name for the shelved commit'), _('NAME')),
('p', 'patch', None,
_('show patch')),
('i', 'interactive', None,
_('interactive mode, only works while creating a shelve')),
('', 'stat', None,
_('output diffstat-style summary of changes'))] + commands.walkopts,
_('hg shelve [OPTION]... [FILE]...'))
def shelvecmd(ui, repo, *pats, **opts):
'''save and set aside changes from the working directory
Shelving takes files that "hg status" reports as not clean, saves
the modifications to a bundle (a shelved change), and reverts the
files so that their state in the working directory becomes clean.
    To restore these changes to the working directory, use "hg
    unshelve"; this will work even if you switch to a different
    commit.
When no files are specified, "hg shelve" saves all not-clean
files. If specific files or directories are named, only changes to
those files are shelved.
    In a bare shelve (when no files are specified, without the interactive,
    include and exclude options), shelving remembers whether the working
    directory was on a newly created branch, in other words on a different
    branch than its first parent. In this situation, unshelving restores
    that branch information to the working directory.
Each shelved change has a name that makes it easier to find later.
The name of a shelved change defaults to being based on the active
bookmark, or if there is no active bookmark, the current named
branch. To specify a different name, use ``--name``.
To see a list of existing shelved changes, use the ``--list``
option. For each shelved change, this will print its name, age,
and description; use ``--patch`` or ``--stat`` for more details.
To delete specific shelved changes, use ``--delete``. To delete
all shelved changes, use ``--cleanup``.
'''
allowables = [
('addremove', set(['create'])), # 'create' is pseudo action
('unknown', set(['create'])),
('cleanup', set(['cleanup'])),
# ('date', set(['create'])), # ignored for passing '--date "0 0"' in tests
('delete', set(['delete'])),
('edit', set(['create'])),
('list', set(['list'])),
('message', set(['create'])),
('name', set(['create'])),
('patch', set(['patch', 'list'])),
('stat', set(['stat', 'list'])),
]
def checkopt(opt):
if opts.get(opt):
for i, allowable in allowables:
if opts[i] and opt not in allowable:
raise error.Abort(_("options '--%s' and '--%s' may not be "
"used together") % (opt, i))
return True
if checkopt('cleanup'):
if pats:
raise error.Abort(_("cannot specify names when using '--cleanup'"))
return cleanupcmd(ui, repo)
elif checkopt('delete'):
return deletecmd(ui, repo, pats)
elif checkopt('list'):
return listcmd(ui, repo, pats, opts)
elif checkopt('patch'):
return singlepatchcmds(ui, repo, pats, opts, subcommand='patch')
elif checkopt('stat'):
return singlepatchcmds(ui, repo, pats, opts, subcommand='stat')
else:
return createcmd(ui, repo, pats, opts)
def extsetup(ui):
cmdutil.unfinishedstates.append(
[shelvedstate._filename, False, False,
_('unshelve already in progress'),
_("use 'hg unshelve --continue' or 'hg unshelve --abort'")])
cmdutil.afterresolvedstates.append(
[shelvedstate._filename, _('hg unshelve --continue')])
|
the-stack_106_22848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
shell.check_python()
# fix py2exe
if hasattr(sys, "frozen") and sys.frozen in \
("windows_exe", "console_exe"):
p = os.path.dirname(os.path.abspath(sys.executable))
os.chdir(p)
config = shell.get_config(True)
config['userhash'] = shell.hash_user(config['username'], config['password'])
daemon.daemon_exec(config)
try:
logging.info("starting local at %s:%d" %
(config['local_address'], config['local_port']))
dns_resolver = asyncdns.DNSResolver()
tcp_server = tcprelay.TCPRelay(config, {}, dns_resolver, True)
udp_server = udprelay.UDPRelay(config, dns_resolver, True)
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
tcp_server.add_to_loop(loop)
udp_server.add_to_loop(loop)
def handler(signum, _):
logging.warn('received SIGQUIT, doing graceful shutting down..')
tcp_server.close(next_tick=True)
udp_server.close(next_tick=True)
signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)
def int_handler(signum, _):
sys.exit(1)
signal.signal(signal.SIGINT, int_handler)
daemon.set_user(config.get('user', None))
loop.run()
except Exception as e:
shell.print_exception(e)
sys.exit(1)
if __name__ == '__main__':
main()
|
the-stack_106_22850 | # The MIT License (MIT)
#
# Copyright (c) 2019 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_gizmo.tft_gizmo`
================================================================================
Helper for the `TFT Gizmo <https://www.adafruit.com/product/4367>`_.
* Author(s): Carter Nelson, Melissa LeBlanc-Williams
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Gizmo.git"
import board
import displayio
from adafruit_st7789 import ST7789
# pylint: disable=invalid-name, too-few-public-methods
class TFT_Gizmo(ST7789):
"""Class representing a TFT Gizmo."""
def __init__(
self, *, spi=None, cs=board.RX, dc=board.TX, backlight=board.A3, rotation=180
):
displayio.release_displays()
if spi is None:
import busio # pylint: disable=import-outside-toplevel
spi = busio.SPI(board.SCL, MOSI=board.SDA)
self._display_bus = displayio.FourWire(spi, command=dc, chip_select=cs)
super().__init__(
self._display_bus,
width=240,
height=240,
rowstart=80,
backlight_pin=backlight,
rotation=rotation,
)
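# Minimal usage sketch (illustrative; assumes a CircuitPython board wired to the
# TFT Gizmo with the default pins used above):
#
#   from adafruit_gizmo import tft_gizmo
#   display = tft_gizmo.TFT_Gizmo()
#   # `display` behaves like any other displayio display; attach a
#   # displayio.Group to it to draw.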
|
the-stack_106_22851 | #!/usr/bin/python
"""This program parses various source files and extracts the comment texts.
Currently supported languages:
Bash/sh
C
C++
Go
HTML
Java
Javascript
Ruby
XML
Dependencies:
python-magic: pip install python-magic (optional)
"""
import sys
try:
import magic
has_magic = True
except ImportError:
has_magic = False
from comment_parser.parsers import c_parser
from comment_parser.parsers import common
from comment_parser.parsers import go_parser
from comment_parser.parsers import html_parser
from comment_parser.parsers import js_parser
from comment_parser.parsers import python_parser
from comment_parser.parsers import ruby_parser
from comment_parser.parsers import shell_parser
MIME_MAP = {
'application/javascript': js_parser, # Javascript
'text/html': html_parser, # HTML
'text/x-c': c_parser, # C
'text/x-c++': c_parser, # C++/C#
'text/x-go': go_parser, # Go
'text/x-java': c_parser, # Java
'text/x-java-source': c_parser, # Java
'text/x-javascript': js_parser, # Javascript
'text/x-python': python_parser, # Python
'text/x-ruby': ruby_parser, # Ruby
'text/x-shellscript': shell_parser, # Unix shell
'text/xml': html_parser, # XML
}
class Error(Exception):
"""Base Error class in this module."""
class UnsupportedError(Error):
"""Raised when trying to extract comments from an unsupported MIME type."""
class ParseError(Error):
"""Raised when a parser issue is encountered."""
def extract_comments(filename, mime=None):
"""Extracts and returns the comments from the given source file.
Args:
filename: String name of the file to extract comments from.
mime: Optional MIME type for file (str). Note some MIME types accepted
don't comply with RFC2045. If not given, an attempt to deduce the
MIME type will occur.
Returns:
Python list of parsers.common.Comment in the order that they appear in
the source file.
Raises:
UnsupportedError: If filename is of an unsupported MIME type.
"""
with open(filename, 'r') as code:
return extract_comments_from_str(code.read(), mime)
def extract_comments_from_str(code, mime=None):
"""Extracts and returns comments from the given source string.
Args:
code: String containing code to extract comments from.
mime: Optional MIME type for code (str). Note some MIME types accepted
don't comply with RFC2045. If not given, an attempt to deduce the
MIME type will occur.
Returns:
Python list of parsers.common.Comment in the order that they appear in
the source code.
Raises:
UnsupportedError: If code is of an unsupported MIME type.
"""
if not mime:
if not has_magic:
raise ImportError('python-magic was not imported')
mime = magic.from_buffer(code, mime=True)
if isinstance(mime, bytes):
mime = mime.decode('utf-8')
if mime not in MIME_MAP:
raise UnsupportedError('Unsupported MIME type %s' % mime)
try:
parser = MIME_MAP[mime]
return parser.extract_comments(code)
except common.Error as e:
raise ParseError() from e
def main(argv):
"""Extracts comments from files and prints them to stdout."""
for filename in argv:
try:
comments = extract_comments(filename)
for comment in comments:
print(comment.text())
except Error as exception:
sys.stderr.write(str(exception))
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_106_22853 | import helpers
# get input
raw_input = helpers.read_input_at_once(__file__)
# initialize parameters
stride = 3
count = 0
# initialize depths with first window
depths = []
for i in range(stride):
depths.append(int(raw_input[i]))
# getting first sum
aggregate_depth = sum(depths)
# looping over the rest
for i in range(stride, len(raw_input)):
# get the new reading
depths[i%stride] = int(raw_input[i])
# compare current window with previous one
if sum(depths) > aggregate_depth:
count += 1
# update depth
aggregate_depth = sum(depths)
print(count)
|
the-stack_106_22855 | import contextlib
from distutils.version import StrictVersion
import functools
import inspect
import os
from pathlib import Path
import shutil
import sys
import unittest
import warnings
import matplotlib as mpl
import matplotlib.style
import matplotlib.units
import matplotlib.testing
from matplotlib import cbook
from matplotlib import ft2font
from matplotlib import pyplot as plt
from matplotlib import ticker
from . import is_called_from_pytest
from .compare import comparable_formats, compare_images, make_test_filename
from .exceptions import ImageComparisonFailure
@contextlib.contextmanager
def _cleanup_cm():
orig_units_registry = matplotlib.units.registry.copy()
try:
with warnings.catch_warnings(), matplotlib.rc_context():
yield
finally:
matplotlib.units.registry.clear()
matplotlib.units.registry.update(orig_units_registry)
plt.close("all")
class CleanupTestCase(unittest.TestCase):
"""A wrapper for unittest.TestCase that includes cleanup operations."""
@classmethod
def setUpClass(cls):
cls._cm = _cleanup_cm().__enter__()
@classmethod
def tearDownClass(cls):
cls._cm.__exit__(None, None, None)
@cbook.deprecated("3.0")
class CleanupTest(object):
setup_class = classmethod(CleanupTestCase.setUpClass.__func__)
teardown_class = classmethod(CleanupTestCase.tearDownClass.__func__)
def test(self):
self._func()
def cleanup(style=None):
"""
A decorator to ensure that any global state is reset before
running a test.
Parameters
----------
style : str, optional
The name of the style to apply.
"""
# If cleanup is used without arguments, `style` will be a callable, and we
    # pass it directly to the wrapper generator. If cleanup is called with an
# argument, it is a string naming a style, and the function will be passed
# as an argument to what we return. This is a confusing, but somewhat
# standard, pattern for writing a decorator with optional arguments.
def make_cleanup(func):
if inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
with _cleanup_cm(), matplotlib.style.context(style):
yield from func(*args, **kwargs)
else:
@functools.wraps(func)
def wrapped_callable(*args, **kwargs):
with _cleanup_cm(), matplotlib.style.context(style):
func(*args, **kwargs)
return wrapped_callable
if isinstance(style, str):
return make_cleanup
else:
result = make_cleanup(style)
# Default of mpl_test_settings fixture and image_comparison too.
style = '_classic_test'
return result
def check_freetype_version(ver):
if ver is None:
return True
if isinstance(ver, str):
ver = (ver, ver)
ver = [StrictVersion(x) for x in ver]
found = StrictVersion(ft2font.__freetype_version__)
return ver[0] <= found <= ver[1]
def _checked_on_freetype_version(required_freetype_version):
import pytest
reason = ("Mismatched version of freetype. "
"Test requires '%s', you have '%s'" %
(required_freetype_version, ft2font.__freetype_version__))
return pytest.mark.xfail(
not check_freetype_version(required_freetype_version),
reason=reason, raises=ImageComparisonFailure, strict=False)
def remove_ticks_and_titles(figure):
figure.suptitle("")
null_formatter = ticker.NullFormatter()
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(null_formatter)
ax.xaxis.set_minor_formatter(null_formatter)
ax.yaxis.set_major_formatter(null_formatter)
ax.yaxis.set_minor_formatter(null_formatter)
try:
ax.zaxis.set_major_formatter(null_formatter)
ax.zaxis.set_minor_formatter(null_formatter)
except AttributeError:
pass
def _raise_on_image_difference(expected, actual, tol):
__tracebackhide__ = True
err = compare_images(expected, actual, tol, in_decorator=True)
if not os.path.exists(expected):
raise ImageComparisonFailure('image does not exist: %s' % expected)
if err:
for key in ["actual", "expected"]:
err[key] = os.path.relpath(err[key])
raise ImageComparisonFailure(
'images not close (RMS %(rms).3f):\n\t%(actual)s\n\t%(expected)s '
% err)
def _skip_if_format_is_uncomparable(extension):
import pytest
return pytest.mark.skipif(
extension not in comparable_formats(),
reason='Cannot compare {} files on this system'.format(extension))
def _mark_skip_if_format_is_uncomparable(extension):
import pytest
if isinstance(extension, str):
name = extension
marks = []
elif isinstance(extension, tuple):
# Extension might be a pytest ParameterSet instead of a plain string.
# Unfortunately, this type is not exposed, so since it's a namedtuple,
# check for a tuple instead.
name, = extension.values
marks = [*extension.marks]
else:
# Extension might be a pytest marker instead of a plain string.
name, = extension.args
marks = [extension.mark]
return pytest.param(name,
marks=[*marks, _skip_if_format_is_uncomparable(name)])
class _ImageComparisonBase(object):
"""
Image comparison base class
This class provides *just* the comparison-related functionality and avoids
any code that would be specific to any testing framework.
"""
def __init__(self, tol, remove_text, savefig_kwargs):
self.func = self.baseline_dir = self.result_dir = None
self.tol = tol
self.remove_text = remove_text
self.savefig_kwargs = savefig_kwargs
def delayed_init(self, func):
assert self.func is None, "it looks like same decorator used twice"
self.func = func
self.baseline_dir, self.result_dir = _image_directories(func)
def copy_baseline(self, baseline, extension):
baseline_path = os.path.join(self.baseline_dir, baseline)
orig_expected_fname = baseline_path + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = baseline_path + '.pdf'
expected_fname = make_test_filename(
os.path.join(self.result_dir,
os.path.basename(orig_expected_fname)),
'expected')
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
reason = ("Do not have baseline image {} because this "
"file does not exist: {}".format(expected_fname,
orig_expected_fname))
raise ImageComparisonFailure(reason)
return expected_fname
def compare(self, idx, baseline, extension):
__tracebackhide__ = True
fignum = plt.get_fignums()[idx]
fig = plt.figure(fignum)
if self.remove_text:
remove_ticks_and_titles(fig)
actual_fname = (
os.path.join(self.result_dir, baseline) + '.' + extension)
kwargs = self.savefig_kwargs.copy()
if extension == 'pdf':
kwargs.setdefault('metadata',
{'Creator': None, 'Producer': None,
'CreationDate': None})
fig.savefig(actual_fname, **kwargs)
expected_fname = self.copy_baseline(baseline, extension)
_raise_on_image_difference(expected_fname, actual_fname, self.tol)
@cbook.deprecated("3.0")
class ImageComparisonTest(CleanupTest, _ImageComparisonBase):
"""
Nose-based image comparison class
This class generates tests for a nose-based testing framework. Ideally,
this class would not be public, and the only publicly visible API would
be the :func:`image_comparison` decorator. Unfortunately, there are
existing downstream users of this class (e.g., pytest-mpl) so it cannot yet
be removed.
"""
def __init__(self, baseline_images, extensions, tol,
freetype_version, remove_text, savefig_kwargs, style):
_ImageComparisonBase.__init__(self, tol, remove_text, savefig_kwargs)
self.baseline_images = baseline_images
self.extensions = extensions
self.freetype_version = freetype_version
self.style = style
def setup(self):
func = self.func
plt.close('all')
self.setup_class()
try:
matplotlib.style.use(self.style)
matplotlib.testing.set_font_settings_for_testing()
func()
assert len(plt.get_fignums()) == len(self.baseline_images), (
"Test generated {} images but there are {} baseline images"
.format(len(plt.get_fignums()), len(self.baseline_images)))
except:
# Restore original settings before raising errors.
self.teardown_class()
raise
def teardown(self):
self.teardown_class()
def nose_runner(self):
func = self.compare
func = _checked_on_freetype_version(self.freetype_version)(func)
funcs = {extension: _skip_if_format_is_uncomparable(extension)(func)
for extension in self.extensions}
for idx, baseline in enumerate(self.baseline_images):
for extension in self.extensions:
yield funcs[extension], idx, baseline, extension
def __call__(self, func):
self.delayed_init(func)
import nose.tools
@functools.wraps(func)
@nose.tools.with_setup(self.setup, self.teardown)
def runner_wrapper():
yield from self.nose_runner()
return runner_wrapper
def _pytest_image_comparison(baseline_images, extensions, tol,
freetype_version, remove_text, savefig_kwargs,
style):
"""
Decorate function with image comparison for pytest.
This function creates a decorator that wraps a figure-generating function
with image comparison code. Pytest can become confused if we change the
signature of the function, so we indirectly pass anything we need via the
`mpl_image_comparison_parameters` fixture and extra markers.
"""
import pytest
extensions = map(_mark_skip_if_format_is_uncomparable, extensions)
def decorator(func):
@functools.wraps(func)
# Parameter indirection; see docstring above and comment below.
@pytest.mark.usefixtures('mpl_image_comparison_parameters')
@pytest.mark.parametrize('extension', extensions)
@pytest.mark.baseline_images(baseline_images)
# END Parameter indirection.
@pytest.mark.style(style)
@_checked_on_freetype_version(freetype_version)
@functools.wraps(func)
def wrapper(*args, **kwargs):
__tracebackhide__ = True
img = _ImageComparisonBase(tol=tol, remove_text=remove_text,
savefig_kwargs=savefig_kwargs)
img.delayed_init(func)
matplotlib.testing.set_font_settings_for_testing()
func(*args, **kwargs)
# Parameter indirection:
# This is hacked on via the mpl_image_comparison_parameters fixture
# so that we don't need to modify the function's real signature for
# any parametrization. Modifying the signature is very very tricky
# and likely to confuse pytest.
baseline_images, extension = func.parameters
assert len(plt.get_fignums()) == len(baseline_images), (
"Test generated {} images but there are {} baseline images"
.format(len(plt.get_fignums()), len(baseline_images)))
for idx, baseline in enumerate(baseline_images):
img.compare(idx, baseline, extension)
return wrapper
return decorator
def image_comparison(baseline_images, extensions=None, tol=0,
freetype_version=None, remove_text=False,
savefig_kwarg=None,
# Default of mpl_test_settings fixture and cleanup too.
style='_classic_test'):
"""
Compare images generated by the test with those specified in
*baseline_images*, which must correspond, else an `ImageComparisonFailure`
exception will be raised.
Parameters
----------
baseline_images : list or None
A list of strings specifying the names of the images generated by
calls to :meth:`matplotlib.figure.savefig`.
If *None*, the test function must use the ``baseline_images`` fixture,
either as a parameter or with `pytest.mark.usefixtures`. This value is
only allowed when using pytest.
extensions : None or list of str
The list of extensions to test, e.g. ``['png', 'pdf']``.
If *None*, defaults to all supported extensions: png, pdf, and svg.
In order to keep the size of the test suite from ballooning, we only
include the ``svg`` or ``pdf`` outputs if the test is explicitly
exercising a feature dependent on that backend (see also the
`check_figures_equal` decorator for that purpose).
tol : float, optional, default: 0
The RMS threshold above which the test is considered failed.
freetype_version : str or tuple
The expected freetype version or range of versions for this test to
pass.
remove_text : bool
Remove the title and tick text from the figure before comparison. This
is useful to make the baseline images independent of variations in text
rendering between different versions of FreeType.
This does not remove other, more deliberate, text, such as legends and
annotations.
savefig_kwarg : dict
Optional arguments that are passed to the savefig method.
style : string
Optional name for the base style to apply to the image test. The test
itself can also apply additional styles if desired. Defaults to the
'_classic_test' style.
"""
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
if savefig_kwarg is None:
#default no kwargs to savefig
savefig_kwarg = dict()
if is_called_from_pytest():
return _pytest_image_comparison(
baseline_images=baseline_images, extensions=extensions, tol=tol,
freetype_version=freetype_version, remove_text=remove_text,
savefig_kwargs=savefig_kwarg, style=style)
else:
if baseline_images is None:
raise ValueError('baseline_images must be specified')
return ImageComparisonTest(
baseline_images=baseline_images, extensions=extensions, tol=tol,
freetype_version=freetype_version, remove_text=remove_text,
savefig_kwargs=savefig_kwarg, style=style)
def check_figures_equal(*, extensions=("png", "pdf", "svg"), tol=0):
"""
Decorator for test cases that generate and compare two figures.
The decorated function must take two arguments, *fig_test* and *fig_ref*,
and draw the test and reference images on them. After the function
returns, the figures are saved and compared.
This decorator should be preferred over `image_comparison` when possible in
order to keep the size of the test suite from ballooning.
Parameters
----------
extensions : list, default: ["png", "pdf", "svg"]
The extensions to test.
tol : float
The RMS threshold above which the test is considered failed.
Examples
--------
Check that calling `Axes.plot` with a single argument plots it against
``[0, 1, 2, ...]``::
@check_figures_equal()
def test_plot(fig_test, fig_ref):
fig_test.subplots().plot([1, 3, 5])
fig_ref.subplots().plot([0, 1, 2], [1, 3, 5])
"""
def decorator(func):
import pytest
_, result_dir = map(Path, _image_directories(func))
if len(inspect.signature(func).parameters) == 2:
# Free-standing function.
@pytest.mark.parametrize("ext", extensions)
def wrapper(ext):
fig_test = plt.figure("test")
fig_ref = plt.figure("reference")
func(fig_test, fig_ref)
test_image_path = str(
result_dir / (func.__name__ + "." + ext))
ref_image_path = str(
result_dir / (func.__name__ + "-expected." + ext))
fig_test.savefig(test_image_path)
fig_ref.savefig(ref_image_path)
_raise_on_image_difference(
ref_image_path, test_image_path, tol=tol)
elif len(inspect.signature(func).parameters) == 3:
# Method.
@pytest.mark.parametrize("ext", extensions)
def wrapper(self, ext):
fig_test = plt.figure("test")
fig_ref = plt.figure("reference")
func(self, fig_test, fig_ref)
test_image_path = str(
result_dir / (func.__name__ + "." + ext))
ref_image_path = str(
result_dir / (func.__name__ + "-expected." + ext))
fig_test.savefig(test_image_path)
fig_ref.savefig(ref_image_path)
_raise_on_image_difference(
ref_image_path, test_image_path, tol=tol)
return wrapper
return decorator
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
For test module ``foo.bar.test_baz``, the baseline directory is at
``foo/bar/baseline_images/test_baz`` and the result directory at
``$(pwd)/result_images/test_baz``. The result directory is created if it
doesn't exist.
"""
module_path = Path(sys.modules[func.__module__].__file__)
baseline_dir = module_path.parent / "baseline_images" / module_path.stem
result_dir = Path().resolve() / "result_images" / module_path.stem
result_dir.mkdir(parents=True, exist_ok=True)
return str(baseline_dir), str(result_dir)
@cbook.deprecated("3.1", alternative="pytest.mark.backend")
def switch_backend(backend):
def switch_backend_decorator(func):
@functools.wraps(func)
def backend_switcher(*args, **kwargs):
try:
prev_backend = mpl.get_backend()
matplotlib.testing.setup()
plt.switch_backend(backend)
return func(*args, **kwargs)
finally:
plt.switch_backend(prev_backend)
return backend_switcher
return switch_backend_decorator
@cbook.deprecated("3.0")
def skip_if_command_unavailable(cmd):
"""
skips a test if a command is unavailable.
Parameters
----------
cmd : list of str
must be a complete command which should not
return a non zero exit code, something like
["latex", "-version"]
"""
from subprocess import check_output
try:
check_output(cmd)
except Exception:
import pytest
return pytest.mark.skip(reason='missing command: %s' % cmd[0])
return lambda f: f
|
the-stack_106_22856 | import random
import string
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import ListView, UpdateView, DeleteView
from explorebg.questions.forms import CreateQuestionForm, EditQuestionForm, EditAnswerForm
from explorebg.questions.models import Question, Answer, Code
@login_required
def start_quiz(request):
if request.method == "POST":
questions = [el for el in Question.objects.all()]
random_questions = random.sample(questions, 2)
context = {
'questions': random_questions
}
return render(request, 'quiz/play_quiz.html', context)
return render(request, 'quiz/start_quiz.html')
@login_required
def get_result(request):
if request.method == "POST":
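        # Map each question's text to its correct answer text, then count how many
        # submitted form values match the correct answer for their question.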
dict_qa = {}
for a in Answer.objects.all():
if a.correct:
dict_qa[a.question.text] = a.text
correct_answers = 0
for k, v in request.POST.items():
if dict_qa.get(k) == v:
correct_answers += 1
context = {
'correct_answers': correct_answers
}
return render(request, 'quiz/finish_quiz.html', context)
class MyQuestions(LoginRequiredMixin, ListView):
model = Question
template_name = 'questions/my_questions.html'
ordering = ['text']
def get_queryset(self):
return Question.objects.filter(user=self.request.user)
@login_required
def create_question(request):
if request.method == 'POST':
form = CreateQuestionForm(request.POST)
if form.is_valid():
question_text = form.cleaned_data['question_text']
first_answer = form.cleaned_data['first_answer']
second_answer = form.cleaned_data['second_answer']
correct_answer = form.cleaned_data['correct_answer']
question = Question(
text=question_text,
user=request.user,
)
question.save()
answer = Answer(
text=first_answer,
correct=False,
question=question
)
answer.save()
answer = Answer(
text=second_answer,
correct=False,
question=question
)
answer.save()
answer = Answer(
text=correct_answer,
correct=True,
question=question
)
answer.save()
return redirect('my questions')
else:
form = CreateQuestionForm()
context = {
'form': form,
}
return render(request, 'questions/create_question.html', context)
class EditQuestionView(LoginRequiredMixin, UpdateView):
model = Question
template_name = 'questions/edit.html'
form_class = EditQuestionForm
success_url = reverse_lazy('my questions')
class DeleteQuestionView(LoginRequiredMixin, DeleteView):
template_name = 'questions/delete-question.html'
model = Question
success_url = reverse_lazy('my questions')
# def like_question(request, pk):
# question = Question.objects.get(pk=pk)
# like_object_by_user = question.like_set.filter(user_id=request.user.id).first()
#
# if like_object_by_user:
# like_object_by_user.delete()
# else:
# like = Like(
# question=question,
# user=request.user,
# )
# like.save()
# return redirect('quiz')
class EditAnswerView(LoginRequiredMixin, UpdateView):
model = Answer
template_name = 'questions/edit_answer.html'
form_class = EditAnswerForm
success_url = reverse_lazy('my questions')
def get_promo_code():
symbols = string.ascii_lowercase + string.ascii_uppercase + string.digits
length = 5
code = "".join(random.sample(symbols, length))
return code
@login_required
def send_email(request):
code = get_promo_code()
user = request.user
new_code = Code(
text=code,
user=user,
)
new_code.save()
# TODO if error: code = get new code
send_mail('Hello from Explore Quiz',
f"Your code is {code}",
'[email protected]',
[f'{user}'],
fail_silently=False)
context = {
'code': code,
}
return render(request, 'quiz/send_email.html', context)
|
the-stack_106_22861 | # https://github.com/tensorflow/tensorflow/blob/r1.7/tensorflow/python/ops/array_grad.py#L725
from math import ceil
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
def _ExtractImagePatchesGrad(op, grad):
''' Gradient function of tf.extract_image_patches. '''
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].get_shape()]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
_, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].get_shape()]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
_, stride_r, stride_h, _ = op.get_attr("strides")
_, rate_r, rate_c, _ = op.get_attr("rates")
padding = op.get_attr("padding")
ksize_r_eff = ksize_r + (ksize_r - 1) * (rate_r - 1)
ksize_c_eff = ksize_c + (ksize_c - 1) * (rate_c - 1)
if padding == b"SAME":
rows_out = int(ceil(rows_in / stride_r))
cols_out = int(ceil(cols_in / stride_h))
pad_rows = ((rows_out - 1) * stride_r + ksize_r_eff - rows_in) // 2
pad_cols = ((cols_out - 1) * stride_h + ksize_c_eff - cols_in) // 2
elif padding == b"VALID":
rows_out = int(ceil((rows_in - ksize_r_eff + 1) / stride_r))
cols_out = int(ceil((cols_in - ksize_c_eff + 1) / stride_h))
pad_rows = (rows_out - 1) * stride_r + ksize_r_eff - rows_in
pad_cols = (cols_out - 1) * stride_h + ksize_c_eff - cols_in
pad_rows, pad_cols = max(0, pad_rows), max(0, pad_cols)
grad_expanded = array_ops.transpose(
array_ops.reshape(
grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
row_steps = range(0, rows_out * stride_r, stride_r)
col_steps = range(0, cols_out * stride_h, stride_h)
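  # Build a sparse 0/1 matrix that maps every input pixel (rows_in * cols_in)
  # to the patch positions (rows_out * cols_out * ksize_r * ksize_c) it was
  # copied into by extract_image_patches; multiplying it with the flattened
  # output gradient scatters/accumulates the gradients back onto the input.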
idx = []
for i in range(rows_out):
for j in range(cols_out):
r_low, c_low = row_steps[i] - pad_rows, col_steps[j] - pad_cols
r_high, c_high = r_low + ksize_r_eff, c_low + ksize_c_eff
idx.extend([(r * (cols_in) + c, i * (cols_out * ksize_r * ksize_c) + j
* (ksize_r * ksize_c) + ri * (ksize_c) + ci)
for (ri, r) in enumerate(range(r_low, r_high, rate_r))
for (ci, c) in enumerate(range(c_low, c_high, rate_c))
if 0 <= r and r < rows_in and 0 <= c and c < cols_in])
sp_shape = (rows_in * cols_in, rows_out * cols_out * ksize_r * ksize_c)
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
array_ops.ones((len(idx),), dtype=ops.dtypes.float32), sp_shape)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out] |
the-stack_106_22862 | import os
import os.path as osp
import random
from math import floor
import dataset_split.dir_utils as utils
# NOTE: assumed sibling module providing get_arguments(), used in main() below
import dataset_split.arguments as arguments
#CONVENTION: args come in (train, test, val) format
COPY_FOLDER = '_split_'
def replicate_classes(path, classes, subdirs):
'''
Will create all classes (dirs) inside every
subdir found in path.
'''
for subd in subdirs:
utils.create_dirs(osp.join(path, subd), classes)
def calculate_splits(items, folders, shuffle_enabled):
'''
With a list of files (items) and numbers for folders ratios,
shuffles (if requested) the files and decides their final
folder in the split.
'''
#If requested, generates a random seed and shuffle items
if shuffle_enabled:
random.seed = os.urandom(49)
random.shuffle(items)
n = len(items)
#Calculating where the list will be sliced
train_limit = round(folders['train'] * n)
test_limit = train_limit + floor(folders['test'] * n)
#Sliced list for every subset
split = {'train': items[:train_limit],
'test': items[train_limit:test_limit],
'valid': items[test_limit:]
}
return split
def move_data(orig_path, dest_path, splits, subd):
'''
With the split already decided, this function
moves the files to their respective destination.
'''
for d in splits.keys():
origin = osp.join(orig_path, subd)
dest = osp.join(dest_path, d, subd)
utils.move_files(origin, dest, splits[d])
def copy_data(orig_path, dest_path, splits, subd):
'''
With the split already decided, this function
copies the files to their respective destination.
'''
for d in splits.keys():
origin = osp.join(orig_path, subd)
dest = osp.join(dest_path, d, subd)
utils.copy_files(origin, dest, splits[d])
def split_data(orig_path, dest_path, classes, new_folders,
copy_enabled, shuffle_enabled):
'''
Calculates the split for traint-valid-test dirs
and moves the right amount of files.
'''
for subd in classes:
temp_path = osp.join(orig_path, subd)
items = [f for f in os.listdir(temp_path)]
splits = calculate_splits(items, new_folders, shuffle_enabled)
if copy_enabled:
copy_data(orig_path, dest_path, splits, subd)
else:
move_data(orig_path, dest_path, splits, subd)
def split(ratio, orig_path, copy_enabled, shuffle_enabled=True):
'''
Splits a dataset after reading it's path
through cmd line and the desired ratio.
Will create three new folders: train, test, valid
'''
# Making sure that 3 floats came from cmd line
assert len(ratio) == 3, "Ratio didn't get 3 parameters"
assert (0.99 <= sum(ratio) <= 1.01), "Ratio doesn't sum up to 1"
data_folders = dict(zip(['train', 'test', 'valid'], ratio))
dest_path = orig_path
classes = utils.list_dirs(orig_path)
if copy_enabled:
dest_path = osp.join(orig_path, COPY_FOLDER)
os.mkdir(dest_path)
utils.create_dirs(dest_path, data_folders.keys())
replicate_classes(dest_path, classes, data_folders)
split_data(orig_path, dest_path, classes, data_folders, copy_enabled, shuffle_enabled)
# If copy is enabled, remove the original dataset folders
if not copy_enabled:
utils.remove_dirs(orig_path, classes)
def main():
args = arguments.get_arguments()
path = osp.join(os.getcwd(), args.path)
split(args.ratio, path, args.copy, not args.noshuffle)
if __name__ == '__main__':
main()
|
the-stack_106_22864 | import os
import requests
import operator
import re
import nltk
from flask import Flask, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from stop_words import stops
from collections import Counter
from bs4 import BeautifulSoup
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
from models import Result
@app.route('/', methods=['GET', 'POST'])
def index():
errors = []
results = {}
if request.method == "POST":
# get url that the person has entered
try:
url = request.form['url']
r = requests.get(url)
except:
errors.append(
"Unable to get URL. Please make sure it's valid and try again."
)
return render_template('index.html', errors=errors)
if r:
# text processing
raw = BeautifulSoup(r.text, 'html.parser').get_text()
nltk.data.path.append('./nltk_data/') # set the path
tokens = nltk.word_tokenize(raw)
text = nltk.Text(tokens)
# remove punctuation, count raw words
nonPunct = re.compile('.*[A-Za-z].*')
raw_words = [w for w in text if nonPunct.match(w)]
raw_word_count = Counter(raw_words)
# stop words
no_stop_words = [w for w in raw_words if w.lower() not in stops]
no_stop_words_count = Counter(no_stop_words)
# save the results
results = sorted(
no_stop_words_count.items(),
key=operator.itemgetter(1),
reverse=True
)
try:
result = Result(
url=url,
result_all=raw_word_count,
result_no_stop_words=no_stop_words_count
)
db.session.add(result)
db.session.commit()
except:
errors.append("Unable to add item to database.")
return render_template('index.html', errors=errors, results=results)
if __name__ == '__main__':
app.run() |
the-stack_106_22865 | import tensorflow as tf
from numpy import random
from glob import glob
from custom_models import VGG19
from ImageLoader import *
from functools import reduce
VGG_MEAN = [103.939, 116.779, 123.68, 102.32]
HPF_risks = []
class COX_model_with_VGG(VGG19):
def build(self, rgb, train_mode=None):
"""
load variable from npy to build the VGG
:param rgb: rgb image [batch, height, width, 3] values scaled [0, 1]
:param rgba: rgba image [batch, height, width, 4]
:param train_mode: a bool tensor, usually a placeholder: if True, dropout will be turned on
"""
rgb_scaled = rgb * 255.0
# Convert RGB to BGR
red, green, blue, alpha = tf.split(axis=3, num_or_size_splits=4, value=rgb_scaled)
assert red.get_shape().as_list()[1:] == [256, 256, 1]
assert green.get_shape().as_list()[1:] == [256, 256, 1]
assert blue.get_shape().as_list()[1:] == [256, 256, 1]
assert alpha.get_shape().as_list()[1:] == [256, 256, 1]
        bgr = tf.concat(axis=3, values=[
blue - VGG_MEAN[0],
green - VGG_MEAN[1],
red - VGG_MEAN[2],
alpha - VGG_MEAN[3]
])
assert bgr.get_shape().as_list()[1:] == [256, 256, 4]
self.conv1_1 = self.conv_layer(bgr, 4, 64, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, 64, 64, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, 64, 128, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, 128, 128, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, 128, 256, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, 256, 256, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, 256, 256, "conv3_3")
self.conv3_4 = self.conv_layer(self.conv3_3, 256, 256, "conv3_4")
self.pool3 = self.max_pool(self.conv3_4, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, 256, 512, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, 512, 512, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, 512, 512, "conv4_3")
self.conv4_4 = self.conv_layer(self.conv4_3, 512, 512, "conv4_4")
self.pool4 = self.max_pool(self.conv4_4, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, 512, 512, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, 512, 512, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, 512, 512, "conv5_3")
self.conv5_4 = self.conv_layer(self.conv5_3, 512, 512, "conv5_4")
self.pool5 = self.max_pool(self.conv5_4, 'pool5')
self.fc6 = self.fc_layer(self.pool5, 32768, 1000, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
if train_mode is not None:
self.relu6 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu6, self.dropout), lambda: self.relu6)
elif self.trainable:
self.relu6 = tf.nn.dropout(self.relu6, self.dropout)
self.fc7 = self.fc_layer(self.relu6, 1000, 1000, "fc7")
self.relu7 = tf.nn.relu(self.fc7)
if train_mode is not None:
self.relu7 = tf.cond(train_mode, lambda: tf.nn.dropout(self.relu7, self.dropout), lambda: self.relu7)
elif self.trainable:
self.relu7 = tf.nn.dropout(self.relu7, self.dropout)
self.fc8 = self.fc_layer(self.relu7, 1000, 256, "fc8")
self.risk = self.Cox_layer(self.fc8, 256, 1, "Cox")
return self.risk
def Cox_layer(self, bottom, in_channels, out_channels, name):
with tf.variable_scope(name):
            Betas, biases = self.get_fc_var(in_channels, out_channels, name)
            x = tf.reshape(bottom, [-1, in_channels])
Risk = tf.matmul(x, Betas)
return Risk
if __name__=="__main__":
input_Tensor, SubSets = random_image_tensor()
is_training = tf.placeholder(tf.bool, name='MODE')
x = tf.placeholder(tf.float32, shape=(14, 256, 256, 4))
y = []
for i in range(0,13):
individual_x = tf.slice(x, [i, 0, 0, 0], [1, 256, 256, 4])
individual_y = tf.Variable(tf.zeros([1]), tf.float32)
print("x = ", x.shape)
print("sliced x = ", individual_x.shape)
SCNN = COX_model_with_VGG()
individual_y = SCNN.build(individual_x, is_training)
y.append(individual_y)
# CONVOLUTIONAL NEURAL NETWORK MODEL
# DEFINE LOSS
with tf.name_scope("LOSS"):
likelihood = tf.Variable(tf.zeros([1]), tf.float32)
for i in range(0,13):
SetofAtRisk = tf.Variable(tf.zeros([1]), tf.float32)
if len(SubSets[i]) > 0:
for j in SubSets[i]:
                    SetofAtRisk = tf.add(SetofAtRisk, tf.exp(y[j]))
likelihood = likelihood + y[i] - tf.log(SetofAtRisk)
else:
continue
likelihood = -1.0*likelihood
# DEFINE OPTIMIZER
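    # NOTE: batch_size, train_size and LOGS_DIRECTORY are used below but never
    # defined in this file; the values here are placeholder assumptions only.
    batch_size = 14            # assumed: the 14 stacked individuals form one batch
    train_size = 14            # assumed: training-set size for the LR decay schedule
    LOGS_DIRECTORY = "./logs"  # assumed: TensorBoard summary output directory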
with tf.name_scope("ADAGRAD"):
batch = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
1e-3, # LEARNING_RATE
batch * batch_size, # GLOBAL_STEP
train_size, # DECAY_STEP
4e-4, # DECAY_RATE
staircase=True) # LR = LEARNING_RATE*DECAY_RATE^(GLOBAL_STEP/DECAY_STEP)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(likelihood,global_step=batch)
# SUMMARIES For TensorBoard
saver = tf.train.Saver()
tf.summary.scalar('learning_rate', learning_rate)
    tf.summary.scalar('loss', likelihood)
merged_summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(LOGS_DIRECTORY, graph=tf.get_default_graph())
print ("MODEL DEFINED.")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer(), feed_dict={is_training: True, x: input_Tensor})
|
the-stack_106_22866 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Downloads binary tar files from S3 and unpacks them locally.
Takes paths that corresponds to credentials for the user's AWS account,
S3 source and local destination paths and downloads and unpacks the tar files
Example:
$ python sync_dir.py \
--csv_path=/path/to/credentials.csv \
--s3_dir=s3://path/to/src/ \
--local_dir=/path/to/dst/
Attributes:
    FLAGS (absl.flags._flagvalues.FlagValues): Globally defined flags for sync_dir.py.
"""
import glob
import os
import tarfile
import sys
from absl import app, flags
from watchdog.events import FileSystemEventHandler, FileMovedEvent
from watchdog.observers import Observer
dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir_root = os.path.dirname(dir_scripts)
sys.path.append(dir_root)
sys.path.append(os.path.join(dir_scripts, "render"))
sys.path.append(os.path.join(dir_scripts, "util"))
import glog_check as glog
from util import AWSUtil
FLAGS = flags.FLAGS
class ViewerHandler(FileSystemEventHandler):
"""Handles events triggered for extracting tar files as soon as we receive them.
Attributes:
local_dir (str): Path of the local directory to watch.
"""
def on_moved(self, event):
"""When a tar file is created we unpack its contents and delete the file.
Args:
event (watchdog.FileSystemEvent): Watchdog event for when tar file is created.
"""
if not isinstance(event, FileMovedEvent):
return
fn = event.dest_path
if fn.endswith(".tar") and os.path.isfile(fn):
extract_and_delete_tar(fn)
def extract_and_delete_tar(fn):
print(f"Extracting {fn}...")
tar = tarfile.open(fn)
tar.extractall(path=os.path.dirname(fn))
tar.close()
os.remove(fn)
def main(argv):
"""Downloads binary tar files from S3 and unpacks them locally.
Args:
argv (list[str]): List of arguments (used interally by abseil).
"""
os.makedirs(FLAGS.local_dir, exist_ok=True)
if FLAGS.watch:
event_handler = ViewerHandler()
observer = Observer()
observer.schedule(event_handler, path=FLAGS.local_dir, recursive=False)
observer.start()
# Download tar files
glog.check(FLAGS.s3_dir.startswith("s3://"), "S3 directory must start with s3://")
aws_util = AWSUtil(FLAGS.csv_path)
try:
print("Syncing files from S3...")
aws_util.s3_sync(
FLAGS.s3_dir,
FLAGS.local_dir,
exclude="*",
include=["*.tar", "*.json"],
run_silently=not FLAGS.verbose,
)
except KeyboardInterrupt:
if FLAGS.watch:
observer.stop()
if FLAGS.watch:
observer.stop()
observer.join()
# One last pass for missed files
tars = list(glob.iglob(f"{FLAGS.local_dir}/*.tar", recursive=False))
for fn in tars:
extract_and_delete_tar(fn)
if __name__ == "__main__":
# Abseil entry point app.run() expects all flags to be already defined
flags.DEFINE_string("csv_path", None, "path to AWS credentials CSV")
flags.DEFINE_string("local_dir", None, "path to local directory to sync to")
flags.DEFINE_string("s3_dir", None, "path to S3 bin directory (starts with s3://)")
flags.DEFINE_boolean("verbose", False, "Verbose mode")
flags.DEFINE_boolean("watch", False, "Watch for files and extract as they appear")
# Required FLAGS.
flags.mark_flag_as_required("csv_path")
flags.mark_flag_as_required("s3_dir")
flags.mark_flag_as_required("local_dir")
app.run(main)
|
the-stack_106_22867 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import testtools
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestServerAdvancedOps(manager.ScenarioTest):
"""
This test case stresses some advanced server instance operations:
* Resizing an instance
* Sequence suspend resume
"""
@classmethod
def skip_checks(cls):
super(TestServerAdvancedOps, cls).skip_checks()
if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestServerAdvancedOps, cls).setup_credentials()
@test.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize is not available.')
@test.services('compute')
def test_resize_server_confirm(self):
# We create an instance for use in this test
instance = self.create_server()
instance_id = instance['id']
resize_flavor = CONF.compute.flavor_ref_alt
LOG.debug("Resizing instance %s from flavor %s to flavor %s",
instance['id'], instance['flavor']['id'], resize_flavor)
self.servers_client.resize(instance_id, resize_flavor)
self.servers_client.wait_for_server_status(instance_id,
'VERIFY_RESIZE')
LOG.debug("Confirming resize of instance %s", instance_id)
self.servers_client.confirm_resize(instance_id)
self.servers_client.wait_for_server_status(instance_id,
'ACTIVE')
@test.idempotent_id('949da7d5-72c8-4808-8802-e3d70df98e2c')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.services('compute')
def test_server_sequence_suspend_resume(self):
# We create an instance for use in this test
instance = self.create_server()
instance_id = instance['id']
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, instance['status'])
self.servers_client.suspend_server(instance_id)
self.servers_client.wait_for_server_status(instance_id,
'SUSPENDED')
fetched_instance = self.servers_client.show_server(instance_id)
LOG.debug("Resuming instance %s. Current status: %s",
instance_id, fetched_instance['status'])
self.servers_client.resume_server(instance_id)
self.servers_client.wait_for_server_status(instance_id,
'ACTIVE')
fetched_instance = self.servers_client.show_server(instance_id)
LOG.debug("Suspending instance %s. Current status: %s",
instance_id, fetched_instance['status'])
self.servers_client.suspend_server(instance_id)
self.servers_client.wait_for_server_status(instance_id,
'SUSPENDED')
fetched_instance = self.servers_client.show_server(instance_id)
LOG.debug("Resuming instance %s. Current status: %s",
instance_id, fetched_instance['status'])
self.servers_client.resume_server(instance_id)
self.servers_client.wait_for_server_status(instance_id,
'ACTIVE')
|
the-stack_106_22868 | # qubit number=2
# total number=7
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.y(input_qubit[1]) # number=2
prog.y(input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.swap(input_qubit[1],input_qubit[0]) # number=5
prog.swap(input_qubit[1],input_qubit[0]) # number=6
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit82.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_106_22869 | import unittest, sys
sys.path.append('src')
from ganon import ganon
from ganon.config import Config
base_dir = "tests/ganon/"
sys.path.append(base_dir)
from utils import *
data_dir = base_dir + "data/"
class TestBuildOnline(unittest.TestCase):
results_dir = base_dir + "results/integration_online/build/"
default_params = {"input_files": [data_dir+"build/bacteria_NC_010333.1.fasta.gz",
data_dir+"build/bacteria_NC_017164.1.fasta.gz",
data_dir+"build/bacteria_NC_017163.1.fasta.gz",
data_dir+"build/bacteria_NC_017543.1.fasta.gz"],
"taxdump_file": [data_dir+"mini_nodes.dmp",
data_dir+"mini_names.dmp"],
"write_seq_info_file": True,
"rank": "species",
"quiet": True}
@classmethod
def setUpClass(self):
setup_dir(self.results_dir)
def test_default(self):
"""
ganon build with default parameters (online: eutils, taxdump)
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_default"
params["taxdump_file"] = []
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
def test_specialization_assembly(self):
"""
ganon build --specialization assembly (online: eutils)
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_specialization_assembly"
params["specialization"] = "assembly"
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test - count assemblies on tax (3 bac)
self.assertEqual(sum(res["tax_pd"]["rank"]=="assembly"), 3, "error retrieving assembly accessions")
# Check if all targets starts with "GCF_"
self.assertTrue((res["map_pd"]["target"].map(lambda x: x.startswith("GCF_"))).all(), "failed to retrieve assembly accession")
def test_specialization_sequence(self):
"""
ganon build --specialization sequence (online: eutils)
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_specialization_sequence"
params["specialization"] = "sequence"
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test - count sequences
self.assertEqual(sum(res["tax_pd"]["rank"]=="sequence"), 4, "failed to use sequence accession as specialization")
# Check if all targets starts with "NC_"
self.assertTrue((res["map_pd"]["target"].map(lambda x: x.startswith("NC_"))).all(), "failed to use sequence accession as specialization")
def test_specialization_file(self):
"""
ganon build --specialization file (online: eutils)
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_specialization_file"
params["specialization"] = "file"
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test - count files
self.assertEqual(sum(res["tax_pd"]["rank"]=="file"), 4, "failed to use file name as specialization")
# Check if all targets ends with ".fasta.gz"
self.assertTrue((res["map_pd"]["target"].map(lambda x: x.endswith(".fasta.gz"))).all(), "failed to use file name as specialization")
def test_specialization_file_single(self):
"""
ganon build --specialization file (with one file only online: eutils)
"""
params = self.default_params.copy()
merge_gz(params["input_files"], self.results_dir + "merged_input_files.fasta.gz")
params["input_files"] = self.results_dir + "merged_input_files.fasta.gz"
params["db_prefix"] = self.results_dir + "test_specialization_file_single"
params["specialization"] = "file"
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test - count files
self.assertEqual(sum(res["tax_pd"]["rank"]=="file"), 4, "failed to use file name as specialization")
# Check if all targets starts with "NC_" - fails to use file specialization and replaces it with sequence accession
self.assertTrue((res["map_pd"]["target"].map(lambda x: x.startswith("NC_"))).all(), "failed to use sequence accession as specialization")
def test_duplicated_input_files(self):
"""
ganon build with duplicated input files. ganon-build will process all input files, but bins should be correct
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_duplicated_input_files"
params["input_files"] = params["input_files"] * 4
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test
# Unique entries on bins (not duplicated)
self.assertTrue(res["bins_pd"][["seqid","seqstart","seqend"]].equals(res["bins_pd"][["seqid","seqstart","seqend"]].drop_duplicates()), "Duplicated entries of repeated sequences on bins")
def test_invalid_ncbi_header(self):
"""
ganon build --specialization sequence with one invalid entry (online: eutils)
"""
params = self.default_params.copy()
params["db_prefix"] = self.results_dir + "test_invalid_ncbi_header"
params["input_files"].append(data_dir+"build/invalid_ncbi_header.fasta.gz")
params["specialization"] = "sequence"
# Build config from params
cfg = Config("build", **params)
# Run
self.assertTrue(ganon.main(cfg=cfg), "ganon build exited with an error")
# General sanity check of results
res = build_sanity_check_and_parse(vars(cfg))
self.assertIsNotNone(res, "ganon build has inconsistent results")
# Specific test - count sequences on tax (4 files)
self.assertEqual(sum(res["tax_pd"]["rank"]=="sequence"), 4, "Wrong number of sequences")
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22871 | """Transfer Out item definition."""
from gaphas.geometry import Rectangle
from gaphor.core.modeling import DrawContext
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, IconBox, Text, stroke
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.RAAML import raaml
from gaphor.RAAML.fta.constants import DEFAULT_FTA_MAJOR
from gaphor.RAAML.fta.transferin import draw_transfer_in
from gaphor.UML.modelfactory import stereotypes_str
@represents(raaml.TransferOut)
class TransferOutItem(ElementPresentation, Classified):
def __init__(self, diagram, id=None):
super().__init__(diagram, id, width=DEFAULT_FTA_MAJOR, height=DEFAULT_FTA_MAJOR)
self.watch("subject[NamedElement].name").watch(
"subject[NamedElement].namespace.name"
)
def update_shapes(self, event=None):
self.shape = IconBox(
Box(
draw=draw_transfer_out,
),
Text(
text=lambda: stereotypes_str(self.subject, ["Transfer Out"]),
),
Text(
text=lambda: self.subject.name or "",
width=lambda: self.width - 4,
style={
"font-weight": FontWeight.BOLD,
"font-style": FontStyle.NORMAL,
},
),
Text(
text=lambda: from_package_str(self),
style={"font-size": "x-small"},
),
)
def draw_transfer_out(box, context: DrawContext, bounding_box: Rectangle):
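    # Draw the Transfer In triangle, then add a horizontal connector from a
    # quarter of the width to the left edge at mid-height.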
draw_transfer_in(box, context, bounding_box)
cr = context.cairo
cr.move_to(bounding_box.width / 4.0, bounding_box.height / 2.0)
cr.line_to(0, bounding_box.height / 2.0)
stroke(context)
|
the-stack_106_22872 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2020, Jim Miller'
__docformat__ = 'restructuredtext en'
import collections
from functools import reduce
from io import StringIO
import logging
logger = logging.getLogger(__name__)
from calibre_plugins.fanficfare_plugin.fanficfare import adapters
from calibre_plugins.fanficfare_plugin.fanficfare.configurable import Configuration
from calibre_plugins.fanficfare_plugin.prefs import prefs
from .fanficfare.six import ensure_text
from .fanficfare.six.moves import configparser
def get_fff_personalini():
return prefs['personal.ini']
def get_fff_config(url,fileform="epub",personalini=None):
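    """Build a FanFicFare Configuration for url/fileform: plugin defaults are
    read first, then the user's personal.ini is layered on top."""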
if not personalini:
personalini = get_fff_personalini()
sections=['unknown']
try:
sections = adapters.getConfigSectionsFor(url)
except Exception as e:
logger.debug("Failed trying to get ini config for url(%s): %s, using section %s instead"%(url,e,sections))
configuration = Configuration(sections,fileform)
configuration.readfp(StringIO(ensure_text(get_resources("plugin-defaults.ini"))))
configuration.readfp(StringIO(ensure_text(personalini)))
return configuration
def get_fff_adapter(url,fileform="epub",personalini=None):
return adapters.getAdapter(get_fff_config(url,fileform,personalini),url)
def test_config(initext):
try:
configini = get_fff_config("test1.com?sid=555",
personalini=initext)
errors = configini.test_config()
except configparser.ParsingError as pe:
errors = pe.errors
return errors
class OrderedSet(collections.MutableSet):
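    """Insertion-ordered set: self.map maps each key to a [key, prev, next] node
    of a circular doubly linked list whose sentinel is self.end."""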
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def get_common_elements(ll):
## returns a list of elements common to all lists in ll
## https://www.tutorialspoint.com/find-common-elements-in-list-of-lists-in-python
return list(reduce(lambda i, j: i & j, (OrderedSet(n) for n in ll)))
|
the-stack_106_22873 | """Built in actions for Jaseci"""
from jaseci.actions.live_actions import jaseci_action
from jaseci.jac.jac_set import jac_set
@jaseci_action()
def max(item_set: jac_set):
ret = None
if (not len(item_set)):
return None
items = item_set.obj_list()
max_val = items[0].anchor_value()
ret = items[0]
for i in items:
if(i.anchor_value() > max_val):
ret = i
max_val = i.anchor_value()
return ret
|
the-stack_106_22874 | from mldp.pipeline import Pipeline
from mldp.steps.readers import CsvReader
from mldp.steps.transformers.nlp import TokenProcessor,\
VocabMapper, Padder
from mldp.steps.transformers.field import FieldSelector
from mldp.utils.helpers.nlp.token_cleaning import twitter_text_cleaner
from mldp.utils.tools import Vocabulary
from mldp.utils.tools.vocabulary import PAD
from mldp.tutorials.steps import TwitterFilesPreprocessor,\
FeaturesLabelsFormatter
from mldp.tutorials.model import ISentiLSTM
import unittest
from nltk.tokenize import TweetTokenizer
import os
class TestTutorials(unittest.TestCase):
def setUp(self):
self.tutorials_path = "mldp/tutorials/"
def test_how_to_apply_run(self):
data_path = os.path.join(self.tutorials_path,
"data/tweets.csv")
# paths where vocabs will be saved and later loaded from
words_vocab_file_path = os.path.join(self.tutorials_path,
"data/vocabs/words.txt")
labels_vocab_file_path = os.path.join(self.tutorials_path,
'data/vocabs/labels.txt')
# creating step objects
twitter_tokenizer = TweetTokenizer()
preprocessor = TwitterFilesPreprocessor(input_cols_number=3,
tweets_indx=2,
add_header=['ids', 'labels',
'tweets'])
csv_reader = CsvReader(sep='\t', chunk_size=30)
fields_selector = FieldSelector(fnames=["tweets", "labels"])
token_processor = TokenProcessor(fnames="tweets",
tokenization_func=twitter_tokenizer.tokenize,
token_cleaning_func=twitter_text_cleaner,
lower_case=True)
# data pipeline for vocabularies creation
vocab_data_pipeline = Pipeline(reader=csv_reader,
preprocessor=preprocessor,
worker_processes_num=0,
name_prefix="vocabs")
vocab_data_pipeline.add_step(fields_selector)
vocab_data_pipeline.add_step(token_processor)
# creating or loading vocabs
words_vocab = Vocabulary(vocab_data_pipeline, name_prefix="words")
words_vocab.load_or_create(words_vocab_file_path,
data_source={"data_path": data_path},
data_fnames="tweets")
labels_vocab = Vocabulary(vocab_data_pipeline,
name_prefix="labels")
labels_vocab.load_or_create(labels_vocab_file_path,
data_source={"data_path": data_path},
data_fnames="labels")
print(words_vocab)
print(labels_vocab)
print(vocab_data_pipeline)
# extra steps for training and evaluation
mapper = VocabMapper(field_names_to_vocabs={"tweets": words_vocab,
"labels": labels_vocab})
padder = Padder(fname="tweets", new_mask_fname="tweets_mask",
pad_symbol=words_vocab[PAD].id)
formatter = FeaturesLabelsFormatter(features_field_name="tweets",
labels_field_name="labels",
classes_number=len(labels_vocab))
# building the actual pipeline
dev_data_pipeline = Pipeline(reader=csv_reader, preprocessor=preprocessor,
worker_processes_num=1, name_prefix="dev")
dev_data_pipeline.add_step(fields_selector)
dev_data_pipeline.add_step(token_processor)
dev_data_pipeline.add_step(mapper)
dev_data_pipeline.add_step(padder)
dev_data_pipeline.add_step(formatter)
print(dev_data_pipeline)
epochs = 2
i_model = ISentiLSTM(dev_data_pipeline)
i_model.init_model(words_vocab_size=len(words_vocab), input_dim=50,
lstm_hidden_dim=120,
number_of_classes=len(labels_vocab),
mask_symbol=words_vocab[PAD].id)
# print("testing before training")
# i_model.test(data_path=data_path)
# print("training the model")
# for epoch in range(1, epochs + 1):
# print "epoch %d" % epoch
# i_model.train(data_path=data_path)
# i_model.test(data_path=data_path)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_22875 | import random
import time
from airtest.core.api import *
from airtest.core.error import TargetNotFoundError
from airtest.core.helper import (G, delay_after_operation)
def connect_windows(name):
""" 连接win设备
"""
try:
connect_device("windows:///?title_re=%s" % name)
except Exception as e:
print("connect failed! Please check or report it: ", e)
return 1
return 0
def connect_android(seq):
""" 连接安卓设备
"""
try:
connect_device("Android:///%s" % seq)
except Exception as e:
print("connect android failed! Please check or report it: ", e)
return 1
print(G.DEVICE.get_display_info())
return 0
def random_pos(pos, xmin, xmax, zmin=-1, zmax=-1):
if zmin == -1 or zmax == -1:
zmin, zmax = xmin, xmax
return (pos[0] + random.randrange(xmin, xmax),
pos[1] + random.randrange(zmin, zmax))
def touch_pos(pos, times=1, **kwargs):
for _ in range(times):
G.DEVICE.touch(pos, **kwargs)
time.sleep(0.05)
delay_after_operation()
def image(path, rpos, rs):
return Template(r"../assets/{}".format(path), record_pos=rpos, resolution=rs)
def get_current_resolution():
return G.DEVICE.get_current_resolution()
def wati_util(v, condition, timeout=60, interval=0.5):
"""等待符合条件的对象
"""
start_time = time.time()
while True:
ret = find_all(v)
if condition(ret):
return ret
if (time.time() - start_time) > timeout:
            raise TargetNotFoundError('Target %s not found in screen' % v)
time.sleep(interval)
def select(vs, timeout=60, interval=0.5):
"""等待多个对象,返回第一个匹配到的对象
"""
start_time = time.time()
while True:
for idx, v in enumerate(vs):
ret = find_all(v)
if ret:
return idx, ret
if (time.time() - start_time) > timeout:
            raise TargetNotFoundError('Target %s not found in screen' % v)
time.sleep(interval)
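# Illustrative usage sketch (added; device serial and asset name are
# hypothetical, and a reachable device plus the referenced asset are required):
#
#   if connect_android("emulator-5554") == 0:
#       ok_btn = image("ok_button.png", rpos=(0.1, 0.2), rs=get_current_resolution())
#       matches = wati_util(ok_btn, condition=lambda found: found, timeout=30)
#       touch_pos(matches[0]["result"], times=2)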
|
the-stack_106_22876 | import click
from os import path
from .base import cli
from ..client import ProtectClient
from ..sync import ProtectSync
@cli.command(
"sync", help="Synchronize your UniFi Protect footage to a local destination"
)
@click.argument("dest", type=click.Path(exists=True, writable=True, resolve_path=True))
@click.option(
"--address",
default="unifi",
show_default=True,
required=True,
help="CloudKey IP address or hostname",
)
@click.option(
"--port", default=7443, show_default=True, help="UniFi Protect service port"
)
@click.option(
"--username",
required=True,
help="Username of user with local access",
prompt="Username of local Protect user",
)
@click.option(
"--password",
required=True,
help="Password of user with local access",
prompt="Password for local Protect user",
hide_input=True,
)
@click.option(
"--statefile",
default="sync.state",
show_default=True,
)
@click.option(
"--ignore-state", is_flag=True, default=False, show_default=True,
)
@click.option(
"--verify-ssl",
is_flag=True,
default=False,
show_default=True,
help="Verify CloudKey SSL certificate",
)
@click.option(
"--ignore-failed-downloads",
is_flag=True,
default=False,
show_default=True,
help="Ignore failed downloads and continue with next download",
)
@click.option(
"--cameras",
default="all",
show_default=True,
help=(
"Comma-separated list of one or more camera IDs ('--cameras=\"id_1,id_2,id_3,...\"'). "
"Use '--cameras=all' to download footage of all available cameras."
),
)
@click.option(
"--use-subfolders/--no-use-subfolders",
default=True,
show_default=True,
help="Save footage to folder structure with format 'YYYY/MM/DD/camera_name/'",
)
@click.option(
"--download-request-timeout",
"download_timeout",
default=60.0,
show_default=True,
help="Time to wait before aborting download request, in seconds",
)
@click.option(
"--skip-existing-files",
is_flag=True,
default=False,
show_default=True,
help="Skip downloading files which already exist on disk",
)
def sync(
    dest,
    address,
    port,
    username,
    password,
    verify_ssl,
    statefile,
    ignore_state,
    ignore_failed_downloads,
    cameras,
    use_subfolders,
    download_timeout,
    skip_existing_files,
):
# normalize path to destination directory and check if it exists
dest = path.abspath(dest)
if not path.isdir(dest):
click.echo(
f"Video file destination directory '{dest} is invalid or does not exist!"
)
exit(1)
client = ProtectClient(
address=address,
port=port,
username=username,
password=password,
verify_ssl=verify_ssl,
destination_path=dest,
ignore_failed_downloads=ignore_failed_downloads,
        use_subfolders=use_subfolders,
)
# get camera list
print("Getting camera list")
camera_list = client.get_camera_list()
if cameras != "all":
camera_ids = set(cameras.split(","))
camera_list = [c for c in camera_list if c.id in camera_ids]
process = ProtectSync(client=client, destination_path=dest, statefile=statefile)
process.run(camera_list, ignore_state=ignore_state)
client.print_download_stats()
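# Illustrative invocation sketch (added; assumes the click group imported from
# `.base` is exposed as a console script -- the name `protect-archiver` is an
# assumption and may differ in your install):
#
#   protect-archiver sync /path/to/footage --address unifi.local \
#       --username admin --cameras all --skip-existing-files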
|
the-stack_106_22877 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F
np.random.seed(10)
def stable_softmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
# clip to shiftx, otherwise, when calc loss with
# log(exp(shiftx)), may get log(0)=INF
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
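# Added illustration (not part of the original test): the max-shift keeps every
# exponent argument <= 0, so large logits cannot overflow. For example,
# stable_softmax(np.array([1000., 1000.])) returns [0.5, 0.5], while the naive
# np.exp([1000., 1000.]) / np.exp([1000., 1000.]).sum() gives nan from inf/inf.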
def ref_softmax(x, axis=None, dtype=None):
x_t = x.copy()
if dtype is not None:
x_t = x_t.astype(dtype)
if axis is None:
axis = -1
return np.apply_along_axis(stable_softmax, axis, x_t)
class TestSoftmaxOp(OpTest):
def get_x_shape(self):
return [10, 10]
def get_axis(self):
return -1
def setUp(self):
self.op_type = "softmax"
self.use_cudnn = False
self.use_mkldnn = False
self.dtype = np.float64
self.init_kernel_type()
self.shape = self.get_x_shape()
self.axis = self.get_axis()
np.random.seed(0)
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
out = np.apply_along_axis(stable_softmax, self.axis, x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
self.attrs = {
'axis': self.axis,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn
}
def init_kernel_type(self):
pass
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.use_cudnn:
place = core.CUDAPlace(0)
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
else:
self.check_output(check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.use_cudnn or self.dtype == np.float16:
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(
place, ["X"],
"Out",
max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False))
else:
self.check_grad(
["X"],
"Out",
max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False))
class TestSoftmaxOp2(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
class TestSoftmaxOp3(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 0
class TestSoftmaxOp4(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 1
class TestSoftmaxOp5(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 2
class TestSoftmaxOp6(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 3
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
def init_kernel_type(self):
self.use_cudnn = True
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 3
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
def init_kernel_type(self):
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
# FIXME: If the x_shape is [10, 10], gradient failed.
def test_check_grad(self):
pass
@unittest.skip('disable TestSoftmaxFP16Op2')
class TestSoftmaxFP16Op2(TestSoftmaxOp):
def init_kernel_type(self):
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
def get_x_shape(self):
return [2, 3, 4, 5]
def test_check_grad(self):
pass
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
class TestSoftmaxAPI(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32')
self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
def test_static_check(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.data('X', self.x_np.shape, 'float32')
out1 = F.softmax(x)
m = paddle.nn.Softmax()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_check(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.softmax(x)
m = paddle.nn.Softmax()
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
out1 = F.softmax(x, axis=0)
m = paddle.nn.Softmax(axis=0)
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
out = F.softmax(x, dtype=np.float64)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
paddle.enable_static()
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.softmax, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.data(name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, F.softmax, x_int32)
# support the input dtype is float16
x_fp16 = paddle.data(name='x_fp16', shape=[2, 3], dtype='float16')
F.softmax(x_fp16)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_22881 | from jumpscale import j
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
import sys
import os
from .JSWebLoader import JSWebLoader
JSConfigBase = j.tools.configmanager.base_class_config
TEMPLATE = """
host = "localhost"
port = 5050
port_ssl = 0
secret_ = ""
ws_dir = ""
"""
class JSWebServer(JSConfigBase):
def __init__(self, instance, data=None, parent=None, interactive=False, template=None):
if not data:
data = {}
JSConfigBase.__init__(self, instance=instance, data=data, parent=parent,
template=template or TEMPLATE, interactive=interactive)
# Set proper instance for j.data.schema
self.host = self.config.data["host"]
self.port = int(self.config.data["port"])
self.port_ssl = int(self.config.data["port_ssl"])
self.address = '{}:{}'.format(self.host, self.port)
# config_path = j.sal.fs.joinPaths(self.path, "site_config.toml")
# if j.sal.fs.exists(config_path):
# self.site_config = j.data.serializer.toml.load(config_path)
# else:
# self.site_config = {}
        self._inited = False
        self._sig_handler = []
j.servers.web.latest = self
self.http_server = None
    def register_blueprints(self, app=None):
        self.init()
        self._loader.register_blueprints(app or self.app)
def init(self, debug=False):
if self._inited:
return
self._loader = JSWebLoader(path=self.path)
self.logger.info("init server")
if self.path not in sys.path:
sys.path.append(self.path)
self.app = self._loader.app
        self.app.debug = debug
self.http_server = WSGIServer((self.host, self.port), self.app, handler_class=WebSocketHandler)
self.app.http_server = self.http_server
self.app.server = self
# self.docs_load()
# self._sig_handler.append(gevent.signal(signal.SIGINT, self.stop))
        self._inited = True
# def docs_load(self):
# if "docsite" not in self.site_config:
# return
#
# for item in self.site_config["docsite"]:
# url = item["url"]
# name = item["name"]
# if url is not "":
# path = j.clients.git.getContentPathFromURLorPath(url)
# if not j.sal.fs.exists(path):
# j.clients.git.pullGitRepo(url=url)
# j.tools.docsites.load(path=path, name=name)
@property
def path(self):
return self.config.data['ws_dir'].rstrip("/") + "/"
def sslkeys_generate(self):
        res = j.sal.ssl.ca_cert_generate(self.path)
        if res:
            self.logger.info("generated sslkeys for gedis in %s" % self.path)
        else:
            self.logger.info('using existing key and certificate for gedis @ %s' % self.path)
key = os.path.join(self.path, 'ca.key')
cert = os.path.join(self.path, 'ca.crt')
return key, cert
def start(self, debug=False):
print("start")
self.init(debug=debug)
print("Webserver running")
print(self)
self.http_server.serve_forever()
def stop(self):
"""
stop receiving requests and close the server
"""
# prevent the signal handler to be called again if
# more signal are received
        for h in self._sig_handler:
            h.cancel()
        self.logger.info('stopping server')
        self.http_server.stop()
def __repr__(self):
        return '<Flask Server http://%s app_dir=%s>' % (self.address, self.path)
__str__ = __repr__
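# Illustrative usage sketch (added; the factory call on j.servers.web is a
# hypothetical accessor -- the exact API depends on the JumpScale release):
#
#   server = j.servers.web.get("main")   # configured instance named "main"
#   server.start()                       # blocks, serving the Flask app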
|
the-stack_106_22882 | from datetime import datetime
import pytest
from opentrons.types import MountType
from pydantic import ValidationError
from opentrons.protocol_engine import commands as pe_commands
from robot_server.service.session.models import command, command_definitions
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.ProtocolCommand.start_run,
command_definitions.CalibrationCommand.move_to_deck,
command_definitions.CheckCalibrationCommand.compare_point,
])
def test_empty(command_def: command_definitions.CommandDefinition):
"""Test creation of empty command request and response."""
request = command.CommandRequest.parse_obj({
"data": {
"command": command_def.value,
"data": {}
}
})
assert request.data.command == command_def
assert request.data.data == command.EmptyModel()
dt = datetime(2000, 1, 1)
response = request.data.make_response(
identifier="id",
status=command.CommandStatus.executed,
created_at=dt,
started_at=None,
completed_at=None,
result=None
)
assert response.command == command_def
assert response.data == command.EmptyModel()
assert response.id == "id"
assert response.createdAt == dt
assert response.startedAt is None
assert response.completedAt is None
assert response.result is None
def test_not_empty():
"""Test that command request with data and result are created properly."""
request = command.CommandRequest.parse_obj({
"data": {
"command": "equipment.loadPipette",
"data": {
"pipetteName": "p10_single",
"mount": "left"
}
}
})
assert request.data.command == \
command_definitions.EquipmentCommand.load_pipette
assert request.data.data == pe_commands.LoadPipetteRequest(
pipetteName="p10_single",
mount=MountType.LEFT
)
dt = datetime(2000, 1, 1)
response = request.data.make_response(
identifier="id",
status=command.CommandStatus.executed,
created_at=dt,
started_at=None,
completed_at=None,
result=pe_commands.LoadPipetteResult(
pipetteId="123"
)
)
assert response.command == \
command_definitions.EquipmentCommand.load_pipette
assert response.data == pe_commands.LoadPipetteRequest(
pipetteName="p10_single",
mount=MountType.LEFT
)
assert response.id == "id"
assert response.createdAt == dt
assert response.startedAt is None
assert response.completedAt is None
assert response.result == pe_commands.LoadPipetteResult(
pipetteId="123"
)
@pytest.mark.parametrize(
argnames="command_def",
argvalues=[
command_definitions.EquipmentCommand.load_labware,
command_definitions.EquipmentCommand.load_pipette,
command_definitions.PipetteCommand.aspirate,
command_definitions.PipetteCommand.dispense,
command_definitions.PipetteCommand.drop_tip,
command_definitions.PipetteCommand.pick_up_tip,
command_definitions.CalibrationCommand.jog,
command_definitions.CalibrationCommand.set_has_calibration_block
])
def test_requires_data(command_def: command_definitions.CommandDefinition):
"""Test creation of command requiring data will fail with empty body."""
with pytest.raises(ValidationError):
command.CommandRequest.parse_obj({
"data": {
"command": command_def.value,
"data": {}
}
})
|