the-stack_106_19978
|
# -*- coding: utf-8 -*-
"""Some miscellaneous utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import fnmatch
import inspect
from io import StringIO
import logging
from math import log
import os
from string import Formatter
import subprocess
import sys
import traceback
import numpy as np
from ..fixes import _get_args
from ._logging import logger, verbose, warn
def _pl(x, non_pl=''):
"""Determine if plural should be used."""
len_x = x if isinstance(x, (int, np.generic)) else len(x)
return non_pl if len_x == 1 else 's'
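# Illustrative behaviour (not part of the original module), assuming _pl is used
# to build messages such as f"{n} epoch{_pl(n)}":
#   _pl(1)       -> ''    (singular)
#   _pl([1, 2])  -> 's'   (plural)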
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
class _Counter():
count = 1
def __call__(self, *args, **kargs):
c = self.count
self.count += 1
return c
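# A minimal usage sketch for _Counter (illustrative only): each call returns the
# current count and then increments it, starting at 1.
#   count = _Counter()
#   count()  # -> 1
#   count()  # -> 2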
class _FormatDict(dict):
"""Help pformat() work properly."""
def __missing__(self, key):
return "{" + key + "}"
def pformat(temp, **fmt):
"""Format a template string partially.
Examples
--------
>>> pformat("{a}_{b}", a='x')
'x_{b}'
"""
formatter = Formatter()
mapping = _FormatDict(fmt)
return formatter.vformat(temp, (), mapping)
@verbose
def run_subprocess(command, verbose=None, *args, **kwargs):
"""Run command using subprocess.Popen.
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
    By default, this will also add stdout= and stderr=subprocess.PIPE
    to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
%(verbose)s
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
stdout : str
Stdout returned by the process.
stderr : str
Stderr returned by the process.
"""
for stdxxx, sys_stdxxx, thresh in (
['stderr', sys.stderr, logging.ERROR],
['stdout', sys.stdout, logging.WARNING]):
if stdxxx not in kwargs and logger.level >= thresh:
kwargs[stdxxx] = subprocess.PIPE
elif kwargs.get(stdxxx, sys_stdxxx) is sys_stdxxx:
if isinstance(sys_stdxxx, StringIO):
# nose monkey patches sys.stderr and sys.stdout to StringIO
kwargs[stdxxx] = subprocess.PIPE
else:
kwargs[stdxxx] = sys_stdxxx
# Check the PATH environment variable. If run_subprocess() is to be called
# frequently this should be refactored so as to only check the path once.
env = kwargs.get('env', os.environ)
if any(p.startswith('~') for p in env['PATH'].split(os.pathsep)):
warn('Your PATH environment variable contains at least one path '
'starting with a tilde ("~") character. Such paths are not '
'interpreted correctly from within Python. It is recommended '
'that you use "$HOME" instead of "~".')
if isinstance(command, str):
command_str = command
else:
command_str = ' '.join(command)
logger.info("Running subprocess: %s" % command_str)
try:
p = subprocess.Popen(command, *args, **kwargs)
except Exception:
if isinstance(command, str):
command_name = command.split()[0]
else:
command_name = command[0]
logger.error('Command not found: %s' % command_name)
raise
stdout_, stderr = p.communicate()
stdout_ = u'' if stdout_ is None else stdout_.decode('utf-8')
stderr = u'' if stderr is None else stderr.decode('utf-8')
output = (stdout_, stderr)
if p.returncode:
print(output)
err_fun = subprocess.CalledProcessError.__init__
if 'output' in _get_args(err_fun):
raise subprocess.CalledProcessError(p.returncode, command, output)
else:
raise subprocess.CalledProcessError(p.returncode, command)
return output
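# Hedged usage sketch ('echo' is a placeholder command; whether output is piped
# depends on the logger level, as handled above):
#   stdout, stderr = run_subprocess(['echo', 'hello'])
#   # stdout == 'hello\n' and stderr == '' on success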
def _clean_names(names, remove_whitespace=False, before_dash=True):
"""Remove white-space on topo matching.
This function handles different naming
conventions for old VS new VectorView systems (`remove_whitespace`).
Also it allows to remove system specific parts in CTF channel names
(`before_dash`).
Usage
-----
# for new VectorView (only inside layout)
ch_names = _clean_names(epochs.ch_names, remove_whitespace=True)
# for CTF
ch_names = _clean_names(epochs.ch_names, before_dash=True)
"""
cleaned = []
for name in names:
if ' ' in name and remove_whitespace:
name = name.replace(' ', '')
if '-' in name and before_dash:
name = name.split('-')[0]
if name.endswith('_v'):
name = name[:-2]
cleaned.append(name)
return cleaned
def _get_argvalues():
"""Return all arguments (except self) and values of read_raw_xxx."""
# call stack
# read_raw_xxx -> EOF -> verbose() -> BaseRaw.__init__ -> get_argvalues
frame = inspect.stack()[4][0]
fname = frame.f_code.co_filename
if not fnmatch.fnmatch(fname, '*/mne/io/*'):
return None
args, _, _, values = inspect.getargvalues(frame)
params = dict()
for arg in args:
params[arg] = values[arg]
params.pop('self', None)
return params
def sizeof_fmt(num):
"""Turn number of bytes into human-readable str.
Parameters
----------
num : int
The number of bytes.
Returns
-------
size : str
The size in human-readable format.
"""
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = '{0:.%sf} {1}' % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return '0 bytes'
if num == 1:
return '1 byte'
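# A few illustrative values (not part of the original module):
#   sizeof_fmt(0)        -> '0 bytes'
#   sizeof_fmt(1)        -> '1 byte'
#   sizeof_fmt(1024)     -> '1 kB'
#   sizeof_fmt(1536000)  -> '1.5 MB'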
|
the-stack_106_19979
|
import json
import logging
import math
import pathlib
from carim.configuration import decorators
from carim.configuration.mods.trader.models import config, objects
from carim.global_resources import types, matching_model, resourcesdir
from carim.util import file_writing
log = logging.getLogger(__name__)
class TraderName:
AUTO = 'auto'
CLOTHING = 'clothing'
FOOD = 'food'
WEAPONS = 'weapons'
ACCESSORIES = 'accessories'
TOOLS = 'tools'
BM = 'bm'
@decorators.mod('@Trader')
@decorators.profile(directory='Trader') # run after type modifications
def trader_items(directory):
with open(pathlib.Path(resourcesdir.get(), 'modifications/mods/trader/inventory.json')) as f:
inventory = json.load(f)
traders_config = config.Config()
trader_names = [
TraderName.AUTO,
TraderName.CLOTHING,
TraderName.FOOD,
TraderName.WEAPONS,
TraderName.ACCESSORIES,
TraderName.TOOLS,
TraderName.BM
]
traders = {}
for trader_name in trader_names:
categories = inventory.get(trader_name, list())
current_trader = config.Trader(trader_name)
traders[trader_name] = current_trader
traders_config.traders.append(current_trader)
for category in categories:
new_category = config.Category(category.get('category'))
current_trader.categories.append(new_category)
build_category(new_category, category)
log.info('added {} items to {}'.format(len(new_category.items), (trader_name, new_category.name)))
add_dynamic(traders)
with file_writing.f_open(pathlib.Path(directory, 'TraderConfig.txt'), mode='w') as f:
f.write(traders_config.generate())
def build_category(new_category, category_config):
for item in category_config.get('items', list()):
name = item.get('name')
buy = item.get('buy')
sell = item.get('sell')
quantity = item.get('quantity')
item_type = get_item_type_for_name(item.get('class', 'max'))
if quantity is not None:
new_item = item_type(name, buy, sell, quantity)
else:
new_item = item_type(name, buy, sell)
new_category.items.append(new_item)
def get_item_type_for_name(name):
if name == "vehicle":
return config.Vehicle
elif name == "magazine":
return config.Magazine
elif name == "weapon":
return config.Weapon
elif name == "steak":
return config.Steak
elif name == "quantity":
return config.Item
else:
return config.Singular
def add_dynamic(traders):
with open(pathlib.Path(resourcesdir.get(), 'modifications/mods/trader/inventory_dynamic.json')) as f:
trader_config = json.load(f)
temp_traders = {}
for entry in trader_config:
current_traders = entry.get('trader')
if not isinstance(current_traders, list):
current_traders = [current_traders]
for trader_name in current_traders:
if trader_name not in temp_traders:
temp_traders[trader_name] = list()
category_name = entry.get('category')
log.info('processing {}'.format((trader_name, category_name)))
categories = {}
temp_traders[trader_name].append(categories)
for item in entry.get('items'):
expanded = {}
matching = item.get('matching')
buy = item.get('buy')
sell = item.get('sell')
quantity = item.get('quantity', None)
item_type = get_item_type_for_name(item.get('item_class'))
if matching is not None:
match = matching_model.Match(matching)
for t in types.get().getroot():
result = match.match(t)
if result:
items = expanded.get(result.groups.get('captured'), list())
items.append(item_type(t.get('name'), buy, sell, quantity))
expanded[result.groups.get('captured')] = items
else:
name = item.get('name')
expanded[None] = [item_type(name, buy, sell, quantity)]
for key in expanded:
current_cat_name = category_name.format(captured=key)
current_cat = categories.get(current_cat_name, config.Category(current_cat_name))
if quantity is not None:
current_cat.items += [item_type(i.name, buy, sell, quantity) for i in expanded[key] if
i not in current_cat]
else:
current_cat.items += [item_type(i.name, buy, sell) for i in expanded[key] if
i not in current_cat]
categories[current_cat_name] = current_cat
for key in temp_traders:
for cat_set in temp_traders[key]:
for c in cat_set.values():
log.info('added {} dynamic items to {}'.format(len(c.items), (key, c.name)))
traders[key].categories += cat_set.values()
@decorators.register
@decorators.mod('@Trader')
@decorators.profile(directory='Trader')
def trader_objects_config(directory):
to = objects.Config()
with open(pathlib.Path(resourcesdir.get(), 'modifications/mods/trader/locations.json')) as f:
locations = json.load(f)
with open(pathlib.Path(resourcesdir.get(), 'modifications/mods/trader/outfits.json')) as f:
outfits = json.load(f)
for name, l_config in locations.items():
log.info('processing {}'.format(name))
for trader_name, t in l_config.items():
new_trader = objects.Trader(t.get('marker'), t.get('location'), t.get('safezone', 200))
new_object = objects.Object(outfits.get(trader_name).get('class'), t.get('location'), t.get('o'))
for attachment in outfits.get(trader_name).get('attachments'):
new_object.attachments.append(objects.Attachment(attachment))
if 'vehicle' in t:
raw_vehicle = t.get('vehicle')
new_trader.set_vehicle(objects.Vehicle(raw_vehicle.get('location'), raw_vehicle.get('o')))
to.objects.extend(get_cones_for_vehicle(raw_vehicle.get('location'), raw_vehicle.get('o')))
to.traders.append(new_trader)
to.objects.append(new_object)
with file_writing.f_open(pathlib.Path(directory, 'TraderObjects.txt'), mode='w') as f:
f.write(to.generate())
def get_cones_for_vehicle(location, orientation):
cones = list()
orientation_radians = -math.radians(orientation)
center_x = location[0]
center_y = location[1]
center_z = location[2]
xs = (-3, 3)
zs = (-4, 4)
for x in xs:
for z in zs:
new_x, new_z = get_rotated_point(x, z, orientation_radians)
new_x += center_x
new_z += center_z
cones.append(objects.Object('Land_RoadCone', [new_x, center_y, new_z], orientation))
return cones
def get_rotated_point(x, z, radians):
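    # Rotate the point (x, z) about the origin by `radians` using the standard
    # 2D rotation matrix; used above to place cones around a rotated vehicle.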
new_x = x * math.cos(radians) - z * math.sin(radians)
new_z = x * math.sin(radians) + z * math.cos(radians)
return new_x, new_z
|
the-stack_106_19980
|
import operator as ops
import numpy as np
import pytest
import taichi as ti
from taichi import allclose
binary_func_table = [
(ops.add, ) * 2,
(ops.sub, ) * 2,
(ops.mul, ) * 2,
(ops.truediv, ) * 2,
(ops.floordiv, ) * 2,
(ops.mod, ) * 2,
(ops.pow, ) * 2,
(ops.and_, ) * 2,
(ops.or_, ) * 2,
(ops.xor, ) * 2,
(ops.eq, ) * 2,
(ops.ne, ) * 2,
(ops.lt, ) * 2,
(ops.le, ) * 2,
(ops.gt, ) * 2,
(ops.ge, ) * 2,
(ti.max, np.maximum),
(ti.min, np.minimum),
(ti.atan2, np.arctan2),
]
unary_func_table = [
(ops.neg, ) * 2,
(ops.invert, ) * 2,
(ti.logical_not, np.logical_not),
(ti.abs, np.abs),
(ti.exp, np.exp),
(ti.log, np.log),
(ti.sin, np.sin),
(ti.cos, np.cos),
(ti.tan, np.tan),
(ti.asin, np.arcsin),
(ti.acos, np.arccos),
(ti.tanh, np.tanh),
(ti.floor, np.floor),
(ti.ceil, np.ceil),
]
@pytest.mark.parametrize('ti_func,np_func', binary_func_table)
def test_python_scope_vector_binary(ti_func, np_func):
x = ti.Vector([2, 3])
y = ti.Vector([5, 4])
result = ti_func(x, y).to_numpy()
if ti_func in [ops.eq, ops.ne, ops.lt, ops.le, ops.gt, ops.ge]:
result = result.astype(bool)
expected = np_func(x.to_numpy(), y.to_numpy())
assert allclose(result, expected)
@pytest.mark.parametrize('ti_func,np_func', unary_func_table)
def test_python_scope_vector_unary(ti_func, np_func):
x = ti.Vector([2, 3] if ti_func in
[ops.invert, ti.logical_not] else [0.2, 0.3])
result = ti_func(x).to_numpy()
if ti_func in [ti.logical_not]:
result = result.astype(bool)
expected = np_func(x.to_numpy())
assert allclose(result, expected)
def test_python_scope_matmul():
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6], [7, 8]])
x = ti.Vector(a)
y = ti.Vector(b)
result = (x @ y).to_numpy()
expected = a @ b
assert allclose(result, expected)
def test_python_scope_linalg():
a = np.array([3, 4, -2])
b = np.array([-5, 0, 6])
x = ti.Vector(a)
y = ti.Vector(b)
assert allclose(x.dot(y), np.dot(a, b))
assert allclose(x.norm(), np.sqrt(np.dot(a, a)))
assert allclose(x.normalized(), a / np.sqrt(np.dot(a, a)))
assert x.any() == 1 # To match that of Taichi IR, we return -1 for True
assert y.all() == 0
|
the-stack_106_19983
|
'''
Quick way to gather sites using Teamtailor with Google Custom Search API.
Note that Google limits this to 100 entries, i.e. at most 10 pages with 10 results per page.
'''
## Loads key from single line keyfiles
def load_key(filename=''):
if not filename: return None
with open(filename, 'r') as f:
return f.read()
## Load companies from json file
import json
import requests
def load_companies(filename='teamtailor.json'):
with open(filename, 'r') as f:
d = json.load(f)
return d
# Get existing companies
company_dict = load_companies()
# Personal api key and cse id
api_key = load_key('cfg/api_key.hidden')
cse_id = load_key('cfg/cse_id.hidden')
## Avoid duplicate urls
url_set = set(company_dict.values())
## Adds JSON content to dictionary
def add_to_dict(content):
results = json.loads(content)
for result in results['items']:
title = result['htmlTitle'].split('-')[0].strip()
print(title)
if not company_dict.get(title, 0):
real_url = requests.get('http://' + result['displayLink'], verify=False).url
            if real_url in url_set:  # Checked after the title because another request is required
print('Already in set!')
return
print(real_url)
company_dict[title] = real_url # Add to dict
url_set.add(real_url) # Add to url_set
else:
print('Already in dict!')
print('-' * 40)
## API Request URL
url = ('https://www.googleapis.com/customsearch/v1?'
'q=site%3A*.teamtailor.com' # Query: site:*.teamtailor.com
'&cx={}' # cse_id
'&start={}' # index of result
'&key={}') # api_key
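# With dummy credentials, a fully formatted request would look roughly like
# (placeholder values, not real keys):
#   https://www.googleapis.com/customsearch/v1?q=site%3A*.teamtailor.com&cx=YOUR_CSE_ID&start=11&key=YOUR_API_KEY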
for i in range(2, 92, 10):
r = requests.get(url.format(cse_id, i, api_key))
print('\nQuery %s to %s' % (str(i), str(i + 10)))
add_to_dict(r.content)
## Add locations to location database
## NOTE: Still requires some manual scrubbing
from jdbhandler import jdbhandler
def db_add_companies(dbh, c_dict):
for company, url in c_dict.items():
dbh.insert_company(company, url)
dbh.commit()
dbh = jdbhandler()
db_add_companies(dbh, company_dict)
dbh.close()
|
the-stack_106_19984
|
import filecmp
import os
import subprocess
from tests import TESTS, SAMPLE_WHITELISTS
from vulture_whitelist import __version__
def create_whitelist_from_test_sip_files(name):
path = os.path.join(TESTS, 'test-data', 'sip')
subprocess.call(['vulture-whitelist', 'sip', '--name', name], cwd=path)
def test_qt_whitelist(tmpdir):
whitelist = str(tmpdir.mkdir("whitelists").join("whitelist.py"))
create_whitelist_from_test_sip_files(whitelist)
whitelist_sample = os.path.join(SAMPLE_WHITELISTS, 'qtbluetooth.py')
assert filecmp.cmp(whitelist, whitelist_sample, shallow=False)
def test_version():
version_string = 'vulture-whitelist {}\n'.format(__version__)
assert subprocess.check_output(
['vulture-whitelist', '--version']).decode("utf_8") == version_string
|
the-stack_106_19986
|
from tronx import app, gen
from pytube import YouTube
from pyrogram.types import Message
app.CMD_HELP.update(
{"utube": (
"utube",
{
"yvinfo [link]" : "Get a youtube video information . . .",
"yvdl [link]" : "Download any video from YouTube . . ."
}
)
}
)
@app.on_message(gen("yvinfo", allow = ["sudo", "channel"]))
async def videoinfo_handler(_, m: Message):
reply = m.reply_to_message
if reply and reply.text:
link = reply.text
    elif not reply and app.long() > 1:
link = m.text.split(None, 1)[1]
elif not reply and app.long() == 1:
return await app.send_edit("Reply to youtube link or give link as a suffix . . .", text_type=["mono"], delme=5)
await app.send_edit("Getting information . . .", text_type=["mono"])
yt = YouTube(link)
thumb_link = yt.thumbnail_url
data = f"**Title:** {yt.title}\n\n"
data += f"**Duration:** {app.GetReadableTime(yt.length)}\n\n"
data += f"**Description:** {yt.description}\n\n"
data += f"**Views:** {yt.views}\n\n"
data += f"**Age Restricted:** {'Yes' if yt.age_restricted else 'No'}"
await app.send_photo(m.chat.id, thumb_link, caption=data)
@app.on_message(gen("yvdl", allow = ["sudo", "channel"]))
async def ytdownload_handler(_, m):
reply = m.reply_to_message
cmd = m.command
await app.send_edit("processing link . . .", text_type=["mono"])
if not reply:
if app.long() == 1:
return await app.send_edit("Please reply to a yt link or give me link as a suffix . . .", text_type=["mono"], delme=4)
elif app.long() > 1 and cmd[1].startswith("https://"):
link = cmd[1]
else:
return await app.send_edit("Please reply to a link or give me the link as a suffix after command . . .", text_type=["mono"], delme=4)
elif reply:
if reply.text and reply.text.startswith("https://"):
link = reply.text
else:
return await app.send_edit("Please reply to a link or give me the link as a suffix after command . . .", text_type=["mono"], delme=4)
else:
return await app.send_edit("Something went wrong . . .")
yt = YouTube(link)
data = yt.streams.all()
await app.send_edit("**Trying to download **" + f"`{yt.title}`")
for x in data:
if x.type == "video" and x.resolution in ("720p" or "1080p") and x.mime_type == "video/mp4":
try:
loc = x.download()
await app.send_video(m.chat.id, loc, caption="**Title:**\n\n" + yt.title)
await m.delete()
break
except Exception as e:
await error(e)
|
the-stack_106_19988
|
from collections import deque
from utils import stringify, get_collection_or_data_object, find_children
from irods_cli import cli
import click
from irods.collection import iRODSCollection
@cli.command()
@click.argument('path')
@click.option('--recursive','-R', is_flag=True,
help='use a long listing format')
@click.option('--human-readable','-h', is_flag=True,
help='with -l and/or -s, print human readable sizes' +
'(e.g., 1K 234M 2G)')
@click.option('--size','-s', is_flag=True,
help='print the allocated size of each file, in blocks')
@click.option('-l', is_flag=True, help='use a long listing format')
@click.pass_context
def ls(ctx, path, recursive, **print_kwargs):
obj = get_collection_or_data_object(ctx.obj['session'], path)
if not obj:
click.echo('Not found in remote path: {}'.format(path))
exit(-1)
children = deque([obj])
    children.extend(find_children(obj))
while children:
c = children.pop()
click.echo(stringify(c, ctx.obj['session'], **print_kwargs))
if type(c) == iRODSCollection and recursive:
for child in find_children(c):
if child:
children.append(child)
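# Hedged usage sketch: via the click group defined in irods_cli, this subcommand
# would be invoked roughly as (CLI name and path are placeholders):
#   <cli> ls /tempZone/home/alice -R -l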
|
the-stack_106_19991
|
''' Proxlight Designer - Created By Pratyush Mishra (Proxlight)'''
from tkinter import *
from tkinter import filedialog, messagebox
############################################################################
import requests
import os
def generate_code(token, link, output_path):
def get_color(element):
# Returns HEX form of element RGB color (str)
el_r = element["fills"][0]["color"]['r'] * 255
el_g = element["fills"][0]["color"]['g'] * 255
el_b = element["fills"][0]["color"]['b'] * 255
hex_code = ('#%02x%02x%02x' % (round(el_r), round(el_g), round(el_b)))
return hex_code
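    # e.g. (illustrative): get_color({"fills": [{"color": {"r": 1, "g": 0, "b": 0}}]}) -> '#ff0000'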
def get_coordinates(element):
# Returns element coordinates as x (int) and y (int)
x = int(element["absoluteBoundingBox"]["x"])
y = int(element["absoluteBoundingBox"]["y"])
return x, y
def get_dimensions(element):
# Return element dimensions as width (int) and height (int)
height = int(element["absoluteBoundingBox"]["height"])
width = int(element["absoluteBoundingBox"]["width"])
return width, height
def get_text_properties(element):
# Return element font and fontSize (str)
font = element["style"]["fontPostScriptName"]
fontSize = element["style"]["fontSize"]
return font, fontSize
global fig_window, response
generated_dir = output_path + "/Proxlight_Designer_Export/"
lines = []
lines.extend(['from tkinter import *\n\n',
'def btn_clicked():',
' print("Button Clicked")\n\n\n'
'window = Tk()'])
# Getting File Data
def find_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
token = token.strip()
file_url = link.strip()
file_id = find_between(file_url, "file/", "/")
try:
response = requests.get(
f"https://api.figma.com/v1/files/{file_id}",
headers={"X-FIGMA-TOKEN": token})
except ValueError:
messagebox.showerror(
"Value Error",
"Invalid Input. Please check your input and try again.")
except requests.ConnectionError:
messagebox.showerror(
"No Connection",
"Proxlight Designer requires internet access to work.")
data = response.json()
# Getting Window Properties
try:
fig_window = data["document"]["children"][0]["children"][0]
try:
os.mkdir(generated_dir)
except FileExistsError:
messagebox.showinfo("File Exists",
"Existing Files will be overwritten.")
except PermissionError:
messagebox.showerror("Permission Error",
"Change directory or directory permissions.")
except KeyError:
messagebox.showerror(
"Error",
"Invalid Input. Please check your input and try again.")
except IndexError:
messagebox.showerror(
"Error",
"Invalid design file. Does your file contain a Frame?")
window_width, window_height = get_dimensions(fig_window)
try:
window_bg_hex = get_color(fig_window)
except Exception as e:
print(e)
window_bg_hex = "#FFFFFF"
# Creating Window
lines.extend([f'\nwindow.geometry("{window_width}x{window_height}")',
f'window.configure(bg = "{window_bg_hex}")',
'canvas = Canvas(',
' window,',
f' bg = "{window_bg_hex}",',
f' height = {window_height},',
f' width = {window_width},',
' bd = 0,',
' highlightthickness = 0,',
' relief = "ridge")',
'canvas.place(x = 0, y = 0)\n'])
# Getting Elements inside Window
window_elements = fig_window["children"]
btn_count = 0
text_entry_count = 0
for element in window_elements:
if element["name"] == "Rectangle"or element["name"] == "rectangle":
width, height = get_dimensions(element)
x, y = get_coordinates(element)
element_color = get_color(element)
lines.extend(['\ncanvas.create_rectangle(',
f' {x}, {y}, {x}+{width}, {y}+{height},',
f' fill = "{element_color}",',
' outline = "")\n'])
elif element["name"] == "Button" or element["name"] == "button":
width, height = get_dimensions(element)
x, y = get_coordinates(element)
item_id = element["id"]
response = requests.get(
f"https://api.figma.com/v1/images/{file_id}?ids={item_id}",
headers={"X-FIGMA-TOKEN": f"{token}"})
image_link = requests.get(response.json()["images"][item_id])
with open(f"{generated_dir}img{btn_count}.png", "wb") as file:
file.write(image_link.content)
lines.extend([
f'img{btn_count} = PhotoImage(file = f"img{btn_count}.png")',
f'b{btn_count} = Button(',
f' image = img{btn_count},',
' borderwidth = 0,',
' highlightthickness = 0,',
' command = btn_clicked,',
' relief = "flat")\n',
f'b{btn_count}.place(',
f' x = {x}, y = {y},',
f' width = {width},',
f' height = {height})\n'])
btn_count += 1
elif element["type"] == "TEXT":
text = element["characters"]
x, y = get_coordinates(element)
width, height = get_dimensions(element)
color = get_color(element)
font, fontSize = get_text_properties(element)
x, y = x + (width / 2), y + (height / 2)
text = text.replace("\n", "\\n")
lines.extend([f'canvas.create_text(',
f' {x}, {y},',
f' text = "{text}",',
f' fill = "{color}",',
f' font = ("{font}", int({fontSize})))\n'])
elif element["name"] in ("TextBox", "TextArea"):
element_types = {
"TextArea": "Text",
"TextBox": "Entry"
}
width, height = get_dimensions(element)
x, y = get_coordinates(element)
x, y = x + (width / 2), y + (height / 2)
bg = get_color(element)
item_id = element["id"]
response = requests.get(
f"https://api.figma.com/v1/images/{file_id}?ids={item_id}",
headers={"X-FIGMA-TOKEN": f"{token}"})
image_link = requests.get(response.json()["images"][item_id])
with open(
f"{generated_dir}img_textBox{text_entry_count}.png",
"wb"
) as file:
file.write(image_link.content)
lines.extend([f'entry{text_entry_count}_img = PhotoImage('
f'file = f"img_textBox{text_entry_count}.png")',
f'entry{text_entry_count}_bg = '
'canvas.create_image(',
f' {x}, {y},',
f' image = entry{text_entry_count}_img)\n'])
try:
corner_radius = element["cornerRadius"]
except KeyError:
corner_radius = 0
if corner_radius > height / 2:
corner_radius = height / 2
reduced_width = width - (corner_radius * 2)
reduced_height = height - 2
x, y = get_coordinates(element)
x = x + corner_radius
lines.extend([f'entry{text_entry_count} = '
f'{element_types[element["name"]]}(',
' bd = 0,',
f' bg = "{bg}",',
' highlightthickness = 0)\n',
f'entry{text_entry_count}.place(',
f' x = {x}, y = {y},',
f' width = {reduced_width},',
f' height = {reduced_height})\n'])
text_entry_count += 1
elif element["name"] == "Background" or element["name"] == "background" or element["name"] == "BG" or element["name"] == "bg":
width, height = get_dimensions(element)
x, y = get_coordinates(element)
x, y = x + (width / 2), y + (height / 2)
item_id = element["id"]
response = requests.get(
f"https://api.figma.com/v1/images/{file_id}"
f"?ids={item_id}&use_absolute_bounds=true",
headers={"X-FIGMA-TOKEN": f"{token}"})
image_link = requests.get(response.json()["images"][item_id])
with open(f"{generated_dir}background.png", "wb") as file:
file.write(image_link.content)
lines.extend(['background_img = PhotoImage('
'file = f"background.png")',
'background = canvas.create_image(',
f' {x}, {y},',
f' image=background_img)\n'])
# Adding Generated Code to window.py
lines.extend(['window.resizable(False, False)', 'window.mainloop()'])
final_code = [line + "\n" for line in lines]
with open(f"{generated_dir}window.py", 'w') as py_file:
py_file.writelines(final_code)
messagebox.showinfo("Success", "Design Exported successfully!")
############################################################################
# Required in order to add data files to Windows executable
import sys, os
path = getattr(sys, '_MEIPASS', os.getcwd())
os.chdir(path)
def btn_clicked():
token = token_entry.get()
URL = URL_entry.get()
if not token:
messagebox.showerror(title="Empty Fields",
message="Please enter Token")
elif not URL:
messagebox.showerror(title="Empty Fields",
message="Please enter URL")
elif not output_path:
messagebox.showerror(title="invalid path",
message="Enter a valid output path")
else:
generate_code(token,URL, output_path)
def select_path(event):
global output_path
# window.withdraw()
output_path = filedialog.askdirectory()
path_entry.delete(0, END)
path_entry.insert(0, output_path)
# window.deiconify()
def make_label(master, x, y, h, w, *args, **kwargs):
f = Frame(master, height=h, width=w)
f.pack_propagate(0) # don't shrink
f.place(x=x, y=y)
label = Label(f, *args, **kwargs)
label.pack(fill=BOTH, expand=1)
return label
##################################################################
window = Tk()
window.title("Proxlight Designer")
window.iconbitmap("Icon.ico")
window.geometry("1000x600")
window.configure(bg = "#FFFFFF")
canvas = Canvas(
window,
bg = "#FFFFFF",
height = 600,
width = 1000,
bd = 0,
highlightthickness = 0,
relief = "ridge")
canvas.place(x = 0, y = 0)
background_img = PhotoImage(file = f"background.png")
background = canvas.create_image(
474.5, 286.0,
image=background_img)
canvas.create_text(
635.0, 100,
text = "Enter Details Here",
fill = "#ffffff",
font = ("Roboto-Light", int(14.0)))
canvas.create_text(
98, 563,
text = "Made with by Proxlight",
fill = "#949494",
font = ("Roboto-Thin", int(12.0)))
token_entry_img = PhotoImage(file = f"img_textBox0.png")
token_entry_bg = canvas.create_image(
703.5, 187.5,
image = token_entry_img)
token_entry = Entry(
bd = 0,
bg = "#ffffff",
highlightthickness = 0)
token_entry.place(
x = 569.5, y = 162,
width = 268.0,
height = 49)
URL_entry_img = PhotoImage(file = f"img_textBox1.png")
URL_entry_bg = canvas.create_image(
703.5, 290.5,
image = URL_entry_img)
URL_entry = Entry(
bd = 0,
bg = "#ffffff",
highlightthickness = 0)
URL_entry.place(
x = 569.5, y = 265,
width = 268.0,
height = 49)
path_entry_img = PhotoImage(file = f"img_textBox2.png")
path_entry_bg = canvas.create_image(
703.5, 393.5,
image = path_entry_img)
path_entry = Entry(
bd = 0,
bg = "#ffffff",
highlightthickness = 0)
path_entry.place(
x = 569.5, y = 368,
width = 268.0,
height = 49)
path_entry.bind("<1>", select_path)
img0 = PhotoImage(file = f"img0.png")
b0 = Button(
image = img0,
borderwidth = 0,
highlightthickness = 0,
command = btn_clicked,
relief = "flat")
b0.place(
x = 642, y = 471,
width = 123,
height = 49)
window.resizable(False, False)
window.mainloop()
|
the-stack_106_19994
|
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='eririn', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.2.114', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Generate Excel Graph Paper sheets from image files', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/kimrin/eririn', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Takeshi Kimura', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial :: Spreadsheet',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate you support Python 3. These classifiers are *not*
# checked by 'pip install'. See instead 'python_requires' below.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='openpyxl Pillow Excel', # Optional
# When your source code is in a subdirectory under the project root, e.g.
# `src/`, it is necessary to specify the `package_dir` argument.
package_dir={'': 'src'}, # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(where='src'), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. See
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires='>=3.5, <4',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['openpyxl', 'Pillow', 'scikit-learn', 'docopt', 'tqdm'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': [],
'test': [],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'eririn=eririn:maineririn',
],
},
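    # With this entry point, installing the package (e.g. `pip install .`) should
    # expose an `eririn` command that calls eririn.maineririn() -- a sketch of the
    # intended usage, not verified here.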
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/kimrin/eririn/issues',
'Funding': 'https://donate.pypi.org',
'Say Thanks!': 'https://twitter.com/kimrin/',
'Source': 'https://github.com/kimrin/eririn/',
},
)
|
the-stack_106_19997
|
import unittest
import operator
import types
from saucebrush.filters import (Filter, YieldFilter, FieldFilter,
SubrecordFilter, ConditionalPathFilter,
ConditionalFilter, FieldModifier, FieldKeeper,
FieldRemover, FieldMerger, FieldAdder,
FieldCopier, FieldRenamer, Unique)
class DummyRecipe(object):
rejected_record = None
rejected_msg = None
def reject_record(self, record, msg):
self.rejected_record = record
self.rejected_msg = msg
class Doubler(Filter):
def process_record(self, record):
return record*2
class OddRemover(Filter):
def process_record(self, record):
if record % 2 == 0:
return record
else:
return None # explicitly return None
class ListFlattener(YieldFilter):
def process_record(self, record):
for item in record:
yield item
class FieldDoubler(FieldFilter):
def process_field(self, item):
return item*2
class NonModifyingFieldDoubler(Filter):
def __init__(self, key):
self.key = key
def process_record(self, record):
record = dict(record)
record[self.key] *= 2
return record
class ConditionalOddRemover(ConditionalFilter):
def test_record(self, record):
# return True for even values
return record % 2 == 0
class FilterTestCase(unittest.TestCase):
def _simple_data(self):
return [{'a':1, 'b':2, 'c':3},
{'a':5, 'b':5, 'c':5},
{'a':1, 'b':10, 'c':100}]
def assert_filter_result(self, filter_obj, expected_data):
result = filter_obj.attach(self._simple_data())
self.assertEqual(list(result), expected_data)
def test_reject_record(self):
recipe = DummyRecipe()
f = Doubler()
result = f.attach([1,2,3], recipe=recipe)
# next has to be called for attach to take effect
next(result)
f.reject_record('bad', 'this one was bad')
# ensure that the rejection propagated to the recipe
self.assertEqual('bad', recipe.rejected_record)
self.assertEqual('this one was bad', recipe.rejected_msg)
def test_simple_filter(self):
df = Doubler()
result = df.attach([1,2,3])
# ensure we got a generator that yields 2,4,6
self.assertEqual(type(result), types.GeneratorType)
self.assertEqual(list(result), [2,4,6])
def test_simple_filter_return_none(self):
cf = OddRemover()
result = cf.attach(range(10))
# ensure only even numbers remain
self.assertEqual(list(result), [0,2,4,6,8])
def test_simple_yield_filter(self):
lf = ListFlattener()
result = lf.attach([[1],[2,3],[4,5,6]])
# ensure we got a generator that yields 1,2,3,4,5,6
self.assertEqual(type(result), types.GeneratorType)
self.assertEqual(list(result), [1,2,3,4,5,6])
def test_simple_field_filter(self):
ff = FieldDoubler(['a', 'c'])
# check against expected data
expected_data = [{'a':2, 'b':2, 'c':6},
{'a':10, 'b':5, 'c':10},
{'a':2, 'b':10, 'c':200}]
self.assert_filter_result(ff, expected_data)
def test_conditional_filter(self):
cf = ConditionalOddRemover()
result = cf.attach(range(10))
# ensure only even numbers remain
self.assertEqual(list(result), [0,2,4,6,8])
### Tests for Subrecord
def test_subrecord_filter_list(self):
data = [{'a': [{'b': 2}, {'b': 4}]},
{'a': [{'b': 5}]},
{'a': [{'b': 8}, {'b':2}, {'b':1}]}]
expected = [{'a': [{'b': 4}, {'b': 8}]},
{'a': [{'b': 10}]},
{'a': [{'b': 16}, {'b':4}, {'b':2}]}]
sf = SubrecordFilter('a', NonModifyingFieldDoubler('b'))
result = sf.attach(data)
self.assertEqual(list(result), expected)
def test_subrecord_filter_deep(self):
data = [{'a': {'d':[{'b': 2}, {'b': 4}]}},
{'a': {'d':[{'b': 5}]}},
{'a': {'d':[{'b': 8}, {'b':2}, {'b':1}]}}]
expected = [{'a': {'d':[{'b': 4}, {'b': 8}]}},
{'a': {'d':[{'b': 10}]}},
{'a': {'d':[{'b': 16}, {'b':4}, {'b':2}]}}]
sf = SubrecordFilter('a.d', NonModifyingFieldDoubler('b'))
result = sf.attach(data)
self.assertEqual(list(result), expected)
def test_subrecord_filter_nonlist(self):
data = [
{'a':{'b':{'c':1}}},
{'a':{'b':{'c':2}}},
{'a':{'b':{'c':3}}},
]
expected = [
{'a':{'b':{'c':2}}},
{'a':{'b':{'c':4}}},
{'a':{'b':{'c':6}}},
]
sf = SubrecordFilter('a.b', NonModifyingFieldDoubler('c'))
result = sf.attach(data)
self.assertEqual(list(result), expected)
def test_subrecord_filter_list_in_path(self):
data = [
{'a': [{'b': {'c': 5}}, {'b': {'c': 6}}]},
{'a': [{'b': {'c': 1}}, {'b': {'c': 2}}, {'b': {'c': 3}}]},
{'a': [{'b': {'c': 2}} ]}
]
expected = [
{'a': [{'b': {'c': 10}}, {'b': {'c': 12}}]},
{'a': [{'b': {'c': 2}}, {'b': {'c': 4}}, {'b': {'c': 6}}]},
{'a': [{'b': {'c': 4}} ]}
]
sf = SubrecordFilter('a.b', NonModifyingFieldDoubler('c'))
result = sf.attach(data)
self.assertEqual(list(result), expected)
def test_conditional_path(self):
predicate = lambda r: r['a'] == 1
# double b if a == 1, otherwise double c
cpf = ConditionalPathFilter(predicate, FieldDoubler('b'),
FieldDoubler('c'))
expected_data = [{'a':1, 'b':4, 'c':3},
{'a':5, 'b':5, 'c':10},
{'a':1, 'b':20, 'c':100}]
self.assert_filter_result(cpf, expected_data)
### Tests for Generic Filters
def test_field_modifier(self):
# another version of FieldDoubler
fm = FieldModifier(['a', 'c'], lambda x: x*2)
# check against expected data
expected_data = [{'a':2, 'b':2, 'c':6},
{'a':10, 'b':5, 'c':10},
{'a':2, 'b':10, 'c':200}]
self.assert_filter_result(fm, expected_data)
def test_field_keeper(self):
fk = FieldKeeper(['c'])
# check against expected results
expected_data = [{'c':3}, {'c':5}, {'c':100}]
self.assert_filter_result(fk, expected_data)
def test_field_remover(self):
fr = FieldRemover(['a', 'b'])
# check against expected results
expected_data = [{'c':3}, {'c':5}, {'c':100}]
self.assert_filter_result(fr, expected_data)
def test_field_merger(self):
fm = FieldMerger({'sum':('a','b','c')}, lambda x,y,z: x+y+z)
# check against expected results
expected_data = [{'sum':6}, {'sum':15}, {'sum':111}]
self.assert_filter_result(fm, expected_data)
def test_field_merger_keep_fields(self):
fm = FieldMerger({'sum':('a','b','c')}, lambda x,y,z: x+y+z,
keep_fields=True)
# check against expected results
expected_data = [{'a':1, 'b':2, 'c':3, 'sum':6},
{'a':5, 'b':5, 'c':5, 'sum':15},
{'a':1, 'b':10, 'c':100, 'sum': 111}]
self.assert_filter_result(fm, expected_data)
def test_field_adder_scalar(self):
fa = FieldAdder('x', 7)
expected_data = [{'a':1, 'b':2, 'c':3, 'x':7},
{'a':5, 'b':5, 'c':5, 'x':7},
{'a':1, 'b':10, 'c':100, 'x': 7}]
self.assert_filter_result(fa, expected_data)
def test_field_adder_callable(self):
fa = FieldAdder('x', lambda: 7)
expected_data = [{'a':1, 'b':2, 'c':3, 'x':7},
{'a':5, 'b':5, 'c':5, 'x':7},
{'a':1, 'b':10, 'c':100, 'x': 7}]
self.assert_filter_result(fa, expected_data)
def test_field_adder_iterable(self):
fa = FieldAdder('x', [1,2,3])
expected_data = [{'a':1, 'b':2, 'c':3, 'x':1},
{'a':5, 'b':5, 'c':5, 'x':2},
{'a':1, 'b':10, 'c':100, 'x': 3}]
self.assert_filter_result(fa, expected_data)
def test_field_adder_replace(self):
fa = FieldAdder('b', lambda: 7)
expected_data = [{'a':1, 'b':7, 'c':3},
{'a':5, 'b':7, 'c':5},
{'a':1, 'b':7, 'c':100}]
self.assert_filter_result(fa, expected_data)
def test_field_adder_no_replace(self):
fa = FieldAdder('b', lambda: 7, replace=False)
expected_data = [{'a':1, 'b':2, 'c':3},
{'a':5, 'b':5, 'c':5},
{'a':1, 'b':10, 'c':100}]
self.assert_filter_result(fa, expected_data)
def test_field_copier(self):
fc = FieldCopier({'a2':'a', 'b2':'b'})
expected_data = [{'a':1, 'b':2, 'c':3, 'a2':1, 'b2':2},
{'a':5, 'b':5, 'c':5, 'a2':5, 'b2':5},
{'a':1, 'b':10, 'c':100, 'a2': 1, 'b2': 10}]
self.assert_filter_result(fc, expected_data)
def test_field_renamer(self):
fr = FieldRenamer({'x':'a', 'y':'b'})
expected_data = [{'x':1, 'y':2, 'c':3},
{'x':5, 'y':5, 'c':5},
{'x':1, 'y':10, 'c':100}]
self.assert_filter_result(fr, expected_data)
    # TODO: splitter & flattener tests?
def test_unique_filter(self):
u = Unique()
in_data = [{'a': 77}, {'a':33}, {'a': 77}]
expected_data = [{'a': 77}, {'a':33}]
result = u.attach(in_data)
self.assertEqual(list(result), expected_data)
# TODO: unicode & string filter tests
if __name__ == '__main__':
unittest.main()
|
the-stack_106_19998
|
# Author: Markus Böck
import numpy as np
import pandas as pd
import os
import re
import csv
import datetime
class IICF:
def __init__(self, path_to_data, dataset_type):
self.path_to_data = path_to_data
expanded_path = os.path.expanduser(path_to_data)
part_files = [os.path.join(expanded_path, f) for f in os.listdir(expanded_path) if dataset_type in f]
self.part_files = sorted(part_files, key = lambda x:x[-5:])
print("Found files:", self.part_files)
self.initialise_target_names()
self.initialise_ratings_matrix()
self.initialise_tweet_features()
print("\nFinished initialisation!")
def initialise_target_names(self):
self.all_features = ["text_tokens", "hashtags", "tweet_id", "present_media", "present_links", "present_domains",\
"tweet_type","language", "tweet_timestamp", "engaged_with_user_id", "engaged_with_user_follower_count",\
"engaged_with_user_following_count", "engaged_with_user_is_verified", "engaged_with_user_account_creation",\
"engaging_user_id", "enaging_user_follower_count", "enaging_user_following_count", "enaging_user_is_verified",\
"enaging_user_account_creation", "engagee_follows_engager", "reply", "retweet", "quote", "like"]
self.all_features_to_idx = dict(zip(self.all_features, range(len(self.all_features))))
self.feature_names = [
"photo",
"video",
"link",
"retweet",
"quote",
"toplevel",
"follower_count",
"following_count",
"verified"
]
self.target_names = [
"reply",
"retweet",
"quote",
"like"
]
self.feature_to_col = dict(zip(self.feature_names, range(len(self.feature_names))))
print("Tweet Features:", self.feature_to_col)
self.target_to_col = dict(zip(self.target_names, range(len(self.target_names))))
print("Tweet Targets:", self.target_to_col)
# Calculates ratings matrix,a pandas dataframe of form
# user_id, tweet_id, reply, retweet, quote, like, follow
# where follow indicates whether user_id follows author of tweet_id
# this may take a while
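    # For illustration, one row of R could look like (made-up values):
    #   user_id  tweet_id  reply  retweet  quote  like  follow
    #   u123     t456          0        1      0     1    True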
def initialise_ratings_matrix(self):
print("Initialising \"Ratings Matrix\" ...")
self.tweet_ids = {} # map tweet ids to row index
i = 0
total_entries = 0
# collect relevant data for "Ratings Matrix"
tids = []
uids = []
replies = []
retweets = []
quotes = []
likes = []
follows = []
print("Reading", self.part_files, "...")
for file in self.part_files:
with open(file, 'r') as f:
linereader = csv.reader(f, delimiter='\x01')
for row in linereader:
tweet_id = row[self.all_features_to_idx["tweet_id"]]
if tweet_id not in self.tweet_ids:
self.tweet_ids[tweet_id] = i # assign tweet_id row i
i += 1
user_id = row[self.all_features_to_idx["engaging_user_id"]]
total_entries += 1
reply = int(row[self.all_features_to_idx["reply"]] != "")
retweet = int(row[self.all_features_to_idx["retweet"]] != "")
quote = int(row[self.all_features_to_idx["quote"]] != "")
like = int(row[self.all_features_to_idx["like"]] != "")
# follow relationship is not a tweet feature and is collected here also
follow = bool(row[self.all_features_to_idx["engagee_follows_engager"]])
replies.append(reply)
retweets.append(retweet)
quotes.append(quote)
likes.append(like)
follows.append(follow)
tids.append(tweet_id)
uids.append(user_id)
if total_entries % 10000 == 0:
print(f"Read {total_entries} entries.", end="\r")
self.total_entries = total_entries
print("Read", self.total_entries, "entries.")
print("Convert to DataFrame ...") # maybe this is a bottleneck and could be sped up with sparse matrices
self.R = pd.DataFrame(
{"reply": replies,
"retweet": retweets,
"quote": quotes,
"like":likes,
"follow":follows},
index=pd.MultiIndex.from_arrays((uids, tids), names=["user_id", "tweet_id"]))
del replies
del retweets
del quotes
del likes
del follows
#print("Sort index ...")
#self.R.sort_index(inplace=True)
print("Done!")
# calculate average engagements
self.target_means = self.R[["reply", "retweet", "quote", "like"]].mean(axis=0)
print("Engagement means:")
print(self.target_means)
def parse_input_features(self, row):
tweet_id = row[self.all_features_to_idx['tweet_id']]
user_id = row[self.all_features_to_idx['engaging_user_id']]
input_feats = np.zeros((self.tweet_features.shape[1],),dtype=np.float32)
# one hot encode media (photo, video, link)
media = row[self.all_features_to_idx["present_media"]]
if "Photo" in media:
input_feats[self.feature_to_col["photo"]] = 1
if "Video" in media or "GIF" in media:
input_feats[self.feature_to_col["video"]] = 1
if row[self.all_features_to_idx["present_links"]] != "":
input_feats[self.feature_to_col["link"]] = 1
# one hot encode tweet type (toplevel, quote, retweet)
tweet_type = row[self.all_features_to_idx["tweet_type"]]
if tweet_type == "TopLevel":
input_feats[self.feature_to_col["toplevel"]] = 1
elif tweet_type == "Quote":
input_feats[self.feature_to_col["quote"]] = 1
elif tweet_type == "Retweet":
input_feats[self.feature_to_col["retweet"]] = 1
# log10 follower count of tweet author
input_feats[self.feature_to_col["follower_count"]] = np.log10(int(row[self.all_features_to_idx["engaged_with_user_following_count"]])+1)
# log10 following count of tweet author
input_feats[self.feature_to_col["following_count"]] = np.log10(int(row[self.all_features_to_idx["engaged_with_user_follower_count"]])+1)
input_feats[self.feature_to_col["verified"]] = bool(row[self.all_features_to_idx["engaged_with_user_is_verified"]])
input_feats /= np.linalg.norm(input_feats) # normalise
# following relationship is not a tweet feature
follow = bool(row[self.all_features_to_idx["engagee_follows_engager"]])
tweet_timestamp = int(row[self.all_features_to_idx['tweet_timestamp']])
return tweet_id, user_id, input_feats, follow, tweet_timestamp
# calculates and stores tweet features and average tweet engagements of training data
# in self.tweet_features and self.tweet_targets
# this may take a while
def initialise_tweet_features(self):
print("Calculate tweet features ...")
# precompute all tweet features
self.tweet_features = np.zeros((len(self.tweet_ids), len(self.feature_names)), dtype=np.float32)
# precompute engagement means for each tweet
self.tweet_targets = np.zeros((len(self.tweet_ids), len(self.target_names)), dtype=np.float32)
# count tweets for averaging
tweet_counts = np.zeros((len(self.tweet_ids),), dtype=np.float32)
# collect timestamps for consistency check
        tweet_timestamps = np.zeros((len(self.tweet_ids),), dtype=np.int64)
for file in self.part_files:
with open(file, 'r') as f:
linereader = csv.reader(f, delimiter='\x01')
j = 0
for row in linereader:
tweet_id, user_id, input_feats, follow, timestamp = self.parse_input_features(row)
tweet_index = self.tweet_ids[tweet_id] # get row for tweet id
if tweet_timestamps[tweet_index] != 0:
assert timestamp == tweet_timestamps[tweet_index], "Found tweet with different timestamps!"
else:
tweet_timestamps[tweet_index] = timestamp
self.tweet_features[tweet_index,:] = input_feats # store tweet features
# count engagements
if row[self.all_features_to_idx["reply"]]:
self.tweet_targets[tweet_index, self.target_to_col["reply"]] += 1
if row[self.all_features_to_idx["retweet"]]:
self.tweet_targets[tweet_index, self.target_to_col["retweet"]] += 1
if row[self.all_features_to_idx["quote"]]:
self.tweet_targets[tweet_index, self.target_to_col["quote"]] += 1
if row[self.all_features_to_idx["like"]]:
self.tweet_targets[tweet_index, self.target_to_col["like"]] += 1
# count occurences of tweet
tweet_counts[tweet_index] += 1
j += 1
if j % 10000 == 0:
print(f"{j}/{self.total_entries}", end="\r")
print(f"{j}/{self.total_entries} Done!")
# average engagements
self.tweet_targets /= tweet_counts.reshape(-1,1)
# gets row of data and predicts all features simultaneously
# this is faster than predicting each target alone
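    # The prediction follows the usual item-item CF form (sketched with the
    # variable names used below): for each engagement target t,
    #   pred_t = target_mean_t + sum_j w_j * (r_{u,j} - item_mean_{j,t})
    # where w_j are the normalised feature similarities between the query tweet
    # and the tweets the user has already engaged with.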
def predict(self, tweet_id, user_id, features, follow):
try:
# neighbourhood: get all tweets from engaging user
# throws KeyError if user_id is unknown
rated_tweets = self.R.loc[user_id,:]
# filter for tweets with the same follow relationship
rated_tweets = rated_tweets[rated_tweets.follow == follow]
# transform tweet_ids to row indexes
rated_tweets_ids = rated_tweets.index.values
rated_tweets_indexes = [self.tweet_ids[tid] for tid in rated_tweets_ids]
# similiartiy is the angle between features (features are normalised)
similarities = self.tweet_features[rated_tweets_indexes,:].dot(features)
# calculate weights as usual
weights = similarities / np.sum(np.abs(similarities))
# get engagement means for tweets in neighbourhood
item_means = self.tweet_targets[rated_tweets_indexes,:]
# transform user engagments to np array
user_ratings = np.array(rated_tweets)[:,0:4] # 5th column is follow status
# make predictions according to formula
target_prediction = self.target_means + weights.dot(user_ratings - item_means)
# restrict the predictions to the interval [0,1]
target_prediction = target_prediction.clip(0,1)
reply_pred, retweet_pred, quote_pred, fav_pred = target_prediction
return reply_pred, retweet_pred, quote_pred, fav_pred
except KeyError:
# user not known => predict average in training data
reply_pred, retweet_pred, quote_pred, fav_pred = self.target_means
return reply_pred, retweet_pred, quote_pred, fav_pred
|
the-stack_106_19999
|
import pytest
from thinc.api import chain, ReLu, reduce_max, Softmax, with_ragged
from thinc.api import ParametricAttention, list2ragged, reduce_sum
from thinc.util import DataValidationError
def test_validation():
model = chain(ReLu(10), ReLu(10), with_ragged(reduce_max()), Softmax())
with pytest.raises(DataValidationError):
model.initialize(X=model.ops.alloc2f(1, 10), Y=model.ops.alloc2f(1, 10))
with pytest.raises(DataValidationError):
model.initialize(X=model.ops.alloc3f(1, 10, 1), Y=model.ops.alloc2f(1, 10))
with pytest.raises(DataValidationError):
model.initialize(X=[model.ops.alloc2f(1, 10)], Y=model.ops.alloc2f(1, 10))
def test_validation_complex():
good_model = chain(list2ragged(), reduce_sum(), ReLu(12, dropout=0.5), ReLu(1))
X = [good_model.ops.xp.zeros((4, 75), dtype="f")]
Y = good_model.ops.xp.zeros((1,), dtype="f")
good_model.initialize(X, Y)
good_model.predict(X)
bad_model = chain(
list2ragged(),
reduce_sum(),
ReLu(12, dropout=0.5),
# ERROR: Why can't I attach a ReLu to an attention layer?
ParametricAttention(12),
ReLu(1),
)
with pytest.raises(DataValidationError):
bad_model.initialize(X, Y)
|
the-stack_106_20002
|
# protobuf_parser.py
#
# simple parser for parsing protobuf .proto files
#
# Copyright 2010, Paul McGuire
#
from pyparsing import (Word, alphas, alphanums, Regex, Suppress, Forward,
Group, oneOf, ZeroOrMore, Optional, delimitedList, Keyword,
restOfLine, quotedString, Dict)
ident = Word(alphas+"_",alphanums+"_").setName("identifier")
integer = Regex(r"[+-]?\d+")
LBRACE,RBRACE,LBRACK,RBRACK,LPAR,RPAR,EQ,SEMI = map(Suppress,"{}[]()=;")
kwds = """message required optional repeated enum extensions extends extend
to package service rpc returns true false option import"""
for kw in kwds.split():
exec("%s_ = Keyword('%s')" % (kw.upper(), kw))
messageBody = Forward()
messageDefn = MESSAGE_ - ident("messageId") + LBRACE + messageBody("body") + RBRACE
typespec = oneOf("""double float int32 int64 uint32 uint64 sint32 sint64
fixed32 fixed64 sfixed32 sfixed64 bool string bytes""") | ident
rvalue = integer | TRUE_ | FALSE_ | ident
fieldDirective = LBRACK + Group(ident + EQ + rvalue) + RBRACK
fieldDefn = (( REQUIRED_ | OPTIONAL_ | REPEATED_ )("fieldQualifier") -
typespec("typespec") + ident("ident") + EQ + integer("fieldint") + ZeroOrMore(fieldDirective) + SEMI)
# enumDefn ::= 'enum' ident '{' { ident '=' integer ';' }* '}'
enumDefn = ENUM_("typespec") - ident('name') + LBRACE + Dict( ZeroOrMore( Group(ident + EQ + integer + SEMI) ))('values') + RBRACE
# extensionsDefn ::= 'extensions' integer 'to' integer ';'
extensionsDefn = EXTENSIONS_ - integer + TO_ + integer + SEMI
# messageExtension ::= 'extend' ident '{' messageBody '}'
messageExtension = EXTEND_ - ident + LBRACE + messageBody + RBRACE
# messageBody ::= { fieldDefn | enumDefn | messageDefn | extensionsDefn | messageExtension }*
messageBody << Group(ZeroOrMore( Group(fieldDefn | enumDefn | messageDefn | extensionsDefn | messageExtension) ))
# methodDefn ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';'
methodDefn = (RPC_ - ident("methodName") +
LPAR + Optional(ident("methodParam")) + RPAR +
RETURNS_ + LPAR + Optional(ident("methodReturn")) + RPAR)
# serviceDefn ::= 'service' ident '{' methodDefn* '}'
serviceDefn = SERVICE_ - ident("serviceName") + LBRACE + ZeroOrMore(Group(methodDefn)) + RBRACE
# packageDirective ::= 'package' ident [ '.' ident]* ';'
packageDirective = Group(PACKAGE_ - delimitedList(ident, '.', combine=True) + SEMI)
comment = '//' + restOfLine
importDirective = IMPORT_ - quotedString("importFileSpec") + SEMI
optionDirective = OPTION_ - ident("optionName") + EQ + quotedString("optionValue") + SEMI
topLevelStatement = Group(messageDefn | messageExtension | enumDefn | serviceDefn | importDirective | optionDirective)
parser = Optional(packageDirective) + ZeroOrMore(topLevelStatement)
parser.ignore(comment)
test1 = """message Person {
required int32 id = 1;
required string name = 2;
optional string email = 3;
}"""
test2 = """package tutorial;
message Person {
required string name = 1;
required int32 id = 2;
optional string email = 3;
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
message PhoneNumber {
required string number = 1;
optional PhoneType type = 2 [default = HOME];
}
repeated PhoneNumber phone = 4;
}
message AddressBook {
repeated Person person = 1;
}"""
parser.runTests([test1, test2])
|
the-stack_106_20003
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import autograd
from models.layers import MLP
from models.reparam import NormalDistributionLinear
from utils import loss_kld_gaussian, loss_recon_gaussian, normal_energy_func
from utils import logprob_gaussian
from utils import get_nonlinear_func
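# reparameterisation trick: z = mu + sigma * eps with eps ~ N(0, I), so sampling stays differentiable w.r.t. mu and logvar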
def sample_gaussian(mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + std * eps
class Encoder(nn.Module):
def __init__(self,
input_dim=2,
h_dim=64,
z_dim=2,
nonlinearity='softplus',
num_hidden_layers=1,
):
super().__init__()
self.input_dim = input_dim
self.h_dim = h_dim
self.z_dim = z_dim
self.nonlinearity = nonlinearity
self.num_hidden_layers = num_hidden_layers
self.main = MLP(input_dim=input_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)
self.reparam = NormalDistributionLinear(h_dim, z_dim)
def sample(self, mu, logvar):
return self.reparam.sample_gaussian(mu, logvar)
def forward(self, x):
batch_size = x.size(0)
x = x.view(batch_size, self.input_dim)
# forward
h = self.main(x)
mu, logvar = self.reparam(h)
# sample
z = self.sample(mu, logvar)
return z, mu, logvar
class Decoder(nn.Module):
def __init__(self,
input_dim=2,
h_dim=64,
z_dim=2,
nonlinearity='tanh',
num_hidden_layers=1,
init='gaussian', #None,
):
super().__init__()
self.input_dim = input_dim
self.h_dim = h_dim
self.z_dim = z_dim
self.nonlinearity = nonlinearity
self.num_hidden_layers = num_hidden_layers
self.init = init
self.main = MLP(input_dim=z_dim, hidden_dim=h_dim, output_dim=h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)
self.reparam = NormalDistributionLinear(h_dim, input_dim)
if self.init == 'gaussian':
self.reset_parameters()
else:
pass
def reset_parameters(self):
nn.init.normal_(self.reparam.mean_fn.weight)
def sample(self, mu, logvar):
return self.reparam.sample_gaussian(mu, logvar)
def forward(self, z):
batch_size = z.size(0)
z = z.view(batch_size, -1)
# forward
h = self.main(z)
mu, logvar = self.reparam(h)
# sample
x = self.sample(mu, logvar)
return x, mu, logvar
class VAE(nn.Module):
def __init__(self,
energy_func=normal_energy_func,
input_dim=2,
h_dim=64,
z_dim=2,
nonlinearity='softplus',
num_hidden_layers=1,
init='gaussian', #None,
):
super().__init__()
self.energy_func = energy_func
self.input_dim = input_dim
self.h_dim = h_dim
self.z_dim = z_dim
self.latent_dim = self.z_dim # for ais
self.nonlinearity = nonlinearity
self.num_hidden_layers = num_hidden_layers
self.init = init
self.encode = Encoder(input_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers)
self.decode = Decoder(input_dim, h_dim, z_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers)
def loss(self, mu_z, logvar_z, mu_x, logvar_x, target_x, beta=1.0):
# kld loss
kld_loss = loss_kld_gaussian(mu_z, logvar_z, do_sum=False)
# recon loss (likelihood)
recon_loss = loss_recon_gaussian(mu_x, logvar_x, target_x.view(-1, 2), do_sum=False)
# add loss
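        # beta weights the KL term relative to the reconstruction term (beta-VAE style)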
loss = recon_loss + beta*kld_loss
return loss.mean(), recon_loss.mean(), kld_loss.mean()
def forward(self, input, beta=1.0):
# init
batch_size = input.size(0)
input = input.view(batch_size, self.input_dim)
# encode
z, mu_z, logvar_z = self.encode(input)
# decode
x, mu_x, logvar_x = self.decode(z)
# loss
loss, recon_loss, kld_loss \
= self.loss(mu_z, logvar_z,
mu_x, logvar_x, input,
beta=beta,
)
# return
return x, mu_x, z, loss, recon_loss.detach(), kld_loss.detach()
def generate(self, batch_size=1):
# init mu_z and logvar_z (as unit normal dist)
weight = next(self.parameters())
mu_z = weight.new_zeros(batch_size, self.z_dim)
logvar_z = weight.new_zeros(batch_size, self.z_dim)
# sample z (from unit normal dist)
z = sample_gaussian(mu_z, logvar_z) # sample z
# decode
output, mu_x, logvar_x = self.decode(z)
# return
return output, mu_x, z
def logprob(self, input, sample_size=128, z=None):
# init
batch_size = input.size(0)
input = input.view(batch_size, self.input_dim)
''' get log q(z|x) '''
_, mu_qz, logvar_qz = self.encode(input)
mu_qz = mu_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
logvar_qz = logvar_qz.detach().repeat(1, sample_size).view(batch_size, sample_size, self.z_dim)
z = self.encode.sample(mu_qz, logvar_qz)
logposterior = logprob_gaussian(mu_qz, logvar_qz, z, do_unsqueeze=False, do_mean=False)
logposterior = torch.sum(logposterior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
''' get log p(z) '''
# get prior (as unit normal dist)
mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
logprior = logprob_gaussian(mu_pz, logvar_pz, z, do_unsqueeze=False, do_mean=False)
logprior = torch.sum(logprior.view(batch_size, sample_size, self.z_dim), dim=2) # bsz x ssz
''' get log p(x|z) '''
# decode
#mu_x, logvar_x = [], []
#for i in range(batch_size):
# _, _mu_x, _logvar_x = self.decode(z[i, :, :]) # ssz x zdim
# mu_x += [_mu_x.detach().unsqueeze(0)]
# logvar_x += [_logvar_x.detach().unsqueeze(0)]
#mu_x = torch.cat(mu_x, dim=0) # bsz x ssz x input_dim
#logvar_x = torch.cat(logvar_x, dim=0) # bsz x ssz x input_dim
_z = z.view(-1, self.z_dim)
_, mu_x, logvar_x = self.decode(_z) # bsz*ssz x zdim
mu_x = mu_x.view(batch_size, sample_size, self.input_dim)
logvar_x = logvar_x.view(batch_size, sample_size, self.input_dim)
_input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
loglikelihood = logprob_gaussian(mu_x, logvar_x, _input, do_unsqueeze=False, do_mean=False)
loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
''' get log p(x|z)p(z)/q(z|x) '''
logprob = loglikelihood + logprior - logposterior # bsz x ssz
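        # importance-weighted estimate: log p(x) ~ log(mean_s exp(logprob_s)), computed with
        # max-subtraction (log-sum-exp trick) for numerical stability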
logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
rprob = (logprob - logprob_max).exp() # relative prob
logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
# return
return logprob.mean()
def logprob_w_prior(self, input, sample_size=128, z=None):
# init
batch_size = input.size(0)
input = input.view(batch_size, self.input_dim)
''' get z samples from p(z) '''
# get prior (as unit normal dist)
if z is None:
mu_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
logvar_pz = input.new_zeros(batch_size, sample_size, self.z_dim)
z = sample_gaussian(mu_pz, logvar_pz) # sample z
''' get log p(x|z) '''
# decode
_z = z.view(-1, self.z_dim)
_, mu_x, logvar_x = self.decode(_z) # bsz*ssz x zdim
mu_x = mu_x.view(batch_size, sample_size, self.input_dim)
logvar_x = logvar_x.view(batch_size, sample_size, self.input_dim)
_input = input.unsqueeze(1).expand(batch_size, sample_size, self.input_dim) # bsz x ssz x input_dim
loglikelihood = logprob_gaussian(mu_x, logvar_x, _input, do_unsqueeze=False, do_mean=False)
loglikelihood = torch.sum(loglikelihood, dim=2) # bsz x ssz
''' get log p(x) '''
logprob = loglikelihood # bsz x ssz
logprob_max, _ = torch.max(logprob, dim=1, keepdim=True)
rprob = (logprob-logprob_max).exp() # relative prob
logprob = torch.log(torch.mean(rprob, dim=1, keepdim=True) + 1e-10) + logprob_max # bsz x 1
# return
return logprob.mean()
|
the-stack_106_20004
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=19
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.Z.on(input_qubit[3])) # number=16
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.Y.on(input_qubit[2])) # number=17
c.append(cirq.Y.on(input_qubit[2])) # number=18
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
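    # compile the circuit to the Sycamore gate set using sqrt-iSWAP decompositions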
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma713.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
the-stack_106_20005
|
from litex.build.generic_platform import *
from litex.build.lattice import LatticePlatform
from litex.build.openfpgaloader import OpenFPGALoader
from litex.soc.cores.bitbang import I2CMaster
from ..crg_ecp5 import CRG
from litespi.opcodes import SpiNorFlashOpCodes as Codes
from ..flash_modules import S25FL064L
from ..hyperram import HyperRAM
from ..serial_led import SerialLedController
from migen import ClockDomainsRenamer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
('clk30', 0, Pins('L16'), IOStandard('LVCMOS33')),
# Leds
('serial_led', 0, Pins('N12'), IOStandard('LVCMOS33')),
('programn', 0, Pins('P11'), IOStandard('LVCMOS33')),
('btn', 0, Pins('P12'), IOStandard('LVCMOS33')),
# Debug
('debug', 0,
Subsignal('jtck', Pins('B13')),
Subsignal('jtck_dir', Pins('B14')),
Subsignal('jtms', Pins('A14')),
Subsignal('jtms_dir', Pins('A15')),
Subsignal('jtdo', Pins('B12')),
Subsignal('jtdi', Pins('A12')),
Subsignal('jtdi_dir', Pins('A13')),
Subsignal('nrst', Pins('A11')),
Subsignal('nrst_dir', Pins('B11')),
IOStandard('LVCMOS33')
),
# Trace
('trace', 0,
Subsignal('clk', Pins('C8')),
Subsignal('data', Pins('A10 B9 A9 B8')),
IOStandard('LVCMOS33')
),
# Target power
('target_power', 0,
Subsignal('vtref_en', Pins('C6')),
Subsignal('vtref_sel', Pins('D6')),
Subsignal('vtref_fault', Pins('C5')),
Subsignal('vtpwr_en', Pins('D4')),
Subsignal('vtpwr_sel', Pins('C4')),
Subsignal('vtpwr_fault', Pins('D5')),
IOStandard('LVCMOS33')
),
# GPIO
('gpio', 0,
Subsignal('data', Pins('A2')),
Subsignal('dir', Pins('B3')),
),
('gpio', 1,
Subsignal('data', Pins('A3')),
Subsignal('dir', Pins('A4')),
),
('gpio', 2,
Subsignal('data', Pins('B4')),
Subsignal('dir', Pins('B5')),
),
('gpio', 3,
Subsignal('data', Pins('A5')),
Subsignal('dir', Pins('A6')),
),
('gpio', 4,
Subsignal('data', Pins('C7')),
Subsignal('dir', Pins('B7')),
),
('gpio', 5,
Subsignal('data', Pins('A7')),
Subsignal('dir', Pins('A8')),
),
# HyperRAM
('hyperram', 0,
Subsignal('rst_n', Pins('M16')),
Subsignal('cs_n', Pins('M15')),
Subsignal('clk', Pins('N16')),
Subsignal('rwds', Pins('R12')),
Subsignal('dq', Pins('T15 P13 T14 R13 T13 R14 R15 P14')),
IOStandard('LVCMOS33')
),
# SPIFlash
('spiflash', 0,
Subsignal('cs_n', Pins('N8')),
Subsignal('mosi', Pins('T8')),
Subsignal('miso', Pins('T7')),
Subsignal('wp', Pins('M7')),
Subsignal('hold', Pins('N7')),
IOStandard('LVCMOS33')
),
('spiflash4x', 0,
Subsignal('cs_n', Pins('N8')),
Subsignal('dq', Pins('T8', 'T7', 'M7', 'N7')),
IOStandard('LVCMOS33')
),
# USB
('ulpi', 0,
Subsignal('rst_n', Pins('T4')),
Subsignal('clk_o', Pins('R5')),
Subsignal('dir', Pins('T3')),
Subsignal('nxt', Pins('R3')),
Subsignal('stp', Pins('R4')),
Subsignal('data', Pins('T2 R2 R1 P2 P1 N1 M2 M1')),
IOStandard('LVCMOS33')
),
# I2C
('i2c', 0,
Subsignal('scl', Pins('L2')),
Subsignal('sda', Pins('L1')),
IOStandard('LVCMOS33')
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
('ext',
'- - - - - - - - - - - - - - - - - - '
'J5 K1 J4 K2 - - '
'K3 J1 J3 J2 - - '
'H4 H2 H5 G1 - - '
'G3 G2 H3 F1 - - '
'G4 F2 G5 E1 - - '
'F3 E2 E3 D1 - - '
'F4 C1 F5 C2 - - '
'D3 B1 C3 B2 - - '
'- - - - '
),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticePlatform):
default_clk_name = 'clk30'
default_clk_period = 1e9/30e6
def __init__(self, device='25F', toolchain='trellis', **kwargs):
assert device in ['25F', '45F']
LatticePlatform.__init__(self, f'LFE5U-{device}-8BG256C', _io, _connectors, toolchain=toolchain, **kwargs)
def get_crg(self, sys_clk_freq):
crg = CRG(self, sys_clk_freq)
crg.add_usb()
return crg
def get_flash_module(self):
return S25FL064L(Codes.READ_1_4_4)
def add_leds(self, soc):
soc.submodules.serial_led = SerialLedController(self.request('serial_led'), 5)
soc.led_status = soc.serial_led.leds[0]
soc.led_debug = soc.serial_led.leds[1]
soc.led_trace = soc.serial_led.leds[2]
soc.led_vtref = soc.serial_led.leds[3]
soc.led_vtpwr = soc.serial_led.leds[4]
def add_platform_specific(self, soc):
# I2C
soc.submodules.i2c = I2CMaster(self.request('i2c'))
# HyperRAM
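        # remap the HyperRAM core's hr/hr2x clock domains (and their 90-degree shifted variants) onto the SoC sys clocks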
cdr = ClockDomainsRenamer({
'hr': 'sys',
'hr2x': 'sys2x',
'hr_90': 'sys_90',
'hr2x_90': 'sys2x_90',
})
pads = self.request('hyperram')
soc.submodules.hyperram = cdr(HyperRAM(pads))
soc.add_csr('hyperram')
soc.register_mem('hyperram', soc.mem_map.get('hyperram', 0x20000000), soc.hyperram.bus, size = 0x800000)
soc.comb += pads.rst_n.eq(1)
def create_programmer(self):
return OpenFPGALoader('ecpix5')
def do_finalize(self, fragment):
LatticePlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request('clk30', loose=True), 1e9/30e6)
#self.add_period_constraint(self.lookup_request('ulpi:clk', 0, loose=True), 1e9/60e6)
self.add_period_constraint(self.lookup_request('trace:clk', 0, loose=True), 1e9/120e6)
@classmethod
def add_arguments(cls, parser):
parser.add_argument('--device', choices = ['25F', '45F'], default = '25F', help = 'ECP5 device (default: 25F)')
@classmethod
def get_profile(cls, profile):
return {
'default': {
'uart_name': 'stream',
'with_debug': True,
'with_trace': True,
'with_target_power': True,
#'with_dfu': 'runtime',
'ecppack_bootaddr': '0x0',
#'ecppack_spimode': 'qspi',
'ecppack_compress': True,
},
'dfu': {
'uart_name': 'stream',
'with_debug': False,
'with_trace': False,
'with_dfu': 'bootloader',
'usb_pid': 0x3442,
'led_default': 0xd, # status=purple
'output_dir': 'build/orbtrace_mini_dfu',
'bootloader_auto_reset': True,
'ecppack_bootaddr': '0x100000',
#'ecppack_spimode': 'qspi',
'ecppack_compress': True,
},
'test': {
'uart_name': 'stream',
'with_debug': False,
'with_trace': False,
'with_test_io': True,
'with_dfu': 'bootloader',
'usb_pid': 0x0001,
'led_default': 0xe, # status=cyan
'output_dir': 'build/orbtrace_mini_test',
'ecppack_compress': True,
},
}[profile]
|
the-stack_106_20007
|
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMessageBox, QVBoxLayout
import sys
class Window(QWidget):
def __init__(self):
QWidget.__init__(self)
layout = QVBoxLayout()
button1 = QPushButton()
button1.setText("Show dialog!")
button1.move(50,50)
button1.clicked.connect(self.showDialog)
layout.addWidget(button1)
# Set fixed window size
self.setFixedSize(500, 550)
# Set the central widget and the general layout
self.setLayout(layout)
def showDialog(self):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Information)
msgBox.setText("Message box pop up window")
msgBox.setWindowTitle("QMessageBox Example")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
msgBox.buttonClicked.connect(self.msgButtonClick)
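        # exec() blocks until the dialog is dismissed and returns the StandardButton that was clicked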
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
print('OK clicked')
def msgButtonClick(self, i):
print("Button clicked is:",i.text())
app = QApplication(sys.argv)
screen = Window()
screen.show()
sys.exit(app.exec_())
|
the-stack_106_20011
|
##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
class ClassParameterValueWidget( GafferUI.CompoundParameterValueWidget ) :
def __init__( self, parameterHandler, collapsible=None, **kw ) :
GafferUI.CompoundParameterValueWidget.__init__(
self,
parameterHandler,
collapsible,
_PlugValueWidget,
**kw
)
class _PlugValueWidget( GafferUI.CompoundParameterValueWidget._PlugValueWidget ) :
def __init__( self, parameterHandler, collapsed ) :
GafferUI.CompoundParameterValueWidget._PlugValueWidget.__init__( self, parameterHandler, collapsed )
def _headerWidget( self ) :
result = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 0 )
# label
label = GafferUI.Label(
"Class" if self._collapsible() is not None else self._parameterLabelText( self.parameterHandler() ),
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right
)
## \todo Decide how we allow this sort of tweak using the public
# interface. Perhaps we should have a SizeableContainer or something?
label._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
label.setToolTip( self._parameterToolTip( self._parameterHandler() ) )
result.append( label )
# space
result.append( GafferUI.Spacer( IECore.V2i( 8, 1 ) ) )
# class button
className, classVersion = self._parameter().getClass( True )[1:3]
classButton = GafferUI.MenuButton( className if className else "Choose...", hasFrame=False )
classButton.setMenu( self.__classMenu() )
result.append( classButton )
# version button
if className :
versionButton = GafferUI.MenuButton( " v%d" % classVersion if className else "", hasFrame=False )
versionButton.setMenu( self.__versionMenu() )
result.append( versionButton )
# a spacer to stop the buttons expanding
result.append( GafferUI.Spacer( IECore.V2i( 1, 1 ), IECore.V2i( 9999999, 1 ) ), expand=True )
return result
def __classMenu( self ) :
md = IECore.MenuDefinition()
classInfo = self._parameter().getClass( True )
classNameFilter = "*"
with IECore.IgnoredExceptions( KeyError ) :
classNameFilter = self._parameter().userData()["UI"]["classNameFilter"].value
menuPathStart = max( 0, classNameFilter.find( "*" ) )
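		# menuPathStart strips the fixed prefix (everything before the first wildcard) from class names when building menu paths below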
if classInfo[1] :
md.append(
"/Remove", { "command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), "", 0 ) }
)
md.append( "/RemoveDivider", { "divider" : True } )
loader = IECore.ClassLoader.defaultLoader( classInfo[3] )
for className in loader.classNames( classNameFilter ) :
classVersions = loader.versions( className )
for classVersion in classVersions :
menuPath = "/" + className[menuPathStart:]
if len( classVersions ) > 1 :
menuPath += "/v%d" % classVersion
md.append(
menuPath,
{
"command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), className, classVersion ),
"active" : className != classInfo[1] or classVersion != classInfo[2]
},
)
return GafferUI.Menu( md )
def __versionMenu( self ) :
md = IECore.MenuDefinition()
classInfo = self._parameter().getClass( True )
if classInfo[1] :
loader = IECore.ClassLoader.defaultLoader( classInfo[3] )
for version in loader.versions( classInfo[1] ) :
md.append(
"/v%d" % version,
{
"command" : IECore.curry( Gaffer.WeakMethod( self.__setClass ), classInfo[1], version ),
"active" : version != classInfo[2],
},
)
return GafferUI.Menu( md )
def __setClass( self, className, classVersion ) :
with self.getPlug().node().parameterModificationContext() :
self._parameter().setClass( className, classVersion )
GafferUI.ParameterValueWidget.registerType( IECore.ClassParameter.staticTypeId(), ClassParameterValueWidget )
|
the-stack_106_20012
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# robot_randomsearch.py
# Contact (this file only): nicolas.bredeche(at)upmc.fr
#
# Description:
#   Template for simple evolutionary robotics
#   This code uses pySpriteWorld, developed by Yann Chevaleyre (U. Paris 13)
#
# Dependencies:
#   Python 3.x
#   Matplotlib
#   Pygame
#
# History:
#   2016-03-28__23:23 - template for 3i025 (IA&RO, UPMC, CS undergraduate programme)
#   2018-03-28__14:06 - complete refactoring, synchronisation with the other examples, working random-search implementation
#   2019-04-02__11:42 - ported to Python 3.x
#
# This example illustrates random search over controllers, whose performance is measured against a task set by the user.
# The controller is a Perceptron-type neural network: the translation and rotation speeds are a linear combination of the sensory inputs and the parameters, passed through a hyperbolic tangent to obtain a non-linear response bounded between -1 and +1.
# The parameters stored in the genome are limited here to a vector whose elements take the value -1 (inhibition), 0 (no effect) or +1 (excitation).
# This is a black-box optimisation problem, where the performance of a candidate solution gives little information about how to modify its parameters to improve it.
# This code provides the basis for implementing artificial-evolution optimisation algorithms (genetic algorithms, evolution strategies, etc.)
# Note that it can also easily be extended to optimise the behaviour of a group of robots (rather than a single robot).
#
# Help on the code:
#   - the "stepController" method of the Agent class, which defines how the genome is used to modulate the robot's actions from the sensory information
#   - the "updateFitness" methods of the Agent class, which define a metric for the target task
#   - the content of the main (at the very bottom), where the exploration algorithm (here: a random search) is defined, including the initialisation, modification and storage of genomes, and their evaluation
# And also, possibly:
#   - the "global variables" section
#   - the setupAgents function (places the robots at the start of the simulation)
#   - the setupArena function (places obstacles at the start of the simulation)
#   - it is not advised to modify the other parts of the code.
#
# Help on the optimisation part:
#   - to modify a genome, change its size (in the main, at initialisation) and how it is used (in stepController)
#   - to define the objective, write a fitness function. What the fitness measures can be more or less directly related to the objective (e.g.: if the goal is to obtain explorers, the fitness can measure the ability to move in a straight line while avoiding walls)
#   - to draw from a normal distribution, use the gauss function. Example: random.gauss(0,1) <=> N(0,1) (i.e. a draw from a normal distribution centred on 0 with standard deviation 1)
from robosim import *
from random import random, shuffle, randint, gauss
import math
import time
import sys
import atexit
from itertools import count
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Help '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
#game.setMaxTranslationSpeed(3) # between -3 and 3
# size of arena:
# screenw,screenh = taille_terrain()
# OR: screen_width,screen_height
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' global variables '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
game = Game()
agents = []
screen_width=512 #512,768,... -- multiples de 32
screen_height=512 #512,768,... -- multiples de 32
nbAgents = 1
maxSensorDistance = 30 # used locally.
maxRotationSpeed = 5
maxTranslationSpeed = 3
SensorBelt = [-170,-80,-40,-20,+20,40,80,+170] # sensor angles, in degrees
showSensors = True
frameskip = 200 # 0: no-skip. >1: skip n-1 frames
verbose = True
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' Agent/Robot class '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
class Agent(object):
agentIdCounter = 0 # use as static
id = -1
robot = -1
name = "Equipe Evol" # A modifier avec le nom de votre équipe
    translationValue = 0 # do not modify directly
    rotationValue = 0 # do not modify directly
params = []
fitness = 0
previousPos = (0,0)
def __init__(self,robot):
self.id = Agent.agentIdCounter
Agent.agentIdCounter = Agent.agentIdCounter + 1
#print "robot #", self.id, " -- init"
self.robot = robot
def getRobot(self):
return self.robot
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def evaluate(self,_params):
self.resetPosition()
self.resetFitness()
self.params = list(_params)
for i in range(maxIterations):
updateSensors()
self.step()
            #self.updateFitness1() # to maximise the distance to the centre of the arena
            #self.updateFitness2() # to maximise the distance covered at each time step
            self.updateFitness3() # to maximise the distance covered at each time step, penalising rotation commands
game.mainiteration()
return self.fitness
def resetPosition(self):
p = self.robot
p.set_position( screen_width/2+random()*4 , screen_height/2+random()*4 )
p.oriente( random()*360 ) # DEBUG
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def resetFitness(self):
        self.previousPos = self.robot.get_centroid()
self.fitness = 0
def updateFitness1(self):
currentPos = self.robot.get_centroid()
        self.fitness += math.sqrt(abs(currentPos[0]**2-(screen_width/2)**2)) + math.sqrt(abs(currentPos[1]**2-(screen_height/2)**2)) # sum, at each time step, of the distance to the centre of the arena
def updateFitness2(self):
currentPos = self.robot.get_centroid()
        self.fitness += math.sqrt(abs(currentPos[0]**2-self.previousPos[0]**2)) + math.sqrt(abs(currentPos[1]**2-self.previousPos[1]**2)) # at each time step, add the distance covered since t-1
self.previousPos = currentPos
def updateFitness3(self):
currentPos = self.robot.get_centroid()
        self.fitness += ( 1 - abs(self.rotationValue/maxRotationSpeed) ) * math.sqrt(abs(currentPos[0]**2-self.previousPos[0]**2)) + math.sqrt(abs(currentPos[1]**2-self.previousPos[1]**2)) # at each time step, add the distance covered since t-1, penalised when rotating
self.previousPos = currentPos
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def stepController(self):
translation = 0
rotation = 0
#sensorMinus80 = self.getDistanceAtSensor(1)
sensorMinus40 = self.getDistanceAtSensor(2)
sensorMinus20 = self.getDistanceAtSensor(3)
sensorPlus20 = self.getDistanceAtSensor(4)
sensorPlus40 = self.getDistanceAtSensor(5)
#sensorPlus80 = self.getDistanceAtSensor(6)
        if len(self.params) != 10: # check that the number of parameters provided is correct
print ("[ERROR] number of parameters is incorrect. Exiting.")
exit()
# Perceptron: a linear combination of sensory inputs with weights (=parameters). Use an additional parameters as a bias, and apply hyperbolic tangeant to ensure result is in [-1,+1]
translation = math.tanh( sensorMinus40 * self.params[0] + sensorMinus20 * self.params[1] + sensorPlus20 * self.params[2] + sensorPlus40 * self.params[3] + self.params[4] )
rotation = math.tanh( sensorMinus40 * self.params[5] + sensorMinus20 * self.params[6] + sensorPlus20 * self.params[7] + sensorPlus40 * self.params[8] + self.params[9] )
#print ("robot #", self.id, "[r =",rotation," - t =",translation,"]")
self.setRotationValue( rotation )
self.setTranslationValue( translation )
return
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def step(self):
self.stepController()
self.move()
def move(self):
self.robot.forward(self.translationValue)
self.robot.rotate(self.rotationValue)
def getDistanceAtSensor(self,id):
        sensor_infos = sensors[self.robot] # sensor_infos is a list of namedtuples (one per sensor).
return min(sensor_infos[id].dist_from_border,maxSensorDistance) / maxSensorDistance
def getObjectTypeAtSensor(self,id):
if sensors[self.robot][id].dist_from_border > maxSensorDistance:
return 0 # nothing
elif sensors[self.robot][id].layer == 'joueur':
return 2 # robot
else:
return 1 # wall/border
def getRobotInfoAtSensor(self,id):
if sensors[self.robot][id].dist_from_border < maxSensorDistance and sensors[self.robot][id].layer == 'joueur':
otherRobot = sensors[self.robot][id].sprite
info = {'id': otherRobot.numero, 'centroid': otherRobot.get_centroid(), 'orientation': otherRobot.orientation()}
return info
else:
#print ("[WARNING] getPlayerInfoAtSensor(.): not a robot!")
return None
def setTranslationValue(self,value):
if value > 1:
print ("[WARNING] translation value not in [-1,+1]. Normalizing.")
value = maxTranslationSpeed
elif value < -1:
print ("[WARNING] translation value not in [-1,+1]. Normalizing.")
value = -maxTranslationSpeed
else:
value = value * maxTranslationSpeed
self.translationValue = value
def setRotationValue(self,value):
if value > 1:
print ("[WARNING] translation value not in [-1,+1]. Normalizing.")
value = maxRotationSpeed
elif value < -1:
print ("[WARNING] translation value not in [-1,+1]. Normalizing.")
value = -maxRotationSpeed
else:
value = value * maxRotationSpeed
self.rotationValue = value
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' init/step functions '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
def setupAgents():
global screen_width, screen_height, nbAgents, agents, game
# Make agents
nbAgentsCreated = 0
for i in range(nbAgents):
while True:
p = -1
            while p == -1: # add_players returns -1 if the robot cannot be placed here (obstacle)
p = game.add_players( (random()*screen_width , random()*screen_height) , None , tiled=False)
if p:
p.oriente( random()*360 )
p.numero = nbAgentsCreated
nbAgentsCreated = nbAgentsCreated + 1
agents.append(Agent(p))
break
game.mainiteration()
def setupArena():
for i in range(6,13):
addObstacle(row=3,col=i)
for i in range(3,10):
addObstacle(row=12,col=i)
addObstacle(row=4,col=12)
addObstacle(row=5,col=12)
addObstacle(row=6,col=12)
addObstacle(row=11,col=3)
addObstacle(row=10,col=3)
addObstacle(row=9,col=3)
def updateSensors():
global sensors
    # throw_rays...(...): expensive call (once per simulator iteration). Updates the collision mask for all robots.
sensors = throw_rays_for_many_players(game,game.layers['joueur'],SensorBelt,max_radius = maxSensorDistance+game.player.diametre_robot() , show_rays=showSensors)
def stepWorld():
updateSensors()
    # each agent updates itself. The update order is shuffled every time (avoids ordering effects).
shuffledIndexes = [i for i in range(len(agents))]
shuffle(shuffledIndexes)
for i in range(len(agents)):
agents[shuffledIndexes[i]].step()
return
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
''' internal functions '''
'''''''''''''''''''''''''''''
'''''''''''''''''''''''''''''
def addObstacle(row,col):
    # the sprite located at column 13, row 0 of the spritesheet
game.add_new_sprite('obstacle',tileid=(0,13),xy=(col,row),tiled=True)
class MyTurtle(Turtle): # also: limit robot speed through this derived class
maxRotationSpeed = maxRotationSpeed # 10, 10000, etc.
def rotate(self,a):
mx = MyTurtle.maxRotationSpeed
Turtle.rotate(self, max(-mx,min(a,mx)))
def onExit():
print ("\n[Terminated]")
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''' Main loop '''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''
''' World initialisation '''
''''''''''''''''''''''''''''''''''''
init('empty',MyTurtle,screen_width,screen_height) # display is re-dimensioned, turtle acts as a template to create new players/robots
game.auto_refresh = False # display will be updated only if game.mainiteration() is called
game.frameskip = frameskip
atexit.register(onExit)
setupArena()
setupAgents()
game.mainiteration()
''''''''''''''''''''''''''''''''''''
''' Learning/optimisation '''
''''''''''''''''''''''''''''''''''''
'''
for evaluationIt in range(maxEvaluations):
stepWorld()
game.mainiteration()
'''
print ("Optimizing.")
game.frameskip = 200 # (very) fast display -- Benchmark (2018/3/28): macbook pro 3.1Ghz 12" core i7 'early 2015': 1250 updates/sec
bestFitness = 0 # init with worst value
bestParams = []
bestEvalIt = 0
maxEvaluations = 100 # budget: total number of robots evaluated
maxIterations = 200 # time spent evaluating _one_ robot
nbReevaluations = 4
genomeSize = 10
for evaluationIt in range(maxEvaluations):
print ("Evaluation #"), evaluationIt
    # generate a new set of parameters
params = []
    for i in range(genomeSize): # genome size
        params.append(randint(-1,+1)) # build a genome made of N values: -1, 0 or +1
    # evaluate the parameters
    fitness = 0
    for i in range (nbReevaluations): # N independent evaluations
fitness += agents[0].evaluate(params)
if bestFitness < fitness:
bestParams = list(params)
bestFitness = fitness
bestEvalIt = evaluationIt
print ("\tParameters:", str(params))
print ("\tFitness:", fitness, "(best:", bestFitness,")")
game.frameskip = 1 # display at normal speed
print ("Display best individual")
print ("\tParameters:", str(bestParams))
i = 0
while True:
print ("\tTest #",i)
i = i + 1
    # evaluate the parameters
fitness = agents[0].evaluate(bestParams)
print ("\t\tFitness:", fitness, "(original recorded fitness:", bestFitness,", measured at evaluation",bestEvalIt,")")
print ("\t\tGenome:", bestParams)
|
the-stack_106_20014
|
"""
Utility functions that may prove useful when writing an ACME client.
"""
import uuid
from datetime import datetime, timedelta
from functools import wraps
from acme import jose
from acme.jose.errors import DeserializationError
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
from OpenSSL import crypto
from twisted.internet.defer import maybeDeferred
from twisted.python.url import URL
def generate_private_key(key_type):
"""
Generate a random private key using sensible parameters.
:param str key_type: The type of key to generate. One of: ``rsa``.
"""
if key_type == u'rsa':
return rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend())
raise ValueError(key_type)
def generate_tls_sni_01_cert(server_name, key_type=u'rsa',
_generate_private_key=None):
"""
Generate a certificate/key pair for responding to a tls-sni-01 challenge.
:param str server_name: The SAN the certificate should have.
:param str key_type: The type of key to generate; usually not necessary.
:rtype: ``Tuple[`~cryptography.x509.Certificate`, PrivateKey]``
:return: A tuple of the certificate and private key.
"""
key = (_generate_private_key or generate_private_key)(key_type)
name = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u'acme.invalid')])
cert = (
x509.CertificateBuilder()
.subject_name(name)
.issuer_name(name)
.not_valid_before(datetime.now() - timedelta(seconds=3600))
.not_valid_after(datetime.now() + timedelta(seconds=3600))
.serial_number(int(uuid.uuid4()))
.public_key(key.public_key())
.add_extension(
x509.SubjectAlternativeName([x509.DNSName(server_name)]),
critical=False)
.sign(
private_key=key,
algorithm=hashes.SHA256(),
backend=default_backend())
)
return (cert, key)
def cert_cryptography_to_pyopenssl(cert):
"""
Convert a `cryptography.x509.Certificate` object to an
``OpenSSL.crypto.X509`` object.
"""
return crypto.load_certificate(
crypto.FILETYPE_PEM,
cert.public_bytes(serialization.Encoding.PEM))
def key_cryptography_to_pyopenssl(key):
"""
Convert a Cryptography private key object to an ``OpenSSL.crypto.PKey``
object.
"""
return crypto.load_privatekey(
crypto.FILETYPE_PEM,
key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()))
def tap(f):
"""
"Tap" a Deferred callback chain with a function whose return value is
ignored.
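
    For example, to log an intermediate result without altering it (illustrative only)::

        d.addCallback(tap(lambda result: print(result)))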
"""
@wraps(f)
def _cb(res, *a, **kw):
d = maybeDeferred(f, res, *a, **kw)
d.addCallback(lambda ignored: res)
return d
return _cb
def encode_csr(csr):
"""
Encode CSR as JOSE Base-64 DER.
:param cryptography.x509.CertificateSigningRequest csr: The CSR.
:rtype: str
"""
return jose.encode_b64jose(csr.public_bytes(serialization.Encoding.DER))
def decode_csr(b64der):
"""
Decode JOSE Base-64 DER-encoded CSR.
:param str b64der: The encoded CSR.
:rtype: `cryptography.x509.CertificateSigningRequest`
:return: The decoded CSR.
"""
try:
return x509.load_der_x509_csr(
jose.decode_b64jose(b64der), default_backend())
except ValueError as error:
raise DeserializationError(error)
def csr_for_names(names, key):
"""
Generate a certificate signing request for the given names and private key.
.. seealso:: `acme.client.Client.request_issuance`
.. seealso:: `generate_private_key`
:param ``List[str]``: One or more names (subjectAltName) for which to
request a certificate.
:param key: A Cryptography private key object.
:rtype: `cryptography.x509.CertificateSigningRequest`
:return: The certificate request message.
"""
if len(names) == 0:
raise ValueError('Must have at least one name')
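    # an X.509 common name is limited to 64 characters, so fall back to a placeholder CN and rely on the SAN extension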
if len(names[0]) > 64:
common_name = u'san.too.long.invalid'
else:
common_name = names[0]
return (
x509.CertificateSigningRequestBuilder()
.subject_name(x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, common_name)]))
.add_extension(
x509.SubjectAlternativeName(list(map(x509.DNSName, names))),
critical=False)
.sign(key, hashes.SHA256(), default_backend()))
def clock_now(clock):
"""
Get a datetime representing the current time.
:param clock: An ``IReactorTime`` provider.
:rtype: `~datetime.datetime`
:return: A datetime representing the current time.
"""
return datetime.utcfromtimestamp(clock.seconds())
def check_directory_url_type(url):
"""
Check that ``url`` is a ``twisted.python.url.URL`` instance, raising
`TypeError` if it isn't.
"""
if not isinstance(url, URL):
raise TypeError(
'ACME directory URL should be a twisted.python.url.URL, '
'got {!r} instead'.format(url))
def const(x):
"""
Return a constant function.
"""
return lambda: x
__all__ = [
'generate_private_key', 'generate_tls_sni_01_cert',
'cert_cryptography_to_pyopenssl', 'key_cryptography_to_pyopenssl', 'tap',
'encode_csr', 'decode_csr', 'csr_for_names', 'clock_now',
'check_directory_url_type', 'const']
|
the-stack_106_20015
|
import os
import pathlib
import deepdiff
import inflection
import pytest
import mlrun
import mlrun.errors
import mlrun.projects.project
import tests.conftest
def test_sync_functions():
project_name = "project-name"
project = mlrun.new_project(project_name)
project.set_function("hub://describe", "describe")
project_function_object = project.spec._function_objects
project_file_path = pathlib.Path(tests.conftest.results) / "project.yaml"
project.export(str(project_file_path))
imported_project = mlrun.load_project("./", str(project_file_path))
assert imported_project.spec._function_objects == {}
imported_project.sync_functions()
_assert_project_function_objects(imported_project, project_function_object)
fn = project.func("describe")
assert fn.metadata.name == "describe", "func did not return"
# test that functions can be fetched from the DB (w/o set_function)
mlrun.import_function("hub://sklearn_classifier", new_name="train").save()
fn = project.func("train")
assert fn.metadata.name == "train", "train func did not return"
def test_create_project_from_file_with_legacy_structure():
project_name = "project-name"
description = "project description"
params = {"param_key": "param value"}
artifact_path = "/tmp"
legacy_project = mlrun.projects.project.MlrunProjectLegacy(
project_name, description, params, artifact_path=artifact_path
)
function_name = "trainer-function"
function = mlrun.new_function(function_name, project_name)
legacy_project.set_function(function, function_name)
legacy_project.set_function("hub://describe", "describe")
workflow_name = "workflow-name"
workflow_file_path = (
pathlib.Path(tests.conftest.tests_root_directory) / "projects" / "workflow.py"
)
legacy_project.set_workflow(workflow_name, str(workflow_file_path))
artifact_dict = {
"key": "raw-data",
"kind": "",
"iter": 0,
"tree": "latest",
"target_path": "https://raw.githubusercontent.com/mlrun/demos/master/customer-churn-prediction/WA_Fn-UseC_-Telc"
"o-Customer-Churn.csv",
"db_key": "raw-data",
}
legacy_project.artifacts = [artifact_dict]
legacy_project_file_path = pathlib.Path(tests.conftest.results) / "project.yaml"
legacy_project.save(str(legacy_project_file_path))
project = mlrun.load_project("./", str(legacy_project_file_path))
assert project.kind == "project"
assert project.metadata.name == project_name
assert project.spec.description == description
# assert accessible from the project as well
assert project.description == description
assert project.spec.artifact_path == artifact_path
# assert accessible from the project as well
assert project.artifact_path == artifact_path
assert deepdiff.DeepDiff(params, project.spec.params, ignore_order=True,) == {}
# assert accessible from the project as well
assert deepdiff.DeepDiff(params, project.params, ignore_order=True,) == {}
assert (
deepdiff.DeepDiff(
legacy_project.functions, project.functions, ignore_order=True,
)
== {}
)
assert (
deepdiff.DeepDiff(
legacy_project.workflows, project.workflows, ignore_order=True,
)
== {}
)
assert (
deepdiff.DeepDiff(
legacy_project.artifacts, project.artifacts, ignore_order=True,
)
== {}
)
def test_export_project_dir_doesnt_exist():
project_name = "project-name"
project_file_path = (
pathlib.Path(tests.conftest.results)
/ "new-dir"
/ "another-new-dir"
/ "project.yaml"
)
project = mlrun.projects.project.new_project(project_name)
project.export(filepath=project_file_path)
def test_new_project_context_doesnt_exist():
project_name = "project-name"
project_dir_path = (
pathlib.Path(tests.conftest.results) / "new-dir" / "another-new-dir"
)
mlrun.projects.project.new_project(project_name, project_dir_path)
def test_create_project_with_invalid_name():
invalid_name = "project_name"
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
mlrun.projects.project.new_project(invalid_name, init_git=False)
def test_get_set_params():
project_name = "project-name"
project = mlrun.new_project(project_name)
param_key = "param-key"
param_value = "param-value"
project.params[param_key] = param_value
assert param_value == project.get_param(param_key)
default_value = "default-value"
assert project.get_param("not-exist", default_value) == default_value
def test_user_project():
project_name = "project-name"
original_username = os.environ.get("V3IO_USERNAME")
usernames = ["valid-username", "require_Normalization"]
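    # project names must be normalized (lowercased and dasherized), so both username forms should yield valid project names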
for username in usernames:
os.environ["V3IO_USERNAME"] = username
project = mlrun.new_project(project_name, user_project=True)
assert (
project.metadata.name
== f"{project_name}-{inflection.dasherize(username.lower())}"
), "project name doesnt include user name"
if original_username is not None:
os.environ["V3IO_USERNAME"] = original_username
def test_build_project_from_minimal_dict():
# When mlrun is follower, the created project will usually have all values set to None when created from the leader
# API, verify we successfully initialize Project instance from that
project_dict = {
"metadata": {"name": "default", "labels": None, "annotations": None},
"spec": {
"description": None,
"goals": None,
"params": None,
"functions": None,
"workflows": None,
"artifacts": None,
"artifact_path": None,
"conda": None,
"source": None,
"subpath": None,
"origin_url": None,
"desired_state": "online",
},
"status": {"state": "online"},
}
mlrun.projects.MlrunProject.from_dict(project_dict)
def _assert_project_function_objects(project, expected_function_objects):
project_function_objects = project.spec._function_objects
assert len(project_function_objects) == len(expected_function_objects)
for function_name, function_object in expected_function_objects.items():
assert function_name in project_function_objects
assert (
deepdiff.DeepDiff(
project_function_objects[function_name].to_dict(),
function_object.to_dict(),
ignore_order=True,
exclude_paths=["root['spec']['build']['code_origin']"],
)
== {}
)
|
the-stack_106_20016
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we can create a local shared library containing shared
object files built in a repository.
"""
import os
import sys
import TestSCons
test = TestSCons.TestSCons()
#
test.subdir('repository', 'work')
#
opts = '-Y ' + test.workpath('repository')
#
test.write(['repository', 'SConstruct'], """\
env = Environment()
f1 = env.SharedObject('f1.c')
f2 = env.SharedObject('f2.c')
f3 = env.SharedObject('f3.c')
if ARGUMENTS.get('PROGRAM'):
lib = env.SharedLibrary(target = 'foo',
source = f1 + f2 + f3,
WINDOWS_INSERT_DEF = 1)
env.Program(target='prog', source='prog.c', LIBS='foo', LIBPATH=['.'])
""")
for fx in ['1', '2', '3']:
test.write(['repository', 'f%s.c' % fx], r"""
#include <stdio.h>
void
f%s(void)
{
printf("f%s.c\n");
fflush(stdout);
}
""" % (fx,fx))
test.write(['repository', "foo.def"], r"""
LIBRARY "foo"
DESCRIPTION "Foo Shared Library"
EXPORTS
f1
f2
f3
""")
test.write(['repository', 'prog.c'], r"""
#include <stdio.h>
void f1(void);
void f2(void);
void f3(void);
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
f1();
f2();
f3();
printf("prog.c\n");
return 0;
}
""")
# Build the relocatable objects within the repository
test.run(chdir = 'repository', arguments = '.')
# Make the repository non-writable,
# so we'll detect if we try to write into it accidentally.
test.writable('repository', 0)
# Build the library and the program within the work area
test.run(chdir='work',
options=opts,
arguments='PROGRAM=1',
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
# Run the program and verify that the library worked
if os.name == 'posix':
if sys.platform[:6] == 'darwin':
os.environ['DYLD_LIBRARY_PATH'] = test.workpath('work')
else:
os.environ['LD_LIBRARY_PATH'] = test.workpath('work')
if sys.platform.find('irix') != -1:
os.environ['LD_LIBRARYN32_PATH'] = test.workpath('work')
test.run(program = test.workpath('work', 'prog'),
stdout = "f1.c\nf2.c\nf3.c\nprog.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
the-stack_106_20019
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from qiita_client import QiitaCommand
from .trim import trim
__all__ = ['trim']
# Define the trim command
req_params = {'input': ('artifact', ['per_sample_FASTQ'])}
opt_params = {
# 3' adapter
'Fwd read adapter': ['string', 'GATCGGAAGAGCACACGTCTGAACTCCAGTCAC'],
# 3' adapter for rev
'Rev read adapter': ['string', 'GATCGGAAGAGCGTCGTGTAGGGAAAGGAGTGT'],
# 3' quality cutoff
'Trim low-quality bases': ['integer', '15'],
# min length after trimming
'Minimum trimmed read length': ['integer', '80'],
# drop pairs whose mates are filtered out
'Pair-end read required to match': ['choice:["any", "both"]', 'any'],
# maximum Ns to drop sequence
'Maximum number of N bases in a read to keep it': ['integer', '80'],
# trim Ns on end of read
'Trim Ns on ends of reads': ['boolean', True],
# Threads used
'Number of threads used': ['integer', '5'],
# NextSeq-specific quality trimming
'NextSeq-specific quality trimming': ['boolean', False],
}
outputs = {'Adapter trimmed files': 'per_sample_FASTQ'}
dflt_param_set = {
'KAPA HyperPlus with iTru': {
'Fwd read adapter': 'GATCGGAAGAGCACACGTCTGAACTCCAGTCAC',
'Rev read adapter': 'GATCGGAAGAGCGTCGTGTAGGGAAAGGAGTGT',
'Trim low-quality bases': 15,
'Minimum trimmed read length': 80,
'Pair-end read required to match': 'any',
'Maximum number of N bases in a read to keep it': 80,
'Trim Ns on ends of reads': True,
'NextSeq-specific quality trimming': False,
'Number of threads used': 5
}
}
trim_cmd = QiitaCommand(
'Atropos v1.1.15', "Sequence QC - adapter trimming", trim,
req_params, opt_params, outputs, dflt_param_set)
|
the-stack_106_20021
|
from requests_html import HTMLSession
import mechanicalsoup
session = HTMLSession()
browser = mechanicalsoup.StatefulBrowser()
browser.session.headers.update({'User-agent': 'Firefox'})  # set the User-agent on the underlying requests session
class Profile:
"""
    Parse a twitter profile page and expose its information as attributes.
Attributes:
- name
- username
- birthday
- biography
- website
- profile_photo
- likes_count
- tweets_count
- followers_count
- following_count
"""
def __init__(self, username):
browser.open("https://twitter.com/"+username)
page = browser.get_current_page()
self.username = username
self.__parse_profile(page)
def __parse_profile(self, page):
        # parse location; this also checks whether the username is valid
        try:
            self.location = page.find(
                attrs={"class": "ProfileHeaderCard-locationText u-dir"}
            ).contents[1].contents[0].strip()
except AttributeError:
raise ValueError(
                f'Oops! Either "@{self.username}" does not exist or is private.')
# parse birthday
try:
            self.birthday = page.find(
                attrs={"class": "ProfileHeaderCard-birthdateText u-dir"}
            ).find().contents[0].strip().replace("Born ", "")
        except AttributeError:
            self.birthday = None
# parse URL of profile photo
self.profile_photo = page.find(attrs={"class":
"ProfileAvatar-image"}).attrs['src']
# parse full name
name_text = page.find("title").contents[0]
self.name = name_text[:name_text.find('(')].strip()
# parse biography
        # __process_paragraph returns (text, links); keep only the text
        self.biography = self.__process_paragraph(
            page.find(attrs={"class": "ProfileHeaderCard-bio u-dir"}).contents
        )[0]
        # parse user's website address
        try:
            self.website = page.find(
                attrs={'class': 'ProfileHeaderCard-urlText u-dir'}
            ).find().contents[0].strip()
        except AttributeError:
            self.website = None
# parse count of followers
try:
q = page.find(attrs={"data-nav": "followers"})
self.followers_count = int(q.attrs["title"].
split(' ')[0].replace(',', ''))
        except (AttributeError, KeyError):
            self.followers_count = 0
# parse count of likes
q = page.find(attrs={"data-nav": "favorites"})
        self.likes_count = int(q.attrs["title"].split(' ')[0].replace(',', ''))
# parse count of following
q = page.find(attrs={"data-nav": "following"})
self.following_count = int(q.attrs["title"].split(' ')[0].
replace(',', ''))
# parse count of tweets
q = page.find(attrs={"data-nav": "tweets"})
self.tweets_count = int(q.attrs["title"].
split(' ')[0].replace(',', ''))
def __process_paragraph(self, contents):
output = ''
links = []
for i in contents:
try:
output += i
            except TypeError:
                # i is a Tag rather than a plain string
                if i.name == "a":
                    tmp_txt, tmp_lnk = self.__process_paragraph(i.contents)
                    links += tmp_lnk
                    output += tmp_txt  # +'@['+i.attrs['href']+']'
                    links.append(i.attrs['href'])
                elif i.name in ['s', 'b']:
                    tmp_txt, tmp_lnk = self.__process_paragraph(i.contents)
                    links += tmp_lnk
                    output += tmp_txt
return output, links
def __dir__(self):
return [
'name',
'username',
'birthday',
'biography',
'website',
'profile_photo',
'likes_count',
'tweets_count',
'followers_count',
'following_count'
]
def __repr__(self):
return f'<profile {self.username}@twitter>'
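# Usage sketch (illustrative; the handle below is an assumption, and scraping
# depends on Twitter's legacy HTML markup, so this may break without notice):
#
#   profile = Profile("jack")
#   print(profile.name, profile.followers_count)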
|
the-stack_106_20022
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('orders', '0007_usercheckout_braintree_id'),
]
operations = [
migrations.AddField(
model_name='order',
name='order_id',
field=models.CharField(max_length=20, null=True, blank=True),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(default=b'created', max_length=120, choices=[(b'created', b'Created'), (b'paid', b'Paid'), (b'shipped', b'Shipped')]),
),
]
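# Applying this migration (illustrative): once the file is in the orders app's
# migrations package, run `python manage.py migrate orders` to add the
# order_id column and the updated status choices to the database.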
|
the-stack_106_20024
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class FluxCore(AutotoolsPackage):
""" A next-generation resource manager (pre-alpha) """
homepage = "https://github.com/flux-framework/flux-core"
url = "https://github.com/flux-framework/flux-core/releases/download/v0.8.0/flux-core-0.8.0.tar.gz"
git = "https://github.com/flux-framework/flux-core.git"
version('master', branch='master')
version('0.11.3', sha256='91b5d7dca8fc28a77777c4e4cb8717fc3dc2c174e70611740689a71901c6de7e')
version('0.11.2', sha256='ab8637428cd9b74b2dff4842d10e0fc4acc8213c4e51f31d32a4cbfbdf730412')
version('0.11.1', sha256='3c8495db0f3b701f6dfe3e2a75aed794fc561e9f28284e8c02ac67693bfe890e')
version('0.11.0', sha256='a4d8ff92e79b4ca19d556395bb8c5f8dc02fd9d5a8cc38c4a2c66867a96de5ea')
version('0.10.0', sha256='a70cdd228077af60c9443a5c69d3da932e447dd11697f5fef9028c48dabb3041')
version('0.9.0', sha256='7b5b4aa72704b3c4432136b9e515e0d663568e6dbfc3ecd2f91c83b65841104e')
version('0.8.0', sha256='eb4b0fe0da191acd3823ef42d415c40aae6a0c3aef62ebb27905658d045e11cc')
# Avoid the infinite symlink issue
# This workaround is documented in PR #3543
build_directory = 'spack-build'
variant('docs', default=False, description='Build flux manpages')
variant('cuda', default=False, description='Build dependencies with support for CUDA')
depends_on("[email protected]:")
depends_on("czmq")
depends_on("[email protected]:3.99", when="@0.1:0.6")
depends_on("[email protected]:", when="@0.7:")
depends_on("[email protected]:1.99")
depends_on("hwloc +cuda", when='+cuda')
# Provide version hints for lua so that the concretizer succeeds when no
# explicit flux-core version is given. See issue #10000 for details
depends_on("[email protected]:5.2.99", type=('build', 'run', 'link'))
depends_on("[email protected]:5.1.99", when="@0.1.0:0.9.0")
depends_on("[email protected]:5.2.99", when="@0.10.0:,master")
depends_on("lua-luaposix")
depends_on("munge", when="@0.1.0:0.10.0")
depends_on("python", type=('build', 'run'))
depends_on("[email protected]:2.99", when="@0.1.0:0.11.0")
depends_on("[email protected]:", when="@0.11.1:")
depends_on("py-cffi", type=('build', 'run'))
depends_on("py-six", type=('build', 'run'), when="@0.11.0:")
depends_on("py-pyyaml", type=('build', 'run'), when="@0.11.0:")
depends_on("py-jsonschema", type=('build', 'run'), when="@0.12.0:")
depends_on("jansson")
depends_on("pkgconfig")
depends_on("yaml-cpp", when="@:0.11")
depends_on("lz4", when="@0.11.0:")
    # versions up to 0.8.0 use pylint to check Flux's python bindings
# later versions provide a configure flag and disable the check by default
depends_on("py-pylint", when='@:0.8.0', type='build')
depends_on("asciidoc", type='build', when="+docs")
# Need autotools when building on master:
depends_on("autoconf", type='build', when='@master')
depends_on("automake", type='build', when='@master')
depends_on("libtool", type='build', when='@master')
# Testing Dependencies
depends_on("mpich pmi=pmi", type="test")
depends_on("valgrind", type="test")
depends_on("jq", type="test", when='@0.12.0:')
def url_for_version(self, version):
'''
Flux uses a fork of ZeroMQ's Collective Code Construction Contract
(https://github.com/flux-framework/rfc/blob/master/spec_1.adoc).
This model requires a repository fork for every stable release that has
patch releases. For example, 0.8.0 and 0.9.0 are both tags within the
main repository, but 0.8.1 and 0.9.5 would be releases on the v0.8 and
v0.9 forks, respectively.
Rather than provide an explicit URL for each patch release, make Spack
aware of this repo structure.
'''
if version[-1] == 0:
url = "https://github.com/flux-framework/flux-core/releases/download/v{0}/flux-core-{0}.tar.gz"
else:
url = "https://github.com/flux-framework/flux-core-v{1}/releases/download/v{0}/flux-core-{0}.tar.gz"
return url.format(version.up_to(3), version.up_to(2))
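    # Worked illustration of the scheme above (comments only, not executed):
    # 0.8.0 is a tag in the main repository, so it resolves to
    #   .../flux-core/releases/download/v0.8.0/flux-core-0.8.0.tar.gz
    # while a hypothetical patch release 0.8.1 would live on the v0.8 fork:
    #   .../flux-core-v0.8/releases/download/v0.8.1/flux-core-0.8.1.tar.gz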
def setup(self):
pass
@when('@master')
def setup(self):
with working_dir(self.stage.source_path):
# Allow git-describe to get last tag so flux-version works:
git = which('git')
git('fetch', '--unshallow')
git("config", "remote.origin.fetch",
"+refs/heads/*:refs/remotes/origin/*")
git('fetch', 'origin')
def autoreconf(self, spec, prefix):
self.setup()
if not os.path.exists('configure'):
# Bootstrap with autotools
bash = which('bash')
bash('./autogen.sh')
@property
def lua_version(self):
return self.spec['lua'].version.up_to(2)
@property
def lua_share_dir(self):
return os.path.join('share', 'lua', str(self.lua_version))
@property
def lua_lib_dir(self):
return os.path.join('lib', 'lua', str(self.lua_version))
def setup_build_environment(self, env):
# Ensure ./fluxometer.lua can be found during flux's make check
env.append_path('LUA_PATH', './?.lua', separator=';')
def setup_run_environment(self, env):
env.prepend_path(
'LUA_PATH',
os.path.join(self.spec.prefix, self.lua_share_dir, '?.lua'),
separator=';')
env.prepend_path(
'LUA_CPATH',
os.path.join(self.spec.prefix, self.lua_lib_dir, '?.so'),
separator=';')
env.prepend_path(
'PYTHONPATH',
os.path.join(
self.spec.prefix.lib,
"python{0}".format(self.spec['python'].version.up_to(2)),
"site-packages"),
)
env.prepend_path('FLUX_MODULE_PATH', self.prefix.lib.flux.modules)
env.prepend_path('FLUX_EXEC_PATH', self.prefix.libexec.flux.cmd)
env.prepend_path('FLUX_RC_PATH', self.prefix.etc.flux)
env.prepend_path('FLUX_RC1_PATH', self.prefix.etc.flux.rc1)
env.prepend_path('FLUX_RC3_PATH', self.prefix.etc.flux.rc3)
env.prepend_path(
'FLUX_CONNECTOR_PATH',
self.prefix.lib.flux.connectors
)
env.prepend_path(
'FLUX_PMI_LIBRARY_PATH',
os.path.join(self.prefix.lib.flux, "libpmi.so")
)
# Wreck was removed in 0.12
if self.version < Version("0.12.0"):
env.prepend_path(
'FLUX_WREXECD_PATH',
self.prefix.libexec.flux.wrexecd
)
env.prepend_path(
'FLUX_WRECK_LUA_PATTERN',
os.path.join(self.prefix.etc.wreck, "lua.d", "*.lua")
)
def configure_args(self):
args = ['--enable-pylint=no']
if '+docs' not in self.spec:
args.append('--disable-docs')
return args
|
the-stack_106_20025
|
#!/usr/bin/env python3
# pylint: disable=unused-wildcard-import
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox # Why
import tkbetter as tkb
import subprocess as sp
from pathlib import Path
from threading import Thread
from time import sleep
class LogWindow(tkb.Window):
def __init__(self, parent=None):
super().__init__(parent)
#self.geometry("600x400")
#self.resizable(False, False)
#self.title("Logged Window")
self.main_frame = ttk.Frame(self)
self.main_frame.pack(fill=tk.X, expand=0, padx=10, pady=(10, 0))
self.log_frame = ttk.LabelFrame(self, text="Log")
self.log_frame.pack(fill=tk.BOTH, expand=1, padx=10, pady=10)
self.log_text = tk.Text(self.log_frame)
self.log_text.config(state="disabled")
self.log_text.pack(fill=tk.BOTH, expand=1, padx=7, pady=5)
def log_label(self, label):
self.log_frame.config(text=str(label))
def log_write(self, line=""):
self.log_text.config(state="normal")
self.log_text.insert(tk.END, str(line) + "\n")
self.log_text.config(state="disabled")
def log_get(self):
return self.log_text.get("1.0", tk.END)
def log_clear(self):
self.log_text.config(state="normal")
self.log_text.delete("1.0", tk.END)
self.log_text.config(state="disabled")
class AppWindow(LogWindow):
def __init__(self, parent=None, avds=[]):
super().__init__(parent)
# GUI
self.geometry("600x400")
self.title("AVD TkLuncher")
avd_label = ttk.Label(self.main_frame, text="AVD:")
avd_label.pack(side=tk.LEFT)
self.avd_variable = tk.StringVar()
self.avd_select = ttk.OptionMenu(self.main_frame, self.avd_variable, avds[0], *avds)
self.avd_select.pack(side=tk.LEFT, fill=tk.X, expand=1, padx=10, pady=10)
self.avd_run = ttk.Button(self.main_frame, text="Run", command=self.handle_run)
self.avd_run.pack(side=tk.RIGHT)
def handle_run(self):
self.log_clear()
def target():
#self.core.run_queued(print, self.avd_variable.get())
tkb.run_queued(self.avd_select.config, state="disabled")
tkb.run_queued(self.avd_run.config, state="disabled")
            # temporary fix
try:
adbp = sp.Popen(["adb", "start-server"], stdout=sp.PIPE, stderr=sp.STDOUT)
                while adbp.poll() is None:
line = adbp.stdout.readline()
if not line:
break
if line:
tkb.run_queued(self.log_write, line.decode('utf-8'))
except FileNotFoundError:
messagebox.showerror("ADB required", "'adb' command not found. Please add its location to the PATH.\n\n"
"adb is necesary to fix a bug in this program. adb will not be necesary after fixing "
"it but I have no time for fixing this project.")
return
except Exception as e:
messagebox.showerror("Unexpected error", str(e))
return
            # end of temporary fix
process = sp.Popen(["emulator", "-avd", self.avd_variable.get()], stdout=sp.PIPE, stderr=sp.STDOUT)
            while process.poll() is None:
line = process.stdout.readline()
if not line:
break
if line:
tkb.run_queued(self.log_write, line.decode('utf-8'))
tkb.run_queued(self.avd_select.config, state="normal")
tkb.run_queued(self.avd_run.config, state="normal")
tkb.run_thread(target)
class EmptyListAvdsException(Exception):
pass
def main():
core = tkb.Core()
# List AVDs
avds = None
try:
avds = sp.Popen(["emulator", "-list-avds"], stdout=sp.PIPE, stderr=sp.STDOUT) \
.communicate()[0] \
.decode("utf-8") \
.strip() \
.split()
if len(avds) == 0:
raise EmptyListAvdsException
AppWindow(core, avds)
core.mainloop()
except FileNotFoundError:
messagebox.showerror("Emulator AVD list failed", "Can't get list of AVDs. Make sure that 'emulator' executable is in PATH and works correctly.\nTry running 'emulator -list-avds' command by your self.")
except EmptyListAvdsException:
messagebox.showerror("Empty List AVDs", "No AVDs found. Please create a new AVD from Android Studio.")
except Exception as e:
messagebox.showerror("Unexpected error", str(e))
if __name__ == "__main__":
main()
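# Running this script (illustrative): the Android SDK's `emulator` and `adb`
# binaries must be on PATH; then launch the file directly, e.g.
#   python3 avd_tkluncher.py
# (the filename is an assumption; use whatever name this file is saved under)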
|
the-stack_106_20026
|
import os
import paramiko
import string
import random
from common import log_orig as contrail_logging
from common.contrail_test_init import ContrailTestInit
logger = contrail_logging.getLogger('auth')
class Util:
templates = {
'pod': '/var/tmp/templates/pod.yaml',
'deployment': '/var/tmp/templates/deployment.yaml',
'service': '/var/tmp/templates/service.yaml',
'namespace': '/var/tmp/templates/namespace.yaml',
'network_attachment_definition': '/var/tmp/templates/network_attachment_definition.yaml',
'network_policy': '/var/tmp/templates/network_policy.yaml',
'ingress': '/var/tmp/templates/ingress.yaml',
'daemonset': '/var/tmp/templates/daemonset.yaml',
'stackrc': '/var/tmp/templates/stackrc.sh'}
@staticmethod
def exec_kubectl_cmd_on_file(
verb,
resource,
namespace,
stackrc_file,
inputs):
# kubectl = 'kubectl -v=5 --insecure-skip-tls-verify=true -s https://192.168.30.29:6443'
kubectl = 'kubectl'
template_file = Util.templates[resource]
cmd = ['%s %s -f %s -n %s' % (kubectl, verb, template_file, namespace)]
out, err = Util.execute_cmds_on_remote(
ip=inputs.juju_server, cmd_list=cmd, stackrc_file=stackrc_file)
return out, err
@staticmethod
def source_stackrc_to_file(
user_name='admin',
password='password',
project_name='admin',
domain_name='admin_domain',
auth_url=None,
inputs=None):
export_list = [
'export OS_IDENTITY_API_VERSION=3',
'export OS_USER_DOMAIN_NAME=%s' % domain_name,
'export OS_USERNAME=%s' % user_name,
'export OS_PROJECT_DOMAIN_NAME=%s' % domain_name,
'export OS_PROJECT_NAME=%s' % project_name,
'export OS_PASSWORD=%s' % password,
'export OS_AUTH_URL=%s' % auth_url,
'export OS_DOMAIN_NAME=%s' % domain_name
]
filename = '/contrail-test/tcutils/kubernetes/auth/templates/stackrc.sh'
with open(filename, 'w') as f:
for exports in export_list:
f.write(exports + os.linesep)
inputs.copy_file_to_server(
ip=inputs.juju_server,
src=filename,
dst='stackrc.sh',
dstdir='/var/tmp/templates')
return Util.templates['stackrc']
@staticmethod
def execute_cmds_on_remote(
ip,
cmd_list,
stackrc_file=None,
username='root',
password='c0ntrail123'):
output = ""
error = ""
client = paramiko.SSHClient()
try:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip, username=username, password=password)
except BaseException:
print("[!] Cannot connect to the SSH Server")
exit()
for cmd in cmd_list:
if stackrc_file is not None:
source_stackrc = 'source %s' % stackrc_file
cmd = f"{source_stackrc};{cmd}"
stdin, stdout, stderr = client.exec_command(cmd)
output = stdout.read().decode()
error = stderr.read().decode()
client.close()
return output, error
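# Usage sketch (illustrative only; `inputs` is assumed to be a ContrailTestInit
# style object exposing `juju_server` and `copy_file_to_server`, and the
# auth URL below is made up):
#
#   stackrc = Util.source_stackrc_to_file(auth_url='http://10.0.0.1:5000/v3',
#                                         inputs=inputs)
#   out, err = Util.exec_kubectl_cmd_on_file('apply', 'pod', 'default',
#                                            stackrc, inputs)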
|
the-stack_106_20027
|
##############################################################################
#
# Copyright (c) 2010 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import z3c.preference
import zope.app.wsgi.testlayer
import zope.testbrowser.wsgi
class BrowserLayer(zope.testbrowser.wsgi.Layer,
zope.app.wsgi.testlayer.BrowserLayer):
"""BrowserLayer which is compatible with zope.testbrowser."""
def testSetUp(self):
super(BrowserLayer, self).testSetUp()
self._application.requestFactory._db = self.db
def testTearDown(self):
self._application.requestFactory._publication_cache.clear()
super(BrowserLayer, self).testTearDown()
make_wsgi_app = zope.app.wsgi.testlayer.BrowserLayer.make_wsgi_app
Layer = BrowserLayer(z3c.preference)
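# Usage sketch (illustrative): tests typically reference this layer so that
# the test runner sets up the WSGI application once per layer, e.g. by
# assigning `layer = Layer` on a test case or on a test suite.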
|
the-stack_106_20028
|
#!/usr/bin/env python
import os
from setuptools import setup
# load __version__
exec(open('trimesh/version.py').read())
long_description = ''
if os.path.exists('README.md'):
with open('README.md', 'r') as f:
long_description = f.read()
setup(name='trimesh',
version=__version__,
description='Import, export, process, analyze and view triangular meshes.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Michael Dawson-Haggerty',
author_email='[email protected]',
license='MIT',
url='http://github.com/mikedh/trimesh',
keywords='graphics mesh geometry 3D',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English',
'Topic :: Scientific/Engineering'],
packages=[
'trimesh',
'trimesh.io',
'trimesh.ray',
'trimesh.path',
'trimesh.path.io',
'trimesh.scene',
'trimesh.resources',
'trimesh.interfaces'],
package_data={'trimesh': ['resources/*.template',
'resources/*.json']},
install_requires=['numpy',
'scipy',
'networkx'],
extras_require={'easy': ['lxml',
'pyglet',
'Shapely',
'rtree',
'svg.path',
'sympy',
'msgpack',
'pillow',
'colorlog'],
'all': ['lxml',
'pyglet',
'Shapely',
'rtree',
'svg.path',
'triangle',
'sympy',
'msgpack',
'python-fcl',
'colorlog',
'xxhash',
'pillow',
'setuptools']}
)
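# Installation sketch (illustrative): a plain `pip install .` from the source
# tree installs the core package with the hard requirements above, while
# `pip install .[easy]` or `pip install .[all]` also pulls in the optional
# dependencies listed in extras_require.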
|
the-stack_106_20029
|
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from six import iteritems
from canonicaljson import json
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.util.caches.descriptors import cachedInlineCallbacks
logger = logging.getLogger(__name__)
DEFAULT_NOTIF_ACTION = ["notify", {"set_tweak": "highlight", "value": False}]
DEFAULT_HIGHLIGHT_ACTION = [
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
]
def _serialize_action(actions, is_highlight):
"""Custom serializer for actions. This allows us to "compress" common actions.
We use the fact that most users have the same actions for notifs (and for
highlights).
We store these default actions as the empty string rather than the full JSON.
Since the empty string isn't valid JSON there is no risk of this clashing with
any real JSON actions
"""
if is_highlight:
if actions == DEFAULT_HIGHLIGHT_ACTION:
return "" # We use empty string as the column is non-NULL
else:
if actions == DEFAULT_NOTIF_ACTION:
return ""
return json.dumps(actions)
def _deserialize_action(actions, is_highlight):
"""Custom deserializer for actions. This allows us to "compress" common actions
"""
if actions:
return json.loads(actions)
if is_highlight:
return DEFAULT_HIGHLIGHT_ACTION
else:
return DEFAULT_NOTIF_ACTION
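# Illustration of the "compression" above (comments only): the default notify
# action list serializes to the empty string and round-trips back to the
# default, while anything else is stored as real JSON.
#
#   _serialize_action(DEFAULT_NOTIF_ACTION, is_highlight=False)  -> ""
#   _deserialize_action("", is_highlight=False)  -> DEFAULT_NOTIF_ACTION
#   _serialize_action(["notify"], is_highlight=False)  -> '["notify"]'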
class EventPushActionsWorkerStore(SQLBaseStore):
def __init__(self, db_conn, hs):
super(EventPushActionsWorkerStore, self).__init__(db_conn, hs)
# These get correctly set by _find_stream_orderings_for_times_txn
self.stream_ordering_month_ago = None
self.stream_ordering_day_ago = None
cur = LoggingTransaction(
db_conn.cursor(),
name="_find_stream_orderings_for_times_txn",
database_engine=self.database_engine,
)
self._find_stream_orderings_for_times_txn(cur)
cur.close()
self.find_stream_orderings_looping_call = self._clock.looping_call(
self._find_stream_orderings_for_times, 10 * 60 * 1000
)
self._rotate_delay = 3
self._rotate_count = 10000
@cachedInlineCallbacks(num_args=3, tree=True, max_entries=5000)
def get_unread_event_push_actions_by_room_for_user(
self, room_id, user_id, last_read_event_id
):
ret = yield self.runInteraction(
"get_unread_event_push_actions_by_room",
self._get_unread_counts_by_receipt_txn,
room_id,
user_id,
last_read_event_id,
)
return ret
def _get_unread_counts_by_receipt_txn(
self, txn, room_id, user_id, last_read_event_id
):
sql = (
"SELECT stream_ordering"
" FROM events"
" WHERE room_id = ? AND event_id = ?"
)
txn.execute(sql, (room_id, last_read_event_id))
results = txn.fetchall()
if len(results) == 0:
return {"notify_count": 0, "highlight_count": 0}
stream_ordering = results[0][0]
return self._get_unread_counts_by_pos_txn(
txn, room_id, user_id, stream_ordering
)
def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering):
# First get number of notifications.
# We don't need to put a notif=1 clause as all rows always have
# notif=1
sql = (
"SELECT count(*)"
" FROM event_push_actions ea"
" WHERE"
" user_id = ?"
" AND room_id = ?"
" AND stream_ordering > ?"
)
txn.execute(sql, (user_id, room_id, stream_ordering))
row = txn.fetchone()
notify_count = row[0] if row else 0
txn.execute(
"""
SELECT notif_count FROM event_push_summary
WHERE room_id = ? AND user_id = ? AND stream_ordering > ?
""",
(room_id, user_id, stream_ordering),
)
rows = txn.fetchall()
if rows:
notify_count += rows[0][0]
# Now get the number of highlights
sql = (
"SELECT count(*)"
" FROM event_push_actions ea"
" WHERE"
" highlight = 1"
" AND user_id = ?"
" AND room_id = ?"
" AND stream_ordering > ?"
)
txn.execute(sql, (user_id, room_id, stream_ordering))
row = txn.fetchone()
highlight_count = row[0] if row else 0
return {"notify_count": notify_count, "highlight_count": highlight_count}
@defer.inlineCallbacks
def get_push_action_users_in_range(self, min_stream_ordering, max_stream_ordering):
def f(txn):
sql = (
"SELECT DISTINCT(user_id) FROM event_push_actions WHERE"
" stream_ordering >= ? AND stream_ordering <= ?"
)
txn.execute(sql, (min_stream_ordering, max_stream_ordering))
return [r[0] for r in txn]
ret = yield self.runInteraction("get_push_action_users_in_range", f)
return ret
@defer.inlineCallbacks
def get_unread_push_actions_for_user_in_range_for_http(
self, user_id, min_stream_ordering, max_stream_ordering, limit=20
):
"""Get a list of the most recent unread push actions for a given user,
within the given stream ordering range. Called by the httppusher.
Args:
user_id (str): The user to fetch push actions for.
min_stream_ordering(int): The exclusive lower bound on the
stream ordering of event push actions to fetch.
max_stream_ordering(int): The inclusive upper bound on the
stream ordering of event push actions to fetch.
limit (int): The maximum number of rows to return.
Returns:
A promise which resolves to a list of dicts with the keys "event_id",
"room_id", "stream_ordering", "actions".
The list will be ordered by ascending stream_ordering.
            The list will have between 0 and `limit` entries.
"""
# find rooms that have a read receipt in them and return the next
# push actions
def get_after_receipt(txn):
# find rooms that have a read receipt in them and return the next
# push actions
sql = (
"SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
" ep.highlight "
" FROM ("
" SELECT room_id,"
" MAX(stream_ordering) as stream_ordering"
" FROM events"
" INNER JOIN receipts_linearized USING (room_id, event_id)"
" WHERE receipt_type = 'm.read' AND user_id = ?"
" GROUP BY room_id"
") AS rl,"
" event_push_actions AS ep"
" WHERE"
" ep.room_id = rl.room_id"
" AND ep.stream_ordering > rl.stream_ordering"
" AND ep.user_id = ?"
" AND ep.stream_ordering > ?"
" AND ep.stream_ordering <= ?"
" ORDER BY ep.stream_ordering ASC LIMIT ?"
)
args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
txn.execute(sql, args)
return txn.fetchall()
after_read_receipt = yield self.runInteraction(
"get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
)
# There are rooms with push actions in them but you don't have a read receipt in
# them e.g. rooms you've been invited to, so get push actions for rooms which do
# not have read receipts in them too.
def get_no_receipt(txn):
sql = (
"SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
" ep.highlight "
" FROM event_push_actions AS ep"
" INNER JOIN events AS e USING (room_id, event_id)"
" WHERE"
" ep.room_id NOT IN ("
" SELECT room_id FROM receipts_linearized"
" WHERE receipt_type = 'm.read' AND user_id = ?"
" GROUP BY room_id"
" )"
" AND ep.user_id = ?"
" AND ep.stream_ordering > ?"
" AND ep.stream_ordering <= ?"
" ORDER BY ep.stream_ordering ASC LIMIT ?"
)
args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
txn.execute(sql, args)
return txn.fetchall()
no_read_receipt = yield self.runInteraction(
"get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
)
notifs = [
{
"event_id": row[0],
"room_id": row[1],
"stream_ordering": row[2],
"actions": _deserialize_action(row[3], row[4]),
}
for row in after_read_receipt + no_read_receipt
]
# Now sort it so it's ordered correctly, since currently it will
# contain results from the first query, correctly ordered, followed
# by results from the second query, but we want them all ordered
# by stream_ordering, oldest first.
notifs.sort(key=lambda r: r["stream_ordering"])
# Take only up to the limit. We have to stop at the limit because
# one of the subqueries may have hit the limit.
return notifs[:limit]
@defer.inlineCallbacks
def get_unread_push_actions_for_user_in_range_for_email(
self, user_id, min_stream_ordering, max_stream_ordering, limit=20
):
"""Get a list of the most recent unread push actions for a given user,
within the given stream ordering range. Called by the emailpusher
Args:
user_id (str): The user to fetch push actions for.
min_stream_ordering(int): The exclusive lower bound on the
stream ordering of event push actions to fetch.
max_stream_ordering(int): The inclusive upper bound on the
stream ordering of event push actions to fetch.
limit (int): The maximum number of rows to return.
Returns:
A promise which resolves to a list of dicts with the keys "event_id",
"room_id", "stream_ordering", "actions", "received_ts".
The list will be ordered by descending received_ts.
            The list will have between 0 and `limit` entries.
"""
# find rooms that have a read receipt in them and return the most recent
# push actions
def get_after_receipt(txn):
sql = (
"SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
" ep.highlight, e.received_ts"
" FROM ("
" SELECT room_id,"
" MAX(stream_ordering) as stream_ordering"
" FROM events"
" INNER JOIN receipts_linearized USING (room_id, event_id)"
" WHERE receipt_type = 'm.read' AND user_id = ?"
" GROUP BY room_id"
") AS rl,"
" event_push_actions AS ep"
" INNER JOIN events AS e USING (room_id, event_id)"
" WHERE"
" ep.room_id = rl.room_id"
" AND ep.stream_ordering > rl.stream_ordering"
" AND ep.user_id = ?"
" AND ep.stream_ordering > ?"
" AND ep.stream_ordering <= ?"
" ORDER BY ep.stream_ordering DESC LIMIT ?"
)
args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
txn.execute(sql, args)
return txn.fetchall()
after_read_receipt = yield self.runInteraction(
"get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
)
# There are rooms with push actions in them but you don't have a read receipt in
# them e.g. rooms you've been invited to, so get push actions for rooms which do
# not have read receipts in them too.
def get_no_receipt(txn):
sql = (
"SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,"
" ep.highlight, e.received_ts"
" FROM event_push_actions AS ep"
" INNER JOIN events AS e USING (room_id, event_id)"
" WHERE"
" ep.room_id NOT IN ("
" SELECT room_id FROM receipts_linearized"
" WHERE receipt_type = 'm.read' AND user_id = ?"
" GROUP BY room_id"
" )"
" AND ep.user_id = ?"
" AND ep.stream_ordering > ?"
" AND ep.stream_ordering <= ?"
" ORDER BY ep.stream_ordering DESC LIMIT ?"
)
args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit]
txn.execute(sql, args)
return txn.fetchall()
no_read_receipt = yield self.runInteraction(
"get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
)
# Make a list of dicts from the two sets of results.
notifs = [
{
"event_id": row[0],
"room_id": row[1],
"stream_ordering": row[2],
"actions": _deserialize_action(row[3], row[4]),
"received_ts": row[5],
}
for row in after_read_receipt + no_read_receipt
]
# Now sort it so it's ordered correctly, since currently it will
# contain results from the first query, correctly ordered, followed
# by results from the second query, but we want them all ordered
# by received_ts (most recent first)
notifs.sort(key=lambda r: -(r["received_ts"] or 0))
# Now return the first `limit`
return notifs[:limit]
def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering):
"""A fast check to see if there might be something to push for the
user since the given stream ordering. May return false positives.
Useful to know whether to bother starting a pusher on start up or not.
Args:
user_id (str)
min_stream_ordering (int)
Returns:
Deferred[bool]: True if there may be push to process, False if
there definitely isn't.
"""
def _get_if_maybe_push_in_range_for_user_txn(txn):
sql = """
SELECT 1 FROM event_push_actions
WHERE user_id = ? AND stream_ordering > ?
LIMIT 1
"""
txn.execute(sql, (user_id, min_stream_ordering))
return bool(txn.fetchone())
return self.runInteraction(
"get_if_maybe_push_in_range_for_user",
_get_if_maybe_push_in_range_for_user_txn,
)
def add_push_actions_to_staging(self, event_id, user_id_actions):
"""Add the push actions for the event to the push action staging area.
Args:
event_id (str)
user_id_actions (dict[str, list[dict|str])]): A dictionary mapping
user_id to list of push actions, where an action can either be
a string or dict.
Returns:
Deferred
"""
if not user_id_actions:
return
# This is a helper function for generating the necessary tuple that
        # can be used to insert into the `event_push_actions_staging` table.
def _gen_entry(user_id, actions):
is_highlight = 1 if _action_has_highlight(actions) else 0
return (
event_id, # event_id column
user_id, # user_id column
_serialize_action(actions, is_highlight), # actions column
1, # notif column
is_highlight, # highlight column
)
def _add_push_actions_to_staging_txn(txn):
# We don't use _simple_insert_many here to avoid the overhead
# of generating lists of dicts.
sql = """
INSERT INTO event_push_actions_staging
(event_id, user_id, actions, notif, highlight)
VALUES (?, ?, ?, ?, ?)
"""
txn.executemany(
sql,
(
_gen_entry(user_id, actions)
for user_id, actions in iteritems(user_id_actions)
),
)
return self.runInteraction(
"add_push_actions_to_staging", _add_push_actions_to_staging_txn
)
@defer.inlineCallbacks
def remove_push_actions_from_staging(self, event_id):
"""Called if we failed to persist the event to ensure that stale push
actions don't build up in the DB
Args:
event_id (str)
"""
try:
res = yield self._simple_delete(
table="event_push_actions_staging",
keyvalues={"event_id": event_id},
desc="remove_push_actions_from_staging",
)
return res
except Exception:
# this method is called from an exception handler, so propagating
# another exception here really isn't helpful - there's nothing
# the caller can do about it. Just log the exception and move on.
logger.exception(
"Error removing push actions after event persistence failure"
)
def _find_stream_orderings_for_times(self):
return run_as_background_process(
"event_push_action_stream_orderings",
self.runInteraction,
"_find_stream_orderings_for_times",
self._find_stream_orderings_for_times_txn,
)
def _find_stream_orderings_for_times_txn(self, txn):
logger.info("Searching for stream ordering 1 month ago")
self.stream_ordering_month_ago = self._find_first_stream_ordering_after_ts_txn(
txn, self._clock.time_msec() - 30 * 24 * 60 * 60 * 1000
)
logger.info(
"Found stream ordering 1 month ago: it's %d", self.stream_ordering_month_ago
)
logger.info("Searching for stream ordering 1 day ago")
self.stream_ordering_day_ago = self._find_first_stream_ordering_after_ts_txn(
txn, self._clock.time_msec() - 24 * 60 * 60 * 1000
)
logger.info(
"Found stream ordering 1 day ago: it's %d", self.stream_ordering_day_ago
)
def find_first_stream_ordering_after_ts(self, ts):
"""Gets the stream ordering corresponding to a given timestamp.
Specifically, finds the stream_ordering of the first event that was
received on or after the timestamp. This is done by a binary search on
the events table, since there is no index on received_ts, so is
relatively slow.
Args:
ts (int): timestamp in millis
Returns:
Deferred[int]: stream ordering of the first event received on/after
the timestamp
"""
return self.runInteraction(
"_find_first_stream_ordering_after_ts_txn",
self._find_first_stream_ordering_after_ts_txn,
ts,
)
@staticmethod
def _find_first_stream_ordering_after_ts_txn(txn, ts):
"""
Find the stream_ordering of the first event that was received on or
after a given timestamp. This is relatively slow as there is no index
on received_ts but we can then use this to delete push actions before
this.
received_ts must necessarily be in the same order as stream_ordering
and stream_ordering is indexed, so we manually binary search using
stream_ordering
Args:
txn (twisted.enterprise.adbapi.Transaction):
ts (int): timestamp to search for
Returns:
int: stream ordering
"""
txn.execute("SELECT MAX(stream_ordering) FROM events")
max_stream_ordering = txn.fetchone()[0]
if max_stream_ordering is None:
return 0
# We want the first stream_ordering in which received_ts is greater
# than or equal to ts. Call this point X.
#
# We maintain the invariants:
#
# range_start <= X <= range_end
#
range_start = 0
range_end = max_stream_ordering + 1
# Given a stream_ordering, look up the timestamp at that
# stream_ordering.
#
# The array may be sparse (we may be missing some stream_orderings).
# We treat the gaps as the same as having the same value as the
# preceding entry, because we will pick the lowest stream_ordering
# which satisfies our requirement of received_ts >= ts.
#
# For example, if our array of events indexed by stream_ordering is
# [10, <none>, 20], we should treat this as being equivalent to
# [10, 10, 20].
#
sql = (
"SELECT received_ts FROM events"
" WHERE stream_ordering <= ?"
" ORDER BY stream_ordering DESC"
" LIMIT 1"
)
while range_end - range_start > 0:
middle = (range_end + range_start) // 2
txn.execute(sql, (middle,))
row = txn.fetchone()
if row is None:
# no rows with stream_ordering<=middle
range_start = middle + 1
continue
middle_ts = row[0]
if ts > middle_ts:
# we got a timestamp lower than the one we were looking for.
# definitely need to look higher: X > middle.
range_start = middle + 1
else:
# we got a timestamp higher than (or the same as) the one we
# were looking for. We aren't yet sure about the point we
# looked up, but we can be sure that X <= middle.
range_end = middle
return range_end
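        # Worked example of the search above (comments only): with events
        #   stream_ordering: 1    2       3
        #   received_ts:     10   <none>  20
        # the gap at 2 is treated as 10, so ts=15 returns 3, ts=5 returns 1,
        # and ts=25 returns max_stream_ordering + 1 (i.e. "after everything").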
class EventPushActionsStore(EventPushActionsWorkerStore):
EPA_HIGHLIGHT_INDEX = "epa_highlight_index"
def __init__(self, db_conn, hs):
super(EventPushActionsStore, self).__init__(db_conn, hs)
self.register_background_index_update(
self.EPA_HIGHLIGHT_INDEX,
index_name="event_push_actions_u_highlight",
table="event_push_actions",
columns=["user_id", "stream_ordering"],
)
self.register_background_index_update(
"event_push_actions_highlights_index",
index_name="event_push_actions_highlights_index",
table="event_push_actions",
columns=["user_id", "room_id", "topological_ordering", "stream_ordering"],
where_clause="highlight=1",
)
self._doing_notif_rotation = False
self._rotate_notif_loop = self._clock.looping_call(
self._start_rotate_notifs, 30 * 60 * 1000
)
def _set_push_actions_for_event_and_users_txn(
self, txn, events_and_contexts, all_events_and_contexts
):
"""Handles moving push actions from staging table to main
event_push_actions table for all events in `events_and_contexts`.
Also ensures that all events in `all_events_and_contexts` are removed
from the push action staging area.
Args:
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
"""
sql = """
INSERT INTO event_push_actions (
room_id, event_id, user_id, actions, stream_ordering,
topological_ordering, notif, highlight
)
SELECT ?, event_id, user_id, actions, ?, ?, notif, highlight
FROM event_push_actions_staging
WHERE event_id = ?
"""
if events_and_contexts:
txn.executemany(
sql,
(
(
event.room_id,
event.internal_metadata.stream_ordering,
event.depth,
event.event_id,
)
for event, _ in events_and_contexts
),
)
for event, _ in events_and_contexts:
user_ids = self._simple_select_onecol_txn(
txn,
table="event_push_actions_staging",
keyvalues={"event_id": event.event_id},
retcol="user_id",
)
for uid in user_ids:
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(event.room_id, uid),
)
# Now we delete the staging area for *all* events that were being
# persisted.
txn.executemany(
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
((event.event_id,) for event, _ in all_events_and_contexts),
)
@defer.inlineCallbacks
def get_push_actions_for_user(
self, user_id, before=None, limit=50, only_highlight=False
):
def f(txn):
before_clause = ""
if before:
before_clause = "AND epa.stream_ordering < ?"
args = [user_id, before, limit]
else:
args = [user_id, limit]
if only_highlight:
if len(before_clause) > 0:
before_clause += " "
before_clause += "AND epa.highlight = 1"
# NB. This assumes event_ids are globally unique since
# it makes the query easier to index
sql = (
"SELECT epa.event_id, epa.room_id,"
" epa.stream_ordering, epa.topological_ordering,"
" epa.actions, epa.highlight, epa.profile_tag, e.received_ts"
" FROM event_push_actions epa, events e"
" WHERE epa.event_id = e.event_id"
" AND epa.user_id = ? %s"
" ORDER BY epa.stream_ordering DESC"
" LIMIT ?" % (before_clause,)
)
txn.execute(sql, args)
return self.cursor_to_dict(txn)
push_actions = yield self.runInteraction("get_push_actions_for_user", f)
for pa in push_actions:
pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"])
return push_actions
@defer.inlineCallbacks
def get_time_of_last_push_action_before(self, stream_ordering):
def f(txn):
sql = (
"SELECT e.received_ts"
" FROM event_push_actions AS ep"
" JOIN events e ON ep.room_id = e.room_id AND ep.event_id = e.event_id"
" WHERE ep.stream_ordering > ?"
" ORDER BY ep.stream_ordering ASC"
" LIMIT 1"
)
txn.execute(sql, (stream_ordering,))
return txn.fetchone()
result = yield self.runInteraction("get_time_of_last_push_action_before", f)
return result[0] if result else None
@defer.inlineCallbacks
def get_latest_push_action_stream_ordering(self):
def f(txn):
txn.execute("SELECT MAX(stream_ordering) FROM event_push_actions")
return txn.fetchone()
result = yield self.runInteraction("get_latest_push_action_stream_ordering", f)
return result[0] or 0
def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
# Sad that we have to blow away the cache for the whole room here
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(room_id,),
)
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id),
)
def _remove_old_push_actions_before_txn(
self, txn, room_id, user_id, stream_ordering
):
"""
Purges old push actions for a user and room before a given
stream_ordering.
        However, we keep a month's worth of highlighted notifications, so that
users can still get a list of recent highlights.
Args:
            txn: The transaction
room_id: Room ID to delete from
user_id: user ID to delete for
stream_ordering: The lowest stream ordering which will
not be deleted.
"""
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(room_id, user_id),
)
# We need to join on the events table to get the received_ts for
# event_push_actions and sqlite won't let us use a join in a delete so
        # we can't just delete where received_ts < x. Furthermore we can
        # only identify event_push_actions by a tuple of room_id, event_id,
        # so we can't use a subquery.
        # Instead, we look up the stream ordering for the last event in that
        # room received before the threshold time and delete event_push_actions
        # in the room with a stream_ordering before that.
txn.execute(
"DELETE FROM event_push_actions "
" WHERE user_id = ? AND room_id = ? AND "
" stream_ordering <= ?"
" AND ((stream_ordering < ? AND highlight = 1) or highlight = 0)",
(user_id, room_id, stream_ordering, self.stream_ordering_month_ago),
)
txn.execute(
"""
DELETE FROM event_push_summary
WHERE room_id = ? AND user_id = ? AND stream_ordering <= ?
""",
(room_id, user_id, stream_ordering),
)
def _start_rotate_notifs(self):
return run_as_background_process("rotate_notifs", self._rotate_notifs)
@defer.inlineCallbacks
def _rotate_notifs(self):
if self._doing_notif_rotation or self.stream_ordering_day_ago is None:
return
self._doing_notif_rotation = True
try:
while True:
logger.info("Rotating notifications")
caught_up = yield self.runInteraction(
"_rotate_notifs", self._rotate_notifs_txn
)
if caught_up:
break
yield self.hs.get_clock().sleep(self._rotate_delay)
finally:
self._doing_notif_rotation = False
def _rotate_notifs_txn(self, txn):
"""Archives older notifications into event_push_summary. Returns whether
the archiving process has caught up or not.
"""
old_rotate_stream_ordering = self._simple_select_one_onecol_txn(
txn,
table="event_push_summary_stream_ordering",
keyvalues={},
retcol="stream_ordering",
)
        # We don't want to try to rotate millions of rows at once, so we cap the
# maximum stream ordering we'll rotate before.
txn.execute(
"""
SELECT stream_ordering FROM event_push_actions
WHERE stream_ordering > ?
ORDER BY stream_ordering ASC LIMIT 1 OFFSET ?
""",
(old_rotate_stream_ordering, self._rotate_count),
)
stream_row = txn.fetchone()
if stream_row:
(offset_stream_ordering,) = stream_row
rotate_to_stream_ordering = min(
self.stream_ordering_day_ago, offset_stream_ordering
)
caught_up = offset_stream_ordering >= self.stream_ordering_day_ago
else:
rotate_to_stream_ordering = self.stream_ordering_day_ago
caught_up = True
logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering)
self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering)
# We have caught up iff we were limited by `stream_ordering_day_ago`
return caught_up
def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering):
old_rotate_stream_ordering = self._simple_select_one_onecol_txn(
txn,
table="event_push_summary_stream_ordering",
keyvalues={},
retcol="stream_ordering",
)
# Calculate the new counts that should be upserted into event_push_summary
sql = """
SELECT user_id, room_id,
coalesce(old.notif_count, 0) + upd.notif_count,
upd.stream_ordering,
old.user_id
FROM (
SELECT user_id, room_id, count(*) as notif_count,
max(stream_ordering) as stream_ordering
FROM event_push_actions
WHERE ? <= stream_ordering AND stream_ordering < ?
AND highlight = 0
GROUP BY user_id, room_id
) AS upd
LEFT JOIN event_push_summary AS old USING (user_id, room_id)
"""
txn.execute(sql, (old_rotate_stream_ordering, rotate_to_stream_ordering))
rows = txn.fetchall()
logger.info("Rotating notifications, handling %d rows", len(rows))
# If the `old.user_id` above is NULL then we know there isn't already an
# entry in the table, so we simply insert it. Otherwise we update the
# existing table.
self._simple_insert_many_txn(
txn,
table="event_push_summary",
values=[
{
"user_id": row[0],
"room_id": row[1],
"notif_count": row[2],
"stream_ordering": row[3],
}
for row in rows
if row[4] is None
],
)
txn.executemany(
"""
UPDATE event_push_summary SET notif_count = ?, stream_ordering = ?
WHERE user_id = ? AND room_id = ?
""",
((row[2], row[3], row[0], row[1]) for row in rows if row[4] is not None),
)
txn.execute(
"DELETE FROM event_push_actions"
" WHERE ? <= stream_ordering AND stream_ordering < ? AND highlight = 0",
(old_rotate_stream_ordering, rotate_to_stream_ordering),
)
logger.info("Rotating notifications, deleted %s push actions", txn.rowcount)
txn.execute(
"UPDATE event_push_summary_stream_ordering SET stream_ordering = ?",
(rotate_to_stream_ordering,),
)
def _action_has_highlight(actions):
for action in actions:
try:
if action.get("set_tweak", None) == "highlight":
return action.get("value", True)
except AttributeError:
pass
return False
|
the-stack_106_20031
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide basic Bokeh server objects that use a Tornado ``HTTPServer`` and
``BokehTornado`` Tornado Application to service Bokeh Server Applications.
There are two public classes in this module:
:class:`~bokeh.server.server.BaseServer`
This is a lightweight class to explicitly coordinate the components needed
to run a Bokeh server (A :class:`~bokeh.server.tornado.BokehTornado`
instance, and Tornado ``HTTPServer`` and a Tornado ``IOLoop``)
:class:`~bokeh.server.server.Server`
This higher-level convenience class only needs to be configured with Bokeh
:class:`~bokeh.application.application.Application` instances, and will
automatically create and coordinate the lower level Tornado components.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import atexit
import signal
import sys
# External imports
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# Bokeh imports
from .. import __version__
from ..application import Application
from ..core.properties import Bool, Int, List, String
from ..resources import DEFAULT_SERVER_PORT
from ..util.options import Options
from .util import bind_sockets, create_hosts_whitelist
from .tornado import BokehTornado, DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'BaseServer',
'Server',
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BaseServer(object):
    ''' Explicitly coordinate the lower-level Tornado components required to run a
Bokeh server:
* A Tornado ``IOLoop`` to run the Bokeh server machinery.
* a ``BokehTornado`` Tornado application that defines the Bokeh server
machinery.
* a Tornado ``HTTPServer`` to direct HTTP requests
All three of these components must be passed to ``BaseServer``, which will
initialize the ``BokehTornado`` instance on the ``io_loop``. The
``http_server`` must have been previously created and initialized with the
``BokehTornado`` instance.
'''
def __init__(self, io_loop, tornado_app, http_server):
''' Create a ``BaseServer`` instance.
Args:
io_loop (IOLoop) :
A Tornado ``IOLoop`` to run the Bokeh Tornado application on.
tornado_app (BokehTornado) :
An instance of the Bokeh Tornado application that generates
Bokeh Documents and Sessions.
http_server (HTTPServer) :
A Tornado ``HTTPServer`` to service HTTP requests for Bokeh
applications. Should have already be configured with the
``tornado_app`` when created.
'''
self._started = False
self._stopped = False
self._http = http_server
self._loop = io_loop
self._tornado = tornado_app
self._tornado.initialize(io_loop)
@property
def io_loop(self):
''' The Tornado ``IOLoop`` that this Bokeh Server is running on.
'''
return self._loop
def start(self):
''' Install the Bokeh Server and its background tasks on a Tornado
``IOLoop``.
This method does *not* block and does *not* affect the state of the
        Tornado ``IOLoop``. You must start and stop the loop yourself, i.e.
this method is typically useful when you are already explicitly
managing an ``IOLoop`` yourself.
To start a Bokeh server and immediately "run forever" in a blocking
manner, see :func:`~bokeh.server.server.BaseServer.run_until_shutdown`.
'''
assert not self._started, "Already started"
self._started = True
self._tornado.start()
def stop(self, wait=True):
''' Stop the Bokeh Server.
This stops and removes all Bokeh Server ``IOLoop`` callbacks, as well
as stops the ``HTTPServer`` that this instance was configured with.
Args:
            wait (bool):
Whether to wait for orderly cleanup (default: True)
Returns:
None
'''
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
def unlisten(self):
''' Stop listening on ports. The server will no longer be usable after
calling this function.
Returns:
None
'''
        # Note: we do not wait for in-flight connections to finish closing.
        self._http.close_all_connections()
self._http.stop()
def run_until_shutdown(self):
''' Run the Bokeh Server until shutdown is requested by the user,
either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
Calling this method will start the Tornado ``IOLoop`` and block
all execution in the calling process.
Returns:
None
'''
if not self._started:
self.start()
# Install shutdown hooks
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
self.stop()
def get_session(self, app_path, session_id):
        ''' Get an active session by application path and session ID.
Args:
app_path (str) :
The configured application path for the application to return
a session for.
session_id (str) :
The session ID of the session to retrieve.
Returns:
ServerSession
'''
return self._tornado.get_session(app_path, session_id)
def get_sessions(self, app_path=None):
''' Gets all currently active sessions for applications.
Args:
app_path (str, optional) :
The configured application path for the application to return
sessions for. If None, return active sessions for all
applications. (default: None)
Returns:
list[ServerSession]
'''
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions
def show(self, app_path, browser=None, new='tab'):
''' Opens an app in a browser window or tab.
This method is useful for testing or running Bokeh server applications
        on a local machine but should not be called when running a Bokeh
        server for an actual deployment.
Args:
app_path (str) : the app path to open
The part of the URL after the hostname:port, with leading slash.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : window or tab (default: "tab")
If ``new`` is 'tab', then opens a new tab.
If ``new`` is 'window', then opens a new window.
Returns:
None
'''
if not app_path.startswith("/"):
raise ValueError("app_path must start with a /")
address_string = 'localhost'
if self.address is not None and self.address != '':
address_string = self.address
url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path)
from bokeh.util.browser import view
view(url, browser=browser, new=new)
_atexit_ran = False
def _atexit(self):
if self._atexit_ran:
return
self._atexit_ran = True
log.debug("Shutdown: cleaning up")
if not self._stopped:
self.stop(wait=False)
def _sigterm(self, signum, frame):
print("Received signal %d, shutting down" % (signum,))
# Tell self._loop.start() to return.
self._loop.add_callback_from_signal(self._loop.stop)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Server(BaseServer):
''' A high level convenience class to run a Bokeh server.
    This class can automatically coordinate the three base-level
    components required to run a Bokeh server:
* A Tornado ``IOLoop`` to run the Bokeh server machinery.
* a ``BokehTornado`` Tornado application that defines the Bokeh server
machinery.
* a Tornado ``HTTPServer`` to direct HTTP requests
This high level ``Server`` class has some limitations. In particular, it is
not possible to set an explicit ``io_loop`` and ``num_procs`` other than 1
at the same time. To do that, it is necessary to use ``BaseServer`` and
coordinate the three components above explicitly.
'''
def __init__(self, applications, io_loop=None, http_server_kwargs=None, **kwargs):
''' Create a ``Server`` instance.
Args:
applications (dict[str, Application] or Application or callable) :
A mapping from URL paths to Application instances, or a single
Application to put at the root URL.
The Application is a factory for Documents, with a new Document
initialized for each Session. Each application is identified
by a path that corresponds to a URL, like "/" or "/myapp"
If a single Application is provided, it is mapped to the URL
path "/" automatically.
                As a convenience, a callable may also be provided, in which
                case an Application will be created for it using
``FunctionHandler``.
io_loop (IOLoop, optional) :
An explicit Tornado ``IOLoop`` to run Bokeh Server code on. If
None, ``IOLoop.current()`` will be used (default: None)
http_server_kwargs (dict, optional) :
Extra arguments passed to ``tornado.httpserver.HTTPServer``.
E.g. ``max_buffer_size`` to specify the maximum upload size.
More details can be found at:
http://www.tornadoweb.org/en/stable/httpserver.html#http-server
If None, no extra arguments are passed (default: None)
Additionally, the following options may be passed to configure the
operation of ``Server``:
.. bokeh-options:: _ServerOpts
:module: bokeh.server.server
Any remaining keyword arguments will be passed as-is to
``BokehTornado``.
'''
log.info("Starting Bokeh server version %s (running on Tornado %s)" % (__version__, tornado.version))
from bokeh.application.handlers.function import FunctionHandler
from bokeh.application.handlers.document_lifecycle import DocumentLifecycleHandler
if callable(applications):
applications = Application(FunctionHandler(applications))
if isinstance(applications, Application):
applications = { '/' : applications }
for k, v in list(applications.items()):
if callable(v):
applications[k] = Application(FunctionHandler(v))
if all(not isinstance(handler, DocumentLifecycleHandler)
for handler in applications[k]._handlers):
applications[k].add(DocumentLifecycleHandler())
opts = _ServerOpts(kwargs)
self._port = opts.port
self._address = opts.address
self._prefix = opts.prefix
self._index = opts.index
if opts.num_procs != 1:
assert all(app.safe_to_fork for app in applications.values()), (
'User application code has run before attempting to start '
'multiple processes. This is considered an unsafe operation.')
if opts.num_procs > 1 and io_loop is not None:
raise RuntimeError(
"Setting both num_procs and io_loop in Server is incompatible. Use BaseServer to coordinate an explicit IOLoop and multi-process HTTPServer"
)
if opts.num_procs > 1 and sys.platform == "win32":
raise RuntimeError("num_procs > 1 not supported on Windows")
if http_server_kwargs is None:
http_server_kwargs = {}
http_server_kwargs.setdefault('xheaders', opts.use_xheaders)
sockets, self._port = bind_sockets(self.address, self.port)
extra_websocket_origins = create_hosts_whitelist(opts.allow_websocket_origin, self.port)
try:
tornado_app = BokehTornado(applications,
extra_websocket_origins=extra_websocket_origins,
prefix=self.prefix,
index=self.index,
websocket_max_message_size_bytes=opts.websocket_max_message_size,
**kwargs)
http_server = HTTPServer(tornado_app, **http_server_kwargs)
http_server.start(opts.num_procs)
http_server.add_sockets(sockets)
except Exception:
for s in sockets:
s.close()
raise
# Can only refer to IOLoop after HTTPServer.start() is called, see #5524
if io_loop is None:
io_loop = IOLoop.current()
super(Server, self).__init__(io_loop, tornado_app, http_server)
@property
def index(self):
''' A path to a Jinja2 template to use for index at "/"
'''
return self._index
@property
def prefix(self):
''' The configured URL prefix to use for all Bokeh server paths.
'''
return self._prefix
@property
def port(self):
''' The configured port number that the server listens on for HTTP
requests.
'''
return self._port
@property
def address(self):
''' The configured address that the server listens on for HTTP
requests.
'''
return self._address
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
class _ServerOpts(Options):
num_procs = Int(default=1, help="""
The number of worker processes to start for the HTTP server. If an explicit
``io_loop`` is also configured, then ``num_procs=1`` is the only compatible
value. Use ``BaseServer`` to coordinate an explicit ``IOLoop`` with a
multi-process HTTP server.
    A value of 0 will auto-detect the number of cores.
Note that due to limitations inherent in Tornado, Windows does not support
``num_procs`` values greater than one! In this case consider running
multiple Bokeh server instances behind a load balancer.
""")
address = String(default=None, help="""
The address the server should listen on for HTTP requests.
""")
port = Int(default=DEFAULT_SERVER_PORT, help="""
The port number the server should listen on for HTTP requests.
""")
prefix = String(default="", help="""
A URL prefix to use for all Bokeh server paths.
""")
index = String(default=None, help="""
A path to a Jinja2 template to use for the index "/"
""")
allow_websocket_origin = List(String, default=None, help="""
A list of hosts that can connect to the websocket.
This is typically required when embedding a Bokeh server app in an external
web site using :func:`~bokeh.embed.server_document` or similar.
If None, "localhost" is used.
""")
use_xheaders = Bool(default=False, help="""
Whether to have the Bokeh server override the remote IP and URI scheme
and protocol for all requests with ``X-Real-Ip``, ``X-Forwarded-For``,
``X-Scheme``, ``X-Forwarded-Proto`` headers (if they are provided).
""")
websocket_max_message_size = Int(default=DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES, help="""
Set the Tornado ``websocket_max_message_size`` value.
NOTE: This setting has effect ONLY for Tornado>=4.5
""")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
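# A minimal usage sketch (not part of the original module): how ``Server`` is
# typically driven with a function-based application. The ``_example_app``
# callback, the port number, and the ``bokeh.plotting`` import are illustrative
# assumptions only.
if __name__ == "__main__":  # pragma: no cover
    from bokeh.plotting import figure

    def _example_app(doc):
        # Build a trivial document so the application has something to serve.
        fig = figure(title="demo")
        fig.line([1, 2, 3], [4, 6, 5])
        doc.add_root(fig)

    # Callables are wrapped in Application(FunctionHandler(...)) by __init__.
    _server = Server({"/": _example_app}, num_procs=1, port=5006)
    _server.start()
    _server.io_loop.start()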
|
the-stack_106_20032
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Dict
from typing import List
import jieba.posseg as psg
import numpy as np
import paddle
from g2pM import G2pM
from pypinyin import lazy_pinyin
from pypinyin import Style
from parakeet.frontend.generate_lexicon import generate_lexicon
from parakeet.frontend.tone_sandhi import ToneSandhi
from parakeet.frontend.zh_normalization.text_normlization import TextNormalizer
class Frontend():
def __init__(self,
g2p_model="pypinyin",
phone_vocab_path=None,
tone_vocab_path=None):
self.tone_modifier = ToneSandhi()
self.text_normalizer = TextNormalizer()
self.punc = ":,;。?!“”‘’':,;.?!"
# g2p_model can be pypinyin and g2pM
self.g2p_model = g2p_model
if self.g2p_model == "g2pM":
self.g2pM_model = G2pM()
self.pinyin2phone = generate_lexicon(
with_tone=True, with_erhua=False)
self.must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"}
self.not_erhua = {
"虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿",
"拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿", "脑瘫儿",
"流浪儿", "体弱儿", "混血儿", "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿", "侄儿",
"孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿",
"狗儿"
}
self.vocab_phones = {}
self.vocab_tones = {}
if phone_vocab_path:
with open(phone_vocab_path, 'rt') as f:
phn_id = [line.strip().split() for line in f.readlines()]
for phn, id in phn_id:
self.vocab_phones[phn] = int(id)
if tone_vocab_path:
with open(tone_vocab_path, 'rt') as f:
tone_id = [line.strip().split() for line in f.readlines()]
for tone, id in tone_id:
self.vocab_tones[tone] = int(id)
def _get_initials_finals(self, word: str) -> List[List[str]]:
initials = []
finals = []
if self.g2p_model == "pypinyin":
orig_initials = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.INITIALS)
orig_finals = lazy_pinyin(
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
for c, v in zip(orig_initials, orig_finals):
if re.match(r'i\d', v):
if c in ['z', 'c', 's']:
v = re.sub('i', 'ii', v)
elif c in ['zh', 'ch', 'sh', 'r']:
v = re.sub('i', 'iii', v)
initials.append(c)
finals.append(v)
elif self.g2p_model == "g2pM":
pinyins = self.g2pM_model(word, tone=True, char_split=False)
for pinyin in pinyins:
pinyin = pinyin.replace("u:", "v")
if pinyin in self.pinyin2phone:
initial_final_list = self.pinyin2phone[pinyin].split(" ")
if len(initial_final_list) == 2:
initials.append(initial_final_list[0])
finals.append(initial_final_list[1])
elif len(initial_final_list) == 1:
initials.append('')
finals.append(initial_final_list[1])
else:
# If it's not pinyin (possibly punctuation) or no conversion is required
initials.append(pinyin)
finals.append(pinyin)
return initials, finals
# if merge_sentences, merge all sentences into one phone sequence
def _g2p(self,
sentences: List[str],
merge_sentences: bool=True,
with_erhua: bool=True) -> List[List[str]]:
segments = sentences
phones_list = []
for seg in segments:
phones = []
seg_cut = psg.lcut(seg)
initials = []
finals = []
seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
for word, pos in seg_cut:
if pos == 'eng':
continue
sub_initials, sub_finals = self._get_initials_finals(word)
sub_finals = self.tone_modifier.modified_tone(word, pos,
sub_finals)
if with_erhua:
sub_initials, sub_finals = self._merge_erhua(
sub_initials, sub_finals, word, pos)
initials.append(sub_initials)
finals.append(sub_finals)
# assert len(sub_initials) == len(sub_finals) == len(word)
initials = sum(initials, [])
finals = sum(finals, [])
for c, v in zip(initials, finals):
# NOTE: post process for pypinyin outputs
# we discriminate i, ii and iii
if c and c not in self.punc:
phones.append(c)
if v and v not in self.punc:
phones.append(v)
# add sp between sentence (replace the last punc with sp)
if initials[-1] in self.punc:
phones.append('sp')
phones_list.append(phones)
if merge_sentences:
merge_list = sum(phones_list, [])
phones_list = []
phones_list.append(merge_list)
return phones_list
def _merge_erhua(self,
initials: List[str],
finals: List[str],
word: str,
pos: str) -> List[List[str]]:
if word not in self.must_erhua and (word in self.not_erhua or
pos in {"a", "j", "nr"}):
return initials, finals
new_initials = []
new_finals = []
assert len(finals) == len(word)
for i, phn in enumerate(finals):
if i == len(finals) - 1 and word[i] == "儿" and phn in {
"er2", "er5"
} and word[-2:] not in self.not_erhua and new_finals:
new_finals[-1] = new_finals[-1][:-1] + "r" + new_finals[-1][-1]
else:
new_finals.append(phn)
new_initials.append(initials[i])
return new_initials, new_finals
def _p2id(self, phonemes: List[str]) -> np.array:
# replace unk phone with sp
phonemes = [
phn if phn in self.vocab_phones else "sp" for phn in phonemes
]
phone_ids = [self.vocab_phones[item] for item in phonemes]
return np.array(phone_ids, np.int64)
def _t2id(self, tones: List[str]) -> np.array:
        # replace unknown tone with "0"
tones = [tone if tone in self.vocab_tones else "0" for tone in tones]
tone_ids = [self.vocab_tones[item] for item in tones]
return np.array(tone_ids, np.int64)
def _get_phone_tone(self, phonemes: List[str],
get_tone_ids: bool=False) -> List[List[str]]:
phones = []
tones = []
if get_tone_ids and self.vocab_tones:
for full_phone in phonemes:
# split tone from finals
match = re.match(r'^(\w+)([012345])$', full_phone)
if match:
phone = match.group(1)
tone = match.group(2)
# if the merged erhua not in the vocab
# assume that the input is ['iaor3'] and 'iaor' not in self.vocab_phones, we split 'iaor' into ['iao','er']
# and the tones accordingly change from ['3'] to ['3','2'], while '2' is the tone of 'er2'
if len(phone) >= 2 and phone != "er" and phone[
-1] == 'r' and phone not in self.vocab_phones and phone[:
-1] in self.vocab_phones:
phones.append(phone[:-1])
phones.append("er")
tones.append(tone)
tones.append("2")
else:
phones.append(phone)
tones.append(tone)
else:
phones.append(full_phone)
tones.append('0')
else:
for phone in phonemes:
# if the merged erhua not in the vocab
# assume that the input is ['iaor3'] and 'iaor' not in self.vocab_phones, change ['iaor3'] to ['iao3','er2']
if len(phone) >= 3 and phone[:-1] != "er" and phone[
-2] == 'r' and phone not in self.vocab_phones and (
phone[:-2] + phone[-1]) in self.vocab_phones:
phones.append((phone[:-2] + phone[-1]))
phones.append("er2")
else:
phones.append(phone)
return phones, tones
def get_phonemes(self,
sentence: str,
merge_sentences: bool=True,
with_erhua: bool=True) -> List[List[str]]:
sentences = self.text_normalizer.normalize(sentence)
phonemes = self._g2p(
sentences, merge_sentences=merge_sentences, with_erhua=with_erhua)
return phonemes
def get_input_ids(
self,
sentence: str,
merge_sentences: bool=True,
get_tone_ids: bool=False) -> Dict[str, List[paddle.Tensor]]:
phonemes = self.get_phonemes(sentence, merge_sentences=merge_sentences)
result = {}
phones = []
tones = []
temp_phone_ids = []
temp_tone_ids = []
for part_phonemes in phonemes:
phones, tones = self._get_phone_tone(
part_phonemes, get_tone_ids=get_tone_ids)
if tones:
tone_ids = self._t2id(tones)
tone_ids = paddle.to_tensor(tone_ids)
temp_tone_ids.append(tone_ids)
if phones:
phone_ids = self._p2id(phones)
phone_ids = paddle.to_tensor(phone_ids)
temp_phone_ids.append(phone_ids)
if temp_tone_ids:
result["tone_ids"] = temp_tone_ids
if temp_phone_ids:
result["phone_ids"] = temp_phone_ids
return result
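# A minimal usage sketch (not part of the original module). The sample sentence
# is arbitrary; phone/tone id lookup additionally requires phone_vocab_path /
# tone_vocab_path to be passed to the constructor.
if __name__ == "__main__":
    frontend = Frontend(g2p_model="pypinyin")
    print(frontend.get_phonemes("你好,世界。", merge_sentences=True))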
|
the-stack_106_20034
|
# -*- coding: utf-8 -*-
"""Tools for handling LaTeX.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010 IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from io import BytesIO
from base64 import encodebytes
import os
import tempfile
import shutil
import subprocess
from IPython.utils.process import find_cmd, FindCmdError
#-----------------------------------------------------------------------------
# Tools
#-----------------------------------------------------------------------------
def latex_to_png(s, encode=False, backend='mpl'):
"""Render a LaTeX string to PNG.
Parameters
----------
s : str
The raw string containing valid inline LaTeX.
encode : bool, optional
        Should the PNG data be base64-encoded to make it JSON'able.
backend : {mpl, dvipng}
Backend for producing PNG data.
None is returned when the backend cannot be used.
"""
if backend == 'mpl':
f = latex_to_png_mpl
elif backend == 'dvipng':
f = latex_to_png_dvipng
else:
raise ValueError('No such backend {0}'.format(backend))
bin_data = f(s)
if encode and bin_data:
        bin_data = encodebytes(bin_data)
return bin_data
def latex_to_png_mpl(s):
try:
from matplotlib import mathtext
except ImportError:
return None
mt = mathtext.MathTextParser('bitmap')
    f = BytesIO()
mt.to_png(f, s, fontsize=12)
return f.getvalue()
def latex_to_png_dvipng(s):
try:
find_cmd('latex')
find_cmd('dvipng')
except FindCmdError:
return None
try:
workdir = tempfile.mkdtemp()
tmpfile = os.path.join(workdir, "tmp.tex")
dvifile = os.path.join(workdir, "tmp.dvi")
outfile = os.path.join(workdir, "tmp.png")
with open(tmpfile, "w") as f:
f.write(_latex_header)
f.write(s)
f.write(_latex_footer)
subprocess.check_call(
["latex", "-halt-on-errror", tmpfile], cwd=workdir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.check_call(
["dvipng", "-T", "tight", "-x", "1500", "-z", "9",
"-bg", "transparent", "-o", outfile, dvifile], cwd=workdir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        with open(outfile, "rb") as f:
bin_data = f.read()
finally:
shutil.rmtree(workdir)
return bin_data
_latex_header = r'''
\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
'''
_latex_footer = r'\end{document}'
_data_uri_template_png = """<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
"""Render LaTeX to HTML with embedded PNG data using data URIs.
Parameters
----------
s : str
        The raw string containing valid inline LaTeX.
alt : str
The alt text to use for the HTML.
"""
base64_data = latex_to_png(s, encode=True)
if base64_data:
return _data_uri_template_png % (base64_data, alt)
# From matplotlib, thanks to mdboom. Once this is in matplotlib releases, we
# will remove.
def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None):
"""
Given a math expression, renders it in a closely-clipped bounding
box to an image file.
*s*
A math expression. The math portion should be enclosed in
dollar signs.
*filename_or_obj*
A filepath or writable file-like object to write the image data
to.
*prop*
If provided, a FontProperties() object describing the size and
style of the text.
*dpi*
Override the output dpi, otherwise use the default associated
with the output format.
*format*
The output format, eg. 'svg', 'pdf', 'ps' or 'png'. If not
provided, will be deduced from the filename.
"""
from matplotlib import figure
# backend_agg supports all of the core output formats
from matplotlib.backends import backend_agg
from matplotlib.font_manager import FontProperties
from matplotlib.mathtext import MathTextParser
if prop is None:
prop = FontProperties()
parser = MathTextParser('path')
width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)
fig = figure.Figure(figsize=(width / 72.0, height / 72.0))
fig.text(0, depth/height, s, fontproperties=prop)
backend_agg.FigureCanvasAgg(fig)
fig.savefig(filename_or_obj, dpi=dpi, format=format)
return depth
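# A minimal usage sketch (not part of the original module): render a formula
# with the matplotlib backend and write the PNG bytes to disk. The output file
# name is an illustrative assumption.
if __name__ == "__main__":
    png_bytes = latex_to_png(r"$\frac{a}{b}$", backend="mpl")
    if png_bytes:  # None when matplotlib is unavailable
        with open("formula.png", "wb") as fh:
            fh.write(png_bytes)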
|
the-stack_106_20036
|
#1 - Import library
import pygame, sys
import os
import math
import random
from pygame.locals import *
#2 - Initialize game
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False, False]
playerpos = [150, 100]
acc=[0,0]
hats=[]
pygame.display.set_caption('THIS IS WAR!')
#2.1 - add the bad guys: decrease badtimer every frame until it reaches zero,
#then spawn a new coyote
badtimer=100
badtimer1=0
coyotes=[[640,100]]
healthvalue=194
#3 - load images
current_path = os.path.dirname(r'''C:\Users\jilli\AppData\Local\Programs\Python\Python36\shooting.py''') # Where your .py file is located
resource_path = os.path.join(current_path, 'resources') # The resource folder path
image_path = os.path.join(resource_path, 'images') # The image folder path
player = pygame.image.load(os.path.join(image_path, 'perry.png'))
grass = pygame.image.load(os.path.join(image_path, 'grass.png'))
sunflower = pygame.image.load(os.path.join(image_path, 'sunflower.png'))
hat = pygame.image.load(os.path.join(image_path, 'perryhat.png'))
coyoteimg1 = pygame.image.load(os.path.join(image_path, 'coyote.png'))
coyoteimg = coyoteimg1
badguyimg1 = pygame.image.load(os.path.join(image_path,'badguy.png'))
badguyimg=badguyimg1
#4 - Loop through game so it doesn't halt
while True:
badtimer = badtimer - 1
#5 - clears the screen before drawing it again
screen.fill(0)
    #6 - draw screen elements (draw the background before the player so the player appears above it)
for x in range(width//grass.get_width()+1): # range() can only work with integers, but dividing
#with the / operator always results in a float value
for y in range(height//grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
screen.blit(sunflower,(0,30))
screen.blit(sunflower,(0,135))
screen.blit(sunflower,(0,240))
screen.blit(sunflower,(0,345 ))
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32), position[0]-(playerpos[0]+26))
playerrotates = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrotates.get_rect().width/2, playerpos[1]-playerrotates.get_rect().height/2)
screen.blit(playerrotates, playerpos1)
# 6.2 - Draw hats
    index = 0
    for perryhat in hats:
velx = math.cos(perryhat[0])*10 #10 is the speed of the arrow
vely = math.sin(perryhat[0])*10
perryhat[1] = perryhat[1] + velx
perryhat[2] = perryhat[2] + vely
        if perryhat[1] < -64 or perryhat[1] > 640 or perryhat[2] < -64 or perryhat[2] > 480:
hats.pop(index) #If no index is specified, a.pop() removes and
# returns the last item in the list.
index = index + 1
for projectile in hats:
hats1 = pygame.transform.rotate(hat, 360-projectile[0]*57.29) # multiply radians by approximately 57.29 or 360/2π
screen.blit(hats1, (projectile[1], projectile[2]))
#6.3 - Draw coyotes
if badtimer==0:
coyotes.append([640, random.randint(50,430)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for coyote in coyotes:
if coyote[0]<-64:
coyotes.pop(index)
coyote[0]-=7
index+=1
for coyote in coyotes:
screen.blit(coyoteimg, coyote)
# 6.3.1 - Attack sunflowers
badrect=pygame.Rect(coyoteimg.get_rect())
badrect.top=coyote[1]
badrect.left=coyote[0]
if badrect.left<64:
healthvalue -= random.randint(5,20)
coyotes.pop(index)
#6.3.2 - check for collisions
index1 = 0
for perryhat in hats: #rect here store rectangular coordinates
hatrect = pygame.Rect(hat.get_rect())
hatrect.left=perryhat[1]
hatrect.top=perryhat[2]
if badrect.colliderect(hatrect):
acc[0]+=1
coyotes.pop(index) # pop() removes and returns last object or obj from the list
hats.pop(index1)
index1 += 1
        #6.3.3 - placing the next bad guy onto the screen
index += 1
for coyote in coyotes:
screen.blit(coyoteimg, coyote)
#7 - update the screen
pygame.display.flip() # Update the full display Surface to the screen
for event in pygame.event.get(): #event is for actions made by user
#like pressing a key
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#8 - Keys!
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
position=pygame.mouse.get_pos()
acc[1]+=1
hats.append([math.atan2(position[1]-(playerpos1[1]+32),
position[0]-(playerpos1[0]+26)),
playerpos1[0]+32,
playerpos1[1]+32])
#9 - Move player
    if keys[0]:
        playerpos[1] = playerpos[1] - 5
    elif keys[2]:
        playerpos[1] = playerpos[1] + 5
    if keys[1]:
        playerpos[0] = playerpos[0] - 5
    elif keys[3]:
        playerpos[0] = playerpos[0] + 5
|
the-stack_106_20037
|
"""
SVB - Model parameters
This module defines a set of classes of model parameters.
The factory methods which create priors/posteriors can
make use of the instance class to create the appropriate
type of vertexwise prior/posterior
"""
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from .utils import LogBase
from . import dist
def get_parameter(name, **kwargs):
"""
Factory method to create an instance of a parameter
"""
custom_kwargs = kwargs.pop("param_overrides", {}).get(name, {})
kwargs.update(custom_kwargs)
desc = kwargs.get("desc", "No description given")
prior_dist = dist.get_dist(prefix="prior", **kwargs)
prior_type = kwargs.get("prior_type", "N")
post_dist = dist.get_dist(prefix="post", **kwargs)
post_type = kwargs.get("post_type", "vertexwise")
post_init = kwargs.get("post_init", None)
return Parameter(name, desc=desc, prior=prior_dist, prior_type=prior_type, post=post_dist, post_init=post_init, post_type=post_type)
class Parameter(LogBase):
"""
A standard model parameter
"""
def __init__(self, name, **kwargs):
"""
Constructor
:param name: Parameter name
:param prior: Dist instance giving the parameter's prior distribution
:param desc: Optional parameter description
Keyword arguments (optional):
- ``mean_init`` Initial value for the posterior mean either as a numeric
value or a callable which takes the parameters t, data, param_name
- ``log_var_init`` Initial value for the posterior log variance either as a numeric
value or a callable which takes the parameters t, data, param_name
- ``param_overrides`` Dictionary keyed by parameter name. Value should be dictionary
of keyword arguments which will override those defined as
existing keyword arguments
"""
LogBase.__init__(self)
custom_kwargs = kwargs.pop("param_overrides", {}).get(name, {})
kwargs.update(custom_kwargs)
self.name = name
self.desc = kwargs.get("desc", "No description given")
self.prior_dist = kwargs.get("prior")
self.prior_type = kwargs.get("prior_type", "N")
self.post_dist = kwargs.get("post", self.prior_dist)
self.post_type = kwargs.get("post_type", "vertexwise")
self.post_init = kwargs.get("post_init", None)
def __str__(self):
return "Parameter: %s" % self.name
|
the-stack_106_20038
|
# Bizzaro Francesco
# March 2020
#
# This script can be used to plot
# the results of all the GAs executed.
from matplotlib import pyplot as plt
import json
import datetime
import numpy as np
from math import exp,pow
from operator import add
mean_sing_best = []
mean_sing_avg = []
mean_coop_best = []
mean_coop_avg = []
best_of_all = -1
# problems = [
# "knap0_120","knap1_129","knap2_60"
# ,"knap3_40","knap4_65","knap5_42","knap6_132"
# ,"knap7_87","knap8_69","knap9_88"]
# problems = [
# "knap0_805","knap1_622","knap2_466"
# ,"knap3_194","knap4_534","knap5_621","knap6_253"
# ,"knap7_198","knap8_996","knap9_200"]
# problems = [
# "2/knap0_2594","2/knap1_2334","2/knap2_1555"
# ,"2/knap3_2829","2/knap4_2883","2/knap5_1272","2/knap6_1131"
# ,"2/knap7_1019","2/knap8_2391","2/knap9_2357"]
problems = [
"0/knap0_120","0/knap1_129","0/knap2_60"
,"0/knap3_40","0/knap4_65","0/knap5_42","0/knap6_132"
,"0/knap7_87","0/knap8_69","0/knap9_88"
,"1/knap0_805","1/knap1_622","1/knap2_466"
,"1/knap3_194","1/knap4_534","1/knap5_621","1/knap6_253"
,"1/knap7_198","1/knap8_996","1/knap9_200"
,"2/knap0_2594","2/knap1_2334","2/knap2_1555"
,"2/knap3_2829","2/knap4_2883","2/knap5_1272","2/knap6_1131"
,"2/knap7_1019","2/knap8_2391","2/knap9_2357"
]
for p in range(len(problems)):
prb = problems[p]
sing_best = []
sing_avg = []
coop_best = []
coop_avg = []
best_of_all = -1
for i in range(5):
path_sing = "problems/"+prb+"/results/Sing"+str(i)+".txt"
path_coop = "problems/"+prb+"/results/Coop"+str(i)+".txt"
with open(path_sing,"r") as fsing:
best = -1.0
avg = 0.0
tot = 0.0
for line in fsing.readlines():
tot += 1.0
cur = float(line.split(",")[1])
avg += cur
if(cur>best):
best = cur
if cur>best_of_all:
best_of_all = cur
avg = float(avg)/float(tot)
sing_best.append(best)
sing_avg.append(avg)
with open(path_coop,"r") as fsing:
best = -1.0
avg = 0.0
tot = 0.0
for line in fsing.readlines():
tot += 1.0
cur = float(line.split(",")[1])
avg += cur
if(cur>best):
best = cur
if cur>best_of_all:
best_of_all = cur
avg = float(avg)/float(tot)
coop_best.append(best)
coop_avg.append(avg)
if p==0:
mean_sing_best = [float(y)/float(best_of_all) for y in sing_best]
mean_sing_avg = [float(y)/float(best_of_all) for y in sing_avg]
mean_coop_best = [float(y)/float(best_of_all) for y in coop_best]
mean_coop_avg = [float(y)/float(best_of_all) for y in coop_avg]
else:
mean_sing_best = list(map(add,mean_sing_best,
[float(y)/float(best_of_all) for y in sing_best]))
mean_sing_avg = list(map(add,mean_sing_avg,
[float(y)/float(best_of_all) for y in sing_avg]))
mean_coop_best = list(map(add,mean_coop_best,
[float(y)/float(best_of_all) for y in coop_best]))
mean_coop_avg = list(map(add,mean_coop_avg,
[float(y)/float(best_of_all) for y in coop_avg]))
plt.plot([100*x for x in range(1,1+(len(mean_sing_best)))],
[y/float(len(problems)) for y in mean_sing_best],label="Std best",
linestyle=":",color="k",linewidth=2)
plt.plot([100*x for x in range(1,1+(len(mean_sing_best)))],
[y/float(len(problems)) for y in mean_sing_avg],label="Std avg",
linestyle="-.",color="k",linewidth=2)
plt.plot([100*x for x in range(1,1+(len(mean_sing_best)))],
[y/float(len(problems)) for y in mean_coop_best],label="Coop best",
linestyle="-",color="k",linewidth=2)
plt.plot([100*x for x in range(1,1+(len(mean_sing_best)))],
[y/float(len(problems)) for y in mean_coop_avg],label="Coop avg",
linestyle="--",color="k",linewidth=2)
plt.xlabel("Generations")
plt.ylabel("Normalized Fitness")
plt.legend(bbox_to_anchor=(1,0),loc=4,borderaxespad=0.,frameon=False)
plt.tight_layout()
plt.savefig("knapsack_tot.png")
plt.show()
print("coop best:",mean_coop_best[len(mean_coop_best)-1]/float(len(problems)))
print("sing best:",mean_sing_best[len(mean_coop_best)-1]/float(len(problems)))
|
the-stack_106_20039
|
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "Jubilee")
option_b = os.getenv('OPTION_B', "Sabretooth")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
redis_host = os.getenv('REDIS_HOST')
g.redis = Redis(host=redis_host, db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
        voter_id = hex(random.getrandbits(64))[2:]  # no trailing 'L' to strip in Python 3
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
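# A hedged usage sketch (not part of this app): exercising the endpoint from a
# separate process with the ``requests`` package. Host, port and the vote value
# "a" are illustrative assumptions.
#
#   import requests
#   resp = requests.post("http://localhost:80/", data={"vote": "a"})
#   print(resp.status_code, resp.cookies.get("voter_id"))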
|
the-stack_106_20040
|
"""
Export CAT12 segmentation results as an xarray dataset.
"""
from typing import Dict, List
import dask.array as da
import nibabel as nib
import pandas as pd
import xarray as xr
from django.db.models import QuerySet
from django_mri.analysis.automation.cat12_segmentation.utils import (
get_node, get_run_set, read_nifti)
OUTPUT_KEYS = "modulated_grey_matter", "modulated_white_matter", "warped_image"
OUTPUT_DIMS = "Run ID", "x", "y", "z"
def get_coords(runs: QuerySet) -> Dict[str, List[int]]:
run_ids = list(runs.values_list("id", flat=True))
return {"Run ID": run_ids}
def create_output_array(runs: QuerySet, key: str) -> da:
paths = (run.get_output(key) for run in runs)
arrays = (da.from_array(read_nifti(path)) for path in paths)
all_data = da.stack(arrays)
coords = get_coords(runs)
return xr.DataArray(all_data, coords=coords, dims=OUTPUT_DIMS, name=key)
INFO_COLUMNS = (
"Subject ID",
"Session ID",
"Scan ID",
"Scan Description",
"Acquisition Time",
)
def extract_run_info(runs: QuerySet) -> pd.DataFrame:
from django_mri.models.scan import Scan
info = {}
for run in runs:
input_path = run.get_input("path")
scan = Scan.objects.get(_nifti__path=input_path)
run_info = {
"Subject ID": scan.session.subject.id,
"Session ID": scan.session.id,
"Scan ID": scan.id,
"Scan Description": scan.description,
"Acquisition Time": scan.time.strftime("%Y-%m-%d"),
}
info[run.id] = run_info
return pd.DataFrame.from_dict(info, orient="index")
def export_cat_results(runs: QuerySet = None) -> xr.Dataset:
runs = runs or get_run_set()
info_df = extract_run_info(runs)
ds = xr.Dataset.from_dataframe(info_df)
ds = ds.rename({"index": "Run ID"})
affine = nib.load(runs[0].get_output(OUTPUT_KEYS[0])).affine
node = get_node()
str_configuration = {
key: str(value) for key, value in node.configuration.items()
}
configuration = list(str_configuration.items())
attrs = {
"configuration": configuration,
"affine": affine,
}
ds = ds.assign_attrs(attrs)
data_vars = {key: create_output_array(runs, key) for key in OUTPUT_KEYS}
return ds.assign(data_vars)
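# A hedged usage sketch (not part of the original module): exporting the
# results to NetCDF inside a configured Django project. The output path is an
# illustrative assumption.
#
#   ds = export_cat_results()          # defaults to get_run_set()
#   ds.to_netcdf("cat12_results.nc")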
|
the-stack_106_20047
|
import multiprocessing as mp
"""
Refer: Shared memory in multiprocessing
https://stackoverflow.com/questions/14124588/shared-memory-in-multiprocessing
https://docs.python.org/3/library/multiprocessing.html#sharing-state-between-processes
"""
import time
import numpy as np
def process1(num, ary):
print('p1 start', num.value, ary[:])
time.sleep(1)
num.value = 1
print('p1', np.array(ary).dtype)
ary[:] = [-1 * i for i in ary]
ary[:] = list(-np.ones(4))
print('p1 changed')
print('p1 print', num.value, ary[:])
time.sleep(123)
def process2(num, ary):
print('p2 start', num.value, ary[:])
time.sleep(2)
print('p2 print', num.value, ary[:])
a = np.array(ary)
print(a + 1)
time.sleep(123)
if __name__ == '__main__':
shared_num = mp.Value('d', 0.0) # 'd': float, 'i': int
shared_ary = mp.Array('d', range(4))
# from multiprocessing import shared_memory # new in Python3.8
# shared_mem = shared_memory.SharedMemory(create=True, size=4)
process = [
mp.Process(target=process1, args=(shared_num, shared_ary)),
mp.Process(target=process2, args=(shared_num, shared_ary)),
]
[p.start() for p in process]
[p.join() for p in process]
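# A hedged sketch of the multiprocessing.shared_memory API (Python >= 3.8)
# hinted at in the comments above; independent of the Value/Array demo.
#
#   from multiprocessing import shared_memory
#   shm = shared_memory.SharedMemory(create=True, size=4 * 8)
#   arr = np.ndarray((4,), dtype=np.float64, buffer=shm.buf)
#   arr[:] = np.arange(4)
#   # pass shm.name to a child process and attach with SharedMemory(name=shm.name)
#   shm.close()
#   shm.unlink()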
|
the-stack_106_20048
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Substitute variables of QuadraticProgram."""
import logging
from collections import defaultdict
from dataclasses import dataclass
from math import isclose
from typing import Dict, Optional, Tuple, Union, cast
from ..exceptions import QiskitOptimizationError
from ..infinity import INFINITY
from .constraint import ConstraintSense
from .linear_expression import LinearExpression
from .quadratic_expression import QuadraticExpression
from .quadratic_program import QuadraticProgram
logger = logging.getLogger(__name__)
@dataclass
class SubstitutionExpression:
"""Represents a substitution of a variable with a linear expression.
If `variable` is `None`, it substitutes a variable with the constant value.
Otherwise, it substitutes a variable with (constant + coefficient * new_variable).
"""
const: float = 0.0
"""Constant value"""
coeff: float = 0.0
"""Coefficient of the new variable"""
variable: Optional[str] = None
"""Variable name or `None`"""
def substitute_variables(
quadratic_program: QuadraticProgram,
constants: Optional[Dict[Union[str, int], float]] = None,
variables: Optional[Dict[Union[str, int], Tuple[Union[str, int], float]]] = None,
) -> QuadraticProgram:
"""Substitutes variables with constants or other variables.
Args:
quadratic_program: a quadratic program whose variables are substituted.
constants: replace variable by constant
e.g., {'x': 2} means 'x' is substituted with 2
variables: replace variables by weighted other variable
need to copy everything using name reference to make sure that indices are matched
correctly. The lower and upper bounds are updated accordingly.
e.g., {'x': ('y', 2)} means 'x' is substituted with 'y' * 2
Returns:
An optimization problem by substituting variables with constants or other variables.
If the substitution is valid, `QuadraticProgram.status` is still
`QuadraticProgram.Status.VALID`.
Otherwise, it gets `QuadraticProgram.Status.INFEASIBLE`.
Raises:
QiskitOptimizationError: if the substitution is invalid as follows.
- Same variable is substituted multiple times.
- Coefficient of variable substitution is zero.
"""
# guarantee that there is no overlap between variables to be replaced and combine input
subs = {}
if constants:
for i, v in constants.items():
# substitute i <- v
i_2 = quadratic_program.get_variable(i).name
if i_2 in subs:
raise QiskitOptimizationError(
f"Cannot substitute the same variable twice: {i} <- {v}"
)
subs[i_2] = SubstitutionExpression(const=v)
if variables:
for i, (j, v) in variables.items():
if v == 0:
raise QiskitOptimizationError(f"coefficient must be non-zero: {i} {j} {v}")
# substitute i <- j * v
i_2 = quadratic_program.get_variable(i).name
j_2 = quadratic_program.get_variable(j).name
if i_2 == j_2:
raise QiskitOptimizationError(
f"Cannot substitute the same variable: {i} <- {j} {v}"
)
if i_2 in subs:
raise QiskitOptimizationError(
f"Cannot substitute the same variable twice: {i} <- {j} {v}"
)
if j_2 in subs:
raise QiskitOptimizationError(
"Cannot substitute by variable that gets substituted itself: " f"{i} <- {j} {v}"
)
subs[i_2] = SubstitutionExpression(variable=j_2, coeff=v)
return _SubstituteVariables().substitute_variables(quadratic_program, subs)
class _SubstituteVariables:
"""A class to substitute variables of an optimization problem with constants for other
variables"""
def __init__(self):
self._src: Optional[QuadraticProgram] = None
self._dst: Optional[QuadraticProgram] = None
self._subs: Dict[str, SubstitutionExpression] = {}
def substitute_variables(
self, quadratic_program: QuadraticProgram, subs: Dict[str, SubstitutionExpression]
) -> QuadraticProgram:
"""Substitutes variables with constants or other variables.
Args:
quadratic_program: a quadratic program whose variables are substituted.
subs: substitution expressions as a dictionary.
e.g., {'x': SubstitutionExpression(const=1, coeff=2, variable='y'} means
`x` is substituted with `1 + 2 * y`.
Returns:
An optimization problem by substituting variables with constants or other variables.
If the substitution is valid, `QuadraticProgram.status` is still
`QuadraticProgram.Status.VALID`.
Otherwise, it gets `QuadraticProgram.Status.INFEASIBLE`.
"""
self._src = quadratic_program
self._dst = QuadraticProgram(quadratic_program.name)
self._subs = subs
results = [
self._variables(),
self._objective(),
self._linear_constraints(),
self._quadratic_constraints(),
]
if not all(results):
self._dst._status = QuadraticProgram.Status.INFEASIBLE
return self._dst
@staticmethod
def _feasible(sense: ConstraintSense, rhs: float) -> bool:
"""Checks feasibility of the following condition
0 `sense` rhs
"""
# I use the following pylint option because `rhs` should come to right
# pylint: disable=misplaced-comparison-constant
if sense == ConstraintSense.EQ:
if 0 == rhs:
return True
elif sense == ConstraintSense.LE:
if 0 <= rhs:
return True
elif sense == ConstraintSense.GE:
if 0 >= rhs:
return True
return False
def _variables(self) -> bool:
# copy variables that are not replaced
feasible = True
for var in self._src.variables:
name = var.name
vartype = var.vartype
lowerbound = var.lowerbound
upperbound = var.upperbound
if name not in self._subs:
self._dst._add_variable(lowerbound, upperbound, vartype, name)
for i, expr in self._subs.items():
lb_i = self._src.get_variable(i).lowerbound
ub_i = self._src.get_variable(i).upperbound
# substitute x_i <- x_j * coeff + const
# lb_i <= x_i <= ub_i -->
# (lb_i - const) / coeff <= x_j <= (ub_i - const) / coeff if coeff > 0
# (ub_i - const) / coeff <= x_j <= (lb_i - const) / coeff if coeff < 0
# lb_i <= const <= ub_i if coeff == 0
if isclose(expr.coeff, 0.0, abs_tol=1e-10):
if not lb_i <= expr.const <= ub_i:
logger.warning("Infeasible substitution for variable: %s", i)
feasible = False
else:
if abs(lb_i) < INFINITY:
new_lb_i = (lb_i - expr.const) / expr.coeff
else:
new_lb_i = lb_i if expr.coeff > 0 else -lb_i
if abs(ub_i) < INFINITY:
new_ub_i = (ub_i - expr.const) / expr.coeff
else:
new_ub_i = ub_i if expr.coeff > 0 else -ub_i
var_j = self._dst.get_variable(expr.variable)
lb_j = var_j.lowerbound
ub_j = var_j.upperbound
if expr.coeff > 0:
var_j.lowerbound = max(lb_j, new_lb_i)
var_j.upperbound = min(ub_j, new_ub_i)
else:
var_j.lowerbound = max(lb_j, new_ub_i)
var_j.upperbound = min(ub_j, new_lb_i)
for var in self._dst.variables:
if var.lowerbound > var.upperbound:
logger.warning(
"Infeasible lower and upper bounds: %s %f %f",
var,
var.lowerbound,
var.upperbound,
)
feasible = False
return feasible
def _linear_expression(self, lin_expr: LinearExpression) -> Tuple[float, LinearExpression]:
const = 0.0
lin_dict: Dict[str, float] = defaultdict(float)
for i, w_i in lin_expr.to_dict(use_name=True).items():
i = cast(str, i)
expr_i = self._subs.get(i, SubstitutionExpression(coeff=1, variable=i))
const += w_i * expr_i.const
if expr_i.variable:
lin_dict[expr_i.variable] += w_i * expr_i.coeff
new_lin = LinearExpression(
quadratic_program=self._dst, coefficients=lin_dict if lin_dict else {}
)
return const, new_lin
def _quadratic_expression(
self, quad_expr: QuadraticExpression
) -> Tuple[float, Optional[LinearExpression], Optional[QuadraticExpression]]:
const = 0.0
lin_dict: Dict[str, float] = defaultdict(float)
quad_dict: Dict[Tuple[str, str], float] = defaultdict(float)
for (i, j), w_ij in quad_expr.to_dict(use_name=True).items():
i = cast(str, i)
j = cast(str, j)
expr_i = self._subs.get(i, SubstitutionExpression(coeff=1, variable=i))
expr_j = self._subs.get(j, SubstitutionExpression(coeff=1, variable=j))
const += w_ij * expr_i.const * expr_j.const
if expr_i.variable:
lin_dict[expr_i.variable] += w_ij * expr_i.coeff * expr_j.const
if expr_j.variable:
lin_dict[expr_j.variable] += w_ij * expr_j.coeff * expr_i.const
if expr_i.variable and expr_j.variable:
quad_dict[expr_i.variable, expr_j.variable] += w_ij * expr_i.coeff * expr_j.coeff
new_lin = LinearExpression(
quadratic_program=self._dst, coefficients=lin_dict if lin_dict else {}
)
new_quad = QuadraticExpression(
quadratic_program=self._dst, coefficients=quad_dict if quad_dict else {}
)
return const, new_lin, new_quad
def _objective(self) -> bool:
obj = self._src.objective
const1, lin1 = self._linear_expression(obj.linear)
const2, lin2, quadratic = self._quadratic_expression(obj.quadratic)
constant = obj.constant + const1 + const2
linear = lin1.coefficients + lin2.coefficients
if obj.sense == obj.sense.MINIMIZE:
self._dst.minimize(constant=constant, linear=linear, quadratic=quadratic.coefficients)
else:
self._dst.maximize(constant=constant, linear=linear, quadratic=quadratic.coefficients)
return True
def _linear_constraints(self) -> bool:
feasible = True
for lin_cst in self._src.linear_constraints:
constant, linear = self._linear_expression(lin_cst.linear)
rhs = lin_cst.rhs - constant
if linear.coefficients.nnz > 0:
self._dst.linear_constraint(
name=lin_cst.name,
linear=linear.coefficients,
sense=lin_cst.sense,
rhs=rhs,
)
else:
if not self._feasible(lin_cst.sense, rhs):
logger.warning("constraint %s is infeasible due to substitution", lin_cst.name)
feasible = False
return feasible
def _quadratic_constraints(self) -> bool:
feasible = True
for quad_cst in self._src.quadratic_constraints:
const1, lin1 = self._linear_expression(quad_cst.linear)
const2, lin2, quadratic = self._quadratic_expression(quad_cst.quadratic)
rhs = quad_cst.rhs - const1 - const2
linear = lin1.coefficients + lin2.coefficients
if quadratic.coefficients.nnz > 0:
self._dst.quadratic_constraint(
name=quad_cst.name,
linear=linear,
quadratic=quadratic.coefficients,
sense=quad_cst.sense,
rhs=rhs,
)
elif linear.nnz > 0:
name = quad_cst.name
lin_names = set(lin.name for lin in self._dst.linear_constraints)
while name in lin_names:
name = "_" + name
self._dst.linear_constraint(name=name, linear=linear, sense=quad_cst.sense, rhs=rhs)
else:
if not self._feasible(quad_cst.sense, rhs):
logger.warning("constraint %s is infeasible due to substitution", quad_cst.name)
feasible = False
return feasible
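# A minimal usage sketch (not part of the original module); run it with
# ``python -m ...`` so the relative imports resolve. Variable names, bounds and
# coefficients are arbitrary.
if __name__ == "__main__":
    _qp = QuadraticProgram("example")
    _qp.continuous_var(lowerbound=0, upperbound=4, name="x")
    _qp.continuous_var(lowerbound=0, upperbound=4, name="y")
    _qp.continuous_var(lowerbound=0, upperbound=4, name="z")
    _qp.minimize(linear={"x": 1.0, "y": 2.0, "z": 3.0})
    # x <- 2 (constant), z <- 0.5 * y
    _new_qp = substitute_variables(_qp, constants={"x": 2}, variables={"z": ("y", 0.5)})
    print(_new_qp.objective.constant, _new_qp.objective.linear.to_dict(use_name=True))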
|
the-stack_106_20049
|
import json
import logging
import queue
import re
import socket
import ssl
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from threading import Event, RLock, Thread
from time import monotonic as now, sleep
from typing import Callable, List, Dict, Match, Optional, Pattern, Tuple, Union
from urllib.parse import urlparse, ParseResult
import pkg_resources
from pynats2.exceptions import (
NATSConnectionError,
NATSInvalidResponse,
NATSInvalidSchemeError,
NATSInvalidUrlError,
NATSRequestTimeoutError,
NATSTCPConnectionRequiredError,
NATSTLSConnectionRequiredError,
NATSUnexpectedResponse,
)
from pynats2.nuid import NUID
__all__ = ("NATSSubscription", "NATSMessage", "NATSClient", "NATSNoSubscribeClient")
LOG = logging.getLogger(__name__)
INFO_OP = b"INFO"
CONNECT_OP = b"CONNECT"
PING_OP = b"PING"
PONG_OP = b"PONG"
SUB_OP = b"SUB"
UNSUB_OP = b"UNSUB"
PUB_OP = b"PUB"
MSG_OP = b"MSG"
OK_OP = b"+OK"
ERR_OP = b"-ERR"
DEFAULT_PING_INTERVAL = 30.0
DEFAULT_RECONNECT_DELAY = 10.0
DEFAULT_REQUEST_TIMEOUT = 120.0
DEFAULT_SOCKET_TIMEOUT = 1.0
DEFAULT_WORKERS = 3
RETRY_COUNT = 3
RETRY_INTERVAL = 3
INFO_RE = re.compile(rb"^INFO\s+([^\r\n]+)\r\n")
PING_RE = re.compile(rb"^PING\r\n")
PONG_RE = re.compile(rb"^PONG\r\n")
MSG_RE = re.compile(
rb"^MSG\s+(?P<subject>[^\s\r\n]+)\s+(?P<sid>[^\s\r\n]+)\s+(?P<reply>([^\s\r\n]+)[^\S\r\n]+)?(?P<size>\d+)\r\n" # noqa
)
OK_RE = re.compile(rb"^\+OK\s*\r\n")
ERR_RE = re.compile(rb"^-ERR\s+('.+')?\r\n")
_CRLF_ = b"\r\n"
_SPC_ = b" "
COMMANDS = {
INFO_OP: INFO_RE,
PING_OP: PING_RE,
PONG_OP: PONG_RE,
MSG_OP: MSG_RE,
OK_OP: OK_RE,
ERR_OP: ERR_RE,
}
INBOX_PREFIX = bytearray(b"_INBOX.")
def auto_reconnect(func) -> Callable:
def wrapper(self, *args, **kwargs):
while True:
for _ in self._parsed_urls:
try:
return func(self, *args, **kwargs)
except (socket.error, ssl.SSLError) as e:
LOG.error(str(e))
self._url_index += 1
if self._url_index >= len(self._parsed_urls):
self._url_index = 0
if func.__name__ != "connect":
self.reconnect()
if not self._reconnect_forever:
raise NATSConnectionError("all connection failed")
sleep(self._reconnect_delay)
return wrapper
def auto_retry(func) -> Callable:
def wrapper(self, *args, **kwargs):
while True:
for _ in range(RETRY_COUNT):
try:
return func(self, *args, **kwargs)
except (socket.error, ssl.SSLError) as e:
LOG.error(str(e))
sleep(RETRY_INTERVAL)
if func.__name__ != "_pinger_thread" and not self._reconnect_forever:
raise NATSConnectionError("all connection failed")
return wrapper
@dataclass
class NATSSubscription:
sid: int
subject: str
queue: str
callback: Callable
max_messages: Optional[int] = None
received_messages: int = 0
def is_wasted(self):
return (
self.max_messages is not None
and self.received_messages == self.max_messages
)
@dataclass
class NATSMessage:
sid: int
subject: str
reply: str
payload: bytes
class NATSNoSubscribeClient:
__slots__ = (
"_name",
"_nuid",
"_parsed_urls",
"_pedantic",
"_ping_interval",
"_reconnect_delay",
"_reconnect_forever",
"_socket",
"_socket_buffer",
"_socket_keepalive",
"_socket_timeout",
"_ssid",
"_subs",
"_tls_cacert",
"_tls_client_cert",
"_tls_client_key",
"_tls_verify",
"_url_index",
"_verbose",
"_vhost_name",
"_vhost_len",
)
def __init__(
self,
url: str = "nats://127.0.0.1:4222",
*,
name: str = "pynats2",
pedantic: bool = False,
ping_interval: float = DEFAULT_PING_INTERVAL,
reconnect: bool = False,
reconnect_delay: float = DEFAULT_RECONNECT_DELAY,
reconnect_forever: bool = False,
tls_cacert: Optional[str] = None,
tls_client_cert: Optional[str] = None,
tls_client_key: Optional[str] = None,
tls_verify: bool = False,
socket_keepalive: bool = False,
socket_timeout: float = DEFAULT_SOCKET_TIMEOUT,
verbose: bool = False,
) -> None:
self._name: str = name
self._nuid: NUID = NUID()
self._parsed_urls: List[ParseResult] = []
self._pedantic: bool = pedantic
self._ping_interval: float = ping_interval
self._reconnect_delay: float = reconnect_delay
self._reconnect_forever = reconnect_forever
self._socket: socket.socket
self._socket_buffer: bytes = b""
self._socket_keepalive: bool = socket_keepalive
self._socket_timeout: float = socket_timeout
self._ssid: int = 0
self._tls_cacert: Optional[str] = tls_cacert
self._tls_client_cert: Optional[str] = tls_client_cert
self._tls_client_key: Optional[str] = tls_client_key
self._tls_verify: bool = tls_verify
self._url_index: int = 0
self._verbose: bool = verbose
for _url in url.split(","):
try:
parsed = urlparse(_url)
self._parsed_urls.append(parsed)
except ValueError:
raise NATSInvalidUrlError(_url)
if parsed.scheme not in ("nats", "tls"):
raise NATSInvalidSchemeError(
f"got unsupported URI scheme: %s" % parsed.scheme
)
vhost: str = parsed.path.strip("/").replace("/", ".")
if len(vhost) > 0:
vhost += "."
self._vhost_name: str = vhost
self._vhost_len: int = len(vhost)
def __enter__(self) -> "NATSNoSubscribeClient":
self.connect()
return self
def __exit__(self, type_, value, traceback) -> None:
self.close()
def _vhost(self, subject: str) -> str:
if self._vhost_name == "":
return subject
return "%s%s" % (self._vhost_name, subject)
def _del_vhost(self, subject: str) -> str:
subject = subject.strip()
if self._vhost_name == "":
return subject
if subject.startswith(self._vhost_name):
return subject[self._vhost_len :]
return subject
def _send_connect_command(self) -> None:
options = {
"name": self._name,
"lang": "python",
"protocol": 0,
"version": pkg_resources.get_distribution("pynats2").version,
"verbose": self._verbose,
"pedantic": self._pedantic,
}
username = self._parsed_urls[self._url_index].username
password = self._parsed_urls[self._url_index].password
if username and password:
options["user"] = username
options["pass"] = password
elif username:
options["auth_token"] = username
self._send(CONNECT_OP, json.dumps(options))
def _connect_tls(self) -> None:
ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
if self._tls_verify:
if self._tls_cacert is not None:
ctx.load_verify_locations(cafile=self._tls_cacert)
if self._tls_client_cert is not None and self._tls_client_key is not None:
ctx.load_cert_chain(
certfile=str(self._tls_client_cert),
keyfile=str(self._tls_client_key),
)
else:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
hostname = self._parsed_urls[self._url_index].hostname
self._socket = ctx.wrap_socket(self._socket, server_hostname=hostname)
@auto_reconnect
def connect(self) -> None:
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self._socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(self._socket_timeout)
hostname = self._parsed_urls[self._url_index].hostname
port = self._parsed_urls[self._url_index].port
sock.connect((hostname, port))
self._socket = sock
_command, result = self._recv(INFO_RE)
if result is None:
raise NATSConnectionError("connection failed")
server_info = json.loads(result.group(1))
tls = self._parsed_urls[self._url_index].scheme == "tls"
if server_info.get("tls_required", False) != tls:
if tls:
raise NATSTCPConnectionRequiredError("server disabled TLS connection")
else:
raise NATSTLSConnectionRequiredError("server enabled TLS connection")
if tls:
self._connect_tls()
self._send_connect_command()
def close(self) -> None:
try:
self._socket.close()
self._socket_buffer = b""
except (socket.error, ssl.SSLError):
pass
def reconnect(self) -> None:
self.close()
self.connect()
@auto_reconnect
def ping(self) -> None:
self._send(PING_OP)
@auto_reconnect
def publish(self, subject: str, *, payload: bytes = b"", reply: str = "") -> None:
self._send(PUB_OP, self._vhost(subject), self._vhost(reply), len(payload))
self._send(payload)
@auto_reconnect
def request(
self,
subject: str,
*,
payload: bytes = b"",
timeout: float = DEFAULT_REQUEST_TIMEOUT,
) -> Optional[NATSMessage]:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next_())
reply_subject = next_inbox.decode()
self._send(SUB_OP, self._vhost(reply_subject), "", 0)
self.publish(subject, payload=payload, reply=reply_subject)
_from_start = now()
_from_ping = now()
while True:
command, result = self._recv(MSG_RE, PING_RE, PONG_RE, OK_RE)
if command is None:
if now() - _from_start >= timeout:
self._send(UNSUB_OP, 0)
raise NATSRequestTimeoutError()
if now() - _from_ping >= self._ping_interval:
_from_ping = now()
self.ping()
continue
if command is MSG_RE:
if result is None:
# Not reachable
return None
message = self._recv_message(result)
self._send(UNSUB_OP, 0)
return message
elif command is PING_RE:
self._send(PONG_OP)
def _send(self, *parts: Union[bytes, str, int]) -> None:
self._socket.sendall(_SPC_.join(self._encode(p) for p in parts) + _CRLF_)
def _encode(self, value: Union[bytes, str, int]) -> bytes:
if isinstance(value, bytes):
return value
elif isinstance(value, str):
return value.encode()
elif isinstance(value, int):
return f"{value:d}".encode()
raise RuntimeError(f"got unsupported type for encoding: type={type(value)}")
def _recv(
self, *commands: Pattern[bytes]
) -> Union[Tuple[Pattern[bytes], Match[bytes]], Tuple[None, None]]:
try:
line = self._readline()
except socket.timeout:
return None, None
command = self._get_command(line)
if command not in commands:
raise NATSUnexpectedResponse(line)
result = command.match(line)
if result is None:
raise NATSInvalidResponse(line)
return command, result
def _readline(self, *, size: int = None) -> bytes:
result: bytes = b""
if size is None:
while _CRLF_ not in self._socket_buffer:
self._socket_buffer += self._socket.recv(4096)
newline_pos = self._socket_buffer.index(_CRLF_) + len(_CRLF_)
result = self._socket_buffer[:newline_pos]
self._socket_buffer = self._socket_buffer[newline_pos:]
else:
to_recv_size = size + len(_CRLF_)
while len(self._socket_buffer) < to_recv_size:
self._socket_buffer += self._socket.recv(4096)
result = self._socket_buffer[:to_recv_size]
self._socket_buffer = self._socket_buffer[to_recv_size:]
return result
def _strip(self, line: bytes) -> bytes:
return line[: -len(_CRLF_)]
def _get_command(self, line: bytes) -> Optional[Pattern[bytes]]:
values = self._strip(line).split(b" ", 1)
return COMMANDS.get(values[0])
def _recv_message(self, result: Match[bytes]) -> NATSMessage:
message_data = result.groupdict()
message_payload_size = int(message_data["size"])
message_payload = self._readline(size=message_payload_size)
message_payload = self._strip(message_payload)
message = NATSMessage(
sid=int(message_data["sid"].decode()),
subject=self._del_vhost(message_data["subject"].decode()),
reply=self._del_vhost(message_data["reply"].decode())
if message_data["reply"]
else "",
payload=message_payload,
)
return message
class NATSClient(NATSNoSubscribeClient):
__slots__ = (
"_name",
"_nuid",
"_parsed_urls",
"_pedantic",
"_ping_interval",
"_pinger",
"_pinger_timer",
"_reconnect_delay",
"_reconnect_forever",
"_send_lock",
"_socket",
"_socket_buffer",
"_socket_keepalive",
"_socket_timeout",
"_ssid",
"_subs",
"_tls_cacert",
"_tls_client_cert",
"_tls_client_key",
"_tls_verify",
"_url_index",
"_waiter",
"_waiter_enabled",
"_workers",
"_worker_num",
"_verbose",
"_vhost_name",
"_vhost_len",
)
def __init__(
self,
url: str = "nats://127.0.0.1:4222",
*,
name: str = "pynats2",
pedantic: bool = False,
ping_interval: float = DEFAULT_PING_INTERVAL,
reconnect_delay: float = DEFAULT_RECONNECT_DELAY,
reconnect_forever: bool = False,
tls_cacert: Optional[str] = None,
tls_client_cert: Optional[str] = None,
tls_client_key: Optional[str] = None,
tls_verify: bool = False,
socket_keepalive: bool = False,
socket_timeout: float = DEFAULT_SOCKET_TIMEOUT,
verbose: bool = False,
workers: int = DEFAULT_WORKERS,
) -> None:
super().__init__(
url,
name=name,
pedantic=pedantic,
ping_interval=ping_interval,
reconnect_delay=reconnect_delay,
reconnect_forever=reconnect_forever,
tls_cacert=tls_cacert,
tls_client_cert=tls_client_cert,
tls_client_key=tls_client_key,
tls_verify=tls_verify,
socket_keepalive=socket_keepalive,
socket_timeout=socket_timeout,
verbose=verbose,
)
self._pinger: Optional[Thread] = None
self._pinger_timer: Event = Event()
self._subs: Dict[int, NATSSubscription] = {}
self._send_lock: RLock = RLock()
self._waiter: Optional[Thread] = None
self._waiter_enabled: bool = False
self._worker_num: int = workers
self._workers: Optional[ThreadPoolExecutor] = None
def _start_workers(self):
self._workers = ThreadPoolExecutor(
max_workers=self._worker_num, thread_name_prefix="worker"
)
def _stop_workers(self):
if self._workers:
self._workers.shutdown()
self._workers = None
@auto_reconnect
def _waiter_thread(self):
while self._waiter_enabled:
command, result = self._recv(MSG_RE, PING_RE, PONG_RE, OK_RE)
if command is None:
continue
if command is MSG_RE:
self._handle_message(result)
elif command is PING_RE:
self._send(PONG_OP)
def _start_waiter(self):
self._waiter = Thread(target=self._waiter_thread)
self._waiter_enabled = True
self._waiter.start()
def _stop_waiter(self):
if self._waiter_enabled:
self._waiter_enabled = False
try:
self._send(PING_OP)
except (socket.error, ssl.SSLError):
pass
if self._waiter:
self._waiter.join()
self._waiter = None
@auto_retry
def _pinger_thread(self) -> None:
while not self._pinger_timer.wait(timeout=self._ping_interval):
self._send(PING_OP)
self._pinger_timer.clear()
def _start_pinger(self):
self._pinger_timer.clear()
self._pinger = Thread(target=self._pinger_thread)
self._pinger.start()
def _stop_pinger(self):
self._pinger_timer.set()
if self._pinger:
self._pinger.join()
self._pinger = None
def connect(self) -> None:
super().connect()
self._start_workers()
self._start_waiter()
self._start_pinger()
def close(self) -> None:
try:
self._stop_pinger()
self._stop_waiter()
self._stop_workers()
super().close()
except (socket.error, ssl.SSLError):
pass
def reconnect(self) -> None:
self.close()
self.connect()
for sub in self._subs.values():
self._send(SUB_OP, self._vhost(sub.subject), sub.queue, sub.sid)
@auto_retry
def subscribe(
self,
subject: str,
*,
callback: Callable,
queue: str = "",
max_messages: Optional[int] = None,
) -> NATSSubscription:
sub = NATSSubscription(
sid=self._ssid,
subject=subject,
queue=queue,
callback=callback,
max_messages=max_messages,
)
self._ssid += 1
self._subs[sub.sid] = sub
self._send(SUB_OP, self._vhost(sub.subject), sub.queue, sub.sid)
return sub
@auto_retry
def unsubscribe(self, sub: NATSSubscription) -> None:
self._subs.pop(sub.sid, None)
self._send(UNSUB_OP, sub.sid)
@auto_retry
def auto_unsubscribe(self, sub: NATSSubscription) -> None:
if sub.max_messages is None:
return
self._send(UNSUB_OP, sub.sid, sub.max_messages)
@auto_retry
def publish(self, subject: str, *, payload: bytes = b"", reply: str = "") -> None:
with self._send_lock:
self._send(PUB_OP, self._vhost(subject), self._vhost(reply), len(payload))
self._send(payload)
@auto_retry
def request(
self,
subject: str,
*,
payload: bytes = b"",
timeout: float = DEFAULT_REQUEST_TIMEOUT,
) -> NATSMessage:
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next_())
reply_subject = next_inbox.decode()
reply_queue: queue.Queue = queue.Queue()
def callback(message: NATSMessage) -> None:
reply_queue.put(message)
sub = self.subscribe(reply_subject, callback=callback, max_messages=1)
self.publish(subject, payload=payload, reply=reply_subject)
try:
return reply_queue.get(timeout=timeout)
except queue.Empty:
raise NATSRequestTimeoutError()
finally:
self.unsubscribe(sub)
def _send(self, *parts: Union[bytes, str, int]) -> None:
with self._send_lock:
self._socket.sendall(_SPC_.join(self._encode(p) for p in parts) + _CRLF_)
def _handle_message(self, result: Match[bytes]) -> None:
message_data = result.groupdict()
message_payload_size = int(message_data["size"])
message_payload = self._readline(size=message_payload_size)
message_payload = self._strip(message_payload)
message = NATSMessage(
sid=int(message_data["sid"].decode()),
subject=self._del_vhost(message_data["subject"].decode()),
reply=self._del_vhost(message_data["reply"].decode())
if message_data["reply"]
else "",
payload=message_payload,
)
sub = self._subs.get(message.sid)
if sub is None:
LOG.error("no subscribe")
return
sub.received_messages += 1
if sub.is_wasted():
self._subs.pop(sub.sid, None)
if self._workers:
self._workers.submit(sub.callback, message)
|
the-stack_106_20050
|
"""Utils for minibatch SGD across multiple RLlib policies."""
import numpy as np
import logging
from collections import defaultdict
import random
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
logger = logging.getLogger(__name__)
def averaged(kv):
"""Average the value lists of a dictionary.
Arguments:
kv (dict): dictionary with values that are lists of floats.
Returns:
dictionary with single averaged float as values.
"""
out = {}
for k, v in kv.items():
if v[0] is not None and not isinstance(v[0], dict):
out[k] = np.mean(v)
return out
def standardized(array):
"""Normalize the values in an array.
Arguments:
array (np.ndarray): Array of values to normalize.
Returns:
array with zero mean and unit standard deviation.
"""
return (array - array.mean()) / max(1e-4, array.std())
def minibatches(samples, sgd_minibatch_size):
"""Return a generator yielding minibatches from a sample batch.
Arguments:
samples (SampleBatch): batch of samples to split up.
sgd_minibatch_size (int): size of minibatches to return.
Returns:
generator that returns mini-SampleBatches of size sgd_minibatch_size.
"""
if not sgd_minibatch_size:
yield samples
return
if isinstance(samples, MultiAgentBatch):
raise NotImplementedError(
"Minibatching not implemented for multi-agent in simple mode")
if "state_in_0" in samples.data:
logger.warning("Not shuffling RNN data for SGD in simple mode")
else:
samples.shuffle()
i = 0
slices = []
while i < samples.count:
slices.append((i, i + sgd_minibatch_size))
i += sgd_minibatch_size
random.shuffle(slices)
for i, j in slices:
yield samples.slice(i, j)
def do_minibatch_sgd(samples, policies, local_worker, num_sgd_iter,
sgd_minibatch_size, standardize_fields):
"""Execute minibatch SGD.
Arguments:
samples (SampleBatch): batch of samples to optimize.
policies (dict): dictionary of policies to optimize.
local_worker (RolloutWorker): master rollout worker instance.
num_sgd_iter (int): number of epochs of optimization to take.
sgd_minibatch_size (int): size of minibatches to use for optimization.
standardize_fields (list): list of sample field names that should be
normalized prior to optimization.
Returns:
averaged info fetches over the last SGD epoch taken.
"""
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({DEFAULT_POLICY_ID: samples}, samples.count)
fetches = {}
for policy_id, policy in policies.items():
if policy_id not in samples.policy_batches:
continue
batch = samples.policy_batches[policy_id]
for field in standardize_fields:
batch[field] = standardized(batch[field])
for i in range(num_sgd_iter):
iter_extra_fetches = defaultdict(list)
for minibatch in minibatches(batch, sgd_minibatch_size):
batch_fetches = (local_worker.learn_on_batch(
MultiAgentBatch({
policy_id: minibatch
}, minibatch.count)))[policy_id]
for k, v in batch_fetches[LEARNER_STATS_KEY].items():
iter_extra_fetches[k].append(v)
logger.debug("{} {}".format(i, averaged(iter_extra_fetches)))
fetches[policy_id] = averaged(iter_extra_fetches)
return fetches
|
the-stack_106_20052
|
import sys
import psana
from libtbx.phil import parse
from scitbx.array_family import flex
from dxtbx.format.FormatXTC import FormatXTC, locator_str
try:
from xfel.cxi.cspad_ana import rayonix_tbx
except ImportError:
# xfel not configured
pass
rayonix_locator_str = """
rayonix {
bin_size = None
.type = int
.help = Detector binning mode
}
"""
rayonix_locator_scope = parse(rayonix_locator_str + locator_str, process_includes=True)
class FormatXTCRayonix(FormatXTC):
def __init__(self, image_file, **kwargs):
super().__init__(image_file, locator_scope=rayonix_locator_scope, **kwargs)
cfgs = self._ds.env().configStore()
rayonix_cfg = cfgs.get(psana.Rayonix.ConfigV2, psana.Source("Rayonix"))
if self.params.rayonix.bin_size is None:
assert rayonix_cfg.binning_f() == rayonix_cfg.binning_s()
bin_size = rayonix_cfg.binning_f()
else:
bin_size = self.params.rayonix.bin_size
self._pixel_size = rayonix_tbx.get_rayonix_pixel_size(bin_size)
self._image_size = rayonix_tbx.get_rayonix_detector_dimensions(self._ds.env())
@staticmethod
def understand(image_file):
try:
params = FormatXTC.params_from_phil(rayonix_locator_scope, image_file)
except Exception:
return False
return any(["rayonix" in src.lower() for src in params.detector_address])
def get_raw_data(self, index=None):
if index is None:
index = 0
assert len(self.params.detector_address) == 1
data = rayonix_tbx.get_data_from_psana_event(
self._get_event(index), self.params.detector_address[0]
)
return flex.double(data)
def get_detector(self, index=None):
return self._detector()
def _detector(self):
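        # Note: the distance (100 mm) and beam centre (50, 50) mm below look like
        # placeholder geometry, presumably refined downstream; only the pixel
        # size and image size are derived from the XTC configuration above.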
return self._detector_factory.simple(
sensor="UNKNOWN",
distance=100.0,
beam_centre=(50.0, 50.0),
fast_direction="+x",
slow_direction="-y",
pixel_size=(self._pixel_size, self._pixel_size),
image_size=self._image_size,
trusted_range=(
rayonix_tbx.rayonix_min_trusted_value,
rayonix_tbx.rayonix_saturated_value,
),
mask=[],
)
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatXTCRayonix.understand(arg))
|
the-stack_106_20057
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import re
import json
import argparse
import json
import random
import math
import os
import copy
import sys
from os import listdir
from os.path import isfile, join
from pprint import pprint as pp
# import project libs
sys.path.append('../Auswertung')
import compare_annotations
# defining globals & constants
SUBJECTS_TABEL_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/subjects_tabel.json'
QUESTIONNAIRE_DOCUMENT_JSON = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Vorverarbeitung/questionnaire_document.json'
# PLAIN_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Auswertung/test_text/raw/'
# GOLD_ANNOTATED_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Apps/Auswertung/test_text/gold-annotiert/'
PLAIN_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/Implisense/json/'
GOLD_ANNOTATED_CORPUS_FILES = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/Implisense/json gold/'
SUBJECT_CORPUS_FOLDER = '/Users/rg/Nextcloud/Uni/Bachelorarbeit/Korpora/VP vorbereitet'
CORPUS_SUFFIX = 'preannotated_subject_corpus_document'
# methods
def read_subject_table():
def flatten_annotations_per_block(blocks_per_subject):
# return a list of paragraphs for each subject
subjects = []
for blocks_for_subject in blocks_per_subject:
paragraphs = []
for block in blocks_for_subject:
for paragraph in block:
paragraphs.append(paragraph)
subjects.append(paragraphs)
return subjects
file_handler = open(SUBJECTS_TABEL_JSON, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
blocks = json.JSONDecoder().decode(raw_content)
return flatten_annotations_per_block(blocks)
def read_questionnaire_document():
file_handler = open(QUESTIONNAIRE_DOCUMENT_JSON, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
return json.JSONDecoder().decode(raw_content)
def read_corpus_files(path):
def clean_unrecognized_labels(corpus):
for document in corpus:
for paragraph in document['data']:
for sentence in paragraph:
for token in sentence:
if 'annotation' in token and token['annotation']['label'] not in LABELS:
del token['annotation']
corpus = []
for file_name in sorted(listdir(path)):
if not (isfile(join(path, file_name)) and file_name.endswith('.json')): continue
file_handler = open(path + file_name, 'r', encoding='utf-8')
raw_content = file_handler.read()
file_handler.close()
        decoded_content = json.JSONDecoder().decode(raw_content)
        corpus.append(decoded_content)
# clean_unrecognized_labels(corpus)
return corpus
def create_annotated_corpus(subject_corpus, gold_annotated_corpus, subject_annotation_classes):
total_paragraph_index = -1
for document_index, document in enumerate(gold_annotated_corpus):
for paragraph_index, paragraph in enumerate(document['data']):
total_paragraph_index += 1
for sentence_index, sentence in enumerate(paragraph):
annotation_classes_of_sentence = subject_annotation_classes[total_paragraph_index][sentence_index]
if len(annotation_classes_of_sentence) == 0: continue
annotation_number = 0
for token_index, token in enumerate(sentence):
next_annotation_class = annotation_classes_of_sentence[annotation_number]
subject_sentence = subject_corpus[document_index]['data'][paragraph_index][sentence_index]
# look ahead annotation for class 4 annotations
if next_annotation_class == 4:
(new_subject_sentence, _) = unnecessary_annotation(subject_sentence, token_index, sentence)
# skip if token not annotated
elif not ('annotation' in token):
continue
                    # if the current token is annotated, apply the specified annotation class
else:
manipulator = ANNOTATION_CLASSES[next_annotation_class]
(new_subject_sentence, _) = manipulator(subject_sentence, token_index, sentence)
subject_corpus[document_index]['data'][paragraph_index][sentence_index] = new_subject_sentence
annotation_number += 1
if annotation_number >= len(annotation_classes_of_sentence): break
return subject_corpus
# annotation manipulation
def manipulate_span(sentence, token_index, reference_sentence):
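    # Shift the annotated chunk's start index by a random offset that is valid
    # in both the reference and the subject sentence, copy the annotation to the
    # new start token, then re-sample a length that stays within the remaining
    # free tokens (capped at 4).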
length = reference_sentence[token_index]['annotation']['length']
debug_length = reference_sentence[token_index]['annotation']['length']
new_start_index = token_index
index_offset = 0
possible_offsets_reference = possible_start_index_offsets(reference_sentence, token_index)
possible_offsets_sentence = possible_start_index_offsets(sentence, token_index)
possible_offsets = list(set(possible_offsets_reference).intersection(possible_offsets_sentence))
if not (len(possible_offsets) == 0):
index_offset = random.choice(possible_offsets)
new_start_index += index_offset
sentence[new_start_index]['annotation'] = copy.deepcopy(reference_sentence[token_index]['annotation'])
    # chunk has a new beginning but still the same length - let's change that!
length_with_offset = (length - index_offset)
if index_offset < 0:
min_length = length_with_offset - (length - 1)
if min_length < 1: min_length = 1
max_length = maximum_chunk_length(reference_sentence, token_index + 1) - index_offset
else:
min_length = length_with_offset
max_length = maximum_chunk_length(reference_sentence, new_start_index)
if min_length == length and length + 1 <= max_length:
min_length += 1
length = random.choice(range(min_length, max_length + 1))
if length > 4:
length = 4
sentence[new_start_index]['annotation']['length'] = length
return (sentence, new_start_index)
def manipulate_cat(sentence, token_index, reference_sentence):
annotated_token = reference_sentence[token_index]
new_label = change_label(annotated_token['annotation']['label'])
annotated_token['annotation']['label'] = new_label
sentence[token_index] = annotated_token
return (sentence, token_index)
def manipulate_cat_span(sentence, token_index, reference_sentence):
(sentence, token_index) = manipulate_span(sentence, token_index, reference_sentence)
return manipulate_cat(sentence, token_index, sentence)
def correct_annotation(sentence, token_index, reference_sentence):
sentence[token_index] = copy.deepcopy(reference_sentence[token_index])
return (sentence, token_index)
def no_annotation(sentence, token_index, reference_sentence):
return (sentence, token_index)
def unnecessary_annotation(sentence, token_index, reference_sentence):
if token_index == 0:
begin = 0
else:
if not 'annotation' in reference_sentence[token_index - 1]: # DEBUG!
            print('DEBUG token_index', token_index, 'sentence:')
pp(reference_sentence)
current_annotation_length = reference_sentence[token_index - 1]['annotation']['length']
begin = token_index - 1 + current_annotation_length
end = begin + maximum_chunk_length(reference_sentence, begin)
annotation_index = random.choice(range(begin, end))
max_length = maximum_chunk_length(reference_sentence, annotation_index)
annotation_label = random.choice(LABELS)
annotation_length = 1
if max_length >= 3:
annotation_length = random.choice([1, 2, 3])
elif max_length == 2:
annotation_length = random.choice([1, 2])
sentence[annotation_index]['annotation'] = {
'label': annotation_label,
'length': annotation_length
}
return (sentence, annotation_index)
# helpers
def possible_start_index_offsets(sentence, token_index):
possible_offsets = []
length = 3 # >= 2
if 'annotation' in sentence[token_index]:
length = sentence[token_index]['annotation']['length']
max_left_shift = token_index - earliest_chunk_start_index(sentence, token_index)
max_right_shift = maximum_chunk_length(sentence, token_index) - 1
if max_left_shift >= 2 and length > 2:
possible_offsets = [-2, -1]
elif max_left_shift >= 1:
possible_offsets = [-1]
if max_right_shift >= 2 and length > 2:
possible_offsets += [1, 2]
elif max_right_shift >= 1 and length >= 2:
possible_offsets += [1]
return possible_offsets
def earliest_chunk_start_index(sentence, start_index):
sentence_length = len(sentence)
if start_index == 0:
return 0
earliest_index = start_index
for iteration_index in range((start_index - 1), -1, -1):
earliest_index -= 1
if 'annotation' in sentence[iteration_index]:
length = sentence[iteration_index]['annotation']['length']
earliest_index += length
break
return earliest_index
def maximum_chunk_length(sentence, start_index):
sentence_length = len(sentence)
if start_index == sentence_length - 1:
return 1
max_chunk_length = 1
for iteration_index in range((start_index + 1), sentence_length):
if 'annotation' in sentence[iteration_index]:
break
max_chunk_length = max_chunk_length + 1
return max_chunk_length
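# Illustrative only: a small sketch of the chunk helpers on a toy sentence built
# from token dicts as used throughout this script. It is not called anywhere;
# run it manually to inspect the values.
def _demo_chunk_helpers():
    sentence = [
        {'term': 'Frau'},
        {'term': 'Alice', 'annotation': {'label': 'PER', 'length': 2}},
        {'term': 'Smith'},
        {'term': 'lacht'},
    ]
    print(maximum_chunk_length(sentence, 2))          # -> 2 (no annotated token after index 2)
    print(earliest_chunk_start_index(sentence, 3))    # -> 3 (the chunk at index 1 spans up to index 2)
    print(possible_start_index_offsets(sentence, 1))  # -> [-1, 1]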
def change_label(label):
index = LABELS.index(label)
if index > 0: return LABELS[0]
return LABELS[1]
def save_document_to_file(corpus, subject_id):
for document_index, document in enumerate(corpus):
prefix = chr(97 + document_index)
folder_postfix = subject_id
if subject_id < 10:
folder_postfix = "%s%s" % (0, subject_id)
subject_folder = "%s/VP%s" % (SUBJECT_CORPUS_FOLDER, folder_postfix)
file_name = "%s_%s.json" % (prefix, CORPUS_SUFFIX)
file_path = "%s/%s" % (subject_folder, file_name)
if not os.path.exists(subject_folder):
os.makedirs(subject_folder)
json_encoded_document = json.dumps(document)
file_handler = open(file_path, 'w')
file_handler.write(json_encoded_document)
file_handler.close()
# experiment definitions
ANNOTATION_CLASSES = {
0 : correct_annotation,
1 : manipulate_span,
2 : manipulate_cat,
3 : manipulate_cat_span,
4 : unnecessary_annotation,
5 : no_annotation
}
LABELS = [
'PER', 'COM'
]
# entry point as a stand alone script
if __name__ == '__main__':
subjects_table = read_subject_table()
questionnaire_document = read_questionnaire_document()
for subject_id, subject_annotation_classes in enumerate(subjects_table):
# for i in range(0, 20):
# subject_id = i
# subject_annotation_classes = subjects_table[i]
print('create corpus for subject #', subject_id)
gold_annotated_corpus = read_corpus_files(GOLD_ANNOTATED_CORPUS_FILES)
plain_corpus = read_corpus_files(PLAIN_CORPUS_FILES)
subject_corpus = create_annotated_corpus(plain_corpus, gold_annotated_corpus, subject_annotation_classes)
subject_corpus.insert(0, questionnaire_document)
save_document_to_file(subject_corpus, subject_id)
|
the-stack_106_20059
|
import json
import requests
from urllib.parse import urlencode
from zohocrm.exceptions import UnknownError, InvalidModuleError, NoPermissionError, MandatoryKeyNotFoundError, \
InvalidDataError, MandatoryFieldNotFoundError
BASE_URL = 'https://www.zohoapis.com/crm/v2/'
ZOHOCRM_AUTHORIZE_URL = 'https://accounts.zoho.com/oauth/v2/auth'
ZOHOCRM_REQUEST_TOKEN_URL = 'https://accounts.zoho.com/oauth/v2/token'
ZOHOCRM_REFRESH_TOKEN_URL = "https://accounts.zoho.com/oauth/v2/token"
READ_MODULE_LIST = ['leads', 'accounts', 'contacts', 'deals', 'campaigns', 'tasks', 'cases', 'events', 'calls',
'solutions', 'products', 'vendors', 'pricebooks', 'quotes', 'salesorders', 'purchaseorders',
'invoices', 'custom', 'notes', 'approvals', 'dashboards', 'search', 'activities']
# The modules purchaseorders, invoices, salesorders and quotes are temporarily disabled for writing
# due to the complexity of these modules
WRITE_MODULE_LIST = ['leads', 'accounts', 'contacts', 'deals', 'campaigns', 'tasks', 'cases', 'events', 'calls',
'solutions', 'products', 'vendors', 'pricebooks', 'purchaseorders', 'custom', 'notes']
class Client(object):
def __init__(self, client_id, client_secret, redirect_uri, scope, access_type, refresh_token=None):
self.code = None
self.scope = scope
self.access_type = access_type
self.client_id = client_id
self._refresh_token = refresh_token
self.redirect_uri = redirect_uri
self.client_secret = client_secret
self.access_token = None
def get_authorization_url(self):
"""
:return:
"""
params = {'scope': ','.join(self.scope), 'client_id': self.client_id, 'access_type': 'offline',
'redirect_uri': self.redirect_uri, 'response_type': 'code', 'prompt':'consent'}
url = ZOHOCRM_AUTHORIZE_URL + '?' + urlencode(params)
return url
def exchange_code(self, code):
"""
:param code:
:return:
"""
params = {'code': code, 'client_id': self.client_id, 'client_secret': self.client_secret,
'redirect_uri': self.redirect_uri, 'grant_type': 'authorization_code'}
url = ZOHOCRM_REQUEST_TOKEN_URL + '?' + urlencode(params)
return self._post(url)
def refresh_token(self):
"""
:return:
"""
params = {'refresh_token': self._refresh_token, 'client_id': self.client_id,
'client_secret': self.client_secret, 'grant_type': 'refresh_token'}
url = ZOHOCRM_REFRESH_TOKEN_URL + '?' + urlencode(params)
response = self._post(url)
return response
def set_access_token(self, token):
"""
:param token:
:return:
"""
if isinstance(token, dict):
self.access_token = token['access_token']
if 'refresh_token' in token:
self._refresh_token = token['refresh_token']
else:
self.access_token = token
def get_module_list(self):
"""
:return:
"""
url = BASE_URL + "settings/modules"
response = self._get(url)
if response:
return [i for i in response['modules'] if i['api_supported'] is True]
else:
return None
def get_fields_list(self, module):
"""
:param module:
:return:
"""
params = {'module': module}
url = BASE_URL + "settings/fields" + "?" + urlencode(params)
response = self._get(url)
if response:
try:
result = [
{
'id': i['id'],
'label': i['field_label'],
'api_name': i['api_name'],
'max_length': i['length'],
'read_only': i['read_only'],
'data_type': i['data_type'],
'currency': i['currency'],
'lookup': i['lookup'],
'pick_list_values': i['pick_list_values']
} for i in response['fields']]
except Exception as e:
                print(e)
                return None
else:
return None
return result
def create_webhook(self, module, gearplug_webhook_id, notify_url):
"""
:param module:
:param gearplug_webhook_id:
:param notify_url:
:return:
"""
endpoint = 'actions/watch'
event = ["{0}.create".format(module)]
data = [{'notify_url': notify_url, 'channel_id': gearplug_webhook_id, 'events': event, }]
data = {'watch': data}
url = BASE_URL + endpoint
try:
response = self._post(url, data=data)
except Exception as e:
return False
if response['watch'][-1]['code'] == "SUCCESS":
return response['watch'][-1]['details']
else:
return False
def delete_webhook(self, webhook_id, module):
"""
:return:
"""
events = ["{0}.create".format(module)]
data = [{'channel_id': webhook_id, 'events': events, '_delete_events': 'true'}]
data = {'watch': data}
endpoint = 'actions/watch'
url = BASE_URL + endpoint
response = self._patch(url, data=data)
if response['watch'][-1]['code'] == "SUCCESS":
return response['watch'][-1]['details']
else:
return False
def get_records(self, module_name):
"""
:param module_name: module from which to read record (api_name)
:return:
"""
if module_name not in READ_MODULE_LIST:
return None
url = BASE_URL + str(module_name)
response = self._get(url)
all_data = [response['data']]
while response['info']['more_records'] == 'true':
page = response['info']['page']
response = self._get(url, params={'page': int(page) + 1})
all_data.append(response['data'])
return all_data
def get_specific_record(self, module, id):
"""
:return:
"""
endpoint = '{0}/{1}'.format(module, id)
url = BASE_URL + str(endpoint)
response = self._get(url)
if response and 'data' in response and len(response['data']) > 0 and response['data'][0]['id'] == id:
return response['data']
else:
return False
def get_all_active_users(self):
"""
:return: all active users
"""
endpoint = 'users?type=ActiveUsers'
url = BASE_URL + str(endpoint)
response = self._get(url)
if response and 'users' in response and isinstance(response['users'], list) and len(response['users']) > 0:
return response['users']
else:
return False
def get_all_organizations(self):
"""
        :return: all organizations
"""
endpoint = 'org'
url = BASE_URL + str(endpoint)
response = self._get(url)
        if response and 'org' in response and isinstance(response['org'], list) and len(response['org']) > 0:
return response['org']
else:
return False
def insert_record(self, module_name, data):
"""
:param module_name:
:param data:
:return:
"""
if module_name.lower() not in WRITE_MODULE_LIST:
return None
url = BASE_URL + str(module_name)
data = dict(data)
for k, v in data.items():
if v == 'False':
data[k] = False
if v == 'True':
data[k] = True
formatted_data = {'data': []}
formatted_data['data'].append(data)
return self._post(url, data=formatted_data)
def _get(self, endpoint, params=None):
headers = {'Authorization': 'Zoho-oauthtoken {0}'.format(self.access_token), }
response = requests.get(endpoint, params=params, headers=headers)
return self._parse(response, method='get')
def _post(self, endpoint, params=None, data=None):
headers = {'Authorization': 'Zoho-oauthtoken {0}'.format(self.access_token), }
response = requests.post(endpoint, params=params, json=data, headers=headers)
return self._parse(response, method='post')
def _put(self, endpoint, params=None, data=None):
headers = {'Authorization': 'Zoho-oauthtoken {0}'.format(self.access_token), }
response = requests.put(endpoint, params=params, json=data, headers=headers)
return self._parse(response, method='put')
def _patch(self, endpoint, params=None, data=None):
headers = {'Authorization': 'Zoho-oauthtoken {0}'.format(self.access_token), }
response = requests.patch(endpoint, params=params, json=data, headers=headers)
return self._parse(response, method='patch')
def _delete(self, endpoint, params=None):
headers = {'Authorization': 'Zoho-oauthtoken {0}'.format(self.access_token), }
response = requests.delete(endpoint, params=params, headers=headers)
return self._parse(response, method='delete')
def _parse(self, response, method=None):
status_code = response.status_code
if 'application/json' in response.headers['Content-Type']:
r = response.json()
else:
r = response.text
if status_code in (200, 201):
return r
if status_code == 204:
return None
message = None
try:
if 'message' in r:
message = r['message']
except Exception:
message = 'No error message.'
if status_code == 400:
raise InvalidModuleError(message)
if status_code == 401:
raise NoPermissionError(status_code)
if status_code == 201:
raise MandatoryFieldNotFoundError(message)
elif status_code == 202:
raise InvalidDataError(message)
elif status_code == 400:
raise InvalidDataError(message)
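# Illustrative only: a hedged sketch of the OAuth 2.0 handshake with placeholder
# credentials and example scopes. Building the consent URL is purely local; no
# request is sent here.
if __name__ == '__main__':
    demo_client = Client(
        client_id='1000.XXXXXXXX',                    # placeholder
        client_secret='placeholder-secret',           # placeholder
        redirect_uri='https://example.com/callback',  # placeholder
        scope=['ZohoCRM.modules.ALL', 'ZohoCRM.settings.ALL'],
        access_type='offline',
    )
    print(demo_client.get_authorization_url())
    # After the user grants access, exchange the returned code and store tokens:
    #   tokens = demo_client.exchange_code(code)
    #   demo_client.set_access_token(tokens)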
|
the-stack_106_20062
|
# Authors:
# Christian F. Baumgartner ([email protected])
import tensorflow as tf
import numpy as np
from math import sqrt
def flatten(tensor):
'''
Flatten the last N-1 dimensions of a tensor only keeping the first one, which is typically
equal to the number of batches.
Example: A tensor of shape [10, 200, 200, 32] becomes [10, 1280000]
'''
rhs_dim = get_rhs_dim(tensor)
return tf.reshape(tensor, [-1, rhs_dim])
def get_rhs_dim(tensor):
'''
Get the multiplied dimensions of the last N-1 dimensions of a tensor.
I.e. an input tensor with shape [10, 200, 200, 32] leads to an output of 1280000
'''
shape = tensor.get_shape().as_list()
return np.prod(shape[1:])
def put_kernels_on_grid(images, pad=1, rescale_mode='automatic', input_range=None, cutoff_abs=0.5):
'''Visualize conv. filters as an image (mostly for the 1st layer).
Arranges filters into a grid, with some paddings between adjacent filters.
Args:
images: [batch_size, X, Y, channels]
pad: number of black pixels around each filter (between them)
rescale_mode: 'manual', 'automatic' or 'centered'
        Automatic rescale mode scales the images such that they are in the range [0,255]
Manual rescale mode maps input_range to [0,255] and thresholds everything outside the range
Centered rescale mode maps input_range to [0,255] with 0 at 127.5 and cutoff_abs at 255. Input values outside the
range [-cutoff_abs, cutoff_abs] are clipped
input_range: input range used for manual rescaling (min, max)
Return:
Tensor of shape [1, (Y+2*pad)*grid_Y, (X+2*pad)*grid_X, NumChannels].
'''
# get shape of the grid. NumKernels == grid_Y * grid_X
def factorization(n):
for i in range(int(sqrt(float(n))), 0, -1):
if n % i == 0:
if i == 1: print('Who would enter a prime number of images')
return (i, int(n / i))
(grid_Y, grid_X) = factorization(images.get_shape()[0].value)
print('grid: %d = (%d, %d)' % (images.get_shape()[0].value, grid_Y, grid_X))
if rescale_mode == 'automatic':
x_min = tf.reduce_min(images)
x_max = tf.reduce_max(images)
elif rescale_mode == 'manual':
x_min = input_range[0]
x_max = input_range[1]
elif rescale_mode == 'centered':
images = images / (2*cutoff_abs) + 0.5
else:
        raise ValueError('Unknown rescale_mode: %s' % rescale_mode)
if rescale_mode in ['automatic', 'manual']:
images = (images - x_min) / (x_max - x_min)
images = 255.0 * images
if rescale_mode in ['manual', 'centered']:
# threshold such that everything is in [0,255]
images = tf.clip_by_value(images, 0, 255)
# pad X and Y
x = tf.pad(images, tf.constant([[0, 0], [pad, pad], [pad, pad],[0, 0]]), mode='CONSTANT')
# X and Y dimensions, w.r.t. padding
Y = images.get_shape().as_list()[1] + 2 * pad
X = images.get_shape().as_list()[2] + 2 * pad
channels = images.get_shape()[3]
# organize grid on Y axis
x = tf.reshape(x, tf.stack([grid_X, Y * grid_Y, X, channels]))
# switch X and Y axes
x = tf.transpose(x, (0, 2, 1, 3))
# organize grid on X axis
x = tf.reshape(x, tf.stack([1, X * grid_X, Y * grid_Y, channels]))
# Transpose the image again
x = tf.transpose(x, (0, 2, 1, 3))
return x
def put_kernels_on_grid3d(images, axis, cut_index, pad=1, rescale_mode='automatic', input_range=None, cutoff_abs=0.5):
""" Puts a cut through the 3D kernel on the grid
:param images: tensor of rank 5 with [batches, x, y, z, channels]
:param axis: direction perpendicular to the cut, 0 for x, 1 for y, 2 for z
:param cut_index: index where the cut is along the axis
:param pad: number of black pixels around each filter (between them)
    :param rescale_mode: 'manual' or 'automatic'
        Automatic rescale mode scales the images such that they are in the range [0,255]
Manual rescale mode maps input_range to [0,255] and thresholds everything outside the range
:param input_range: input range used for manual rescaling
:return:
"""
image_cut = None
if axis == 0:
image_cut = images[:, cut_index, :, :, :]
elif axis == 1:
image_cut = images[:, :, cut_index, :, :]
elif axis == 2:
image_cut = images[:, :, :, cut_index, :]
return put_kernels_on_grid(image_cut, pad, rescale_mode, input_range, cutoff_abs)
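# Illustrative only (hedged): a typical call site would pass first-layer 3D conv
# weights shaped [filters, x, y, z, channels] and summarize one slice, e.g.
# grid = put_kernels_on_grid3d(weights, axis=2, cut_index=0), which can then be
# logged via an image summary. Exact usage depends on the surrounding TF1-style
# graph code and is not part of this module.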
|
the-stack_106_20064
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from datetime import timedelta
from typing import Optional
from flask import Flask
from flask_appbuilder import SQLA
from flask_caching import Cache
from flask_wtf.csrf import CSRFProtect
from airflow import settings
from airflow.configuration import conf
from airflow.logging_config import configure_logging
from airflow.utils.json import AirflowJsonEncoder
from airflow.www.extensions.init_appbuilder import init_appbuilder
from airflow.www.extensions.init_appbuilder_links import init_appbuilder_links
from airflow.www.extensions.init_dagbag import init_dagbag
from airflow.www.extensions.init_jinja_globals import init_jinja_globals
from airflow.www.extensions.init_manifest_files import configure_manifest_files
from airflow.www.extensions.init_security import init_api_experimental_auth, init_xframe_protection
from airflow.www.extensions.init_session import init_logout_timeout, init_permanent_session
from airflow.www.extensions.init_views import (
init_api_connexion,
init_api_experimental,
init_appbuilder_views,
init_error_handlers,
init_flash_views,
init_plugins,
)
from airflow.www.extensions.init_wsgi_middlewares import init_wsgi_middleware
app: Optional[Flask] = None
# Initializes at the module level, so plugins can access it.
# See: /docs/plugins.rst
csrf = CSRFProtect()
def sync_appbuilder_roles(flask_app):
"""Sync appbuilder roles to DB"""
# Garbage collect old permissions/views after they have been modified.
# Otherwise, when the name of a view or menu is changed, the framework
# will add the new Views and Menus names to the backend, but will not
# delete the old ones.
if conf.getboolean('webserver', 'UPDATE_FAB_PERMS'):
security_manager = flask_app.appbuilder.sm
security_manager.sync_roles()
security_manager.sync_resource_permissions()
def create_app(config=None, testing=False, app_name="Airflow"):
"""Create a new instance of Airflow WWW app"""
flask_app = Flask(__name__)
flask_app.secret_key = conf.get('webserver', 'SECRET_KEY')
session_lifetime_days = conf.getint('webserver', 'SESSION_LIFETIME_DAYS', fallback=30)
flask_app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=session_lifetime_days)
flask_app.config.from_pyfile(settings.WEBSERVER_CONFIG, silent=True)
flask_app.config['APP_NAME'] = app_name
flask_app.config['TESTING'] = testing
flask_app.config['SQLALCHEMY_DATABASE_URI'] = conf.get('core', 'SQL_ALCHEMY_CONN')
flask_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
flask_app.config['SESSION_COOKIE_HTTPONLY'] = True
flask_app.config['SESSION_COOKIE_SECURE'] = conf.getboolean('webserver', 'COOKIE_SECURE')
flask_app.config['SESSION_COOKIE_SAMESITE'] = conf.get('webserver', 'COOKIE_SAMESITE')
if config:
flask_app.config.from_mapping(config)
if 'SQLALCHEMY_ENGINE_OPTIONS' not in flask_app.config:
flask_app.config['SQLALCHEMY_ENGINE_OPTIONS'] = settings.prepare_engine_args()
# Configure the JSON encoder used by `|tojson` filter from Flask
flask_app.json_encoder = AirflowJsonEncoder
csrf.init_app(flask_app)
init_wsgi_middleware(flask_app)
db = SQLA()
db.session = settings.Session
db.init_app(flask_app)
init_dagbag(flask_app)
init_api_experimental_auth(flask_app)
Cache(app=flask_app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
init_flash_views(flask_app)
configure_logging()
configure_manifest_files(flask_app)
with flask_app.app_context():
init_appbuilder(flask_app)
init_appbuilder_views(flask_app)
init_appbuilder_links(flask_app)
init_plugins(flask_app)
init_error_handlers(flask_app)
init_api_connexion(flask_app)
init_api_experimental(flask_app)
sync_appbuilder_roles(flask_app)
init_jinja_globals(flask_app)
init_logout_timeout(flask_app)
init_xframe_protection(flask_app)
init_permanent_session(flask_app)
return flask_app
def cached_app(config=None, testing=False):
"""Return cached instance of Airflow WWW app"""
global app # pylint: disable=global-statement
if not app:
app = create_app(config=config, testing=testing)
return app
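# Illustrative only (hedged): the webserver is expected to obtain the WSGI app
# through cached_app(), e.g. a standalone server could run something like
#   gunicorn "airflow.www.app:cached_app()"
# The exact invocation depends on the Airflow version and deployment.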
|
the-stack_106_20065
|
from __future__ import absolute_import, division, print_function
import io
import itertools
import math
import uuid
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from functools import wraps, partial
from operator import getitem
from random import Random
from toolz import (merge, take, reduce, valmap, map, partition_all, filter,
remove, compose, curry, first, second, accumulate, peek)
from toolz.compatibility import iteritems, zip
import toolz
_implement_accumulate = LooseVersion(toolz.__version__) > '0.7.4'
try:
import cytoolz
from cytoolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
if LooseVersion(cytoolz.__version__) > '0.7.3':
from cytoolz import accumulate # noqa: F811
_implement_accumulate = True
except ImportError:
from toolz import (frequencies, merge_with, join, reduceby,
count, pluck, groupby, topk)
from .. import config
from .avro import to_avro
from ..base import tokenize, dont_optimize, DaskMethodsMixin
from ..bytes import open_files
from ..compatibility import apply, urlopen, Iterable, Iterator
from ..context import globalmethod
from ..core import quote, istask, get_dependencies, reverse_dict, flatten
from ..delayed import Delayed, unpack_collections
from ..highlevelgraph import HighLevelGraph
from ..multiprocessing import get as mpget
from ..optimization import fuse, cull, inline
from ..utils import (system_encoding, takes_multiple_arguments, funcname,
digit, insert, ensure_dict, ensure_bytes, ensure_unicode)
no_default = '__no__default__'
no_result = type('no_result', (object,),
{'__slots__': (),
'__reduce__': lambda self: 'no_result'})
def lazify_task(task, start=True):
"""
Given a task, remove unnecessary calls to ``list`` and ``reify``.
This traverses tasks and small lists. We choose not to traverse down lists
of size >= 50 because it is unlikely that sequences this long contain other
sequences in practice.
Examples
--------
>>> task = (sum, (list, (map, inc, [1, 2, 3]))) # doctest: +SKIP
>>> lazify_task(task) # doctest: +SKIP
(sum, (map, inc, [1, 2, 3]))
"""
if type(task) is list and len(task) < 50:
return [lazify_task(arg, False) for arg in task]
if not istask(task):
return task
head, tail = task[0], task[1:]
if not start and head in (list, reify):
task = task[1]
return lazify_task(*tail, start=False)
else:
return (head,) + tuple([lazify_task(arg, False) for arg in tail])
def lazify(dsk):
"""
Remove unnecessary calls to ``list`` in tasks.
See Also
--------
``dask.bag.core.lazify_task``
"""
return valmap(lazify_task, dsk)
def inline_singleton_lists(dsk, keys, dependencies=None):
""" Inline lists that are only used once.
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards.
"""
if dependencies is None:
dependencies = {k: get_dependencies(dsk, task=v)
for k, v in dsk.items()}
dependents = reverse_dict(dependencies)
inline_keys = {k for k, v in dsk.items()
if istask(v) and v and v[0] is list and len(dependents[k]) == 1}
inline_keys.difference_update(flatten(keys))
dsk = inline(dsk, inline_keys, inline_constants=False)
for k in inline_keys:
del dsk[k]
return dsk
def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=True, **kwargs):
""" Optimize a dask from a dask Bag. """
dsk = ensure_dict(dsk)
dsk2, dependencies = cull(dsk, keys)
dsk3, dependencies = fuse(dsk2, keys + (fuse_keys or []), dependencies,
rename_keys=rename_fused_keys)
dsk4 = inline_singleton_lists(dsk3, keys, dependencies)
dsk5 = lazify(dsk4)
return dsk5
def _to_textfiles_chunk(data, lazy_file, last_endline):
with lazy_file as f:
if isinstance(f, io.TextIOWrapper):
endline = u'\n'
ensure = ensure_unicode
else:
endline = b'\n'
ensure = ensure_bytes
started = False
for d in data:
if started:
f.write(endline)
else:
started = True
f.write(ensure(d))
if last_endline:
f.write(endline)
def to_textfiles(b, path, name_function=None, compression='infer',
encoding=system_encoding, compute=True, storage_options=None,
last_endline=False, **kwargs):
""" Write dask Bag to disk, one filename per partition, one line per element.
**Paths**: This will create one file for each partition in your bag. You
can specify the filenames in a variety of ways.
Use a globstring
>>> b.to_textfiles('/path/to/data/*.json.gz') # doctest: +SKIP
    The * will be replaced by the increasing sequence 0, 1, ...
::
/path/to/data/0.json.gz
/path/to/data/1.json.gz
Use a globstring and a ``name_function=`` keyword argument. The
name_function function should expect an integer and produce a string.
Strings produced by name_function must preserve the order of their
respective partition indices.
>>> from datetime import date, timedelta
>>> def name(i):
... return str(date(2015, 1, 1) + i * timedelta(days=1))
>>> name(0)
'2015-01-01'
>>> name(15)
'2015-01-16'
>>> b.to_textfiles('/path/to/data/*.json.gz', name_function=name) # doctest: +SKIP
::
/path/to/data/2015-01-01.json.gz
/path/to/data/2015-01-02.json.gz
...
You can also provide an explicit list of paths.
>>> paths = ['/path/to/data/alice.json.gz', '/path/to/data/bob.json.gz', ...] # doctest: +SKIP
>>> b.to_textfiles(paths) # doctest: +SKIP
**Compression**: Filenames with extensions corresponding to known
compression algorithms (gz, bz2) will be compressed accordingly.
**Bag Contents**: The bag calling ``to_textfiles`` must be a bag of
text strings. For example, a bag of dictionaries could be written to
JSON text files by mapping ``json.dumps`` on to the bag first, and
then calling ``to_textfiles`` :
>>> b_dict.map(json.dumps).to_textfiles("/path/to/data/*.json") # doctest: +SKIP
**Last endline**: By default the last line does not end with a newline
character. Pass ``last_endline=True`` to invert the default.
"""
mode = 'wb' if encoding is None else 'wt'
files = open_files(path, compression=compression, mode=mode,
encoding=encoding, name_function=name_function,
num=b.npartitions, **(storage_options or {}))
name = 'to-textfiles-' + uuid.uuid4().hex
dsk = {(name, i): (_to_textfiles_chunk, (b.name, i), f, last_endline)
for i, f in enumerate(files)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])
out = type(b)(graph, name, b.npartitions)
if compute:
out.compute(**kwargs)
return [f.path for f in files]
else:
return out.to_delayed()
def finalize(results):
if not results:
return results
if isinstance(results, Iterator):
results = list(results)
if isinstance(results[0], Iterable) and not isinstance(results[0], str):
results = toolz.concat(results)
if isinstance(results, Iterator):
results = list(results)
return results
def finalize_item(results):
return results[0]
class StringAccessor(object):
""" String processing functions
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.lower())
['alice smith', 'bob jones', 'charlie smith']
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
>>> list(b.str.split(' '))
[['Alice', 'Smith'], ['Bob', 'Jones'], ['Charlie', 'Smith']]
"""
def __init__(self, bag):
self._bag = bag
def __dir__(self):
return sorted(set(dir(type(self)) + dir(str)))
def _strmap(self, key, *args, **kwargs):
return self._bag.map(lambda s: getattr(s, key)(*args, **kwargs))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if key in dir(str):
func = getattr(str, key)
return robust_wraps(func)(partial(self._strmap, key))
else:
raise
def match(self, pattern):
""" Filter strings by those that match a pattern.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(['Alice Smith', 'Bob Jones', 'Charlie Smith'])
>>> list(b.str.match('*Smith'))
['Alice Smith', 'Charlie Smith']
See Also
--------
fnmatch.fnmatch
"""
from fnmatch import fnmatch
return self._bag.filter(partial(fnmatch, pat=pattern))
def robust_wraps(wrapper):
""" A weak version of wraps that only copies doc. """
def _(wrapped):
wrapped.__doc__ = wrapper.__doc__
return wrapped
return _
class Item(DaskMethodsMixin):
def __init__(self, dsk, key):
self.dask = dsk
self.key = key
self.name = key
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [self.key]
def __dask_tokenize__(self):
return self.key
__dask_optimize__ = globalmethod(optimize, key='bag_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(mpget)
def __dask_postcompute__(self):
return finalize_item, ()
def __dask_postpersist__(self):
return Item, (self.key,)
@staticmethod
def from_delayed(value):
""" Create bag item from a dask.delayed value.
See ``dask.bag.from_delayed`` for details
"""
from dask.delayed import Delayed, delayed
if not isinstance(value, Delayed) and hasattr(value, 'key'):
value = delayed(value)
assert isinstance(value, Delayed)
return Item(ensure_dict(value.dask), value.key)
@property
def _args(self):
return (self.dask, self.key)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.key = state
def apply(self, func):
name = '{0}-{1}'.format(funcname(func), tokenize(self, func, 'apply'))
dsk = {name: (func, self.key)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return Item(graph, name)
__int__ = __float__ = __complex__ = __bool__ = DaskMethodsMixin.compute
def to_delayed(self, optimize_graph=True):
"""Convert into a ``dask.delayed`` object.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
"""
from dask.delayed import Delayed
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, self.__dask_keys__())
return Delayed(self.key, dsk)
class Bag(DaskMethodsMixin):
""" Parallel collection of Python objects
Examples
--------
Create Bag from sequence
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(lambda x: x % 2 == 0).map(lambda x: x * 10)) # doctest: +SKIP
[0, 20, 40]
Create Bag from filename or globstring of filenames
>>> b = db.read_text('/path/to/mydata.*.json.gz').map(json.loads) # doctest: +SKIP
Create manually (expert use)
>>> dsk = {('x', 0): (range, 5),
... ('x', 1): (range, 5),
... ('x', 2): (range, 5)}
>>> b = Bag(dsk, 'x', npartitions=3)
>>> sorted(b.map(lambda x: x * 10)) # doctest: +SKIP
[0, 0, 0, 10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
>>> int(b.fold(lambda x, y: x + y)) # doctest: +SKIP
30
"""
def __init__(self, dsk, name, npartitions):
if not isinstance(dsk, HighLevelGraph):
dsk = HighLevelGraph.from_collections(name, dsk, dependencies=[])
self.dask = dsk
self.name = name
self.npartitions = npartitions
def __dask_graph__(self):
return self.dask
def __dask_keys__(self):
return [(self.name, i) for i in range(self.npartitions)]
def __dask_layers__(self):
return (self.name,)
def __dask_tokenize__(self):
return self.name
__dask_optimize__ = globalmethod(optimize, key='bag_optimize',
falsey=dont_optimize)
__dask_scheduler__ = staticmethod(mpget)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self.name, self.npartitions)
def __str__(self):
name = self.name if len(self.name) < 10 else self.name[:7] + '...'
return 'dask.bag<%s, npartitions=%d>' % (name, self.npartitions)
__repr__ = __str__
str = property(fget=StringAccessor)
def map(self, func, *args, **kwargs):
"""Apply a function elementwise across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, or object
Extra arguments and keyword arguments to pass to ``func`` *after*
the calling bag instance. Non-Bag args/kwargs are broadcasted
across all calls to ``func``.
Notes
-----
For calls with multiple `Bag` arguments, corresponding partitions
should have the same length; if they do not, the call will error at
compute time.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5), npartitions=2)
>>> b2 = db.from_sequence(range(5, 10), npartitions=2)
Apply a function to all elements in a bag:
>>> b.map(lambda x: x + 1).compute()
[1, 2, 3, 4, 5]
Apply a function with arguments from multiple bags:
>>> from operator import add
>>> b.map(add, b2).compute()
[5, 7, 9, 11, 13]
Non-bag arguments are broadcast across all calls to the mapped
function:
>>> b.map(add, 1).compute()
[1, 2, 3, 4, 5]
Keyword arguments are also supported, and have the same semantics as
regular arguments:
>>> def myadd(x, y=0):
... return x + y
>>> b.map(myadd, y=b2).compute()
[5, 7, 9, 11, 13]
>>> b.map(myadd, y=1).compute()
[1, 2, 3, 4, 5]
Both arguments and keyword arguments can also be instances of
``dask.bag.Item``. Here we'll add the max value in the bag to each
element:
>>> b.map(myadd, b.max()).compute()
[4, 5, 6, 7, 8]
"""
return bag_map(func, self, *args, **kwargs)
def starmap(self, func, **kwargs):
"""Apply a function using argument tuples from the given bag.
This is similar to ``itertools.starmap``, except it also accepts
        keyword arguments. In pseudocode, this could be written as:
>>> def starmap(func, bag, **kwargs):
... return (func(*args, **kwargs) for args in bag)
Parameters
----------
func : callable
**kwargs : Item, Delayed, or object, optional
Extra keyword arguments to pass to ``func``. These can either be
normal objects, ``dask.bag.Item``, or ``dask.delayed.Delayed``.
Examples
--------
>>> import dask.bag as db
>>> data = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]
>>> b = db.from_sequence(data, npartitions=2)
Apply a function to each argument tuple:
>>> from operator import add
>>> b.starmap(add).compute()
[3, 7, 11, 15, 19]
Apply a function to each argument tuple, with additional keyword
arguments:
>>> def myadd(x, y, z=0):
... return x + y + z
>>> b.starmap(myadd, z=10).compute()
[13, 17, 21, 25, 29]
Keyword arguments can also be instances of ``dask.bag.Item`` or
``dask.delayed.Delayed``:
>>> max_second = b.pluck(1).max()
>>> max_second.compute()
10
>>> b.starmap(myadd, z=max_second).compute()
[13, 17, 21, 25, 29]
"""
name = '{0}-{1}'.format(funcname(func),
tokenize(self, func, 'starmap', **kwargs))
dependencies = [self]
if kwargs:
kwargs, collections = unpack_scalar_dask_kwargs(kwargs)
dependencies.extend(collections)
dsk = {(name, i): (reify, (starmap_chunk, func, (self.name, i), kwargs))
for i in range(self.npartitions)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
return type(self)(graph, name, self.npartitions)
@property
def _args(self):
return (self.dask, self.name, self.npartitions)
def __getstate__(self):
return self._args
def __setstate__(self, state):
self.dask, self.name, self.npartitions = state
def filter(self, predicate):
""" Filter elements in collection by a predicate function.
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.filter(iseven)) # doctest: +SKIP
[0, 2, 4]
"""
name = 'filter-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (filter, predicate, (self.name, i))))
for i in range(self.npartitions))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def random_sample(self, prob, random_state=None):
""" Return elements from bag with probability of ``prob``.
Parameters
----------
prob : float
A float between 0 and 1, representing the probability that each
element will be returned.
random_state : int or random.Random, optional
If an integer, will be used to seed a new ``random.Random`` object.
If provided, results in deterministic sampling.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.random_sample(0.5, 42))
[1, 3]
>>> list(b.random_sample(0.5, 42))
[1, 3]
"""
if not 0 <= prob <= 1:
raise ValueError('prob must be a number in the interval [0, 1]')
if not isinstance(random_state, Random):
random_state = Random(random_state)
name = 'random-sample-%s' % tokenize(self, prob, random_state.getstate())
state_data = random_state_data_python(self.npartitions, random_state)
dsk = {(name, i): (reify, (random_sample, (self.name, i), state, prob))
for i, state in zip(range(self.npartitions), state_data)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def remove(self, predicate):
""" Remove elements in collection that match predicate.
>>> def iseven(x):
... return x % 2 == 0
>>> import dask.bag as db
>>> b = db.from_sequence(range(5))
>>> list(b.remove(iseven)) # doctest: +SKIP
[1, 3]
"""
name = 'remove-{0}-{1}'.format(funcname(predicate),
tokenize(self, predicate))
dsk = dict(((name, i), (reify, (remove, predicate, (self.name, i))))
for i in range(self.npartitions))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def map_partitions(self, func, *args, **kwargs):
"""Apply a function to every partition across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
The function to be called on every partition.
This function should expect an ``Iterator`` or ``Iterable`` for
every partition and should return an ``Iterator`` or ``Iterable``
in return.
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``.
Partitions from this bag will be the first argument, and these will
be passed *after*.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
return map_partitions(func, self, *args, **kwargs)
def pluck(self, key, default=no_default):
""" Select item from all tuples/dicts in collection.
>>> b = from_sequence([{'name': 'Alice', 'credits': [1, 2, 3]},
... {'name': 'Bob', 'credits': [10, 20]}])
>>> list(b.pluck('name')) # doctest: +SKIP
['Alice', 'Bob']
>>> list(b.pluck('credits').pluck(0)) # doctest: +SKIP
[1, 10]
"""
name = 'pluck-' + tokenize(self, key, default)
key = quote(key)
if default == no_default:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i))))
for i in range(self.npartitions))
else:
dsk = dict(((name, i), (list, (pluck, key, (self.name, i), default)))
for i in range(self.npartitions))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def unzip(self, n):
"""Transform a bag of tuples to ``n`` bags of their elements.
Examples
--------
>>> b = from_sequence([(i, i + 1, i + 2) for i in range(10)])
>>> first, second, third = b.unzip(3)
>>> isinstance(first, Bag)
True
>>> first.compute()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Note that this is equivalent to:
>>> first, second, third = (b.pluck(i) for i in range(3))
"""
return tuple(self.pluck(i) for i in range(n))
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=None, compression='infer',
encoding=system_encoding, compute=True,
storage_options=None, last_endline=False, **kwargs):
return to_textfiles(self, path, name_function, compression, encoding,
compute, storage_options=storage_options,
last_endline=last_endline, **kwargs)
@wraps(to_avro)
def to_avro(self, filename, schema, name_function=None,
storage_options=None,
codec='null', sync_interval=16000, metadata=None, compute=True,
**kwargs):
return to_avro(self, filename, schema, name_function, storage_options,
codec, sync_interval, metadata, compute, **kwargs)
def fold(self, binop, combine=None, initial=no_default, split_every=None,
out_type=Item):
""" Parallelizable reduction
Fold is like the builtin function ``reduce`` except that it works in
parallel. Fold takes two binary operator functions, one to reduce each
partition of our dataset and another to combine results between
partitions
1. ``binop``: Binary operator to reduce within each partition
2. ``combine``: Binary operator to combine results from binop
Sequentially this would look like the following:
>>> intermediates = [reduce(binop, part) for part in partitions] # doctest: +SKIP
>>> final = reduce(combine, intermediates) # doctest: +SKIP
If only one function is given then it is used for both functions
``binop`` and ``combine`` as in the following example to compute the
sum:
>>> def add(x, y):
... return x + y
>>> b = from_sequence(range(5))
>>> b.fold(add).compute() # doctest: +SKIP
10
In full form we provide both binary operators as well as their default
arguments
>>> b.fold(binop=add, combine=add, initial=0).compute() # doctest: +SKIP
10
More complex binary operators are also doable
>>> def add_to_set(acc, x):
... ''' Add new element x to set acc '''
... return acc | set([x])
>>> b.fold(add_to_set, set.union, initial=set()).compute() # doctest: +SKIP
        {0, 1, 2, 3, 4}
See Also
--------
Bag.foldby
"""
combine = combine or binop
if initial is not no_default:
return self.reduction(curry(_reduce, binop, initial=initial),
curry(_reduce, combine),
split_every=split_every, out_type=out_type)
else:
from toolz.curried import reduce
return self.reduction(reduce(binop), reduce(combine),
split_every=split_every, out_type=out_type)
def frequencies(self, split_every=None, sort=False):
""" Count number of occurrences of each distinct element.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> dict(b.frequencies()) # doctest: +SKIP
        {'Alice': 2, 'Bob': 1}
"""
result = self.reduction(frequencies, merge_frequencies,
out_type=Bag, split_every=split_every,
name='frequencies').map_partitions(dictitems)
if sort:
result = result.map_partitions(sorted, key=second, reverse=True)
return result
def topk(self, k, key=None, split_every=None):
""" K largest elements in collection
Optionally ordered by some key function
>>> b = from_sequence([10, 3, 5, 7, 11, 4])
>>> list(b.topk(2)) # doctest: +SKIP
[11, 10]
>>> list(b.topk(2, lambda x: -x)) # doctest: +SKIP
[3, 4]
"""
if key:
if callable(key) and takes_multiple_arguments(key):
key = partial(apply, key)
func = partial(topk, k, key=key)
else:
func = partial(topk, k)
return self.reduction(func, compose(func, toolz.concat), out_type=Bag,
split_every=split_every, name='topk')
def distinct(self):
""" Distinct elements of collection
Unordered without repeats.
>>> b = from_sequence(['Alice', 'Bob', 'Alice'])
>>> sorted(b.distinct())
['Alice', 'Bob']
"""
return self.reduction(set, merge_distinct, out_type=Bag,
name='distinct')
def reduction(self, perpartition, aggregate, split_every=None,
out_type=Item, name=None):
""" Reduce collection with reduction operators.
Parameters
----------
perpartition: function
reduction to apply to each partition
aggregate: function
reduction to apply to the results of all partitions
split_every: int (optional)
Group partitions into groups of this size while performing reduction
Defaults to 8
out_type: {Bag, Item}
The out type of the result, Item if a single element, Bag if a list
of elements. Defaults to Item.
Examples
--------
>>> b = from_sequence(range(10))
>>> b.reduction(sum, sum).compute()
45
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, perpartition, aggregate, split_every)
a = '%s-part-%s' % (name or funcname(perpartition), token)
is_last = self.npartitions == 1
dsk = {(a, i): (empty_safe_apply, perpartition, (self.name, i), is_last)
for i in range(self.npartitions)}
k = self.npartitions
b = a
fmt = '%s-aggregate-%s' % (name or funcname(aggregate), token)
depth = 0
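        # Tree reduction: while more than ``split_every`` intermediate results
        # remain, group them into batches of at most ``split_every`` and apply
        # ``aggregate`` to each batch, repeating until the single final
        # aggregate task (keyed by ``fmt``) can be emitted below.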
while k > split_every:
c = fmt + str(depth)
dsk2 = dict(((c, i), (empty_safe_aggregate, aggregate,
[(b, j) for j in inds], False))
for i, inds in enumerate(partition_all(split_every,
range(k))))
dsk.update(dsk2)
k = len(dsk2)
b = c
depth += 1
dsk[(fmt, 0)] = (empty_safe_aggregate, aggregate,
[(b, j) for j in range(k)], True)
graph = HighLevelGraph.from_collections(fmt, dsk, dependencies=[self])
if out_type is Item:
dsk[fmt] = dsk.pop((fmt, 0))
return Item(graph, fmt)
else:
return Bag(graph, fmt, 1)
def sum(self, split_every=None):
""" Sum all elements """
return self.reduction(sum, sum, split_every=split_every)
def max(self, split_every=None):
""" Maximum element """
return self.reduction(max, max, split_every=split_every)
def min(self, split_every=None):
""" Minimum element """
return self.reduction(min, min, split_every=split_every)
def any(self, split_every=None):
""" Are any of the elements truthy? """
return self.reduction(any, any, split_every=split_every)
def all(self, split_every=None):
""" Are all elements truthy? """
return self.reduction(all, all, split_every=split_every)
def count(self, split_every=None):
""" Count the number of elements. """
return self.reduction(count, sum, split_every=split_every)
def mean(self):
""" Arithmetic mean """
def mean_chunk(seq):
total, n = 0.0, 0
for x in seq:
total += x
n += 1
return total, n
def mean_aggregate(x):
totals, counts = list(zip(*x))
return 1.0 * sum(totals) / sum(counts)
return self.reduction(mean_chunk, mean_aggregate, split_every=False)
def var(self, ddof=0):
""" Variance """
def var_chunk(seq):
squares, total, n = 0.0, 0.0, 0
for x in seq:
squares += x**2
total += x
n += 1
return squares, total, n
def var_aggregate(x):
squares, totals, counts = list(zip(*x))
x2, x, n = float(sum(squares)), float(sum(totals)), sum(counts)
result = (x2 / n) - (x / n)**2
return result * n / (n - ddof)
return self.reduction(var_chunk, var_aggregate, split_every=False)
def std(self, ddof=0):
""" Standard deviation """
return self.var(ddof=ddof).apply(math.sqrt)
def join(self, other, on_self, on_other=None):
""" Joins collection with another collection.
Other collection must be one of the following:
1. An iterable. We recommend tuples over lists for internal
performance reasons.
2. A delayed object, pointing to a tuple. This is recommended if the
other collection is sizable and you're using the distributed
scheduler. Dask is able to pass around data wrapped in delayed
objects with greater sophistication.
3. A Bag with a single partition
You might also consider Dask Dataframe, whose join operations are much
more heavily optimized.
Parameters
----------
other: Iterable, Delayed, Bag
Other collection on which to join
on_self: callable
Function to call on elements in this collection to determine a
match
on_other: callable (defaults to on_self)
Function to call on elements in the other collection to determine a
match
Examples
--------
>>> people = from_sequence(['Alice', 'Bob', 'Charlie'])
>>> fruit = ['Apple', 'Apricot', 'Banana']
>>> list(people.join(fruit, lambda x: x[0])) # doctest: +SKIP
[('Apple', 'Alice'), ('Apricot', 'Alice'), ('Banana', 'Bob')]
"""
name = 'join-' + tokenize(self, other, on_self, on_other)
dsk = {}
if isinstance(other, Bag):
if other.npartitions == 1:
dsk.update(other.dask)
other = other.__dask_keys__()[0]
dsk['join-%s-other' % name] = (list, other)
else:
msg = ("Multi-bag joins are not implemented. "
"We recommend Dask dataframe if appropriate")
raise NotImplementedError(msg)
elif isinstance(other, Delayed):
dsk.update(other.dask)
other = other._key
elif isinstance(other, Iterable):
other = other
else:
msg = ("Joined argument must be single-partition Bag, "
" delayed object, or Iterable, got %s" %
type(other).__name)
raise TypeError(msg)
if on_other is None:
on_other = on_self
dsk.update({(name, i): (list, (join, on_other, other,
on_self, (self.name, i)))
for i in range(self.npartitions)})
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def product(self, other):
""" Cartesian product between two bags. """
assert isinstance(other, Bag)
name = 'product-' + tokenize(self, other)
n, m = self.npartitions, other.npartitions
dsk = dict(((name, i * m + j),
(list, (itertools.product, (self.name, i),
(other.name, j))))
for i in range(n) for j in range(m))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self, other])
return type(self)(graph, name, n * m)
def foldby(self, key, binop, initial=no_default, combine=None,
combine_initial=no_default, split_every=None):
""" Combined reduction and groupby.
Foldby provides a combined groupby and reduce for efficient parallel
split-apply-combine tasks.
The computation
>>> b.foldby(key, binop, init) # doctest: +SKIP
is equivalent to the following:
>>> def reduction(group): # doctest: +SKIP
... return reduce(binop, group, init) # doctest: +SKIP
>>> b.groupby(key).map(lambda kv: (kv[0], reduction(kv[1]))) # doctest: +SKIP
But uses minimal communication and so is *much* faster.
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> add = lambda x, y: x + y
>>> dict(b.foldby(iseven, add)) # doctest: +SKIP
{True: 20, False: 25}
**Key Function**
The key function determines how to group the elements in your bag.
In the common case where your bag holds dictionaries, the key
function often picks out one of those elements.
>>> def key(x):
... return x['name']
This case is so common that it is special cased, and if you provide a
key that is not a callable function then dask.bag will turn it into one
automatically. The following are equivalent:
>>> b.foldby(lambda x: x['name'], ...) # doctest: +SKIP
>>> b.foldby('name', ...) # doctest: +SKIP
**Binops**
It can be tricky to construct the right binary operators to perform
analytic queries. The ``foldby`` method accepts two binary operators,
``binop`` and ``combine``. Each binary operator's two inputs and output must
have the same type.
Binop takes a running total and a new element and produces a new total:
>>> def binop(total, x):
... return total + x['amount']
Combine takes two totals and combines them:
>>> def combine(total1, total2):
... return total1 + total2
Each of these binary operators may have a default first value for
total, before any other value is seen. For addition binary operators
like above this is often ``0`` or the identity element for your
operation.
**split_every**
Group partitions into groups of this size while performing reduction.
Defaults to 8.
>>> b.foldby('name', binop, 0, combine, 0) # doctest: +SKIP
See Also
--------
toolz.reduceby
pyspark.combineByKey
"""
if split_every is None:
split_every = 8
if split_every is False:
split_every = self.npartitions
token = tokenize(self, key, binop, initial, combine, combine_initial)
a = 'foldby-a-' + token
if combine is None:
combine = binop
if initial is not no_default:
dsk = {(a, i): (reduceby, key, binop, (self.name, i), initial)
for i in range(self.npartitions)}
else:
dsk = {(a, i): (reduceby, key, binop, (self.name, i))
for i in range(self.npartitions)}
def combine2(acc, x):
return combine(acc, x[1])
depth = 0
k = self.npartitions
b = a
while k > split_every:
c = b + str(depth)
if combine_initial is not no_default:
dsk2 = {(c, i): (reduceby, 0, combine2,
(toolz.concat, (map, dictitems,
[(b, j) for j in inds])),
combine_initial)
for i, inds in enumerate(partition_all(split_every,
range(k)))}
else:
dsk2 = {(c, i): (merge_with, (partial, reduce, combine),
[(b, j) for j in inds])
for i, inds in enumerate(partition_all(split_every,
range(k)))}
dsk.update(dsk2)
k = len(dsk2)
b = c
depth += 1
e = 'foldby-b-' + token
if combine_initial is not no_default:
dsk[(e, 0)] = (dictitems, (reduceby, 0, combine2,
(toolz.concat, (map, dictitems,
[(b, j) for j in range(k)])),
combine_initial))
else:
dsk[(e, 0)] = (dictitems, (merge_with, (partial, reduce, combine),
[(b, j) for j in range(k)]))
graph = HighLevelGraph.from_collections(e, dsk, dependencies=[self])
return type(self)(graph, e, 1)
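# Minimal end-to-end sketch of foldby (values hypothetical, echoing the iseven
# example from the docstring): `binop` folds elements into a per-key total inside
# each partition, then `combine` merges per-partition totals for the same key.
#
#   b = from_sequence(range(10), npartitions=2)
#   sorted(b.foldby(lambda x: x % 2 == 0, lambda acc, x: acc + x, 0).compute())
#   # [(False, 25), (True, 20)]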
def take(self, k, npartitions=1, compute=True, warn=True):
""" Take the first k elements.
Parameters
----------
k : int
The number of elements to return
npartitions : int, optional
Elements are only taken from the first ``npartitions``, with a
default of 1. If there are fewer than ``k`` rows in the first
``npartitions`` a warning will be raised and any found rows
returned. Pass -1 to use all partitions.
compute : bool, optional
Whether to compute the result, default is True.
warn : bool, optional
Whether to warn if the number of elements returned is less than
requested, default is True.
>>> b = from_sequence(range(10))
>>> b.take(3) # doctest: +SKIP
(0, 1, 2)
"""
if npartitions <= -1:
npartitions = self.npartitions
if npartitions > self.npartitions:
raise ValueError("only {} partitions, take "
"received {}".format(self.npartitions, npartitions))
token = tokenize(self, k, npartitions)
name = 'take-' + token
if npartitions > 1:
name_p = 'take-partial-' + token
dsk = {}
for i in range(npartitions):
dsk[(name_p, i)] = (list, (take, k, (self.name, i)))
concat = (toolz.concat, ([(name_p, i) for i in range(npartitions)]))
dsk[(name, 0)] = (safe_take, k, concat, warn)
else:
dsk = {(name, 0): (safe_take, k, (self.name, 0), warn)}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
b = Bag(graph, name, 1)
if compute:
return tuple(b.compute())
else:
return b
def flatten(self):
""" Concatenate nested lists into one long list.
>>> b = from_sequence([[1], [2, 3]])
>>> list(b)
[[1], [2, 3]]
>>> list(b.flatten())
[1, 2, 3]
"""
name = 'flatten-' + tokenize(self)
dsk = dict(((name, i), (list, (toolz.concat, (self.name, i))))
for i in range(self.npartitions))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[self])
return type(self)(graph, name, self.npartitions)
def __iter__(self):
return iter(self.compute())
def groupby(self, grouper, method=None, npartitions=None, blocksize=2**20,
max_branch=None, shuffle=None):
""" Group collection by key function
This requires a full dataset read, serialization and shuffle.
This is expensive. If possible you should use ``foldby``.
Parameters
----------
grouper: function
Function on which to group elements
shuffle: str
Either 'disk' for an on-disk shuffle or 'tasks' to use the task
scheduling framework. Use 'disk' if you are on a single machine
and 'tasks' if you are on a distributed cluster.
npartitions: int
If using the disk-based shuffle, the number of output partitions
blocksize: int
If using the disk-based shuffle, the size of shuffle blocks (bytes)
max_branch: int
If using the task-based shuffle, the amount of splitting each
partition undergoes. Increase this for fewer copies but more
scheduler overhead.
Examples
--------
>>> b = from_sequence(range(10))
>>> iseven = lambda x: x % 2 == 0
>>> dict(b.groupby(iseven)) # doctest: +SKIP
{True: [0, 2, 4, 6, 8], False: [1, 3, 5, 7, 9]}
See Also
--------
Bag.foldby
"""
if method is not None:
raise Exception("The method= keyword has been moved to shuffle=")
if shuffle is None:
shuffle = config.get('shuffle', None)
if shuffle is None:
if 'distributed' in config.get('scheduler', ''):
shuffle = 'tasks'
else:
shuffle = 'disk'
if shuffle == 'disk':
return groupby_disk(self, grouper, npartitions=npartitions,
blocksize=blocksize)
elif shuffle == 'tasks':
return groupby_tasks(self, grouper, max_branch=max_branch)
else:
msg = "Shuffle must be 'disk' or 'tasks'"
raise NotImplementedError(msg)
def to_dataframe(self, meta=None, columns=None):
""" Create Dask Dataframe from a Dask Bag.
Bag should contain tuples, dict records, or scalars.
Index will not be particularly meaningful. Use ``reindex`` afterwards
if necessary.
Parameters
----------
meta : pd.DataFrame, dict, iterable, optional
An empty ``pd.DataFrame`` that matches the dtypes and column names
of the output. This metadata is necessary for many algorithms in
dask dataframe to work. For ease of use, some alternative inputs
are also available. Instead of a ``DataFrame``, a ``dict`` of
``{name: dtype}`` or iterable of ``(name, dtype)`` can be provided.
If not provided or a list, a single element from the first
partition will be computed, triggering a potentially expensive call
to ``compute``. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
columns : sequence, optional
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the columns.
Otherwise this argument indicates the order of the columns in the
result (any names not found in the data will become all-NA
columns). Note that if ``meta`` is provided, column names will be
taken from there and this parameter is invalid.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence([{'name': 'Alice', 'balance': 100},
... {'name': 'Bob', 'balance': 200},
... {'name': 'Charlie', 'balance': 300}],
... npartitions=2)
>>> df = b.to_dataframe()
>>> df.compute()
balance name
0 100 Alice
1 200 Bob
0 300 Charlie
"""
import pandas as pd
import dask.dataframe as dd
if meta is None:
head = self.take(1, warn=False)
if len(head) == 0:
raise ValueError("`dask.bag.Bag.to_dataframe` failed to "
"properly infer metadata, please pass in "
"metadata via the `meta` keyword")
meta = pd.DataFrame(list(head), columns=columns)
elif columns is not None:
raise ValueError("Can't specify both `meta` and `columns`")
else:
meta = dd.utils.make_meta(meta)
# Serializing the columns and dtypes is much smaller than serializing
# the empty frame
cols = list(meta.columns)
dtypes = meta.dtypes.to_dict()
name = 'to_dataframe-' + tokenize(self, cols, dtypes)
dsk = self.__dask_optimize__(self.dask, self.__dask_keys__())
dsk.update({(name, i): (to_dataframe, (self.name, i), cols, dtypes)
for i in range(self.npartitions)})
divisions = [None] * (self.npartitions + 1)
return dd.DataFrame(dsk, name, meta, divisions)
def to_delayed(self, optimize_graph=True):
"""Convert into a list of ``dask.delayed`` objects, one per partition.
Parameters
----------
optimize_graph : bool, optional
If True [default], the graph is optimized before converting into
``dask.delayed`` objects.
See Also
--------
dask.bag.from_delayed
"""
from dask.delayed import Delayed
keys = self.__dask_keys__()
dsk = self.__dask_graph__()
if optimize_graph:
dsk = self.__dask_optimize__(dsk, keys)
return [Delayed(k, dsk) for k in keys]
def repartition(self, npartitions):
""" Coalesce bag into fewer partitions.
Examples
--------
>>> b.repartition(5) # set to have 5 partitions # doctest: +SKIP
"""
new_name = 'repartition-%d-%s' % (npartitions, tokenize(self, npartitions))
if npartitions == self.npartitions:
return self
elif npartitions < self.npartitions:
ratio = self.npartitions / npartitions
new_partitions_boundaries = [int(old_partition_index * ratio)
for old_partition_index in range(npartitions + 1)]
dsk = {}
for new_partition_index in range(npartitions):
value = (list, (toolz.concat,
[(self.name, old_partition_index)
for old_partition_index in
range(new_partitions_boundaries[new_partition_index],
new_partitions_boundaries[new_partition_index + 1])]))
dsk[new_name, new_partition_index] = value
else: # npartitions > self.npartitions
ratio = npartitions / self.npartitions
split_name = 'split-%s' % tokenize(self, npartitions)
dsk = {}
last = 0
j = 0
for i in range(self.npartitions):
new = last + ratio
if i == self.npartitions - 1:
k = npartitions - j
else:
k = int(new - last)
dsk[(split_name, i)] = (split, (self.name, i), k)
for jj in range(k):
dsk[(new_name, j)] = (getitem, (split_name, i), jj)
j += 1
last = new
graph = HighLevelGraph.from_collections(new_name, dsk, dependencies=[self])
return Bag(graph, name=new_name, npartitions=npartitions)
def accumulate(self, binop, initial=no_default):
""" Repeatedly apply binary function to a sequence, accumulating results.
This assumes that the bag is ordered. While this is typically the case,
not all Dask.bag functions preserve this property.
Examples
--------
>>> from operator import add
>>> b = from_sequence([1, 2, 3, 4, 5], npartitions=2)
>>> b.accumulate(add).compute() # doctest: +SKIP
[1, 3, 6, 10, 15]
Accumulate also takes an optional argument that will be used as the
first value.
>>> b.accumulate(add, initial=-1) # doctest: +SKIP
[-1, 0, 2, 5, 9, 14]
"""
if not _implement_accumulate:
raise NotImplementedError("accumulate requires `toolz` > 0.7.4"
" or `cytoolz` > 0.7.3.")
token = tokenize(self, binop, initial)
binop_name = funcname(binop)
a = '%s-part-%s' % (binop_name, token)
b = '%s-first-%s' % (binop_name, token)
c = '%s-second-%s' % (binop_name, token)
dsk = {(a, 0): (accumulate_part, binop, (self.name, 0), initial, True),
(b, 0): (first, (a, 0)),
(c, 0): (second, (a, 0))}
for i in range(1, self.npartitions):
dsk[(a, i)] = (accumulate_part, binop, (self.name, i), (c, i - 1))
dsk[(b, i)] = (first, (a, i))
dsk[(c, i)] = (second, (a, i))
graph = HighLevelGraph.from_collections(b, dsk, dependencies=[self])
return Bag(graph, b, self.npartitions)
def accumulate_part(binop, seq, initial, is_first=False):
if initial == no_default:
res = list(accumulate(binop, seq))
else:
res = list(accumulate(binop, seq, initial=initial))
if is_first:
return res, res[-1] if res else [], initial
return res[1:], res[-1]
def partition(grouper, sequence, npartitions, p, nelements=2**20):
""" Partition a bag along a grouper, store partitions on disk. """
for block in partition_all(nelements, sequence):
d = groupby(grouper, block)
d2 = defaultdict(list)
for k, v in d.items():
d2[abs(hash(k)) % npartitions].extend(v)
p.append(d2, fsync=True)
return p
def collect(grouper, group, p, barrier_token):
""" Collect partitions from disk and yield k,v group pairs. """
d = groupby(grouper, p.get(group, lock=False))
return list(d.items())
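# How the two helpers above cooperate in the disk-based shuffle (see groupby_disk
# further down): `partition` streams blocks of elements, groups each block with
# `grouper`, and appends the groups to a partd store bucketed by
# abs(hash(key)) % npartitions; after the barrier task confirms every input
# partition was written, `collect` reads one bucket back and regroups it into
# (key, [values]) pairs for the corresponding output partition.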
def from_sequence(seq, partition_size=None, npartitions=None):
""" Create a dask Bag from Python sequence.
This sequence should be relatively small in memory. Dask Bag works
best when it handles loading your data itself. Commonly we load a
sequence of filenames into a Bag and then use ``.map`` to open them.
Parameters
----------
seq: Iterable
A sequence of elements to put into the dask
partition_size: int (optional)
The length of each partition
npartitions: int (optional)
The number of desired partitions
It is best to provide either ``partition_size`` or ``npartitions``
(though not both).
Examples
--------
>>> b = from_sequence(['Alice', 'Bob', 'Chuck'], partition_size=2)
See Also
--------
read_text: Create bag from text files
"""
seq = list(seq)
if npartitions and not partition_size:
partition_size = int(math.ceil(len(seq) / npartitions))
if npartitions is None and partition_size is None:
if len(seq) < 100:
partition_size = 1
else:
partition_size = int(len(seq) / 100)
parts = list(partition_all(partition_size, seq))
name = 'from_sequence-' + tokenize(seq, partition_size)
if len(parts) > 0:
d = dict(((name, i), list(part)) for i, part in enumerate(parts))
else:
d = {(name, 0): []}
return Bag(d, name, len(d))
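# Partition sizing sketch (numbers chosen for illustration): from_sequence(range(10),
# npartitions=3) derives partition_size = ceil(10 / 3) = 4, so the elements land in
# partitions of sizes 4, 4 and 2; the requested npartitions is a target rather than
# a guarantee.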
def from_url(urls):
"""Create a dask Bag from a url.
Examples
--------
>>> a = from_url('http://raw.githubusercontent.com/dask/dask/master/README.rst') # doctest: +SKIP
>>> a.npartitions # doctest: +SKIP
1
>>> a.take(8) # doctest: +SKIP
(b'Dask\\n',
b'====\\n',
b'\\n',
b'|Build Status| |Coverage| |Doc Status| |Gitter| |Version Status|\\n',
b'\\n',
b'Dask is a flexible parallel computing library for analytics. See\\n',
b'documentation_ for more information.\\n',
b'\\n')
>>> b = from_url(['http://github.com', 'http://google.com']) # doctest: +SKIP
>>> b.npartitions # doctest: +SKIP
2
"""
if isinstance(urls, str):
urls = [urls]
name = 'from_url-' + uuid.uuid4().hex
dsk = {}
for i, u in enumerate(urls):
dsk[(name, i)] = (list, (urlopen, u))
return Bag(dsk, name, len(urls))
def dictitems(d):
""" A pickleable version of dict.items
>>> dictitems({'x': 1})
[('x', 1)]
"""
return list(d.items())
def concat(bags):
""" Concatenate many bags together, unioning all elements.
>>> import dask.bag as db
>>> a = db.from_sequence([1, 2, 3])
>>> b = db.from_sequence([4, 5, 6])
>>> c = db.concat([a, b])
>>> list(c)
[1, 2, 3, 4, 5, 6]
"""
name = 'concat-' + tokenize(*bags)
counter = itertools.count(0)
dsk = {(name, next(counter)): key
for bag in bags for key in bag.__dask_keys__()}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)
return Bag(graph, name, len(dsk))
def reify(seq):
if isinstance(seq, Iterator):
seq = list(seq)
if seq and isinstance(seq[0], Iterator):
seq = list(map(list, seq))
return seq
def from_delayed(values):
""" Create bag from many dask Delayed objects.
These objects will become the partitions of the resulting Bag. They should
evaluate to a ``list`` or some other concrete sequence.
Parameters
----------
values: list of delayed values
An iterable of dask Delayed objects. Each evaluating to a list.
Returns
-------
Bag
Examples
--------
>>> x, y, z = [delayed(load_sequence_from_file)(fn)
... for fn in filenames] # doctest: +SKIP
>>> b = from_delayed([x, y, z]) # doctest: +SKIP
See also
--------
dask.delayed
"""
from dask.delayed import Delayed, delayed
if isinstance(values, Delayed):
values = [values]
values = [delayed(v)
if not isinstance(v, Delayed) and hasattr(v, 'key')
else v
for v in values]
name = 'bag-from-delayed-' + tokenize(*values)
names = [(name, i) for i in range(len(values))]
values2 = [(reify, v.key) for v in values]
dsk = dict(zip(names, values2))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=values)
return Bag(graph, name, len(values))
def merge_distinct(seqs):
return set().union(*seqs)
def merge_frequencies(seqs):
if isinstance(seqs, Iterable):
seqs = list(seqs)
if not seqs:
return {}
first, rest = seqs[0], seqs[1:]
if not rest:
return first
out = defaultdict(int)
out.update(first)
for d in rest:
for k, v in iteritems(d):
out[k] += v
return out
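# Worked example (inputs hypothetical): merge_frequencies([{'a': 2, 'b': 1}, {'a': 1}])
# yields counts {'a': 3, 'b': 1} (as a defaultdict), i.e. per-partition frequency
# dicts summed key by key.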
def bag_range(n, npartitions):
""" Numbers from zero to n
Examples
--------
>>> import dask.bag as db
>>> b = db.range(5, npartitions=2)
>>> list(b)
[0, 1, 2, 3, 4]
"""
size = n // npartitions
name = 'range-%d-npartitions-%d' % (n, npartitions)
ijs = list(enumerate(take(npartitions, range(0, n, size))))
dsk = dict(((name, i), (reify, (range, j, min(j + size, n))))
for i, j in ijs)
if n % npartitions != 0:
i, j = ijs[-1]
dsk[(name, i)] = (reify, (range, j, n))
return Bag(dsk, name, npartitions)
def bag_zip(*bags):
""" Partition-wise bag zip
All passed bags must have the same number of partitions.
NOTE: corresponding partitions should have the same length; if they do not,
the "extra" elements from the longer partition(s) will be dropped. If you
have this case chances are that what you really need is a data alignment
mechanism like pandas's, and not a missing value filler like zip_longest.
Examples
--------
Correct usage:
>>> import dask.bag as db
>>> evens = db.from_sequence(range(0, 10, 2), partition_size=4)
>>> odds = db.from_sequence(range(1, 10, 2), partition_size=4)
>>> pairs = db.zip(evens, odds)
>>> list(pairs)
[(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
Incorrect usage:
>>> numbers = db.range(20) # doctest: +SKIP
>>> fizz = numbers.filter(lambda n: n % 3 == 0) # doctest: +SKIP
>>> buzz = numbers.filter(lambda n: n % 5 == 0) # doctest: +SKIP
>>> fizzbuzz = db.zip(fizz, buzz) # doctest: +SKIP
>>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, 5), (6, 10), (9, 15), (12, 20), (15, 25), (18, 30)]
When what you really wanted was more along the lines of the following:
>>> list(fizzbuzz) # doctest: +SKIP
[(0, 0), (3, None), (None, 5), (6, None), (None, 10), (9, None),
(12, None), (15, 15), (18, None), (None, 20), (None, 25), (None, 30)]
"""
npartitions = bags[0].npartitions
assert all(bag.npartitions == npartitions for bag in bags)
# TODO: do more checks
name = 'zip-' + tokenize(*bags)
dsk = dict(
((name, i), (reify, (zip,) + tuple((bag.name, i) for bag in bags)))
for i in range(npartitions))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags)
return Bag(graph, name, npartitions)
def map_chunk(f, args, bag_kwargs, kwargs):
if kwargs:
f = partial(f, **kwargs)
args = [iter(a) for a in args]
iters = list(args)
if bag_kwargs:
keys = list(bag_kwargs)
kw_val_iters = [iter(v) for v in bag_kwargs.values()]
iters.extend(kw_val_iters)
kw_iter = (dict(zip(keys, k)) for k in zip(*kw_val_iters))
if args:
for a, k in zip(zip(*args), kw_iter):
yield f(*a, **k)
else:
for k in kw_iter:
yield f(**k)
else:
for a in zip(*args):
yield f(*a)
# Check that all iterators are fully exhausted
if len(iters) > 1:
for i in iters:
if isinstance(i, itertools.repeat):
continue
try:
next(i)
except StopIteration:
pass
else:
msg = ("map called with multiple bags that aren't identically "
"partitioned. Please ensure that all bag arguments "
"have the same partition lengths")
raise ValueError(msg)
def starmap_chunk(f, x, kwargs):
if kwargs:
f = partial(f, **kwargs)
return itertools.starmap(f, x)
def unpack_scalar_dask_kwargs(kwargs):
"""Extracts dask values from kwargs.
Currently only ``dask.bag.Item`` and ``dask.delayed.Delayed`` are
supported. Returns a merged dask graph and a task resulting in a keyword
dict.
"""
kwargs2 = {}
dependencies = []
for k, v in kwargs.items():
vv, collections = unpack_collections(v)
if not collections:
kwargs2[k] = v
else:
kwargs2[k] = vv
dependencies.extend(collections)
if dependencies:
kwargs2 = (dict, (zip, list(kwargs2), list(kwargs2.values())))
return kwargs2, dependencies
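# Sketch of the rewrite above (names hypothetical): given kwargs such as
# {'den': some_item, 'flag': True}, the dask value is swapped for a reference into
# its graph and the mapping becomes a task of the form
# (dict, (zip, ['den', 'flag'], [<reference>, True])), so the keyword dict is only
# assembled at compute time and `some_item` is reported in the dependencies list.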
def bag_map(func, *args, **kwargs):
"""Apply a function elementwise across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``. Non-Bag args/kwargs
are broadcasted across all calls to ``func``.
Notes
-----
For calls with multiple `Bag` arguments, corresponding partitions should
have the same length; if they do not, the call will error at compute time.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(5), npartitions=2)
>>> b2 = db.from_sequence(range(5, 10), npartitions=2)
Apply a function to all elements in a bag:
>>> db.map(lambda x: x + 1, b).compute()
[1, 2, 3, 4, 5]
Apply a function with arguments from multiple bags:
>>> from operator import add
>>> db.map(add, b, b2).compute()
[5, 7, 9, 11, 13]
Non-bag arguments are broadcast across all calls to the mapped function:
>>> db.map(add, b, 1).compute()
[1, 2, 3, 4, 5]
Keyword arguments are also supported, and have the same semantics as
regular arguments:
>>> def myadd(x, y=0):
... return x + y
>>> db.map(myadd, b, y=b2).compute()
[5, 7, 9, 11, 13]
>>> db.map(myadd, b, y=1).compute()
[1, 2, 3, 4, 5]
Both arguments and keyword arguments can also be instances of
``dask.bag.Item`` or ``dask.delayed.Delayed``. Here we'll add the max value
in the bag to each element:
>>> db.map(myadd, b, b.max()).compute()
[4, 5, 6, 7, 8]
"""
name = '%s-%s' % (funcname(func), tokenize(func, 'map', *args, **kwargs))
dsk = {}
dependencies = []
bags = []
args2 = []
for a in args:
if isinstance(a, Bag):
bags.append(a)
args2.append(a)
elif isinstance(a, (Item, Delayed)):
dependencies.append(a)
args2.append((itertools.repeat, a.key))
else:
args2.append((itertools.repeat, a))
bag_kwargs = {}
other_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Bag):
bag_kwargs[k] = v
bags.append(v)
else:
other_kwargs[k] = v
other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)
dependencies.extend(collections)
if not bags:
raise ValueError("At least one argument must be a Bag.")
npartitions = {b.npartitions for b in bags}
if len(npartitions) > 1:
raise ValueError("All bags must have the same number of partitions.")
npartitions = npartitions.pop()
def build_args(n):
return [(a.name, n) if isinstance(a, Bag) else a for a in args2]
def build_bag_kwargs(n):
if not bag_kwargs:
return None
return (dict, (zip, list(bag_kwargs),
[(b.name, n) for b in bag_kwargs.values()]))
dsk = {(name, n): (reify, (map_chunk, func, build_args(n),
build_bag_kwargs(n), other_kwargs))
for n in range(npartitions)}
# If all bags are the same type, use that type, otherwise fallback to Bag
return_type = set(map(type, bags))
return_type = return_type.pop() if len(return_type) == 1 else Bag
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)
return return_type(graph, name, npartitions)
def map_partitions(func, *args, **kwargs):
"""Apply a function to every partition across one or more bags.
Note that all ``Bag`` arguments must be partitioned identically.
Parameters
----------
func : callable
*args, **kwargs : Bag, Item, Delayed, or object
Arguments and keyword arguments to pass to ``func``.
Examples
--------
>>> import dask.bag as db
>>> b = db.from_sequence(range(1, 101), npartitions=10)
>>> def div(nums, den=1):
... return [num / den for num in nums]
Using a python object:
>>> hi = b.max().compute()
>>> hi
100
>>> b.map_partitions(div, den=hi).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Using an ``Item``:
>>> b.map_partitions(div, den=b.max()).take(5)
(0.01, 0.02, 0.03, 0.04, 0.05)
Note that while both versions give the same output, the second forms a
single graph, and then computes everything at once, and in some cases
may be more efficient.
"""
name = '%s-%s' % (funcname(func),
tokenize(func, 'map-partitions', *args, **kwargs))
dsk = {}
dependencies = []
bags = []
args2 = []
for a in args:
if isinstance(a, Bag):
bags.append(a)
args2.append(a)
elif isinstance(a, (Item, Delayed)):
args2.append(a.key)
dependencies.append(a)
else:
args2.append(a)
bag_kwargs = {}
other_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, Bag):
bag_kwargs[k] = v
bags.append(v)
else:
other_kwargs[k] = v
other_kwargs, collections = unpack_scalar_dask_kwargs(other_kwargs)
dependencies.extend(collections)
if not bags:
raise ValueError("At least one argument must be a Bag.")
npartitions = {b.npartitions for b in bags}
if len(npartitions) > 1:
raise ValueError("All bags must have the same number of partitions.")
npartitions = npartitions.pop()
def build_args(n):
return [(a.name, n) if isinstance(a, Bag) else a for a in args2]
def build_bag_kwargs(n):
if not bag_kwargs:
return {}
return (dict, (zip, list(bag_kwargs),
[(b.name, n) for b in bag_kwargs.values()]))
if kwargs:
dsk = {(name, n): (apply,
func,
build_args(n),
(merge, build_bag_kwargs(n), other_kwargs))
for n in range(npartitions)}
else:
dsk = {(name, n): (func,) + tuple(build_args(n))
for n in range(npartitions)}
# If all bags are the same type, use that type, otherwise fallback to Bag
return_type = set(map(type, bags))
return_type = return_type.pop() if len(return_type) == 1 else Bag
graph = HighLevelGraph.from_collections(name, dsk, dependencies=bags + dependencies)
return return_type(graph, name, npartitions)
def _reduce(binop, sequence, initial=no_default):
if initial is not no_default:
return reduce(binop, sequence, initial)
else:
return reduce(binop, sequence)
def make_group(k, stage):
def h(x):
return x[0] // k ** stage % k
return h
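# make_group(k, stage) buckets an element by one base-k digit of its hash (the
# first entry of the (hash, element) pairs produced in groupby_tasks). For example,
# with k=4 a pair whose hash is 13 falls into group 13 // 4**0 % 4 == 1 when
# stage=0 and into group 13 // 4**1 % 4 == 3 when stage=1.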
def groupby_tasks(b, grouper, hash=hash, max_branch=32):
max_branch = max_branch or 32
n = b.npartitions
stages = int(math.ceil(math.log(n) / math.log(max_branch))) or 1
if stages > 1:
k = int(math.ceil(n ** (1 / stages)))
else:
k = n
groups = []
splits = []
joins = []
inputs = [tuple(digit(i, j, k) for j in range(stages))
for i in range(k**stages)]
b2 = b.map(lambda x: (hash(grouper(x)), x))
token = tokenize(b, grouper, hash, max_branch)
start = dict((('shuffle-join-' + token, 0, inp),
(b2.name, i) if i < b.npartitions else [])
for i, inp in enumerate(inputs))
for stage in range(1, stages + 1):
group = dict((('shuffle-group-' + token, stage, inp),
(groupby,
(make_group, k, stage - 1),
('shuffle-join-' + token, stage - 1, inp)))
for inp in inputs)
split = dict((('shuffle-split-' + token, stage, i, inp),
(dict.get, ('shuffle-group-' + token, stage, inp), i, {}))
for i in range(k)
for inp in inputs)
join = dict((('shuffle-join-' + token, stage, inp),
(list, (toolz.concat, [('shuffle-split-' + token, stage, inp[stage - 1],
insert(inp, stage - 1, j)) for j in range(k)])))
for inp in inputs)
groups.append(group)
splits.append(split)
joins.append(join)
end = dict((('shuffle-' + token, i),
(list, (dict.items, (groupby, grouper, (pluck, 1, j)))))
for i, j in enumerate(join))
name = 'shuffle-' + token
dsk = merge(start, end, *(groups + splits + joins))
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b2])
return type(b)(graph, name, len(inputs))
def groupby_disk(b, grouper, npartitions=None, blocksize=2**20):
if npartitions is None:
npartitions = b.npartitions
token = tokenize(b, grouper, npartitions, blocksize)
import partd
p = ('partd-' + token,)
dirname = config.get('temporary_directory', None)
if dirname:
file = (apply, partd.File, (), {'dir': dirname})
else:
file = (partd.File,)
try:
dsk1 = {p: (partd.Python, (partd.Snappy, file))}
except AttributeError:
dsk1 = {p: (partd.Python, file)}
# Partition data on disk
name = 'groupby-part-{0}-{1}'.format(funcname(grouper), token)
dsk2 = dict(((name, i), (partition, grouper, (b.name, i),
npartitions, p, blocksize))
for i in range(b.npartitions))
# Barrier
barrier_token = 'groupby-barrier-' + token
def barrier(args):
return 0
dsk3 = {barrier_token: (barrier, list(dsk2))}
# Collect groups
name = 'groupby-collect-' + token
dsk4 = dict(((name, i),
(collect, grouper, i, p, barrier_token))
for i in range(npartitions))
dsk = merge(dsk1, dsk2, dsk3, dsk4)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[b])
return type(b)(graph, name, npartitions)
def empty_safe_apply(func, part, is_last):
if isinstance(part, Iterator):
try:
_, part = peek(part)
except StopIteration:
if not is_last:
return no_result
return func(part)
elif not is_last and len(part) == 0:
return no_result
else:
return func(part)
def empty_safe_aggregate(func, parts, is_last):
parts2 = (p for p in parts if p is not no_result)
return empty_safe_apply(func, parts2, is_last)
def safe_take(n, b, warn=True):
r = list(take(n, b))
if len(r) != n and warn:
warnings.warn("Insufficient elements for `take`. {0} elements "
"requested, only {1} elements available. Try passing "
"larger `npartitions` to `take`.".format(n, len(r)))
return r
def random_sample(x, state_data, prob):
"""Filter elements of `x` by a probability `prob`.
Parameters
----------
x : iterable
state_data : tuple
A tuple that can be passed to ``random.Random``.
prob : float
A float between 0 and 1, representing the probability that each
element will be yielded.
"""
random_state = Random(state_data)
for i in x:
if random_state.random() < prob:
yield i
def random_state_data_python(n, random_state=None):
"""Return a list of tuples that can initialize.
``random.Random``.
Parameters
----------
n : int
Number of tuples to return.
random_state : int or ``random.Random``, optional
If an int, is used to seed a new ``random.Random``.
"""
if not isinstance(random_state, Random):
random_state = Random(random_state)
maxuint32 = 1 << 32
return [tuple(random_state.randint(0, maxuint32) for i in range(624))
for i in range(n)]
def split(seq, n):
""" Split apart a sequence into n equal pieces.
>>> split(range(10), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
"""
if not isinstance(seq, (list, tuple)):
seq = list(seq)
part = len(seq) / n
L = [seq[int(part * i): int(part * (i + 1))] for i in range(n - 1)]
L.append(seq[int(part * (n - 1)):])
return L
def to_dataframe(seq, columns, dtypes):
import pandas as pd
seq = reify(seq)
# pd.DataFrame expects lists, only copy if necessary
if not isinstance(seq, list):
seq = list(seq)
res = pd.DataFrame(seq, columns=list(columns))
return res.astype(dtypes, copy=False)
|
the-stack_106_20067
|
# mpirun -np 16 --ppn 16 python slowtest_performance_aposmm_only.py
# Should run in under 20 sec
import os
import sys
import numpy as np
import networkx as nx
import pickle
from functools import partial
from qiskit.optimization.ising.max_cut import get_operator as get_maxcut_operator
import scipy
from variationaltoolkit import VariationalQuantumOptimizerAPOSMM
from variationaltoolkit import VariationalQuantumOptimizerSequential
from mpi4py import MPI
is_master = (MPI.COMM_WORLD.Get_rank() == 0)
world_size = MPI.COMM_WORLD.Get_size()
if is_master:
start_time_aposmm = MPI.Wtime()
def maxcut_obj(x,G):
cut = 0
for i, j in G.edges():
if x[i] != x[j]:
# the edge is cut
cut -= 1
return cut
elist = [
[0,1],[1,2],[2,3],[3,4],[4,0],
[0,5],[1,6],[2,7],[3,8],[4,9],
[5,7],[5,8],[6,8],[6,9],[7,9]
]
G=nx.OrderedGraph()
G.add_edges_from(elist)
w = nx.adjacency_matrix(G)
obj = partial(maxcut_obj, G=G)
C, offset = get_maxcut_operator(w)
p = 10
lb = np.array([0, 0] * p)
ub = np.array([np.pi / 2] * p + [np.pi] * p)
np.random.seed(0)
init_theta = np.random.uniform(lb, ub)
np.random.seed(0)
varopt_aposmm = VariationalQuantumOptimizerAPOSMM(
obj,
'scipy_COBYLA',
initial_point=init_theta,
gen_specs_user={'max_active_runs': world_size-2},
optimizer_parameters={'tol': 1e-10, 'options': {'disp':False, 'maxiter': 200}},
varform_description={'name':'QAOA', 'p':p, 'cost_operator':C, 'num_qubits':G.number_of_nodes()},
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
problem_description={'offset': offset, 'do_not_check_cost_operator':True},
execute_parameters={})
res_aposmm = varopt_aposmm.optimize()
sys.stdout.flush()
sys.stderr.flush()
MPI.COMM_WORLD.Barrier()
if is_master:
end_time_aposmm = MPI.Wtime()
running_time = end_time_aposmm-start_time_aposmm
print(f"APOSMM finished in {running_time}s with {world_size} processes", flush=True)
assert(running_time < 25)
script_name = os.path.splitext(os.path.basename(__file__))[0]
print(f"{script_name} finished successfully")
MPI.COMM_WORLD.Barrier()
|
the-stack_106_20070
|
"""Support for Met.no weather service."""
from __future__ import annotations
import logging
from types import MappingProxyType
from typing import Any
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TIME,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
Forecast,
WeatherEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
LENGTH_INCHES,
LENGTH_MILLIMETERS,
PRESSURE_HPA,
PRESSURE_INHG,
SPEED_KILOMETERS_PER_HOUR,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
T,
)
from homeassistant.util.distance import convert as convert_distance
from homeassistant.util.pressure import convert as convert_pressure
from homeassistant.util.speed import convert as convert_speed
from .const import (
ATTR_FORECAST_PRECIPITATION,
ATTR_MAP,
CONDITIONS_MAP,
CONF_TRACK_HOME,
DOMAIN,
FORECAST_MAP,
)
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = (
"Weather forecast from met.no, delivered by the Norwegian "
"Meteorological Institute."
)
DEFAULT_NAME = "Met.no"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
vol.Optional(CONF_ELEVATION): int,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Met.no weather platform."""
_LOGGER.warning("Loading Met.no via platform config is deprecated")
# Add defaults.
config = {CONF_ELEVATION: hass.config.elevation, **config}
if config.get(CONF_LATITUDE) is None:
config[CONF_TRACK_HOME] = True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, False
),
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, True
),
]
)
def format_condition(condition: str) -> str:
"""Return condition from dict CONDITIONS_MAP."""
for key, value in CONDITIONS_MAP.items():
if condition in value:
return key
return condition
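# Illustration only (the real CONDITIONS_MAP lives in const.py): if the map
# contained {"rainy": ["rain", "lightrain"]}, then format_condition("lightrain")
# would return "rainy", while a string not found in any list is passed through
# unchanged.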
class MetWeather(CoordinatorEntity, WeatherEntity):
"""Implementation of a Met.no weather condition."""
def __init__(
self,
coordinator: DataUpdateCoordinator[T],
config: MappingProxyType[str, Any],
is_metric: bool,
hourly: bool,
) -> None:
"""Initialise the platform with a data instance and site."""
super().__init__(coordinator)
self._config = config
self._is_metric = is_metric
self._hourly = hourly
@property
def track_home(self) -> Any | bool:
"""Return if we are tracking home."""
return self._config.get(CONF_TRACK_HOME, False)
@property
def unique_id(self) -> str:
"""Return unique ID."""
name_appendix = ""
if self._hourly:
name_appendix = "-hourly"
if self.track_home:
return f"home{name_appendix}"
return f"{self._config[CONF_LATITUDE]}-{self._config[CONF_LONGITUDE]}{name_appendix}"
@property
def name(self) -> str:
"""Return the name of the sensor."""
name = self._config.get(CONF_NAME)
name_appendix = ""
if self._hourly:
name_appendix = " Hourly"
if name is not None:
return f"{name}{name_appendix}"
if self.track_home:
return f"{self.hass.config.location_name}{name_appendix}"
return f"{DEFAULT_NAME}{name_appendix}"
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return not self._hourly
@property
def condition(self) -> str | None:
"""Return the current condition."""
condition = self.coordinator.data.current_weather_data.get("condition")
return format_condition(condition)
@property
def temperature(self) -> float | None:
"""Return the temperature."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_TEMPERATURE]
)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self) -> float | None:
"""Return the pressure."""
pressure_hpa = self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_PRESSURE]
)
if self._is_metric or pressure_hpa is None:
return pressure_hpa
return round(convert_pressure(pressure_hpa, PRESSURE_HPA, PRESSURE_INHG), 2)
@property
def humidity(self) -> float | None:
"""Return the humidity."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_HUMIDITY]
)
@property
def wind_speed(self) -> float | None:
"""Return the wind speed."""
speed_km_h = self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_WIND_SPEED]
)
if self._is_metric or speed_km_h is None:
return speed_km_h
speed_mi_h = convert_speed(
speed_km_h, SPEED_KILOMETERS_PER_HOUR, SPEED_MILES_PER_HOUR
)
return int(round(speed_mi_h))
@property
def wind_bearing(self) -> float | str | None:
"""Return the wind direction."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_WIND_BEARING]
)
@property
def attribution(self) -> str:
"""Return the attribution."""
return ATTRIBUTION
@property
def forecast(self) -> list[Forecast] | None:
"""Return the forecast array."""
if self._hourly:
met_forecast = self.coordinator.data.hourly_forecast
else:
met_forecast = self.coordinator.data.daily_forecast
required_keys = {ATTR_FORECAST_TEMP, ATTR_FORECAST_TIME}
ha_forecast: list[Forecast] = []
for met_item in met_forecast:
if not set(met_item).issuperset(required_keys):
continue
ha_item = {
k: met_item[v]
for k, v in FORECAST_MAP.items()
if met_item.get(v) is not None
}
if not self._is_metric and ATTR_FORECAST_PRECIPITATION in ha_item:
if ha_item[ATTR_FORECAST_PRECIPITATION] is not None:
precip_inches = convert_distance(
ha_item[ATTR_FORECAST_PRECIPITATION],
LENGTH_MILLIMETERS,
LENGTH_INCHES,
)
ha_item[ATTR_FORECAST_PRECIPITATION] = round(precip_inches, 2)
if ha_item.get(ATTR_FORECAST_CONDITION):
ha_item[ATTR_FORECAST_CONDITION] = format_condition(
ha_item[ATTR_FORECAST_CONDITION]
)
ha_forecast.append(ha_item) # type: ignore[arg-type]
return ha_forecast
@property
def device_info(self) -> DeviceInfo:
"""Device info."""
return DeviceInfo(
default_name="Forecast",
entry_type="service",
identifiers={(DOMAIN,)}, # type: ignore[arg-type]
manufacturer="Met.no",
model="Forecast",
)
|
the-stack_106_20071
|
from sphinx_testing import with_app
@with_app(buildername="html", srcdir="./tests/examples", copy_srcdir_to_tmpdir=True)
def sphinx_build(app, status, warning):
app.build()
with open(app.outdir + "/index.html", "r") as f:
html = f.read()
assert "python test.py -h" in html
assert "No such file or directory" in html
def test_build():
sphinx_build()
|
the-stack_106_20074
|
"""
Implementation of a simple digital digit detector for thermometers
Author: Corentin Chauvin-Hameau
Date: 2021
License: Apache-2.0 License
"""
from math import atan2, degrees, pi, nan
import numpy as np
import cv2
import imutils
class DigitalDetector:
"""
Simple digital digit detector for thermometer images
"""
#
# Public member functions
#
def detect_digits(self, img, display_debug=False):
"""
Detects digits on a thermometer image and returns the temperature
Args:
- img: BGR image of the thermometer
- display_debug: Whether to display an image with debugging information
Returns:
- Temperature displayed by the thermometer (nan if not successful)
"""
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
screen_img = self._crop_screen(gray_img)
screen_img = cv2.resize(screen_img, (96, 120))
thresh_img = self._threshold_screen(screen_img)
digit_rectangles = self._detect_digits(thresh_img)
digits, digits_position, segments_rectangles = \
self._detect_segments(thresh_img, digit_rectangles)
temperature = self._get_temperature(digits, digits_position)
# Display debug image
if display_debug:
debug_img = cv2.cvtColor(thresh_img, cv2.COLOR_GRAY2RGB)
for (x, y, w, h) in digit_rectangles:
cv2.rectangle(debug_img, (x, y), (x+w, y+h), (0, 255, 0), 1)
font = cv2.FONT_HERSHEY_SIMPLEX
for k in range(len(digits)):
digit = digits[k]
position = digits_position[k]
debug_img = cv2.putText(debug_img, str(digit), position, font, 0.5, (0, 0, 255), 2, cv2.LINE_AA)
for segment in segments_rectangles:
cv2.rectangle(debug_img, segment[0], segment[1], (255, 0, 0), 1)
debug_img = cv2.resize(debug_img, (360, 288))
screen_img = cv2.resize(screen_img, (360, 288))
screen_img = cv2.cvtColor(screen_img, cv2.COLOR_GRAY2RGB)
double_image = np.hstack((screen_img, debug_img))
cv2.imshow("Temperature: {}".format(temperature), double_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return temperature
#
# Private member functions
#
def _crop_screen(self, img):
"""
Returns an image of the screen only
"""
img = img.copy()
screen_rectangle = self._find_screen(img)
screen_rectangle, img = self._rotate_image(screen_rectangle, img)
x0 = int(np.min(screen_rectangle[:, 0]))
x1 = int(np.max(screen_rectangle[:, 0]))
y0 = int(np.min(screen_rectangle[:, 1]))
y1 = int(np.max(screen_rectangle[:, 1]))
screen_img = img[y0:y1, x0:x1]
return screen_img
def _find_screen(self, img):
"""
Returns the coordinates of the screen on the given image
"""
blurred = cv2.GaussianBlur(img, (15, 15), 1)
edges = cv2.Canny(blurred, 40, 60)
screen_rectangle = None
contours = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
for cnt in contours:
approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)
if len(approx) != 4:
continue
p0 = approx[0]
p1 = approx[1]
p2 = approx[2]
e1 = (p0 - p1)[0]
e2 = (p2 - p1)[0]
l1 = np.linalg.norm(e1)
l2 = np.linalg.norm(e2)
angle = np.arccos(np.dot(e1, e2) / (l1 * l2))
if l1 < l2:
l1, l2 = l2, l1
if l2 < 10 or l1 / l2 > 1.4:
continue
if not (pi/2 - 0.1 <= angle <= pi/2 + 0.1):
continue
cv2.drawContours(img, [approx], -1, (0, 255, 0), 2)
screen_rectangle = approx[:, 0, :]
return screen_rectangle # don't need to get duplicates
def _rotate_image(self, screen_rectangle, img):
"""
Rotates the image to compensate for any tilt of the screen
"""
e1 = screen_rectangle[0] - screen_rectangle[1]
e2 = screen_rectangle[1] - screen_rectangle[2]
if np.linalg.norm(e1) > np.linalg.norm(e2):
h_edge = e1
else:
h_edge = e2
orientation = atan2(h_edge[1], h_edge[0])
while orientation < -pi/2 or orientation > pi/2:
if orientation < -pi/2:
orientation += pi
else:
orientation -= pi
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, degrees(orientation), 1.0)
rotated_img = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
homogeneous_rectangle = np.ones((3, 4))
homogeneous_rectangle[0:2, :] = screen_rectangle.transpose()
rotated_screen_rectangle = np.matmul(rot_mat, homogeneous_rectangle).transpose()
return rotated_screen_rectangle, rotated_img
def _threshold_screen(self, screen_img):
"""
Thresholds the screen image and removes the edges
"""
screen_img = screen_img.copy()
blurred = cv2.GaussianBlur(screen_img, (15, 15), 1)
alpha = 1.5
sharpened = cv2.addWeighted(screen_img, 1 + alpha, blurred, -alpha, 0.0)
thresh = cv2.adaptiveThreshold(
sharpened, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
19, 4
)
H, W = np.shape(thresh)
labels_nbr, labels, stats, centroids = cv2.connectedComponentsWithStats(255-thresh, connectivity=8)
def clean_connected(i, j, thresh):
if thresh[i, j] == 0:
label = labels[i, j]
thresh[labels == label] = 255
for i in range(H):
clean_connected(i, 0, thresh)
clean_connected(i, W-1, thresh)
for j in range(W):
clean_connected(0, j, thresh)
clean_connected(H-1, j, thresh)
# Remove the dot
for label in range(1, labels_nbr):
x = stats[label][cv2.CC_STAT_LEFT]
y = stats[label][cv2.CC_STAT_TOP]
w = stats[label][cv2.CC_STAT_WIDTH]
h = stats[label][cv2.CC_STAT_HEIGHT]
if y / H > 0.3 and h / H <= 0.1 and w / W <= 0.1:
thresh[labels == label] = 255
return thresh
def _detect_digits(self, img):
"""
Returns the rectangular bounding box of each digit on the given image
"""
H, W = np.shape(img)
labels_nbr, labels, stats, centroids = cv2.connectedComponentsWithStats(255-img, connectivity=8)
digit_rectangles = []
for label in range(1, labels_nbr):
component = img[labels == label]
x = stats[label][cv2.CC_STAT_LEFT]
y = stats[label][cv2.CC_STAT_TOP]
w = stats[label][cv2.CC_STAT_WIDTH]
h = stats[label][cv2.CC_STAT_HEIGHT]
if y / H > 0.3 and h / H >= 0.05:
digit_rectangles.append((x, y, w, h))
# Merge overlapping rectangles
if digit_rectangles == []:
return []
merged = True
while merged:
merged = False
new_rectangles = [digit_rectangles[0]]
for k in range(1, len(digit_rectangles)):
[x, y, w, h] = digit_rectangles[k]
new_rectangle = True
for l in range(len(new_rectangles)):
[x_other, y_other, w_other, h_other] = new_rectangles[l]
overlap = (x < x_other+w_other and x_other < x+w) or (y > y_other+h_other and y_other > y+h)
if overlap or x_other <= x + w/2.0 <= x_other + w_other:
new_x1 = min(x, x_other)
new_y1 = min(y, y_other)
new_x2 = max(x + w, x_other + w_other)
new_y2 = max(y + h, y_other + h_other)
new_rectangles[l] = (new_x1, new_y1, new_x2 - new_x1, new_y2 - new_y1)
new_rectangle = False
merged = True
if new_rectangle:
new_rectangles.append((x, y, w, h))
digit_rectangles = new_rectangles[:]
return digit_rectangles
def _detect_segments(self, img, digit_rectangles):
"""
Detects digit segments and returns the corresponding digit
Args:
- img: Thresholded image of the screen
- digit_rectangles: Bounding boxes of each digit
Returns:
- digits: List of corresponding digits
- digits_position: List of position where to display the digit (for debug)
- segments_rectangles: List of bounding boxes of each detected segment
"""
H, W = np.shape(img)
digits = []
digits_position = []
segments_rectangles = []
for (x, y, w, h) in digit_rectangles:
# Handle the case of ones
if w / W <= 0.1 and h / H > 0.3:
digits.append(1)
digits_position.append((x, int(0.4*H)))
continue
# Horizontal segments
binary_string = ""
d = int(0.3 * w)
for k in range(3):
x_segment = x + d
y_segment = y + int(k/2.0 * (h - d))
if self._check_segment_state(img, x_segment, y_segment, w - 2*d, d):
segments_rectangles.append([
(x_segment, y_segment),
(x_segment + w - 2*d, y_segment + d)
])
binary_string = "1" + binary_string
else:
binary_string = "0" + binary_string
# Vertical segments
d = int(0.4 * w)
for k in range(2):
x_segment = x + int(k * (w - d))
for l in range(2):
y_segment = y + int(l * h / 2) + d//2
if self._check_segment_state(img, x_segment, y_segment, d, h//2-d):
segments_rectangles.append([
(x_segment, y_segment),
(x_segment + d, y_segment + h//2-d)
])
binary_string = "1" + binary_string
else:
binary_string = "0" + binary_string
# Decode the digit
digit = self._decode_segments(binary_string)
digits.append(digit)
digits_position.append((x, int(0.4*H)))
return digits, digits_position, segments_rectangles
def _check_segment_state(self, img, x, y, w, h):
"""
Returns the state of the segment
Args:
- x, y: Coordinates of the top-left corner of the bounding box of the segment
- w, h: Sizes of the bounding box of the segment
Returns:
- Whether the segment is switched on
"""
black_pixels_nbr = np.count_nonzero(img[y:y+h, x:x+w] == 0)
if h == 0 or w == 0:
return False
else:
ratio = float(black_pixels_nbr) / (h * w)
if h > w: # vertical segment
return ratio > 0.25
else: # horizontal segment
return ratio > 0.4
def _decode_segments(self, binary_string):
"""
Returns the digit corresponding to a string representing the state of each segment
Returns nan (from the math module) if the digit couldn't be decoded
"""
# Case of faulty seven (sometimes one segment is wrongly detected)
if int(binary_string, 2) == int("001100101", 2):
return 7
# Normal case
integers = [
"01111101",
"01100000",
"00110111",
"01100111",
"01101010",
"01001111",
"01011111",
"01100001",
"01111111",
"01101111"
]
integers = [int(x, 2) for x in integers[:]]
value = int(binary_string, 2)
if value in integers:
return integers.index(value)
else:
return nan
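# Example of the lookup above: the 7-character state string "1100000" parses to the
# same integer as "01100000" (leading zeros are irrelevant to int(..., 2)), which is
# integers[1], so _decode_segments returns 1; any pattern outside the table and the
# faulty-seven special case yields nan.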
def _get_temperature(self, digits, digits_position):
"""
Returns the temperature indicated by the thermometer
"""
digits = np.array(digits)
x = np.array(digits_position)[:, 0]
digits = digits[np.argsort(x)]
string = ""
for digit in digits:
string += str(digit)
try:
return int(string) / 10
except ValueError:
return nan
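# Minimal usage sketch (image path hypothetical):
#
#   import cv2
#   detector = DigitalDetector()
#   temperature = detector.detect_digits(cv2.imread("thermometer.jpg"),
#                                        display_debug=True)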
|
the-stack_106_20075
|
from __future__ import annotations
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import DtypeObj
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_bool_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import register_extension_dtype
from pandas.core.arrays.numeric import (
NumericArray,
NumericDtype,
)
class FloatingDtype(NumericDtype):
"""
An ExtensionDtype to hold a single size of floating dtype.
These specific implementations are subclasses of the non-public
FloatingDtype. For example we have Float32Dtype to represent float32.
The attributes name & type are set when these subclasses are created.
"""
def __repr__(self) -> str:
return f"{self.name}Dtype()"
@property
def _is_numeric(self) -> bool:
return True
@classmethod
def construct_array_type(cls) -> type[FloatingArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return FloatingArray
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# for now only handle other floating types
if not all(isinstance(t, FloatingDtype) for t in dtypes):
return None
np_dtype = np.find_common_type(
# error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype]" has no
# attribute "numpy_dtype"
[t.numpy_dtype for t in dtypes], # type: ignore[union-attr]
[],
)
if np.issubdtype(np_dtype, np.floating):
return FLOAT_STR_TO_DTYPE[str(np_dtype)]
return None
def coerce_to_array(
values, dtype=None, mask=None, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""
Coerce the input values array to numpy arrays with a mask.
Parameters
----------
values : 1D list-like
dtype : float dtype
mask : bool 1D array, optional
copy : bool, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is floating numpy array, preserve its dtype
if dtype is None and hasattr(values, "dtype"):
if is_float_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if isinstance(dtype, str) and dtype.startswith("Float"):
# Avoid DeprecationWarning from NumPy about np.dtype("Float64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), FloatingDtype):
try:
dtype = FLOAT_STR_TO_DTYPE[str(np.dtype(dtype))]
except KeyError as err:
raise ValueError(f"invalid dtype specified {dtype}") from err
if isinstance(values, FloatingArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values.dtype):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == "empty":
pass
elif inferred_type not in [
"floating",
"integer",
"mixed-integer",
"integer-na",
"mixed-integer-float",
]:
raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype")
elif is_bool_dtype(values) and is_float_dtype(dtype):
values = np.array(values, dtype=float, copy=copy)
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError(f"{values.dtype} cannot be converted to a FloatingDtype")
if values.ndim != 1:
raise TypeError("values must be a 1D list-like")
if mask is None:
mask = libmissing.is_numeric_na(values)
else:
assert len(mask) == len(values)
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype("float64")
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
# TODO should this be a safe cast?
if mask.any():
values = values.copy()
values[mask] = np.nan
values = values.astype(dtype, copy=False) # , casting="safe")
return values, mask
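# Rough sketch of what the coercion above produces (input values hypothetical):
# coerce_to_array([0.1, None, 0.3]) returns a float64 data array whose missing slot
# holds a np.nan placeholder plus the boolean mask [False, True, False]; the mask,
# not the placeholder, is what marks a value as missing.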
class FloatingArray(NumericArray):
"""
Array of floating (optional missing) values.
.. versionadded:: 1.2.0
.. warning::
FloatingArray is currently experimental, and its API or internal
implementation may change without warning. Especially the behaviour
regarding NaN (distinct from NA missing values) is subject to change.
We represent a FloatingArray with 2 numpy arrays:
- data: contains a numpy float array of the appropriate dtype
- mask: a boolean array holding a mask on the data, True is missing
To construct an FloatingArray from generic array-like input, use
:func:`pandas.array` with one of the float dtypes (see examples).
See :ref:`integer_na` for more.
Parameters
----------
values : numpy.ndarray
A 1-d float-dtype array.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values.
copy : bool, default False
Whether to copy the `values` and `mask`.
Attributes
----------
None
Methods
-------
None
Returns
-------
FloatingArray
Examples
--------
    Create a FloatingArray with :func:`pandas.array`:
>>> pd.array([0.1, None, 0.3], dtype=pd.Float32Dtype())
<FloatingArray>
[0.1, <NA>, 0.3]
Length: 3, dtype: Float32
String aliases for the dtypes are also available. They are capitalized.
>>> pd.array([0.1, None, 0.3], dtype="Float32")
<FloatingArray>
[0.1, <NA>, 0.3]
Length: 3, dtype: Float32
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value = 0.0
# Fill values used for any/all
_truthy_value = 1.0
_falsey_value = 0.0
@cache_readonly
def dtype(self) -> FloatingDtype:
return FLOAT_STR_TO_DTYPE[str(self._data.dtype)]
def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False):
if not (isinstance(values, np.ndarray) and values.dtype.kind == "f"):
raise TypeError(
"values should be floating numpy array. Use "
"the 'pd.array' function instead"
)
if values.dtype == np.float16:
# If we don't raise here, then accessing self.dtype would raise
raise TypeError("FloatingArray does not support np.float16 dtype.")
super().__init__(values, mask, copy=copy)
@classmethod
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
return coerce_to_array(value, dtype=dtype, copy=copy)
def _values_for_argsort(self) -> np.ndarray:
return self._data
_dtype_docstring = """
An ExtensionDtype for {dtype} data.
This dtype uses ``pd.NA`` as missing value indicator.
Attributes
----------
None
Methods
-------
None
"""
# create the Dtype
@register_extension_dtype
class Float32Dtype(FloatingDtype):
type = np.float32
name = "Float32"
__doc__ = _dtype_docstring.format(dtype="float32")
@register_extension_dtype
class Float64Dtype(FloatingDtype):
type = np.float64
name = "Float64"
__doc__ = _dtype_docstring.format(dtype="float64")
FLOAT_STR_TO_DTYPE = {
"float32": Float32Dtype(),
"float64": Float64Dtype(),
}
|
the-stack_106_20076
|
# -*- coding: utf-8 -*-
import csv
# Count rentals and returns per district ("gu") of Daejeon from the Tashu
# bike-share data. Station ids are assumed to run 1..144 and to match the row
# order of station.csv, so station_list[id] looks a station up directly.
rent_file = open('tashu.csv', 'r')
station_file = open('station.csv', 'r')
tashu_dict = csv.DictReader(rent_file)
station_dict = csv.DictReader(station_file)
gu_dict = {'유성구': 0, '서구': 0, '대덕구': 0, '중구': 0, '동구': 0}
station_list = [{}]  # index 0 is a placeholder so station ids can be used as-is
for info in station_dict:
    station_list.append(info)
for rent in tashu_dict:
    if rent['RENT_STATION'] != '' and int(rent['RENT_STATION']) <= 144:
        gu_dict[station_list[int(rent['RENT_STATION'])]['GU']] += 1
    if rent['RETURN_STATION'] != '' and int(rent['RETURN_STATION']) <= 144:
        gu_dict[station_list[int(rent['RETURN_STATION'])]['GU']] += 1
rent_file.close()
station_file.close()
print('유성구' + " " + str(gu_dict['유성구']) + "\n" +\
'서구' + " " + str(gu_dict['서구']) + "\n" +\
'대덕구' + " " + str(gu_dict['대덕구']) + "\n" +\
'중구' + " " + str(gu_dict['중구']) + "\n" +\
'동구' + " " + str(gu_dict['동구']))
|
the-stack_106_20078
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import pytest
from mmdet.ops.nms.nms_wrapper import get_nms_from_type
def test_get_nms_op_for_unsupported_type():
nms_type = 'definitely_not_nms_type'
with pytest.raises(RuntimeError):
get_nms_from_type(nms_type)
@pytest.mark.parametrize('supported_type', ['nms'])
def test_get_nms_op_for_supported_type(supported_type):
nms_op = get_nms_from_type(supported_type)
assert nms_op is not None, f'No operation found for type {supported_type}.'
|
the-stack_106_20081
|
import argparse
import copy
import pickle
import datetime
import time
import shutil
import sys
import git
import os
import numpy as np
from multiprocessing import Pool
from multiprocessing import Manager
import core
import graph
import plots
import routing_policies
import restoration_policies
import logging
logging.basicConfig(format='%(asctime)s\t%(name)-12s\t%(threadName)s\t%(message)s', level=logging.DEBUG)
def run(uargs):
start_time = time.time()
topology = graph.get_topology(uargs)
topology = graph.get_dcs(uargs, topology)
topology = graph.get_ksp(uargs, topology)
env = core.Environment(uargs, topology=topology)
logger = logging.getLogger('run')
# in this case, a configuration changes only the load of the network
exec_routing_policies = ['CADC', 'FADC', 'FLB']
exec_restoration_policies = ['DNR', 'PR', 'PRwR']
    loads = [x for x in range(uargs.min_load, uargs.max_load + 1, uargs.load_step)]  # use the function argument, not the module-level 'args'
final_output_folder = env.output_folder + '/' + datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%S.%fUTC')
env.output_folder = final_output_folder
if not os.path.isdir('./results/' + env.output_folder):
os.makedirs('./results/' + env.output_folder)
logger.debug(f'creating folder {env.output_folder}')
# creating a graphical representation of the topology
plots.plot_topology(env, args)
# copy current version of files
with open('./results/{}/0-info.txt'.format(env.output_folder), 'wt') as file:
width = 20
print('Date (UTC):'.ljust(width), datetime.datetime.now(datetime.timezone.utc), file=file)
print('Date (local):'.ljust(width), datetime.datetime.now(), file=file)
# repo = git.Repo()
# print('Commit date:'.ljust(width),
# datetime.datetime.fromtimestamp(repo.head.object.committed_date).strftime('%Y-%m-%d %H:%M:%S'),
# file=file)
# print('Author:'.ljust(width), repo.head.object.committer, file=file)
# print('GIT hexsha:'.ljust(width), repo.head.object.hexsha, file=file)
print('Command:'.ljust(width), ' '.join(sys.argv), file=file)
print('Arguments:'.ljust(width), args, file=file)
# copy current version of files
shutil.copytree('./', f'./results/{env.output_folder}/source-code/',
ignore=shutil.ignore_patterns('__pycache__', '*.pyc', '*.md', 'results', 'LICENSE', '*.ipynb', '.git', '.idea', '.gitignore'))
# preparing the thread-safe data structure to hold the results
manager = Manager()
results = manager.dict()
for routing_policy in exec_routing_policies:
results[routing_policy] = manager.dict()
for restoration_policy in exec_restoration_policies:
results[routing_policy][restoration_policy] = {load: manager.list() for load in loads}
envs = []
for routing_policy in exec_routing_policies: # runs the simulations for every routing policy
for restoration_policy in exec_restoration_policies: # runs the simulations for every restoration policy
for load in loads: # runs the simulations for every load
if routing_policy == 'CADC':
routing_policy_instance = routing_policies.ClosestAvailableDC()
elif routing_policy == 'FADC':
routing_policy_instance = routing_policies.FarthestAvailableDC()
elif routing_policy == 'FLB':
routing_policy_instance = routing_policies.FullLoadBalancing()
else:
raise ValueError('Routing policy was not configured correctly (value set to {})'.format(routing_policy))
if restoration_policy == 'DNR':
restoration_policy_instance = restoration_policies.DoNotRestorePolicy()
elif restoration_policy == 'PR':
restoration_policy_instance = restoration_policies.PathRestorationPolicy()
elif restoration_policy == 'PRwR':
restoration_policy_instance = restoration_policies.PathRestorationWithRelocationPolicy()
else:
raise ValueError('Restoration policy was not configured correctly (value set to {})'.format(restoration_policy))
env_topology = copy.deepcopy(topology) # makes a deep copy of the topology object
env_t = core.Environment(uargs,
topology=env_topology,
results=results,
load=load,
routing_policy=routing_policy_instance,
restoration_policy=restoration_policy_instance,
seed=len(exec_routing_policies) * load,
output_folder=env.output_folder)
envs.append(env_t)
# code for debugging purposes -- it runs without multithreading
# if load == 600 and routing_policy == 'CADC':
# core.run_simulation(env_t)
# print("Ran in debug mode... exiting...")
# exit(0)
logger.debug(f'Starting pool of simulators with {uargs.threads} threads')
# use the code above to keep updating the final plot as the simulation progresses
with Pool(processes=uargs.threads) as p:
result_pool = p.map_async(core.run_simulation, envs)
p.close()
done = False
while not done:
if result_pool.ready():
done = True
else:
time.sleep(uargs.temporary_plot_every)
plots.plot_final_results(env, results, start_time)
# if you do not want periodical updates, you can use the following code
# with Pool(processes=uargs.threads) as p:
# p.map(core.run_simulation, envs)
# p.close()
# p.join()
# logging.debug("Finished the threads")
# consolidating statistics
plots.plot_final_results(env, results, start_time)
with open('./results/{}/final_results.h5'.format(env.output_folder), 'wb') as file:
realized_results = dict(results)
for k1, v1 in results.items():
realized_results[k1] = dict(v1)
for k2, v2 in results[k1].items():
realized_results[k1][k2] = dict(v2)
for k3, v3 in results[k1][k2].items():
realized_results[k1][k2][k3] = list(v3)
pickle.dump({
'args': uargs,
'env': env,
'results': realized_results,
'routing_policies': [policy for policy in exec_routing_policies],
'restoration_policies': [policy for policy in exec_restoration_policies],
'loads': loads,
'timedelta': datetime.timedelta(seconds=(time.time() - start_time)),
'datetime': datetime.datetime.fromtimestamp(time.time())
}, file)
logger.debug('Finishing simulation after {}'.format(datetime.timedelta(seconds=(time.time() - start_time))))
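# --- Hedged sketch (not part of the original script) -------------------------
# The map_async + polling pattern used inside run(), shown in isolation with a
# dummy task instead of core.run_simulation. Illustration only; it reuses the
# `time` and `Pool` imports already made at the top of this file.
def _square_slowly(x):
    time.sleep(0.2)
    return x * x
def _poll_pattern_demo(n_tasks=4, n_workers=2, poll_seconds=0.5):
    with Pool(processes=n_workers) as pool:
        async_result = pool.map_async(_square_slowly, range(n_tasks))
        pool.close()
        while not async_result.ready():
            # in the real script this is where intermediate plots are refreshed
            time.sleep(poll_seconds)
        return async_result.get()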
if __name__ == '__main__':
env = core.Environment()
parser = argparse.ArgumentParser()
parser.add_argument('--plot_simulation_progress', default=False, action='store_true',
help='Plot summary for each seed simulated (default=False)')
parser.add_argument('-tf', '--topology_file', default=env.topology_file, help='Network topology file to be used')
parser.add_argument('-a', '--num_arrivals', type=int, default=env.num_arrivals,
help='Number of arrivals per episode to be generated (default={})'.format(env.num_arrivals))
parser.add_argument('-k', '--k_paths', type=int, default=env.k_paths,
help='Number of k-shortest-paths to be considered (default={})'.format(env.k_paths))
parser.add_argument('-d', '--num_dcs', type=int, default=env.num_dcs,
help='Number of datacenters to be placed (default={})'.format(env.num_dcs))
parser.add_argument('--dc_placement', default=env.dc_placement,
help='DC placement criteria (default={})'.format(env.dc_placement))
parser.add_argument('-t', '--threads', type=int, default=env.threads,
help='Number of threads to be used to run the simulations (default={})'.format(
env.threads))
    parser.add_argument('--min_load', type=int, default=600,
                        help='Minimum load in Erlangs of the traffic generated (default: 600)')
    parser.add_argument('--max_load', type=int, default=840,
                        help='Maximum load in Erlangs of the traffic generated (default: 840)')
    parser.add_argument('--load_step', type=int, default=40,
                        help='Step in Erlangs between consecutive loads (default={})'.format(40))
parser.add_argument('-s', '--seed', type=int, default=env.seed,
help='Seed of the random numbers (default={})'.format(env.seed))
parser.add_argument('-ns', '--num_seeds', type=int, default=env.num_seeds,
help='Number of seeds to run for each configuration (default={})'.format(env.num_seeds))
te = 5
parser.add_argument('-te', '--temporary_plot_every', type=int, default=te, #TODO: adjust for your needs
help='Time interval for plotting intermediate statistics of the simulation in seconds (default={})'.format(te))
parser.add_argument('-o', '--output_folder', default=env.output_folder,
help='Output folder inside results (default={})'.format(env.output_folder))
    parser.add_argument('-do', '--disaster_occurences', default=env.number_disaster_occurences,
                        help='Number of disasters to occur for each seed simulated (default={})'.format(env.number_disaster_occurences))
    parser.add_argument('-fd', '--failure_duration', default=env.mean_failure_duration,
                        help='Mean failure or disaster duration (default={})'.format(env.mean_failure_duration))
args = parser.parse_args()
run(args)
|
the-stack_106_20084
|
import RPi.GPIO as GPIO
from time import sleep
import subprocess
import sys
import pygame.mixer
pygame.mixer.init()
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.output(11, False)
GPIO.output(12, False)
def detect():
"""Detects qr code from camera and returns string that represents that code.
return -- qr code from image as string
"""
playSound("Searching2")
subprocess.call(["raspistill -n -t 500 -w 720 -h 720 -o cam.png"],shell=True)
process = subprocess.Popen(["zbarimg -D cam.png"], stdout=subprocess.PIPE, shell=True)
(out, err) = process.communicate()
qr_code = None
# out looks like "QR-code: Xuz213asdY" so you need
# to remove first 8 characters plus whitespaces
if len(out) > 8:
qr_code = out[8:].strip()
return qr_code
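# --- Hedged sketch (not part of the original script) -------------------------
# detect() strips the fixed "QR-code:" prefix by slicing off the first 8
# characters. A regex makes the same parsing a little more explicit; shown only
# as an alternative, assuming the usual zbarimg output format "QR-Code:<data>".
def parse_zbar_output(out):
    import re
    match = re.search(rb"QR-Code:(.*)", out, re.IGNORECASE)
    return match.group(1).strip() if match else None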
def searchQR():
qrcode = None
    while qrcode is None:
qrcode = detect()
return qrcode
def playSound(soundFile):
print("Playing sound")
print("sounds/" + soundFile + ".aiff")
sound = pygame.mixer.Sound("sounds/" + soundFile + ".aiff")
sound.play()
def orange():
GPIO.output(11, False)
GPIO.output(12, True)
def blue():
GPIO.output(11, True)
GPIO.output(12, False)
def rainbowColours():
GPIO.output(11, True)
GPIO.output(12, True)
def ledsOff():
GPIO.output(11, False)
GPIO.output(12, False)
def menu():
print("Enter value")
print("1 for orange, 2 for blue, 3 for rainbow, 4 for no LEDs, 5 for QRcode, 9 for quit")
    thing = input()  # input() already returns a str on Python 3 (was str(raw_input()))
if thing == "1":
orange()
elif thing == "2":
blue()
elif thing == "3":
rainbowColours()
elif thing == "5":
book = searchQR()
print("Book found!")
print("----------------")
print("")
print(book)
print("")
print("----------------")
elif thing == "4":
ledsOff()
elif thing == "9":
GPIO.cleanup()
sys.exit()
else:
print("Unknown entry")
while True:
menu()
|
the-stack_106_20085
|
import logging
import time
import os
import sys
import click
import click_log
import tqdm
import pysam
from construct import *
from ..utils import bam_utils
from ..annotate.command import get_segments
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("extract")
click_log.basic_config(logger)
@click.command(name=logger.name)
@click_log.simple_verbosity_option(logger)
@click.option(
"-p",
"--pbi",
required=False,
type=click.Path(exists=True),
help="BAM .pbi index file",
)
@click.option(
"-o",
"--out-file",
default=".",
required=True,
type=str,
help="Output file name.",
)
@click.option(
'--force',
is_flag=True,
default=False,
show_default=True,
help="Force overwrite of the output files if they exist."
)
@click.option(
"-b",
"--base-padding",
default=2,
required=False,
show_default=True,
type=int,
help="Number of bases to include on either side of the extracted region(s).",
)
@click.option(
"--leading-adapter",
default="10x_Adapter",
required=False,
show_default=True,
type=str,
help="Adapter preceding the region to extract.",
)
@click.option(
"--trailing-adapter",
default="Poly_A",
required=False,
show_default=True,
type=str,
help="Adapter following the region to extract.",
)
@click.option(
"--start-offset",
default=16+10, # CBC + UMI for MAS15 is the default
required=False,
show_default=True,
type=int,
help="Number of bases to ignore from the extracted region start. "
"These bases will not be included in the extracted sequences.",
)
@click.argument("input-bam", default="-" if not sys.stdin.isatty() else None, type=click.File("rb"))
def main(pbi, out_file, force, base_padding, leading_adapter, trailing_adapter, start_offset, input_bam):
"""Extract coding segments from the reads in the given bam.
The main coding segments are assumed to be labeled as `random` segments.
Uses known segments flanking the region to be extracted as markers to indicate
the start and end of what to extract."""
t_start = time.time()
logger.info("Invoked via: longbow %s", " ".join(sys.argv[1:]))
pbi = f"{input_bam.name}.pbi" if pbi is None else pbi
read_count = None
if os.path.exists(pbi):
read_count = bam_utils.load_read_count(pbi)
logger.info("About to Extract segments from %d reads", read_count)
# Check to see if the output file exists:
bam_utils.check_for_preexisting_files(out_file, exist_ok=force)
# TODO: We don't need to check our model right now because our models are SO similar.
# This will need to be fixed when we start using more exotic models.
logger.info(f"Writing extracted read segments to: {out_file}")
logger.info(f"Extracting `random` segments between {leading_adapter} and {trailing_adapter}.")
logger.info(f"Ignoring the first {start_offset} bases from extracted read segments.")
logger.info(f"Including {base_padding} flanking bases.")
# Open our input bam file:
pysam.set_verbosity(0)
with pysam.AlignmentFile(input_bam, "rb", check_sq=False, require_index=False) as bam_file, \
tqdm.tqdm(
desc="Progress",
unit=" read",
colour="green",
file=sys.stderr,
disable=not sys.stdin.isatty(),
total=read_count
) as pbar:
# Get our header from the input bam file:
out_header = bam_utils.create_bam_header_with_program_group(logger.name, bam_file.header)
# Setup output files:
with pysam.AlignmentFile(out_file, "wb", header=out_header) as extracted_bam_file:
num_reads = 0
num_reads_with_extracted_segments = 0
num_segments_extracted = 0
num_segments_skipped = 0
for read in bam_file:
# Get our read segments:
try:
_, segments = get_segments(read)
except KeyError:
logger.error(f"Input bam file does not contain longbow segmented reads! "
f"No {bam_utils.SEGMENTS_TAG} tag detected on read {read.query_name} !")
sys.exit(1)
# Get our marker segments:
start_marker_list = [(i, s) for i, s in enumerate(segments) if s.name == leading_adapter]
end_marker_list = [(i, s) for i, s in enumerate(segments) if s.name == trailing_adapter]
if len(start_marker_list) != len(end_marker_list):
logger.warning(f"Found %d start markers and %d end markers. Only looking at first %d pairs. "
f"(starts: %s, ends: %s)",
len(start_marker_list), len(end_marker_list),
min(len(start_marker_list), len(end_marker_list)),
" ".join([f"{i}:{s.name}" for i, s in start_marker_list]),
" ".join([f"{i}:{s.name}" for i, s in end_marker_list]))
extracted_segment = False
# Go through each marker pair and do the extraction:
for s_info, e_info in zip(start_marker_list, end_marker_list):
si, start_marker = s_info
ei, end_marker = e_info
# Does the start marker come before the end marker and do we have exactly one random segment in
# between them?
if (start_marker.end < end_marker.start) and (ei - si == 2) and (segments[si + 1].name == "random"):
# We have a valid segment to extract:
logger.debug("Found a segment to extract: %s: %s", read.query_name, segments[si + 1])
# Create an AlignedSegment to output:
aligned_segment = _create_extracted_aligned_segment(
read, segments[si + 1], start_offset, base_padding
)
if aligned_segment:
extracted_bam_file.write(aligned_segment)
num_segments_extracted += 1
extracted_segment = True
else:
num_segments_skipped += 1
else:
if start_marker.end >= end_marker.start:
logger.warning("Read %s: start marker segment (i=%d) occurs at or after end segment (i=%d):"
" %d >= %d. Skipping segment.",
read.query_name, si, ei, start_marker.end, end_marker.start)
elif ei - si != 2:
logger.warning("Read %s: start segment (i=%d) and end segment (i=%d) have more than one "
"segment between them. Skipping segment.", read.query_name, si, ei)
elif segments[si + 1].name != "random":
logger.warning("Read %s: segment between start segment (i=%d) and end segment (i=%d) "
"is not a random segment. Skipping segment.", read.query_name, si, ei)
num_segments_skipped += 1
pbar.update(1)
num_reads += 1
if extracted_segment:
num_reads_with_extracted_segments += 1
# Calc some stats:
pct_reads_with_extracted_segments = 100 * num_reads_with_extracted_segments / num_reads if num_reads > 0 else 0
segs_per_read = num_segments_extracted / num_reads if num_reads > 0 else 0
# Yell at the user:
logger.info(f"Done. Elapsed time: %2.2fs.", time.time() - t_start)
logger.info(f"Total # Reads Processed: %d", num_reads)
logger.info(f"# Reads Containing Extracted Segments: %d (%2.2f%%)",
num_reads_with_extracted_segments, pct_reads_with_extracted_segments)
logger.info(f"Total # Segments Extracted: %d", num_segments_extracted)
logger.info(f"Total # Segments Skipped: %d", num_segments_skipped)
logger.info(f"# Segments extracted per read: %2.2f", segs_per_read)
def _create_extracted_aligned_segment(read, seg_to_extract, start_offset, base_padding):
"""Create a pysam.AlignedSegment object to store the information from the extracted bases."""
start_coord = seg_to_extract.start + start_offset - base_padding
end_coord = seg_to_extract.end + base_padding
# Bounds check our coords:
if start_coord < 0:
logger.debug("Calculated start for %s would start before read begins. Setting to 0.", read.query_name)
start_coord = 0
elif start_coord >= len(read.query_sequence):
logger.warning("Start coord for %s would start after read. Cannot process.", read.query_name)
return None
if end_coord < 0:
logger.warning("End coord for %s would start before read. Cannot process.", read.query_name)
return None
elif end_coord >= len(read.query_sequence):
logger.debug("Calculated end for %s would start after read ends. Setting to 0.", read.query_name)
end_coord = len(read.query_sequence)-1
if end_coord <= start_coord:
logger.warning("Start coord for %s would start at or after end coord. Cannot process.", read.query_name)
return None
# Create our segment:
a = pysam.AlignedSegment()
a.query_name = (
f"{read.query_name}/{start_coord}_{end_coord}"
)
# Add one to end_coord because coordinates are inclusive:
a.query_sequence = f"{read.query_sequence[start_coord:end_coord+1]}"
a.query_qualities = read.query_alignment_qualities[start_coord: end_coord + 1]
a.tags = read.get_tags()
a.flag = 4 # unmapped flag
a.mapping_quality = 255
return a
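# --- Worked example (comments only, not part of the original module) ---------
# With the defaults start_offset=26 (16 bp CBC + 10 bp UMI) and base_padding=2,
# a "random" segment annotated at [1000, 1500] on the read is exported as:
#   start_coord = 1000 + 26 - 2 = 1024
#   end_coord   = 1500 + 2      = 1502
# so the written record carries read.query_sequence[1024:1503] (the +1 makes
# the inclusive end coordinate explicit) and is named "<read name>/1024_1502".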
|
the-stack_106_20087
|
#!/usr/bin/env python
# coding: utf-8
import sys
import setuptools
PACKAGE_NAME = 'pycsvsql'
MINIMUM_PYTHON_VERSION = '3.6'
def check_python_version():
"""Exit when the Python version is too low."""
    # Compare version tuples: comparing the version *string* lexically would
    # wrongly reject e.g. Python 3.10, since "3.10..." < "3.6".
    if sys.version_info < tuple(int(p) for p in MINIMUM_PYTHON_VERSION.split('.')):
        sys.exit("Python {0}+ is required.".format(MINIMUM_PYTHON_VERSION))
def build_description():
"""Build a description for the project from documentation files."""
readme = open("README.md").read()
return readme
check_python_version()
setuptools.setup(
name='pycsvsql',
version='0.0.1',
description="Python csvsql convertor.",
url='https://github.com/gzxultra/pycsvsql',
author='Zhixiang Gu',
author_email='[email protected]',
packages=setuptools.find_packages(),
long_description=build_description(),
long_description_content_type='text/markdown',
license='MIT',
classifiers=[
'Development Status :: 1 - Planning',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
install_requires=[
"click ~= 6.0",
"peewee ~= 3.3.1",
]
)
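# --- Hedged usage note (not part of the original setup script) ---------------
# A local editable install for development would typically be:
#   pip install -e .
# and a source/wheel build:
#   python setup.py sdist bdist_wheel
# These are standard setuptools workflows, assumed here rather than taken from
# this repository's own documentation.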
|
the-stack_106_20088
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.api.technical import parse_shfe_data, parse_shfe_day_data
from fooltrader.contract.files_contract import get_exchange_cache_dir, get_exchange_cache_path
from fooltrader.utils.utils import to_timestamp
class FutureShfeSpider(scrapy.Spider):
name = "future_shfe_spider"
custom_settings = {
# 'DOWNLOAD_DELAY': 2,
# 'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
}
def __init__(self, name=None, **kwargs):
super().__init__(name, **kwargs)
self.trading_dates = None
def start_requests(self):
self.trading_dates = self.settings.get("trading_dates")
        # 'dataType' is normally supplied via "scrapy crawl ... -a dataType=inventory";
        # getattr keeps the spider usable when the attribute was not passed at all.
        if getattr(self, 'dataType', None):
today = pd.Timestamp.today()
for date in pd.date_range(start=today.date()-pd.Timedelta(weeks=520),end=today):
the_dir=get_exchange_cache_path(security_type='future',exchange='shfe',the_date=to_timestamp(date),data_type='inventory')+'.json'
if date.dayofweek<5 and not os.path.exists(the_dir):
yield Request(url=self.get_day_inventory_url(the_date=date.strftime('%Y%m%d')),
meta={'the_date': date,
'the_path': the_dir},
callback=self.download_shfe_data_by_date)
if self.trading_dates:
            # per-day data: fetch the daily kdata file for each trading date
for the_date in self.trading_dates:
the_path = get_exchange_cache_path(security_type='future', exchange='shfe',
the_date=to_timestamp(the_date),
data_type='day_kdata')
yield Request(url=self.get_day_kdata_url(the_date=the_date),
meta={'the_date': the_date,
'the_path': the_path},
callback=self.download_shfe_data_by_date)
else:
            # otherwise fetch the yearly statistics archives directly
for the_year in range(2009, datetime.today().year):
the_dir = get_exchange_cache_dir(security_type='future', exchange='shfe')
the_path = os.path.join(the_dir, "{}_shfe_history_data.zip".format(the_year))
if not os.path.exists(the_path):
yield Request(url=self.get_year_k_data_url(the_year=the_year),
meta={'the_year': the_year,
'the_path': the_path},
callback=self.download_shfe_history_data)
def download_shfe_history_data(self, response):
content_type_header = response.headers.get('content-type', None)
the_year = response.meta['the_year']
the_path = response.meta['the_path']
if content_type_header.decode("utf-8") == 'application/zip':
with open(the_path, "wb") as f:
f.write(response.body)
f.flush()
else:
self.logger.exception(
"get shfe year {} data failed:the_path={} url={} content type={} body={}".format(the_year,
the_path,
response.url,
content_type_header,
response.body))
def download_shfe_data_by_date(self, response):
the_path = response.meta['the_path']
        # cache the raw response to disk
with open(the_path, "wb") as f:
f.write(response.body)
f.flush()
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(FutureShfeSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
if self.trading_dates:
parse_shfe_day_data()
else:
parse_shfe_data()
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
def get_year_k_data_url(self, the_year):
return 'http://www.shfe.com.cn/historyData/MarketData_Year_{}.zip'.format(the_year)
def get_day_kdata_url(self, the_date):
return 'http://www.shfe.com.cn/data/dailydata/kx/kx{}.dat'.format(the_date)
def get_day_inventory_url(self, the_date):
return 'http://www.shfe.com.cn/data/dailydata/kx/pm{}.dat'.format(the_date)
def get_trading_date_url(self):
return 'http://www.shfe.com.cn/bourseService/businessdata/calendar/20171201all.dat'
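# --- Hedged usage note (not part of the original spider) ---------------------
# The URL builders above expand to requests such as:
#   get_day_kdata_url('20180105')     -> http://www.shfe.com.cn/data/dailydata/kx/kx20180105.dat
#   get_day_inventory_url('20180105') -> http://www.shfe.com.cn/data/dailydata/kx/pm20180105.dat
#   get_year_k_data_url(2017)         -> http://www.shfe.com.cn/historyData/MarketData_Year_2017.zip
# Invoking it as "scrapy crawl future_shfe_spider -a dataType=inventory" is an
# assumption about how the surrounding project wires up its crawls.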
|
the-stack_106_20089
|
# Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all the classes regarding a specific commit, such as
Commit, Modification,
ModificationType and Method.
"""
import logging
from _datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Set, Dict, Tuple, Optional
import hashlib
import lizard
import lizard_languages
from git import Diff, Git, Commit as GitCommit, NULL_TREE
from pydriller.domain.developer import Developer
logger = logging.getLogger(__name__)
class ModificationType(Enum):
"""
Type of Modification. Can be ADD, COPY, RENAME, DELETE, MODIFY or UNKNOWN.
"""
ADD = 1
COPY = 2
RENAME = 3
DELETE = 4
MODIFY = 5
UNKNOWN = 6
class DMMProperty(Enum):
"""
Maintainability properties of the Delta Maintainability Model.
"""
UNIT_SIZE = 1
UNIT_COMPLEXITY = 2
UNIT_INTERFACING = 3
class Method:
"""
This class represents a method in a class. Contains various information
extracted through Lizard.
"""
def __init__(self, func):
"""
Initialize a method object. This is calculated using Lizard: it parses
the source code of all the modifications in a commit, extracting
information of the methods contained in the file (if the file is a
source code written in one of the supported programming languages).
"""
self.name = func.name
self.long_name = func.long_name
self.filename = func.filename
self.nloc = func.nloc
self.complexity = func.cyclomatic_complexity
self.token_count = func.token_count
self.parameters = func.parameters
self.start_line = func.start_line
self.end_line = func.end_line
self.fan_in = func.fan_in
self.fan_out = func.fan_out
self.general_fan_out = func.general_fan_out
self.length = func.length
self.top_nesting_level = func.top_nesting_level
def __eq__(self, other):
return self.name == other.name and self.parameters == other.parameters
def __hash__(self):
# parameters are used in hashing in order to
# prevent collisions when overloading method names
return hash(
(
"name",
self.name,
"long_name",
self.long_name,
"params",
(x for x in self.parameters),
)
)
UNIT_SIZE_LOW_RISK_THRESHOLD = 15
"""
Threshold used in the Delta Maintainability Model to establish whether a method
is low risk in terms of its size.
The procedure to obtain the threshold is described in the
:ref:`PyDriller documentation <Properties>`.
"""
UNIT_COMPLEXITY_LOW_RISK_THRESHOLD = 5
"""
Threshold used in the Delta Maintainability Model to establish whether a method
is low risk in terms of its cyclomatic complexity.
The procedure to obtain the threshold is described in the
:ref:`PyDriller documentation <Properties>`.
"""
UNIT_INTERFACING_LOW_RISK_THRESHOLD = 2
"""
Threshold used in the Delta Maintainability Model to establish whether a method
is low risk in terms of its interface.
The procedure to obtain the threshold is described in the
:ref:`PyDriller documentation <Properties>`.
"""
def is_low_risk(self, dmm_prop: DMMProperty) -> bool:
"""
Predicate indicating whether this method is low risk in terms of
the given property.
:param dmm_prop: Property according to which this method is considered risky.
:return: True if and only if the method is considered low-risk w.r.t. this property.
"""
if dmm_prop is DMMProperty.UNIT_SIZE:
return self.nloc <= Method.UNIT_SIZE_LOW_RISK_THRESHOLD
if dmm_prop is DMMProperty.UNIT_COMPLEXITY:
return self.complexity <= Method.UNIT_COMPLEXITY_LOW_RISK_THRESHOLD
assert dmm_prop is DMMProperty.UNIT_INTERFACING
return (
len(self.parameters) <= Method.UNIT_INTERFACING_LOW_RISK_THRESHOLD
)
class ModifiedFile:
"""
This class contains information regarding a modified file in a commit.
"""
def __init__(
self,
old_path: Optional[str],
new_path: Optional[str],
change_type: ModificationType,
diff_and_sc: Dict[str, str],
):
"""
Initialize a modified file. A modified file carries on information
regarding the changed file. Normally, you shouldn't initialize a new
one.
"""
self._old_path = Path(old_path) if old_path is not None else None
self._new_path = Path(new_path) if new_path is not None else None
self.change_type = change_type
self.diff = diff_and_sc["diff"]
self.source_code = diff_and_sc["source_code"]
self.source_code_before = diff_and_sc["source_code_before"]
self._nloc = None
self._complexity = None
self._token_count = None
self._function_list = [] # type: List[Method]
self._function_list_before = [] # type: List[Method]
def __hash__(self):
"""
        Implements hashing similar to how Git would do it. Alternatively, if the
        object had the hash of the Git Blob, one could use that directly.
:return: int hash
"""
string = " ".join(
[self.change_type.name, self.new_path, self.source_code]
)
return hash(hashlib.sha256(string.encode("utf-8")).hexdigest())
@property
def added_lines(self) -> int:
"""
Return the total number of added lines in the file.
:return: int lines_added
"""
added_lines = 0
for line in self.diff.replace("\r", "").split("\n"):
if line.startswith("+") and not line.startswith("+++"):
added_lines += 1
return added_lines
@property
def deleted_lines(self):
"""
Return the total number of deleted lines in the file.
:return: int lines_deleted
"""
deleted_lines = 0
for line in self.diff.replace("\r", "").split("\n"):
if line.startswith("-") and not line.startswith("---"):
deleted_lines += 1
return deleted_lines
@property
def old_path(self):
"""
Old path of the file. Can be None if the file is added.
:return: str old_path
"""
if self._old_path is not None:
return str(self._old_path)
return None
@property
def new_path(self):
"""
New path of the file. Can be None if the file is deleted.
:return: str new_path
"""
if self._new_path is not None:
return str(self._new_path)
return None
@property
def filename(self) -> str:
"""
Return the filename. Given a path-like-string (e.g.
"/Users/dspadini/pydriller/myfile.py") returns only the filename
(e.g. "myfile.py")
:return: str filename
"""
if self._new_path is not None and str(self._new_path) != "/dev/null":
path = self._new_path
else:
assert self._old_path
path = self._old_path
return path.name
@property
def language_supported(self) -> bool:
"""
Return whether the language used in the modification can be analyzed by Pydriller.
Languages are derived from the file extension.
Supported languages are those supported by Lizard.
:return: True iff language of this Modification can be analyzed.
"""
return lizard_languages.get_reader_for(self.filename) is not None
@property
def nloc(self) -> Optional[int]:
"""
Calculate the LOC of the file.
:return: LOC of the file
"""
self._calculate_metrics()
return self._nloc
@property
def complexity(self) -> Optional[int]:
"""
Calculate the Cyclomatic Complexity of the file.
:return: Cyclomatic Complexity of the file
"""
self._calculate_metrics()
return self._complexity
@property
def token_count(self) -> Optional[int]:
"""
Calculate the token count of functions.
:return: token count
"""
self._calculate_metrics()
return self._token_count
@property
def diff_parsed(self) -> Dict[str, List[Tuple[int, str]]]:
"""
Returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:return: Dictionary
"""
lines = self.diff.split("\n")
modified_lines = {
"added": [],
"deleted": [],
} # type: Dict[str, List[Tuple[int, str]]]
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith("@@"):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith("-"):
modified_lines["deleted"].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith("+"):
modified_lines["added"].append((count_additions, line[1:]))
count_deletions -= 1
if line == r"\ No newline at end of file":
count_deletions -= 1
count_additions -= 1
return modified_lines
@staticmethod
def _get_line_numbers(line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = (
int(numbers_old_file.split(",")[0].replace("-", "")) - 1
)
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
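    # Worked example (not in the original source): for the hunk header
    #   "@@ -10,7 +11,9 @@ def foo():"
    # token[1] is "-10,7" and token[2] is "+11,9", so this returns (9, 10);
    # diff_parsed then numbers deleted lines from 10 (old file) and added
    # lines from 11 (new file) as it walks the hunk body.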
@property
def methods(self) -> List[Method]:
"""
Return the list of methods in the file. Every method
contains various information like complexity, loc, name,
number of parameters, etc.
:return: list of methods
"""
self._calculate_metrics()
return self._function_list
@property
def methods_before(self) -> List[Method]:
"""
Return the list of methods in the file before the
change happened. Each method will have all specific
info, e.g. complexity, loc, name, etc.
:return: list of methods
"""
self._calculate_metrics(include_before=True)
return self._function_list_before
@property
def changed_methods(self) -> List[Method]:
"""
Return the list of methods that were changed. This analysis
is more complex because Lizard runs twice: for methods before
and after the change
:return: list of methods
"""
new_methods = self.methods
old_methods = self.methods_before
added = self.diff_parsed["added"]
deleted = self.diff_parsed["deleted"]
methods_changed_new = {
y
for x in added
for y in new_methods
if y.start_line <= x[0] <= y.end_line
}
methods_changed_old = {
y
for x in deleted
for y in old_methods
if y.start_line <= x[0] <= y.end_line
}
return list(methods_changed_new.union(methods_changed_old))
@staticmethod
def _risk_profile(
methods: List[Method], dmm_prop: DMMProperty
) -> Tuple[int, int]:
"""
Return the risk profile of the set of methods, with two bins: risky, or non risky.
The risk profile is a pair (v_low, v_high), where
v_low is the volume of the low risk methods in the list, and
v_high is the volume of the high risk methods in the list.
:param methods: List of methods for which risk profile is to be determined
:param dmm_prop: Property indicating the type of risk
:return: total risk profile for methods according to property.
"""
low = sum([m.nloc for m in methods if m.is_low_risk(dmm_prop)])
high = sum([m.nloc for m in methods if not m.is_low_risk(dmm_prop)])
return low, high
def _delta_risk_profile(self, dmm_prop: DMMProperty) -> Tuple[int, int]:
"""
        Return the delta risk profile of this modified file, which is a pair (dv1, dv2), where
dv1 is the total change in volume (lines of code) of low risk methods, and
dv2 is the total change in volume of the high risk methods.
:param dmm_prop: Property indicating the type of risk
:return: total delta risk profile for this property.
"""
assert self.language_supported
low_before, high_before = self._risk_profile(
self.methods_before, dmm_prop
)
low_after, high_after = self._risk_profile(self.methods, dmm_prop)
return low_after - low_before, high_after - high_before
def _calculate_metrics(self, include_before=False):
"""
        :param include_before: whether to also compute the metrics
for source_code_before, i.e. before the change happened
"""
if not self.language_supported:
return
if self.source_code and self._nloc is None:
analysis = lizard.analyze_file.analyze_source_code(
self.filename, self.source_code
)
self._nloc = analysis.nloc
self._complexity = analysis.CCN
self._token_count = analysis.token_count
for func in analysis.function_list:
self._function_list.append(Method(func))
if (
include_before
and self.source_code_before
and not self._function_list_before
):
anal = lizard.analyze_file.analyze_source_code(
self.filename, self.source_code_before
)
self._function_list_before = [Method(x) for x in anal.function_list]
def __eq__(self, other):
if not isinstance(other, ModifiedFile):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, conf) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param conf: Configuration class
"""
self._c_object = commit
self._modified_files = None
self._branches = None
self._conf = conf
def __hash__(self):
"""
        Since the SHA already identifies the commit in Git, use it as the
        hash value.
:return: int hash
"""
# Unfortunately, the Git hash cannot be used for the Python object
# directly. The documentation says it "should" return an integer
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
# but I just learned it **has** to return one.
return hash(self._c_object.hexsha)
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> Developer:
"""
Return the author of the commit as a Developer object.
:return: author
"""
return Developer(
self._c_object.author.name, self._c_object.author.email
)
@property
def committer(self) -> Developer:
"""
Return the committer of the commit as a Developer object.
:return: committer
"""
return Developer(
self._c_object.committer.name, self._c_object.committer.email
)
@property
def project_name(self) -> str:
"""
Return the project name.
:return: project name
"""
return Path(self._conf.get("path_to_repo")).name
@property
def project_path(self) -> str:
"""
Return the absolute path of the project.
:return: project path
"""
return str(Path(self._conf.get("path_to_repo")))
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
"""
Return the list of parents SHAs.
:return: List[str] parents
"""
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents
@property
def merge(self) -> bool:
"""
Return True if the commit is a merge, False otherwise.
:return: bool merge
"""
return len(self._c_object.parents) > 1
@property
def insertions(self) -> int:
"""
Return the number of added lines in the commit (as shown from --shortstat).
:return: int insertion lines
"""
return self._c_object.stats.total["insertions"]
@property
def deletions(self) -> int:
"""
Return the number of deleted lines in the commit (as shown from --shortstat).
:return: int deletion lines
"""
return self._c_object.stats.total["deletions"]
@property
def lines(self) -> int:
"""
Return the number of modified lines in the commit (as shown from --shortstat).
:return: int insertion + deletion lines
"""
return self._c_object.stats.total["lines"]
@property
def files(self) -> int:
"""
Return the number of modified files of the commit (as shown from --shortstat).
:return: int modified files number
"""
return self._c_object.stats.total["files"]
@property
def modified_files(self) -> List[ModifiedFile]:
"""
Return a list of modified files. The list is empty if the commit is
a merge commit. For more info on this, see
https://haacked.com/archive/2014/02/21/reviewing-merge-commits/ or
https://github.com/ishepard/pydriller/issues/89#issuecomment-590243707
:return: List[Modification] modifications
"""
if self._modified_files is None:
self._modified_files = self._get_modified_files()
assert self._modified_files is not None
return self._modified_files
def _get_modified_files(self):
options = {}
if self._conf.get("histogram"):
options["histogram"] = True
if self._conf.get("skip_whitespaces"):
options["w"] = True
if len(self.parents) == 1:
# the commit has a parent
diff_index = self._c_object.parents[0].diff(
self._c_object, create_patch=True, **options
)
elif len(self.parents) > 1:
# if it's a merge commit, the modified files of the commit are the
# conflicts. This because if the file is not in conflict,
# pydriller will visit the modification in one of the previous
# commits. However, parsing the output of a combined diff (that
# returns the list of conflicts) is challenging: so, right now,
# I will return an empty array, in the meanwhile I will try to
# find a way to parse the output.
# c_git = Git(str(self.project_path))
# d = c_git.diff_tree("--cc", commit.hexsha, '-r', '--abbrev=40',
# '--full-index', '-M', '-p', '--no-color')
diff_index = []
else:
# this is the first commit of the repo. Comparing it with git
# NULL TREE
diff_index = self._c_object.diff(
NULL_TREE, create_patch=True, **options
)
return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[ModifiedFile]:
modified_files_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
"diff": self._get_decoded_str(diff.diff),
"source_code_before": self._get_decoded_sc_str(diff.a_blob),
"source_code": self._get_decoded_sc_str(diff.b_blob),
}
modified_files_list.append(
ModifiedFile(old_path, new_path, change_type, diff_and_sc)
)
return modified_files_list
def _get_decoded_str(self, diff):
try:
return diff.decode("utf-8", "ignore")
except (AttributeError, ValueError):
logger.debug(
"Could not load the diff of a " "file in commit %s",
self._c_object.hexsha,
)
return None
def _get_decoded_sc_str(self, diff):
try:
return diff.data_stream.read().decode("utf-8", "ignore")
except (AttributeError, ValueError):
logger.debug(
"Could not load source code of a " "file in commit %s",
self._c_object.hexsha,
)
return None
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._conf.get("main_branch") in self.branches
@property
def branches(self) -> Set[str]:
"""
Return the set of branches that contain the commit.
:return: set(str) branches
"""
if self._branches is None:
self._branches = self._get_branches()
assert self._branches is not None
return self._branches
def _get_branches(self):
c_git = Git(str(self._conf.get("path_to_repo")))
branches = set()
args = ["--contains", self.hash]
if self._conf.get("include_remotes"):
args = ["-r"] + args
if self._conf.get("include_refs"):
args = ["-a"] + args
for branch in set(c_git.branch(*args).split("\n")):
branches.add(branch.strip().replace("* ", ""))
return branches
@property
def dmm_unit_size(self) -> Optional[float]:
"""
Return the Delta Maintainability Model (DMM) metric value for the unit size property.
It represents the proportion (between 0.0 and 1.0) of maintainability improving
change, when considering the lengths of the modified methods.
It rewards (value close to 1.0) modifications to low-risk (small) methods,
        or splitting risky (large) ones.
It penalizes (value close to 0.0) working on methods that remain large
or get larger.
:return: The DMM value (between 0.0 and 1.0) for method size in this commit,
or None if none of the programming languages in the commit are supported.
"""
return self._delta_maintainability(DMMProperty.UNIT_SIZE)
@property
def dmm_unit_complexity(self) -> Optional[float]:
"""
Return the Delta Maintainability Model (DMM) metric value for the unit complexity property.
It represents the proportion (between 0.0 and 1.0) of maintainability improving
change, when considering the cyclomatic complexity of the modified methods.
It rewards (value close to 1.0) modifications to low-risk (low complexity) methods,
        or splitting risky (highly complex) ones.
It penalizes (value close to 0.0) working on methods that remain complex
or get more complex.
:return: The DMM value (between 0.0 and 1.0) for method complexity in this commit.
or None if none of the programming languages in the commit are supported.
"""
return self._delta_maintainability(DMMProperty.UNIT_COMPLEXITY)
@property
def dmm_unit_interfacing(self) -> Optional[float]:
"""
Return the Delta Maintainability Model (DMM) metric value for the unit interfacing property.
It represents the proportion (between 0.0 and 1.0) of maintainability improving
change, when considering the interface (number of parameters) of the modified methods.
It rewards (value close to 1.0) modifications to low-risk (with few parameters) methods,
        or splitting risky (with many parameters) ones.
It penalizes (value close to 0.0) working on methods that continue to have
or are extended with too many parameters.
:return: The dmm value (between 0.0 and 1.0) for method interfacing in this commit.
or None if none of the programming languages in the commit are supported.
"""
return self._delta_maintainability(DMMProperty.UNIT_INTERFACING)
def _delta_maintainability(self, dmm_prop: DMMProperty) -> Optional[float]:
"""
Compute the Delta Maintainability Model (DMM) value for the given risk predicate.
The DMM value is computed as the proportion of good change in the commit:
        Good changes: Adding low risk code or removing high risk code.
Bad changes: Adding high risk code or removing low risk code.
:param dmm_prop: Property indicating the type of risk
        :return: DMM value (between 0.0 and 1.0) for the given property.
"""
delta_profile = self._delta_risk_profile(dmm_prop)
if delta_profile:
(delta_low, delta_high) = delta_profile
return self._good_change_proportion(delta_low, delta_high)
return None
def _delta_risk_profile(
self, dmm_prop: DMMProperty
) -> Optional[Tuple[int, int]]:
"""
        Return the delta risk profile of this commit, which is a pair (dv1, dv2), where
dv1 is the total change in volume (lines of code) of low risk methods, and
dv2 is the total change in volume of the high risk methods.
:param dmm_prop: Property indicating the type of risk
:return: total delta risk profile for this commit.
"""
supported_modifications = [
mod for mod in self.modified_files if mod.language_supported
]
if supported_modifications:
deltas = [
mod._delta_risk_profile(dmm_prop)
for mod in supported_modifications
]
delta_low = sum(dlow for (dlow, dhigh) in deltas)
delta_high = sum(dhigh for (dlow, dhigh) in deltas)
return delta_low, delta_high
return None
@staticmethod
def _good_change_proportion(
low_risk_delta: int, high_risk_delta: int
) -> Optional[float]:
"""
Given a delta risk profile, compute the proportion of "good" change in the total change.
Increasing low risk code, or decreasing high risk code, is considered good.
Other types of changes are considered not good.
:return: proportion of good change in total change, or None if the total change is zero.
"""
bad_change, good_change = (0, 0)
if low_risk_delta >= 0:
good_change = low_risk_delta
else:
bad_change = abs(low_risk_delta)
if high_risk_delta >= 0:
bad_change += high_risk_delta
else:
good_change += abs(high_risk_delta)
assert good_change >= 0 and bad_change >= 0
total_change = good_change + bad_change
if total_change == 0:
proportion = None
else:
proportion = good_change / total_change
assert 0.0 <= proportion <= 1.0
return proportion
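    # Worked example (not in the original source): a commit that adds 10 lines
    # to low-risk methods and removes 4 lines from high-risk ones has delta
    # profile (+10, -4); good_change = 10 + 4 = 14, bad_change = 0, so the DMM
    # proportion is 14 / 14 = 1.0. Conversely, (-10, +4) yields 0 / 14 = 0.0.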
@staticmethod
def _from_change_to_modification_type(diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
|
the-stack_106_20090
|
_base_ = [
    '../_base_/models/dpt_vit-b16.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
backbone=dict(drop_path_rate=0.1, final_norm=True),
decode_head=dict(num_classes=150),
auxiliary_head=dict(num_classes=150),
test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)))
# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.00006,
betas=(0.9, 0.999),
weight_decay=0.01,
paramwise_cfg=dict(
custom_keys={
'pos_embed': dict(decay_mult=0.),
'cls_token': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
lr_config = dict(
_delete_=True,
policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0,
min_lr=0.0,
by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
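# --- Hedged usage note (not part of the original config) ---------------------
# In an MMSegmentation checkout this config would normally be consumed as
#   python tools/train.py configs/dpt/<this_config>.py
# with 8 GPUs x 2 images per GPU as stated above; the tools/train.py entry
# point is the framework's convention, not something defined in this file.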
|
the-stack_106_20095
|
import os
import oneflow.experimental as flow
import argparse
import numpy as np
import time
from utils.data_utils import load_image
from utils.utils import to_numpy, to_tensor, save_images
from models.networks import Generator
def main(args):
test_x, test_y = load_image(args.image_path)
test_inp = to_tensor(test_x.astype(np.float32))
test_target = to_tensor(test_y.astype(np.float32))
generator = Generator().to("cuda")
start_t = time.time()
pretrain_model = flow.load(args.model_path)
generator.load_state_dict(pretrain_model)
end_t = time.time()
print("load params time : {}".format(end_t - start_t))
start_t = time.time()
generator.eval()
with flow.no_grad():
gout = to_numpy(generator(test_inp), False)
end_t = time.time()
print("infer time : {}".format(end_t - start_t))
# save images
save_images(
gout,
test_inp.numpy(),
test_target.numpy(),
path=os.path.join("./testimage.png"),
plot_size=1,
)
if __name__ == "__main__":
flow.enable_eager_execution()
parser = argparse.ArgumentParser(description="oneflow PIX2PIX")
parser.add_argument("--model_path", type=str, required=True, help="model path")
parser.add_argument(
"--image_path", type=str, required=True, help="input image path"
)
args = parser.parse_args()
main(args)
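# --- Hedged usage note (not part of the original script) ---------------------
# Invocation follows directly from the argparse definition above, e.g.:
#   python infer.py --model_path ./saved_generator --image_path ./facades/1.jpg
# The script name and paths are placeholders; only --model_path and
# --image_path are required by this script.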
|
the-stack_106_20096
|
import unittest
import numpy
try:
import scipy.sparse # NOQA
scipy_available = True
except ImportError:
scipy_available = False
import cupy
import cupy.sparse
from cupy import testing
def _make(xp, sp, dtype):
data = xp.array([[0, 1, 2], [3, 4, 5]], dtype)
offsets = xp.array([0, -1], 'i')
# 0, 0, 0, 0
# 3, 1, 0, 0
# 0, 4, 2, 0
return sp.dia_matrix((data, offsets), shape=(3, 4))
def _make_empty(xp, sp, dtype):
data = xp.array([[]], 'f')
offsets = xp.array([0], 'i')
return sp.dia_matrix((data, offsets), shape=(3, 4))
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
class TestDiaMatrix(unittest.TestCase):
def setUp(self):
self.m = _make(cupy, cupy.sparse, self.dtype)
def test_dtype(self):
self.assertEqual(self.m.dtype, self.dtype)
def test_data(self):
self.assertEqual(self.m.data.dtype, self.dtype)
testing.assert_array_equal(
self.m.data, cupy.array([[0, 1, 2], [3, 4, 5]], self.dtype))
def test_offsets(self):
self.assertEqual(self.m.offsets.dtype, numpy.int32)
testing.assert_array_equal(
self.m.offsets, cupy.array([0, -1], self.dtype))
def test_shape(self):
self.assertEqual(self.m.shape, (3, 4))
def test_ndim(self):
self.assertEqual(self.m.ndim, 2)
def test_nnz(self):
self.assertEqual(self.m.nnz, 5)
@unittest.skipUnless(scipy_available, 'requires scipy')
def test_str(self):
if numpy.dtype(self.dtype).kind == 'f':
expect = ''' (1, 1)\t1.0
(2, 2)\t2.0
(1, 0)\t3.0
(2, 1)\t4.0'''
else:
expect = ''' (1, 1)\t(1+0j)
(2, 2)\t(2+0j)
(1, 0)\t(3+0j)
(2, 1)\t(4+0j)'''
self.assertEqual(str(self.m), expect)
def test_toarray(self):
m = self.m.toarray()
expect = [
[0, 0, 0, 0],
[3, 1, 0, 0],
[0, 4, 2, 0]
]
self.assertTrue(m.flags.c_contiguous)
cupy.testing.assert_allclose(m, expect)
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
class TestDiaMatrixInit(unittest.TestCase):
def setUp(self):
self.shape = (3, 4)
def data(self, xp):
return xp.array([[1, 2, 3], [4, 5, 6]], self.dtype)
def offsets(self, xp):
return xp.array([0, -1], 'i')
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_shape_none(self, xp, sp):
sp.dia_matrix(
(self.data(xp), self.offsets(xp)), shape=None)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_large_rank_offset(self, xp, sp):
sp.dia_matrix(
(self.data(xp), self.offsets(xp)[None]), shape=self.shape)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_large_rank_data(self, xp, sp):
sp.dia_matrix(
(self.data(xp)[None], self.offsets(xp)), shape=self.shape)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_data_offsets_different_size(self, xp, sp):
offsets = xp.array([0, -1, 1], 'i')
sp.dia_matrix(
(self.data(xp), offsets), shape=self.shape)
@testing.numpy_cupy_raises(sp_name='sp', accept_error=ValueError)
def test_duplicated_offsets(self, xp, sp):
offsets = xp.array([1, 1], 'i')
sp.dia_matrix(
(self.data(xp), offsets), shape=self.shape)
@testing.parameterize(*testing.product({
'make_method': ['_make', '_make_empty'],
'dtype': [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128],
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
class TestDiaMatrixScipyComparison(unittest.TestCase):
@property
def make(self):
return globals()[self.make_method]
@testing.numpy_cupy_equal(sp_name='sp')
def test_nnz_axis(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.nnz
@testing.numpy_cupy_raises(sp_name='sp', accept_error=NotImplementedError)
def test_nnz_axis_not_none(self, xp, sp):
m = self.make(xp, sp, self.dtype)
m.getnnz(axis=0)
@testing.numpy_cupy_allclose(sp_name='sp')
def test_toarray(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_A(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.A
@testing.numpy_cupy_raises(sp_name='sp')
def test_sum_tuple_axis(self, xp, sp):
m = _make(xp, sp, self.dtype)
m.sum(axis=(0, 1))
@testing.numpy_cupy_raises(sp_name='sp')
def test_sum_float_axis(self, xp, sp):
m = _make(xp, sp, self.dtype)
m.sum(axis=0.0)
@testing.numpy_cupy_raises(sp_name='sp')
def test_sum_too_large_axis(self, xp, sp):
m = _make(xp, sp, self.dtype)
m.sum(axis=3)
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocoo(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.tocoo().toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocoo_copy(self, xp, sp):
m = self.make(xp, sp, self.dtype)
n = m.tocoo(copy=True)
self.assertIsNot(m.data, n.data)
return n.toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocsc(self, xp, sp):
m = _make(xp, sp, self.dtype)
return m.tocsc().toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocsc_copy(self, xp, sp):
m = self.make(xp, sp, self.dtype)
n = m.tocsc(copy=True)
self.assertIsNot(m.data, n.data)
return n.toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocsr(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.tocsr().toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_tocsr_copy(self, xp, sp):
m = self.make(xp, sp, self.dtype)
n = m.tocsr(copy=True)
self.assertIsNot(m.data, n.data)
return n.toarray()
@testing.numpy_cupy_allclose(sp_name='sp')
def test_transpose(self, xp, sp):
m = self.make(xp, sp, self.dtype)
return m.transpose().toarray()
@testing.parameterize(*testing.product({
'dtype': [numpy.float32, numpy.float64],
'ret_dtype': [None, numpy.float32, numpy.float64],
'axis': [None, 0, 1, -1, -2],
}))
@unittest.skipUnless(scipy_available, 'requires scipy')
class TestDiaMatrixSum(unittest.TestCase):
@testing.numpy_cupy_allclose(sp_name='sp')
def test_sum(self, xp, sp):
m = _make(xp, sp, self.dtype)
return m.sum(axis=self.axis, dtype=self.ret_dtype)
@testing.numpy_cupy_allclose(sp_name='sp')
def test_sum_with_out(self, xp, sp):
m = _make(xp, sp, self.dtype)
if self.axis is None:
shape = ()
else:
shape = list(m.shape)
shape[self.axis] = 1
shape = tuple(shape)
out = xp.empty(shape, dtype=self.ret_dtype)
if xp is numpy:
# TODO(unno): numpy.matrix is used for scipy.sparse though
# cupy.ndarray is used for cupy.sparse.
out = xp.asmatrix(out)
return m.sum(axis=self.axis, dtype=self.ret_dtype, out=out)
class TestIsspmatrixDia(unittest.TestCase):
def test_dia(self):
x = cupy.sparse.dia_matrix(
(cupy.array([], 'f'),
cupy.array([0], 'i')),
shape=(0, 0), dtype='f')
self.assertTrue(cupy.sparse.isspmatrix_dia(x))
def test_csr(self):
x = cupy.sparse.csr_matrix(
(cupy.array([], 'f'),
cupy.array([], 'i'),
cupy.array([0], 'i')),
shape=(0, 0), dtype='f')
self.assertFalse(cupy.sparse.isspmatrix_dia(x))
|
the-stack_106_20097
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from siem import utils
def transform(logdata):
identifier = utils.cluster_instance_identifier(logdata)
logdata['rds']['cluster_identifier'] = identifier['cluster']
logdata['rds']['instance_identifier'] = identifier['instance']
logdata['mysql_timestamp'] = utils.convrt_micro_epoch_to_seconds_epoch(
logdata['mysql_timestamp'])
if 'mysql_object' in logdata:
logdata['rds']['query'] = logdata['mysql_object'].rstrip(';').encode(
"utf-8").decode("unicode-escape")[1:-1]
if 'mysql_operation' in logdata:
if logdata['mysql_operation'] in ('FAILED_CONNECT', ):
logdata['event']['category'] = 'authentication'
logdata['event']['type'] = 'start'
logdata['event']['action'] = 'failed'
if logdata['mysql_operation'] in ('CONNECT', ):
logdata['event']['category'] = 'authentication'
logdata['event']['type'] = 'start'
logdata['event']['action'] = 'authorized'
if logdata['mysql_operation'] in ('DISCONNECT', ):
logdata['event']['category'] = 'authentication'
logdata['event']['type'] = 'end'
logdata['event']['action'] = 'disconnected'
if 'mysql_retcode' in logdata:
if logdata['mysql_retcode'] == 0:
logdata['event']['outcome'] = 'success'
else:
logdata['event']['outcome'] = 'failure'
logdata = utils.convert_underscore_field_into_dot_notation(
'mysql', logdata)
return logdata
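# Illustrative example of the mapping above (the field values are hypothetical,
# not taken from the original project): an audit record with
# mysql_operation == 'FAILED_CONNECT' and mysql_retcode == 1045 would come out
# with event.category = 'authentication', event.type = 'start',
# event.action = 'failed' and event.outcome = 'failure'.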
|
the-stack_106_20100
|
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name='hovertext', parent_name='pie', **kwargs):
super(HovertextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'style'),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
the-stack_106_20101
|
# originalString = str(input("Type in any string: "))
# You can use the input but for testing I used a predefined string
originalString = "Test"
def stringToAscii(string):
NumberDec = []
NumberBin = []
NumberHex = []
NumberOct = []
for i in string:
NumberBin.append(bin(ord(i)))
NumberHex.append(hex(ord(i)))
NumberOct.append(oct(ord(i)))
NumberDec.append(ord(i))
message = ""
message += "Decimal numbers: "
for i in NumberDec:
message += str(i) + ", "
message += "\n"
message += "Hex numbers: "
for i in NumberHex:
message += str(i)[2:] + ", "
message += "\n"
message += "Octal numbers: "
for i in NumberOct:
message += str(i)[2:] + ", "
message += "\n"
message += "Binary numbers: "
for i in NumberBin:
message += str(i)[2:] + ", "
    return message
print(stringToAscii(originalString))
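# For the predefined string "Test" the function above is expected to print
# something like (trailing separators kept exactly as the code produces them):
# Decimal numbers: 84, 101, 115, 116,
# Hex numbers: 54, 65, 73, 74,
# Octal numbers: 124, 145, 163, 164,
# Binary numbers: 1010100, 1100101, 1110011, 1110100,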
|
the-stack_106_20102
|
"""
This is, for now, a Raspberry Pi-only worker!
The libcamera project (in development) aims to offer an open source camera
stack for Linux, ChromeOS and Android.
It will be able to detect and manage all of the cameras exposed on the system,
whether connected via USB or CSI (Raspberry Pi camera).
libcamera developers plan to provide Python bindings:
https://www.raspberrypi.org/blog/an-open-source-camera-stack-for-raspberry-pi-using-libcamera/#comment-1528789
Not available at time of writing: 9 Nov 2020
Once available, we should look forward to migrating to this library, as it
would allow our worker to support multiple boards and devices.
"""
import datetime
import json
import os
import sys
import threading
import time
from picamera import PiCamera
from utils import get_config_item
from workers.linux.worker import Worker
from logger.Logger import Logger, LOG_LEVEL
class CameraWorker(Worker):
def __init__(self, config, main_thread_running, system_ready,
camera_available):
super().__init__(config, main_thread_running, system_ready)
self.pending_reset = False
# Events
self.camera_available = camera_available
# Dynamic Properties based on config
self.path = get_config_item(self.config, 'path', '/etc/mudpi/img/')
self.topic = get_config_item(
self.config, 'topic', 'mudpi/camera/', replace_char="/"
)
if self.config['resolution'] is not None:
self.resolutionX = int(self.config['resolution'].get('x', 1920))
self.resolutionY = int(self.config['resolution'].get('y', 1080))
if self.config['delay'] is not None:
self.hours = int(self.config['delay'].get('hours', 0))
self.minutes = int(self.config['delay'].get('minutes', 0))
self.seconds = int(self.config['delay'].get('seconds', 0))
self.init()
return
def init(self):
try:
self.camera = PiCamera(
resolution=(self.resolutionX, self.resolutionY))
# Below we calibrate the camera for consistent imaging
self.camera.framerate = 30
# Wait for the automatic gain control to settle
time.sleep(2)
# Now fix the values
self.camera.shutter_speed = self.camera.exposure_speed
self.camera.exposure_mode = 'off'
g = self.camera.awb_gains
self.camera.awb_mode = 'off'
self.camera.awb_gains = g
except Exception:
self.camera = PiCamera()
# Pubsub Listeners
self.pubsub = self.r.pubsub()
self.pubsub.subscribe(**{self.topic: self.handle_event})
Logger.log(
LOG_LEVEL["info"],
'Camera Worker...\t\t\t\033[1;32m Ready\033[0;0m'
)
return
def run(self):
thread = threading.Thread(target=self.work, args=())
thread.start()
self.listener = threading.Thread(target=self.listen, args=())
self.listener.start()
Logger.log(
LOG_LEVEL["info"],
'Camera Worker...\t\t\t\033[1;32m Online\033[0;0m'
)
return thread
def wait(self):
# Calculate the delay
try:
self.next_time = (datetime.datetime.now() + datetime.timedelta(
hours=self.hours, minutes=self.minutes,
seconds=self.seconds)).replace(microsecond=0)
except Exception:
# Default every hour
self.next_time = (
datetime.datetime.now() + datetime.timedelta(hours=1)
).replace(minute=0, second=0, microsecond=0)
delay = (self.next_time - datetime.datetime.now()).seconds
time.sleep(delay)
def handle_event(self, message):
data = message['data']
decoded_message = None
if data is not None:
try:
if isinstance(data, dict):
decoded_message = data
elif isinstance(data.decode('utf-8'), str):
temp = json.loads(data.decode('utf-8'))
decoded_message = temp
if decoded_message['event'] == 'Timelapse':
Logger.log(
LOG_LEVEL["info"],
"Camera Signaled for Reset"
)
self.camera_available.clear()
self.pending_reset = True
except Exception:
Logger.log(LOG_LEVEL["error"],
'Error Handling Event for Camera')
def listen(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
if self.camera_available.is_set():
self.pubsub.get_message()
time.sleep(1)
else:
delay = (
self.next_time - datetime.datetime.now()
).seconds + 15
# wait 15 seconds after next scheduled picture
time.sleep(delay)
self.camera_available.set()
else:
time.sleep(2)
return
def work(self):
self.reset_elapsed_time()
while self.main_thread_running.is_set():
if self.system_ready.is_set():
if self.camera_available.is_set():
# try:
for i, filename in enumerate(
self.camera.capture_continuous(
self.path + 'mudpi-{counter:05d}.jpg')):
if not self.camera_available.is_set():
if self.pending_reset:
try:
# cleanup previous file
os.remove(
filename
)
self.pending_reset = False
except Exception:
Logger.log(
LOG_LEVEL["error"],
"Error During Camera Reset Cleanup"
)
break
message = {'event': 'StateChanged', 'data': filename}
self.r.set('last_camera_image', filename)
self.r.publish(self.topic, json.dumps(message))
Logger.log(
LOG_LEVEL["debug"],
'Image Captured \033[1;36m%s\033[0;0m' % filename
)
self.wait()
# except:
# print("Camera Worker \t\033[1;31m Unexpected Error\033[0;0m")
# time.sleep(30)
else:
time.sleep(1)
self.reset_elapsed_time()
else:
# System not ready camera should be off
time.sleep(1)
self.reset_elapsed_time()
time.sleep(0.1)
# This is only ran after the main thread is shut down
self.camera.close()
self.listener.join()
self.pubsub.close()
Logger.log(
LOG_LEVEL["info"],
"Camera Worker Shutting Down...\t\t\033[1;32m Complete\033[0;0m"
)
|
the-stack_106_20104
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import click
import pyperclip
from six import string_types
from ..console import (
CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success, echo_waiting, echo_warning
)
from ...e2e import E2E_SUPPORTED_TYPES, derive_interface, start_environment, stop_environment
from ...testing import get_available_tox_envs
from ...utils import get_tox_file
from ....utils import dir_exists, file_exists, path_join
@click.command(
context_settings=CONTEXT_SETTINGS,
short_help='Start an environment'
)
@click.argument('check')
@click.argument('env')
@click.option('--agent', '-a', default='stackstate/stackstate-agent-2:latest', show_default=True,
help='The docker image of the agent to use')
@click.option('--dev/--prod', default=True, show_default=True,
help='Use the latest version of a check (or else what is shipped with the agent package)')
@click.option('--base', is_flag=True, help='Whether to use the latest version of the base check or what is shipped.\
Also will install all shared libraries')
@click.option('--api-key', '-k',
              help='Set the API key. Can also be picked up from the STS_API_KEY environment variable')
@click.option('--sts-url', '-u',
help='StackState product url, can also be picked up from STS_STS_URL environment variable')
@click.option('--cluster-name', '-c',
help='Kubernetes cluster name, can also be picked up from CLUSTER_NAME environment variable')
@click.pass_context
def start(ctx, check, env, agent, dev, base, api_key, sts_url, cluster_name):
"""Start an environment."""
if not file_exists(get_tox_file(check)):
abort('`{}` is not a testable check.'.format(check))
base_package = None
if base:
core_dir = os.path.expanduser(ctx.obj.get('core', ''))
if not dir_exists(core_dir):
if core_dir:
abort('`{}` directory does not exist.'.format(core_dir))
else:
abort('`core` config setting does not exist.')
base_package = path_join(core_dir, 'stackstate_checks_base')
if not dir_exists(base_package):
abort('`stackstate_checks_base` directory does not exist.')
envs = get_available_tox_envs(check, e2e_only=True)
if env not in envs:
echo_failure('`{}` is not an available environment.'.format(env))
echo_info('See what is available via `checksdev env ls {}`.'.format(check))
abort()
api_key = api_key or ctx.obj['sts_api_key']
if api_key is None:
echo_warning(
            'Environment/parameter variable STS_API_KEY does not exist; '
            'the default API_KEY will be used instead. You can also set the API key '
'by doing `checksdev config set sts_api_key`.'
)
sts_url = sts_url or ctx.obj['sts_sts_url']
if sts_url is None:
sts_url = "http://localhost:7077/stsAgent"
echo_warning(
'Environment/parameter variable STS_STS_URL does not exist;'
            ' defaulting to {}'.format(sts_url)
)
cluster_name = cluster_name or ctx.obj['cluster_name']
if cluster_name is not None:
echo_info(
            'Kubernetes cluster name has been set to {}'.format(cluster_name)
)
echo_waiting('Setting up environment `{}`... '.format(env), nl=False)
config, metadata, error = start_environment(check, env)
if error:
echo_failure('failed!')
echo_waiting('Stopping the environment...')
stop_environment(check, env, metadata=metadata)
abort(error)
echo_success('success!')
env_type = metadata['env_type']
# Support legacy config where agent5 and agent6 were strings
agent_ver = ctx.obj.get('agent{}'.format(agent), agent)
if isinstance(agent_ver, string_types):
agent_build = agent_ver
echo_warning(
'Agent field missing from checksdev config, please update to the latest config, '
'falling back to latest docker image...'
)
else:
agent_build = agent_ver.get(env_type, env_type)
interface = derive_interface(env_type)
if interface is None:
echo_failure('`{}` is an unsupported environment type.'.format(env_type))
echo_waiting('Stopping the environment...')
stop_environment(check, env, metadata=metadata)
abort()
if env_type not in E2E_SUPPORTED_TYPES and agent.isdigit():
        echo_failure('Configuration for default Agents is only available for Docker. You must specify the full build.')
echo_waiting('Stopping the environment...')
stop_environment(check, env, metadata=metadata)
abort()
environment = interface(check, env, base_package, config, metadata, agent_build, sts_url, api_key, cluster_name)
echo_waiting('Updating `{}`... '.format(agent_build), nl=False)
environment.update_agent()
echo_success('success!')
echo_waiting('Writing configuration for `{}`... '.format(env), nl=False)
environment.write_config()
echo_success('success!')
echo_waiting('Starting the Agent... ', nl=False)
result = environment.start_agent()
if result.code:
click.echo()
echo_info(result.stdout + result.stderr)
echo_failure('An error occurred.')
echo_waiting('Stopping the environment...')
stop_environment(check, env, metadata=metadata)
environment.remove_config()
abort()
echo_success('success!')
if base and not dev:
dev = True
echo_info(
'Will install the development version of the check too so the base package can import it (in editable mode)'
)
editable_warning = (
        '\nEnv will be started with an editable check install for the {} package. '
'This check will remain in an editable install after '
'the environment is torn down. Would you like to proceed?'
)
if base:
echo_waiting('Upgrading the base package to the development version... ', nl=False)
if environment.ENV_TYPE == 'local' and not click.confirm(editable_warning.format('base')):
echo_success('skipping')
else:
environment.update_base_package()
echo_success('success!')
if dev:
echo_waiting('Upgrading `{}` check to the development version... '.format(check), nl=False)
if environment.ENV_TYPE == 'local' and not click.confirm(editable_warning.format(environment.check)):
echo_success('skipping')
else:
environment.update_check()
echo_success('success!')
if dev or base:
        echo_waiting('Restarting agent to use the custom base/check... ', nl=False)
environment.restart_agent()
click.echo()
try:
pyperclip.copy(environment.config_file)
except Exception:
config_message = 'Config file: '
else:
config_message = 'Config file (copied to your clipboard): '
echo_success(config_message, nl=False)
echo_info(environment.config_file)
echo_success('To run this check, do: ', nl=False)
echo_info('checksdev env check {} {}'.format(check, env))
echo_success('To stop this check, do: ', nl=False)
echo_info('checksdev env stop {} {}'.format(check, env))
|
the-stack_106_20105
|
import tweepy
from pymongo import MongoClient
import time
# Get a cursor object
# CONSUMER_KEY = 'cwuOhOSiMHaqSjUsyfYRVltuE'
# CONSUMER_SECRET = 'JBZWaPi3ldDHgMo6NPr8MbRKEU2iHBW7xVzL094HjsoX33K4eJ'
# OAUTH_TOKEN = '842632842207203328-cNbwTaG4eW4rbQJwaG4RxtZkHJ51SoO'
# OAUTH_TOKEN_SECRET = 'IhypdlKWPYtpKJ8aWevWTPTyeTbtmffVRGsFcF9hXkQQg'
CONSUMER_KEY = 'cwuOhOSiMHaqSjUsyfYRVltuE', 'q4utaFepGhE5OjujyoruBOoQg', '8ZHXLlxEfLQm90U19fRC08NBx', 'r8dh3IsvxNOMH2UjSIoM00gnN', 'y5UTqSuap75HuKye2NPcr7rfI', 'tA56qfLnFMdUNHMMhrs5XtFUE'
CONSUMER_SECRET = 'JBZWaPi3ldDHgMo6NPr8MbRKEU2iHBW7xVzL094HjsoX33K4eJ', 'D5K3P5URNUTxKnoVnggiUFsNapuNLOSx5cB7Zh6Y4HhpBhhtNy', 'OjxPXXR4lDc666H2HUWtbQBtG2J5d2wDZY1B6XirQHpyZqGEbY', '0SqGVySPhZ8ngMYnQ05W8KPctMp8jbXGSdCo0qKxMYKwcewIrZ', 'blLk28TL9jEye1DH6CKDrlu4liu1kFssRMHglPXELUlzHzuhaP', 'mcyFIkViAEs2HgYAi5n9toH1rYnPKYMblYvUhz0rh7CL5mEG72'
OAUTH_TOKEN = '842632842207203328-cNbwTaG4eW4rbQJwaG4RxtZkHJ51SoO', '438291047-AWXl0LpNxZzjhdFA3FH7AJHtmLRK52QDJiKzq5Wz', '916331671372353536-lVwpfVwieRCuLmyP14j0lbXGuNcitcD', '917416602634768385-pXPkTeyW9vaysd4vZflYm2pZckkIeDn', '917723257217998848-uld992dlGdvz71FpxosLs7gjAUCuIbI', '915531741795962880-NFqV6fvMwNahmd4PWcxS9Yw2UEWhcks'
OAUTH_TOKEN_SECRET = 'IhypdlKWPYtpKJ8aWevWTPTyeTbtmffVRGsFcF9hXkQQg', 'o3kZKFF2s9ctgVpfDVRRpMbg6BMsGUIFWlJm9wSysKyyY', 'gdpRf9Qf2cU01yGPem2aJaP6sljaEah1lDdPRtyt2b75b', 'HweGKohJFWSMPDj1LwjoNExGIj1K2e7ApHdHpA7fcwl7F', 'bkDjxKNVddeDwBUIJo1mL5ENz3JTMD2Ka2jyJvAyGxsfC', 'J1p0QEWwbz6L9zjRXRsPqsRLcvzRG43UTL0mfrkj3wTs9'
t_id = 942058742123761665 - 1
end = False
def changeAPI(id):
a = tweepy.OAuthHandler(CONSUMER_KEY[id], CONSUMER_SECRET[id])
a.set_access_token(OAUTH_TOKEN[id], OAUTH_TOKEN_SECRET[id])
return tweepy.API(a)
def search_tweets():
global t_id
global end
max_id = t_id
for tweet in tweepy.Cursor(api.search, geocode="41.8781,-87.62979,10km", lang="en", include_entities=True, max_id=str(max_id-1)).items():
date = str(tweet.created_at)[8:10]
t_id = tweet.id
requiredDate = ['10','11', '12', '13', '14','15', '16']
print(tweet.created_at)
if date not in requiredDate:
print(tweet.text)
end = True
print('end', end)
break
#no geo info
if(tweet.user.geo_enabled == False):
print(tweet._json)
a = tweet._json
a["_id"] = a["id"]
try:
db.MonToWed_hist_chicago_nogeolocation.insert_one(a).inserted_id
except:
print("duplicated!")
#exact coord
if (tweet.user.geo_enabled == True and tweet.coordinates != None):
print(tweet._json)
a = tweet._json
a["_id"] = a["id"]
try:
db.MonToWed_hist_chicago_geo_coordinates.insert_one(a).inserted_id
except:
print("duplicated!")
##bounding box
if (tweet.user.geo_enabled == True and tweet.place != None and tweet.coordinates == None):
# print(decodeData(tweet))
print(tweet.place.bounding_box.coordinates)
a = tweet._json
a["_id"] = a["id"]
try:
db.MonToWed_hist_chicago_bounding_box.insert_one(tweet._json).inserted_id
except:
print("duplicated!")
def decodeData(status):
return {
"retweet_count": status.retweet_count,
"favorited": status.favorited,
"in_reply_to_user_id": status.in_reply_to_user_id,
"created_at": status.created_at,
"coordinates": status.coordinates,
"user": {
"created_at": status.user.created_at,
"geo_enabled": status.user.geo_enabled,
"lang": status.user.lang,
"url": status.user.url,
"description": status.user.description,
"time_zone": status.user.time_zone,
"location": status.user.location,
"screen_name": status.user.screen_name,
"protected": status.user.protected,
"statuses_count": status.user.statuses_count,
"profile_image_url_https": status.user.profile_image_url_https,
"utc_offset": status.user.utc_offset,
"followers_count": status.user.followers_count,
"id": status.user.id,
"id_str": status.user.id_str,
"name": status.user.name,
"friends_count": status.user.friends_count,
},
"retweeted": status.retweeted,
"place": {
"country_code": status.place.country_code,
"country": status.place.country,
"name": status.place.name,
"full_name": status.place.full_name,
"id": status.place.id,
"bounding_box": {
"type": status.place.bounding_box.type,
"coordinates": status.place.bounding_box.coordinates
}
},
"geo": status.geo,
"_id": status.id,
"text": status.text,
"lang": status.lang,
"in_reply_to_user_id_str": status.in_reply_to_user_id_str,
"id_str": status.id_str
}
auth = tweepy.OAuthHandler(CONSUMER_KEY[0], CONSUMER_SECRET[0])
auth.set_access_token(OAUTH_TOKEN[0], OAUTH_TOKEN_SECRET[0])
api = changeAPI(0)
client = MongoClient()
db = client.twitterdb
global t
t = 0
global i
i = 0
while(True):
    ## stop after at most 2000 iterations of the loop
if t >= 2000:
break
try:
api = changeAPI(i)
search_tweets()
if end == True:
break
except Exception as e:
print('time: ', t)
print('id: ', i)
i = i + 1
print(str(e))
if i == 6:
            ## all six keys have failed: sleep for 5 minutes before retrying
time.sleep(60 * 5)
t += 1
i = 0
|
the-stack_106_20108
|
import datetime
# get the user's year of birth
anoUsu = int(input('Digite o seu ano de nascimento: '))
# current year
anoAtu = datetime.date.today().year
# age calculation
calc = anoAtu - anoUsu
# category checks
if calc <= 9:
print('Você está na categoria: \033[1;32mMIRIM')
elif calc > 9 and calc <= 14:
print('Você está na categoria: \033[1;36mINFANTIL')
elif calc > 14 and calc <= 19:
print('Você está na categoria: \033[1;31mJUNIOR')
elif calc > 19 and calc <= 25:
print('Você está na categoria: \033[1;34mSÊNIOR')
else:
print('Você está na categoria: \033[1;35mMASTER')
'''
This program finds out which sports category the person belongs to
First we import the datetime library to get the current year
The user enters the year of birth, which is stored in anoUsu
The variable anoAtu gets the current year using date.today().year
The variable calc takes the current year minus the birth year to obtain the age
The checks are based on age, i.e. the first if is True when calc (the age) is less than or equal to 9
Next there is another branch for ages above 9 and up to 14
Another one for ages above 14 and up to 19
The last one for ages above 19 and up to 25
If none of them is True, it falls into the else branch for ages above 25
In every branch the print shows the category assigned to the person, using colours and parameters to make the text bold
\033[x;y;zWord\033[m
In this pattern x is the text style, for example bold or underlined
y is the colour of the letters, and z is the background colour of the word
'''
|
the-stack_106_20111
|
import os
from griddly import GymWrapperFactory, gd, GymWrapper
from griddly.RenderTools import VideoRecorder
if __name__ == "__main__":
wrapper = GymWrapperFactory()
name = "proximity_env"
current_path = os.path.dirname(os.path.realpath(__file__))
env = GymWrapper(
"proximity_env.yaml",
player_observer_type=gd.ObserverType.VECTOR,
global_observer_type=gd.ObserverType.ISOMETRIC,
level=0,
)
env.reset()
global_recorder = VideoRecorder()
global_visualization = env.render(observer="global", mode="rgb_array")
global_recorder.start("global_video_test.mp4", global_visualization.shape)
for i in range(1000):
obs, reward, done, info = env.step(env.action_space.sample())
env.render(observer="global")
frame = env.render(observer="global", mode="rgb_array")
global_recorder.add_frame(frame)
if done:
env.reset()
global_recorder.close()
|
the-stack_106_20112
|
"""
Example of running a Unity3D (MLAgents) Policy server that can learn
Policies via sampling inside many connected Unity game clients (possibly
running in the cloud on n nodes).
For a locally running Unity3D example, see:
`examples/unity3d_env_local.py`
To run this script against one or more possibly cloud-based clients:
1) Install Unity3D and `pip install mlagents`.
2) Compile a Unity3D example game with MLAgents support (e.g. 3DBall or any
other one that you created yourself) and place the compiled binary
somewhere, where your RLlib client script (see below) can access it.
2.1) To find Unity3D MLAgent examples, first `pip install mlagents`,
then check out the `.../ml-agents/Project/Assets/ML-Agents/Examples/`
folder.
3) Change this RLlib Policy server code so it knows the observation- and
action Spaces, the different Policies (called "behaviors" in Unity3D
MLAgents), and Agent-to-Policy mappings for your particular game.
Alternatively, use one of the two already existing setups (3DBall or
SoccerStrikersVsGoalie).
4) Then run (two separate shells/machines):
$ python unity3d_server.py --env 3DBall
$ python unity3d_client.py --inference-mode=local --game [path to game binary]
"""
import argparse
import os
import ray
from ray.rllib.agents.registry import get_trainer_class
from ray.rllib.env.policy_server_input import PolicyServerInput
from ray.rllib.env.wrappers.unity3d_env import Unity3DEnv
SERVER_ADDRESS = "localhost"
SERVER_PORT = 9900
CHECKPOINT_FILE = "last_checkpoint_{}.out"
parser = argparse.ArgumentParser()
parser.add_argument(
"--run",
default="PPO",
choices=["DQN", "PPO"],
help="The RLlib-registered algorithm to use.")
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument(
"--num-workers",
type=int,
default=2,
help="The number of workers to use. Each worker will create "
"its own listening socket for incoming experiences.")
parser.add_argument(
"--env",
type=str,
default="3DBall",
choices=[
"3DBall", "3DBallHard", "FoodCollector", "GridFoodCollector",
"Pyramids", "SoccerStrikersVsGoalie", "Sorter", "Tennis",
"VisualHallway", "Walker"
],
help="The name of the Env to run in the Unity3D editor "
"(feel free to add more to this script!)")
parser.add_argument(
"--port",
type=int,
default=SERVER_PORT,
    help="The Policy server's port to listen on for ExternalEnv client "
    "connections.")
parser.add_argument(
"--checkpoint-freq",
type=int,
default=10,
help="The frequency with which to create checkpoint files of the learnt "
"Policies.")
parser.add_argument(
"--no-restore",
action="store_true",
    help="Do not restore the Policy "
    "weights from a previous checkpoint.")
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
# `InputReader` generator (returns None if no input reader is needed on
# the respective worker).
def _input(ioctx):
# We are remote worker or we are local worker with num_workers=0:
# Create a PolicyServerInput.
if ioctx.worker_index > 0 or ioctx.worker.num_workers == 0:
return PolicyServerInput(
ioctx, SERVER_ADDRESS, args.port + ioctx.worker_index -
(1 if ioctx.worker_index > 0 else 0))
# No InputReader (PolicyServerInput) needed.
else:
return None
# Get the multi-agent policies dict and agent->policy
# mapping-fn.
policies, policy_mapping_fn = \
Unity3DEnv.get_policy_configs_for_game(args.env)
# The entire config will be sent to connecting clients so they can
# build their own samplers (and also Policy objects iff
# `inference_mode=local` on clients' command line).
config = {
# Indicate that the Trainer we setup here doesn't need an actual env.
# Allow spaces to be determined by user (see below).
"env": None,
# Use the `PolicyServerInput` to generate experiences.
"input": _input,
# Use n worker processes to listen on different ports.
"num_workers": args.num_workers,
# Disable OPE, since the rollouts are coming from online clients.
"input_evaluation": [],
# Other settings.
"train_batch_size": 256,
"rollout_fragment_length": 20,
# Multi-agent setup for the given env.
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_mapping_fn,
},
# DL framework to use.
"framework": args.framework,
}
# Create the Trainer used for Policy serving.
trainer = get_trainer_class(args.run)(config=config)
# Attempt to restore from checkpoint if possible.
checkpoint_path = CHECKPOINT_FILE.format(args.env)
if not args.no_restore and os.path.exists(checkpoint_path):
checkpoint_path = open(checkpoint_path).read()
print("Restoring from checkpoint path", checkpoint_path)
trainer.restore(checkpoint_path)
# Serving and training loop.
count = 0
while True:
# Calls to train() will block on the configured `input` in the Trainer
# config above (PolicyServerInput).
print(trainer.train())
if count % args.checkpoint_freq == 0:
print("Saving learning progress to checkpoint file.")
checkpoint = trainer.save()
# Write the latest checkpoint location to CHECKPOINT_FILE,
# so we can pick up from the latest one after a server re-start.
with open(checkpoint_path, "w") as f:
f.write(checkpoint)
count += 1
|
the-stack_106_20113
|
import sys
import pygame
from bullet import Bullet
from alien import Alien
from time import sleep
def check_events(ai_settings,screen,ship,bullets,stats,play_button,aliens,sb):
    # watch for keyboard and mouse events
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE:
                # create a new bullet and add it to the bullets group
new_bullet = Bullet(ai_settings,screen,ship)
bullets.add(new_bullet)
elif event.key == pygame.K_q:
sys.exit()
elif event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_x,mouse_y = pygame.mouse.get_pos()
check_play_button(stats,play_button,mouse_x,mouse_y,ai_settings,screen,ship,aliens,bullets,sb)
def check_play_button(stats,play_button,mouse_x,mouse_y,ai_settings,screen,ship,aliens,bullets,sb):
    """Start a new game when the player clicks the Play button"""
if play_button.rect.collidepoint(mouse_x,mouse_y) and not stats.game_active:
stats.game_active = True
        # reset the game settings
        ai_settings.initialize_dynamic_settings()
        # hide the mouse cursor
        pygame.mouse.set_visible(False)
        # reset the game statistics
        stats.reset_stats()
        # reset the scoreboard images
        sb.prep_score()
        sb.prep_level()
        # empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
        # create a new fleet of aliens
create_fleet(ai_settings,screen,aliens,ship)
ship.center_ship()
def update_screen(ai_settings,screen,ship,aliens,bullets,stats,play_button,sb):
    """Update the images on the screen and flip to the new screen"""
    # redraw the screen on each pass through the loop
    screen.fill(ai_settings.bg_color)
    # redraw all bullets behind the ship and the aliens
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    # draw the ship
    ship.blitme()
    # draw the aliens
    aliens.draw(screen)
    # show the score
    sb.show_score()
    # draw the Play button if the game is inactive
    if not stats.game_active:
        play_button.draw_button()
    # make the most recently drawn screen visible
pygame.display.flip()
def update_bullets(ai_settings,screen,ship,bullets,aliens,sb,stats):
    # update bullet positions
    bullets.update()
    # get rid of bullets that have disappeared
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    # respond to bullet-alien collisions
check_bullet_alien_collisions(ai_settings,screen,ship,bullets,aliens,sb,stats)
def check_bullet_alien_collisions(ai_settings,screen,ship,bullets,aliens,sb,stats):
    """Respond to bullet-alien collisions"""
    # check whether any bullets have hit aliens;
    # if so, get rid of both the bullet and the alien
    collisions = pygame.sprite.groupcollide(bullets,aliens,True,True)
    # check whether the alien group is empty
    if len(aliens)==0:
        # destroy all remaining bullets, create a new fleet of aliens and move up a level
bullets.empty()
create_fleet(ai_settings,screen,aliens,ship)
ai_settings.increase_speed()
        # raise the level
stats.level +=1
sb.prep_level()
    # aliens were shot down: update the score
if collisions:
for aliens in collisions.values():
stats.score += ai_settings.alien_points
sb.prep_score()
def get_number_aliens_x(ai_settings,alien_width):
    """Determine how many aliens fit in a row"""
    available_space_x = ai_settings.screen_width - 2 * alien_width
    # number of aliens that fit in one row
number_aliens_x = int(available_space_x / (2 * alien_width))
return number_aliens_x
def get_number_rows(ai_settings,ship_height,alien_height):
    """Determine how many rows of aliens fit on the screen"""
available_space_y = (ai_settings.screen_height - (3 * alien_height)-ship_height)
number_rows = int (available_space_y / (2 * alien_height))
return number_rows
def create_alien(ai_settings,screen,aliens,alien_number,row_number):
    # create an alien and place it in the current row
alien = Alien(ai_settings,screen)
alien_width = alien.rect.width
alien.x = alien_width + 2 * alien_width * alien_number
alien.rect.x = alien.x
alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
aliens.add(alien)
def create_fleet(ai_settings,screen,aliens,ship):
    """Create a full fleet of aliens"""
    # create one alien;
    # the spacing between aliens equals one alien width
alien = Alien(ai_settings,screen)
number_aliens_x = get_number_aliens_x(ai_settings,alien.rect.width)
number_rows = get_number_rows(ai_settings,ship.rect.height,alien.rect.height)
    # create the fleet of aliens
for row_number in range(number_rows):
for alien_number in range(number_aliens_x):
create_alien(ai_settings,screen,aliens,alien_number,row_number)
def update_aliens(ai_settings,aliens,ship,stats,screen,bullets):
    """Check whether the fleet is at an edge, then update the positions of all aliens"""
check_fleet_edges(ai_settings,aliens)
aliens.update()
    # look for alien-ship collisions
if pygame.sprite.spritecollideany(ship,aliens):
ship_hit(ai_settings,aliens,ship,stats,screen,bullets)
    # look for aliens hitting the bottom of the screen
check_aliens_bottom(ai_settings,aliens,ship,stats,screen,bullets)
def check_fleet_edges(ai_settings,aliens):
    """Respond appropriately if any aliens have reached an edge"""
for alien in aliens.sprites():
if alien.check_edges():
change_fleet_direction(ai_settings,aliens)
break
def change_fleet_direction(ai_settings,aliens):
    """Drop the entire fleet and change its direction when an alien reaches an edge"""
for alien in aliens.sprites():
alien.rect.y += ai_settings.fleet_drop_speed
ai_settings.fleet_direction *= -1
def ship_hit(ai_settings,aliens,ship,stats,screen,bullets):
    """Respond to the ship being hit by an alien"""
    if stats.ships_left > 0:
        # decrement ships_left
        stats.ships_left -=1
        # empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
        # create a new fleet and centre the ship at the bottom of the screen
        create_fleet(ai_settings,screen,aliens,ship)
        ship.center_ship()
        # pause
sleep(1)
else:
stats.game_active = False
pygame.mouse.set_visible(True)
def check_aliens_bottom(ai_settings,aliens,ship,stats,screen,bullets):
    """Check whether any aliens have reached the bottom of the screen"""
screen_rect = screen.get_rect()
for alien in aliens.sprites():
if alien.rect.bottom >= screen_rect.bottom:
            # treat this the same as the ship being hit
ship_hit(ai_settings,aliens,ship,stats,screen,bullets)
break
|
the-stack_106_20115
|
# -*- coding: utf-8 -*-
from ....Classes.Arc1 import Arc1
from ....Classes.Arc3 import Arc3
from ....Classes.Segment import Segment
def build_geometry(self):
"""Compute the curve (Line) needed to plot the object.
The ending point of a curve is the starting point of the next curve
in the list
Parameters
----------
self : SlotW11
A SlotW11 object
Returns
-------
curve_list: list
A list of 7 Segment and 2 Arc1
"""
point_dict = self._comp_point_coordinate()
Z1 = point_dict["Z1"]
Z2 = point_dict["Z2"]
Z3 = point_dict["Z3"]
Z4 = point_dict["Z4"]
Z5 = point_dict["Z5"]
Z6 = point_dict["Z6"]
Z7 = point_dict["Z7"]
Z8 = point_dict["Z8"]
Z9 = point_dict["Z9"]
Z10 = point_dict["Z10"]
if self.is_outwards():
rot_sign = 1 # Rotation direction for Arc1
else: # inward slot
rot_sign = -1 # Rotation direction for Arc1
# Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z2))
curve_list.append(Segment(Z2, Z3))
curve_list.append(Segment(Z3, Z4))
if self.R1 * 2 < self.W2:
curve_list.append(
Arc1(Z4, Z5, rot_sign * self.R1, is_trigo_direction=self.is_outwards())
)
curve_list.append(Segment(Z5, Z6))
curve_list.append(
Arc1(Z6, Z7, rot_sign * self.R1, is_trigo_direction=self.is_outwards())
)
else:
curve_list.append(Arc3(Z4, Z7, self.is_outwards()))
curve_list.append(Segment(Z7, Z8))
curve_list.append(Segment(Z8, Z9))
curve_list.append(Segment(Z9, Z10))
return curve_list
|
the-stack_106_20116
|
import model.m_mysql as db
from util.app_util import AppUtil
'''
Account history table: records the account's daily cash assets and stock assets,
with totals valued at the previous trading day's closing prices.
'''
class MAccountHist(object):
@staticmethod
def insert_account_hist(account_id, account_date,
cash_amount, stock_amount):
'''
        Insert one record into the t_account_hist table, recording the user's total assets for a given day
        @param account_id:int account id
        @param account_date:datetime accounting date
        @param cash_amount:int cash assets in units of fen (1/100 yuan)
        @param stock_amount:int stock assets valued at the previous trading day's closing price
        @return the primary key of the new row and the number of rows inserted
        @version v0.0.1 闫涛 2019-03-16
'''
sql = 'insert into t_account_hist(account_id, '\
'account_date, cash_amount, stock_amount) '\
'values(%s, %s, %s, %s)'
params = (account_id, AppUtil.format_date(account_date, \
AppUtil.DF_HYPHEN), cash_amount, stock_amount)
return db.insert(sql, params)
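# Hypothetical usage sketch (the account id, date and amounts below are made up
# for illustration and are not part of the original project):
#
#   from datetime import datetime
#   new_id, row_count = MAccountHist.insert_account_hist(
#       1001,                      # account_id
#       datetime(2019, 3, 15),     # account_date
#       1234500,                   # 12,345.00 yuan expressed in fen
#       6789000)                   # 67,890.00 yuan expressed in fen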
|
the-stack_106_20117
|
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
@PIPELINES.register_module
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class ImageToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class Transpose(object):
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={}, order={})'.format(
self.keys, self.order)
@PIPELINES.register_module
class ToDataContainer(object):
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + '(fields={})'.format(self.fields)
@PIPELINES.register_module
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
if 'fact_img' in results:
img = results['fact_img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['fact_img'] = DC(to_tensor(img), stack=True)
if 'template_img' in results:
img = results['template_img']
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
img = np.ascontiguousarray(img.transpose(2, 0, 1))
results['template_img'] = DC(to_tensor(img), stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module
class Collect(object):
"""
Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "proposals", "gt_bboxes",
"gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the bottom/right
if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
"""
def __init__(self,
keys,
meta_keys=('template_img', 'fact_img', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_meta'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(
self.keys, self.meta_keys)
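# Rough usage sketch of the Collect stage (the keys mirror the defaults above;
# the dummy `results` dict is illustrative only):
#
#   collect = Collect(keys=['fact_img', 'gt_bboxes', 'gt_labels'])
#   # `results` must already contain every entry listed in `meta_keys`
#   # ('template_img', 'fact_img', 'ori_shape', 'img_shape', ...) as well as the
#   # requested keys; the call returns a dict with 'img_meta' (a DataContainer)
#   # plus 'fact_img', 'gt_bboxes' and 'gt_labels'.
#   data = collect(results)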
|
the-stack_106_20119
|
#!/usr/bin/env python2
from __future__ import unicode_literals
# Last.FM API libraries
import time
import urllib2
import json
# YouTube API libraries
import youtube_dl
from googleapiclient.discovery import build
# Playback libraries
from pydub import AudioSegment
from pydub.playback import play
from os import remove
# Your YouTube developer key from https://cloud.google.com/console
DEVELOPER_KEY = "your-api-key"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# Retrieve tracklist from Last.FM station
lastfm_username = "your_username"
while True:
# Last.fm servers reply with a 500 error quite often, we need to catch it and retry
try:
response = urllib2.urlopen("http://www.last.fm/player/station/user/" + lastfm_username + "/mix")
except urllib2.HTTPError:
print("Is Last.fm down? Trying again in 5 seconds...")
time.sleep(5)
continue
# Get and decode response
json_tracklist = response.read()
tracklist = json.loads(json_tracklist)
# Validate tracklist
if not "playlist" in tracklist:
print("No playlist received from Last.fm")
time.sleep(5)
continue
if len(tracklist["playlist"]) == 0:
        print("Empty playlist received from Last.fm")
time.sleep(5)
continue
# Iterate over tracks to play them
for track in tracklist["playlist"]:
        # Last.fm returns an array of artists, so we need to be prepared
        # to handle songs with more than one artist
artist = track["artists"][0]["name"]
for i in range(1, len(track["artists"])):
artist += ", " + track["artists"][i]["name"]
        # We print the song name even before a download link is actually found
        # so we can know which song should have been played even if no download
        # link was found
print("Playing " + artist + " - " + track["name"])
yt_id = None
# We first check if Last.fm has already provided us with a YouTube link
for link in track["playlinks"]:
if link["affiliate"] == "youtube":
yt_id = link["id"]
        # If Last.fm has not provided a download link, try to find a video ourselves
if yt_id is None:
print("No YouTube link provided, looking for one")
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)
search = youtube.search().list(
q=track["artists"][0]["name"] + " - " + track["name"],
part="id",
maxResults=1,
# Limit search to Music category
videoCategoryId="sGDdEsjSJ_SnACpEvVQ6MtTzkrI/nqRIq97-xe5XRZTxbknKFVe5Lmg",
safeSearch="none",
type="video",
videoLicense="any",
videoEmbeddable="any"
).execute()
videos = search["items"]
if len(videos) != 1:
print("No videos found. Skipping.")
continue
else:
yt_id = search["items"][0]["id"]["videoId"]
ydl_opts = {
'format': 'bestaudio',
'logtostderr': True,
'outtmpl': '/tmp/%(id)s',
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
try:
ydl.download(["https://www.youtube.com/watch?v=" + yt_id])
except youtube_dl.utils.DownloadError:
print("Song download failed. Skipping.")
continue
song = AudioSegment.from_file("/tmp/" + yt_id)
play(song)
remove("/tmp/" + yt_id)
|
the-stack_106_20122
|
import numpy as np
from pyQBTNs import QBTNs
### The classical steepest-descent solver can take a very long time to run; it should be fairly quick for small ranks, though.
qbtns = QBTNs(factorization_method="Matrix_Factorization", solver_method="classical-steepest-descent")
p = 0.5
N1 = 10
N2 = 10
RANK = 8
A = np.random.choice(a=[False, True], size=(N1, RANK), p=[p, 1-p])
B = np.random.choice(a=[False, True], size=(RANK, N2), p=[p, 1-p])
print(np.matmul(A, B))
qbtns.fit(np.matmul(A, B), RANK)
print(qbtns.get_score())
print(qbtns.get_factors())
|
the-stack_106_20123
|
import torch.nn as nn
class HeightCompression(nn.Module):
def __init__(self, model_cfg, **kwargs):
"""
        Compress the sparse 3D features along the height dimension into a BEV feature map
"""
super().__init__()
self.model_cfg = model_cfg
self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES # 256
def forward(self, batch_dict):
"""
Args:
batch_dict:
encoded_spconv_tensor: sparse tensor
Returns:
batch_dict:
spatial_features:
"""
encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
        # use the batch index, spatial_shape, indices and features to scatter the sparse tensor back to its dense form
spatial_features = encoded_spconv_tensor.dense()
N, C, D, H, W = spatial_features.shape # 4,128,2,200,150
        spatial_features = spatial_features.view(N, C * D, H, W)  # (4,256,200,150): merge along the height axis to compress the feature map into a BEV map
        # add the BEV features and the downsampling stride to batch_dict
batch_dict['spatial_features'] = spatial_features
batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride'] # 8
return batch_dict
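if __name__ == "__main__":
    # Minimal sketch of the reshape performed in forward(), with the
    # sparse-tensor plumbing stripped out. The concrete sizes are the
    # illustrative ones from the comments above, not requirements.
    import torch
    dense = torch.zeros(4, 128, 2, 200, 150)   # (N, C, D, H, W)
    N, C, D, H, W = dense.shape
    bev = dense.view(N, C * D, H, W)           # -> (4, 256, 200, 150)
    print(bev.shape)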
|
the-stack_106_20125
|
"""empty message
Revision ID: 0005 add team name to git_metric
Revises: 0004 use repo name not url
Create Date: 2019-01-25 15:09:08.178909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0005 add team name to git_metric'
down_revision = '0004 use repo name not url'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('git_metric', sa.Column('team_name', sa.String(), nullable=True))
update_team_name = "UPDATE git_metric SET team_name='observe'"
op.execute(update_team_name)
op.alter_column('git_metric', 'team_name', nullable=False)
update_team_id = "UPDATE git_metric SET team_id='1'"
op.execute(update_team_id)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('git_metric', 'team_name')
update_team_id = "UPDATE git_metric SET team_id='2613549'"
op.execute(update_team_id)
# ### end Alembic commands ###
|
the-stack_106_20126
|
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import distutils.command.clean
import os
import shutil
import subprocess
import sys
from pathlib import Path
from setuptools import find_packages, setup
from tools.setup_helpers.extension import CMakeBuild, get_ext_modules
ROOT_DIR = Path(__file__).parent.resolve()
################################################################################
# Parameters parsed from environment
################################################################################
RUN_BUILD_DEP = True
for _, arg in enumerate(sys.argv):
if arg in ["clean", "egg_info", "sdist"]:
RUN_BUILD_DEP = False
def _get_submodule_folders():
git_modules_path = ROOT_DIR / ".gitmodules"
if not os.path.exists(git_modules_path):
return []
with open(git_modules_path) as f:
return [
os.path.join(ROOT_DIR, line.split("=", 1)[1].strip())
for line in f.readlines()
if line.strip().startswith("path")
]
def _check_submodules():
def check_for_files(folder, files):
if not any(os.path.exists(os.path.join(folder, f)) for f in files):
print("Could not find any of {} in {}".format(", ".join(files), folder))
print("Did you run 'git submodule update --init --recursive --jobs 0'?")
sys.exit(1)
def not_exists_or_empty(folder):
return not os.path.exists(folder) or (os.path.isdir(folder) and len(os.listdir(folder)) == 0)
if bool(os.getenv("USE_SYSTEM_LIBS", False)):
return
folders = _get_submodule_folders()
# If none of the submodule folders exists, try to initialize them
if all(not_exists_or_empty(folder) for folder in folders):
try:
import time
print(" --- Trying to initialize submodules")
start = time.time()
subprocess.check_call(["git", "submodule", "update", "--init", "--recursive"], cwd=ROOT_DIR)
end = time.time()
print(f" --- Submodule initialization took {end - start:.2f} sec")
except Exception:
            print(" --- Submodule initialization failed")
print("Please run:\n\tgit submodule update --init --recursive --jobs 0")
sys.exit(1)
for folder in folders:
check_for_files(folder, ["CMakeLists.txt", "Makefile", "setup.py", "LICENSE", "LICENSE.md", "LICENSE.txt"])
def _get_version():
with open(os.path.join(ROOT_DIR, "version.txt")) as f:
version = f.readline().strip()
sha = "Unknown"
try:
sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=str(ROOT_DIR)).decode("ascii").strip()
except Exception:
pass
os_build_version = os.getenv("BUILD_VERSION")
if os_build_version:
version = os_build_version
elif sha != "Unknown":
version += "+" + sha[:7]
return version, sha
def _export_version(version, sha):
version_path = ROOT_DIR / "torchdata" / "version.py"
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
f.write(f"git_version = {repr(sha)}\n")
# Use new version of torch on main branch
pytorch_package_dep = "torch>1.11.0"
if os.getenv("PYTORCH_VERSION"):
pytorch_package_dep = pytorch_package_dep.split(">")[0]
pytorch_package_dep += "==" + os.getenv("PYTORCH_VERSION")
requirements = [
"urllib3 >= 1.25",
"requests",
pytorch_package_dep,
]
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchdata extension
def remove_extension(pattern):
for path in (ROOT_DIR / "torchdata").glob(pattern):
print(f"removing extension '{path}'")
path.unlink()
for ext in ["so", "dylib", "pyd"]:
remove_extension("**/*." + ext)
# Remove build directory
build_dirs = [
ROOT_DIR / "build",
]
for path in build_dirs:
if path.exists():
print(f"removing '{path}' (and everything under it)")
shutil.rmtree(str(path), ignore_errors=True)
if __name__ == "__main__":
VERSION, SHA = _get_version()
_export_version(VERSION, SHA)
print("-- Building version " + VERSION)
if RUN_BUILD_DEP:
from tools.gen_pyi import gen_pyi
_check_submodules()
gen_pyi()
setup(
# Metadata
name="torchdata",
version=VERSION,
description="Composable data loading modules for PyTorch",
url="https://github.com/pytorch/data",
author="PyTorch Team",
author_email="[email protected]",
license="BSD",
install_requires=requirements,
python_requires=">=3.7",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
package_data={
"torchdata": [
"datapipes/iter/*.pyi",
],
},
# Package Info
packages=find_packages(exclude=["test*", "examples*", "tools*", "torchdata.csrc*", "build*"]),
zip_safe=False,
# C++ Extension Modules
ext_modules=get_ext_modules(),
cmdclass={
"build_ext": CMakeBuild,
"clean": clean,
},
)
|
the-stack_106_20132
|
try:
from sql_helpers import SESSION, BASE
except ImportError:
raise AttributeError
from sqlalchemy import Column, String
class GMute(BASE):
__tablename__ = "gmute"
sender = Column(String(14), primary_key=True)
def __init__(self, sender):
self.sender = str(sender)
GMute.__table__.create(checkfirst=True)
def is_gmuted(sender_id):
try:
return SESSION.query(GMute).all()
except BaseException:
return None
finally:
SESSION.close()
def gmute(sender):
adder = GMute(str(sender))
SESSION.add(adder)
SESSION.commit()
def ungmute(sender):
rem = SESSION.query(GMute).get((str(sender)))
if rem:
SESSION.delete(rem)
SESSION.commit()
|
the-stack_106_20133
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import contextlib
import funsor
from pyro.contrib.funsor import to_data
from pyro.contrib.funsor.handlers import enum, plate, replay, trace
from pyro.contrib.funsor.infer.elbo import ELBO, Jit_ELBO
from pyro.contrib.funsor.infer.traceenum_elbo import terms_from_trace
from pyro.distributions.util import copy_docs_from
from pyro.infer import TraceTMC_ELBO as _OrigTraceTMC_ELBO
@copy_docs_from(_OrigTraceTMC_ELBO)
class TraceTMC_ELBO(ELBO):
def differentiable_loss(self, model, guide, *args, **kwargs):
with plate(size=self.num_particles) if self.num_particles > 1 else contextlib.ExitStack(), \
enum(first_available_dim=(-self.max_plate_nesting-1) if self.max_plate_nesting else None):
guide_tr = trace(guide).get_trace(*args, **kwargs)
model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)
model_terms = terms_from_trace(model_tr)
guide_terms = terms_from_trace(guide_tr)
log_measures = guide_terms["log_measures"] + model_terms["log_measures"]
log_factors = model_terms["log_factors"] + [-f for f in guide_terms["log_factors"]]
plate_vars = model_terms["plate_vars"] | guide_terms["plate_vars"]
measure_vars = model_terms["measure_vars"] | guide_terms["measure_vars"]
with funsor.terms.lazy:
elbo = funsor.sum_product.sum_product(
funsor.ops.logaddexp, funsor.ops.add,
log_measures + log_factors,
eliminate=measure_vars | plate_vars,
plates=plate_vars
)
return -to_data(funsor.optimizer.apply_optimizer(elbo))
class JitTraceTMC_ELBO(Jit_ELBO, TraceTMC_ELBO):
pass
|
the-stack_106_20135
|
# -*- coding: utf-8 -*-
"""
=== Approach ===
Core idea: after each landing take a screenshot, work out from it the
    coordinates of the piece and of the centre of the top face of the next
    block, then multiply the distance between the two points by a time
    coefficient to get how long to press.
Finding the piece: locate it by its colour. The bottom row of the piece is
    roughly a straight line, so scan row by row from top to bottom and compare
    colours (within a tolerance range). Collect all the points of the lowest
    matching row, take their midpoint, then move the Y coordinate up by half
    the height of the piece's base to get the centre point.
Finding the board: rely on the colour difference between the background and
    the block. Starting below the score, scan row by row; the top of a round
    block is a line and the top of a square block is roughly a point, so as
    with the piece we collect several points and take their midpoint, which
    gives the X coordinate of the block centre. Assuming the piece sits at the
    centre of its current block, the Y coordinate of the target centre is then
    derived from a fixed angle measured from screenshots.
Finally: compute the distance between the two points and multiply it by the
    coefficient to get the press duration (the X-axis distance alone would
    probably work too).
"""
from __future__ import print_function, division
import os
import sys
import time
import math
import random
from PIL import Image
from six.moves import input
try:
from common import debug, config, screenshot
except Exception as ex:
print(ex)
print('请将脚本放在项目根目录中运行')
print('请检查项目根目录中的 common 文件夹是否存在')
exit(-1)
VERSION = "1.1.1"
# DEBUG switch: set it to True when you need to debug, False otherwise
DEBUG_SWITCH = False
# Magic numbers: the script may not work without them. Set them from top to
# bottom according to your own screenshots; the values live in the config folder
config = config.open_accordant_config()
under_game_score_y = config['under_game_score_y']
# time coefficient for the long press; tune it to your actual situation
press_coefficient = config['press_coefficient']
# half the height of the piece's base; may need tuning
piece_base_height_1_2 = config['piece_base_height_1_2']
# width of the piece; slightly larger than measured on screenshots is safer, may need tuning
piece_body_width = config['piece_body_width']
def set_button_position(im):
"""
    Set the swipe coordinates to the position of the 'Play again' button
"""
global swipe_x1, swipe_y1, swipe_x2, swipe_y2
w, h = im.size
left = int(w / 2)
top = int(1584 * (h / 1920.0))
left = int(random.uniform(left-50, left+50))
    top = int(random.uniform(top-10, top+10))  # randomised to avoid being banned
swipe_x1, swipe_y1, swipe_x2, swipe_y2 = left, top, left, top
def jump(distance):
"""
    Jump a given distance
"""
press_time = distance * press_coefficient
    press_time = max(press_time, 200)  # 200 ms is the minimum press duration
press_time = int(press_time)
cmd = 'adb shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(
x1=swipe_x1,
y1=swipe_y1,
x2=swipe_x2,
y2=swipe_y2,
duration=press_time
)
print(cmd)
os.system(cmd)
return press_time
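# Worked example of the press-time formula above (the coordinates and the
# coefficient are illustrative, not measured values): with the piece at
# (540, 1200), the target centre at (780, 1060) and press_coefficient = 1.35,
# the distance is sqrt(240**2 + 140**2) ≈ 277.8 px, so
# press_time ≈ 277.8 * 1.35 ≈ 375 ms.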
def find_piece_and_board(im):
"""
    Find the key coordinates (the piece and the target board)
"""
w, h = im.size
piece_x_sum = 0
piece_x_c = 0
piece_y_max = 0
board_x = 0
board_y = 0
    scan_x_border = int(w / 8)  # left/right border when scanning for the piece
    scan_start_y = 0  # starting y coordinate of the scan
im_pixel = im.load()
    # probe for scan_start_y in steps of 50 px
for i in range(int(h / 3), int(h*2 / 3), 50):
last_pixel = im_pixel[0, i]
for j in range(1, w):
pixel = im_pixel[j, i]
            # not a solid-colour row: record scan_start_y and prepare to break out
if pixel != last_pixel:
scan_start_y = i - 50
break
if scan_start_y:
break
print('scan_start_y: {}'.format(scan_start_y))
    # scan downwards from scan_start_y; the piece should be in the upper part of the screen, tentatively no lower than 2/3 of it
for i in range(scan_start_y, int(h * 2 / 3)):
        # Also narrow the x range to reduce the scanning cost
for j in range(scan_x_border, w - scan_x_border):
pixel = im_pixel[j, i]
            # Match against the color of the piece's bottom row and average those
            # points; this color range should be OK, not extracted into config for now
if (50 < pixel[0] < 60) \
and (53 < pixel[1] < 63) \
and (95 < pixel[2] < 110):
piece_x_sum += j
piece_x_c += 1
piece_y_max = max(i, piece_y_max)
if not all((piece_x_sum, piece_x_c)):
return 0, 0, 0, 0
piece_x = int(piece_x_sum / piece_x_c)
    piece_y = piece_y_max - piece_base_height_1_2  # move up by half the height of the piece's base
    # Limit the x range of the board scan to avoid the music-note bug
if piece_x < w/2:
board_x_start = piece_x
board_x_end = w
else:
board_x_start = 0
board_x_end = piece_x
for i in range(int(h / 3), int(h * 2 / 3)):
last_pixel = im_pixel[0, i]
if board_x or board_y:
break
board_x_sum = 0
board_x_c = 0
for j in range(int(board_x_start), int(board_x_end)):
pixel = im_pixel[j, i]
            # Fix the bug where the piece's head is taller than the next small block
if abs(j - piece_x) < piece_body_width:
continue
            # Fix a small bug caused by the one-pixel line on round tops; this color check should be OK, not extracted for now
if abs(pixel[0] - last_pixel[0]) \
+ abs(pixel[1] - last_pixel[1]) \
+ abs(pixel[2] - last_pixel[2]) > 10:
board_x_sum += j
board_x_c += 1
if board_x_sum:
board_x = board_x_sum / board_x_c
last_pixel = im_pixel[board_x, i]
            # Starting from i+274 below the top vertex, search upward for a point whose
            # color matches the top vertex: that is the bottom vertex. This works for all
            # solid-color surfaces and some non-solid ones, but misjudges the golf lawn,
            # the wood-grain table, the medicine bottle and the non-diamond record player
            # (probably).
            for k in range(i+274, i, -1):  # 274 is the largest top-to-bottom vertex distance of a block at game start
pixel = im_pixel[board_x, k]
if abs(pixel[0] - last_pixel[0]) \
+ abs(pixel[1] - last_pixel[1]) \
+ abs(pixel[2] - last_pixel[2]) < 10:
break
board_y = int((i+k) / 2)
            # If the previous jump landed on the center, a point of r245 g245 b245 will
            # appear at the center of the next target; use this property to compensate
            # for possible misjudgments in the code above.
            # If the previous jump missed the center for some reason and the next block
            # happens to have a pattern that cannot be recognized correctly, the game may
            # be lost; since patterns usually cover a large area, the failure probability
            # is low.
for j in range(i, i+200):
pixel = im_pixel[board_x, j]
if abs(pixel[0] - 245) + abs(pixel[1] - 245) + abs(pixel[2] - 245) == 0:
board_y = j + 10
break
if not all((board_x, board_y)):
return 0, 0, 0, 0
return piece_x, piece_y, board_x, board_y
def yes_or_no(prompt, true_value='y', false_value='n', default=True):
"""
检查是否已经为启动程序做好了准备
"""
default_value = true_value if default else false_value
prompt = '{} {}/{} [{}]: '.format(prompt, true_value,
false_value, default_value)
i = input(prompt)
if not i:
return default
while True:
if i == true_value:
return True
elif i == false_value:
return False
prompt = 'Please input {} or {}: '.format(true_value, false_value)
i = input(prompt)
def main():
"""
主函数
"""
    op = yes_or_no('Make sure ADB is enabled on the phone and it is connected to the computer, '
                   'then open Jump Jump and tap [Start Game] before running this program. Start now?')
if not op:
print('bye')
return
    print('Program version: {}'.format(VERSION))
debug.dump_device_info()
screenshot.check_screenshot()
i, next_rest, next_rest_time = (0, random.randrange(3, 10),
random.randrange(5, 10))
while True:
screenshot.pull_screenshot()
im = Image.open('./autojump.png')
        # Get the positions of the piece and the board
piece_x, piece_y, board_x, board_y = find_piece_and_board(im)
ts = int(time.time())
print(ts, piece_x, piece_y, board_x, board_y)
set_button_position(im)
jump(math.sqrt((board_x - piece_x) ** 2 + (board_y - piece_y) ** 2))
if DEBUG_SWITCH:
debug.save_debug_screenshot(ts, im, piece_x,
piece_y, board_x, board_y)
debug.backup_screenshot(ts)
im.close()
i += 1
if i == next_rest:
            print('Played {} jumps in a row, resting for {}s'.format(i, next_rest_time))
for j in range(next_rest_time):
                sys.stdout.write('\rThe program will resume in {}s'.format(next_rest_time - j))
sys.stdout.flush()
time.sleep(1)
            print('\nResuming')
i, next_rest, next_rest_time = (0, random.randrange(30, 100),
random.randrange(10, 60))
        # Wait a little longer so the piece has settled before the next screenshot; randomized to avoid bans
time.sleep(random.uniform(0.9, 1.2))
if __name__ == '__main__':
main()
|
the-stack_106_20136
|
import numpy as np #necessary imports
import cv2
import time
import math
import serial
color=(255,0,0) #variable for contour color and thickness
thickness=2
cX = cY = 0 #centroid of ball contour
cap = cv2.VideoCapture(1) #capture from video camera
j=0
int_x,int_y,prev_x,prev_y = 0,0,0,0  # previous co-ordinates of the ball contour centroid
x_cor,y_cor,i = 0,0,0 #x,y co-ordinate of edge of platform initialize
s = serial.Serial("COM3",9600) #Establish Serial Communication
s.baudrate = 9600
m = 0
def Platform(c):
global x_cor,y_cor,img2,Left,Right,Top,Bottom,frame,Q
    Left = tuple(c[c[:, :, 0].argmin()][0])  # Create tuples of the x,y coordinates of the extreme points
    Right = tuple(c[c[:, :, 0].argmax()][0])  # The minimum along the X-axis is Left; similar logic for the others
Top = tuple(c[c[:, :, 1].argmin()][0])
Bottom = tuple(c[c[:, :, 1].argmax()][0])
x_cor = int(((Right[0] - Left[0])**2 + (Right[1] - Left[1])**2 )**0.5) #Sides of the platform (dynamically)
y_cor = int(((Bottom[0] - Top[0])**2 + (Bottom[1] - Top[1])**2 )**0.5)
pts1 = np.float32([(list(Top),list(Right),list(Bottom),list(Left))]) #List of all 4 corners
pts2 = np.float32([[0,0],[x_cor,0],[x_cor,y_cor],[0,y_cor]]) #List of 4 points we want to map it to
Q = cv2.getPerspectiveTransform(pts1,pts2) #Get the Transformation Matrix
pi = math.pi
def PointsInCircum(r,n=100):
return [(math.cos(2*pi/n*x)*r,math.sin(2*pi/n*x)*r) for x in range(1,n+1)]
Points = PointsInCircum(60,4)
def Ball_Track():
global dst,x_cor,y_cor,thresh1,frame,Q,i
    dst = cv2.warpPerspective(frame,Q,(x_cor,y_cor))  # Transform and view in an orthogonal perspective
gray1 = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
ret,thresh1 = cv2.threshold(gray1,170,255,cv2.THRESH_BINARY)
(_,cont_bw,hierarchy)=cv2.findContours(thresh1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #contours of ball
cv2.circle(dst, (x_cor//2,y_cor//2), 8, (255, 255, 0), -1)
if len(cont_bw) != 0:
#l = max(cont_bw, key = cv2.contourArea)
for q in range(len(cont_bw)):
peri = cv2.arcLength(cont_bw[q], True)
approx = cv2.approxPolyDP(cont_bw[q], 0.01 * peri, True)
area = cv2.contourArea(cont_bw[q])
#print(len(approx))
if peri != 0 :
#print(area/peri)
if (len(approx)>=7 and area/peri > 2): # circle will have more than 7 sides and also area/peri is Radius/2
print(area/peri)
dst=cv2.drawContours(dst, cont_bw[q], -1, [0,255,0], thickness) #Draw contours of the ball
M = cv2.moments(cont_bw[q])
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"]) #Centroid of ball
cY = int(M["m01"] / M["m00"])
i = [cX,cY] #List of centroid
print(i)
data = PID() #Get Servo Angles to send by PID
Serial_C(data) #Send data to Arduino
def PID():
global x_cor,y_cor,i
global int_x,int_y,prev_x,prev_y
global Points,m,dst
Ball_x = 15*(i[0]-x_cor/2+60)//x_cor #Co-ordinates of Ball maped in cm
Ball_y = 15*(i[1]-y_cor/2-60)//y_cor
cv2.circle(dst,(i[0],i[1]), 8, (0, 255, 0), -1)
print(Points[m])
print(m)
    if(int(((i[0]-Points[m][0]-x_cor/2)**2 + (i[1]-Points[m][1]-y_cor/2)**2)**0.5)<30):  # If closer than 30 pixels to the current target point
m = m+1
if(m == 4):
m = 0
#Ball_x = 15*(i[0]-Points[m][0]-x_cor/2)//x_cor #Co-ordinates of Ball maped in cm
#Ball_y = 15*(i[1]-Points[m][1]-y_cor/2)//y_cor
Kp = 1.2 #1
Kd = -45 #-35
Ki = 0.01 #-0.01 #PID co-efficients
angle_x = (90+int(Kp*(Ball_x) + Kd*(prev_x-(Ball_x)) + Ki*(Ball_x + int_x))) #X-Angle to send
    angle_y = (90+int(Kp*Ball_y + Kd*(prev_y-(Ball_y)) + Ki*(Ball_y + int_y)))  # Y-angle to send; uses int_y to mirror the X-axis integral term
int_x = Ball_x #Storing x,y co-ordinates
int_y = Ball_y
prev_x = Ball_x
prev_y = Ball_y
    angle_x = max(75,angle_x)  # Clamp the angles to the [75, 105] degree range
    angle_x = min(105,angle_x)
    angle_y = max(75,angle_y)
    angle_y = min(105,angle_y)
    ard_x = str(angle_x)  # Encode as 6 digits, e.g. "087098" for 87 and 98 degrees
if(len(ard_x)==2):
ard_x = "0"+ard_x
ard_y = str(angle_y)
if(len(ard_y)==2):
ard_y = "0"+ard_y
arduino = ard_y + ard_x + "*" #End of command character
print(arduino)
#arduino = "090" + ard_y + "*"
return arduino
def Serial_C(data):
global s
s.write(data.encode()) #Send Data to Arduino
if __name__ == "__main__":
global j,img2,Left,Right,Top,Bottom,dst,thresh1,frame,Points,m,x_cor,y_cor,i
while(True):
j=j+1
#print(j)
# Capture frame-by-frame
ret, frame = cap.read() # ret = 1 if the video is captured; frame is the image
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,120,255,cv2.THRESH_BINARY_INV)
(_,contour,hierarchy)=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #Contours for Platform
if len(contour) != 0:
c = max(contour, key = cv2.contourArea) # find the largest contour
img2=cv2.drawContours(frame, c, -1, color, thickness) # draw largest contour
if(j>=25): #From 25th Frame for settling the image
Platform(c) #Make Platform Contours
if(j>=25):
Ball_Track() #Make Ball Track Contours
cv2.circle(img2, Left, 8, (0, 0, 255), -1) #Points (Extreme Display)
cv2.circle(img2, Right, 8, (0, 255, 0), -1)
cv2.circle(img2, Top, 8, (255, 0, 0), -1)
cv2.circle(img2, Bottom, 8, (0, 255, 0), -1)
#cv2.circle(dst,(i[0],i[1]), 8, (0, 255, 0), -1)
#for x in Points:
# cv2.circle(dst, (int(Points[m][0]+x_cor/2),int(Points[m][1]+y_cor/2)), 8, (255, 255, 0), -1)
cv2.imshow('Original View',img2) #Display all 3 views
cv2.imshow('B&W',thresh1)
cv2.imshow('Tracking',dst)
# Display the resulting image
#cv2.imshow('Contour',img3)
if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
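# ----------------------------------------------------------------------------
# Illustrative note (not part of the original script): the PID() function above
# folds the controller terms directly into servo angles. For comparison, a
# textbook discrete PID update looks roughly like the sketch below; Kp/Ki/Kd and
# dt are placeholders, not the tuned values used above.
#
#     def pid_step(error, prev_error, integral, Kp, Ki, Kd, dt):
#         integral += error * dt
#         derivative = (error - prev_error) / dt
#         return Kp * error + Ki * integral + Kd * derivative, integral
# ----------------------------------------------------------------------------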
|
the-stack_106_20139
|
import sqlite3
from sqlite3 import Error
import os
class Project:
def __init__(self, db_file):
# Init connection
self.conn = None
# Create the database file
# If this is a new database file, create the tables
self.create_conn(db_file)
def create_conn(self, db_file):
# If the file does not exist, then we need to create the tables
new_file = True
if os.path.isfile(db_file):
new_file = False
try:
# Create a database connection to a SQLite database
self.conn = sqlite3.connect(db_file)
print(sqlite3.version)
# If a new file, create the tables
if new_file:
self.create_tables()
except Error as e:
print(e)
return None
def close(self):
if self.conn is not None:
self.conn.commit()
self.conn.close()
def create_tables(self):
c = self.conn.cursor() # Create connection
# Creating a new SQLite table for Projects
c.execute('CREATE TABLE Projects (Name TEXT)')
# Creating a new SQLite table for Ensembles
c.execute('CREATE TABLE {tn} ({nf} {ft})'.format(tn='Ensembles', nf='Project', ft='INTEGER')) # Project index
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}".format(tn='Ensembles', cn='Correlation', ct='BLOB')) # Pickle data
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}".format(tn='Ensembles', cn='Amplitude', ct='BLOB')) # Pickle data
c.execute("ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}".format(tn='Ensembles', cn='BeamVel', ct='BLOB')) # Pickle data
self.conn.commit()
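# Example usage sketch (illustrative only; "example.db" is an arbitrary file
# name, not something the original project prescribes).
if __name__ == "__main__":
    project = Project("example.db")  # creates the Projects/Ensembles tables on first run
    project.close()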
|
the-stack_106_20141
|
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql import Row
#-----------------------------------------------------
# Create a DataFrame from a JSON file
# Input: JSON File
# In this example, there is one JSON object per line.
#------------------------------------------------------
# Input Parameters:
# a JSON file
#-------------------------------------------------------
# @author Mahmoud Parsian
#-------------------------------------------------------
#=====================================
def debug_file(input_path):
# Opening a file in python for reading is easy:
f = open(input_path, 'r')
# To get everything in the file, just use read()
file_contents = f.read()
#And to print the contents, just do:
print ("file_contents = \n" + file_contents)
# Don't forget to close the file when you're done.
f.close()
#end-def
#=====================================
def main():
#if len(sys.argv) != 2:
# print("Usage: datasource_json_reader_single_line.py <csv-file>", file=sys.stderr)
# exit(-1)
# create an instance of SparkSession
spark = SparkSession.builder.getOrCreate()
#
# read name of input file
input_path = sys.argv[1]
print("input path : ", input_path)
debug_file(input_path)
#=====================================
# Create a DataFrame from a given input JSON file
#=====================================
# Spark enable us to read JSON files
# and create a new DataFrame
# The following example reads a JSON file
# and creates a new DataFrame:
df = spark.read.json(input_path)
#
print("df.count() = " , df.count())
#
print("df.collect() = " , df.collect())
#
df.show(10, truncate=False)
#
df.printSchema()
# done!
spark.stop()
#end-def
#=====================================
if __name__ == '__main__':
main()
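# ----------------------------------------------------------------------------
# Illustrative note (not part of the original script): with default options,
# spark.read.json() expects one JSON object per line, for example a file
# containing:
#
#     {"name": "alex", "city": "sunnyvale"}
#     {"name": "jane", "city": "stanford"}
#
# A hypothetical invocation (the file path is an assumption):
#
#     spark-submit datasource_json_reader_single_line.py /tmp/people.json
# ----------------------------------------------------------------------------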
|
the-stack_106_20142
|
import asyncio
import functools
def event_handler(loop, stop=False):
print('Event handler called')
if stop:
print('stopping the loop')
loop.stop()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop.call_soon(functools.partial(event_handler, loop))
print('starting event loop')
loop.call_soon(functools.partial(event_handler, loop, stop=True))
loop.run_forever()
finally:
print('closing event loop')
loop.close()
|
the-stack_106_20144
|
from CommonServerPython import IncidentStatus, EntryType
response_incident = {"incident_id": "inc:afb5d1512a00480f53e9ad91dc3e4b55:1cf23a95678a421db810e11b5db693bd",
"cid": "24ab288b109b411aba970e570d1ddf58",
"host_ids": [
"afb5d1512a00480f53e9ad91dc3e4b55"
],
"hosts": [
{"device_id": "afb5d1512a00480f53e9ad91dc3e4b55",
"cid": "24ab288b109b411aba970e570d1ddf58",
"agent_load_flags": "0",
"agent_local_time": "2020-05-06T23:36:34.594Z",
"agent_version": "5.28.10902.0",
"bios_manufacturer": "Apple Inc.",
"bios_version": "1037.100.359.0.0 (iBridge: 17.16.14263.0.0,0)",
"config_id_base": "65994753",
"config_id_build": "10902",
"config_id_platform": "4",
"external_ip": "1.1.1.1",
"hostname": "SFO-M-Y81WHJ",
"first_seen": "2019-05-10T17:20:39Z",
"last_seen": "2020-05-17T16:59:42Z",
"local_ip": "1.1.1.1",
"mac_address": "86-89-ad-65-d0-30",
"major_version": "18",
"minor_version": "7",
"os_version": "Mojave (10.14)",
"platform_id": "1",
"platform_name": "Mac",
"product_type_desc": "Workstation",
"status": "normal",
"system_manufacturer": "Apple Inc.",
"system_product_name": "MacBookPro15,1",
"modified_timestamp": "2020-05-17T16:59:56Z"}
],
"created": "2020-05-17T17:30:38Z",
"start": "2020-05-17T17:30:38Z",
"end": "2020-05-17T17:30:38Z",
"state": "closed",
"status": 20,
"name": "Incident on SFO-M-Y81WHJ at 2020-05-17T17:30:38Z",
"description": "Objectives in this incident: Keep Access. Techniques: External Remote Services. "
"Involved hosts and end users: SFO-M-Y81WHJ.",
"tags": [
"Objective/Keep Access"
],
"fine_score": 38}
response_detection = {"cid": "20879a8064904ecfbb62c118a6a19411",
"created_timestamp": "2021-12-19T13:53:34.708949512Z",
"detection_id": "ldt:15dbb9d8f06b89fe9f61eb46e829d986:528715079668",
"device": {
"device_id": "15dbb9d7f06b45fe0f61eb46e829d986",
"cid": "20897a8064904ecfbb62c118a3a19411",
"agent_load_flags": "0",
"agent_local_time": "2021-12-03T22:06:35.590Z",
"agent_version": "6.30.14406.0",
"bios_manufacturer": "Google",
"bios_version": "Google",
"config_id_base": "65994853",
"config_id_build": "14706",
"config_id_platform": "3",
"external_ip": "35.224.136.145",
"hostname": "FALCON-CROWDSTR",
"first_seen": "2020-02-10T12:40:18Z",
"last_seen": "2021-12-19T13:35:53Z",
"local_ip": "10.128.0.7",
"mac_address": "42-03-0a-80-92-07",
"major_version": "10",
"minor_version": "0",
"os_version": "Windows Server 2019",
"platform_id": "0",
"platform_name": "Windows",
"product_type": "3",
"product_type_desc": "Server",
"status": "normal",
"system_manufacturer": "Google",
"system_product_name": "Google Compute Engine",
"modified_timestamp": "2021-12-19T13:51:07Z",
"instance_id": "5278723726495898635",
"service_provider": "GCP",
"service_provider_account_id": "578609343865"
},
"behaviors": [
{
"device_id": "15dbb9d8f06b45fe9f61eb46e829d986",
"timestamp": "2021-12-19T13:53:27Z",
"template_instance_id": "382",
"behavior_id": "10197",
"filename": "choice.exe",
"filepath": "\\Device\\HarddiskVolume1\\Windows\\System32\\choice.exe",
"alleged_filetype": "exe",
"cmdline": "choice /m crowdstrike_sample_detection",
"scenario": "suspicious_activity",
"objective": "Falcon Detection Method",
"tactic": "Malware",
"tactic_id": "CSTA0001",
"technique": "Malicious File",
"technique_id": "CST0001",
"display_name": "SampleTemplateDetection",
"description": "For evaluation only - benign, no action needed.",
"severity": 30,
"confidence": 80,
"ioc_type": "",
"ioc_value": "",
"ioc_source": "",
"ioc_description": "",
"user_name": "admin",
"user_id": "S-1-5-21-3482992587-1103702653-2661900019-1000",
"control_graph_id": "ctg:15dbb9d8f06b45fe9f61eb46e829d986:528715219540",
"triggering_process_graph_id": "pid:15dbb9d8f06b45fe9f61eb46e829d986:1560553487562",
"sha256": "90f352c1fb7b21cc0216b2f0701a236db92b786e4301904d28f4ec4cb81f2a8b",
"md5": "463b5477ff96ab86a01ba44bcc02b539",
"pattern_disposition": 0,
}
],
"email_sent": False,
"first_behavior": "2021-12-19T13:53:27Z",
"last_behavior": "2021-12-19T13:53:27Z",
"max_confidence": 80,
"max_severity": 30,
"max_severity_displayname": "Low",
"show_in_ui": True,
"status": "new",
"hostinfo": {
"domain": ""
},
"seconds_to_triaged": 0,
"seconds_to_resolved": 0,
"behaviors_processed": [
"pid:15dbb9d8f06b45fe9f61eb46e829d986:1560553487562:10194"
],
"date_updated": "2021-12-19T13:53:34.708949512Z"}
remote_incident_id = 'inc:afb5d1512a00480f53e9ad91dc3e4b55:1cf23a95678a421db810e11b5db693bd'
remote_detection_id = 'ldt:15dbb9d8f06b89fe9f61eb46e829d986:528715079668'
# remote_id, close_incident, incident_status, detection_status, mirrored_object, entries
get_remote_incident = (remote_incident_id,
False,
30,
None,
{'hosts.hostname': 'SFO-M-Y81WHJ', 'incident_type': 'incident', 'state': 'closed', 'status': 'In Progress',
'tags': ['Objective/Keep Access']},
[])
get_remote_incident_update = (remote_incident_id,
True,
25,
None,
{'hosts.hostname': 'SFO-M-Y81WHJ', 'incident_type': 'incident', 'state': 'closed',
'status': 'Reopened', 'tags': ['Objective/Keep Access']},
[{'Contents': {'dbotIncidentReopen': True}, 'ContentsFormat': 'json', 'Type': EntryType.NOTE}])
get_remote_incident_close = (remote_incident_id,
True,
40,
None,
{'hosts.hostname': 'SFO-M-Y81WHJ', 'incident_type': 'incident', 'state': 'closed',
'status': 'Closed', 'tags': ['Objective/Keep Access']},
[{'Contents': {'closeReason': 'Incident was closed on CrowdStrike Falcon',
'dbotIncidentClose': True}, 'ContentsFormat': 'json', 'Type': EntryType.NOTE}])
get_remote_incident_no_close = (remote_incident_id,
False,
40,
None,
{'hosts.hostname': 'SFO-M-Y81WHJ', 'incident_type': 'incident', 'state': 'closed',
'status': 'Closed', 'tags': ['Objective/Keep Access']},
[])
get_remote_detection = (remote_detection_id,
False,
None,
'in_progress',
{'behaviors.objective': 'Falcon Detection Method', 'behaviors.scenario': 'suspicious_activity',
'behaviors.tactic': 'Malware', 'behaviors.technique': 'Malicious File',
'device.hostname': 'FALCON-CROWDSTR', 'incident_type': 'detection', 'severity': 2,
'status': 'in_progress'},
[])
get_remote_detection_update = (remote_detection_id,
True,
None,
'reopened',
{'behaviors.objective': 'Falcon Detection Method', 'behaviors.scenario': 'suspicious_activity',
'behaviors.tactic': 'Malware', 'behaviors.technique': 'Malicious File',
'device.hostname': 'FALCON-CROWDSTR', 'incident_type': 'detection', 'severity': 2,
'status': 'reopened'},
[{'Contents': {'dbotIncidentReopen': True}, 'ContentsFormat': 'json', 'Type': EntryType.NOTE}])
get_remote_detection_close = (remote_detection_id,
True,
None,
'closed',
{'behaviors.objective': 'Falcon Detection Method', 'behaviors.scenario': 'suspicious_activity',
'behaviors.tactic': 'Malware', 'behaviors.technique': 'Malicious File',
'device.hostname': 'FALCON-CROWDSTR', 'incident_type': 'detection', 'severity': 2,
'status': 'closed'},
[{'Contents': {'closeReason': 'Detection was closed on CrowdStrike Falcon',
'dbotIncidentClose': True}, 'ContentsFormat': 'json', 'Type': EntryType.NOTE}])
get_remote_detection_no_close = (remote_detection_id,
False,
None,
'closed',
{'behaviors.objective': 'Falcon Detection Method', 'behaviors.scenario': 'suspicious_activity',
'behaviors.tactic': 'Malware', 'behaviors.technique': 'Malicious File',
'device.hostname': 'FALCON-CROWDSTR', 'incident_type': 'detection', 'severity': 2,
'status': 'closed'},
[])
get_remote_data_command_args = [get_remote_incident,
get_remote_incident_update,
get_remote_incident_close,
get_remote_incident_no_close,
get_remote_detection,
get_remote_detection_update,
get_remote_detection_close,
get_remote_detection_no_close,
]
# updated_object, entry_content, close_incident
incident_closes = ({'status': 'Closed'},
'dbotIncidentClose',
True)
incident_reopens = ({'status': 'Reopened'},
'dbotIncidentReopen',
True)
incident_reopens_other_status = ({'status': 'New'},
'dbotIncidentReopen',
True)
incident_not_closed = ({'status': 'In Progress'},
None,
False)
incident_no_status = ({},
None,
True)
set_xsoar_incident_entries_args = [incident_closes,
incident_reopens,
incident_reopens_other_status,
incident_not_closed,
incident_no_status,
]
# updated_object, entry_content, close_incident
detection_closes = ({'status': 'closed'},
'dbotIncidentClose',
True)
detection_reopens = ({'status': 'reopened'},
'dbotIncidentReopen',
True)
detection_reopens_other_status = ({'status': 'true_positive'},
'dbotIncidentReopen',
True)
detection_not_closed = ({'status': 'in_progress'},
None,
False)
detection_no_status = ({},
None,
True)
set_xsoar_detection_entries_args = [detection_closes,
detection_reopens,
detection_reopens_other_status,
detection_not_closed,
detection_no_status,
]
# updated_object, mirrored_data, mirroring_fields, output
keeping_updated_object = ({'incident_type': 'incident'},
{},
[],
{'incident_type': 'incident'})
keeping_empty_updated_object = ({}, {}, [], {})
no_nested_fields = ({'incident_type': 'incident'},
response_incident,
['state', 'status', 'tags'],
{'incident_type': 'incident',
'state': 'closed',
'status': 20,
'tags': ['Objective/Keep Access']})
fields_not_existing = ({},
response_incident,
['tactics.', 'techniques', 'objectives'],
{})
field_nested_dict_in_list = ({'incident_type': 'incident'},
response_incident,
['state', 'hosts.hostname'],
{'incident_type': 'incident',
'state': 'closed',
'hosts.hostname': 'SFO-M-Y81WHJ'})
field_nested_in_dict = ({}, response_detection,
['behaviors.tactic', 'behaviors.scenario', 'behaviors.objective',
'behaviors.technique'],
{'behaviors.objective': 'Falcon Detection Method', 'behaviors.scenario': 'suspicious_activity',
'behaviors.tactic': 'Malware', 'behaviors.technique': 'Malicious File'})
fields_nested_all_options = ({'incident_type': 'detection'},
response_detection,
['status', 'severity', 'behaviors.tactic', 'behaviors.scenario', 'behaviors.objective',
'behaviors.technique', 'device.hostname'],
{'incident_type': 'detection', 'status': 'new', 'behaviors.objective': 'Falcon Detection Method',
'behaviors.scenario': 'suspicious_activity', 'behaviors.tactic': 'Malware',
'behaviors.technique': 'Malicious File', 'device.hostname': 'FALCON-CROWDSTR'})
set_updated_object_args = [keeping_updated_object,
keeping_empty_updated_object,
no_nested_fields,
fields_not_existing,
field_nested_dict_in_list,
field_nested_in_dict,
fields_nested_all_options,
]
# args, to_mock, call_args, remote_id, prev_tags, close_in_cs_falcon_param
incident_changed_status = ({'data': {'status': 'New'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.ACTIVE,
'delta': {'status': 'New'}},
'update_incident_request',
[([remote_incident_id], '20', 'update_status')],
remote_incident_id,
None,
False)
incident_changed_tags = ({'data': {'tag': ['newTag']},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.PENDING,
'delta': {'tag': ['newTag']}},
'update_incident_request',
[([remote_incident_id], 'prevTag', 'delete_tag'),
([remote_incident_id], 'newTag', 'add_tag')],
remote_incident_id,
{'prevTag'},
False)
incident_changed_both = ({'data': {'tag': ['newTag'], 'status': 'Reopened'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.DONE,
'delta': {'tag': ['newTag'], 'status': 'Reopened'}},
'update_incident_request',
[([remote_incident_id], 'prevTag', 'delete_tag'),
([remote_incident_id], 'newTag', 'add_tag'),
([remote_incident_id], '25', 'update_status')],
remote_incident_id,
{'prevTag'},
False)
incident_changed_no_close = ({'data': {'tag': ['newTag'], 'status': 'Reopened'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.DONE,
'delta': {'tag': ['newTag']}},
'update_incident_request',
[([remote_incident_id], 'newTag', 'add_tag')],
remote_incident_id,
set(),
False)
incident_changed_param_close = ({'data': {'tag': ['newTag'], 'status': 'Reopened'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.ACTIVE,
'delta': {'tag': ['newTag']}},
'update_incident_request',
[([remote_incident_id], 'newTag', 'add_tag')],
remote_incident_id,
set(),
True)
incident_closed = ({'data': {'tag': ['newTag'], 'status': 'Reopened'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_incident_id,
'status': IncidentStatus.DONE,
'delta': {'closeReason': 'Other'}},
'update_incident_request',
[([remote_incident_id], '40', 'update_status')],
remote_incident_id,
set(),
True)
detection_changed = ({'data': {'status': 'new'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_detection_id,
'status': IncidentStatus.PENDING,
'delta': {'status': 'new'}},
'update_detection_request',
[([remote_detection_id], 'new')],
remote_detection_id,
None,
False)
detection_changed_no_close = ({'data': {'status': 'new'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_detection_id,
'status': IncidentStatus.DONE,
'delta': {'status': 'new'}},
'update_detection_request',
[([remote_detection_id], 'new')],
remote_detection_id,
None,
False)
detection_changed_param_close = ({'data': {'status': 'new'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_detection_id,
'status': IncidentStatus.ACTIVE,
'delta': {'status': 'new'}},
'update_detection_request',
[([remote_detection_id], 'new')],
remote_detection_id,
None,
True)
detection_closed = ({'data': {'status': 'new'},
'entries': [],
'incidentChanged': True,
'remoteId': remote_detection_id,
'status': IncidentStatus.DONE,
'delta': {'closeReason': 'Other'}},
'update_detection_request',
[([remote_detection_id], 'closed')],
remote_detection_id,
None,
True)
update_remote_system_command_args = [incident_changed_status,
incident_changed_tags,
incident_changed_both,
incident_changed_no_close,
incident_changed_param_close,
incident_closed,
detection_changed,
detection_changed_no_close,
detection_changed_param_close,
detection_closed,
]
# delta, close_in_cs_falcon_param, to_close
delta_closed = ({'closeReason': 'Other', 'closingUserId': 'admin', 'runStatus': ''},
True,
True)
param_no_close = ({'closeReason': 'Other', 'closingUserId': 'admin', 'runStatus': ''},
False,
False)
delta_not_closed = ({'status': 'new'},
True,
False)
no_close = ({},
False,
False)
close_in_cs_falcon_args = [delta_closed,
param_no_close,
delta_not_closed,
no_close,
]
# delta, inc_status, close_in_cs_falcon, detection_request_status
detection_closed_in_xsoar = ({'closeReason': 'Other'},
IncidentStatus.DONE,
True,
'closed')
detection_status_closed = ({'status': 'closed'},
IncidentStatus.ACTIVE,
False,
'closed')
detection_update_status_true_close_remote = ({'status': 'new'},
IncidentStatus.ACTIVE,
True,
'new')
detection_update_status_false_close_remote = ({'status': 'in_progress'},
IncidentStatus.ACTIVE,
False,
'in_progress')
detection_update_by_status_dont_close = ({'status': 'false_positive'},
IncidentStatus.DONE,
False,
'false_positive')
detection_didnt_change = ({},
IncidentStatus.ACTIVE,
False,
'')
update_remote_detection_args = [detection_closed_in_xsoar,
detection_status_closed,
detection_update_status_true_close_remote,
detection_update_status_false_close_remote,
detection_update_by_status_dont_close,
detection_didnt_change,
]
# delta, inc_status, close_in_cs_falcon, resolve_incident_status
incident_closed_in_xsoar = ({'closeReason': 'Other'},
IncidentStatus.DONE,
True,
'Closed')
incident_status_closed = ({'status': 'Closed'},
IncidentStatus.ACTIVE,
False,
'Closed')
incident_update_status_true_close_remote = ({'status': 'New'},
IncidentStatus.ACTIVE,
True,
'New')
incident_update_status_false_close_remote = ({'status': 'In Progress'},
IncidentStatus.ACTIVE,
False,
'In Progress')
incident_update_by_status_dont_close = ({'status': 'New'},
IncidentStatus.DONE,
False,
'New')
incident_didnt_change = ({},
IncidentStatus.ACTIVE,
False,
'')
update_remote_incident_status_args = [incident_closed_in_xsoar,
incident_status_closed,
incident_update_status_true_close_remote,
incident_update_status_false_close_remote,
incident_update_by_status_dont_close,
incident_didnt_change,
]
# tags, action_name
no_tags = (set(),
'add_tag')
one_tag_add = ({'tag1'},
'add_tag')
one_tag_delete = ({'Tag2'},
'delete_tag')
add_tags = ({'Objective/Keep Access', 'Detected', 'ignored'},
'add_tag')
delete_tags = ({'Objective/Keep Access', 'detected', 'Ignored'},
'delete_tag')
remote_incident_handle_tags_args = [no_tags,
one_tag_add,
one_tag_delete,
add_tags,
delete_tags,
]
|
the-stack_106_20145
|
from discord.ext import commands
import os
import traceback
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def ping(ctx):
await ctx.send('pong')
bot.run(token)
|
the-stack_106_20150
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetPollVotes(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0xb86e380e``
Parameters:
peer: :obj:`InputPeer <pyrogram.raw.base.InputPeer>`
id: ``int`` ``32-bit``
limit: ``int`` ``32-bit``
option (optional): ``bytes``
offset (optional): ``str``
Returns:
:obj:`messages.VotesList <pyrogram.raw.base.messages.VotesList>`
"""
__slots__: List[str] = ["peer", "id", "limit", "option", "offset"]
ID = 0xb86e380e
QUALNAME = "functions.messages.GetPollVotes"
def __init__(self, *, peer: "raw.base.InputPeer", id: int, limit: int, option: Union[None, bytes] = None, offset: Union[None, str] = None) -> None:
self.peer = peer # InputPeer
self.id = id # int
self.limit = limit # int
self.option = option # flags.0?bytes
self.offset = offset # flags.1?string
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetPollVotes":
flags = Int.read(data)
peer = TLObject.read(data)
id = Int.read(data)
option = Bytes.read(data) if flags & (1 << 0) else None
offset = String.read(data) if flags & (1 << 1) else None
limit = Int.read(data)
return GetPollVotes(peer=peer, id=id, limit=limit, option=option, offset=offset)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.option is not None else 0
flags |= (1 << 1) if self.offset is not None else 0
data.write(Int(flags))
data.write(self.peer.write())
data.write(Int(self.id))
if self.option is not None:
data.write(Bytes(self.option))
if self.offset is not None:
data.write(String(self.offset))
data.write(Int(self.limit))
return data.getvalue()
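# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not generated code): raw functions such as
# GetPollVotes are dispatched through the client; depending on the Pyrogram
# version this is Client.send() or Client.invoke(). The peer name and message
# id below are placeholders.
#
#     from pyrogram import Client
#     from pyrogram.raw import functions
#
#     async def example(app: Client):
#         peer = await app.resolve_peer("some_chat")
#         r = await app.send(functions.messages.GetPollVotes(peer=peer, id=123, limit=50))
#         print(r)
# ----------------------------------------------------------------------------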
|
the-stack_106_20152
|
import unittest
from transformers import XLMRobertaConfig
from transformers.testing_utils import require_torch
from .test_adapter import AdapterTestBase, make_config
from .test_adapter_conversion import ModelClassConversionTestMixin
@require_torch
class XLMRobertaClassConversionTest(
ModelClassConversionTestMixin,
AdapterTestBase,
unittest.TestCase,
):
config_class = XLMRobertaConfig
config = make_config(
XLMRobertaConfig,
hidden_size=32,
num_hidden_layers=4,
num_attention_heads=4,
intermediate_size=37,
)
|
the-stack_106_20153
|
"""Retrieve upstream datasets from a specified dataset."""
import argparse
from typing import List, Dict
import tamr_toolbox as tbox
from tamr_unify_client.dataset.resource import Dataset
def main(*, instance_connection_info: Dict[str, str], dataset_id: str) -> List[Dataset]:
"""Retrieve upstream datasets from a specified dataset.
Args:
instance_connection_info: Information for connecting to Tamr (host, port, username etc)
dataset_id: the dataset_id of the dataset for which upstream
datasets are being retrieved
Returns:
List of upstream datasets
"""
# Create the tamr client
tamr_client = tbox.utils.client.create(**instance_connection_info)
dataset = tamr_client.dataset.by_resource_id(dataset_id)
# Retrieve upstream projects
LOGGER.info(f"Retrieving upstream datasets for dataset: {dataset}")
upstream_datasets = tbox.utils.upstream.projects(dataset)
if upstream_datasets:
LOGGER.info(
f"The following upstream datasets were retrieved successfully {upstream_datasets}."
)
else:
LOGGER.info(f"No upstream datasets found for dataset {dataset}.")
return upstream_datasets
if __name__ == "__main__":
# Set up command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="path to a YAML configuration file", required=False)
args = parser.parse_args()
# Load the configuration from the file path provided or the default file path specified
CONFIG = tbox.utils.config.from_yaml(
path_to_file=args.config, default_path_to_file="/path/to/my/conf/dataset.config.yaml",
)
# Use the configuration to create a global logger
LOGGER = tbox.utils.logger.create(__name__, log_directory=CONFIG["logging_dir"])
# Run the main function
main(
instance_connection_info=CONFIG["my_tamr_instance"],
dataset_id=CONFIG["datasets"]["my_categorization_project_dataset"]["id"],
)
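# ----------------------------------------------------------------------------
# Illustrative configuration sketch. The top-level keys are inferred from the
# CONFIG look-ups above; the connection fields under my_tamr_instance are
# assumptions about what tbox.utils.client.create accepts, not documented
# values.
#
#     logging_dir: "/path/to/logs"
#     my_tamr_instance:
#         host: "localhost"
#         protocol: "http"
#         port: "9100"
#         username: "admin"
#         password: "password"
#     datasets:
#         my_categorization_project_dataset:
#             id: "5"
# ----------------------------------------------------------------------------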
|
the-stack_106_20157
|
import abc
import json
import os
import traceback
import ray
import tensorflow as tf
class BaseEnsemble(abc.ABC):
"""Base class for ensembles, every new ensemble algorithms needs to extend this class.
Args:
model_dir (str): Path to directory containing saved Keras models in .h5 format.
loss (callable): a callable taking (y_true, y_pred) as input.
size (int, optional): Number of unique models used in the ensemble. Defaults to 5.
verbose (bool, optional): Verbose mode. Defaults to True.
ray_address (str, optional): Address of the Ray cluster. If "auto" it will try to connect to an existing cluster. If "" it will start a local Ray cluster. Defaults to "".
num_cpus (int, optional): Number of CPUs allocated to load one model and predict. Defaults to 1.
num_gpus (int, optional): Number of GPUs allocated to load one model and predict. Defaults to None.
batch_size (int, optional): Batch size used batchify the inference of loaded models. Defaults to 32.
"""
def __init__(
self,
model_dir,
loss,
size=5,
verbose=True,
ray_address="",
num_cpus=1,
num_gpus=None,
batch_size=32,
):
self.model_dir = os.path.abspath(model_dir)
self.loss = loss
self.members_files = []
self.size = size
self.verbose = verbose
self.ray_address = ray_address
self.num_cpus = num_cpus
self.num_gpus = num_gpus
self.batch_size = batch_size
if not(ray.is_initialized()):
ray.init(address=self.ray_address)
def __repr__(self) -> str:
out = ""
out += f"Model Dir: {self.model_dir}\n"
out += f"Members files: {self.members_files}\n"
out += f"Ensemble size: {len(self.members_files)}/{self.size}\n"
return out
def _list_files_in_model_dir(self):
return [f for f in os.listdir(self.model_dir) if f[-2:] == "h5"]
@abc.abstractmethod
def fit(self, X, y):
"""Fit the current algorithm to the provided data.
Args:
X (array): The input data.
y (array): The output data.
Returns:
BaseEnsemble: The current fitted instance.
"""
@abc.abstractmethod
def predict(self, X):
"""Execute an inference of the ensemble for the provided data.
Args:
X (array): An array of input data.
Returns:
array: The prediction.
"""
@abc.abstractmethod
def evaluate(self, X, y, metrics=None):
"""Compute metrics based on the provided data.
Args:
X (array): An array of input data.
y (array): An array of true output data.
metrics (callable, optional): A metric. Defaults to None.
"""
def load_members_files(self, file: str = "ensemble.json") -> None:
"""Load the members composing an ensemble.
Args:
file (str, optional): Path of JSON file containing the ensemble members. All members needs to be accessible in ``model_dir``. Defaults to "ensemble.json".
"""
with open(file, "r") as f:
self.members_files = json.load(f)
def save_members_files(self, file: str = "ensemble.json") -> None:
"""Save the list of file names of the members of the ensemble in a JSON file.
Args:
file (str, optional): Path JSON file where the file names are saved. Defaults to "ensemble.json".
"""
with open(file, "w") as f:
json.dump(self.members_files, f)
def load(self, file: str) -> None:
"""Load an ensemble from a save.
Args:
file (str): Path to the save of the ensemble.
"""
self.load_members_files(file)
def save(self, file: str=None) -> None:
"""Save an ensemble.
Args:
file (str): Path to the save of the ensemble.
"""
self.save_members_files(file)
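# Minimal subclass sketch (illustrative only, not part of this package): a
# concrete ensemble has to implement fit/predict/evaluate. The member selection
# and uniform averaging below are placeholders showing the expected call
# pattern, not the strategy of any real ensemble class.
class UniformAveragingEnsembleSketch(BaseEnsemble):
    def fit(self, X, y):
        # Hypothetical choice: keep the first `size` saved models as members.
        self.members_files = self._list_files_in_model_dir()[: self.size]
        return self

    def predict(self, X):
        import numpy as np  # assumed available alongside TensorFlow
        models = [
            tf.keras.models.load_model(os.path.join(self.model_dir, f), compile=False)
            for f in self.members_files
        ]
        preds = [m.predict(X, batch_size=self.batch_size) for m in models]
        return np.mean(preds, axis=0)

    def evaluate(self, X, y, metrics=None):
        y_pred = self.predict(X)
        return {"loss": float(tf.reduce_mean(self.loss(y, y_pred)))}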
|
the-stack_106_20158
|
import argparse
import ast
import sys
import traceback
import zmq
from mldebugger.utils import record_python_run
def workflow_function(kw_args, diagnosis):
clauses = []
for clause in diagnosis:
clauses.append("".join([' kw_args[\'%s\'] %s %s and' % (p,
clause[p]['cp'],
"'{}'".format(clause[p]['v'])
if isinstance(clause[p]['v'], str)
else clause[p]['v']
)
for p in clause]
)
[:-4]
)
booleans = [eval(clause) for clause in clauses]
return not (bool(sum(booleans)))
parser = argparse.ArgumentParser()
parser.add_argument("--server", type=str, help="host responsible for execution requests")
parser.add_argument("--receive", type=str, help="port to receive messages on")
parser.add_argument("--send", type=str, help="port to send messages to")
args = parser.parse_args()
if args.server:
HOST = args.server
else:
HOST = 'localhost'
if args.receive:
RECEIVE = args.receive
else:
RECEIVE = '5557'
if args.send:
SEND = args.send
else:
SEND = '5558'
context = zmq.Context()
# Socket to receive messages on
receiver = context.socket(zmq.PULL)
receiver.connect("tcp://{0}:{1}".format(HOST, RECEIVE))
# Socket to send messages to
sender = context.socket(zmq.PUSH)
sender.connect("tcp://{0}:{1}".format(HOST, SEND))
# Process tasks forever
while True:
    data = receiver.recv_string()  # receive as str so the comparison and split below work under Python 3
if data == 'kill':
break
fields = data.split("|")
filename = fields[0]
parameter_list = ast.literal_eval(fields[1])
inputs = ast.literal_eval(fields[2])
outputs = ast.literal_eval(fields[3])
f = open(filename, "r")
diag = ast.literal_eval(f.read())
f.close()
kwargs = {}
for i in range(len(parameter_list)):
kwargs[inputs[i]] = parameter_list[i]
try:
result = workflow_function(kwargs, diag)
parameter_list.append(str(result))
except:
print("Exception in user code:")
print("-" * 60)
traceback.print_exc(file=sys.stdout)
print("-" * 60)
parameter_list.append(str(False))
kwargs['result'] = parameter_list[-1]
origin = None
if len(fields) == 5:
origin = fields[4]
record_python_run(kwargs, filename, origin=origin)
sender.send_string(str(parameter_list))
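# ----------------------------------------------------------------------------
# Producer-side sketch (illustrative only, not part of this worker): a
# ventilator process would PUSH task strings in the same
# "filename|parameter_list|inputs|outputs[|origin]" format parsed above and
# PULL results from the sender port. Ports and payload values are placeholders.
#
#     import zmq
#     ctx = zmq.Context()
#     tasks = ctx.socket(zmq.PUSH)
#     tasks.bind("tcp://*:5557")
#     tasks.send_string("diagnosis.txt|[1, 'abc']|['p1', 'p2']|['result']")
#     tasks.send_string("kill")  # ask the worker to shut down
# ----------------------------------------------------------------------------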
|
the-stack_106_20160
|
from datetime import datetime, date, timedelta
import os
import pytz
import numpy as np
def quantile_sorted(sorted_arr, quantile):
# For small arrays (less than about 4000 items) np.quantile is significantly
# slower than sorting the array and picking the quantile out by index. Computing
# quantiles this way significantly improves performance for computing
# trip time stats across all stops.
max_index = len(sorted_arr) - 1
quantile_index = max_index * quantile
quantile_index_int = int(quantile_index)
quantile_index_fractional = quantile_index - quantile_index_int
quantile_lower = sorted_arr[quantile_index_int]
if quantile_index_fractional > 0:
quantile_upper = sorted_arr[quantile_index_int + 1]
return quantile_lower + (quantile_upper - quantile_lower) * quantile_index_fractional
else:
return quantile_lower
def parse_date(date_str):
(y,m,d) = date_str.split('-')
return date(int(y),int(m),int(d))
# todo: allow specifying day(s) of week
def get_dates_in_range(start_date_str, end_date_str, max_dates=1000):
start_date = parse_date(start_date_str)
end_date = parse_date(end_date_str)
delta = end_date - start_date
if delta.days < 0:
raise Exception(f'start date after end date')
incr = timedelta(days=1)
res = []
cur_date = start_date
while True:
res.append(cur_date)
cur_date = cur_date + incr
if cur_date > end_date:
break
if len(res) > max_dates:
raise Exception(f'too many dates between {start_date_str} and {end_date_str}')
return res
# Haversine formula for calculating the distance between two lat/lon coordinates
# from a bird's-eye view; seems to differ by +-8 meters from the geopy distance
def haver_distance(lat1, lon1, lat2, lon2):
lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])
eradius = 6371000
latdiff = lat2 - lat1
londiff = lon2 - lon1
a = np.sin(latdiff/2)**2 + np.cos(lat1)*np.cos(lat2)*np.sin(londiff/2)**2
c = 2*np.arctan2(np.sqrt(a),np.sqrt(1-a))
distance = eradius*c
return distance
def render_dwell_time(seconds):
# remove 0 hours and replace 00 minutes with spaces to make it easier to scan column for large durations
return f'+{timedelta(seconds=round(seconds))}'.replace('+0:','+').replace('+00:','+ :')
def get_data_dir():
return f"{os.path.dirname(os.path.dirname(os.path.realpath(__file__)))}/data"
def get_timestamp_or_none(d: date, time_str: str, tz: pytz.timezone):
return int(get_localized_datetime(d, time_str, tz).timestamp()) if time_str is not None else None
def get_localized_datetime(d: date, time_str: str, tz: pytz.timezone):
time_str_parts = time_str.split('+') # + number of days
if len(time_str_parts[0].split(':')) == 2:
format = "%Y-%m-%d %H:%M"
else:
format = "%Y-%m-%d %H:%M:%S"
dt_str = f"{d.isoformat()} {time_str_parts[0]}"
dt = datetime.strptime(dt_str, format)
if len(time_str_parts) > 1:
dt = dt + timedelta(days=int(time_str_parts[1]))
return tz.localize(dt)
def get_intervals(start_time, end_time, interval_length):
# round start_time down and end_time up to allow for even intervals
rounded_start_time = datetime.strptime(start_time, '%H:%M:%S').replace(microsecond=0, second=0, minute=0)
rounded_end_time = datetime.strptime(end_time, '%H:%M:%S').replace(microsecond=0, second=0, minute=0) + timedelta(hours=1)
# save the date of the start time to account for the case that the end time is during the next day
start_day = rounded_start_time.date()
time_str_intervals = []
# if the next interval would extend beyond the end time, exclude it
while rounded_start_time <= rounded_end_time:
new_start_time = rounded_start_time + timedelta(hours = interval_length)
time_str_intervals.append((
rounded_start_time.strftime('%H:%M:%S'),
(new_start_time).strftime('%H:%M:%S') if start_day == new_start_time.date() else f"{(new_start_time).strftime('%H:%M:%S')}+1"
))
rounded_start_time = new_start_time
return time_str_intervals
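# Example usage sketch (illustrative only; the values are arbitrary):
if __name__ == '__main__':
    print(get_dates_in_range('2020-01-01', '2020-01-03'))
    print(get_intervals('07:15:00', '09:05:00', 1))
    print(quantile_sorted(sorted([5, 1, 9, 3]), 0.5))
    print(render_dwell_time(125))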
|
the-stack_106_20161
|
from datafaucet.metadata import reader
from datafaucet import paths
from datafaucet.utils import Singleton
import os
from textwrap import dedent
import pytest
from testfixtures import TempDirectory
from ruamel.yaml import YAML
yaml = YAML()
yaml.preserve_quotes = True
yaml.indent(mapping=4, sequence=4, offset=2)
@pytest.fixture()
def tempdir():
with TempDirectory() as dir:
original_dir = os.getcwd()
os.chdir(dir.path)
# clear all Singletons at the beginning of the test
Singleton._instances = {}
# init Paths here
dir.write('main.ipynb', b'')
yield dir
os.chdir(original_dir)
# noinspection PyProtectedMember
class Test_rootdir(object):
def test_empty(self, tempdir):
yml = ''
tempdir.write('loader.yml', dedent(yml).encode())
md = { 'profile':'default', 'engine': {}, 'logging': {}, 'providers': {}, 'resources': {}, 'variables': {}}
assert(reader.load('default')== md)
def test_minimal(self, tempdir):
yml = '''\
---
a:
b: 'ohoh'
c: 42
s: 1
'''
tempdir.write('loader.yml', dedent(yml).encode())
assert(reader.read(['loader.yml'])['default']['a'] == {'b': 'ohoh', 'c': 42, 's': 1})
def test_minimal_with_resources(self, tempdir):
yml = '''\
---
a:
b: 'ohoh'
c: 42
s: 1
resources:
hello:
best:resource
'''
tempdir.write('loader.yml', dedent(yml).encode())
assert(reader.read([os.path.join(tempdir.path, 'loader.yml')])['default']['a'] == {'b': 'ohoh', 'c': 42, 's': 1})
assert(reader.read([os.path.join(tempdir.path, 'loader.yml')])['default']['resources'] == {'hello': 'best:resource'})
def test_multiple_docs(self,tempdir):
yml = '''\
---
a:
b: 'ohoh'
resources:
hello:
a: 1
---
profile: second
c:
d: 'lalala'
resources:
world:
b: 2
'''
tempdir.write('loader.yml', dedent(yml).encode())
assert(reader.read(['loader.yml'])['default']['a'] == {'b': 'ohoh'})
assert(reader.read(['loader.yml'])['default']['resources'] == {'hello': {'a': 1}})
assert (reader.read(['loader.yml'])['second']['c'] == {'d': 'lalala'})
assert (reader.read(['loader.yml'])['second']['resources'] == {'world': {'b': 2}})
def test_multiple_files(self,tempdir):
yml_1 = '''\
---
a:
b: 'ohoh'
resources:
hello:
a: 1
---
profile: second
c:
d: 'lalala'
'''
yml_2 = '''\
---
resources:
hello:
aa: 1
---
profile: second
resources:
world:
b: 2
'''
tempdir.write('loader.yml', dedent(yml_1).encode())
tempdir.write('abc/loader.yml', dedent(yml_2).encode())
assert(reader.read(['loader.yml', 'abc/loader.yml'])['default']['a'] == {'b': 'ohoh'})
assert(reader.read(['loader.yml', 'abc/loader.yml'])['default']['resources'] == {'hello': {'a': 1, 'aa': 1}})
assert(reader.read(['loader.yml', 'abc/loader.yml'])['second']['c'] == {'d': 'lalala'})
assert(reader.read(['loader.yml', 'abc/loader.yml'])['second']['resources'] == {'world': {'b': 2}})
# noinspection PyProtectedMember
def test_render():
doc = '''
---
profile: default
resources:
input:
path: datasets/extract/{{ profile }}
format: parquet
provider: local-other
'''
ref = {
'resources': {
'input': {
'format': 'parquet',
'path': 'datasets/extract/default',
'provider': 'local-other'}},
'profile': 'default'}
md = yaml.load(dedent(doc))
res = reader.render(md)
assert (res == ref)
# noinspection PyProtectedMember
def test_render_multipass():
# no multipass currently
doc = '''
---
profile: test
resources:
oh : '{{ profile }}'
data:
path: datasets/extract/{{ resources.oh }}
format: parquet-{{ resources.oh }}
provider: local-{{ resources.oh }}
'''
ref = {
'resources': {
'data': {
'format': 'parquet-test',
'path': 'datasets/extract/test',
'provider': 'local-test'},
'oh': 'test'},
'profile': 'test'}
md = yaml.load(dedent(doc))
res = reader.render(md)
assert (res == ref)
# noinspection PyProtectedMember
def test_render_env():
os.environ['MYENVVAR'] = '/bin/bash'
doc = '''
---
profile: default
variables:
ref: default.variables.a0
a0: "{{ env('MYENVVAR') }}"
c0: "{{ ''|env('MYENVVAR')}}"
a1: "{{ env('MYENVVAR','default_value') }}"
c1: "{{ 'default_value'|env('MYENVVAR')}}"
a2: "{{ env('UNDEFINED_ENV', 'world') }}"
c2: "{{ 'world'|env('UNDEFINED_ENV')}}"
'''
ref = {'profile': 'default',
'variables': {
'a0': '/bin/bash',
'a1': '/bin/bash',
'a2': 'world',
'c0': '/bin/bash',
'c1': '/bin/bash',
'c2': 'world',
'ref': 'default.variables.a0'}}
md = yaml.load(dedent(doc))
res = reader.render(md)
assert (res == ref)
# noinspection PyProtectedMember
def test_render_multipass_concat():
os.environ['MYENVVAR'] = '/bin/bash'
doc = '''
---
variables:
a: "hello-{{ env('NOTFOUND_DEFAULT_VALUE', 'world') }}"
b: "one-{{ env('MYENVVAR') }}"
c: "two-{{ variables.b }}"
d: "three-{{ variables.c }}"
e: "{{ variables.c + '-plus-' + variables.d }}"
'''
ref = {'variables': {
'a': 'hello-world',
'b': 'one-/bin/bash',
'c': 'two-one-/bin/bash',
'd': 'three-two-one-/bin/bash',
'e': 'two-one-/bin/bash-plus-three-two-one-/bin/bash'}}
md = yaml.load(dedent(doc))
res = reader.render(md)
assert (res == ref)
|
the-stack_106_20162
|
class QueryBuilder(object):
@staticmethod
def build(query_details):
if "columns" in query_details.keys():
num_columns = len(query_details["columns"])
select_stmt = 'SELECT ' + ", ".join(num_columns * ['{}'])
columns = query_details["columns"]
else:
select_stmt = 'SELECT *'
columns = []
from_stmt = "FROM `sotorrent-org.2018_09_23.Posts`"
if "filters" in query_details.keys():
filters = "WHERE"
for query_filter in query_details["filters"]:
if "conector" in query_filter.keys():
conector = query_filter["conector"]
else:
conector = ""
column = query_filter["column"]
operator = query_filter["operator"]
if operator == "IN":
num_values = len(query_filter["value"])
value = "(" + ", ".join(num_values * ['{}']) + ")"
else:
value = "'{}'"
if len(conector) == 0:
new_filter = " ".join([column, operator, value])
else:
new_filter = " ".join([conector, column, operator, value])
filters = filters + " " + new_filter
filters = filters + " AND AnswerCount >= 1"
values = [item for qf in query_details["filters"]
for item in qf["values"]]
else:
filters = "WHERE AnswerCount >= 1"
values = []
query_inpt = columns + values
print("QUERY SUCCESSFULLY BUILD")
complete_query = " ".join(
[select_stmt, from_stmt, filters]).format(*query_inpt)
print(complete_query)
return complete_query
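# Example usage sketch (illustrative only; the column and filter values are
# arbitrary). Note that the builder fills placeholders from each filter's
# "values" list, and additionally reads "value" to size the placeholder list
# when the operator is "IN".
if __name__ == "__main__":
    example_details = {
        "columns": ["Id", "Title"],
        "filters": [
            {"column": "Tags", "operator": "LIKE", "values": ["%python%"]},
        ],
    }
    print(QueryBuilder.build(example_details))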
|