| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import collections
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
DATABASE_POLICY_TYPE = 'database'
NONRECURSIVE_POLICY_TYPE = 'nonrecursive'
ACTION_POLICY_TYPE = 'action'
MATERIALIZED_POLICY_TYPE = 'materialized'
DELTA_POLICY_TYPE = 'delta'
class Tracer(object):
def __init__(self):
self.expressions = []
self.funcs = [LOG.debug] # functions to call to trace
def trace(self, table):
self.expressions.append(table)
def is_traced(self, table):
return table in self.expressions or '*' in self.expressions
def log(self, table, msg, *args, **kwargs):
depth = kwargs.pop("depth", 0)
if kwargs:
raise TypeError("Unexpected keyword arguments: %s" % kwargs)
if self.is_traced(table):
for func in self.funcs:
func(("| " * depth) + msg, *args)
class StringTracer(Tracer):
def __init__(self):
super(StringTracer, self).__init__()
self.stream = six.moves.StringIO()
self.funcs.append(self.string_output)
def string_output(self, msg, *args):
self.stream.write((msg % args) + "\n")
def get_value(self):
return self.stream.getvalue()
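# Example of how the tracer classes above are meant to be used (an
# illustrative sketch; the table name, message and depth are arbitrary):
#
#   tracer = StringTracer()
#   tracer.trace('p')                      # or tracer.trace('*') for all tables
#   tracer.log('p', 'eval(%s)', 'p(x)', depth=2)
#   tracer.get_value()                     # -> '| | eval(p(x))\n'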
##############################################################################
# Logical Building Blocks
##############################################################################
class Proof(object):
"""A single proof.
Differs semantically from Database's
Proof in that this version represents a proof that spans rules,
instead of just a proof for a single rule.
"""
def __init__(self, root, children):
self.root = root
self.children = children
def __str__(self):
return self.str_tree(0)
def str_tree(self, depth):
s = " " * depth
s += str(self.root)
s += "\n"
for child in self.children:
s += child.str_tree(depth + 1)
return s
def leaves(self):
if len(self.children) == 0:
return [self.root]
result = []
for child in self.children:
result.extend(child.leaves())
return result
##############################################################################
# Events
##############################################################################
class EventQueue(object):
def __init__(self):
self.queue = collections.deque()
def enqueue(self, event):
self.queue.append(event)
def dequeue(self):
return self.queue.popleft()
def __len__(self):
return len(self.queue)
def __str__(self):
return "[" + ",".join([str(x) for x in self.queue]) + "]"
##############################################################################
# Abstract Theories
##############################################################################
class Theory(object):
def __init__(self, name=None, abbr=None, schema=None, theories=None,
id=None):
self.schema = schema
self.theories = theories
self.kind = None
self.id = id
self.tracer = Tracer()
if name is None:
self.name = repr(self)
else:
self.name = name
if abbr is None:
self.abbr = "th"
else:
self.abbr = abbr
maxlength = 6
if len(self.abbr) > maxlength:
self.trace_prefix = self.abbr[0:maxlength]
else:
self.trace_prefix = self.abbr + " " * (maxlength - len(self.abbr))
def set_id(self, id):
self.id = id
def initialize_tables(self, tablenames, facts):
"""initialize_tables
Event handler for (re)initializing a collection of tables. Clears
tables before assigning the new table content.
@facts must be an iterable containing compile.Fact objects.
"""
raise NotImplementedError
def actual_events(self, events):
"""Returns subset of EVENTS that are not noops."""
actual = []
for event in events:
if event.insert:
if event.formula not in self:
actual.append(event)
else:
if event.formula in self:
actual.append(event)
return actual
def debug_mode(self):
tr = Tracer()
tr.trace('*')
self.set_tracer(tr)
def set_tracer(self, tracer):
self.tracer = tracer
def get_tracer(self):
return self.tracer
def log(self, table, msg, *args, **kwargs):
msg = self.trace_prefix + ": " + msg
self.tracer.log(table, msg, *args, **kwargs)
def policy(self):
"""Return a list of the policy statements in this theory."""
raise NotImplementedError()
def content(self):
"""Return a list of the contents of this theory.
May be rules and/or data. Note: do not change name to CONTENTS, as this
is reserved for a dictionary of stuff used by TopDownTheory.
"""
raise NotImplementedError()
def tablenames(self, body_only=False, include_builtin=False):
tablenames = set()
for rule in self.policy():
tablenames |= rule.tablenames(
body_only=body_only, include_builtin=include_builtin)
return tablenames
def __str__(self):
return "Theory %s" % self.name
def content_string(self):
return '\n'.join([str(p) for p in self.content()]) + '\n'
def get_rule(self, ident):
for p in self.policy():
if hasattr(p, 'id') and str(p.id) == str(ident):
return p
return
def arity(self, tablename, modal=None):
"""Return the number of columns for the given tablename.
TABLENAME is of the form <policy>:<table> or <table>.
MODAL is the value of the modal operator.
"""
raise NotImplementedError()
|
{
"content_hash": "09116f36dbcef90cdbb0574c3733c8b8",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 78,
"avg_line_length": 28.682692307692307,
"alnum_prop": 0.5283271873952397,
"repo_name": "ekcs/congress",
"id": "9102b03639eebbfb3f9124a3d60ef6bfc7ccce48",
"size": "6597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/datalog/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19644"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1874341"
},
{
"name": "Shell",
"bytes": "8824"
}
],
"symlink_target": ""
}
|
from steve.util import scrapevideo, html_to_markdown
from process import process
from main.models import Episode
class Get_vimeo(process):
ready_state = None
def process_ep(self, ep):
if self.options.verbose: print(ep.id, ep.name)
meta = scrapevideo(ep.host_url)
# print ep.host_url
# print meta['description']
description = html_to_markdown(meta['description'])
ep.description = description
title = html_to_markdown(meta['title'])
if ep.name != title:
print(ep.host_url)
print("veyepar:\t%s" %( ep.name, ))
print(" vimeo:\t%s" %( title, ))
print()
ep.save()
ret = None
return ret
def add_more_options(self, parser):
parser.add_option('-u', '--update', action="store_true",
help='update when diff, else print' )
if __name__ == '__main__':
p=Get_vimeo()
p.main()
|
{
"content_hash": "947717d857890feb4e2682051e85076a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 65,
"avg_line_length": 23.825,
"alnum_prop": 0.5645330535152151,
"repo_name": "xfxf/veyepar",
"id": "9e3d177462f6085ce8715609035f986f3359307c",
"size": "1115",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dj/scripts/get_vimeo.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6132"
},
{
"name": "HTML",
"bytes": "88316"
},
{
"name": "JavaScript",
"bytes": "76640"
},
{
"name": "Python",
"bytes": "883375"
},
{
"name": "Ruby",
"bytes": "3503"
},
{
"name": "Shell",
"bytes": "82496"
}
],
"symlink_target": ""
}
|
import unittest
from beer_song import song, verse
class BeerTest(unittest.TestCase):
def test_a_verse(self):
self.assertEqual(
verse(8),
"8 bottles of beer on the wall, 8 bottles of beer.\n"
"Take one down and pass it around, "
"7 bottles of beer on the wall.\n"
)
def test_verse_1(self):
self.assertEqual(
verse(1),
"1 bottle of beer on the wall, 1 bottle of beer.\n"
"Take it down and pass it around, "
"no more bottles of beer on the wall.\n"
)
def test_verse_2(self):
self.assertEqual(
verse(2),
"2 bottles of beer on the wall, 2 bottles of beer.\n"
"Take one down and pass it around, 1 bottle of beer on the wall.\n"
)
def test_verse_0(self):
self.assertEqual(
verse(0),
"No more bottles of beer on the wall, no more bottles of beer.\n"
"Go to the store and buy some more, "
"99 bottles of beer on the wall.\n"
)
def test_songing_several_verses(self):
self.assertEqual(
song(8, 6),
"8 bottles of beer on the wall, 8 bottles of beer.\n"
"Take one down and pass it around, "
"7 bottles of beer on the wall.\n\n"
"7 bottles of beer on the wall, 7 bottles of beer.\n"
"Take one down and pass it around, "
"6 bottles of beer on the wall.\n\n"
"6 bottles of beer on the wall, 6 bottles of beer.\n"
"Take one down and pass it around, "
"5 bottles of beer on the wall.\n\n"
)
def test_song_all_the_rest_of_the_verses(self):
self.assertEqual(
song(3),
"3 bottles of beer on the wall, 3 bottles of beer.\n"
"Take one down and pass it around, "
"2 bottles of beer on the wall.\n\n"
"2 bottles of beer on the wall, 2 bottles of beer.\n"
"Take one down and pass it around, "
"1 bottle of beer on the wall.\n\n"
"1 bottle of beer on the wall, 1 bottle of beer.\n"
"Take it down and pass it around, "
"no more bottles of beer on the wall.\n\n"
"No more bottles of beer on the wall, no more bottles of beer.\n"
"Go to the store and buy some more, "
"99 bottles of beer on the wall.\n\n"
)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "221d3470c4462465f6564fe3cc43ae53",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 35.36619718309859,
"alnum_prop": 0.5380326563122262,
"repo_name": "rootulp/xpython",
"id": "858b72960defb588ab6de26119b5cfac95b900de",
"size": "2511",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "exercises/beer-song/beer_song_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209553"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import warnings
import random
fontsizes = [8, 16, 24, 32]
def example_plot(ax):
ax.plot([1, 2])
ax.set_xlabel('x-label', fontsize=random.choice(fontsizes))
ax.set_ylabel('y-label', fontsize=random.choice(fontsizes))
ax.set_title('Title', fontsize=random.choice(fontsizes))
fig, ax = plt.subplots()
example_plot(ax)
plt.tight_layout()
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
example_plot(ax1)
example_plot(ax2)
plt.tight_layout()
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
example_plot(ax1)
example_plot(ax2)
plt.tight_layout()
fig, axes = plt.subplots(nrows=3, ncols=3)
for row in axes:
for ax in row:
example_plot(ax)
plt.tight_layout()
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
plt.show()
fig = plt.figure()
import matplotlib.gridspec as gridspec
gs1 = gridspec.GridSpec(3, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
ax3 = fig.add_subplot(gs1[2])
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
# This raises warnings since tight layout cannot
# handle gridspec automatically. We are going to
# do that manually so we can filter the warning.
gs1.tight_layout(fig, rect=[None, None, 0.45, None])
gs2 = gridspec.GridSpec(2, 1)
ax4 = fig.add_subplot(gs2[0])
ax5 = fig.add_subplot(gs2[1])
example_plot(ax4)
example_plot(ax5)
with warnings.catch_warnings():
# This raises warnings since tight layout cannot
# handle gridspec automatically. We are going to
# do that manually so we can filter the warning.
warnings.simplefilter("ignore", UserWarning)
gs2.tight_layout(fig, rect=[0.45, None, None, None])
# now match the top and bottom of two gridspecs.
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.update(top=top, bottom=bottom)
gs2.update(top=top, bottom=bottom)
plt.show()
|
{
"content_hash": "17505dd6da78a8c1086b1575dfb220ea",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 63,
"avg_line_length": 22.36283185840708,
"alnum_prop": 0.6968737633557578,
"repo_name": "bundgus/python-playground",
"id": "88a5c37dc7b982eb1567d9a014de679e8c9bf047",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matplotlib-playground/examples/pylab_examples/demo_tight_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
}
|
r"""A lightweight JSON library used by the Scalyr agent to read JSON configuration
files and to serialize some parts of the server requests.
This library is used instead of python's default json library because
it supports some custom Scalyr extensions (chiefly it allows for comments
in the JSON).
The classes exported by this package are:
JsonObject -- A JSON object containing keys and fields. Has similar methods to a dict.
JsonArray -- A JSON array. Has similar methods to a list.
JsonConversionException -- Exception raised when conversion of a field in a JSON object fails.
JsonMissingFieldException -- Exception raised when a requested field in a JSON object is missing.
JsonParseException -- Exception raised when parsing a string as JSON fails.
The methods exported are:
parse -- Parses a string as JSON and returns the value.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "Steven Czerwinski <[email protected]>"
from scalyr_agent.json_lib.exceptions import JsonConversionException
from scalyr_agent.json_lib.exceptions import (
JsonMissingFieldException,
JsonParseException,
)
from scalyr_agent.json_lib.objects import (
JsonObject,
JsonArray,
ArrayOfStrings,
SpaceAndCommaSeparatedArrayOfStrings,
)
from scalyr_agent.json_lib.parser import parse
from scalyr_agent.json_lib.serializer import serialize_as_length_prefixed_string
__all__ = [
"parse",
"JsonObject",
"JsonArray",
"ArrayOfStrings",
"SpaceAndCommaSeparatedArrayOfStrings",
"JsonConversionException",
"JsonMissingFieldException",
"JsonParseException",
"serialize_as_length_prefixed_string",
]
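# Usage sketch (illustrative only; based on the exports documented above, and
# assuming dict-style access on JsonObject -- the key name and value are made
# up). Scalyr JSON allows comments, which standard JSON would reject:
#
#   from scalyr_agent.json_lib import parse, JsonParseException
#
#   try:
#       config = parse('{ "api_key": "abc123" }  // agent configuration')
#       key = config["api_key"]
#   except JsonParseException as e:
#       print("Could not parse configuration: %s" % e)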
|
{
"content_hash": "aa6db56ac94392ed58976eed3052f154",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 106,
"avg_line_length": 36.791666666666664,
"alnum_prop": 0.7344280860702151,
"repo_name": "scalyr/scalyr-agent-2",
"id": "c5c9685f33002ed8c41f3a548a8a41de83890bc0",
"size": "2414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scalyr_agent/json_lib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1926"
},
{
"name": "Dockerfile",
"bytes": "16280"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "JavaScript",
"bytes": "2191"
},
{
"name": "Jinja",
"bytes": "31315"
},
{
"name": "PowerShell",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "5244579"
},
{
"name": "Shell",
"bytes": "173598"
},
{
"name": "XSLT",
"bytes": "1082"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import patterns # noqa
from django.conf.urls.defaults import url # noqa
from openstack_dashboard.dashboards.settings.password import views
urlpatterns = patterns('',
url(r'^$', views.PasswordView.as_view(), name='index'))
|
{
"content_hash": "5ec9fb8caadaae21ded8473379158024",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 32.75,
"alnum_prop": 0.7519083969465649,
"repo_name": "kaiweifan/horizon",
"id": "1cc32f48b44239d2824e54eb2c65f3b662f75362",
"size": "925",
"binary": false,
"copies": "11",
"ref": "refs/heads/vip2",
"path": "openstack_dashboard/dashboards/settings/password/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160827"
},
{
"name": "JavaScript",
"bytes": "360901"
},
{
"name": "Python",
"bytes": "2832603"
},
{
"name": "Shell",
"bytes": "12986"
}
],
"symlink_target": ""
}
|
import sys
import pyvips
# import logging
# logging.basicConfig(level = logging.DEBUG)
# pyvips.cache_set_trace(True)
a = pyvips.Image.new_from_file(sys.argv[1])
profile = a.get("icc-profile-data")
with open('x.icm', 'wb') as f:  # binary mode: the ICC profile is raw bytes
f.write(profile)
|
{
"content_hash": "bdec7e5fb587d0a5475b184095291379",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 44,
"avg_line_length": 18.142857142857142,
"alnum_prop": 0.6968503937007874,
"repo_name": "jcupitt/pyvips",
"id": "7b3dced0af8a3abfe0fdb9f7d367ce670bb323bf",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/read_profile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115655"
},
{
"name": "Shell",
"bytes": "811"
}
],
"symlink_target": ""
}
|
"""
Verify src files in YAML are images and not too big
"""
from __future__ import print_function, unicode_literals
from PIL import Image # pip install pillow
import argparse
import glob
import os
import re
import sys
import tempfile
import wget # pip install wget
import yaml # pip install pyyaml
DISALLOWED_CHARS_REGEX = re.compile('[^a-z0-9_-]')
def load_yaml(filename):
"""
Load YAML data from a file
"""
with open(filename) as f:
# yaml.BaseLoader leaves everything as a string,
# so doesn't convert "no" to False
data = yaml.load(f, Loader=yaml.BaseLoader)
return data
def check_name(name):
"""Check emoji name is valid.
Return error if invalid.
Return None if valid."""
# http://stackoverflow.com/a/92000/724176
if DISALLOWED_CHARS_REGEX.search(name):
# Name is invalid
return ("Error: custom emoji names can only contain lower case "
"letters, numbers, dashes and underscores: {}".format(name))
else:
# Name is valid
return None
def name_from_path(path):
"""Given 'packs/frontend.yaml' return 'frontend'"""
basename = os.path.basename(path)
return os.path.splitext(basename)[0]
def create_dirs(dir):
"""Create directory and all intermediate-level directories"""
if not os.path.isdir(dir):
os.makedirs(dir)
def resize_image(im, yaml_filename, url):
"""Given:
* an image,
* a yaml_filename ('packs/packname.yaml')
* and a URL ('https://example.com/emojiname.png')
thumbnail it to no more than 128x128, preserving aspect ratio,
and save to 'resized/packname/emojiname.png'
"""
subdir = name_from_path(yaml_filename)
subdir = os.path.join("resized", subdir)
create_dirs(subdir)
im.thumbnail((128, 128), Image.BICUBIC)
outfile = os.path.basename(url)
outfile = os.path.join(subdir, outfile)
im.save(outfile)
return outfile
def check_yaml(yaml_filename, resize=False):
"""
Given emojipack YAML filename, check each image in the src field
is an image of the correct size
"""
errors = []
warnings = []
resized = []
out = "Checking {}".format(yaml_filename)
sys.stdout.write(out)
# monkey patch
wget.ulib.URLopener.version = (
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/35.0.1916.153 Safari/537.36 SE 2.X MetaSr 1.0")
data = load_yaml(yaml_filename)
urls_checked = set()
for emoji in data["emojis"]:
error = check_name(emoji["name"])
if error:
errors.append(error)
url = emoji["src"]
if url not in urls_checked:
urls_checked.add(url)
sys.stdout.write('.')
download = wget.download(url, tempfile.gettempdir(), bar=None)
# Square images work best. Image can't be larger than 128px in
# width or height, and must be smaller than 64K in file size.
if os.path.getsize(download) > 65536:
error = ("Error: must be smaller than 64K in file size: "
"{}").format(url)
errors.append(error)
with open(download, "rb") as f:
try:
# Is it an image?
im = Image.open(f)
if im.width > 128 or im.height > 128:
if resize:
outfile = resize_image(im, yaml_filename, url)
message = "Info: resized {} to {}".format(
url, outfile)
resized.append(message)
error = ("Error: image can't be larger than 128px "
"in width or height: {} {}".format(
im.size, url))
errors.append(error)
elif im.width != im.height:
warning = ("Warning: square images work best: "
"{} {}".format(im.size, url))
warnings.append(warning)
except IOError:
error = "Error: cannot open as image: {}".format(url)
errors.append(error)
f.close()
os.remove(download)
print()
print()
print("Found {} errors in {}".format(len(errors), yaml_filename))
if len(errors):
print("\n".join(errors))
print()
print("Fixed {} errors in {}".format(len(resized), yaml_filename))
if len(resized):
print("\n".join(resized))
print("Please re-upload and update YAML")
print()
print("Found {} warnings in {}".format(len(warnings), yaml_filename))
if len(warnings):
print("\n".join(warnings))
print()
return errors, warnings
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Verify src files in YAML are images and not too big",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('inspec', nargs='?',
help="Input file spec")
parser.add_argument('-r', '--resize',
action='store_true',
help="Whether to resize large files")
args = parser.parse_args()
if args.inspec:
filenames = glob.glob(args.inspec)
if not filenames:
sys.exit("No input files found matching " + args.inspec)
all_errors = []
all_warnings = []
for filename in filenames:
errors, warnings = check_yaml(filename, args.resize)
all_errors += errors
all_warnings += warnings
print("Found {} total errors and {} total warnings".format(
len(all_errors), len(all_warnings)))
sys.exit(len(all_errors))
# End of file
|
{
"content_hash": "05381a741567c9316ac83e559b4cce0e",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 78,
"avg_line_length": 31.19047619047619,
"alnum_prop": 0.5614927905004241,
"repo_name": "bchalls/emojipacks",
"id": "c42de5af1bce8c9723adf860f6697a6823300326",
"size": "5935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/image-checker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26647"
},
{
"name": "JavaScript",
"bytes": "8991"
},
{
"name": "Makefile",
"bytes": "484"
},
{
"name": "Python",
"bytes": "5935"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.transfer_event import TransferEvent
globals()['TransferEvent'] = TransferEvent
class TransferEventListResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'transfer_events': ([TransferEvent],), # noqa: E501
'request_id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'transfer_events': 'transfer_events', # noqa: E501
'request_id': 'request_id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, transfer_events, request_id, *args, **kwargs): # noqa: E501
"""TransferEventListResponse - a model defined in OpenAPI
Args:
transfer_events ([TransferEvent]):
request_id (str): A unique identifier for the request, which can be used for troubleshooting. This identifier, like all Plaid identifiers, is case sensitive.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.transfer_events = transfer_events
self.request_id = request_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
|
{
"content_hash": "630056bfe545314fedb6bed1af589b5b",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 169,
"avg_line_length": 39.94535519125683,
"alnum_prop": 0.5662106703146375,
"repo_name": "plaid/plaid-python",
"id": "f26884d131185db4f0e161b96a8753196cf846bf",
"size": "7310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/transfer_event_list_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
from .rest_api import RestSFMC
from .fuel_api import FuelSFMC
|
{
"content_hash": "668526fc4d820c62403061ed9ec6f22d",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8064516129032258,
"repo_name": "lexual/rocket-fuel-sdk",
"id": "51f7c4f8b22a1ffdf5dcf6caa50dc66d154a2d09",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rocket_fuel_sdk/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7683"
}
],
"symlink_target": ""
}
|
import os
import re
import webapp2
import uuid
from perf_insights import trace_info
import third_party.cloudstorage as gcs
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
def get(self):
self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
def post(self):
trace_uuid = str(uuid.uuid4())
bucket_name = ('/performance-insights/' + trace_uuid)
gcs_file = gcs.open(bucket_name,
'w',
content_type='application/octet-stream',
options={},
retry_params=default_retry_params)
gcs_file.write(self.request.get('trace'))
gcs_file.close()
trace_object = trace_info.TraceInfo(id=trace_uuid)
trace_object.prod = self.request.get('prod')
trace_object.network_type = self.request.get('network_type')
trace_object.remote_addr = os.environ["REMOTE_ADDR"]
tags_string = self.request.get('tags')
if re.match('^[a-zA-Z0-9,]+$', tags_string): # ignore non alpha-numeric tags
trace_object.tags = tags_string.split(',')
trace_object.user_agent = self.request.headers.get('User-Agent')
trace_object.ver = self.request.get('product_version')
trace_object.put()
self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
|
{
"content_hash": "122ceb46a48aea058cefbdf8fb3ebad5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 35.9622641509434,
"alnum_prop": 0.5912906610703043,
"repo_name": "zeptonaut/catapult",
"id": "1cd601a1841693d40131c352f10ce56848370572",
"size": "2073",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "perf_insights/perf_insights/upload.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19263"
},
{
"name": "HTML",
"bytes": "23156805"
},
{
"name": "JavaScript",
"bytes": "35437"
},
{
"name": "Python",
"bytes": "1231734"
}
],
"symlink_target": ""
}
|
import tests.appserver
from tests.web import *
import random
import string
class TestCase(WebTestCase):
@classmethod
def setup_class(cls):
super(TestCase, cls).setup_class()
cls._appserver = tests.appserver.AppServer(*tests.appserver.AppServer.fetch_config('default_appserver'))
cls._appserver.start()
@classmethod
def teardown_class(cls):
if cls._appserver:
cls._appserver.stop()
super(TestCase, cls).teardown_class()
def test_static(self):
home_url = urlparse.urljoin(self.__class__.get_root_url(), '/static/status/500.html')
self.__class__.goto(home_url)
with self.browser as B:
HAS_TITLE(B, 'Osadnici')
|
{
"content_hash": "9601174e7498ee903df680fdd9049b87",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 108,
"avg_line_length": 23.892857142857142,
"alnum_prop": 0.6905829596412556,
"repo_name": "happz/settlers",
"id": "2435cdc8fb2482283bc42cc2c9ece56872ec9e23",
"size": "669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/web/sanity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75190"
},
{
"name": "CoffeeScript",
"bytes": "111627"
},
{
"name": "Inno Setup",
"bytes": "3439"
},
{
"name": "JavaScript",
"bytes": "30274"
},
{
"name": "Makefile",
"bytes": "5003"
},
{
"name": "Mako",
"bytes": "66564"
},
{
"name": "Python",
"bytes": "278777"
},
{
"name": "Shell",
"bytes": "833"
}
],
"symlink_target": ""
}
|
r"""TELNET negotiation filter
This code was adapted from the telnetlib library included with Python 2.7,
and is being used under the PSF License agreement included below.
All changes to the original telnetlib code are copyright (c) 2019 R Pickett,
and licensed under the same terms.
======================================
1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
the Individual or Organization ("Licensee") accessing and otherwise using Python
2.7.15 software in source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python 2.7.15 alone or in any derivative
version, provided, however, that PSF's License Agreement and PSF's notice of
copyright, i.e., "Copyright © 2001-2019 Python Software Foundation; All Rights
Reserved" are retained in Python 2.7.15 alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python 2.7.15 or any part thereof, and wants to make the
derivative work available to others as provided herein, then Licensee hereby
agrees to include in any such work a brief summary of the changes made to Python
2.7.15.
4. PSF is making Python 2.7.15 available to Licensee on an "AS IS" basis.
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
USE OF PYTHON 2.7.15 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.15
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.15, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material breach of
its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any relationship
of agency, partnership, or joint venture between PSF and Licensee. This License
Agreement does not grant permission to use PSF trademarks or trade name in a
trademark sense to endorse or promote products or services of Licensee, or any
third party.
8. By copying, installing or otherwise using Python 2.7.15, Licensee agrees
to be bound by the terms and conditions of this License Agreement.
======================================
"""
from collections import deque
import wx
# Telnet protocol characters (don't change)
IAC = bytes([255]) # "Interpret As Command"
DONT = bytes([254])
DO = bytes([253])
WONT = bytes([252])
WILL = bytes([251])
theNULL = bytes([0])
SE = bytes([240]) # Subnegotiation End
NOP = bytes([241]) # No Operation
DM = bytes([242]) # Data Mark
BRK = bytes([243]) # Break
IP = bytes([244]) # Interrupt process
AO = bytes([245]) # Abort output
AYT = bytes([246]) # Are You There
EC = bytes([247]) # Erase Character
EL = bytes([248]) # Erase Line
GA = bytes([249]) # Go Ahead
SB = bytes([250]) # Subnegotiation Begin
############# MU*-RELATED PROTOCOLS
ECHO = bytes([1]) # echo
LINEMODE = bytes([34]) # Linemode option
EOR = bytes([25]) # end of record
NEW_ENVIRON = bytes([39]) # New - Environment variables
from filters.telnetiac.naws import handle_naws
NAWS = bytes([31]) # window size
# MTTS - MUD Terminal Type Standard (https://tintin.sourceforge.io/protocols/mtts/)
# (specific implementation of arpa Telnet IAC TTYPE command)
from filters.telnetiac.mtts import handle_mtts
MTTS = bytes([24]) # terminal type
# MSDP - MUD Server Data Protocol
MSDP = bytes([69])
# MSSP - MUD Server Status Protocol
from filters.telnetiac.mssp import handle_mssp
MSSP = bytes([70])
# MCCP - MUD Client Compression Protocol (http://www.gammon.com.au/mccp/protocol.html)
MCCP1 = bytes([85])
MCCP2 = bytes([86])
# MSP - Mud Sound Protocol (https://www.zuggsoft.com/zmud/msp.htm)
MSP = bytes([90])
# MXP - MUD eXtension Protocol (https://www.zuggsoft.com/zmud/mxp.htm)
MXP = bytes([91])
# ZMP - Zenith MUD Protocol (http://discworld.starturtle.net/external/protocols/zmp.html)
ZMP = bytes([93])
# ATCP - Achaea Telnet Client Protocol (https://www.ironrealms.com/rapture/manual/files/FeatATCP-txt.html)
ATCP = bytes([200])
# GMCP - Generic MUD Communication Protocol (http://www.gammon.com.au/gmcp)
GMCP = bytes([201])
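# Worked example of the negotiation handled below (byte values only; the
# surrounding connection plumbing is assumed): a server asking for window-size
# reporting sends IAC DO NAWS, i.e. b'\xff\xfd\x1f'. handle_iac_do_negotiation()
# answers IAC WILL NAWS (b'\xff\xfb\x1f') and then calls handle_naws(), which
# follows up with the IAC SB NAWS ... IAC SE subnegotiation carrying the
# terminal's width and height.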
def process_line(conn, line):
buf = [b'',b'']
iacseq = b''
sbdataq = b''
sb = 0
option_callback = None
# if we're compressing, decompress us back into a wad of bytes here.
if 'MCCP' in conn.features:
# TODO - try / except in case something breaks.
# From the MCCP site:
#
# If any stream errors are seen on the decompressing side, they should
# be treated as fatal errors, closing the connection. It is unlikely
# that the compressing side will be transmitting useful information
# after a compression error.
# count up the bytes to update the status icon
zbytes = len(line)
line = conn.decompressor.decompress(line)
ubytes = len(line)
zbytes -= len(conn.decompressor.unused_data)
conn.compressed_bytes += zbytes
conn.uncompressed_bytes += ubytes
percent = 100 - round(conn.compressed_bytes * 100 / conn.uncompressed_bytes,1)
conn.UpdateIcon('MCCP', f'MCCP enabled\n{conn.compressed_bytes} compressed bytes\n{conn.uncompressed_bytes} uncompressed bytes\n{percent}% compression')
# Re-queue leftover compressed data
conn.filter_queue = conn.decompressor.unused_data
line = deque(line)
while len(line):
c = bytes([line.popleft()])
if len(iacseq) == 0:
if c == theNULL:
continue
# This was removing some stuff 8BitMUSH uses for art. Why is this here?
#if c == b"\021":
#continue
if c != IAC:
buf[sb] += c
continue
else:
iacseq += c
elif len(iacseq) == 1:
# 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
iacseq += c
continue
iacseq = b''
if c == IAC:
buf[sb] = buf[sb] + c
else:
if c == SB: # SB ... SE start.
sb = 1
sbdataq = b''
elif c == SE:
sb = 0
sbdataq = sbdataq + buf[1]
buf[1] = b''
result = handle_iac_subnegotiation(sbdataq, conn)
if result == "requeue":
print("Time to requeue!")
conn.filter_queue = line
return
elif len(iacseq) == 2:
cmd = bytes([iacseq[1]])
iacseq = b''
opt = c
if cmd in (DO, DONT):
handle_iac_do_negotiation(cmd, opt, conn)
elif cmd in (WILL, WONT):
handle_iac_will_negotiation(cmd, opt, conn)
# end loop
return buf[0]
def handle_iac_do_negotiation(cmd, opt, conn):
if cmd == DO:
if opt == MTTS:
print("Got IAC DO MTTS; Sending IAC WILL MTTS")
conn.ActivateFeature('MTTS')
conn.output(IAC + WILL + MTTS)
elif opt == NEW_ENVIRON:
print("Got IAC DO NEW_ENVIRON; Sending IAC WONT NEW_ENVIRON")
conn.output(IAC + WONT + NEW_ENVIRON)
elif opt == NAWS:
print("Got IAC DO NAWS; Sending IAC WILL NAWS + x/y info")
conn.ActivateFeature('NAWS')
conn.output(IAC + WILL + NAWS)
handle_naws(conn)
elif opt == MXP:
# TODO - support this eventually
print("Got IAC DO MXP; Sending IAC WONT MXP")
conn.output(IAC + WONT + MXP)
elif opt == ATCP:
print("Got IAC DO ATCP; Sending IAC WONT ATCP")
conn.output(IAC + WONT + ATCP)
else:
print("Got an *unknown* negotiation IAC DO " + str(ord(opt)) + ", saying WONT")
conn.output(IAC + WONT + opt)
else:
if opt == MTTS:
print("Got IAC DONT MTTS; Resetting and sending WONT MTTS")
conn.mtts_reply = 0
conn.output(IAC + WONT + MTTS)
elif opt == NAWS:
print("Got IAC DONT NAWS; Sending IAC WONT NAWS")
conn.ActivateFeature('NAWS', False)
conn.output(IAC + WONT + NAWS)
else:
print("Got an *unknown* negotiation IAC DONT " + str(ord(opt)) + ", saying WONT")
conn.output(IAC + WONT + opt)
def handle_iac_will_negotiation(cmd, opt, conn):
if cmd == WILL:
if opt == MSDP:
# TODO - support this eventually
print("Got IAC WILL MSDP; Sending IAC DONT MSDP")
conn.output(IAC + DONT + MSDP)
elif opt == MSSP:
print("Got IAC WILL MSSP; Sending IAC DO MSSP")
conn.output(IAC + DO + MSSP)
conn.ActivateFeature('MSSP')
elif opt == MCCP1 or opt == MCCP2:
if 'MCCP' in conn.features:
answer = DONT
print("Got IAC WILL MCCP; Already compressing, Sending IAC DONT MCCP")
else:
answer = DO
print("Got IAC WILL MCCP; Sending IAC DO MCCP")
conn.output(IAC + answer + opt)
elif opt == MSP:
# TODO - support this eventually
print("Got IAC WILL MSP; Sending IAC DONT MSP")
conn.output(IAC + DONT + MSP)
elif opt == ZMP:
print("Got IAC WILL ZMP; Sending IAC DONT ZMP")
conn.output(IAC + DONT + ZMP)
elif opt == GMCP:
# TODO - support this eventually
print("Got IAC WILL GMCP; Sending IAC DONT GMCP")
conn.output(IAC + DONT + GMCP)
elif opt == ECHO:
print("Got IAC WILL ECHO")
conn.iac['ECHO'] = False
else:
print("Got an *unknown* negotiation IAC WILL " + str(ord(opt)) + ", saying DONT")
conn.output(IAC + DONT + opt)
elif cmd == WONT:
if opt == ECHO:
print("Got IAC WONT ECHO")
conn.iac['ECHO'] = True
else:
print("Got an *unknown* negotiation IAC WONT " + str(ord(opt)) + ", saying DONT")
conn.output(IAC + DONT + opt)
def handle_iac_subnegotiation(sbdataq, conn):
payload = deque(sbdataq)
SB_ID = bytes([payload.popleft()])
if SB_ID == MSSP:
handle_mssp(payload, conn)
elif SB_ID == MTTS:
handle_mtts(payload, conn)
elif SB_ID == MCCP1 or SB_ID == MCCP2:
# Turn on the compression flag on the connection and requeue all remaining bytes
conn.ActivateFeature('MCCP')
conn.compressed_bytes = conn.uncompressed_bytes = 0
return('requeue')
else:
print("unhandled IAC Subnegotiation")
print(sbdataq)
return
# Unimplemented / Unused / Uninteresting IAC protocols
#
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
#BINARY = bytes([0]) # 8-bit data path
#RCP = bytes([2]) # prepare to reconnect
#SGA = bytes([3]) # suppress go ahead
#NAMS = bytes([4]) # approximate message size
#STATUS = bytes([5]) # give status
#TM = bytes([6]) # timing mark
#RCTE = bytes([7]) # remote controlled transmission and echo
#NAOL = bytes([8]) # negotiate about output line width
#NAOP = bytes([9]) # negotiate about output page size
#NAOCRD = bytes([10]) # negotiate about CR disposition
#NAOHTS = bytes([11]) # negotiate about horizontal tabstops
#NAOHTD = bytes([12]) # negotiate about horizontal tab disposition
#NAOFFD = bytes([13]) # negotiate about formfeed disposition
#NAOVTS = bytes([14]) # negotiate about vertical tab stops
#NAOVTD = bytes([15]) # negotiate about vertical tab disposition
#NAOLFD = bytes([16]) # negotiate about output LF disposition
#XASCII = bytes([17]) # extended ascii character set
#LOGOUT = bytes([18]) # force logout
#BM = bytes([19]) # byte macro
#DET = bytes([20]) # data entry terminal
#SUPDUP = bytes([21]) # supdup protocol
#SUPDUPOUTPUT = bytes([22]) # supdup output
#SNDLOC = bytes([23]) # send location
#TUID = bytes([26]) # TACACS user identification
#OUTMRK = bytes([27]) # output marking
#TTYLOC = bytes([28]) # terminal location number
#VT3270REGIME = bytes([29]) # 3270 regime
#X3PAD = bytes([30]) # X.3 PAD
#TSPEED = bytes([32]) # terminal speed
#LFLOW = bytes([33]) # remote flow control
#XDISPLOC = bytes([35]) # X Display Location
#OLD_ENVIRON = bytes([36]) # Old - Environment variables
#AUTHENTICATION = bytes([37]) # Authenticate
#ENCRYPT = bytes([38]) # Encryption option
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
#TN3270E = bytes([40]) # TN3270E
#XAUTH = bytes([41]) # XAUTH
#CHARSET = bytes([42]) # CHARSET
#RSP = bytes([43]) # Telnet Remote Serial Port
#COM_PORT_OPTION = bytes([44]) # Com Port Control Option
#SUPPRESS_LOCAL_ECHO = bytes([45]) # Telnet Suppress Local Echo
#TLS = bytes([46]) # Telnet Start TLS
#KERMIT = bytes([47]) # KERMIT
#SEND_URL = bytes([48]) # SEND-URL
#FORWARD_X = bytes([49]) # FORWARD_X
#PRAGMA_LOGON = bytes([138]) # TELOPT PRAGMA LOGON
#SSPI_LOGON = bytes([139]) # TELOPT SSPI LOGON
#PRAGMA_HEARTBEAT = bytes([140]) # TELOPT PRAGMA HEARTBEAT
#EXOPL = bytes([255]) # Extended-Options-List
#NOOPT = bytes([0])
|
{
"content_hash": "00b0dd3636c756255633b80fba0ddaa5",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 160,
"avg_line_length": 40.74858757062147,
"alnum_prop": 0.6035355285961872,
"repo_name": "emersonrp/wxpymoo",
"id": "26d425b3293f4c6d4ab7b08d3241b6474e5a90d1",
"size": "14426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filters/telnetiac/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201427"
}
],
"symlink_target": ""
}
|
import re
class ReadLogWriteReport(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
def execute(self):
logFileName = 'log.txt'
totals = self.getTotalDroppedPacketsByIp(logFileName)
self.writeDroppedPacketsReport(totals)
def parseLine(self,line):
'''leverage regular expression to parse on space.'''
parsedLine = re.split(r'\s',line)
return parsedLine
def getIpAddress(self, line):
'''the line must be parsed for this method to work!'''
ip = line[2]
return ip
def getMessage(self, line):
'''the line must be parsed for this method to work!'''
message = line[3]
return message
def getNumberOfDroppedPackets(self, line):
numOfDroppedPackets = line[4]
return numOfDroppedPackets
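# Example of the line shape these accessors assume (the log format itself is
# not documented here, so this line is a hypothetical illustration): a
# whitespace-separated entry such as
#
#   '2016-01-01 00:00:01 10.0.0.1 drops 5'
#
# splits (ignoring the trailing newline) into
# ['2016-01-01', '00:00:01', '10.0.0.1', 'drops', '5'], so index 2 is the IP
# address, index 3 the message, and index 4 the dropped-packet count.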
def getTotalDroppedPacketsByIp(self, logFileName):
totalsByIp = {}
with open(logFileName, 'rt') as log:
for line in log:
parsedLine = self.parseLine(line)
ip = self.getIpAddress(parsedLine)
message = self.getMessage(parsedLine)
if message == 'drops':
value = self.getNumberOfDroppedPackets(parsedLine)
existingValue = totalsByIp.get(ip)
if existingValue is None:
totalsByIp[ip] = int(value)
else:
totalsByIp[ip] = existingValue + int(value)
return totalsByIp
def writeDroppedPacketsReport(self, totals):
with open('report.txt', 'wt') as f:
for key in totals.keys():
ip = key
totalPackets = totals[key]
line = ip + ' drops total ' + str(totalPackets) + ' packets.\n'
f.write(line)
f.close()
|
{
"content_hash": "3abd378230c77e9dadd090c1321feb65",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 30.37313432835821,
"alnum_prop": 0.5125307125307126,
"repo_name": "shadoobie/dbench",
"id": "2e1978713b6f7c85315b2fa3486af7f7f00e41f6",
"size": "2035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DistributedStorageBenchmarkTool/ReadLogWriteReport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25737"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from ios_code_generator.utils import to_mixed_case, to_camel_case
__author__ = 'banxi'
def test_to_mixed_case(): # noqa
assert to_mixed_case('user') == 'user'
assert to_mixed_case('User') == 'user'
assert to_mixed_case('userStory') == 'userStory'
assert to_mixed_case('UserStory') == 'userStory'
assert to_mixed_case('User-Story') == 'userStory'
assert to_mixed_case('User_Story') == 'userStory'
assert to_mixed_case('User Story') == 'userStory'
assert to_mixed_case('user story') == 'userStory'
def test_to_camel_case(): # noqa
assert to_camel_case('user') == 'User'
assert to_camel_case('User') == 'User'
assert to_camel_case('userStory') == 'UserStory'
assert to_camel_case('UserStory') == 'UserStory'
assert to_camel_case('User-Story') == 'UserStory'
assert to_camel_case('User_Story') == 'UserStory'
assert to_camel_case('User Story') == 'UserStory'
assert to_camel_case('user story') == 'UserStory'
|
{
"content_hash": "7ec45b8ae4c739f9a91ba5bc8511c674",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.647984267453294,
"repo_name": "banxi1988/iOSCodeGenerator",
"id": "ea2705b281728a3d9e76f33e4b0a96bef596d052",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ios_code_generator/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26077"
},
{
"name": "Python",
"bytes": "86476"
}
],
"symlink_target": ""
}
|
import jagpdf
import jag.testlib as testlib
import os
import sys
def test_main(argv=None):
#raw_input('.......')
if argv==None:
argv=sys.argv
out_file = os.path.abspath(os.path.join(argv[1], 'pattern-nonstd-op.pdf'))
cfg = testlib.test_config()
doc = jagpdf.create_stream(testlib.FileStreamOut(out_file), cfg)
patt_canvas = doc.canvas_create()
testlib.must_throw(doc.tiling_pattern_load,
"step=0, 0",
patt_canvas) # empty canvas
patt_canvas.move_to(5, 5)
patt_canvas.line_to(15, 15)
patt_canvas.path_paint("fs")
testlib.must_throw(doc.tiling_pattern_load,
"matrix=1, 0, 0, 1, 0, 0",
patt_canvas) # no step
patt_id = doc.tiling_pattern_load("step=20, 20", patt_canvas)
# use pattern
doc.page_start(5.9*72, 3.5*72)
writer = doc.page().canvas()
writer.color_space_pattern_uncolored("f", jagpdf.CS_DEVICE_GRAY)
writer.pattern("f", patt_id, .5)
writer.rectangle(36, 36, 144, 144)
writer.path_paint("sf")
#try to write to pattern that is already registered
patt_canvas.rectangle(7, 7, 6, 6)
patt_canvas.path_paint("fs")
writer.rectangle(36+144+4, 36, 144, 144)
writer.path_paint("sf")
doc.page_end()
# !!!! surprising - try to write to pattern that is already outputted
patt_canvas.circle(10, 10, 5)
patt_canvas.path_paint("s")
# !!!! this is wild -> causes abort
#patt_canvas.paint("fs")
doc.finalize()
#open("patt.pdf", "wb").write(stream.content())
if __name__ == "__main__":
test_main()
|
{
"content_hash": "f75ac81198ed668b296dafff8101f6c3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 30.11111111111111,
"alnum_prop": 0.5996309963099631,
"repo_name": "jgresula/jagpdf",
"id": "41439ea55dc23dce14d3a06da706b25ac08a7a63",
"size": "1813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/test/apitest/py/pattern-nonstd-op.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1904"
},
{
"name": "C",
"bytes": "16283"
},
{
"name": "C++",
"bytes": "1491801"
},
{
"name": "CMake",
"bytes": "89989"
},
{
"name": "Java",
"bytes": "21790"
},
{
"name": "Python",
"bytes": "631818"
},
{
"name": "Shell",
"bytes": "61805"
}
],
"symlink_target": ""
}
|
from array import array
import socket
import time
try:
from struct import calcsize, pack_into
except ImportError:
from ustruct import calcsize, pack_into
# Objective:
# Monitor the Geiger counter count every second
# Be able to provide:
# - count each second for the last 300 seconds
# - count each minute for the last 300 minutes
# - count each hour for the last 300 hours
# - count each day for the last 300 days (maybe)
#
# Assume max counting rate is < 2**16/sec
# Second-counts can be stored in uint16
# The rest should be uint32
# Alternative approaches, not implemented:
# Logarithm-based:
# - store every value in uint16
# - second counts are actual counts
# - all others are log-encoded
# enc = lambda x: int(log(x+1)*scale+0.5)
# dec = lambda x: int(exp(x/scale)-0.5)
# scale = 2918 to fit:
# enc((1<<16)*60*60*24) gives 65530
#
# Lin-log:
# - actual counts below knee
# - offset log above knee
# - knee is enc(scale)
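# Checking the scale arithmetic above (for clarity, not used at runtime):
# the largest value ever binned is a day of maximal per-second counts,
# (1<<16)*60*60*24 = 5_662_310_400, and with scale = 2918
# enc(5_662_310_400) = int(log(5_662_310_401)*2918 + 0.5)
#                    = int(22.457*2918 + 0.5) = 65530,
# which still fits in a uint16 (max 65535), as claimed.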
# A Strip is a window of stored history. It has a limited length
# of stored history. It has a period, which is the number of its
# stored values which get summed to make up the next single value
# in the next-coarser-grained Strip in an Accumulator (FIXME: refactor).
# It has a code, the storage type of the elements in the array
# that contains the history.
class Strip:
def __init__(self, code, length, period):
self.code = code
self.network_order_code = '!' + code # for noalloc pack_into(fmt)
self.data = array(code, (0 for i in range(length)))
self.period = period
self.ix = 0
self.count = 0
def note(self, v):
ix = self.ix = (self.ix + 1) % len(self.data)
self.data[ix] = v
self.count += 1
return self.count % self.period == 0
def last_n(self, n):
ix = self.ix
data = self.data
capacity = len(data)
for i in range(min(n, capacity, self.count)):
yield data[(ix + capacity - i)%capacity]
def period_sum(self):
return sum(self.last_n(self.period))
# An Accumulator is given an integer value every second, and can
# provide the "last N" values for seconds, and binned values
# for last N minutes, hours, and days, up to one year.
class Accumulator:
def __init__(self):
self.s = Strip('H', 300, 60)
self.m = Strip('L', 300, 60)
self.h = Strip('L', 300, 24)
self.d = Strip('L', 300, 365)
def log_value(self, v):
if self.s.note(v):
if self.m.note(self.s.period_sum()):
if self.h.note(self.m.period_sum()):
if self.d.note(self.h.period_sum()):
# a year's worth, FIXME
pass
def last_n_seconds(self, n):
yield from self.s.last_n(n)
def last_n_minutes(self, n):
yield from self.m.last_n(n)
def last_n_hours(self, n):
yield from self.h.last_n(n)
def last_n_days(self, n):
yield from self.d.last_n(n)
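# Usage sketch for the Accumulator (illustrative; the count source below is
# hypothetical, standing in for one Geiger reading per second):
#
#   acc = Accumulator()
#   for count in counts_from_geiger:       # one integer per elapsed second
#       acc.log_value(count)
#   recent = list(acc.last_n_seconds(60))  # newest value first
#   hourly = list(acc.last_n_hours(24))    # one binned value per elapsed hour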
# A Reporter reports some contents of an Accumulator log
# via packed binary UDP to a host
class Reporter:
def __init__(self, g, host, log):
self.g = g
self.host = host
self.log = log
self.addr = socket.getaddrinfo(host, 27183)[0][-1]
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.buf = bytearray(1024)
bv = self.bv = memoryview(self.buf)
bv[0] = 1 # version
bv[1:5] = g.uid
self.bi = 5
def send(self):
def append(strip, n):
# Append up to n values from the specified Strip, with header
nonlocal bi  # 'bi' is rebound below; declare nonlocal so the enclosing offset is updated
bi_for_header = bi
fmt = strip.network_order_code
assert len(fmt) == 2
bi += calcsize('!H2s')
di = calcsize(fmt)
qty = 0
for v in strip.last_n(n):
pack_into(fmt, buf, bi, v)
bi += di
qty += 1
pack_into('!H2s', buf, bi_for_header, qty, strip.network_order_code)
buf = self.buf
bi = self.bi
bv = self.bv
acc = self.log.acc
pack_into('!II', buf, bi, time.time(), acc.s.count)
bi += calcsize('!II')
append(acc.s, 60) # 120 bytes
append(acc.m, 60) # 240 bytes
append(acc.h, 24) # 96 bytes
append(acc.d, 30) # 120 bytes
# 576 bytes total
# Followed by the bssid's with signal strength
if True:
l_uchar = calcsize('!B')
bslist = self.g.bssids
pack_into('!B', buf, bi, len(bslist))
bi += l_uchar
for dbm, bssid in bslist:
pack_into('!B', buf, bi, 200+dbm)
bi += l_uchar
# Don't attempt to send more bssid's than fit
if bi+6 > len(buf):
break
bv[bi:bi+6] = bssid
bi += 6
r = self.s.sendto(bv[:bi], self.addr)
self.bi = 5
def update_bssids(g):
g.bssids = sorted(((v[3], v[1]) \
for v in g.wlan.scan()), reverse=True)
|
{
"content_hash": "33a1ed3bb3bb6d071a1f2bcc84f54ce2",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 80,
"avg_line_length": 31.634146341463413,
"alnum_prop": 0.5510794140323825,
"repo_name": "pramasoul/ESP-geiger",
"id": "3e36cb8824fac67766a87757fb9f82ac656ef6b2",
"size": "5287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96370"
}
],
"symlink_target": ""
}
|
"""
Decomposition of the circuit constraint in Google CP Solver.
Cf Global constraint catalog:
http://www.emn.fr/x-info/sdemasse/gccat/Ccircuit.html
Solution of n=4:
x: [2, 0, 3, 1]
x: [3, 0, 1, 2]
x: [1, 3, 0, 2]
x: [3, 2, 0, 1]
x: [1, 2, 3, 0]
x: [2, 3, 1, 0]
The 'orbit' method that is used here is based on some
observations on permutation orbits.
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/circuit_test.mzn
* Gecode: http://www.hakank.org/gecode/circuit_orbit.mzn
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
#
# circuit(x)
# constrains x to be a circuit
#
# Note: This assumes that x has the domain 0..len(x)-1,
# i.e. 0-based.
#
def circuit(solver, x):
n = len(x)
z = [solver.IntVar(0, n - 1, "z%i" % i) for i in range(n)]
solver.Add(solver.AllDifferent(x))
solver.Add(solver.AllDifferent(z))
# put the orbit of x[0] in z[0..n-1]
solver.Add(z[0] == x[0])
for i in range(1, n - 1):
# The following constraint give the error
# "TypeError: list indices must be integers, not IntVar"
# solver.Add(z[i] == x[z[i-1]])
# solution: use Element instead
solver.Add(z[i] == solver.Element(x, z[i - 1]))
#
# Note: At least one of the following two constraints must be set.
#
# may not be 0 for i < n-1
for i in range(1, n - 1):
solver.Add(z[i] != 0)
# when i = n-1 it must be 0
solver.Add(z[n - 1] == 0)
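# -- Hedged illustration (not part of the original model): for the listed
#    solution x = [2, 0, 3, 1] the orbit of x[0] is
#      z = [2, x[2], x[3], x[1]] = [2, 3, 1, 0],
#    so z[i] != 0 for i < n-1 and z[n-1] == 0, i.e. x is a single circuit.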
def main(n=5):
# Create the solver.
  solver = pywrapcp.Solver("Circuit")
# data
print("n:", n)
# declare variables
# Note: domain should be 0..n-1
x = [solver.IntVar(0, n - 1, "x%i" % i) for i in range(n)]
#
# constraints
#
circuit(solver, x)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
collector = solver.AllSolutionCollector(solution)
solver.Solve(
solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
[collector])
num_solutions = collector.SolutionCount()
for s in range(num_solutions):
print("x:", [collector.Value(s, x[i]) for i in range(len(x))])
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
print()
n = 5
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
main(n)
|
{
"content_hash": "95cb6abbce83030c434746d418da7912",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 76,
"avg_line_length": 21.76271186440678,
"alnum_prop": 0.6273364485981309,
"repo_name": "google/or-tools",
"id": "d1680b770e791223e641445add85d6564a1a99d3",
"size": "3168",
"binary": false,
"copies": "2",
"ref": "refs/heads/stable",
"path": "examples/contrib/circuit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18599"
},
{
"name": "C",
"bytes": "11382"
},
{
"name": "C#",
"bytes": "498888"
},
{
"name": "C++",
"bytes": "14071164"
},
{
"name": "CMake",
"bytes": "219723"
},
{
"name": "Dockerfile",
"bytes": "149476"
},
{
"name": "Java",
"bytes": "459136"
},
{
"name": "Lex",
"bytes": "2271"
},
{
"name": "Makefile",
"bytes": "207007"
},
{
"name": "Python",
"bytes": "629275"
},
{
"name": "SWIG",
"bytes": "414259"
},
{
"name": "Shell",
"bytes": "83555"
},
{
"name": "Starlark",
"bytes": "235950"
},
{
"name": "Yacc",
"bytes": "26027"
},
{
"name": "sed",
"bytes": "45"
}
],
"symlink_target": ""
}
|
"""Base classes / Design
The design is that there are three components fitting together in this project:
- Trials - a list of documents including at least sub-documents:
['spec'] - the specification of hyper-parameters for a job
['result'] - the result of Domain.evaluate(). Typically includes:
['status'] - one of the STATUS_STRINGS
['loss'] - real-valued scalar that hyperopt is trying to minimize
['idxs'] - compressed representation of spec
['vals'] - compressed representation of spec
['tid'] - trial id (unique in Trials list)
- Domain - specifies a search problem
- Ctrl - a channel for two-way communication
between an Experiment and Domain.evaluate.
Experiment subclasses may subclass Ctrl to match. For example, if an
experiment is going to dispatch jobs in other threads, then an
appropriate thread-aware Ctrl subclass should go with it.
"""
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/hyperopt/hyperopt"
import logging
import datetime
import os
import sys
import numpy as np
try:
import bson # -- comes with pymongo
from bson.objectid import ObjectId
have_bson = True
except ImportError:
have_bson = False
import pyll
#from pyll import scope # looks unused but
from pyll.stochastic import recursive_set_rng_kwarg
from .exceptions import (
DuplicateLabel, InvalidTrial, InvalidResultStatus, InvalidLoss)
from .utils import pmin_sampled
from .utils import use_obj_for_literal_in_memo
from .vectorize import VectorizeHelper
logger = logging.getLogger(__name__)
# -- STATUS values
# An eval_fn returning a dictionary must have a status key with
# one of these values. They are used by optimization routines
# and plotting functions.
STATUS_NEW = 'new'
STATUS_RUNNING = 'running'
STATUS_SUSPENDED = 'suspended'
STATUS_OK = 'ok'
STATUS_FAIL = 'fail'
STATUS_STRINGS = (
'new', # computations have not started
    'running',  # computations are in progress
'suspended', # computations have been suspended, job is not finished
'ok', # computations are finished, terminated normally
'fail') # computations are finished, terminated with error
# - result['status_fail'] should contain more info
# -- JOBSTATE values
# These are used internally by the scheduler.
# These values are used to communicate between an Experiment
# and a worker process. Consider moving them to mongoexp.
# -- named constants for job execution pipeline
JOB_STATE_NEW = 0
JOB_STATE_RUNNING = 1
JOB_STATE_DONE = 2
JOB_STATE_ERROR = 3
JOB_STATES = [
JOB_STATE_NEW,
JOB_STATE_RUNNING,
JOB_STATE_DONE,
JOB_STATE_ERROR]
TRIAL_KEYS = [
'tid',
'spec',
'result',
'misc',
'state',
'owner',
'book_time',
'refresh_time',
'exp_key']
TRIAL_MISC_KEYS = [
'tid',
'cmd',
'idxs',
'vals']
def _all_same(*args):
return 1 == len(set(args))
def SONify(arg, memo=None):
if not have_bson:
return arg
add_arg_to_raise = True
try:
if memo is None:
memo = {}
if id(arg) in memo:
rval = memo[id(arg)]
if isinstance(arg, ObjectId):
rval = arg
elif isinstance(arg, datetime.datetime):
rval = arg
elif isinstance(arg, np.floating):
rval = float(arg)
elif isinstance(arg, np.integer):
rval = int(arg)
elif isinstance(arg, (list, tuple)):
rval = type(arg)([SONify(ai, memo) for ai in arg])
elif isinstance(arg, dict):
rval = dict(
[(SONify(k, memo), SONify(v, memo)) for k, v in arg.items()])
elif isinstance(arg, (basestring, float, int, long, type(None))):
rval = arg
elif isinstance(arg, np.ndarray):
if arg.ndim == 0:
rval = SONify(arg.sum())
else:
rval = map(SONify, arg) # N.B. memo None
# -- put this after ndarray because ndarray not hashable
elif arg in (True, False):
rval = int(arg)
else:
add_arg_to_raise = False
raise TypeError('SONify', arg)
except Exception, e:
if add_arg_to_raise:
e.args = e.args + (arg,)
raise
memo[id(rval)] = rval
return rval
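# -- Hedged illustration (not part of the original module): when bson is
#    importable, SONify flattens numpy scalars/arrays into plain Python values,
#    e.g. SONify({'a': np.float64(1.5), 'b': np.arange(3)})
#    -> {'a': 1.5, 'b': [0, 1, 2]}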
def miscs_update_idxs_vals(miscs, idxs, vals,
assert_all_vals_used=True,
idxs_map=None):
"""
Unpack the idxs-vals format into the list of dictionaries that is
`misc`.
idxs_map: a dictionary of id->id mappings so that the misc['idxs'] can
contain different numbers than the idxs argument. XXX CLARIFY
"""
if idxs_map is None:
idxs_map = {}
assert set(idxs.keys()) == set(vals.keys())
misc_by_id = dict([(m['tid'], m) for m in miscs])
for m in miscs:
m['idxs'] = dict([(key, []) for key in idxs])
m['vals'] = dict([(key, []) for key in idxs])
for key in idxs:
assert len(idxs[key]) == len(vals[key])
for tid, val in zip(idxs[key], vals[key]):
tid = idxs_map.get(tid, tid)
if assert_all_vals_used or tid in misc_by_id:
misc_by_id[tid]['idxs'][key] = [tid]
misc_by_id[tid]['vals'][key] = [val]
return miscs
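# -- Hedged example (not part of the original module): a toy run of
#    miscs_update_idxs_vals showing how the compressed idxs/vals format is
#    unpacked into per-trial misc documents.
def _example_miscs_update_idxs_vals():
    miscs = [{'tid': 0, 'cmd': None}, {'tid': 1, 'cmd': None}]
    idxs = {'x': [0, 1], 'y': [1]}       # which tids each hyperparameter touches
    vals = {'x': [0.2, 0.7], 'y': [3]}   # sampled values, aligned with idxs
    miscs_update_idxs_vals(miscs, idxs, vals)
    # miscs[0] now has idxs {'x': [0], 'y': []} and vals {'x': [0.2], 'y': []}
    # miscs[1] now has idxs {'x': [1], 'y': [1]} and vals {'x': [0.7], 'y': [3]}
    return miscs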
def miscs_to_idxs_vals(miscs, keys=None):
if keys is None:
if len(miscs) == 0:
raise ValueError('cannot infer keys from empty miscs')
keys = miscs[0]['idxs'].keys()
idxs = dict([(k, []) for k in keys])
vals = dict([(k, []) for k in keys])
for misc in miscs:
for node_id in idxs:
t_idxs = misc['idxs'][node_id]
t_vals = misc['vals'][node_id]
assert len(t_idxs) == len(t_vals)
assert t_idxs == [] or t_idxs == [misc['tid']]
idxs[node_id].extend(t_idxs)
vals[node_id].extend(t_vals)
return idxs, vals
def spec_from_misc(misc):
spec = {}
for k, v in misc['vals'].items():
if len(v) == 0:
pass
elif len(v) == 1:
spec[k] = v[0]
else:
raise NotImplementedError('multiple values', (k, v))
return spec
class Trials(object):
"""Database interface supporting data-driven model-based optimization.
The model-based optimization algorithms used by hyperopt's fmin function
work by analyzing samples of a response surface--a history of what points
in the search space were tested, and what was discovered by those tests.
A Trials instance stores that history and makes it available to fmin and
to the various optimization algorithms.
This class (`base.Trials`) is a pure-Python implementation of the database
in terms of lists of dictionaries. Subclass `mongoexp.MongoTrials`
implements the same API in terms of a mongodb database running in another
process. Other subclasses may be implemented in future.
The elements of `self.trials` represent all of the completed, in-progress,
and scheduled evaluation points from an e.g. `fmin` call.
Each element of `self.trials` is a dictionary with *at least* the following
keys:
* **tid**: a unique trial identification object within this Trials instance
usually it is an integer, but it isn't obvious that other sortable,
hashable objects couldn't be used at some point.
* **result**: a sub-dictionary representing what was returned by the fmin
evaluation function. This sub-dictionary has a key 'status' with a value
      from `STATUS_STRINGS`; if the status is `STATUS_OK`, then there should be
a 'loss' key as well with a floating-point value. Other special keys in
this sub-dictionary may be used by optimization algorithms (see them
for details). Other keys in this sub-dictionary can be used by the
      evaluation function to store miscellaneous diagnostics and debugging
information.
* **misc**: despite generic name, this is currently where the trial's
      hyperparameter assignments are stored. This sub-dictionary has two
elements: `'idxs'` and `'vals'`. The `vals` dictionary is
a sub-sub-dictionary mapping each hyperparameter to either `[]` (if the
hyperparameter is inactive in this trial), or `[<val>]` (if the
hyperparameter is active). The `idxs` dictionary is technically
redundant -- it is the same as `vals` but it maps hyperparameter names
to either `[]` or `[<tid>]`.
"""
async = False
def __init__(self, exp_key=None, refresh=True):
self._ids = set()
self._dynamic_trials = []
self._exp_key = exp_key
self.attachments = {}
if refresh:
self.refresh()
def view(self, exp_key=None, refresh=True):
rval = object.__new__(self.__class__)
rval._exp_key = exp_key
rval._ids = self._ids
rval._dynamic_trials = self._dynamic_trials
rval.attachments = self.attachments
if refresh:
rval.refresh()
return rval
def aname(self, trial, name):
return 'ATTACH::%s::%s' % (trial['tid'], name)
def trial_attachments(self, trial):
"""
Support syntax for load: self.trial_attachments(doc)[name]
# -- does this work syntactically?
# (In any event a 2-stage store will work)
Support syntax for store: self.trial_attachments(doc)[name] = value
"""
# don't offer more here than in MongoCtrl
class Attachments(object):
def __contains__(_self, name):
return self.aname(trial, name) in self.attachments
def __getitem__(_self, name):
return self.attachments[self.aname(trial, name)]
def __setitem__(_self, name, value):
self.attachments[self.aname(trial, name)] = value
def __delitem__(_self, name):
del self.attachments[self.aname(trial, name)]
return Attachments()
def __iter__(self):
try:
return iter(self._trials)
except AttributeError:
print >> sys.stderr, "You have to refresh before you iterate"
raise
def __len__(self):
try:
return len(self._trials)
except AttributeError:
print >> sys.stderr, "You have to refresh before you compute len"
raise
def __getitem__(self, item):
# -- how to make it obvious whether indexing is by _trials position
# or by tid if both are integers?
raise NotImplementedError('')
def refresh(self):
# In MongoTrials, this method fetches from database
if self._exp_key is None:
self._trials = [
tt for tt in self._dynamic_trials
if tt['state'] != JOB_STATE_ERROR]
else:
self._trials = [tt
for tt in self._dynamic_trials
if (tt['state'] != JOB_STATE_ERROR
and tt['exp_key'] == self._exp_key)]
self._ids.update([tt['tid'] for tt in self._trials])
@property
def trials(self):
return self._trials
@property
def tids(self):
return [tt['tid'] for tt in self._trials]
@property
def specs(self):
return [tt['spec'] for tt in self._trials]
@property
def results(self):
return [tt['result'] for tt in self._trials]
@property
def miscs(self):
return [tt['misc'] for tt in self._trials]
@property
def idxs_vals(self):
return miscs_to_idxs_vals(self.miscs)
@property
def idxs(self):
return self.idxs_vals[0]
@property
def vals(self):
return self.idxs_vals[1]
def assert_valid_trial(self, trial):
if not (hasattr(trial, 'keys') and hasattr(trial, 'values')):
raise InvalidTrial('trial should be dict-like', trial)
for key in TRIAL_KEYS:
if key not in trial:
raise InvalidTrial('trial missing key %s', key)
for key in TRIAL_MISC_KEYS:
if key not in trial['misc']:
raise InvalidTrial('trial["misc"] missing key', key)
if trial['tid'] != trial['misc']['tid']:
raise InvalidTrial(
'tid mismatch between root and misc',
trial)
# -- check for SON-encodable
if have_bson:
try:
bson.BSON.encode(trial)
except:
# TODO: save the trial object somewhere to inspect, fix, re-insert
# so that precious data is not simply deallocated and lost.
print '-' * 80
print "CANT ENCODE"
print '-' * 80
raise
if trial['exp_key'] != self._exp_key:
raise InvalidTrial('wrong exp_key',
(trial['exp_key'], self._exp_key))
# XXX how to assert that tids are unique?
return trial
def _insert_trial_docs(self, docs):
"""insert with no error checking
"""
rval = [doc['tid'] for doc in docs]
self._dynamic_trials.extend(docs)
return rval
def insert_trial_doc(self, doc):
"""insert trial after error checking
Does not refresh. Call self.refresh() for the trial to appear in
self.specs, self.results, etc.
"""
doc = self.assert_valid_trial(SONify(doc))
return self._insert_trial_docs([doc])[0]
# refreshing could be done fast in this base implementation, but with
# a real DB the steps should be separated.
def insert_trial_docs(self, docs):
""" trials - something like is returned by self.new_trial_docs()
"""
asserted_docs = []
for doc in docs:
if isinstance(doc, list):
asserted_docs.append(self.assert_valid_trial(doc[0]))
else:
asserted_docs.append(self.assert_valid_trial(doc))
return self._insert_trial_docs(asserted_docs)
def new_trial_ids(self, N):
aa = len(self._ids)
rval = range(aa, aa + N)
self._ids.update(rval)
return rval
def new_trial_docs(self, tids, specs, results, miscs):
assert len(tids) == len(specs) == len(results) == len(miscs)
rval = []
for tid, spec, result, misc in zip(tids, specs, results, miscs):
doc = dict(
state=JOB_STATE_NEW,
tid=tid,
spec=spec,
result=result,
misc=misc)
doc['exp_key'] = self._exp_key
doc['owner'] = None
doc['version'] = 0
doc['book_time'] = None
doc['refresh_time'] = None
rval.append(doc)
return rval
def source_trial_docs(self, tids, specs, results, miscs, sources):
        assert _all_same(*map(len, [tids, specs, results, miscs, sources]))
rval = []
for tid, spec, result, misc, source in zip(tids, specs, results, miscs,
sources):
doc = dict(
version=0,
tid=tid,
spec=spec,
result=result,
misc=misc,
state=source['state'],
exp_key=source['exp_key'],
owner=source['owner'],
book_time=source['book_time'],
refresh_time=source['refresh_time'],
)
# -- ensure that misc has the following fields,
            # some of which may already be set correctly.
assign = ('tid', tid), ('cmd', None), ('from_tid', source['tid'])
for k, v in assign:
assert doc['misc'].setdefault(k, v) == v
rval.append(doc)
return rval
def delete_all(self):
self._dynamic_trials = []
self.attachments = {}
self.refresh()
def count_by_state_synced(self, arg, trials=None):
"""
Return trial counts by looking at self._trials
"""
if trials is None:
trials = self._trials
if arg in JOB_STATES:
queue = [doc for doc in trials if doc['state'] == arg]
elif hasattr(arg, '__iter__'):
states = set(arg)
assert all([x in JOB_STATES for x in states])
queue = [doc for doc in trials if doc['state'] in states]
else:
raise TypeError(arg)
rval = len(queue)
return rval
def count_by_state_unsynced(self, arg):
"""
Return trial counts that count_by_state_synced would return if we
called refresh() first.
"""
if self._exp_key is not None:
exp_trials = [tt
for tt in self._dynamic_trials
if tt['exp_key'] == self._exp_key]
else:
exp_trials = self._dynamic_trials
return self.count_by_state_synced(arg, trials=exp_trials)
def losses(self, bandit=None):
if bandit is None:
return [r.get('loss') for r in self.results]
else:
return map(bandit.loss, self.results, self.specs)
def statuses(self, bandit=None):
if bandit is None:
return [r.get('status') for r in self.results]
else:
return map(bandit.status, self.results, self.specs)
def average_best_error(self, bandit=None):
"""Return the average best error of the experiment
Average best error is defined as the average of bandit.true_loss,
weighted by the probability that the corresponding bandit.loss is best.
For domains with loss measurement variance of 0, this function simply
returns the true_loss corresponding to the result with the lowest loss.
"""
if bandit is None:
results = self.results
loss = [r['loss']
for r in results if r['status'] == STATUS_OK]
loss_v = [r.get('loss_variance', 0)
for r in results if r['status'] == STATUS_OK]
true_loss = [r.get('true_loss', r['loss'])
for r in results if r['status'] == STATUS_OK]
else:
def fmap(f):
rval = np.asarray([
f(r, s)
for (r, s) in zip(self.results, self.specs)
if bandit.status(r) == STATUS_OK]).astype('float')
if not np.all(np.isfinite(rval)):
raise ValueError()
return rval
loss = fmap(bandit.loss)
loss_v = fmap(bandit.loss_variance)
true_loss = fmap(bandit.true_loss)
loss3 = zip(loss, loss_v, true_loss)
if not loss3:
raise ValueError('Empty loss vector')
loss3.sort()
loss3 = np.asarray(loss3)
if np.all(loss3[:, 1] == 0):
best_idx = np.argmin(loss3[:, 0])
return loss3[best_idx, 2]
else:
cutoff = 0
sigma = np.sqrt(loss3[0][1])
while (cutoff < len(loss3)
and loss3[cutoff][0] < loss3[0][0] + 3 * sigma):
cutoff += 1
pmin = pmin_sampled(loss3[:cutoff, 0], loss3[:cutoff, 1])
#print pmin
#print loss3[:cutoff, 0]
#print loss3[:cutoff, 1]
#print loss3[:cutoff, 2]
avg_true_loss = (pmin * loss3[:cutoff, 2]).sum()
return avg_true_loss
@property
def best_trial(self):
"""Trial with lowest loss and status=STATUS_OK
"""
candidates = [t for t in self.trials
if t['result']['status'] == STATUS_OK]
losses = [float(t['result']['loss']) for t in candidates]
assert not np.any(np.isnan(losses))
best = np.argmin(losses)
return candidates[best]
@property
def argmin(self):
best_trial = self.best_trial
vals = best_trial['misc']['vals']
# unpack the one-element lists to values
# and skip over the 0-element lists
rval = {}
for k, v in vals.items():
if v:
rval[k] = v[0]
return rval
def fmin(self, fn, space, algo, max_evals,
rstate=None,
verbose=0,
pass_expr_memo_ctrl=None,
catch_eval_exceptions=False,
return_argmin=True,
):
"""Minimize a function over a hyperparameter space.
For most parameters, see `hyperopt.fmin.fmin`.
Parameters
----------
catch_eval_exceptions : bool, default False
If set to True, exceptions raised by either the evaluation of the
            configuration space from hyperparameters or the execution of `fn`
            will be caught by fmin and recorded in self._dynamic_trials as
error jobs (JOB_STATE_ERROR). If set to False, such exceptions
will not be caught, and so they will propagate to calling code.
"""
# -- Stop-gap implementation!
# fmin should have been a Trials method in the first place
# but for now it's still sitting in another file.
import fmin as fmin_module
return fmin_module.fmin(
fn, space, algo, max_evals,
trials=self,
rstate=rstate,
verbose=verbose,
allow_trials_fmin=False, # -- prevent recursion
pass_expr_memo_ctrl=pass_expr_memo_ctrl,
catch_eval_exceptions=catch_eval_exceptions,
return_argmin=return_argmin)
def trials_from_docs(docs, validate=True, **kwargs):
"""Construct a Trials base class instance from a list of trials documents
"""
rval = Trials(**kwargs)
if validate:
rval.insert_trial_docs(docs)
else:
rval._insert_trial_docs(docs)
rval.refresh()
return rval
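# -- Hedged example (not part of the original module): building one minimal
#    trial document by hand, in the shape assert_valid_trial accepts; the
#    spec/result values below are placeholders only.
def _example_manual_trial():
    trials = Trials()
    tid, = trials.new_trial_ids(1)
    misc = {'tid': tid, 'cmd': None, 'idxs': {'x': [tid]}, 'vals': {'x': [0.5]}}
    docs = trials.new_trial_docs([tid], [None], [{'status': STATUS_NEW}], [misc])
    trials.insert_trial_docs(docs)
    trials.refresh()
    return trials.miscs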
class Ctrl(object):
"""Control object for interruptible, checkpoint-able evaluation
"""
info = logger.info
warn = logger.warn
error = logger.error
debug = logger.debug
def __init__(self, trials, current_trial=None):
# -- attachments should be used like
# attachments[key]
# attachments[key] = value
# where key and value are strings. Client code should not
# expect any dictionary-like behaviour beyond that (no update)
if trials is None:
self.trials = Trials()
else:
self.trials = trials
self.current_trial = current_trial
def checkpoint(self, r=None):
assert self.current_trial in self.trials._trials
if r is not None:
self.current_trial['result'] = r
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
def inject_results(self, specs, results, miscs, new_tids=None):
"""Inject new results into self.trials
Returns ??? XXX
new_tids can be None, in which case new tids will be generated
automatically
"""
trial = self.current_trial
assert trial is not None
num_news = len(specs)
assert len(specs) == len(results) == len(miscs)
if new_tids is None:
new_tids = self.trials.new_trial_ids(num_news)
new_trials = self.trials.source_trial_docs(tids=new_tids,
specs=specs,
results=results,
miscs=miscs,
sources=[trial])
for t in new_trials:
t['state'] = JOB_STATE_DONE
return self.trials.insert_trial_docs(new_trials)
class Domain(object):
"""Picklable representation of search space and evaluation function.
"""
rec_eval_print_node_on_error = False
# -- the Ctrl object is not used directly, but rather
# a live Ctrl instance is inserted for the pyll_ctrl
# in self.evaluate so that it can be accessed from within
# the pyll graph describing the search space.
pyll_ctrl = pyll.as_apply(Ctrl)
def __init__(self, fn, expr,
workdir=None,
pass_expr_memo_ctrl=None,
name=None,
loss_target=None,
):
"""
        Parameters
----------
fn : callable
This stores the `fn` argument to `fmin`. (See `hyperopt.fmin.fmin`)
expr : hyperopt.pyll.Apply
This is the `space` argument to `fmin`. (See `hyperopt.fmin.fmin`)
workdir : string (or None)
            If non-None, the current working directory will be `workdir` while
`expr` and `fn` are evaluated. (XXX Currently only respected by
jobs run via MongoWorker)
pass_expr_memo_ctrl : bool
If True, `fn` will be called like this:
`fn(self.expr, memo, ctrl)`,
where `memo` is a dictionary mapping `Apply` nodes to their
computed values, and `ctrl` is a `Ctrl` instance for communicating
with a Trials database. This lower-level calling convention is
useful if you want to call e.g. `hyperopt.pyll.rec_eval` yourself
in some customized way.
name : string (or None)
Label, used for pretty-printing.
loss_target : float (or None)
The actual or estimated minimum of `fn`.
Some optimization algorithms may behave differently if their first
objective is to find an input that achieves a certain value,
rather than the more open-ended objective of pure minimization.
XXX: Move this from Domain to be an fmin arg.
"""
self.fn = fn
if pass_expr_memo_ctrl is None:
self.pass_expr_memo_ctrl = getattr(fn,
'fmin_pass_expr_memo_ctrl',
False)
else:
self.pass_expr_memo_ctrl = pass_expr_memo_ctrl
self.expr = pyll.as_apply(expr)
self.params = {}
for node in pyll.dfs(self.expr):
if node.name == 'hyperopt_param':
label = node.arg['label'].obj
if label in self.params:
raise DuplicateLabel(label)
self.params[label] = node.arg['obj']
self.loss_target = loss_target
self.name = name
self.workdir = workdir
self.s_new_ids = pyll.Literal('new_ids') # -- list at eval-time
before = pyll.dfs(self.expr)
# -- raises exception if expr contains cycles
pyll.toposort(self.expr)
vh = self.vh = VectorizeHelper(self.expr, self.s_new_ids)
# -- raises exception if v_expr contains cycles
pyll.toposort(vh.v_expr)
idxs_by_label = vh.idxs_by_label()
vals_by_label = vh.vals_by_label()
after = pyll.dfs(self.expr)
# -- try to detect if VectorizeHelper screwed up anything inplace
assert before == after
assert set(idxs_by_label.keys()) == set(vals_by_label.keys())
assert set(idxs_by_label.keys()) == set(self.params.keys())
self.s_rng = pyll.Literal('rng-placeholder')
# -- N.B. operates inplace:
self.s_idxs_vals = recursive_set_rng_kwarg(
pyll.scope.pos_args(idxs_by_label, vals_by_label),
self.s_rng)
# -- raises an exception if no topological ordering exists
pyll.toposort(self.s_idxs_vals)
# -- Protocol for serialization.
# self.cmd indicates to e.g. MongoWorker how this domain
# should be [un]serialized.
# XXX This mechanism deserves review as support for ipython
# workers improves.
self.cmd = ('domain_attachment', 'FMinIter_Domain')
def memo_from_config(self, config):
memo = {}
for node in pyll.dfs(self.expr):
if node.name == 'hyperopt_param':
label = node.arg['label'].obj
# -- hack because it's not really garbagecollected
# this does have the desired effect of crashing the
# function if rec_eval actually needs a value that
                # the optimization algorithm thought to be unnecessary
memo[node] = config.get(label, pyll.base.GarbageCollected)
return memo
def evaluate(self, config, ctrl, attach_attachments=True):
memo = self.memo_from_config(config)
use_obj_for_literal_in_memo(self.expr, ctrl, Ctrl, memo)
if self.pass_expr_memo_ctrl:
rval = self.fn(expr=self.expr, memo=memo, ctrl=ctrl)
else:
# -- the "work" of evaluating `config` can be written
# either into the pyll part (self.expr)
# or the normal Python part (self.fn)
pyll_rval = pyll.rec_eval(
self.expr,
memo=memo,
print_node_on_error=self.rec_eval_print_node_on_error)
rval = self.fn(pyll_rval)
if isinstance(rval, (float, int, np.number)):
dict_rval = {'loss': float(rval), 'status': STATUS_OK}
else:
dict_rval = dict(rval)
status = dict_rval['status']
if status not in STATUS_STRINGS:
raise InvalidResultStatus(dict_rval)
if status == STATUS_OK:
# -- make sure that the loss is present and valid
try:
dict_rval['loss'] = float(dict_rval['loss'])
except (TypeError, KeyError):
raise InvalidLoss(dict_rval)
if attach_attachments:
attachments = dict_rval.pop('attachments', {})
for key, val in attachments.items():
ctrl.attachments[key] = val
# -- don't do this here because SON-compatibility is only a requirement
# for trials destined for a mongodb. In-memory rvals can contain
# anything.
#return base.SONify(dict_rval)
return dict_rval
def short_str(self):
return 'Domain{%s}' % str(self.fn)
def loss(self, result, config=None):
"""Extract the scalar-valued loss from a result document
"""
return result.get('loss', None)
def loss_variance(self, result, config=None):
"""Return the variance in the estimate of the loss"""
return result.get('loss_variance', 0.0)
def true_loss(self, result, config=None):
"""Return a true loss, in the case that the `loss` is a surrogate"""
# N.B. don't use get() here, it evaluates self.loss un-necessarily
try:
return result['true_loss']
except KeyError:
return self.loss(result, config=config)
def true_loss_variance(self, config=None):
"""Return the variance in true loss,
in the case that the `loss` is a surrogate.
"""
raise NotImplementedError()
def status(self, result, config=None):
"""Extract the job status from a result document
"""
return result['status']
def new_result(self):
"""Return a JSON-encodable object
to serve as the 'result' for new jobs.
"""
return {'status': STATUS_NEW}
# -- flake8 doesn't like blank last line
|
{
"content_hash": "c2974421673f69dcdd8b2b3529511dcc",
"timestamp": "",
"source": "github",
"line_count": 908,
"max_line_length": 82,
"avg_line_length": 35.107929515418505,
"alnum_prop": 0.5658134136395006,
"repo_name": "w1kke/hyperopt",
"id": "859542133aaa1d09b356189189146140600e0f26",
"size": "31878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperopt/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "394366"
}
],
"symlink_target": ""
}
|
import json
from django.test import TestCase
from django.test.client import RequestFactory
from djangular.views.crud import NgCRUDView
from djangular.views.mixins import JSONResponseMixin
from server.models import DummyModel, DummyModel2, SimpleModel, M2MModel
class CRUDTestViewWithM2M(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = M2MModel
class CRUDTestViewWithFK(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = DummyModel
class CRUDTestView(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = DummyModel2
class CRUDTestViewWithSlug(NgCRUDView):
"""
Differs from CRUDTestViewWithFK in slug field 'email', which has a 'unique' constraint and
can be used as an alternative key (for GET operations only).
"""
model = SimpleModel
slug_field = 'email'
class CRUDViewTest(TestCase):
names = ['John', 'Anne', 'Chris', 'Beatrice', 'Matt']
emails = ["@".join((name, "example.com")) for name in names]
def setUp(self):
self.factory = RequestFactory()
# DummyModel2 and DummyModel / CRUDTestViewWithFK
model2 = DummyModel2(name="Model2 name")
model2.save()
for name in self.names:
DummyModel(name=name, model2=model2).save()
# SimpleModel / CRUDTestViewWithSlug
for name, email in zip(self.names, self.emails):
SimpleModel(name=name, email=email).save()
# model with m2m relationship
dummy_model2 = DummyModel2(name="Mathilde")
dummy_model2.save()
self.m2m_model = M2MModel()
self.m2m_model.save()
self.m2m_model.dummy_models.add(dummy_model2)
self.m2m_model.save()
def test_ng_query(self):
# CRUDTestViewWithFK
request = self.factory.get('/crud/')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
for obj in data:
db_obj = DummyModel.objects.get(pk=obj['pk'])
self.assertEqual(obj['name'], db_obj.name)
# CRUDTestViewWithSlug
request2 = self.factory.get('/crud/')
response2 = CRUDTestViewWithSlug.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
for obj in data2:
db_obj = SimpleModel.objects.get(email=obj['email'])
self.assertEqual(obj['name'], db_obj.name)
def test_ng_get(self):
# CRUDTestViewWithFK
request = self.factory.get('/crud/?pk=1')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(self.names[0], data['name'])
# CRUDTestViewWithSlug
request2 = self.factory.get('/crud/?email={0}'.format(self.emails[0]))
response2 = CRUDTestViewWithSlug.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(self.names[0], data2['name'])
def test_ng_save_create(self):
# CRUDTestViewWithFK
request = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard'}),
content_type='application/json')
response = CRUDTestView.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
pk = data['pk']
request2 = self.factory.get('/crud/?pk={0}'.format(pk))
response2 = CRUDTestView.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(data2['name'], 'Leonard')
# CRUDTestViewWithSlug
request3 = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard', 'email': '[email protected]'}),
content_type='application/json')
CRUDTestViewWithSlug.as_view()(request3)
request4 = self.factory.get('/crud/?email={0}'.format('[email protected]'))
response4 = CRUDTestViewWithSlug.as_view()(request4)
data4 = json.loads(response4.content.decode('utf-8'))
self.assertEqual(data4['name'], 'Leonard')
request5 = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard2', 'email': '[email protected]'}),
content_type='application/json')
response5 = CRUDTestViewWithSlug.as_view()(request5)
self.assertGreaterEqual(response5.status_code, 400)
data5 = json.loads(response5.content.decode('utf-8'))
self.assertTrue('detail' in data5 and 'email' in data5['detail'] and len(data5['detail']['email']) > 0)
def test_ng_save_update(self):
# CRUDTestViewWithFK
request = self.factory.post('/crud/?pk=1',
data=json.dumps({'pk': 1, 'name': 'John2'}),
content_type='application/json')
response = CRUDTestView.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data['name'], 'John2')
request2 = self.factory.get('/crud/?pk=1')
response2 = CRUDTestView.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(data2['name'], 'John2')
# CRUDTestViewWithSlug
request3 = self.factory.post('/crud/?pk=1',
data=json.dumps({'name': 'John', 'email': '[email protected]'}),
content_type='application/json')
response3 = CRUDTestViewWithSlug.as_view()(request3)
data3 = json.loads(response3.content.decode('utf-8'))
self.assertEqual(data3['name'], 'John')
self.assertEqual(data3['email'], '[email protected]')
request4 = self.factory.get('/crud/[email protected]')
response4 = CRUDTestViewWithSlug.as_view()(request4)
data4 = json.loads(response4.content.decode('utf-8'))
self.assertEqual(data4['name'], 'John')
request5 = self.factory.post('/crud/?pk=3', # Modifying "Chris"
data=json.dumps({'pk': 4, 'name': 'John2', 'email': '[email protected]'}),
content_type='application/json')
response5 = CRUDTestViewWithSlug.as_view()(request5)
self.assertGreaterEqual(response5.status_code, 400)
data5 = json.loads(response5.content.decode('utf-8'))
self.assertTrue('detail' in data5 and 'email' in data5['detail'] and len(data5['detail']['email']) > 0)
def test_ng_delete(self):
# CRUDTestViewWithFK
request = self.factory.delete('/crud/?pk=1')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
deleted_name = data['name']
request2 = self.factory.get('/crud/')
response2 = CRUDTestViewWithFK.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
for obj in data2:
self.assertTrue(deleted_name != obj['name'])
        # CRUDTestViewWithSlug delete is no different from CRUDTestViewWithFK; only testing error status codes here
request3 = self.factory.delete('/crud/[email protected]') # Missing pk
response3 = CRUDTestViewWithSlug.as_view()(request3)
self.assertEqual(response3.status_code, 400)
request4 = self.factory.delete('/crud/?pk=100') # Invalid pk
response4 = CRUDTestViewWithSlug.as_view()(request4)
self.assertEqual(response4.status_code, 404)
# Testing with m2m relationship
request5 = self.factory.delete('/crud/?pk=%s' % self.m2m_model.pk)
response5 = CRUDTestViewWithM2M.as_view()(request5)
self.assertEqual(response5.status_code, 200)
|
{
"content_hash": "e6d6ea1c96e711842b5e792614c1e818",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 111,
"avg_line_length": 42.63157894736842,
"alnum_prop": 0.6176543209876543,
"repo_name": "vaniakov/django-angular",
"id": "5c78d79f272ea9c294fe9a2f69c970d40489b287",
"size": "8124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/server/tests/test_crud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "703"
},
{
"name": "JavaScript",
"bytes": "59813"
},
{
"name": "Python",
"bytes": "73804"
}
],
"symlink_target": ""
}
|
'''
Windows Only.
Generic WMI check. This check allows you to specify particular metrics that you
want from WMI in your configuration. Check wmi_check.yaml.example in your conf.d
directory for more details on configuration.
'''
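# -- Hedged illustration (not part of the original check): the rough shape of a
#    single `instance` entry that check() below consumes. Class/property names
#    are examples only; see wmi_check.yaml.example for the supported options.
EXAMPLE_INSTANCE = {
    'class': 'Win32_PerfFormattedData_PerfOS_Processor',
    'metrics': [
        # [WMI property, metric name to submit, AgentCheck method name]
        ['PercentProcessorTime', 'system.cpu.pct', 'gauge'],
    ],
    'filters': [{'Name': '_Total'}],
    'tag_by': 'Name',
}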
# project
from checks import AgentCheck
# 3rd party
import wmi
UP_METRIC = 'Up'
SEARCH_WILDCARD = '*'
class WMICheck(AgentCheck):
def __init__(self, name, init_config, agentConfig):
AgentCheck.__init__(self, name, init_config, agentConfig)
self.wmi_conns = {}
def _get_wmi_conn(self, host, user, password):
key = "%s:%s:%s" % (host, user, password)
if key not in self.wmi_conns:
self.wmi_conns[key] = wmi.WMI(host, user=user, password=password)
return self.wmi_conns[key]
def check(self, instance):
host = instance.get('host', None)
user = instance.get('username', None)
password = instance.get('password', None)
w = self._get_wmi_conn(host, user, password)
wmi_class = instance.get('class')
metrics = instance.get('metrics')
filters = instance.get('filters')
tag_by = instance.get('tag_by')
tag_queries = instance.get('tag_queries')
constant_tags = instance.get('constant_tags')
if not wmi_class:
raise Exception('WMI instance is missing a value for `class` in wmi_check.yaml')
# If there are filters, we need one query per filter.
if filters:
for f in filters:
prop = f.keys()[0]
search = f.values()[0]
if SEARCH_WILDCARD in search:
search = search.replace(SEARCH_WILDCARD, '%')
wql = "SELECT * FROM %s WHERE %s LIKE '%s'" \
% (wmi_class, prop, search)
results = w.query(wql)
else:
results = getattr(w, wmi_class)(**f)
self._extract_metrics(results, metrics, tag_by, w, tag_queries, constant_tags)
else:
results = getattr(w, wmi_class)()
self._extract_metrics(results, metrics, tag_by, w, tag_queries, constant_tags)
def _extract_metrics(self, results, metrics, tag_by, wmi, tag_queries, constant_tags):
if len(results) > 1 and tag_by is None:
raise Exception('WMI query returned multiple rows but no `tag_by` value was given. '
'metrics=%s' % metrics)
for res in results:
tags = []
# include any constant tags...
if constant_tags:
tags.extend(constant_tags)
            # if tag_queries is specified then get attributes from other classes and use them as tags
if tag_queries:
for query in tag_queries:
link_source_property = int(getattr(res, query[0]))
target_class = query[1]
link_target_class_property = query[2]
target_property = query[3]
link_results = \
wmi.query("SELECT {0} FROM {1} WHERE {2} = {3}"
.format(target_property, target_class,
link_target_class_property, link_source_property))
if len(link_results) != 1:
self.log.warning("Failed to find {0} for {1} {2}. No metrics gathered"
.format(target_class, link_target_class_property,
link_source_property))
continue
link_value = str(getattr(link_results[0], target_property)).lower()
tags.append("{0}:{1}".format(target_property.lower(),
"_".join(link_value.split())))
# Grab the tag from the result if there's a `tag_by` value (e.g.: "name:jenkins")
# Strip any #instance off the value when `tag_queries` is set (gives us unique tags)
if tag_by:
tag_value = getattr(res, tag_by).lower()
if tag_queries and tag_value.find("#") > 0:
tag_value = tag_value[:tag_value.find("#")]
tags.append('%s:%s' % (tag_by.lower(), tag_value))
if len(tags) == 0:
tags = None
for wmi_property, name, mtype in metrics:
if wmi_property == UP_METRIC:
# Special-case metric will just submit 1 for every value
# returned in the result.
val = 1
else:
val = float(getattr(res, wmi_property))
try:
func = getattr(self, mtype)
except AttributeError:
raise Exception('Invalid metric type: {0}'.format(mtype))
# submit the metric to datadog
func(name, val, tags=tags)
|
{
"content_hash": "4331446e467806665dcad672e1992666",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 98,
"avg_line_length": 40.885245901639344,
"alnum_prop": 0.5212510024057738,
"repo_name": "JohnLZeller/dd-agent",
"id": "44a709dc7f168ceafd9ea9091ae993d1f065fbd8",
"size": "4988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checks.d/wmi_check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "917"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3404"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "1454456"
},
{
"name": "Ruby",
"bytes": "57718"
},
{
"name": "Shell",
"bytes": "38669"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.db import models
class Item(models.Model):
title = models.CharField("Title", max_length=50, blank=False)
content = models.TextField("Description", blank=False)
source = models.URLField("Source", blank=True)
related = models.ManyToManyField("self", blank=True)
created = models.DateTimeField('Created', default=datetime.now)
def __str__(self):
return self.title
|
{
"content_hash": "68edb79ca061f0c53201aa6b185dad4b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.7116704805491991,
"repo_name": "n2o/labbook",
"id": "c6c4f985b3a9c90c6dad3d126d9d9ca1de5a45b7",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wiki/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1377"
},
{
"name": "HTML",
"bytes": "12909"
},
{
"name": "Python",
"bytes": "29057"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
"""
Determinantal Point Process
===========================
This is a pure Python port of the Matlab code of Alex Kulesza.
The port was done using the fabulous SMOP package with alterations to ensure
it only used scipy/numpy packages. As such various line comments referencing
the original `*.m` file are retained in this code
If there are any errors in the port please let me know on: [email protected]
"""
import numpy as np
import random
import scipy
import os
os.environ['OCTAVE_EXECUTABLE'] = "C:/Octave/Octave-4.2.1/bin/octave-cli-4.2.1.exe"
from oct2py import octave
"""
replicate `null`
"""
import numpy as np
from numpy.linalg import svd
def rank(A, atol=1e-13, rtol=0):
"""Estimate the rank (i.e. the dimension of the nullspace) of a matrix.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : ndarray
A should be at most 2-D. A 1-D array with length n will be treated
as a 2-D with shape (1, n)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
Return value
------------
r : int
The estimated rank of the matrix.
See also
--------
np.linalg.matrix_rank
matrix_rank is basically the same as this function, but it does not
provide the option of the absolute tolerance.
"""
A = np.atleast_2d(A)
s = svd(A, compute_uv=False)
tol = max(atol, rtol * s[0])
rank = int((s >= tol).sum())
return rank
def nullspace(A, atol=1e-13, rtol=0):
"""Compute an approximate basis for the nullspace of A.
The algorithm used by this function is based on the singular value
decomposition of `A`.
Parameters
----------
A : ndarray
A should be at most 2-D. A 1-D array with length k will be treated
as a 2-D with shape (1, k)
atol : float
The absolute tolerance for a zero singular value. Singular values
smaller than `atol` are considered to be zero.
rtol : float
The relative tolerance. Singular values less than rtol*smax are
considered to be zero, where smax is the largest singular value.
If both `atol` and `rtol` are positive, the combined tolerance is the
maximum of the two; that is::
tol = max(atol, rtol * smax)
Singular values smaller than `tol` are considered to be zero.
Return value
------------
ns : ndarray
If `A` is an array with shape (m, k), then `ns` will be an array
with shape (k, n), where n is the estimated dimension of the
nullspace of `A`. The columns of `ns` are a basis for the
nullspace; each element in np.dot(A, ns) will be approximately
zero.
"""
A = np.atleast_2d(A)
u, s, vh = svd(A)
tol = max(atol, rtol * s[0])
nnz = (s >= tol).sum()
ns = vh[nnz:].conj().T
return scipy.linalg.orth(ns)
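# -- Hedged example (not part of the original port): the columns returned by
#    nullspace() form an orthonormal basis that A maps (numerically) to zero.
def _example_nullspace():
    A = np.array([[1.0, 2.0, 3.0]])
    ns = nullspace(A)                 # shape (3, 2) for this rank-1, 1x3 matrix
    assert np.allclose(A.dot(ns), 0.0)
    return ns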
def elem_sympoly(lambda_=None,k=None):
# given a vector of lambdas and a maximum size k, determine the value of
# the elementary symmetric polynomials:
# E(l+1,n+1) = sum_{J \subseteq 1..n,|J| = l} prod_{i \in J} lambda(i)
N= len (lambda_)
# elem_sympoly.m:6
E= np.zeros((k + 1,N + 1))
# elem_sympoly.m:7
E[0,:]=1
# elem_sympoly.m:8
for l_ in range(k):
for n_ in range(N):
l = l_+1
n = n_+1
E[l,n]=E[l,n - 1] + lambda_[n - 1]*E[l-1, n-1]
return E
def sample_k(lambda_=None,k=None):
# pick k lambdas according to p(S) \propto prod(lambda \in S)
# compute elementary symmetric polynomials
E=elem_sympoly(lambda_,k)
i=len(lambda_)
remaining=k
S = []
while remaining > 0:
# compute marginal of i given that we choose remaining values from 1:i
if i == remaining:
marg=1
else:
marg=np.dot(lambda_[i-1],E[remaining-1,i-1]) / E[remaining,i]
# sample marginal
if np.random.uniform() < marg:
S.append(i-1) # indexing problems
remaining=remaining - 1
i = i - 1
return S
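# -- Hedged example (not part of the original port): with equal lambdas every
#    k-subset is equally likely, so sample_k should return k distinct indices.
def _example_sample_k():
    lambdas = np.ones(6)
    picked = sample_k(lambdas, 3)
    assert len(set(picked)) == 3      # three distinct indices in 0..5
    return picked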
def decompose_kernel(M):
"""
Decomposes the kernel so that dpp function can sample.
based on: https://github.com/javiergonzalezh/dpp/blob/master/dpp/samplers/decompose_kernel.m
this function returns:
* M - the original kernel
* V - eigenvectors
* D - diagonals of eigenvalues
"""
L = {}
M = np.nan_to_num(M)
# no approximation
D, V = np.linalg.eig(M)
"""
if M.shape[0] < 1000:
D, V = np.linalg.eig(M)
else:
# nystrom approximation for eigenvalues
# https://github.com/charanpald/sandbox/blob/master/sandbox/misc/Nystrom.py
n = 1000
inds = np.sort(np.random.permutation(M.shape[0])[0:n])
invInds = np.setdiff1d(np.arange(M.shape[0]), inds)
A = M[inds, :][:, inds]
B = M[inds, :][:, invInds]
Am12 = np.linalg.inv(scipy.linalg.sqrtm(A))
#Am12 = Util.matrixPowerh(A, -0.5)
S = A + Am12.dot(B).dot(B.T).dot(Am12)
S = np.nan_to_num(S)
lmbda, U = np.linalg.eig(S)
Ubar = np.r_[U, B.T.dot(U).dot(np.diag(1/lmbda))]
Z = Ubar.dot(np.diag(lmbda**0.5))
D, F = np.linalg.eig(Z.T.dot(Z))
V = Z.dot(F).dot(np.diag(D**-0.5))
"""
L['M'] = M.copy()
L['V'] = np.real(V.copy()) + (np.ones(V.shape) * np.finfo(float).eps)
L['D'] = np.real(D.copy()) + (np.ones(D.shape) * np.finfo(float).eps)
return L
def sample_dpp(L=None,k=None):
"""
Sample a set from a dpp. L is the (decomposed) kernel, and k is (optionally)
the size of the set to return
"""
if k == L['V'].shape[1]:
# error handling
return list(range(k))
if k is None:
# choose eigenvectors randomly
D = np.divide(L['D'], (1+L['D']))
# check this - might just do a random.sample along axis.
v = np.random.randint(0, L['V'].shape[1], random.choice(range(L['V'].shape[1])))
u_proba = np.random.uniform(size=(len(D), ))
v = np.argwhere(u_proba <= D).flatten()
else:
v = sample_k(L['D'], k)
k = len(v)
V = L['V'][:, v]
# iterate
y_index = None #list(range(L['V'].shape[1]))
Y=[]
for _ in range(k):
# compute probabilities for each item
P=np.sum(np.power(V, 2), 1)
# sample_dpp.m:21
# sample_dpp.m:22
#find(rand <= cumsum(P),1)
"""
This step differs from matlab code in the following way:
1. Create vector with valid indices which can be sampled
2. Normalise the probabilities
        3. Make use of `np.random.choice` to choose (guarantees that it will be a new choice for `Y`)
"""
P_index = [(indx, prob) for indx, prob in list(zip(range(len(P)), P)) if indx not in Y]
P_list = [x for x, _ in P_index]
if y_index is None:
y_index = list(range(len(P_list)))
if len(P_list) == 0:
return Y
P_norm = np.array([p for _, p in P_index])
P_norm = P_norm/np.sum(P_norm)
choose_item = np.random.choice(range(len(P_list)), 1, p=P_norm.flatten())
choose_item = choose_item.flatten()[0]
# add the index into our sampler
#print(choose_item)
#print(len(y_index))
#print(len(P_list))
#print("\n")
Y.append(y_index[choose_item])
if len(Y) == k:
return Y
# delete item from y_index...
y_index.pop(choose_item)
        # update... choose a vector to eliminate, let's pick randomly
j = random.choice(range(V.shape[1]))
Vj = V[:, j]
V = np.delete(V, j, axis=1)
# make sure we do a projection onto Vj,
# is orthogonal basis
V = V + (np.ones(V.shape) * np.finfo(float).eps)
V_norm = V[choose_item, :]/Vj[choose_item]
V = V - (Vj.reshape(-1, 1).dot(V_norm.reshape(1, -1))) + (np.ones(V.shape) * np.finfo(float).eps)
# orthogonalise
for a in range(V.shape[1]):
for b in range(a):
V[:, a] = V[:, a] - ((V[:, a].T).dot(V[:, b]))*(V[:, b])
V[:, a] = V[:, a]/np.linalg.norm(V[:, a])
def sample_dpp2(L=None, k=None, chosen_set=None):
"""
Sample a set from a dpp. L is the (decomposed) kernel, and k is (optionally)
the size of the set to return
chosen_set will be the indices which k is _forced_ to sample from.
"""
if chosen_set is None or k > len(chosen_set):
print("Chosen set is either too small or not provided...using default `sample_dpp`")
return sample_dpp(L, k)
if k == L['V'].shape[1]:
# error handling
return list(range(k))
if k is None:
# choose eigenvectors randomly
D = np.divide(L['D'], (1+L['D']))
# check this - might just do a random.sample along axis.
v = np.random.randint(0, L['V'].shape[1], random.choice(range(L['V'].shape[1])))
        v = np.argwhere(np.random.uniform(size=(len(D), )) <= D).flatten()
else:
v = sample_k(L['D'], k)
k = len(v)
V = L['V'][:, v]
# iterate
y_index = list(range(L['V'].shape[1]))
Y=[]
for _ in range(k):
# compute probabilities for each item
P=np.sum(np.power(V, 2), 1)
# sample_dpp.m:21
# sample_dpp.m:22
#find(rand <= cumsum(P),1)
"""
This step differs from matlab code in the following way:
1. Create vector with valid indices which can be sampled
2. Normalise the probabilities
        3. Make use of `np.random.choice` to choose (guarantees that it will be a new choice for `Y`)
"""
# for all items in chosen set, their probability is moved to 0
P_index = [(indx, prob) if indx not in chosen_set else (indx, 0.0)
for indx, prob in list(zip(range(len(P)), P)) if indx not in Y]
P_list = [x for x, _ in P_index]
P_norm = np.array([p for _, p in P_index])
if np.sum(P_norm) > 0:
P_norm = P_norm/np.sum(P_norm)
else:
P_index = [(indx, prob)
for indx, prob in list(zip(range(len(P)), P)) if indx not in Y]
P_list = [x for x, _ in P_index]
P_norm = np.array([p for _, p in P_index])
P_norm = P_norm/np.sum(P_norm)
choose_item = np.random.choice(range(len(P_list)), 1, p=P_norm)[0]
# add the index into our sampler
Y.append(y_index[choose_item])
if len(Y) == k:
return Y
# delete item from y_index...
y_index.pop(choose_item)
        # update... choose a vector to eliminate, let's pick randomly
j = random.choice(range(V.shape[1]))
Vj = V[:, j]
V = np.delete(V, j, axis=1)
# make sure we do a projection onto Vj,
# is orthogonal basis
V_norm = V[choose_item, :]/Vj[choose_item]
V = V - (Vj.reshape(-1, 1).dot(V_norm.reshape(1, -1))) + (np.ones(V.shape) * np.finfo(float).eps)
# orthogonalise
for a in range(V.shape[1]):
for b in range(a):
V[:, a] = V[:, a] - ((V[:, a].T).dot(V[:, b]))*(V[:, b])
V[:, a] = V[:, a]/np.linalg.norm(V[:, a])
def sample_dpp1(L=None,k=None):
"""
Sample a set from a dpp. L is the (decomposed) kernel, and k is (optionally)
the size of the set to return
"""
if k == L['V'].shape[1]:
# error handling
return list(range(k))
if k is None:
# choose eigenvectors randomly
D = np.divide(L['D'], (1+L['D']))
# check this - might just do a random.sample along axis.
v = np.random.randint(0, L['V'].shape[1], random.choice(range(L['V'].shape[1])))
        v = np.argwhere(np.random.uniform(size=(len(D), )) <= D).flatten()
else:
v = sample_k(L['D'], k)
k = len(v)
V = L['V'][:, v]
# iterate
y_index = list(range(L['V'].shape[1]))
Y=[]
for _ in range(k):
# compute probabilities for each item
P=np.sum(np.power(V, 2), 1)
# sample_dpp.m:21
# sample_dpp.m:22
#find(rand <= cumsum(P),1)
"""
This step differs from matlab code in the following way:
1. Create vector with valid indices which can be sampled
2. Normalise the probabilities
        3. Make use of `np.random.choice` to choose (guarantees that it will be a new choice for `Y`)
"""
P_index = [(indx, prob) for indx, prob in list(zip(range(len(P)), P)) if indx not in Y]
P_list = [x for x, _ in P_index]
P_norm = np.array([p for _, p in P_index])
P_norm = P_norm/np.sum(P_norm)
choose_item = np.random.choice(range(len(P_list)) , 1, p=P_norm)[0]
# add the index into our sampler
Y.append(y_index[choose_item])
if len(Y) == k:
return Y
# delete item from y_index...
y_index.pop(choose_item)
        # update... choose a vector to eliminate, let's pick randomly
j = random.choice(range(V.shape[1]))
Vj = V[:, j]
V = np.delete(V, j, axis=1)
# make sure we do a projection onto Vj,
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4524741/
"""
where Proj⊥Bi Bj is the the projection of Bj on the subspace perpendicular to Bi.
For Bi ≠ 0 and Bj = 0 the projection is ∥Proj⊥Bi Bj∥2 = 0.
"""
# is orthogonal basis
Vj_basis = nullspace(Vj)
# project onto basis
V = np.apply_along_axis(lambda x: np.dot(x, Vj_basis), 0, V)
V = scipy.linalg.orth(V)
def sample_conditional_dpp(L, set_, k=None):
'''
Given a kernel matrix L, returns a sample from a k-DPP.
Based on code from [Javier Gonzalez](https://github.com/javiergonzalezh/dpp/blob/master/dpp/samplers/dpp.py~)
    The code is hacked in a way that if a set A is provided, samples from a conditional
dpp given A are produced.
See `L-ensembles` section and Theorem 2.1 + Theorem 2.2 in [Determinantal point processes for machine learning](https://arxiv.org/pdf/1207.6083.pdf)
L: kernel matrix
set: index of the conditional elements. Integer numpy array containing the locations
(starting in zero) relative to the rows of L.
k: size of the sample from the DPP
'''
# Calculate the kernel for the marginal
if L.shape[0] != L.shape[1]:
print("Whaaaatt???")
print(L)
print(L.shape)
sq_size = max(L.shape[0], L.shape[1])
        L_temp1 = np.zeros((sq_size, sq_size))
L_temp1[:L.shape[0], :L.shape[1]] = L
        L_temp2 = np.zeros((sq_size, sq_size))
L_temp2[:L.shape[1], :L.shape[0]] = L.T
L = np.maximum(L_temp1, L_temp2)
Id = np.array([1]*L.shape[0])
Id[set_] = 0
Id = np.diag(Id)
try:
L_compset_full = np.linalg.inv(Id + L)
except:
try:
L_compset_full = np.linalg.pinv(Id + L)
except:
print("using octave...")
from oct2py import octave
temp_L = Id + L
L_compset_full = octave.pinv(temp_L)
octave.exit()
try:
L_minor = np.linalg.inv(np.delete(np.delete(L_compset_full,tuple(set_), axis=1),tuple(set_),axis=0))
except:
try:
L_minor = np.linalg.pinv(np.delete(np.delete(L_compset_full,tuple(set_), axis=1),tuple(set_),axis=0))
except:
print("using octave...")
temp_L = np.delete(np.delete(L_compset_full,tuple(set_), axis=1),tuple(set_),axis=0)
from oct2py import octave
L_minor = octave.pinv(temp_L)
octave.exit()
L_compset = L_minor - np.diag([1]*L_minor.shape[0])
# Compute the sample
sample = np.array(sample_dpp(decompose_kernel(L_compset), k)).flatten()
return np.concatenate((set_, sample) ,axis=0)
def sample_conditional_dpp2(L, set_, k=None, chosen_set=None):
'''
Given a kernel matrix L, returns a sample from a k-DPP.
Based on code from [Javier Gonzalez](https://github.com/javiergonzalezh/dpp/blob/master/dpp/samplers/dpp.py~)
    The code is hacked in a way that if a set A is provided, samples from a conditional
dpp given A are produced.
See `L-ensembles` section and Theorem 2.1 + Theorem 2.2 in [Determinantal point processes for machine learning](https://arxiv.org/pdf/1207.6083.pdf)
L: kernel matrix
set: index of the conditional elements. Integer numpy array containing the locations
(starting in zero) relative to the rows of L.
k: size of the sample from the DPP
'''
# Calculate the kernel for the marginal
Id = np.array([1]*L.shape[0])
Id[set_] = 0
Id = np.diag(Id)
L_compset_full = np.linalg.inv(Id + L)
L_minor = np.linalg.inv(np.delete(np.delete(L_compset_full,tuple(set_), axis=1),tuple(set_),axis=0))
L_compset = L_minor - np.diag([1]*L_minor.shape[0])
# Compute the sample
sample = sample_dpp2(decompose_kernel(L_compset), k, chosen_set)
if k==2:
sample = [sample]
return np.concatenate((set_, sample) ,axis=0)
if __name__ == "__main__":
# try to do a sample of stuff...
from sklearn.metrics.pairwise import rbf_kernel
from sklearn import datasets
iris = datasets.load_iris()
M = rbf_kernel(iris.data.T)
L = decompose_kernel(M)
indx = sample_dpp(L=L, k=3)
print(indx)
|
{
"content_hash": "4693db5147fd6c4f53ca808550f31ee3",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 152,
"avg_line_length": 34.32457786116323,
"alnum_prop": 0.5584039355015031,
"repo_name": "chappers/sklearn-recipes",
"id": "1183ed5bae260a404d848079e2718c4d17a021ea",
"size": "18305",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "streaming/dpp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "113416"
},
{
"name": "Jupyter Notebook",
"bytes": "2590773"
},
{
"name": "Matlab",
"bytes": "120986"
},
{
"name": "Python",
"bytes": "313952"
},
{
"name": "R",
"bytes": "18483"
}
],
"symlink_target": ""
}
|
from wagtail.wagtailcore import blocks
class StructBlockWithStyle(blocks.StructBlock):
css_style = blocks.CharBlock(required=False)
class Meta:
form_template = 'common/block_forms/structblock_with_style.html'
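# -- Hedged illustration (not part of the original module): a block subclassing
#    StructBlockWithStyle inherits the optional css_style child block and the
#    custom form template, e.g.
#
#   class CalloutBlock(StructBlockWithStyle):
#       text = blocks.CharBlock()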
|
{
"content_hash": "858638587202b0f675bfb4125c604111",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 72,
"avg_line_length": 28.5,
"alnum_prop": 0.7587719298245614,
"repo_name": "baylee-d/cos.io",
"id": "46349c04d168f0f1ffc308ca2302115d31d324c9",
"size": "228",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "common/blocks/StructBlockWithStyle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "481084"
},
{
"name": "HTML",
"bytes": "819147"
},
{
"name": "JavaScript",
"bytes": "409024"
},
{
"name": "Python",
"bytes": "696401"
},
{
"name": "Shell",
"bytes": "1178"
}
],
"symlink_target": ""
}
|
'''
Cylinder mesh generator
FIXME: UV coords!
'''
from ..core import *
#-------------------------------------------------------------------------------
def generate(vertexLayout, baseRadius, topRadius, length, numSlices, numStacks) :
'''
Generate cylinder model
'''
dgLogger.debug('generators.cylinder')
# generate a sin/cos table
sinTable = []
cosTable = []
for i in range(0, numSlices) :
val = 2.0 * math.pi * i / numSlices
sinTable.append(math.sin(val))
cosTable.append(math.cos(val))
deltaRadius = topRadius - baseRadius
sideLength = math.sqrt(deltaRadius * deltaRadius + length * length)
normalXY = 1.0
normalZ = 0.0
if sideLength > 0.00001 :
normalXY = length / sideLength
normalZ = -deltaRadius / sideLength
# initialize mesh object
numVerts = 2 * (numSlices+1) + (numStacks+1) * numSlices
numTris = 2 * numSlices + numSlices * numStacks * 2
mesh = Mesh(vertexLayout, numVerts, numTris)
posOffset = mesh.getComponentOffset(('position', 0))
normOffset = mesh.getComponentOffset(('normal', 0))
# base cap vertices
curVertexIndex = 0
baseZ = -0.5 * length
mesh.setData3(curVertexIndex, posOffset, 0.0, 0.0, baseZ)
mesh.setData3(curVertexIndex, normOffset, 0.0, 0.0, -1.0)
curVertexIndex += 1
for i in range(0, numSlices) :
pos = Vector(baseRadius * sinTable[i], baseRadius * cosTable[i], baseZ)
mesh.setData3(curVertexIndex, posOffset, pos.x, pos.y, pos.z)
mesh.setData3(curVertexIndex, normOffset, 0.0, 0.0, -1.0)
curVertexIndex += 1
# stack vertices
for j in range(0, numStacks + 1) :
frac = float(j) / float(numStacks)
z = length * (frac - 0.5)
radius = baseRadius + frac * deltaRadius
for i in range(0, numSlices) :
pos = Vector(radius * sinTable[i], radius * cosTable[i], z)
norm = Vector(normalXY * sinTable[i], normalXY * cosTable[i], normalZ)
mesh.setData3(curVertexIndex, posOffset, pos.x, pos.y, pos.z)
mesh.setData3(curVertexIndex, normOffset, norm.x, norm.y, norm.z)
curVertexIndex += 1
# top cap vertices
topZ = 0.5 * length
for i in range(0, numSlices) :
pos = Vector(topRadius * sinTable[i], topRadius * cosTable[i], topZ)
mesh.setData3(curVertexIndex, posOffset, pos.x, pos.y, pos.z)
mesh.setData3(curVertexIndex, normOffset, 0.0, 0.0, 1.0)
curVertexIndex += 1
mesh.setData3(curVertexIndex, posOffset, 0.0, 0.0, topZ)
mesh.setData3(curVertexIndex, normOffset, 0.0, 0.0, 1.0)
curVertexIndex += 1
if curVertexIndex != mesh.getNumVertices() :
raise Exception("Vertex count mismatch!")
# generate triangles
triIndex = 0
# base cap triangles
rowA = 0
rowB = 1
for i in range(0, numSlices-1) :
mesh.setTriangle(triIndex, Triangle(rowA, rowB+i, rowB+i+1, 0))
triIndex += 1
i += 1
mesh.setTriangle(triIndex, Triangle(rowA, rowB+i, rowB, 0))
triIndex += 1
# stack triangles
for j in range(0, numStacks) :
rowA = 1 + (j + 1) * numSlices
rowB = rowA + numSlices
for i in range(0, numSlices-1) :
mesh.setTriangle(triIndex, Triangle(rowA+i, rowB+i, rowA+i+1, 0))
triIndex += 1
mesh.setTriangle(triIndex, Triangle(rowA+i+1, rowB+i, rowB+i+1, 0))
triIndex += 1
i += 1
mesh.setTriangle(triIndex, Triangle(rowA+i, rowB+i, rowA, 0))
triIndex += 1
mesh.setTriangle(triIndex, Triangle(rowA, rowB+i, rowB, 0))
triIndex += 1
# top cap triangles
rowA = 1 + (numStacks + 2) * numSlices
rowB = rowA + numSlices
for i in range(0, numSlices - 1) :
mesh.setTriangle(triIndex, Triangle(rowA+i, rowB, rowA+i+1, 0))
triIndex += 1
i += 1
mesh.setTriangle(triIndex, Triangle(rowA+i, rowB, rowA, 0))
triIndex += 1
if triIndex != mesh.getNumTriangles() :
raise Exception("Triangle count mismatch")
# create a dummy model
model = Model('cylinder')
model.mesh = mesh
model.addMaterial(Material.createDefaultMaterial())
return model
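#-------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The VertexLayout
# constructor lives in ..core and its exact signature is an assumption here; the
# layout only needs to expose the ('position', 0) and ('normal', 0) components
# queried above.
#
#   layout = VertexLayout({('position', 0): 3, ('normal', 0): 3})
#   model = generate(layout, baseRadius=1.0, topRadius=0.5, length=2.0,
#                    numSlices=16, numStacks=4)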
#--- eof
|
{
"content_hash": "8e30f1d58d50b2b36495d0b5cc7464a3",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 84,
"avg_line_length": 34.20161290322581,
"alnum_prop": 0.601273284602688,
"repo_name": "floooh/drahtgitter",
"id": "aa3122def409ee4b80c782618e4cae7694f656f0",
"size": "4241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drahtgitter/generators/cylinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1713"
},
{
"name": "Python",
"bytes": "97022"
}
],
"symlink_target": ""
}
|
"""
This module is intended to match crystal structures against known crystallographic "prototype"
structures.
In this module, the AflowPrototypeMatcher uses the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this particular class, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
"""
import os
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
module_dir = os.path.dirname(os.path.abspath(__file__))
AFLOW_PROTOTYPE_LIBRARY = loadfn(os.path.join(module_dir, "aflow_prototypes.json"))
class AflowPrototypeMatcher:
"""
This class will match structures to their crystal prototypes, and will
attempt to group species together to match structures derived from
    prototypes (e.g. an A_xB_1-x_C from a binary prototype), labelling
    these matches with a "-like" suffix.
This class uses data from the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this class, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
"""
def __init__(self, initial_ltol=0.2, initial_stol=0.3, initial_angle_tol=5):
"""
Tolerances as defined in StructureMatcher. Tolerances will be
gradually decreased until only a single match is found (if possible).
Args:
initial_ltol: fractional length tolerance
initial_stol: site tolerance
initial_angle_tol: angle tolerance
"""
self.initial_ltol = initial_ltol
self.initial_stol = initial_stol
self.initial_angle_tol = initial_angle_tol
@staticmethod
def _match_prototype(structure_matcher, structure):
tags = []
for d in AFLOW_PROTOTYPE_LIBRARY:
p = d['snl'].structure
match = structure_matcher.fit_anonymous(p, structure)
if match:
tags.append(d)
return tags
def _match_single_prototype(self, structure):
sm = StructureMatcher(ltol=self.initial_ltol,
stol=self.initial_stol,
angle_tol=self.initial_angle_tol)
tags = self._match_prototype(sm, structure)
while len(tags) > 1:
sm.ltol *= 0.8
sm.stol *= 0.8
sm.angle_tol *= 0.8
tags = self._match_prototype(sm, structure)
if sm.ltol < 0.01:
break
return tags
def get_prototypes(self, structure):
"""
Get prototype(s) structures for a given
input structure. If you use this method in
your work, please cite the appropriate
AFLOW publication:
Mehl, M. J., Hicks, D., Toher, C., Levy, O.,
Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
Args:
structure: structure to match
Returns (list): A list of dicts with keys
'snl' for the matched prototype and 'tags',
a dict of tags ('mineral', 'strukturbericht'
and 'aflow') of that prototype. This should
be a list containing just a single entry,
but it is possible a material can match
multiple prototypes.
"""
tags = self._match_single_prototype(structure)
if len(tags) == 0:
return None
else:
return tags
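# Hedged usage sketch (not part of the original module): match an arbitrary
# pymatgen Structure against the AFLOW prototype library. The input file name
# is an illustrative assumption.
if __name__ == "__main__":
    from pymatgen.core.structure import Structure
    structure = Structure.from_file("POSCAR")  # hypothetical input structure
    matcher = AflowPrototypeMatcher()
    matches = matcher.get_prototypes(structure)
    if matches:
        print(matches[0]["tags"])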
|
{
"content_hash": "a0484cc0536890293f281a460afa6da0",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 97,
"avg_line_length": 37.31481481481482,
"alnum_prop": 0.6285359801488833,
"repo_name": "mbkumar/pymatgen",
"id": "fb68b908b151d908bcf34224016e5deb828d9c68",
"size": "4140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/prototypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6933839"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
import os
import socket
import unittest
import mock
import utils.misc
class MockedSocket1():
def __init__(self, a, b):
print a, b
pass
def connect(self, dest):
pass
def getsockname(self):
return ['1.2.3.4']
class MockedSocket2():
def __init__(self, a, b):
print a, b
pass
def connect(self, dest):
pass
def getsockname(self):
raise socket.error()
def gethostname(self):
return '1.2.3.4'
class MiscTests(unittest.TestCase):
def test_find_mount_point(self, tmpdir='/'):
assert utils.misc.find_mount_point(str(tmpdir)) == '/'
def test_subprocess_run(self):
assert utils.misc.subprocess_run(
'echo abc', shell=True).strip() == 'abc'
assert utils.misc.subprocess_run('exit 0', shell=True).strip() == ''
with self.assertRaises(RuntimeError):
utils.misc.subprocess_run('exit 1', shell=True)
with self.assertRaises(RuntimeError):
# There should not be a /a/b/c/d/e file
utils.misc.subprocess_run('/a/b/c/d/e', shell=False)
@mock.patch('utils.misc.open')
def test_get_process_env(self, mock_open):
mock_open.return_value = open('tests/unit/mock_environ_file')
env = utils.misc.get_process_env(pid=os.getpid())
assert 'HOME' in env
with self.assertRaises(TypeError):
utils.misc.get_process_env('asdf')
def test_process_is_crawler(self):
assert utils.misc.process_is_crawler(os.getpid())
assert utils.misc.process_is_crawler(1) is False
# make sure 1123... does not actually exist
assert utils.misc.process_is_crawler(1123234325123235) is False
with self.assertRaises(TypeError):
utils.misc.process_is_crawler('asdf')
def test_get_host_ip4_addresses(self):
assert '127.0.0.1' in utils.misc.get_host_ip4_addresses()
def test_is_process_running(self):
assert utils.misc.is_process_running(os.getpid())
assert utils.misc.is_process_running(1)
# make sure 1123... does not actually exist
assert utils.misc.is_process_running(1123234325) is False
with self.assertRaises(TypeError):
utils.misc.is_process_running('asdf')
@mock.patch('utils.misc.socket.socket', side_effect=MockedSocket1)
def test_get_host_ipaddr1(self, mock_socket):
assert utils.misc.get_host_ipaddr() == '1.2.3.4'
@mock.patch('utils.misc.socket.socket', side_effect=MockedSocket2)
@mock.patch('utils.misc.socket.gethostname',
side_effect=lambda: '4.3.2.1')
def test_get_host_ipaddr2(self, *args):
assert utils.misc.get_host_ipaddr() == '4.3.2.1'
def test_execution_path(self):
assert utils.misc.execution_path('abc').endswith('/abc')
# XXX this is more of a functional test
def test_btrfs_list_subvolumes(self):
# we either have it installed and it will raise a RuntimeError because
# the path provided does not exist or it is not and it will raise a
# RuntimeError.
with self.assertRaises(RuntimeError):
for submodule in utils.misc.btrfs_list_subvolumes('asd'):
pass
@mock.patch('utils.misc.subprocess_run')
def test_btrfs_list_subvolumes_with_list(self, mock_run):
mock_run.return_value = (
("ID 257 gen 7 top level 5 path btrfs/subvolumes/a60a763cbaaedd3ac"
"2b77bff939019fda876d8a187cb7e85789bb36377accbce\n"
"ID 258 gen 8 top level 5 path btrfs/subvolumes/9212798f648314583"
"9c72f06a6bc2b0e456ca2b9ec14ea70e2948f098ce51077\n"
"ID 278 gen 1908 top level 5 path btrfs/subvolumes/7cd6c219c63e02"
"82ddbd8437c9b2a0220aff40bbfd6734503bcd58e5afa28426\n"))
assert list(
utils.misc.btrfs_list_subvolumes('asd')) == [
[
'ID',
'257',
'gen',
'7',
'top',
'level',
'5',
'path',
("btrfs/subvolumes/a60a763cbaaedd3ac2b77bff939019fda876d8a187c"
"b7e85789bb36377accbce")],
[
'ID',
'258',
'gen',
'8',
'top',
'level',
'5',
'path',
("btrfs/subvolumes/9212798f6483145839c72f06a6bc2b0e456ca2b9ec1"
"4ea70e2948f098ce51077")],
[
'ID',
'278',
'gen',
'1908',
'top',
'level',
'5',
'path',
("btrfs/subvolumes/7cd6c219c63e0282ddbd8437c9b2a0220aff40bbfd6"
"734503bcd58e5afa28426")]]
|
{
"content_hash": "74aa7a753806071ef7b7cfc1305ebf99",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 33.2108843537415,
"alnum_prop": 0.5718967636214666,
"repo_name": "canturkisci/agentless-system-crawler",
"id": "66864ba47773a6c006b35e2a95690601bee7ae86",
"size": "4882",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/test_misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "793"
},
{
"name": "Makefile",
"bytes": "2108"
},
{
"name": "Python",
"bytes": "704517"
},
{
"name": "Shell",
"bytes": "94228"
}
],
"symlink_target": ""
}
|
"""The test for light device automation."""
from unittest.mock import patch
import pytest
from homeassistant.components import device_automation
import homeassistant.components.automation as automation
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
def _same_lists(a, b):
if len(a) != len(b):
return False
for d in a:
if d not in b:
return False
return True
async def test_websocket_get_actions(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected conditions from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": "light",
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"domain": "light",
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"domain": "light",
"type": "toggle",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
actions = msg["result"]
assert _same_lists(actions, expected_actions)
async def test_websocket_get_conditions(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected conditions from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": "light",
"type": "is_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"condition": "device",
"domain": "light",
"type": "is_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
conditions = msg["result"]
assert _same_lists(conditions, expected_conditions)
async def test_websocket_get_triggers(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected triggers from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": "light",
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"platform": "device",
"domain": "light",
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
triggers = msg["result"]
assert _same_lists(triggers, expected_triggers)
async def test_websocket_get_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected action capabilities for an alarm through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
"alarm_control_panel", "test", "5678", device_id=device_entry.id
)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 47}
)
expected_capabilities = {
"arm_away": {"extra_fields": []},
"arm_home": {"extra_fields": []},
"arm_night": {"extra_fields": []},
"arm_vacation": {"extra_fields": []},
"disarm": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"trigger": {"extra_fields": []},
}
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
actions = msg["result"]
id = 2
assert len(actions) == 6
for action in actions:
await client.send_json(
{
"id": id,
"type": "device_automation/action/capabilities",
"action": action,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities[action["type"]]
id = id + 1
async def test_websocket_get_bad_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no action capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/action/capabilities",
"action": {"domain": "beer"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no action capabilities for a domain with no device action capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/action/capabilities",
"action": {"domain": "deconz"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected condition capabilities for a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
conditions = msg["result"]
id = 2
assert len(conditions) == 2
for condition in conditions:
await client.send_json(
{
"id": id,
"type": "device_automation/condition/capabilities",
"condition": condition,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
id = id + 1
async def test_websocket_get_bad_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no condition capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/capabilities",
"condition": {"condition": "device", "domain": "beer", "device_id": "1234"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no condition capabilities for a domain with no device condition capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/capabilities",
"condition": {
"condition": "device",
"domain": "deconz",
"device_id": "abcd",
},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_async_get_device_automations_single_device_trigger(
hass, device_reg, entity_reg
):
"""Test we get can fetch the triggers for a device id."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
result = await device_automation.async_get_device_automations(
hass, "trigger", [device_entry.id]
)
assert device_entry.id in result
assert len(result[device_entry.id]) == 2
async def test_async_get_device_automations_all_devices_trigger(
hass, device_reg, entity_reg
):
"""Test we get can fetch all the triggers when no device id is passed."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
result = await device_automation.async_get_device_automations(hass, "trigger")
assert device_entry.id in result
assert len(result[device_entry.id]) == 2
async def test_async_get_device_automations_all_devices_condition(
hass, device_reg, entity_reg
):
"""Test we get can fetch all the conditions when no device id is passed."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
result = await device_automation.async_get_device_automations(hass, "condition")
assert device_entry.id in result
assert len(result[device_entry.id]) == 2
async def test_async_get_device_automations_all_devices_action(
hass, device_reg, entity_reg
):
"""Test we get can fetch all the actions when no device id is passed."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
result = await device_automation.async_get_device_automations(hass, "action")
assert device_entry.id in result
assert len(result[device_entry.id]) == 3
async def test_async_get_device_automations_all_devices_action_exception_throw(
hass, device_reg, entity_reg, caplog
):
"""Test we get can fetch all the actions when no device id is passed and can handle one throwing an exception."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
with patch(
"homeassistant.components.light.device_trigger.async_get_triggers",
side_effect=KeyError,
):
result = await device_automation.async_get_device_automations(hass, "trigger")
assert device_entry.id in result
assert len(result[device_entry.id]) == 0
assert "KeyError" in caplog.text
async def test_websocket_get_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected trigger capabilities for a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
triggers = msg["result"]
id = 2
assert len(triggers) == 2
for trigger in triggers:
await client.send_json(
{
"id": id,
"type": "device_automation/trigger/capabilities",
"trigger": trigger,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
id = id + 1
async def test_websocket_get_bad_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no trigger capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/capabilities",
"trigger": {"platform": "device", "domain": "beer", "device_id": "abcd"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no trigger capabilities for a domain with no device trigger capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/capabilities",
"trigger": {"platform": "device", "domain": "deconz", "device_id": "abcd"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_automation_with_non_existing_integration(hass, caplog):
"""Test device automation with non existing integration."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {
"platform": "device",
"device_id": "none",
"domain": "beer",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "Integration 'beer' not found" in caplog.text
async def test_automation_with_integration_without_device_action(
hass, caplog, enable_custom_integrations
):
"""Test automation with integration without device action support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"device_id": "", "domain": "test"},
}
},
)
assert (
"Integration 'test' does not support device automation actions" in caplog.text
)
async def test_automation_with_integration_without_device_condition(
hass, caplog, enable_custom_integrations
):
"""Test automation with integration without device condition support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"device_id": "none",
"domain": "test",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert (
"Integration 'test' does not support device automation conditions"
in caplog.text
)
async def test_automation_with_integration_without_device_trigger(
hass, caplog, enable_custom_integrations
):
"""Test automation with integration without device trigger support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {
"platform": "device",
"device_id": "none",
"domain": "test",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert (
"Integration 'test' does not support device automation triggers" in caplog.text
)
async def test_automation_with_bad_action(hass, caplog):
"""Test automation with bad device action."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"device_id": "", "domain": "light"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_condition_action(hass, caplog):
"""Test automation with bad device action."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"condition": "device", "device_id": "", "domain": "light"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_condition(hass, caplog):
"""Test automation with bad device condition."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {"condition": "device", "domain": "light"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_automation_with_sub_condition(hass, calls, enable_custom_integrations):
"""Test automation with device condition under and/or conditions."""
DOMAIN = "light"
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "and",
"conditions": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_on",
},
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent2.entity_id,
"type": "is_on",
},
],
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "and {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "or",
"conditions": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_on",
},
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent2.entity_id,
"type": "is_on",
},
],
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "or {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert hass.states.get(ent2.entity_id).state == STATE_OFF
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "or event - test_event1"
hass.states.async_set(ent1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(ent2.entity_id, STATE_ON)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "or event - test_event1"
hass.states.async_set(ent1.entity_id, STATE_ON)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 4
assert _same_lists(
[calls[2].data["some"], calls[3].data["some"]],
["or event - test_event1", "and event - test_event1"],
)
async def test_automation_with_bad_sub_condition(hass, caplog):
"""Test automation with bad device condition under and/or conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "and",
"conditions": [{"condition": "device", "domain": "light"}],
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_trigger(hass, caplog):
"""Test automation with bad device trigger."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "device", "domain": "light"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
async def test_websocket_device_not_found(hass, hass_ws_client):
"""Test calling command with unknown device."""
await async_setup_component(hass, "device_automation", {})
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": "non-existing"}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert not msg["success"]
assert msg["error"] == {"code": "not_found", "message": "Device not found"}
|
{
"content_hash": "73821362a5f0f3105c6792eb56808fbd",
"timestamp": "",
"source": "github",
"line_count": 884,
"max_line_length": 117,
"avg_line_length": 34.27036199095023,
"alnum_prop": 0.5600264069978544,
"repo_name": "aronsky/home-assistant",
"id": "563611b99adb4a76221a8826e11550bf57b56b4d",
"size": "30295",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/device_automation/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from elonomics.models import Player, Game
import re
def games(request):
games = Game.objects.order_by('-time_played')
# Split games into groups based on day.
game_groups = OrderedDict()
for game in games:
key = '{0:%A, %B} {0.day}, {0.year}'.format(game.time_played)
if key in game_groups:
game_groups[key].insert(0, game)
else:
game_groups[key] = [game]
players = Player.objects.order_by('full_name')
outcomes = Game.OUTCOME_CHOICES
return render(request, 'elonomics/games.html', {
'game_groups': game_groups,
'players': players,
'outcomes': outcomes
})
def submit_game(request):
try:
t = request.POST['time_played'].strip()
s = request.POST['story'].strip()
p1 = request.POST['player1']
p2 = request.POST['player2']
s1 = request.POST['signoff1']
s2 = request.POST['signoff2']
o = int(request.POST['outcome'])
except KeyError:
messages.add_message(request, messages.ERROR, "Invalid form submit. Please try again.")
return HttpResponseRedirect(reverse('games'))
    if not re.match(r'^\d\d\d\d-\d\d-\d\d \d\d:\d\d$', t):
messages.add_message(request, messages.ERROR, "I am truly sorry for the lack of calendar. Please input the time in the format YYYY-MM-DD hh:mm")
return HttpResponseRedirect(reverse('games'))
try:
p1 = Player.objects.get(user_name=p1)
p2 = Player.objects.get(user_name=p2)
except Player.DoesNotExist:
messages.add_message(request, messages.ERROR, "Player name does not exist.")
return HttpResponseRedirect(reverse('games'))
if p1.sign_off != s1 or p2.sign_off != s2:
messages.add_message(request, messages.ERROR, "Incorrect signoff!")
return HttpResponseRedirect(reverse('games'))
g = Game(
time_played=t,
story=s,
player1=p1,
player2=p2,
player1_before_game_elo=p1.elo_score,
player2_before_game_elo=p2.elo_score,
outcome=o
)
if o == Game.STALEMATES or o == Game.DRAWS or p1.id == p2.id:
pass
elif o == Game.CHECKMATES or o == Game.BRIBES:
p1.elo_score += 1
p2.elo_score -= 1
else:
messages.add_message(request, messages.ERROR, "Unsupported outcome.")
return HttpResponseRedirect(reverse('games'))
try:
g.save()
p1.save()
p2.save()
except ValidationError:
messages.add_message(request, messages.ERROR, "Validation error.")
return HttpResponseRedirect(reverse('games'))
messages.add_message(request, messages.SUCCESS, "Successfully added!")
return HttpResponseRedirect(reverse('games'))
def players(request):
players = Player.objects.order_by('full_name')
return render(request, 'elonomics/players.html', {
'players': players
})
def player(request, user_name):
player = get_object_or_404(Player, user_name=user_name)
games = (player.player1.all() | player.player2.all()).order_by('-time_played')
return render(request, 'elonomics/player.html', {
'player': player,
'games': games
})
|
{
"content_hash": "45048c25fd69bbe03d406f04913c48e1",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 152,
"avg_line_length": 33.95145631067961,
"alnum_prop": 0.6345438947669431,
"repo_name": "rjoleary/elonomics",
"id": "a4d08cee4417b9506bed308919a39eb54e5cea9d",
"size": "3497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elonomics/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5882"
},
{
"name": "Python",
"bytes": "10710"
}
],
"symlink_target": ""
}
|
import gtk
import gtk.gdk
from envxdot import EnvironmentDotWindow
from ARM import *
import NodeDialogFactory
class EnvironmentModelViewer(EnvironmentDotWindow):
def __init__(self,environmentName,dp):
EnvironmentDotWindow.__init__(self,environmentName,dp)
self.dbProxy = dp
self.environment = self.dbProxy.dimensionObject(environmentName,'environment')
self.widget.connect('clicked', self.on_url_clicked)
self.widget.connect('button_press_event', self.onClick)
def onClick(self,widget,event):
try:
if event.button == 3:
print self.widget.get_url(event.x,event.y).url
return 1
except AttributeError:
pass
def on_url_clicked(self, widget, url, event):
dialog = NodeDialogFactory.build(url,self.environment.name())
return True
def onTypeClicked(self, widget, event):
pass
def onNameClicked(self, widget, event):
pass
def ShowModal(self, tLinks):
self.updateModel(tLinks)
self.connect('destroy', gtk.main_quit)
self.set_modal(True)
gtk.main()
def updateModel(self,tLinks):
self.traceModel = tLinks
xdotcode = self.traceModel.graph()
environmentNames = self.dbProxy.getDimensionNames('environment')
environmentNames.sort(key=str.lower)
self.loadFilters(environmentNames,tLinks.dimensions(),tLinks.objects())
self.set_xdotcode(xdotcode)
self.blockHandlers()
self.environmentCombo.set_active(environmentNames.index(self.environment.name()))
self.unblockHandlers()
self.widget.zoom_to_fit()
|
{
"content_hash": "44a8af34f8112250f184903028c5961d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 85,
"avg_line_length": 31.244897959183675,
"alnum_prop": 0.7243631613324625,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "eff7ccebe5cc1a3bd5e7b14f849a9a6c130f7b82",
"size": "2330",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/EnvironmentModelViewer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1beta1
async def sample_export_feature_values():
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
# Initialize request argument(s)
destination = aiplatform_v1beta1.FeatureValueDestination()
destination.bigquery_destination.output_uri = "output_uri_value"
feature_selector = aiplatform_v1beta1.FeatureSelector()
feature_selector.id_matcher.ids = ['ids_value_1', 'ids_value_2']
request = aiplatform_v1beta1.ExportFeatureValuesRequest(
entity_type="entity_type_value",
destination=destination,
feature_selector=feature_selector,
)
# Make the request
operation = client.export_feature_values(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_ExportFeatureValues_async]
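# Hedged note (not part of the generated sample): outside an existing event
# loop, the coroutine above could be driven with, for example,
#
#   import asyncio
#   asyncio.run(sample_export_feature_values())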
|
{
"content_hash": "98f0baf020fecce654033ff599879cb5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 93,
"avg_line_length": 31.612903225806452,
"alnum_prop": 0.736734693877551,
"repo_name": "googleapis/python-aiplatform",
"id": "3fb556691658c81d3cd8feeca3fbeb08b310816f",
"size": "2020",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_export_feature_values_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import config as cfg
from sequencePlayer import SequencePlayer
from neopixel import Adafruit_NeoPixel
class NeopixelSequencePlayer(SequencePlayer):
def __init__(self):
super().__init__()
self.strip = Adafruit_NeoPixel(
cfg.LED_COUNT,
cfg.LED_DATA_PIN,
cfg.LED_FREQ_HZ,
cfg.LED_DMA,
cfg.LED_INVERT,
cfg.LED_BRIGHTNESS,
cfg.LED_CHANNEL,
cfg.LED_STRIP)
self.strip.begin()
def setrangecolor(self, start, end, color, write=True):
super().setrangecolor(start, end, color, write)
if write:
self.strip.show()
def setcolor(self, led, color, write=True):
self.strip.setPixelColor(led, color.topixel())
if write:
self.strip.show()
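# Hedged usage sketch (not part of the original module). It assumes config
# exposes the LED_* constants referenced above and that the color argument
# provides the topixel() method expected by setcolor(); `some_color` is a
# hypothetical placeholder.
#
#   player = NeopixelSequencePlayer()
#   player.setrangecolor(0, cfg.LED_COUNT - 1, some_color)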
|
{
"content_hash": "0a0f11a8eec606478debb2f88db6f272",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 59,
"avg_line_length": 28.03448275862069,
"alnum_prop": 0.5817958179581796,
"repo_name": "vinhui/chistmastree",
"id": "a7383c860561bc887df57f8c4760c0201bd84980",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neopixelSequencePlayer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7583"
},
{
"name": "JavaScript",
"bytes": "7572"
},
{
"name": "Python",
"bytes": "19685"
}
],
"symlink_target": ""
}
|
import uuid
from datetime import datetime
from cqlengine import columns
from cqlengine import models
class Event(models.Model):
"""
Analytic Event
"""
__keyspace__ = 'ans'
__table_name__ = 'events'
# unique identifier for this event
id = columns.UUID(primary_key=True, default=uuid.uuid4)
# collected using this api_key
api_key = columns.Text(required=False)
# real app id
app_id = columns.Text(required=False)
# Name of collection
collection = columns.Text(required=False)
# Event details
# Event timestamp, for items, this is when the item was completed
    # pass the callable so the timestamp is evaluated per row rather than at import time
    event_timestamp = columns.DateTime(default=datetime.utcnow)
# unique device code
device_code = columns.UUID(required=False)
# (choices: -1 unknown, 0 no, 1 yes, )
    at_school = columns.Text(required=False, default='-1')
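# Hedged usage sketch (not part of the original module): with a cqlengine
# connection already configured for the 'ans' keyspace, the table can be
# created and a row inserted roughly as follows; the argument values are
# illustrative assumptions.
#
#   from cqlengine.management import sync_table
#   sync_table(Event)
#   Event.create(api_key="demo-key", app_id="demo-app", collection="sessions")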
|
{
"content_hash": "61185d52a869d400f01bc0942752e8d6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 69,
"avg_line_length": 28.6,
"alnum_prop": 0.6818181818181818,
"repo_name": "hkhamm/cqlengine_test",
"id": "23f431e0b24c3e836d3a1a0e819d0c04befeb256",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cass_models.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
|
{
"content_hash": "d8840ff6b5b0d6cc9cecc06f484b3fa1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 80,
"avg_line_length": 81,
"alnum_prop": 0.7901234567901234,
"repo_name": "tao12345666333/app-turbo",
"id": "cbf964c8b83c83551cc2f1987d69417e6edc2a9e",
"size": "81",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "turbo/test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1615"
},
{
"name": "JavaScript",
"bytes": "4046"
},
{
"name": "Python",
"bytes": "124548"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.test.iosim}.
"""
from __future__ import absolute_import, division
from twisted.test.iosim import FakeTransport
from twisted.trial.unittest import TestCase
class FakeTransportTests(TestCase):
"""
Tests for L{FakeTransport}.
"""
def test_connectionSerial(self):
"""
Each L{FakeTransport} receives a serial number that uniquely identifies
it.
"""
a = FakeTransport(object(), True)
b = FakeTransport(object(), False)
self.assertIsInstance(a.serial, int)
self.assertIsInstance(b.serial, int)
self.assertNotEqual(a.serial, b.serial)
def test_writeSequence(self):
"""
L{FakeTransport.writeSequence} will write a sequence of L{bytes} to the
transport.
"""
a = FakeTransport(object(), False)
a.write(b"a")
a.writeSequence([b"b", b"c", b"d"])
self.assertEqual(b"".join(a.stream), b"abcd")
|
{
"content_hash": "db2ea8f1393e97e1faf0203a1403d41b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 25.473684210526315,
"alnum_prop": 0.6198347107438017,
"repo_name": "Tokyo-Buffalo/tokyosouth",
"id": "cf44d24dbefec8dba0fad30bfc5d4d0956597c74",
"size": "1041",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/twisted/test/test_iosim.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "145891"
},
{
"name": "Python",
"bytes": "7587"
}
],
"symlink_target": ""
}
|
import sys
import os.path
# Import the relevant PTS classes and modules
from pts.eagle.galaxy import Snapshot
# -----------------------------------------------------------------
# parse the arguments
if not len(sys.argv) in (2,3): raise ValueError("This script expects one or two command-line arguments")
snapshotname = sys.argv[1]
# open the snapshot files
print "Opening the snapshot..."
snap = Snapshot(snapshotname)
snap.printinfo()
print ""
# if the catalog exists, show info on its contents
if os.path.isfile(snap.catalogfilepath()):
galaxies = snap.galaxies()
galaxies.printinfo()
# otherwise, ask the user whether to construct it
else:
proceed = raw_input("--> Would you like to build the catalog for {0} at redshift {1}? [y/n] " \
.format(snap.eaglesim, snap.orig_redshift))
if proceed.lower().startswith("y"):
if len(sys.argv) != 3: raise ValueError("This script expects a second command-line argument")
minstarmass = float(sys.argv[2])
snap.exportcatalog(minstarmass)
# -----------------------------------------------------------------
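# Hedged invocation sketch (not part of the original script): run the script
# directly with one or two command-line arguments, e.g.
#
#   python catalog.py <snapshotname> [<minimum stellar mass>]
#
# where the second argument is only needed when the catalog still has to be built.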
|
{
"content_hash": "9684769b2ba0d46b6d3f7f7ec9719bc9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 104,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.613047363717605,
"repo_name": "Stargrazer82301/CAAPR",
"id": "36d2e18e296ac4491644fb83932b7ce883448185",
"size": "2338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CAAPR/CAAPR_AstroMagic/PTS/pts/do/eagle/catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "21972"
},
{
"name": "HTML",
"bytes": "2408"
},
{
"name": "Prolog",
"bytes": "16433"
},
{
"name": "Python",
"bytes": "4465217"
},
{
"name": "Shell",
"bytes": "3793"
}
],
"symlink_target": ""
}
|
import tables as tb
import pandas as pd
import sys
class AffmatCleaner(object):
"""docstring for AffmatCleaner"""
def __init__(self, handle, segment):
super(AffmatCleaner, self).__init__()
self.handle = handle
self.accession_strains = None
self.segment = segment
self.distmat = None
self.affmat = None
def run(self):
self.compile_accessions_strains()
# for segment in range(1,9):
print('Segment: {0}'.format(self.segment))
print('Cleaning distmat...')
self.clean_distmat()
print('Reading Distmat...')
self.read_distmat()
print('Saving affmat...')
self.save_affmat()
print('Removing intermediate distmat...')
self.remove_intermediate_distmat()
def compile_accessions_strains(self):
"""
Sets the accession_strains dictionary such that keys are accessions,
and values are strain names.
"""
df = pd.read_csv('{0} Full Isolates.csv'.format(self.handle), index_col=0, parse_dates=['Collection Date'])
self.accession_strains = dict(zip(df['Sequence Accession'], df['Strain Name']))
# self.accession_strains.set_index('Sequence Accession', inplace=True)
def clean_distmat(self):
"""
Replaces accession numbers with strain names.
Removes any double spaces (which are detected as double commas)
"""
oldname = 'distmats/{0} Segment {1} Distmat.txt'.format(self.handle, self.segment)
newname = 'distmats/{0} Segment {1} Distmat Renamed.txt'.format(self.handle, self.segment)
with open(oldname, 'rb') as oldf:
with open(newname, 'w+') as newf:
for line in oldf.readlines():
line = line.decode('utf-8')
if len(line.split(' ')) == 1:
newf.write(str(line))
else:
newline = line.replace(' ', ',')
while ',,' in newline:
newline = newline.replace(',,', ',')
accession = newline.split(',')[0]
# Replace accession number with strain name
newline = newline.replace(accession, self.accession_strains[accession])
newf.write(str(newline))
def read_distmat(self):
"""
Reads the distmat into memory.
"""
self.distmat = pd.read_csv('distmats/{0} Segment {1} Distmat Renamed.txt'.format(self.handle, self.segment), index_col=0, delimiter=',', skiprows=1, header=None)
self.distmat.columns = self.distmat.index
def save_affmat(self):
"""
Saves the affmat to an HDF5 store.
"""
self.affmat = (1 - self.distmat)
self.affmat.to_hdf('{0} Segment Affmats.h5'.format(self.handle), mode='a', key='segment{0}'.format(self.segment))
def remove_intermediate_distmat(self):
import os
os.remove('distmats/{0} Segment {1} Distmat Renamed.txt'.format(self.handle, self.segment))
if __name__ == '__main__':
handle = sys.argv[1]
segment = int(sys.argv[2])
ac = AffmatCleaner(handle, segment)
ac.run()
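# Hedged invocation sketch (not part of the original script); the handle and
# segment values are illustrative assumptions:
#
#   python clean_affmats.py "Victoria" 4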
|
{
"content_hash": "b62c71b266d85f272ad74531b744aeb5",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 169,
"avg_line_length": 33.30612244897959,
"alnum_prop": 0.5674019607843137,
"repo_name": "ericmjl/influenza-reassortment-detector",
"id": "65fce994874b38457940bef1116674c839936204",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clean_affmats.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44147"
},
{
"name": "Shell",
"bytes": "485"
}
],
"symlink_target": ""
}
|
from glumpy import gloo
transform_1 = gloo.Snippet("""
uniform float scale;
float forward(float x) { return scale*x; }
float inverse(float x) { return scale*x; }
""")
transform_2 = gloo.Snippet("""
uniform float scale;
float forward(float x) { return scale*x; }
float inverse(float x) { return scale*x; }
""")
transform_3 = gloo.Snippet("""
vec2 compose(float x, float y) { return vec2(x,y); }
vec2 compose(vec2 xy) { return xy; }
""")
code= """
uniform float scale;
void main(void)
{
// ---
float scale_t1 = <transform_1.scale>;
float scale_t2 = <transform_6.scale>;
// ---
// Argument must be given through snippet
<transform_1>;
// Argument cannot be given through snippet
<transform_2>(B);
// Argument can be overriden throught snippet
<transform_3(C)>;
// ---
// Default function (first defined) is used
<transform_4>;
// Forward function is used
<transform_5.forward>;
// Inverse function is used
<transform_6.inverse>;
// ---
// Compose snippet with specific field affectation
<transform_7>;
// Compose snippet with generic field affectation
    // Not yet done
<transform_8(H)>;
<transform_8.x.scale>;
<transform_8.y.scale>;
} """
program = gloo.Program(code,"void main(){}")
program["transform_1"] = transform_1("A")
program["transform_2"] = "forward"
program["transform_3"] = transform_1()
program["transform_4"] = transform_1("D")
program["transform_5"] = transform_1(transform_2("E"))
program["transform_6"] = transform_2("F", scale="aliased_scale")
program["transform_7"] = transform_3(transform_1("G.x"), transform_2("G.y"))
program["transform_8"] = transform_3(transform_1('.x', name='x'),
transform_2('.y', name='y'))
print(program.vertex.code)
# Make sure that if snippet code has been already included in another program
# it is nonetheless included in the new program
code= """
void main(void)
{
// Argument must be given through snippet
<transform>;
}
"""
program = gloo.Program(code, "void main(){}")
program["transform"] = transform_1("A")
#print program.vertex.code
|
{
"content_hash": "5746d009f19719972249c78dfca555ec",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 25.270588235294117,
"alnum_prop": 0.6410614525139665,
"repo_name": "duyuan11/glumpy",
"id": "3ca31218ccaa77dc59a2c7fcd1f3a22a80841e7c",
"size": "2461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/snippet-usage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "GLSL",
"bytes": "165997"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1201174"
}
],
"symlink_target": ""
}
|
"""Pipeline, the top-level Beam object.
A pipeline holds a DAG of data transforms. Conceptually the nodes of the DAG
are transforms (:class:`~apache_beam.transforms.ptransform.PTransform` objects)
and the edges are values (mostly :class:`~apache_beam.pvalue.PCollection`
objects). The transforms take as inputs one or more PValues and output one or
more :class:`~apache_beam.pvalue.PValue` s.
The pipeline offers functionality to traverse the graph. The actual operation
to be executed for each node visited is specified through a runner object.
Typical usage::
# Create a pipeline object using a local runner for execution.
with beam.Pipeline('DirectRunner') as p:
# Add to the pipeline a "Create" transform. When executed this
# transform will produce a PCollection object with the specified values.
pcoll = p | 'Create' >> beam.Create([1, 2, 3])
# Another transform could be applied to pcoll, e.g., writing to a text file.
# For other transforms, refer to transforms/ directory.
pcoll | 'Write' >> beam.io.WriteToText('./output')
# run() will execute the DAG stored in the pipeline. The execution of the
# nodes visited is done using the specified local runner.
"""
from __future__ import absolute_import
import abc
import logging
import os
import re
import shutil
import tempfile
from builtins import object
from builtins import zip
from typing import TYPE_CHECKING
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
from future.utils import with_metaclass
from apache_beam import pvalue
from apache_beam.internal import pickler
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TypeOptions
from apache_beam.options.pipeline_options_validator import PipelineOptionsValidator
from apache_beam.portability import common_urns
from apache_beam.runners import PipelineRunner
from apache_beam.runners import create_runner
from apache_beam.transforms import ptransform
#from apache_beam.transforms import external
from apache_beam.typehints import TypeCheckError
from apache_beam.typehints import typehints
from apache_beam.utils.annotations import deprecated
from apache_beam.utils.interactive_utils import alter_label_if_ipython
if TYPE_CHECKING:
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.pipeline_context import PipelineContext
from apache_beam.runners.runner import PipelineResult
__all__ = ['Pipeline', 'PTransformOverride']
class Pipeline(object):
"""A pipeline object that manages a DAG of
:class:`~apache_beam.pvalue.PValue` s and their
:class:`~apache_beam.transforms.ptransform.PTransform` s.
Conceptually the :class:`~apache_beam.pvalue.PValue` s are the DAG's nodes and
the :class:`~apache_beam.transforms.ptransform.PTransform` s computing
the :class:`~apache_beam.pvalue.PValue` s are the edges.
All the transforms applied to the pipeline must have distinct full labels.
  If the same transform instance needs to be applied, then the right shift operator
  should be used to designate new names
  (e.g. ``input | "label" >> my_transform``).
"""
def __init__(self,
runner=None, # type: Optional[Union[str, PipelineRunner]]
options=None, # type: Optional[PipelineOptions]
argv=None # type: Optional[List[str]]
):
"""Initialize a pipeline object.
Args:
runner (~apache_beam.runners.runner.PipelineRunner): An object of
type :class:`~apache_beam.runners.runner.PipelineRunner` that will be
used to execute the pipeline. For registered runners, the runner name
can be specified, otherwise a runner object must be supplied.
options (~apache_beam.options.pipeline_options.PipelineOptions):
A configured
:class:`~apache_beam.options.pipeline_options.PipelineOptions` object
containing arguments that should be used for running the Beam job.
argv (List[str]): a list of arguments (such as :data:`sys.argv`)
to be used for building a
:class:`~apache_beam.options.pipeline_options.PipelineOptions` object.
This will only be used if argument **options** is :data:`None`.
Raises:
~exceptions.ValueError: if either the runner or options argument is not
of the expected type.
"""
if options is not None:
if isinstance(options, PipelineOptions):
self._options = options
else:
raise ValueError(
'Parameter options, if specified, must be of type PipelineOptions. '
'Received : %r' % options)
elif argv is not None:
if isinstance(argv, list):
self._options = PipelineOptions(argv)
else:
raise ValueError(
'Parameter argv, if specified, must be a list. Received : %r'
% argv)
else:
self._options = PipelineOptions([])
FileSystems.set_options(self._options)
if runner is None:
runner = self._options.view_as(StandardOptions).runner
if runner is None:
runner = StandardOptions.DEFAULT_RUNNER
logging.info(('Missing pipeline option (runner). Executing pipeline '
'using the default runner: %s.'), runner)
if isinstance(runner, str):
runner = create_runner(runner)
elif not isinstance(runner, PipelineRunner):
raise TypeError('Runner %s is not a PipelineRunner object or the '
'name of a registered runner.' % runner)
# Validate pipeline options
errors = PipelineOptionsValidator(self._options, runner).validate()
if errors:
raise ValueError(
          'Pipeline has validation errors: \n' + '\n'.join(errors))
# set default experiments for portable runners
# (needs to occur prior to pipeline construction)
if runner.is_fnapi_compatible():
experiments = (self._options.view_as(DebugOptions).experiments or [])
      if 'beam_fn_api' not in experiments:
experiments.append('beam_fn_api')
self._options.view_as(DebugOptions).experiments = experiments
# Default runner to be used.
self.runner = runner
# Stack of transforms generated by nested apply() calls. The stack will
# contain a root node as an enclosing (parent) node for top transforms.
self.transforms_stack = [AppliedPTransform(None, None, '', None)]
# Set of transform labels (full labels) applied to the pipeline.
# If a transform is applied and the full label is already in the set
# then the transform will have to be cloned with a new label.
self.applied_labels = set() # type: Set[str]
@property # type: ignore[misc] # decorated property not supported
@deprecated(since='First stable release',
extra_message='References to <pipeline>.options'
' will not be supported')
def options(self):
return self._options
def _current_transform(self):
# type: () -> AppliedPTransform
"""Returns the transform currently on the top of the stack."""
return self.transforms_stack[-1]
def _root_transform(self):
# type: () -> AppliedPTransform
"""Returns the root transform of the transform stack."""
return self.transforms_stack[0]
def _remove_labels_recursively(self, applied_transform):
# type: (AppliedPTransform) -> None
for part in applied_transform.parts:
if part.full_label in self.applied_labels:
self.applied_labels.remove(part.full_label)
self._remove_labels_recursively(part)
def _replace(self, override):
assert isinstance(override, PTransformOverride)
# From original transform output --> replacement transform output
output_map = {}
output_replacements = {}
input_replacements = {}
side_input_replacements = {}
class TransformUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
""""A visitor that replaces the matching PTransforms."""
def __init__(self, pipeline):
# type: (Pipeline) -> None
self.pipeline = pipeline
def _replace_if_needed(self, original_transform_node):
if override.matches(original_transform_node):
assert isinstance(original_transform_node, AppliedPTransform)
replacement_transform = override.get_replacement_transform(
original_transform_node.transform)
if replacement_transform is original_transform_node.transform:
return
replacement_transform_node = AppliedPTransform(
original_transform_node.parent, replacement_transform,
original_transform_node.full_label,
original_transform_node.inputs)
# Transform execution could depend on order in which nodes are
          # considered. Hence we insert the replacement transform node at the same
# index as the original transform node. Note that this operation
# removes the original transform node.
if original_transform_node.parent:
assert isinstance(original_transform_node.parent, AppliedPTransform)
parent_parts = original_transform_node.parent.parts
parent_parts[parent_parts.index(original_transform_node)] = (
replacement_transform_node)
else:
# Original transform has to be a root.
roots = self.pipeline.transforms_stack[0].parts
assert original_transform_node in roots
roots[roots.index(original_transform_node)] = (
replacement_transform_node)
inputs = replacement_transform_node.inputs
# TODO: Support replacing PTransforms with multiple inputs.
if len(inputs) > 1:
raise NotImplementedError(
'PTransform overriding is only supported for PTransforms that '
'have a single input. Tried to replace input of '
'AppliedPTransform %r that has %d inputs'
                % (original_transform_node, len(inputs)))
elif len(inputs) == 1:
input_node = inputs[0]
elif len(inputs) == 0:
            input_node = pvalue.PBegin(self.pipeline)
# We have to add the new AppliedTransform to the stack before expand()
# and pop it out later to make sure that parts get added correctly.
self.pipeline.transforms_stack.append(replacement_transform_node)
# Keeping the same label for the replaced node but recursively
# removing labels of child transforms of original transform since they
# will be replaced during the expand below. This is needed in case
          # the replacement contains children that have labels that conflict
# with labels of the children of the original.
self.pipeline._remove_labels_recursively(original_transform_node)
new_output = replacement_transform.expand(input_node)
if isinstance(new_output, pvalue.PValue):
new_output.element_type = None
self.pipeline._infer_result_type(replacement_transform, inputs,
new_output)
replacement_transform_node.add_output(new_output)
# Recording updated outputs. This cannot be done in the same visitor
# since if we dynamically update output type here, we'll run into
# errors when visiting child nodes.
#
# NOTE: When replacing multiple outputs, the replacement PCollection
# tags must have a matching tag in the original transform.
if isinstance(new_output, pvalue.PValue):
if not new_output.producer:
new_output.producer = replacement_transform_node
output_map[original_transform_node.outputs[None]] = new_output
elif isinstance(new_output, (pvalue.DoOutputsTuple, tuple)):
for pcoll in new_output:
if not pcoll.producer:
pcoll.producer = replacement_transform_node
output_map[original_transform_node.outputs[pcoll.tag]] = pcoll
elif isinstance(new_output, dict):
for tag, pcoll in new_output.items():
if not pcoll.producer:
pcoll.producer = replacement_transform_node
output_map[original_transform_node.outputs[tag]] = pcoll
self.pipeline.transforms_stack.pop()
def enter_composite_transform(self, transform_node):
# type: (AppliedPTransform) -> None
self._replace_if_needed(transform_node)
def visit_transform(self, transform_node):
# type: (AppliedPTransform) -> None
self._replace_if_needed(transform_node)
self.visit(TransformUpdater(self))
# Adjusting inputs and outputs
class InputOutputUpdater(PipelineVisitor): # pylint: disable=used-before-assignment
""""A visitor that records input and output values to be replaced.
Input and output values that should be updated are recorded in maps
input_replacements and output_replacements respectively.
We cannot update input and output values while visiting since that results
in validation errors.
"""
def __init__(self, pipeline):
# type: (Pipeline) -> None
self.pipeline = pipeline
def enter_composite_transform(self, transform_node):
# type: (AppliedPTransform) -> None
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
# type: (AppliedPTransform) -> None
replace_output = False
for tag in transform_node.outputs:
if transform_node.outputs[tag] in output_map:
replace_output = True
break
replace_input = False
for input in transform_node.inputs:
if input in output_map:
replace_input = True
break
replace_side_inputs = False
for side_input in transform_node.side_inputs:
if side_input.pvalue in output_map:
replace_side_inputs = True
break
if replace_output:
output_replacements[transform_node] = []
for original, replacement in output_map.items():
if (original.tag in transform_node.outputs and
transform_node.outputs[original.tag] in output_map):
output_replacements[transform_node].append(
(replacement, original.tag))
if replace_input:
new_input = [
              input if input not in output_map else output_map[input]
for input in transform_node.inputs]
input_replacements[transform_node] = new_input
if replace_side_inputs:
new_side_inputs = []
for side_input in transform_node.side_inputs:
if side_input.pvalue in output_map:
side_input.pvalue = output_map[side_input.pvalue]
new_side_inputs.append(side_input)
else:
new_side_inputs.append(side_input)
side_input_replacements[transform_node] = new_side_inputs
self.visit(InputOutputUpdater(self))
for transform in output_replacements:
for output in output_replacements[transform]:
transform.replace_output(output[0], tag=output[1])
for transform in input_replacements:
transform.inputs = input_replacements[transform]
for transform in side_input_replacements:
transform.side_inputs = side_input_replacements[transform]
def _check_replacement(self, override):
class ReplacementValidator(PipelineVisitor):
def visit_transform(self, transform_node):
if override.matches(transform_node):
raise RuntimeError('Transform node %r was not replaced as expected.'
% transform_node)
self.visit(ReplacementValidator())
def replace_all(self, replacements):
# type: (Iterable[PTransformOverride]) -> None
""" Dynamically replaces PTransforms in the currently populated hierarchy.
Currently this only works for replacements where input and output types
are exactly the same.
TODO: Update this to also work for transform overrides where input and
output types are different.
Args:
replacements (List[~apache_beam.pipeline.PTransformOverride]): a list of
:class:`~apache_beam.pipeline.PTransformOverride` objects.
"""
for override in replacements:
assert isinstance(override, PTransformOverride)
self._replace(override)
# Checking if the PTransforms have been successfully replaced. This will
# result in a failure if a PTransform that was replaced in a given override
# gets re-added in a subsequent override. This is not allowed and ordering
# of PTransformOverride objects in 'replacements' is important.
for override in replacements:
self._check_replacement(override)
def run(self, test_runner_api=True):
# type: (...) -> PipelineResult
"""Runs the pipeline. Returns whatever our runner returns after running."""
# When possible, invoke a round trip through the runner API.
if test_runner_api and self._verify_runner_api_compatible():
return Pipeline.from_runner_api(
self.to_runner_api(use_fake_coders=True),
self.runner,
self._options).run(False)
if self._options.view_as(TypeOptions).runtime_type_check:
from apache_beam.typehints import typecheck
self.visit(typecheck.TypeCheckVisitor())
if self._options.view_as(SetupOptions).save_main_session:
# If this option is chosen, verify we can pickle the main session early.
tmpdir = tempfile.mkdtemp()
try:
pickler.dump_session(os.path.join(tmpdir, 'main_session.pickle'))
finally:
shutil.rmtree(tmpdir)
return self.runner.run_pipeline(self, self._options)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
self.run().wait_until_finish()
def visit(self, visitor):
# type: (PipelineVisitor) -> None
"""Visits depth-first every node of a pipeline's DAG.
Runner-internal implementation detail; no backwards-compatibility guarantees
Args:
visitor (~apache_beam.pipeline.PipelineVisitor):
:class:`~apache_beam.pipeline.PipelineVisitor` object whose callbacks
will be called for each node visited. See
:class:`~apache_beam.pipeline.PipelineVisitor` comments.
Raises:
~exceptions.TypeError: if node is specified and is not a
:class:`~apache_beam.pvalue.PValue`.
~apache_beam.error.PipelineError: if node is specified and does not
belong to this pipeline instance.
"""
visited = set() # type: Set[pvalue.PValue]
self._root_transform().visit(visitor, self, visited)
def apply(self, transform, pvalueish=None, label=None):
"""Applies a custom transform using the pvalueish specified.
Args:
transform (~apache_beam.transforms.ptransform.PTransform): the
:class:`~apache_beam.transforms.ptransform.PTransform` to apply.
pvalueish (~apache_beam.pvalue.PCollection): the input for the
:class:`~apache_beam.transforms.ptransform.PTransform` (typically a
:class:`~apache_beam.pvalue.PCollection`).
label (str): label of the
:class:`~apache_beam.transforms.ptransform.PTransform`.
Raises:
~exceptions.TypeError: if the transform object extracted from the
argument list is not a
:class:`~apache_beam.transforms.ptransform.PTransform`.
~exceptions.RuntimeError: if the transform object was already applied to
this pipeline and needs to be cloned in order to apply again.
"""
if isinstance(transform, ptransform._NamedPTransform):
return self.apply(transform.transform, pvalueish,
label or transform.label)
if not isinstance(transform, ptransform.PTransform):
raise TypeError("Expected a PTransform object, got %s" % transform)
if label:
# Fix self.label as it is inspected by some PTransform operations
# (e.g. to produce error messages for type hint violations).
try:
old_label, transform.label = transform.label, label
return self.apply(transform, pvalueish)
finally:
transform.label = old_label
# Attempts to alter the label of the transform to be applied only when it's
# a top-level transform so that the cell number will not be prepended to
# every child transform in a composite.
if self._current_transform() is self._root_transform():
alter_label_if_ipython(transform, pvalueish)
full_label = '/'.join([self._current_transform().full_label,
label or transform.label]).lstrip('/')
if full_label in self.applied_labels:
raise RuntimeError(
'A transform with label "%s" already exists in the pipeline. '
'To apply a transform with a specified label write '
'pvalue | "label" >> transform'
% full_label)
self.applied_labels.add(full_label)
pvalueish, inputs = transform._extract_input_pvalues(pvalueish)
try:
inputs = tuple(inputs)
for leaf_input in inputs:
if not isinstance(leaf_input, pvalue.PValue):
raise TypeError
except TypeError:
raise NotImplementedError(
'Unable to extract PValue inputs from %s; either %s does not accept '
'inputs of this format, or it does not properly override '
'_extract_input_pvalues' % (pvalueish, transform))
current = AppliedPTransform(
self._current_transform(), transform, full_label, inputs)
self._current_transform().add_part(current)
self.transforms_stack.append(current)
type_options = self._options.view_as(TypeOptions)
if type_options.pipeline_type_check:
transform.type_check_inputs(pvalueish)
pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
if type_options is not None and type_options.pipeline_type_check:
transform.type_check_outputs(pvalueish_result)
for result in ptransform.get_nested_pvalues(pvalueish_result):
assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))
# Make sure we set the producer only for a leaf node in the transform DAG.
# This way we preserve the last transform of a composite transform as
# being the real producer of the result.
if result.producer is None:
result.producer = current
self._infer_result_type(transform, inputs, result)
assert isinstance(result.producer.inputs, tuple)
current.add_output(result)
if (type_options is not None and
type_options.type_check_strictness == 'ALL_REQUIRED' and
transform.get_type_hints().output_types is None):
ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)
raise TypeCheckError('Pipeline type checking is enabled, however no '
'output type-hint was found for the '
'PTransform %s' % ptransform_name)
self.transforms_stack.pop()
return pvalueish_result
def _infer_result_type(self, transform, inputs, result_pcollection):
# TODO(robertwb): Multi-input inference.
type_options = self._options.view_as(TypeOptions)
if type_options is None or not type_options.pipeline_type_check:
return
if (isinstance(result_pcollection, pvalue.PCollection)
and (not result_pcollection.element_type
# TODO(robertwb): Ideally we'd do intersection here.
or result_pcollection.element_type == typehints.Any)):
# Single-input, single-output inference.
input_element_type = (
inputs[0].element_type
if len(inputs) == 1
else typehints.Any)
type_hints = transform.get_type_hints()
declared_output_type = type_hints.simple_output_type(transform.label)
if declared_output_type:
input_types = type_hints.input_types
if input_types and input_types[0]:
declared_input_type = input_types[0][0]
result_pcollection.element_type = typehints.bind_type_variables(
declared_output_type,
typehints.match_type_variables(declared_input_type,
input_element_type))
else:
result_pcollection.element_type = declared_output_type
else:
result_pcollection.element_type = transform.infer_output_type(
input_element_type)
elif isinstance(result_pcollection, pvalue.DoOutputsTuple):
# Single-input, multi-output inference.
# TODO(BEAM-4132): Add support for tagged type hints.
# https://github.com/apache/beam/pull/9810#discussion_r338765251
for pcoll in result_pcollection:
if pcoll.element_type is None:
pcoll.element_type = typehints.Any
def __reduce__(self):
# Some transforms contain a reference to their enclosing pipeline,
# which in turn reference all other transforms (resulting in quadratic
# time/space to pickle each transform individually). As we don't
# require pickled pipelines to be executable, break the chain here.
return str, ('Pickled pipeline stub.',)
def _verify_runner_api_compatible(self):
if self._options.view_as(TypeOptions).runtime_type_check:
# This option is incompatible with the runner API as it requires
# the runner to inspect non-serialized hints on the transform
# itself.
return False
class Visitor(PipelineVisitor): # pylint: disable=used-before-assignment
ok = True # Really a nonlocal.
def enter_composite_transform(self, transform_node):
pass
def visit_transform(self, transform_node):
try:
# Transforms must be picklable.
pickler.loads(pickler.dumps(transform_node.transform,
enable_trace=False),
enable_trace=False)
except Exception:
Visitor.ok = False
def visit_value(self, value, _):
if isinstance(value, pvalue.PDone):
Visitor.ok = False
self.visit(Visitor())
return Visitor.ok
def to_runner_api(self,
return_context=False,
context=None, # type: Optional[PipelineContext]
use_fake_coders=False,
default_environment=None # type: Optional[beam_runner_api_pb2.Environment]
):
# type: (...) -> beam_runner_api_pb2.Pipeline
"""For internal use only; no backwards-compatibility guarantees."""
from apache_beam.runners import pipeline_context
from apache_beam.portability.api import beam_runner_api_pb2
if context is None:
context = pipeline_context.PipelineContext(
use_fake_coders=use_fake_coders,
default_environment=default_environment)
elif default_environment is not None:
raise ValueError(
'Only one of context or default_environment may be specified.')
# The RunnerAPI spec requires certain transforms and side-inputs to have KV
# inputs (and corresponding outputs).
# Currently we only upgrade to KV pairs. If there is a need for more
# general shapes, potential conflicts will have to be resolved.
# We also only handle single-input, and (for fixing the output) single
# output, which is sufficient.
class ForceKvInputTypes(PipelineVisitor):
def enter_composite_transform(self, transform_node):
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
if not transform_node.transform:
return
if transform_node.transform.runner_api_requires_keyed_input():
pcoll = transform_node.inputs[0]
pcoll.element_type = typehints.coerce_to_kv_type(
pcoll.element_type, transform_node.full_label)
if len(transform_node.outputs) == 1:
# The runner often has expectations about the output types as well.
output, = transform_node.outputs.values()
if not output.element_type:
output.element_type = transform_node.transform.infer_output_type(
pcoll.element_type
)
for side_input in transform_node.transform.side_inputs:
if side_input.requires_keyed_input():
side_input.pvalue.element_type = typehints.coerce_to_kv_type(
side_input.pvalue.element_type, transform_node.full_label,
side_input_producer=side_input.pvalue.producer.full_label)
self.visit(ForceKvInputTypes())
# Mutates context; placing inline would force dependence on
# argument evaluation order.
root_transform_id = context.transforms.get_id(self._root_transform())
proto = beam_runner_api_pb2.Pipeline(
root_transform_ids=[root_transform_id],
components=context.to_runner_api())
proto.components.transforms[root_transform_id].unique_name = (
root_transform_id)
if return_context:
return proto, context # type: ignore # too complicated for now
else:
return proto
@staticmethod
def from_runner_api(proto, # type: beam_runner_api_pb2.Pipeline
runner, # type: PipelineRunner
options, # type: PipelineOptions
return_context=False,
allow_proto_holders=False
):
# type: (...) -> Pipeline
"""For internal use only; no backwards-compatibility guarantees."""
p = Pipeline(runner=runner, options=options)
from apache_beam.runners import pipeline_context
context = pipeline_context.PipelineContext(
proto.components, allow_proto_holders=allow_proto_holders)
root_transform_id, = proto.root_transform_ids
p.transforms_stack = [
context.transforms.get_by_id(root_transform_id)]
# TODO(robertwb): These are only needed to continue construction. Omit?
p.applied_labels = set([
t.unique_name for t in proto.components.transforms.values()])
for id in proto.components.pcollections:
pcollection = context.pcollections.get_by_id(id)
pcollection.pipeline = p
if not pcollection.producer:
raise ValueError('No producer for %s' % id)
# Inject PBegin input where necessary.
from apache_beam.io.iobase import Read
from apache_beam.transforms.core import Create
has_pbegin = [Read, Create]
for id in proto.components.transforms:
transform = context.transforms.get_by_id(id)
if not transform.inputs and transform.transform.__class__ in has_pbegin:
transform.inputs = (pvalue.PBegin(p),)
if return_context:
return p, context # type: ignore # too complicated for now
else:
return p
class PipelineVisitor(object):
"""For internal use only; no backwards-compatibility guarantees.
Visitor pattern class used to traverse a DAG of transforms
  (used internally by Pipeline for bookkeeping purposes).
"""
def visit_value(self, value, producer_node):
# type: (pvalue.PValue, AppliedPTransform) -> None
"""Callback for visiting a PValue in the pipeline DAG.
Args:
value: PValue visited (typically a PCollection instance).
producer_node: AppliedPTransform object whose transform produced the
pvalue.
"""
pass
def visit_transform(self, transform_node):
# type: (AppliedPTransform) -> None
"""Callback for visiting a transform leaf node in the pipeline DAG."""
pass
def enter_composite_transform(self, transform_node):
# type: (AppliedPTransform) -> None
"""Callback for entering traversal of a composite transform node."""
pass
def leave_composite_transform(self, transform_node):
# type: (AppliedPTransform) -> None
"""Callback for leaving traversal of a composite transform node."""
pass
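# Illustrative sketch: a minimal concrete visitor. The name _LabelCollector is
# hypothetical; it simply records the full label of every leaf transform it
# visits via Pipeline.visit().
#
#   class _LabelCollector(PipelineVisitor):
#     def __init__(self):
#       self.labels = []
#
#     def visit_transform(self, transform_node):
#       self.labels.append(transform_node.full_label)
#
#   collector = _LabelCollector()
#   pipeline.visit(collector)  # collector.labels now lists the leaf transform labels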
class AppliedPTransform(object):
"""For internal use only; no backwards-compatibility guarantees.
A transform node representing an instance of applying a PTransform
  (used internally by Pipeline for bookkeeping purposes).
"""
def __init__(self,
parent,
transform, # type: ptransform.PTransform
full_label, # type: str
inputs # type: Optional[Sequence[Union[pvalue.PBegin, pvalue.PCollection]]]
):
self.parent = parent
self.transform = transform
# Note that we want the PipelineVisitor classes to use the full_label,
# inputs, side_inputs, and outputs fields from this instance instead of the
# ones of the PTransform instance associated with it. Doing this permits
# reusing PTransform instances in different contexts (apply() calls) without
# any interference. This is particularly useful for composite transforms.
self.full_label = full_label
self.inputs = inputs or ()
self.side_inputs = () if transform is None else tuple(transform.side_inputs) # type: Tuple[pvalue.AsSideInput, ...]
self.outputs = {} # type: Dict[Union[str, int, None], pvalue.PValue]
self.parts = [] # type: List[AppliedPTransform]
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.full_label,
type(self.transform).__name__)
def replace_output(self,
output, # type: Union[pvalue.PValue, pvalue.DoOutputsTuple]
tag=None # type: Union[str, int, None]
):
# type: (...) -> None
"""Replaces the output defined by the given tag with the given output.
Args:
output: replacement output
tag: tag of the output to be replaced.
"""
if isinstance(output, pvalue.DoOutputsTuple):
self.replace_output(output[output._main_tag])
elif isinstance(output, pvalue.PValue):
self.outputs[tag] = output
elif isinstance(output, dict):
for output_tag, out in output.items():
self.outputs[output_tag] = out
else:
raise TypeError("Unexpected output type: %s" % output)
def add_output(self,
output, # type: Union[pvalue.DoOutputsTuple, pvalue.PValue]
tag=None # type: Union[str, int, None]
):
# type: (...) -> None
if isinstance(output, pvalue.DoOutputsTuple):
self.add_output(output[output._main_tag])
elif isinstance(output, pvalue.PValue):
# TODO(BEAM-1833): Require tags when calling this method.
if tag is None and None in self.outputs:
tag = len(self.outputs)
assert tag not in self.outputs
self.outputs[tag] = output
elif isinstance(output, dict):
for output_tag, out in output.items():
self.add_output(out, tag=output_tag)
else:
raise TypeError("Unexpected output type: %s" % output)
def add_part(self, part):
# type: (AppliedPTransform) -> None
assert isinstance(part, AppliedPTransform)
self.parts.append(part)
def is_composite(self):
# type: () -> bool
"""Returns whether this is a composite transform.
A composite transform has parts (inner transforms) or isn't the
producer for any of its outputs. (An example of a transform that
is not a producer is one that returns its inputs instead.)
"""
return bool(self.parts) or all(
pval.producer is not self for pval in self.outputs.values())
def visit(self,
visitor, # type: PipelineVisitor
pipeline, # type: Pipeline
visited # type: Set[pvalue.PValue]
):
# type: (...) -> None
"""Visits all nodes reachable from the current node."""
for pval in self.inputs:
if pval not in visited and not isinstance(pval, pvalue.PBegin):
if pval.producer is not None:
pval.producer.visit(visitor, pipeline, visited)
# The value should be visited now since we visit outputs too.
assert pval in visited, pval
# Visit side inputs.
for pval in self.side_inputs:
if isinstance(pval, pvalue.AsSideInput) and pval.pvalue not in visited:
pval = pval.pvalue # Unpack marker-object-wrapped pvalue.
if pval.producer is not None:
pval.producer.visit(visitor, pipeline, visited)
# The value should be visited now since we visit outputs too.
assert pval in visited
# TODO(silviuc): Is there a way to signal that we are visiting a side
# value? The issue is that the same PValue can be reachable through
# multiple paths and therefore it is not guaranteed that the value
# will be visited as a side value.
# Visit a composite or primitive transform.
if self.is_composite():
visitor.enter_composite_transform(self)
for part in self.parts:
part.visit(visitor, pipeline, visited)
visitor.leave_composite_transform(self)
else:
visitor.visit_transform(self)
# Visit the outputs (one or more). It is essential to mark as visited the
# tagged PCollections of the DoOutputsTuple object. A tagged PCollection is
# connected directly with its producer (a multi-output ParDo), but the
# output of such a transform is the containing DoOutputsTuple, not the
# PCollection inside it. Without the code below a tagged PCollection will
# not be marked as visited while visiting its producer.
for pval in self.outputs.values():
if isinstance(pval, pvalue.DoOutputsTuple):
pvals = (v for v in pval)
else:
pvals = (pval,)
for v in pvals:
if v not in visited:
visited.add(v)
visitor.visit_value(v, self)
def named_inputs(self):
# type: () -> Dict[str, pvalue.PCollection]
# TODO(BEAM-1833): Push names up into the sdk construction.
main_inputs = {str(ix): input
for ix, input in enumerate(self.inputs)
if isinstance(input, pvalue.PCollection)}
side_inputs = {'side%s' % ix: si.pvalue
for ix, si in enumerate(self.side_inputs)}
return dict(main_inputs, **side_inputs)
def named_outputs(self):
# type: () -> Dict[str, pvalue.PCollection]
return {str(tag): output for tag, output in self.outputs.items()
if isinstance(output, pvalue.PCollection)}
def to_runner_api(self, context):
# type: (PipelineContext) -> beam_runner_api_pb2.PTransform
    # External transforms require more splicing than just setting the spec.
from apache_beam.transforms import external
if isinstance(self.transform, external.ExternalTransform):
return self.transform.to_runner_api_transform(context, self.full_label)
from apache_beam.portability.api import beam_runner_api_pb2
def transform_to_runner_api(transform, # type: Optional[ptransform.PTransform]
context # type: PipelineContext
):
# type: (...) -> Optional[beam_runner_api_pb2.FunctionSpec]
if transform is None:
return None
else:
return transform.to_runner_api(context, has_parts=bool(self.parts))
# Iterate over inputs and outputs by sorted key order, so that ids are
# consistently generated for multiple runs of the same pipeline.
return beam_runner_api_pb2.PTransform(
unique_name=self.full_label,
spec=transform_to_runner_api(self.transform, context),
subtransforms=[context.transforms.get_id(part, label=part.full_label)
for part in self.parts],
inputs={tag: context.pcollections.get_id(pc)
for tag, pc in sorted(self.named_inputs().items())},
outputs={str(tag): context.pcollections.get_id(out)
for tag, out in sorted(self.named_outputs().items())},
# TODO(BEAM-366): Add display_data.
display_data=None)
@staticmethod
def from_runner_api(proto, # type: beam_runner_api_pb2.PTransform
context # type: PipelineContext
):
# type: (...) -> AppliedPTransform
def is_side_input(tag):
# As per named_inputs() above.
return tag.startswith('side')
main_inputs = [context.pcollections.get_by_id(id)
for tag, id in proto.inputs.items()
if not is_side_input(tag)]
# Ordering is important here.
indexed_side_inputs = [(int(re.match('side([0-9]+)(-.*)?$', tag).group(1)),
context.pcollections.get_by_id(id))
for tag, id in proto.inputs.items()
if is_side_input(tag)]
side_inputs = [si for _, si in sorted(indexed_side_inputs)]
result = AppliedPTransform(
parent=None,
transform=ptransform.PTransform.from_runner_api(proto.spec, context),
full_label=proto.unique_name,
inputs=main_inputs)
if result.transform and result.transform.side_inputs:
for si, pcoll in zip(result.transform.side_inputs, side_inputs):
si.pvalue = pcoll
result.side_inputs = tuple(result.transform.side_inputs)
result.parts = []
for transform_id in proto.subtransforms:
part = context.transforms.get_by_id(transform_id)
part.parent = result
result.parts.append(part)
result.outputs = {
None if tag == 'None' else tag: context.pcollections.get_by_id(id)
for tag, id in proto.outputs.items()}
# This annotation is expected by some runners.
if proto.spec.urn == common_urns.primitives.PAR_DO.urn:
result.transform.output_tags = set(proto.outputs.keys()).difference(
{'None'})
if not result.parts:
for tag, pcoll_id in proto.outputs.items():
if pcoll_id not in proto.inputs.values():
pc = context.pcollections.get_by_id(pcoll_id)
pc.producer = result
pc.tag = None if tag == 'None' else tag
return result
class PTransformOverride(with_metaclass(abc.ABCMeta, object)): # type: ignore[misc]
"""For internal use only; no backwards-compatibility guarantees.
Gives a matcher and replacements for matching PTransforms.
  TODO: Update this to support cases where input and/or output types are
different.
"""
@abc.abstractmethod
def matches(self, applied_ptransform):
# type: (AppliedPTransform) -> bool
"""Determines whether the given AppliedPTransform matches.
Note that the matching will happen *after* Runner API proto translation.
If matching is done via type checks, to/from_runner_api[_parameter] methods
must be implemented to preserve the type (and other data) through proto
serialization.
Consider URN-based translation instead.
Args:
applied_ptransform: AppliedPTransform to be matched.
Returns:
a bool indicating whether the given AppliedPTransform is a match.
"""
raise NotImplementedError
@abc.abstractmethod
def get_replacement_transform(self, ptransform):
    # type: (ptransform.PTransform) -> ptransform.PTransform
"""Provides a runner specific override for a given PTransform.
Args:
ptransform: PTransform to be replaced.
Returns:
A PTransform that will be the replacement for the PTransform given as an
argument.
"""
# Returns a PTransformReplacement
raise NotImplementedError
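# Illustrative sketch: a minimal override that matches a transform by its full
# label and swaps in a replacement. _ReplaceMyTransform and _MyReplacement are
# hypothetical names used only to show the matches()/get_replacement_transform()
# contract defined above.
#
#   class _ReplaceMyTransform(PTransformOverride):
#     def matches(self, applied_ptransform):
#       return applied_ptransform.full_label == 'MyTransform'
#
#     def get_replacement_transform(self, ptransform):
#       return _MyReplacement()
#
#   pipeline.replace_all([_ReplaceMyTransform()])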
|
{
"content_hash": "86236379448e8303078b4fedbec77e61",
"timestamp": "",
"source": "github",
"line_count": 1059,
"max_line_length": 120,
"avg_line_length": 41.24834749763928,
"alnum_prop": 0.6653999359003708,
"repo_name": "RyanSkraba/beam",
"id": "d4c94c4675f6c7ffed310d1e8e67ee4aa9d35c49",
"size": "44467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1597"
},
{
"name": "CSS",
"bytes": "40963"
},
{
"name": "Dockerfile",
"bytes": "16638"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2683402"
},
{
"name": "Groovy",
"bytes": "517560"
},
{
"name": "HTML",
"bytes": "183330"
},
{
"name": "Java",
"bytes": "28609011"
},
{
"name": "JavaScript",
"bytes": "16595"
},
{
"name": "Jupyter Notebook",
"bytes": "56365"
},
{
"name": "Python",
"bytes": "6191025"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "235061"
},
{
"name": "TSQL",
"bytes": "841"
}
],
"symlink_target": ""
}
|
from mpinterfaces.mat2d.friction.startup import run_gamma_calculations, \
run_normal_force_calculations
|
{
"content_hash": "f30da7506722868a9b04aa2c675674a0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 73,
"avg_line_length": 53.5,
"alnum_prop": 0.822429906542056,
"repo_name": "joshgabriel/MPInterfaces",
"id": "9d546a4f960c236bd0b606df53441e45bf4cb5f2",
"size": "107",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mpinterfaces/mat2d/friction/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "514956"
},
{
"name": "Shell",
"bytes": "3252"
}
],
"symlink_target": ""
}
|
from wagtail.contrib.redirects.models import Redirect
from wagtail.permission_policies import ModelPermissionPolicy
permission_policy = ModelPermissionPolicy(Redirect)
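# Note: ModelPermissionPolicy enforces Django's standard add/change/delete
# model permissions for the Redirect model when checking user permissions.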
|
{
"content_hash": "2372a5ef5e625f10323804473bf5e358",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 61,
"avg_line_length": 42.25,
"alnum_prop": 0.8757396449704142,
"repo_name": "rsalmaso/wagtail",
"id": "9f232643c014b7e81281a8dea4c648a609f58a67",
"size": "169",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "wagtail/contrib/redirects/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593672"
},
{
"name": "JavaScript",
"bytes": "624463"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6598232"
},
{
"name": "SCSS",
"bytes": "221911"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "296087"
}
],
"symlink_target": ""
}
|
"""Functional tests for slice op."""
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SliceTest(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in range(4):
with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in range(4):
with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testSlicingWithInt64Index(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
# Slice using int64 Tensor.
i = constant_op.constant(1, dtype=dtypes.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int64 integer.
i = np.asarray(1).astype(np.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
slice_t = array_ops.slice(a_int32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
slice_t = array_ops.slice(a_float32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSlicingInt64Tensor(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)
# Slice using int32 Tensor.
i = constant_op.constant(1, dtype=dtypes.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int32 integer.
i = np.asarray(1).astype(np.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
slice_t = array_ops.slice(a, [1], [2])
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSelectAll(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = self.evaluate(scalar_t)
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[lo:hi])
def test3Dimension(self):
with self.cached_session():
input_shape = [8, 16, 16, 16, 8]
total_input_size = 1
for s in input_shape:
total_input_size *= s
inputs = [
i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
]
a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)
filter_shape = [1, 1, 1, 8, 8]
total_filter_size = 1
for s in filter_shape:
total_filter_size *= s
filters = [
i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
]
f = constant_op.constant(
filters, shape=filter_shape, dtype=dtypes.float32)
conv_t = nn_ops.conv3d(
a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
result = self.evaluate(slice_t)
expected = [
0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
0.03654456, 0.03758812
]
self.assertAllClose(expected, result.flatten(), rtol=1e-6)
def testScalarInput(self):
input_val = 0
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(
(ValueError, errors_impl.InvalidArgumentError), "out of range"):
constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
@def_function.function
def func(input_t):
slice_t = input_t[:]
return slice_t
with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
self.evaluate(func(input_val))
def testInvalidIndex(self):
input_val = [1, 2]
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(
(ValueError, errors_impl.InvalidArgumentError), "out of range"):
constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
@def_function.function
def func(input_t):
slice_t = input_t[1:, 1:]
return slice_t
with self.assertRaisesWithPredicateMatch(
TypeError, "must be integers or slices, not tuple"):
self.evaluate(func(input_val))
def _testSliceMatrixDim0(self, x, begin, size):
tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with test_util.use_gpu():
for dtype in [
np.uint8,
np.int8,
np.uint16,
np.int16,
np.int32,
np.int64,
np.bool_,
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
]:
inp = np.random.rand(4, 4).astype(dtype)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 4],
dtype=dtypes.float32)
slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
self.assertAllEqual(slice_val, np.array(inp[:2, :2], dtype=np.float32))
self.assertAllEqual(slice2_val, np.array(inp[:2, :2], dtype=np.float32))
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
def testComplex(self):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t, inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6
input_shape = np.random.randint(0, 20, size=6)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [
np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
]
slice_t = array_ops.slice(a, indices, sizes)
slice2_t = a[indices[0]:indices[0] + sizes[0],
indices[1]:indices[1] + sizes[1],
indices[2]:indices[2] + sizes[2],
indices[3]:indices[3] + sizes[3],
indices[4]:indices[4] + sizes[4],
indices[5]:indices[5] + sizes[5]]
slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
expected_val = inp[indices[0]:indices[0] + sizes[0],
indices[1]:indices[1] + sizes[1],
indices[2]:indices[2] + sizes[2],
indices[3]:indices[3] + sizes[3],
indices[4]:indices[4] + sizes[4],
indices[5]:indices[5] + sizes[5]]
self.assertAllEqual(slice_val, expected_val)
self.assertAllEqual(slice2_val, expected_val)
self.assertEqual(expected_val.shape, slice_t.get_shape())
self.assertEqual(expected_val.shape, slice2_t.get_shape())
def testPartialShapeInference(self):
z = array_ops.zeros((1, 2, 3))
self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])
m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])
m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
def _testGradientSlice(self, input_shape, slice_begin, slice_size):
with self.cached_session():
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = constant_op.constant(grads)
grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
result = self.evaluate(grad)
      # Create a zero tensor of the input shape and place
# the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in range(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
np_ans[tuple(slices)] = grads
self.assertAllClose(np_ans, result)
def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
with backprop.GradientTape() as tape:
num_inputs = np.prod(input_shape)
num_grads = np.prod(slice_size)
inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
tape.watch(a)
slice_t = array_ops.slice(a, slice_begin, slice_size)
grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
grad_tensor = constant_op.constant(grads)
grad = tape.gradient(slice_t, [a], grad_tensor)[0]
result = self.evaluate(grad)
    # Create a zero tensor of the input shape and place
# the grads into the right location to compare against TensorFlow.
np_ans = np.zeros(input_shape)
slices = []
for i in range(len(input_shape)):
slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
np_ans[tuple(slices)] = grads
self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.cached_session():
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
self.assertAllClose([0., 1., 1.], grad_actual)
def _testGradientVariableSizeTape(self):
with backprop.GradientTape() as tape:
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
tape.watch(inp)
out = array_ops.slice(inp, [1], [-1])
grad_actual = self.evaluate(tape.gradient(out, inp))
self.assertAllClose([0., 1., 1.], grad_actual)
def _testGradientVariableSize2D(self):
# Regression test for bug in slice. A low-level bug in Eigen was causing
# incorrect results for negative indices in multi-dimensional tensors.
# See b/114318298.
with self.cached_session():
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
g1 = gradients_impl.gradients(loss1, x)[0]
g2 = gradients_impl.gradients(loss2, x)[0]
g1_val, g2_val = self.evaluate([g1, g2])
self.assertAllEqual(g1_val, g2_val)
def _testGradientVariableSize2DTape(self):
# Regression test for bug in slice. A low-level bug in Eigen was causing
# incorrect results for negative indices in multi-dimensional tensors.
# See b/114318298.
with backprop.GradientTape(persistent=True) as tape:
x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
tape.watch(x)
loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
g1 = tape.gradient(loss1, x)
g2 = tape.gradient(loss2, x)
g1_val, g2_val = self.evaluate([g1, g2])
self.assertAllEqual(g1_val, g2_val)
def testGradientsAll(self):
with ops.Graph().as_default():
# Slice the middle square out of a 4x4 input
self._testGradientSlice([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSlice([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSlice([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSize()
# Use -1 as a slice dimension on a 2D tensor.
self._testGradientVariableSize2D()
def testGradientsAllTape(self):
# Slice the middle square out of a 4x4 input
self._testGradientSliceTape([4, 4], [1, 1], [2, 2])
# Slice the upper left square out of a 4x4 input
self._testGradientSliceTape([4, 4], [0, 0], [2, 2])
# Slice a non-square input starting from (2,1)
self._testGradientSliceTape([4, 4], [2, 1], [1, 2])
# Slice a 3D tensor
self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])
# Use -1 as a slice dimension.
self._testGradientVariableSizeTape()
# Use -1 as a slice dimension on a 2D tensor.
self._testGradientVariableSize2DTape()
def testNotIterable(self):
# Tensor iteration is disabled explicitly for only graph mode.
with ops.Graph().as_default():
# NOTE(mrry): If we register __getitem__ as an overloaded
# operator, Python will valiantly attempt to iterate over the
# Tensor from 0 to infinity. This test ensures that this
# unintended behavior is prevented.
c = constant_op.constant(5.0)
with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
"Iterating over a symbolic `tf.Tensor`"):
for _ in c:
pass
def testComputedShape(self):
# NOTE(mrry): We cannot currently handle partially-known values,
# because `tf.slice()` uses -1 to specify a wildcard size, and
# this can't be handled using the
# `tensor_util.constant_value_as_shape()` trick.
a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
begin = constant_op.constant(0)
size = constant_op.constant(1)
b = array_ops.slice(a, [begin, 0], [size, 2])
self.assertEqual([1, 2], b.get_shape())
# placeholders only make sense in a graph.
with ops.Graph().as_default():
a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
begin = array_ops.placeholder(dtypes.int32, shape=())
c = array_ops.slice(a, [begin, 0], [-1, 2])
self.assertEqual([None, 2], c.get_shape().as_list())
def testSliceOfSlice(self):
with self.session():
a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
b = a[1:, :]
c = b[:-1, :]
d = c[1, :]
res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
self.assertAllEqual([0, 0, 0], self.evaluate(res))
if __name__ == "__main__":
test.main()
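# Note: a size entry of -1 in array_ops.slice means "take everything remaining
# along that dimension", so array_ops.slice(x, [1, 0], [-1, 2]) is equivalent
# to the Python-style slice x[1:, 0:2].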
|
{
"content_hash": "e2b1b3014fdd51dba97937a2a5d2295a",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 80,
"avg_line_length": 37.6597510373444,
"alnum_prop": 0.6046716615249008,
"repo_name": "tensorflow/tensorflow-experimental_link_static_libraries_once",
"id": "a5ee51dba75b3f7ffaa3dee206233f46a9dc6325",
"size": "18841",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/array_ops/slice_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1343737"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "123969891"
},
{
"name": "CMake",
"bytes": "182027"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416070"
},
{
"name": "Go",
"bytes": "2095490"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074471"
},
{
"name": "Jupyter Notebook",
"bytes": "789401"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11067751"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "169288"
},
{
"name": "Objective-C++",
"bytes": "294177"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42585406"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "620507"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14577"
},
{
"name": "Starlark",
"bytes": "7486225"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""
Nova authentication management
"""
import os
import shutil
import string # pylint: disable=W0402
import tempfile
import uuid
import zipfile
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import signer
FLAGS = flags.FLAGS
flags.DEFINE_list('allowed_roles',
['cloudadmin', 'itsec', 'sysadmin', 'netadmin', 'developer'],
'Allowed roles for project')
# NOTE(vish): a user with one of these roles will be a superuser and
# have access to all api commands
flags.DEFINE_list('superuser_roles', ['cloudadmin'],
'Roles that ignore authorization checking completely')
# NOTE(vish): a user with one of these roles will have it for every
# project, even if he or she is not a member of the project
flags.DEFINE_list('global_roles', ['cloudadmin', 'itsec'],
'Roles that apply to all projects')
flags.DEFINE_string('credentials_template',
utils.abspath('auth/novarc.template'),
'Template for creating users rc file')
flags.DEFINE_string('vpn_client_template',
utils.abspath('cloudpipe/client.ovpn.template'),
'Template for creating users vpn file')
flags.DEFINE_string('credential_vpn_file', 'nova-vpn.conf',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_key_file', 'pk.pem',
'Filename of private key in credentials zip')
flags.DEFINE_string('credential_cert_file', 'cert.pem',
'Filename of certificate in credentials zip')
flags.DEFINE_string('credential_rc_file', '%src',
'Filename of rc in credentials zip, %s will be '
'replaced by name of the region (nova by default)')
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
LOG = logging.getLogger('nova.auth.manager')
class AuthBase(object):
"""Base class for objects relating to auth
Objects derived from this class should be stupid data objects with
an id member. They may optionally contain methods that delegate to
AuthManager, but should not implement logic themselves.
"""
@classmethod
def safe_id(cls, obj):
"""Safely get object id.
This method will return the id of the object if the object
is of this class, otherwise it will return the original object.
This allows methods to accept objects or ids as parameters.
"""
if isinstance(obj, cls):
return obj.id
else:
return obj
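# Illustrative sketch (not part of the original module): safe_id() accepts
# either an instance of the class or a plain id, so for example
#     User.safe_id(some_user)  ->  some_user.id
#     User.safe_id('alice')    ->  'alice'
# which is why the AuthManager methods below can take objects or ids alike.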
class User(AuthBase):
"""Object representing a user
The following attributes are defined:
:id: A system identifier for the user. A string (for LDAP)
:name: The user name, potentially in some more friendly format
:access: The 'username' for EC2 authentication
:secret: The 'password' for EC2 authentication
:admin: ???
"""
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access
self.secret = secret
self.admin = admin
def is_superuser(self):
return AuthManager().is_superuser(self)
def is_admin(self):
return AuthManager().is_admin(self)
def has_role(self, role):
return AuthManager().has_role(self, role)
def add_role(self, role):
return AuthManager().add_role(self, role)
def remove_role(self, role):
return AuthManager().remove_role(self, role)
def is_project_member(self, project):
return AuthManager().is_project_member(self, project)
def is_project_manager(self, project):
return AuthManager().is_project_manager(self, project)
def __repr__(self):
return "User('%s', '%s', '%s', '%s', %s)" % (self.id,
self.name,
self.access,
self.secret,
self.admin)
class Project(AuthBase):
"""Represents a Project returned from the datastore"""
def __init__(self, id, name, project_manager_id, description, member_ids):
AuthBase.__init__(self)
self.id = id
self.name = name
self.project_manager_id = project_manager_id
self.description = description
self.member_ids = member_ids
@property
def project_manager(self):
return AuthManager().get_user(self.project_manager_id)
@property
def vpn_ip(self):
ip, _port = AuthManager().get_project_vpn_data(self)
return ip
@property
def vpn_port(self):
_ip, port = AuthManager().get_project_vpn_data(self)
return port
def has_manager(self, user):
return AuthManager().is_project_manager(user, self)
def has_member(self, user):
return AuthManager().is_project_member(user, self)
def add_role(self, user, role):
return AuthManager().add_role(user, role, self)
def remove_role(self, user, role):
return AuthManager().remove_role(user, role, self)
def has_role(self, user, role):
return AuthManager().has_role(user, role, self)
def get_credentials(self, user):
return AuthManager().get_credentials(user, self)
def __repr__(self):
return "Project('%s', '%s', '%s', '%s', %s)" % \
(self.id, self.name, self.project_manager_id, self.description,
self.member_ids)
class AuthManager(object):
"""Manager Singleton for dealing with Users, Projects, and Keypairs
Methods accept objects or ids.
AuthManager uses a driver object to make requests to the data backend.
See ldapdriver for reference.
AuthManager also manages associated data related to Auth objects that
need to be more accessible, such as vpn ips and ports.
"""
_instance = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
if not cls._instance or ('new' in kwargs and kwargs['new']):
cls._instance = super(AuthManager, cls).__new__(cls)
return cls._instance
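# Hedged note (illustrative): because of the singleton above,
#     AuthManager() is AuthManager()   # -> True
# holds for repeated calls; passing new=True forces a fresh instance.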
def __init__(self, driver=None, *args, **kwargs):
"""Inits the driver from parameter or flag
__init__ is run every time AuthManager() is called, so we only
reset the driver if it is not set or a new driver is specified.
"""
self.network_manager = utils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
if FLAGS.memcached_servers:
import memcache
else:
from nova import fakememcache as memcache
self.mc = memcache.Client(FLAGS.memcached_servers,
debug=0)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
"""Authenticates AWS request using access key and signature
If the project is not specified, attempts to authenticate to
a project with the same name as the user. This way, older tools
that have no project knowledge will still work.
@type access: str
@param access: Access key for user in the form "access:project".
@type signature: str
@param signature: Signature of the request.
@type params: list of str
@param params: Web parameters used for the signature.
@type verb: str
@param verb: Web request verb ('GET' or 'POST').
@type server_string: str
@param server_string: Web request server string.
@type path: str
@param path: Web request path.
@type check_type: str
@param check_type: Type of signature to check. 'ec2' for EC2, 's3' for
S3. Any other value will cause signature not to be
checked.
@type headers: list
@param headers: HTTP headers passed with the request (only needed for
s3 signature checks)
@rtype: tuple (User, Project)
@return: User and project that the request represents.
"""
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
LOG.debug('user: %r', user)
if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.AccessKeyNotFound(access_key=access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project is None:
pjid = project_id
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
raise exception.ProjectNotFound(project_id=project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
uname = user.name
uid = user.id
pjname = project.name
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
raise exception.ProjectMembershipNotFound(project_id=pjid,
user_id=uid)
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
LOG.debug(_('user.secret: %s'), user.secret)
LOG.debug(_('expected_signature: %s'), expected_signature)
LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
(addr_str, port_str) = utils.parse_server_string(server_string)
# If the given server_string contains port num, try without it.
if port_str != '':
host_only_signature = signer.Signer(
user.secret.encode()).generate(params, verb,
addr_str, path)
LOG.debug(_('host_only_signature: %s'),
host_only_signature)
if signature == host_only_signature:
return (user, project)
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.InvalidSignature(signature=signature,
user=user)
return (user, project)
def get_access_key(self, user, project):
"""Get an access key that includes user and project"""
if not isinstance(user, User):
user = self.get_user(user)
return "%s:%s" % (user.access, Project.safe_id(project))
def is_superuser(self, user):
"""Checks for superuser status, allowing user to bypass authorization
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for superuser.
"""
if not isinstance(user, User):
user = self.get_user(user)
# NOTE(vish): admin flag on user represents superuser
if user.admin:
return True
for role in FLAGS.superuser_roles:
if self.has_role(user, role):
return True
def is_admin(self, user):
"""Checks for admin status, allowing user to access all projects
@type user: User or uid
@param user: User to check.
@rtype: bool
@return: True for admin.
"""
if not isinstance(user, User):
user = self.get_user(user)
if self.is_superuser(user):
return True
for role in FLAGS.global_roles:
if self.has_role(user, role):
return True
def _build_mc_key(self, user, role, project=None):
key_parts = ['rolecache', User.safe_id(user), str(role)]
if project:
key_parts.append(Project.safe_id(project))
return '-'.join(key_parts)
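# Hedged sketch (names are illustrative): the cache key is simply the parts
# joined with '-', e.g.
#     _build_mc_key('alice', 'sysadmin', 'proj1') -> 'rolecache-alice-sysadmin-proj1'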
def _clear_mc_key(self, user, role, project=None):
# NOTE(anthony): it would be better to delete the key
self.mc.set(self._build_mc_key(user, role, project), None)
def _has_role(self, user, role, project=None):
mc_key = self._build_mc_key(user, role, project)
rslt = self.mc.get(mc_key)
if rslt is None:
with self.driver() as drv:
rslt = drv.has_role(user, role, project)
self.mc.set(mc_key, rslt)
return rslt
else:
return rslt
def has_role(self, user, role, project=None):
"""Checks existence of role for user
If project is not specified, checks for a global role. If project
is specified, checks for the union of the global role and the
project role.
Role 'projectmanager' only works for projects and simply checks to
see if the user is the project_manager of the specified project. It
is the same as calling is_project_manager(user, project).
@type user: User or uid
@param user: User to check.
@type role: str
@param role: Role to check.
@type project: Project or project_id
@param project: Project in which to look for local role.
@rtype: bool
@return: True if the user has the role.
"""
if role == 'projectmanager':
if not project:
raise exception.Error(_("Must specify project"))
return self.is_project_manager(user, project)
global_role = self._has_role(User.safe_id(user),
role,
None)
if not global_role:
return global_role
if not project or role in FLAGS.global_roles:
return global_role
return self._has_role(User.safe_id(user),
role,
Project.safe_id(project))
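# Hedged sketch of the logic above: has_role(user, 'netadmin', project) is
# True only when the user holds 'netadmin' globally AND either no project was
# given, the role is listed in global_roles, or the user also holds the role
# on that project.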
def add_role(self, user, role, project=None):
"""Adds role for user
If project is not specified, adds a global role. If project
is specified, adds a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User to which to add role.
@type role: str
@param role: Role to add.
@type project: Project or project_id
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
raise exception.UserRoleNotFound(role_id=role)
if project is not None and role in FLAGS.global_roles:
raise exception.GlobalRoleNotAllowed(role_id=role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Adding role %(role)s to user %(uid)s"
" in project %(pid)s") % locals())
else:
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
If project is not specified, removes a global role. If project
is specified, removes a local role.
The 'projectmanager' role is special and can't be added or removed.
@type user: User or uid
@param user: User from which to remove role.
@type role: str
@param role: Role to remove.
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Removing role %(role)s from user %(uid)s"
" on project %(pid)s") % locals())
else:
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
self._clear_mc_key(uid, role, pid)
drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
"""Get list of allowed roles"""
if project_roles:
return list(set(FLAGS.allowed_roles) - set(FLAGS.global_roles))
else:
return FLAGS.allowed_roles
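# Hedged sketch with the default flag values defined above: get_roles()
# returns the project-assignable roles, i.e. a list containing 'sysadmin',
# 'netadmin' and 'developer' (in no particular order), while get_roles(False)
# returns the full allowed_roles list.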
def get_user_roles(self, user, project=None):
"""Get user global or per-project roles"""
with self.driver() as drv:
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
project_dict = drv.get_project(pid)
if project_dict:
return Project(**project_dict)
def get_projects(self, user=None):
"""Retrieves list of projects, optionally filtered by user"""
with self.driver() as drv:
project_list = drv.get_projects(User.safe_id(user))
if not project_list:
return []
return [Project(**project_dict) for project_dict in project_list]
def create_project(self, name, manager_user, description=None,
member_users=None):
"""Create a project
@type name: str
@param name: Name of the project to create. The name will also be
used as the project id.
@type manager_user: User or uid
@param manager_user: This user will be the project manager.
@type description: str
@param description: Description of the project. If no description is
specified, the name of the project will be used.
@type member_users: list of User or uid
@param member_users: Initial project members. The project manager will always be
added as a member, even if he isn't specified in this list.
@rtype: Project
@return: The new project.
"""
if member_users:
member_users = [User.safe_id(u) for u in member_users]
with self.driver() as drv:
project_dict = drv.create_project(name,
User.safe_id(manager_user),
description,
member_users)
if project_dict:
LOG.audit(_("Created project %(name)s with"
" manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
def modify_project(self, project, manager_user=None, description=None):
"""Modify a project
@type project: Project or project_id
@param project: The project to modify.
@type manager_user: User or uid
@param manager_user: This user will be the new project manager.
@type description: str
@param description: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
drv.modify_project(Project.safe_id(project),
manager_user,
description)
def add_to_project(self, user, project):
"""Add user to project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) == project.project_manager_id
def is_project_member(self, user, project):
"""Checks to see if user is a member of project"""
if not isinstance(project, Project):
project = self.get_project(project)
return User.safe_id(user) in project.member_ids
def remove_from_project(self, user, project):
"""Removes a user from a project"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
"""Gets vpn ip and port for project
@type project: Project or project_id
@param project: Project from which to get associated vpn data
@rvalue: tuple of (str, str)
@return: A tuple containing (ip, port) or None, None if vpn has
not been allocated for user.
"""
network_ref = db.project_get_network(context.get_admin_context(),
Project.safe_id(project), False)
if not network_ref:
return (None, None)
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])
def delete_project(self, project):
"""Deletes a project"""
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
def get_user(self, uid):
"""Retrieves a user by id"""
with self.driver() as drv:
user_dict = drv.get_user(uid)
if user_dict:
return User(**user_dict)
def get_user_from_access_key(self, access_key):
"""Retrieves a user by access key"""
with self.driver() as drv:
user_dict = drv.get_user_from_access_key(access_key)
if user_dict:
return User(**user_dict)
def get_users(self):
"""Retrieves a list of all users"""
with self.driver() as drv:
user_list = drv.get_users()
if not user_list:
return []
return [User(**user_dict) for user_dict in user_list]
def create_user(self, name, access=None, secret=None, admin=False):
"""Creates a user
@type name: str
@param name: Name of the user to create.
@type access: str
@param access: Access Key (defaults to a random uuid)
@type secret: str
@param secret: Secret Key (defaults to a random uuid)
@type admin: bool
@param admin: Whether to set the admin flag. The admin flag gives
superuser status regardless of roles specified for the user.
@type create_project: bool
@param create_project: Whether to create a project for the user with the same name.
@rtype: User
@return: The new user.
"""
if access is None:
access = str(uuid.uuid4())
if secret is None:
secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
rvname = rv.name
rvadmin = rv.admin
LOG.audit(_("Created user %(rvname)s"
" (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
drv.delete_user(uid)
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
if access_key:
LOG.audit(_("Access Key change for user %s"), uid)
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %(admin)r"
" for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@staticmethod
def get_key_pairs(context):
return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
private_key, signed_cert = crypto.generate_x509_cert(user.id, pid)
tmpdir = tempfile.mkdtemp()
zf = os.path.join(tmpdir, "temp.zip")
zippy = zipfile.ZipFile(zf, 'w')
if use_dmz and FLAGS.region_list:
regions = {}
for item in FLAGS.region_list:
region, _sep, region_host = item.partition("=")
regions[region] = region_host
else:
regions = {'nova': FLAGS.ec2_host}
for region, host in regions.iteritems():
rc = self.__generate_rc(user,
pid,
use_dmz,
host)
zippy.writestr(FLAGS.credential_rc_file % region, rc)
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
if vpn_ip:
configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
configfile.close()
config = s.substitute(keyfile=FLAGS.credential_key_file,
certfile=FLAGS.credential_cert_file,
ip=vpn_ip,
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()
with open(zf, 'rb') as f:
read_buffer = f.read()
shutil.rmtree(tmpdir)
return read_buffer
def get_environment_rc(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
project = user.id
pid = Project.safe_id(project)
return self.__generate_rc(user, pid, use_dmz)
@staticmethod
def __generate_rc(user, pid, use_dmz=True, host=None):
"""Generate rc file for user"""
if use_dmz:
ec2_host = FLAGS.ec2_dmz_host
else:
ec2_host = FLAGS.ec2_host
# NOTE(vish): Always use the dmz since it is used from inside the
# instance
s3_host = FLAGS.s3_dmz
if host:
s3_host = host
ec2_host = host
rc = open(FLAGS.credentials_template).read()
rc = rc % {'access': user.access,
'project': pid,
'secret': user.secret,
'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme,
ec2_host,
FLAGS.ec2_port,
FLAGS.ec2_path),
's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port),
'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme,
ec2_host,
FLAGS.osapi_port,
FLAGS.osapi_path),
'user': user.name,
'nova': FLAGS.ca_file,
'cert': FLAGS.credential_cert_file,
'key': FLAGS.credential_key_file}
return rc
|
{
"content_hash": "32eb2b4e954f5416df260be1a8d96d82",
"timestamp": "",
"source": "github",
"line_count": 802,
"max_line_length": 79,
"avg_line_length": 37.49002493765586,
"alnum_prop": 0.5576545714570792,
"repo_name": "termie/nova-migration-demo",
"id": "07235a2a79afe8b810d3e8591d570fd83e7da1f6",
"size": "30844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/auth/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "2431410"
},
{
"name": "Shell",
"bytes": "31459"
}
],
"symlink_target": ""
}
|
from typing import Dict, Union
from sqlalchemy import and_
from lockfile import LockFile
import json
import pypandoc
import re
import os
import shutil
import git
import sys
import traceback
import datetime
import time
import random
import string
import pyparsing as pp
import dateutil.parser
from typing import Callable, List, Any, Tuple, TypedDict
import util
import model
# Only one deploy may run on the whole server at a time -> we use a lockfile.
LOCKFILE = '/var/lock/ksi-task-deploy'
LOGFILE = 'data/deploy.log'
# Deploy is run in a separate thread.
session = None
eval_public = True
def deploy(task_id: int, deployLock: LockFile, scoped: Callable) -> None:
"""
This function is run in a separate thread.
We need to make use of SQLAlchemy's multi-threading support:
* UNDER NO CIRCUMSTANCES refer to db.py and especially not to its session!
* `scoped` was created by scoped_session(...); use this scoped session
* from the scoped_session we create a session of our own, which we then use
* call scoped.remove() at the end just to be safe, although according to the
  documentation it is not needed
The procedure above should be in line with the SQLAlchemy documentation.
* !!! model.Task must not be passed into this function, because that object is
  bound to a session; here, however, we create our own session ...
It is recommended that the task this function is called with already has
task.deploy_status = 'deploying' set, because an assignment made in this
thread may take effect only after a while, during which a GET could return
"done", which we do not want.
"""
try:
# Init session
global session
session = scoped()
task = session.query(model.Task).get(task_id)
global eval_public
eval_public = True
# Create log file
create_log(task, "deploying")
task.deploy_status = 'deploying'
task.deploy_date = datetime.datetime.utcnow()
session.commit()
# Init repo object
repo = git.Repo(util.git.GIT_SEMINAR_PATH)
assert not repo.bare
# Fetch origin
# if not task.git_branch in repo.branches:
log("Fetching origin...")
for fetch_info in repo.remotes.origin.fetch():
if str(fetch_info.ref) == "origin/" + task.git_branch:
log("Updated " + str(fetch_info.ref) + " to " +
str(fetch_info.commit))
# Check out task branch
log("Checking out " + task.git_branch)
log(repo.git.checkout(task.git_branch))
# Discard all local changes
log("Hard-reseting to origin/" + task.git_branch)
repo.git.reset("--hard", "origin/" + task.git_branch)
# Check if task path exists
if not os.path.isdir(util.git.GIT_SEMINAR_PATH + task.git_path):
log("Repo dir does not exist")
task.deploy_status = 'error'
session.commit()
return
# Parse task
log("Parsing " + util.git.GIT_SEMINAR_PATH + task.git_path)
process_task(task, util.git.GIT_SEMINAR_PATH + task.git_path)
# Update git entries in db
if task.time_deadline > datetime.datetime.utcnow():
# Task is being deployed before the deadline
task.evaluation_public = eval_public
else:
# Task is deployed after deadline
# |= is important for deploying after task is published
task.evaluation_public |= eval_public
task.git_commit = repo.head.commit.hexsha
task.deploy_status = 'done'
# Update thread name
thread = session.query(model.Thread).get(task.thread)
if thread:
thread.title = task.title
session.commit()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
log("Exception: " + traceback.format_exc())
session.rollback()
try:
task.deploy_status = 'error'
session.commit()
except BaseException:
session.rollback()
finally:
if deployLock.is_locked():
deployLock.release()
log("Done")
session.close()
scoped.remove()
###############################################################################
# Parsing of data from the repository:
def process_task(task: model.Task, path: str) -> None:
"""Zpracovani cele ulohy
Data commitujeme do databaze postupne, abychom videli, kde doslo k
pripadnemu selhani operace.
"""
try:
DATAPATH = f"data/task-content/{task.id}"
process_meta(task, path + "/task.json")
session.commit()
# Remove non-mangled path
to_remove = f"{DATAPATH}/zadani"
if os.path.isdir(to_remove):
log(f"Removing old {to_remove}...")
shutil.rmtree(to_remove)
task.mangled_datadir = mangled_dirname(
f"data/task-content/{task.id}", "zadani_")
task.mangled_soldir = mangled_dirname(
f"data/task-content/{task.id}", "reseni_")
log("Processing assignment")
process_assignment(task, path + "/assignment.md")
session.commit()
log("Processing solution")
process_solution(task, path + "/solution.md")
session.commit()
log("Processing icons & data")
copy_icons(task, path + "/icons/")
copy_data(task, f"{path}/data",
os.path.join(DATAPATH, task.mangled_datadir))
copy_data(task, f"{path}/data_solution",
os.path.join(DATAPATH, task.mangled_soldir))
log("Processing modules")
process_modules(task, path)
session.commit()
except BaseException:
session.rollback()
raise
finally:
log("Task processing done")
def process_meta(task: model.Task, filename: str) -> None:
def local2UTC(LocalTime: datetime) -> datetime:
EpochSecond = time.mktime(LocalTime.timetuple())
return datetime.datetime.utcfromtimestamp(EpochSecond)
log("Processing meta " + filename)
with open(filename, 'r', encoding='utf-8-sig') as f:
data = json.loads(f.read())
task.author = data['author']
if 'co_author' in data:
task.co_author = data['co_author']
else:
task.co_author = None
if 'date_deadline' in data:
task.time_deadline = local2UTC(
dateutil.parser.parse(data['date_deadline']).
replace(hour=23, minute=59, second=59))
else:
task.time_deadline = data['time_deadline']
if ('icon_ref' in data) and (data['icon_ref'] is not None):
# Handle a reference chained through two tasks
tmp_task = session.query(model.Task).get(data['icon_ref'])
if tmp_task.picture_base:
task.picture_base = tmp_task.picture_base
else:
task.picture_base = '/taskContent/' + \
str(data['icon_ref']) + '/icon/'
else:
task.picture_base = None
# Parsing of prerequisites
if ('prerequisities' in data) and (data['prerequisities'] is not None):
if task.prerequisite is not None:
prq = session.query(model.Prerequisite).get(task.prerequisite)
if prq is None:
task.prerequisite = None
if task.prerequisite is None:
prq = model.Prerequisite(
type=model.PrerequisiteType.ATOMIC,
parent=None,
task=None
)
try:
session.add(prq)
session.commit()
except BaseException:
session.rollback()
raise
# Here it is guaranteed that exactly one root prerequisite exists
try:
parsed = parse_prereq_text(data['prerequisities'])
parse_prereq_logic(parsed[0], prq)
session.commit()
except BaseException:
# TODO: pass meaningful error message to user
raise
task.prerequisite = prq.id
else:
task.prerequisite = None
def parse_prereq_text(text: str):
"""Konvertuje text prerekvizit do seznamu [[['7', '&&', '12'], '||', '4']]
Seznam na danem zanoreni obsahuje bud teminal, nebo seznam tri prvku
"""
number = pp.Regex(r"\d+")
expr = pp.operatorPrecedence(number, [
("&&", 2, pp.opAssoc.LEFT, ),
("||", 2, pp.opAssoc.LEFT, ),
])
return expr.parseString(text)
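# Hedged sketch: with the grammar above "&&" binds tighter than "||", so
#     parse_prereq_text("7 && 12 || 4")[0]
# nests roughly as [['7', '&&', '12'], '||', '4'] (pyparsing returns
# ParseResults objects rather than plain lists).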
def parse_prereq_logic(logic, prereq) -> None:
"""'logic' je vysledek z parsovani parse_prereq_text
'prereq' je aktualne zpracovana prerekvizita (model.Prerequisite)
"""
if logic:
log("Parsing " + str(logic))
if isinstance(logic, (str)):
# ATOMIC
prereq.type = model.PrerequisiteType.ATOMIC
prereq.task = int(logic)
# Delete the potential tree of children
for child in prereq.children:
session.delete(child)
session.commit()
elif isinstance(logic, (pp.ParseResults)):
if len(logic) > 3:
log('WARNING: Wrong format of `prerequisite` string, you can enter'
' at most one operator at each level, ignoring some '
'prerequisities!')
# && or ||
if logic[1] == '||':
prereq.type = model.PrerequisiteType.OR
else:
prereq.type = model.PrerequisiteType.AND
prereq.task = None
# Recurse deeper
children = session.query(model.Prerequisite).\
filter(and_(model.Prerequisite.parent is not None,
model.Prerequisite.parent == prereq.id)).\
all()
# there must be exactly two children
while len(children) < 2:
new_child = model.Prerequisite(
type=model.PrerequisiteType.ATOMIC,
parent=prereq.id,
task=None
)
try:
session.add(new_child)
session.commit()
except BaseException:
session.rollback()
raise
children.append(new_child)
try:
while len(children) > 2:
session.delete(children[2])
session.commit()
children.remove(children[2])
except BaseException:
session.rollback()
raise
# Recurse deeper
parse_prereq_logic(logic[0], children[0])
parse_prereq_logic(logic[2], children[1])
else:
log('ERROR: Unknown type of variable in prerequisite!')
def process_assignment(task: model.Task, filename: str) -> None:
"""Vlozi zadani ulohy do databaze"""
with open(filename, 'r') as f:
data = f.read()
data = format_custom_tags(data)
parsed = parse_pandoc(data).splitlines()
# Task intro
intro = re.search("<p>(.*?)</p>", parsed[0])
if intro is not None:
task.intro = intro.group(1)
parsed.pop(0)
else:
task.intro = "Intro ulohy nenalezeno"
# Task title
title = re.search("<h1(.*?)>(.*?)</h1>", parsed[0])
if title is not None:
task.title = title.group(2)
parsed.pop(0)
else:
task.title = "Nazev ulohy nenalezen"
# Join the list of lines into one long text
body = '\n'.join(parsed)
body = replace_h(body)
body = change_links(task, body)
body = add_table_class(body)
task.body = body
def process_solution(task: model.Task, filename: str) -> None:
"""Add solution to database."""
if os.path.isfile(filename):
with open(filename, 'r') as f:
data = f.read()
task.solution = parse_simple_text(task, data)
else:
task.solution = None
def copy_icons(task: model.Task, source_path: str) -> None:
"""Copy icons from repository to backend path."""
target_path = f"data/task-content/{task.id}/icon/"
files = ["base.svg", "correcting.svg", "locked.svg", "done.svg"]
if not os.path.isdir(target_path):
os.makedirs(target_path)
for f in files:
if os.path.isfile(source_path + "/" + f):
shutil.copy2(source_path + "/" + f, target_path + f)
def mangled_dirname(base_directory: str, prefix: str) -> str:
MANGLER_LENGTH = 16
dirs = []
if os.path.isdir(base_directory):
dirs = list(
filter(lambda fn: fn.startswith(prefix), os.listdir(base_directory))
)
if dirs:
assert len(dirs) == 1, f"Multiple directories {base_directory}/{prefix}*"
whole_path = os.path.join(base_directory, dirs[0])
assert os.path.isdir(whole_path), f"Not directory: {whole_path}"
return dirs[0]
else:
suffix = ''.join(
random.choice(string.ascii_uppercase + string.digits
+ string.ascii_lowercase)
for _ in range(MANGLER_LENGTH)
)
whole_path = os.path.join(base_directory, prefix+suffix)
os.makedirs(whole_path)
return prefix+suffix
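# Hedged sketch (the task id 42 is illustrative):
#     mangled_dirname("data/task-content/42", "zadani_")
# either returns the single existing "zadani_<suffix>" directory name or
# creates a new one with a random 16-character alphanumeric suffix.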
def copy_data(task: model.Task, source_path: str, target_path: str) -> None:
"""Copy all data from repository to backend path."""
if not os.path.isdir(target_path):
os.makedirs(target_path)
shutil.rmtree(target_path)
if os.path.isdir(source_path):
shutil.copytree(source_path, target_path)
def process_modules(task: model.Task, git_path: str) -> None:
# Current modules in the database
modules = session.query(model.Module).\
filter(model.Module.task == task.id).\
order_by(model.Module.order).all()
i = 0
while (os.path.isdir(git_path + "/module" + str(i + 1))):
if i < len(modules):
module = modules[i]
module.order = i
else:
module = model.Module(
task=task.id,
type="general",
name="",
order=i
)
session.add(module)
session.commit()
log("Processing module" + str(i + 1))
process_module(module, git_path + "/module" + str(i + 1), task)
try:
session.commit()
except BaseException:
session.rollback()
raise
i += 1
if i == 0:
# No module -> no public evaluation
global eval_public
eval_public = False
# Delete the superfluous modules
while i < len(modules):
module = modules[i]
try:
session.delete(module)
session.commit()
except BaseException:
session.rollback()
raise
i += 1
def process_module(module: model.Module, module_path: str,
task: model.Task) -> None:
"""Zpracovani modulu
'module' je vzdy inicializovany
'module'_path muze byt bez lomitka na konci
"""
specific = process_module_json(module, module_path + "/module.json")
# Copy whole module directory into data/modules
log("Copying module data")
target_path = os.path.join("data", "modules", str(module.id))
if os.path.isdir(target_path):
shutil.rmtree(target_path)
shutil.copytree(module_path, target_path)
module.custom = os.path.isfile(os.path.join(target_path, "module-gen"))
process_module_md(module, module_path + "/module.md", specific, task)
ModuleSpecs = Dict[str, Union[str, int, float, bool, None,
Dict[str, Union[str, int]]]]
def process_module_json(module: model.Module, filename: str) -> ModuleSpecs:
"""Zpracovani souboru module.json"""
log("Processing module json")
with open(filename, 'r', encoding='utf-8-sig') as f:
data = json.loads(f.read())
if data['type'] == 'text':
module.type = model.ModuleType.TEXT
elif data['type'] == 'general':
module.type = model.ModuleType.GENERAL
elif data['type'] == 'programming':
module.type = model.ModuleType.PROGRAMMING
elif data['type'] == 'quiz':
module.type = model.ModuleType.QUIZ
elif data['type'] == 'sortable':
module.type = model.ModuleType.SORTABLE
else:
module.type = model.ModuleType.GENERAL
# JSON parameters for the specific module type
specific = data[data['type']] if data['type'] in data else {}
module.max_points = data['max_points']
module.autocorrect = data['autocorrect']
module.bonus = data['bonus'] if 'bonus' in data else False
module.action = data['action'] if 'action' in data else ""
if isinstance(module.action, dict):
module.action = json.dumps(module.action, indent=2, ensure_ascii=False)
global eval_public
if not module.autocorrect:
eval_public = False
return specific
def process_module_md(module: model.Module, filename: str,
specific: ModuleSpecs, task: model.Task) -> None:
"""Zpracovani module.md
Pandoc spoustime az uplne nakonec, abychom mohli provest analyzu souboru.
"""
log("Processing module md")
with open(filename, 'r') as f:
data = f.readlines()
# Look for the module name on line zero
name = re.search(r"(# .*)", data[0])
if name is not None:
module.name = re.search(r"<h1(.*?)>(.*?)</h1>",
parse_pandoc(name.group(1))).group(2)
data.pop(0)
else:
module.name = "Nazev modulu nenalezen"
# The job of the following methods is to process the module logic and to
# leave only the introductory text in \data.
if module.type == model.ModuleType.GENERAL:
data = process_module_general(module, data, specific)
elif module.type == model.ModuleType.PROGRAMMING:
data = process_module_programming(module, data, specific,
os.path.dirname(filename))
elif module.type == model.ModuleType.QUIZ:
data = process_module_quiz(module, data, specific, task)
elif module.type == model.ModuleType.SORTABLE:
data = process_module_sortable(module, data, specific)
elif module.type == model.ModuleType.TEXT:
data = process_module_text(module, data, specific,
os.path.dirname(filename), task)
else:
module.description = "Neznamy typ modulu"
log("Processing body")
# Parsing of the assignment body
module.description = parse_simple_text(task, ''.join(data))
def process_module_general(module: model.Module, lines: List[str],
specific: ModuleSpecs) -> List[str]:
"""Tady opravdu nema nic byt, general module nema zadnou logiku"""
log("Processing general module")
module.data = '{}'
return lines
def process_module_programming(module: model.Module, lines: List[str],
specific: ModuleSpecs,
source_path: str) -> List[str]:
log("Processing programming module")
# Look for the sample code in the assignment
line = 0
while (line < len(lines)) and (not re.match(r"^```~python", lines[line])):
line += 1
if line == len(lines):
return lines
# Look for the end of the code
end = line + 1
while (end < len(lines)) and (not re.match(r"^```", lines[end])):
end += 1
code = ''.join(lines[line + 1:end])
# Add the sample code to \module.data
data = {}
data['programming'] = {}
data['programming']['default_code'] = code
if 'version' in specific:
data['programming']['version'] = specific['version']
target_path = os.path.join("data", "modules", str(module.id))
data['programming']['merge_script'] = os.path.join(target_path, "merge")
data['programming']['stdin'] = os.path.join(target_path, "stdin.txt")
if not os.path.isfile(os.path.join(source_path, "/stdin.txt")):
open(os.path.join(target_path, "stdin.txt"),
"a").close() # create empty stdin
if os.path.isfile(os.path.join(source_path, "post")):
data['programming']['post_trigger_script'] = os.path.join(target_path,
"post")
data['programming']['check_script'] = os.path.join(target_path, "eval")
# directives from module.json
if 'limits' in specific:
data['programming']['limits'] = specific['limits']
module.data = json.dumps(data, indent=2, ensure_ascii=False)
return lines[:line]
def process_module_quiz(module: model.Module, lines: List[str],
specific: ModuleSpecs, task: model.Task) -> List[str]:
log("Processing quiz module")
# Look for the individual questions
quiz_data = []
line = 0
text_end = 0
while (line < len(lines)):
while ((line < len(lines)) and
(not re.match(r"^##(.*?) \((r|c)\)", lines[line]))):
line += 1
if text_end == 0:
text_end = line
if line == len(lines):
break
# Parsing of a question
question = {}
head = re.match(r"^##(.*?) \((r|c)\)", lines[line])
question['question'] = re.match("<p>(.*)</p>",
parse_pandoc(head.group(1))).group(1)
if head.group(2) == 'r':
question['type'] = 'radio'
else:
question['type'] = 'checkbox'
# Look for the question's accompanying text
line += 1
end = line
while (end < len(lines)) and (not re.match(r"^~", lines[end])):
end += 1
question['text'] = parse_simple_text(task, ''.join(lines[line:end]))
# Parse the possible answers
line = end
options = []
correct = []
while line < len(lines):
match = re.match(r"^~\s*(.*?)\s*(\*|-)(\s|-)*$",
lines[line] + " -")
if not match:
break
options.append(parse_pandoc(match.group(1)).replace("<p>", "").
replace("</p>", "").replace('\n', ''))
if match.group(2) == '*':
correct.append(len(options) - 1)
line += 1
question['options'] = options
question['correct'] = correct
# Add the question
quiz_data.append(question)
module.data = json.dumps({'quiz': quiz_data}, indent=2, ensure_ascii=False)
return lines[:text_end]
def process_module_sortable(module: model.Module,
lines: List[str],
specific: ModuleSpecs) -> List[str]:
log("Processing sortable module")
sort_data = {}
sort_data['fixed'] = []
sort_data['movable'] = []
sort_data['correct'] = []
line = 0
while (line < len(lines)) and (not re.match(r"^~", lines[line])):
line += 1
text_end = line
# Parsing of the fixed part
while line < len(lines):
match = re.match(r"^~\s*(.*)", lines[line])
if not match:
break
parsed = parse_pandoc(match.group(1)).replace("<p>", "").\
replace("</p>", "").replace('\n', '')
sort_data['fixed'].append({
'content': parsed,
'offset': get_sortable_offset(parsed)
})
line += 1
# Blank line between the fixed and movable parts
line += 1
# Movable part
while line < len(lines):
match = re.match(r"^~\s*(.*)", lines[line])
if not match:
break
parsed = parse_pandoc(match.group(1)).replace("<p>", "").\
replace("</p>", "").replace('\n', '')
sort_data['movable'].append({
'content': parsed,
'offset': get_sortable_offset(parsed)
})
line += 1
# Parsing of the correct orderings
while line < len(lines):
match = re.match(r"^\s*\((((a|b)\d+,)*(a|b)\d+)\)", lines[line])
if match:
sort_data['correct'].append(match.group(1).split(','))
line += 1
module.data = json.dumps({'sortable': sort_data}, indent=2, ensure_ascii=False)
return lines[:text_end]
def get_sortable_offset(text: str) -> int:
if re.match(r"^(if|Vstup:|while|for|def) ", text):
return 1
elif re.match(r"^(fi|od)$", text) or re.match(r"^return ", text):
return -1
return 0
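# Hedged sketch: get_sortable_offset("if x > 0:") -> 1 (opens a block),
# get_sortable_offset("fi") -> -1 (closes one), and anything else -> 0.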
def process_module_text(module: model.Module, lines: List[str],
specific: ModuleSpecs, path: str,
task: model.Task) -> List[str]:
log("Processing text module")
text_data = {"inputs": 0}
line = 0
while (line < len(lines)) and (not re.match(r"^~", lines[line])):
line += 1
text_end = line
if line >= len(lines):
module.data = json.dumps(text_data, indent=2, ensure_ascii=False)
return lines
inputs_cnt = 0
diff = []
questions = []
while line < len(lines):
match = re.match(r"^~\s*(.*?)\s*(\*\*(.*?)\*\*)?\s*$", lines[line])
if not match:
break
questions.append(parse_simple_text(task, match.group(1)).
replace("<p>", "").replace("</p>", ""))
inputs_cnt += 1
if match.group(3):
diff.append(match.group(3))
else:
if len(diff) > 0:
diff.append("")
line += 1
text_data['questions'] = questions
text_data['inputs'] = inputs_cnt
if len(diff) > 0:
text_data['diff'] = diff
else:
# Copy the eval script
target_path = "data/modules/" + str(module.id) + "/"
if not os.path.isdir(target_path):
os.makedirs(target_path)
shutil.copy2(path + "/eval", target_path + "eval")
text_data['eval_script'] = target_path + "eval"
module.data = json.dumps({'text': text_data}, indent=2, ensure_ascii=False)
return lines[:text_end]
###############################################################################
# Helper parsing functions:
def parse_pandoc(source: str) -> str:
"""Parsovani stringu \source pandocem"""
return pypandoc.convert(
source,
'html5',
format='markdown+smart',
extra_args=['--mathjax', '--email-obfuscation=none']
)
def replace_h(source: str) -> str:
"""<h2> -> <h3>, <h3> -> <h4>, <h4> -> <h5> (to musi stacit)"""
return source.replace("<h4", "<h5").replace("</h4>", "</h5>"). \
replace("<h3", "<h4").replace("</h3>", "</h4>"). \
replace("<h2", "<h3").replace("</h2>", "</h3>"). \
replace("<h1", "<h3").replace("</h1>", "</h3>")
def format_custom_tags(source: str) -> str:
"""
Replaces all custom-defined tags with divs
e.g. <ksi-tip> is replaced with <div class="ksi-custom ksi-tip">
:param source: HTML to adjust
:return: adjusted HTML
"""
tags = ('ksi-tip', 'ksi-collapse', 'ksi-pseudocode')
for tag in tags:
tag_escaped = re.escape(tag)
source = re.sub(fr'<{tag_escaped}(.*?)>', fr'<div class="ksi-custom {tag}"\1>', source, flags=re.IGNORECASE)
source = re.sub(fr'</{tag_escaped}>', r"</div>", source, flags=re.IGNORECASE)
return source
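# Hedged sketch: format_custom_tags('<ksi-tip class="x">hint</ksi-tip>')
# yields '<div class="ksi-custom ksi-tip" class="x">hint</div>' -- the
# original opening tag's attributes are carried over verbatim.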
def change_links(task: model.Task, source: str) -> str:
"""Nahrazuje odkazy do ../data/ a data/ za odkazy do backendu."""
allowed_prefixes = ['(../', '(', '"'] # These should be mutually exclusive
urls_to_replace = {
"data_solution/": f"{util.config.backend_url()}/taskContent/{task.id}/{task.mangled_soldir}/",
"data/": f"{util.config.backend_url()}/taskContent/{task.id}/{task.mangled_datadir}/"
}
res = source
for url_fragment in urls_to_replace:
for prefix in allowed_prefixes:
res = res.replace(prefix + url_fragment, prefix + urls_to_replace[url_fragment])
return res
def add_table_class(source: str) -> str:
"""Doplni ke kazde tabulce class="table table-striped"
Tohleto bohuzel nejde udelat lip (napriklad explicitnim napsanim do
markdownu).
"""
return re.sub(r"<table>", "<table class='table table-striped'>", source)
def parse_simple_text(task: model.Task, text: str) -> str:
return add_table_class(
change_links(
task, replace_h(
parse_pandoc(
format_custom_tags(
text
)
)
)
)
)
###############################################################################
def create_log(task: model.Task, status: str) -> None:
with open(LOGFILE, 'w') as f:
f.write(str(task.id) + '\n')
def log(text: str) -> None:
with open(LOGFILE, 'a') as f:
f.write(text + '\n')
###############################################################################
|
{
"content_hash": "10db0a673c9b7d9c28ed5d91d6567bb1",
"timestamp": "",
"source": "github",
"line_count": 891,
"max_line_length": 116,
"avg_line_length": 31.97979797979798,
"alnum_prop": 0.5629255281813715,
"repo_name": "fi-ksi/web-backend",
"id": "d9a577284afba44450c4585e19dfc620c4fce4e2",
"size": "28494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/admin/taskDeploy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1669"
},
{
"name": "Python",
"bytes": "386983"
},
{
"name": "Shell",
"bytes": "2561"
}
],
"symlink_target": ""
}
|
"""AppAssure 5 REST API"""
from appassure.api import AppAssureAPI
class INightlyJobsManagement(AppAssureAPI):
"""Full documentation online at
http://docs.appassure.com/display/AA50D/INightlyJobsManagement
"""
def cancelNightlyJobs(self):
"""Cancels all nightly jobs execution."""
return self.session.request('nightlyJobs/')
def getNightlyJobsSettings(self):
"""Gets nightly jobs settings for the Core."""
return self.session.request('nightlyJobs/config')
def setNightlyJobsSettings(self, data):
"""Sets nightly jobs settings for the Core."""
return self.session.request('nightlyJobs/config', 'PUT',
self.getXML(data, 'nightlyJobsSettings'))
def getAgentNightlyJobs(self, agentId):
"""Gets jobs for the specified agent."""
return self.session.request('nightlyJobs/config/%s'
% (agentId))
def setAgentNightlyJobs(self, data, agentId):
"""Sets enabled nightly jobs for the agent."""
return self.session.request('nightlyJobs/config/%s'
% (agentId), 'PUT',
self.getXML(data, 'nightlyJobIds'))
def setJobConfiguration(self, jobId):
"""Sets job configuration."""
return self.session.request('nightlyJobs/jobConfiguration/%s'
% (jobId), 'PUT')
def getJobConfiguration(self, jobId):
"""Gets job configuration for the specified job."""
return self.session.request('nightlyJobs/jobConfiguration/%s'
% (jobId))
def setAgentJobConfiguration(self, jobId, agentId):
"""Sets job configuration for specified agent."""
return self.session.request('nightlyJobs/jobConfiguration/%s/%s'
% (jobId, agentId), 'PUT')
def getAgentJobConfiguration(self, jobId, agentId):
"""Gets job configuration for the specified job of the
agent.
"""
return self.session.request('nightlyJobs/jobConfiguration/%s/%s'
% (jobId, agentId))
def getNightlyJobsStatus(self):
"""Determines whether nightly jobs are in progress and
then gets the transaction id of currently running jobs.
"""
return self.session.request('nightlyJobs/status')
|
{
"content_hash": "3f1b46951074e313d8e0eae8ec316879",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 72,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.6398249452954048,
"repo_name": "rshipp/python-appassure",
"id": "747aae4ced94b51d14764a0d801cc6290c8b3577",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "appassure/core/INightlyJobsManagement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "174855"
}
],
"symlink_target": ""
}
|
"""
mailme.services.feed
~~~~~~~~~~~~~~~~~~~~
Add support for:
* Opml
* RSS / Atom
* Basically everything that `feedparser` supports.
"""
import time
import socket
import hashlib
from datetime import datetime, timedelta
import listparser
import requests
import feedparser
import pytz
from requests import codes
from django.conf import settings
from django.utils.timezone import utc
from django.utils.encoding import force_text, force_bytes
from mailme import exceptions
from mailme.services.base import BaseService
from mailme.models.feed import (
Feed,
ACCEPTED_STATUSES,
FEED_GENERIC_ERROR_TEXT,
FEED_TIMEDOUT_ERROR_TEXT,
FEED_NOT_FOUND_ERROR_TEXT
)
from mailme.models.post import Post
from mailme.models.category import Category
from mailme.models.enclosure import Enclosure
from mailme.utils.html import cleanup_html
from mailme.utils.logging import logged
GUID_FIELDS = frozenset(("title", "link", "author"))
def find_post_content(feed_obj, entry):
try:
content = entry["content"][0]["value"]
except (IndexError, KeyError):
content = entry.get("description") or entry.get("summary") or ""
return cleanup_html(content)
def date_to_datetime(field_name):
"""Given a post field, convert its :mod:`feedparser` date tuple to
:class:`datetime.datetime` objects.
:param field_name: The post field to use.
"""
def _field_to_datetime(feed_obj, entry):
if field_name in entry:
try:
time_ = time.mktime(entry[field_name])
date = datetime.fromtimestamp(time_).replace(tzinfo=utc)
except TypeError:
date = datetime.now(pytz.utc)
return date
return datetime.now(pytz.utc)
_field_to_datetime.__doc__ = "Convert %s to datetime" % repr(field_name)
return _field_to_datetime
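# Hedged sketch: date_to_datetime("published_parsed") returns a handler that
# post_field_handlers below maps the "published"/"updated" fields through;
# the handler converts feedparser's struct_time into an aware UTC datetime
# and falls back to the current time when the field is missing or malformed.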
def generate_guid(entry):
"""Generate missing guid for post entry."""
text = "|".join(force_text(entry.get(key) or "") for key in GUID_FIELDS)
return hashlib.md5(force_bytes(text)).hexdigest()
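# Hedged sketch: generate_guid() joins the entry's guid fields with "|" and
# hashes the result, roughly md5(b"<title>|<link>|<author>").hexdigest();
# note GUID_FIELDS is a frozenset, so the join order is deterministic within
# a run but not guaranteed to be title/link/author.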
def get_entry_guid(feed_obj, entry):
"""
Get the guid for a post.
If the post doesn't have a guid, a new guid is generated.
"""
if "guid" not in entry:
return generate_guid(entry)
guid = force_text(entry["guid"]).strip()
return guid
def format_date(t):
"""Make sure time object is a :class:`datetime.datetime` object."""
if isinstance(t, time.struct_time):
return datetime(*t[:6], tzinfo=pytz.utc)
return t.replace(tzinfo=utc)
def entries_by_date(entries, limit=None):
"""
Sort the feed entries by date.
:param entries: Entries given from :mod:`feedparser`.
:param limit: Limit number of posts.
"""
now = datetime.now(pytz.utc)
def find_date(entry, counter):
"""Find the most current date entry tuple."""
return (entry.get("published_parsed") or
entry.get("date_parsed") or
now - timedelta(seconds=(counter * 30)))
sorted_entries = []
for counter, entry in enumerate(entries):
date = format_date(find_date(entry, counter))
# the found date is put into the entry
# because some feeds just don't have any valid dates.
# This will ensure that the posts will be properly ordered
# later on when put into the database.
entry["published_parsed"] = (entry.get("published_parsed")
or date.timetuple())
sorted_entries.append((date, entry))
sorted_entries.sort(key=lambda key: key[0])
sorted_entries.reverse()
return [entry for _date, entry in sorted_entries[:limit]]
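# Hedged sketch: entries_by_date(entries, limit=5) returns at most five
# entries, newest first; entries without any parsed date are pushed
# progressively further into the past (30 seconds per position) so their
# original relative order is preserved.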
@logged
class FeedService(BaseService):
"""
Import/Update feeds.
:keyword post_limit: See :attr`post_limit`.
:keyword update_on_import: See :attr:`update_on_import`.
:keyword logger: See :attr:`logger`.
:keyword timeout: See :attr:`timeout`.
.. attribute:: post_limit
Default limit on the number of posts.
.. attribute:: update_on_import
By default, fetch new posts when a feed is imported
.. attribute:: logger
The :class:`logging.Logger` instance used for logging messages.
.. attribute:: include_categories
By default, include feed/post categories.
.. attribute:: include_enclosures
By default, include post enclosures.
.. attribute:: timeout
Default feed timeout.
.. attribute:: parser
The feed parser used. (Default: :mod:`feedparser`.)
"""
post_field_handlers = {
"content": find_post_content,
"published": date_to_datetime("published_parsed"),
"updated": date_to_datetime("published_parsed"),
"link": lambda feed_obj, entry: entry.get("link") or feed_obj.feed_url,
"feed": lambda feed_obj, entry: feed_obj,
"guid": get_entry_guid,
"title": lambda feed_obj, entry: entry.get("title", "(no title)").strip(),
"author": lambda feed_obj, entry: entry.get("author", "").strip(),
}
def __init__(self, **kwargs):
self.post_limit = kwargs.get("post_limit", settings.MAILME_POST_LIMIT)
self.update_on_import = kwargs.get("update_on_import", True)
self.include_categories = kwargs.get("include_categories", True)
self.include_enclosures = kwargs.get("include_enclosures", True)
self.timeout = kwargs.get("timeout", settings.MAILME_FEED_TIMEOUT)
def parse_feed(self, feed_url, etag=None, modified=None, timeout=None,
maxlen=None):
"""
Parse feed using feedparser.
:param feed_url: URL to the feed to parse.
:keyword etag: E-tag received from last parse (if any).
:keyword modified: ``Last-Modified`` HTTP header received from last
parse (if any).
:keyword timeout: Parser timeout in seconds.
"""
self.logger.debug('parsing feed {}'.format(feed_url))
prev_timeout = socket.getdefaulttimeout()
timeout = timeout or self.timeout
socket.setdefaulttimeout(timeout)
try:
if maxlen:
headers = requests.head(feed_url).headers
contentlen = int(headers.get("content-length") or 0)
if contentlen > maxlen:
raise exceptions.FeedCriticalError(FEED_GENERIC_ERROR_TEXT)
feed = feedparser.parse(feed_url,
etag=etag,
modified=modified)
finally:
socket.setdefaulttimeout(prev_timeout)
return feed
def handle(self, feed_url, **kwargs):
"""
Import feed.
If feed is not seen before it will be created, otherwise
just updated.
:param feed_url: URL to the feed to import.
:keyword force: Force import of feed even if it's been updated
recently.
"""
self.logger.debug('import feed {}'.format(feed_url))
force = kwargs.pop('force', None)
local = kwargs.pop('local', False)
feed_url = feed_url.strip()
feed = None
try:
feed_obj = Feed.objects.get(feed_url=feed_url)
except Feed.DoesNotExist:
try:
feed = self.parse_feed(feed_url)
except socket.timeout:
Feed.objects.create(feed_url=feed_url)
raise exceptions.TimeoutError(FEED_TIMEDOUT_ERROR_TEXT)
except Exception:
feed = {"status": 500}
default_status = codes.NOT_FOUND
if local:
default_status = codes.OK
status = feed.get("status", default_status)
if status == codes.NOT_FOUND:
raise exceptions.FeedNotFoundError(str(FEED_NOT_FOUND_ERROR_TEXT), feed_url)
if status not in ACCEPTED_STATUSES:
raise exceptions.FeedCriticalError(
FEED_GENERIC_ERROR_TEXT,
status=status)
# Feed can be local / fetched with an HTTP client.
status = feed.get("status") or feed.get("status\n") or codes.OK
if status == codes.FOUND or status == codes.MOVED_PERMANENTLY:
if feed_url != feed.href:
return self.handle(feed.href, force=force)
feed_title = feed.channel.get("title", "(no title)").strip()
feed_obj = Feed.objects.update_or_create(feed_url=feed_url, **{
'title': feed_title,
'description': feed.channel.get('description', '')
})
if self.include_categories:
feed_obj.categories.add(*self.get_categories(feed.channel))
if self.update_on_import:
feed_obj = self.update(feed_obj, feed=feed, force=force)
return feed_obj
def update(self, feed_obj, **kwargs):
"""
Update (refresh) feed.
The feed must already exist in the system, if not you have
to import it using :meth:`import_feed`.
:param feed_obj: the Feed object
:keyword feed: If feed has already been parsed you can pass the
structure returned by the parser so it doesn't have to be parsed
twice.
:keyword force: Force refresh of the feed even if it has been
recently refreshed already.
"""
feed = kwargs.pop('feed', None)
force = kwargs.pop('force', False)
now = datetime.utcnow().replace(tzinfo=utc)
already_fresh = (feed_obj.date_last_refresh and
now < feed_obj.date_last_refresh +
settings.MAILME_MIN_REFRESH_INTERVAL)
if already_fresh and not force:
self.logger.info(
"Feed %s is fresh. Skipping refresh." % feed_obj.feed_url)
return feed_obj
limit = self.post_limit
if not feed:
last_modified = None
if feed_obj.http_last_modified and not force:
last_modified = feed_obj.http_last_modified.timetuple()
etag = feed_obj.http_etag if not force else None
try:
feed = self.parse_feed(feed_obj.feed_url,
etag=etag,
modified=last_modified)
except socket.timeout:
return feed_obj.save_timeout_error()
except Exception:
return feed_obj.save_generic_error()
        # Feed can be local, i.e. not fetched with an HTTP client.
status = feed.get("status", codes.OK)
if status == codes.NOT_MODIFIED and not force:
return feed_obj
if feed_obj.is_error_status(status):
return feed_obj.set_error_status(status)
if feed.entries:
sorted_by_date = entries_by_date(feed.entries, limit)
for entry in sorted_by_date:
self.import_entry(entry, feed_obj)
feed_obj.date_last_refresh = now
feed_obj.http_etag = feed.get("etag", "")
if hasattr(feed, "modified") and feed.modified:
try:
timestamp = time.mktime(feed.modified)
modified = datetime.fromtimestamp(timestamp).replace(tzinfo=utc)
feed_obj.http_last_modified = modified
except TypeError:
pass
self.logger.debug(
"Saving feed object... %s" % (feed_obj.feed_url)
)
feed_obj.save()
return feed_obj
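    # Illustrative use of update (sketch; `service` and `parsed_feed` are
    # hypothetical; passing `feed` avoids parsing the feed a second time):
    #
    #     feed_obj = service.update(feed_obj, feed=parsed_feed, force=False)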
def import_opml(self, feed_url):
feed = self.parse_feed(feed_url)
success = []
errors = []
if 'opml' in feed['feed']:
opml = listparser.parse(feed_url)
for item in opml['feeds']:
try:
feed = self.handle(item['url'])
success.append(feed)
except (exceptions.FeedCriticalError, exceptions.TimeoutError) as exc:
errors.append((feed_url, exc))
else:
try:
feed = self.handle(feed_url)
success.append(feed)
except (exceptions.FeedCriticalError, exceptions.TimeoutError) as exc:
errors.append((feed_url, exc))
return success, errors
def get_categories(self, obj):
"""Get and save categories."""
categories = []
for category in getattr(obj, 'categories', []):
categories.append(self.create_category(*category))
return categories
def create_category(self, domain, title):
"""
Create new category.
:param domain: The category domain.
:param title: The title of the category.
"""
return Category.objects.update_or_create(
title=title.strip(),
domain=domain and domain.strip() or ""
)
def create_enclosure(self, **kwargs):
"""Create new enclosure."""
kwargs.setdefault("length", 0)
return Enclosure.objects.update_or_create(**kwargs)
def get_enclosures(self, entry):
"""Get and create enclosures for feed."""
enclosures = []
for enclosure in getattr(entry, 'enclosures', []):
href = getattr(enclosure, 'href', None)
type = getattr(enclosure, 'type', None)
if href is None or type is None:
# Example feed with fully empty enclosures: http://blog.xfce.org/feed/
continue
if enclosure and hasattr(enclosure, 'length'):
try:
                    # Some feeds send an empty length instead of an explicit 0
                    # to indicate that they cannot determine the length.
# Spec: When an enclosure's size cannot be determined,
# a publisher should use a length of 0.
length = int(enclosure.length)
except ValueError:
length = 0
enclosures.append(self.create_enclosure(
url=enclosure.href,
length=length,
type=enclosure.type
))
return enclosures
def post_fields_parsed(self, entry, feed_obj):
"""Parse post fields."""
return dict(
(key, handler(feed_obj, entry))
for key, handler in list(self.post_field_handlers.items())
)
def import_entry(self, entry, feed_obj):
"""Import feed post entry."""
self.logger.debug("Importing entry... %s" % feed_obj.feed_url)
fields = self.post_fields_parsed(entry, feed_obj)
post = Post.objects.update_or_create(feed_obj, **fields)
if self.include_enclosures:
post.enclosures.add(*(self.get_enclosures(entry) or []))
if self.include_categories:
post.categories.add(*(self.get_categories(entry) or []))
self.logger.debug("Post successfully imported... %s" % (
feed_obj.feed_url))
return post
|
{
"content_hash": "811ec7a0a4a36965fef9b03c12e08b4c",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 92,
"avg_line_length": 33.58444444444444,
"alnum_prop": 0.581155296764375,
"repo_name": "mailme/mailme.io",
"id": "c440dc6707694b23685cd5e1557124fc746df934",
"size": "15137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mailme/services/feed/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "39916"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Python",
"bytes": "161761"
},
{
"name": "Ruby",
"bytes": "1212"
},
{
"name": "Shell",
"bytes": "5106"
}
],
"symlink_target": ""
}
|
"""
Cascade Project deletion to Release
Revision ID: 29d87a24d79e
Revises: c0682028c857
Create Date: 2018-03-09 22:37:21.343619
"""
from alembic import op
revision = "29d87a24d79e"
down_revision = "c0682028c857"
def upgrade():
op.drop_constraint("releases_name_fkey", "releases", type_="foreignkey")
op.create_foreign_key(
"releases_name_fkey",
"releases",
"packages",
["name"],
["name"],
onupdate="CASCADE",
ondelete="CASCADE",
)
def downgrade():
op.drop_constraint("releases_name_fkey", "releases", type_="foreignkey")
op.create_foreign_key(
"releases_name_fkey",
"releases",
"packages",
["name"],
["name"],
onupdate="CASCADE",
)
|
{
"content_hash": "845d15a99499b4fc58e94f503f30c9f4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 20.210526315789473,
"alnum_prop": 0.59375,
"repo_name": "dstufft/warehouse",
"id": "462d34df8f2c99f09b08a26720094cdf62b55d61",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "warehouse/migrations/versions/29d87a24d79e_cascade_project_deletion_to_release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "182835"
},
{
"name": "Dockerfile",
"bytes": "7026"
},
{
"name": "HTML",
"bytes": "287733"
},
{
"name": "JavaScript",
"bytes": "59981"
},
{
"name": "Makefile",
"bytes": "6026"
},
{
"name": "Mako",
"bytes": "1505"
},
{
"name": "Python",
"bytes": "1545647"
},
{
"name": "Shell",
"bytes": "2326"
}
],
"symlink_target": ""
}
|
import multiprocessing, logging # Fix atexit bug
from setuptools import setup, find_packages
def readme():
try:
return open('README.rst').read()
    except Exception:
pass
return ''
setup(
name='s3peat',
# Make sure to also update the version in s3peat/__init__.py
version='1.0.0',
author="Jacob Alheid",
author_email="[email protected]",
description="Fast uploader to S3",
long_description=readme(),
url='http://github.com/shakefu/s3peat',
packages=find_packages(exclude=['test']),
install_requires=[
'boto',
'pytool',
],
entry_points={
'console_scripts': {
"s3peat = s3peat.scripts:Main.console_script",
},
},
# test_suite='nose.collector',
# tests_require=[
# 'nose',
# 'mock',
# ],
)
|
{
"content_hash": "97ad8372fd1423cbc5a56cbae476be02",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 68,
"avg_line_length": 24.789473684210527,
"alnum_prop": 0.5010615711252654,
"repo_name": "shakefu/s3peat",
"id": "a0a52e86efabe52972ab200441862534e13e66bc",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19004"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
class MyLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.conv1 = paddle.nn.Conv2D(
in_channels=3, out_channels=2, kernel_size=3, padding=2
)
self.linear1 = paddle.nn.Linear(1352, 32)
self.linear2 = paddle.nn.Linear(32, 10)
def forward(self, img):
hidden = self.conv1(img)
hidden = paddle.flatten(hidden, start_axis=1)
hidden = self.linear1(hidden)
prediction = self.linear2(hidden)
return prediction
class TestASPDynamicPruningBase(unittest.TestCase):
def setUp(self):
self.layer = MyLayer()
place = paddle.CPUPlace()
if core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
self.img = paddle.to_tensor(
np.random.uniform(low=-0.5, high=0.5, size=(32, 3, 24, 24)),
dtype=np.float32,
place=place,
stop_gradient=False,
)
self.set_config()
def set_config(self):
self.mask_gen_func = 'mask_1d'
self.mask_check_func = (
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D
)
def test_inference_pruning(self):
self.__pruning_and_checking(False)
def test_training_pruning(self):
optimizer = paddle.optimizer.SGD(
learning_rate=0.01, parameters=self.layer.parameters()
)
optimizer = paddle.incubate.asp.decorate(optimizer)
self.__pruning_and_checking(True)
def __pruning_and_checking(self, with_mask):
paddle.incubate.asp.prune_model(
self.layer, mask_algo=self.mask_gen_func, with_mask=with_mask
)
for param in self.layer.parameters():
if ASPHelper._is_supported_layer(
paddle.static.default_main_program(), param.name
):
mat = param.numpy()
if (len(param.shape) == 4 and param.shape[1] < 4) or (
len(param.shape) == 2 and param.shape[0] < 4
):
self.assertFalse(
paddle.fluid.contrib.sparsity.check_sparsity(
mat.T, n=2, m=4
)
)
else:
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mat.T, func_name=self.mask_check_func, n=2, m=4
)
)
class TestASPDynamicPruning1D(TestASPDynamicPruningBase):
def set_config(self):
self.mask_gen_func = 'mask_1d'
self.mask_check_func = (
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D
)
class TestASPDynamicPruning2DBest(TestASPDynamicPruningBase):
def set_config(self):
self.mask_gen_func = 'mask_2d_best'
self.mask_check_func = (
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D
)
class TestASPDynamicPruning2DGreedy(TestASPDynamicPruningBase):
def set_config(self):
self.mask_gen_func = 'mask_2d_greedy'
self.mask_check_func = (
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D
)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "cadb18f2cc98ff6c57e60dc8686fed8b",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 75,
"avg_line_length": 29.728070175438596,
"alnum_prop": 0.5644732959575096,
"repo_name": "PaddlePaddle/Paddle",
"id": "13282e0222c87fb1be89485908a6820b11d38db1",
"size": "4063",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/asp/test_asp_pruning_dynamic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
}
|
import datetime
import iso8601
from django.template.defaultfilters import register
from django.template.defaultfilters import timesince
from django.utils.safestring import mark_safe
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
@register.filter
def replace_underscores(string):
return string.replace("_", " ")
@register.filter
def parse_isotime(timestr, default=None):
# This duplicates oslo timeutils parse_isotime but with a
# @register.filter annotation and a silent fallback on error.
try:
return iso8601.parse_date(timestr)
except (iso8601.ParseError, TypeError):
return default or ''
@register.filter
def timesince_or_never(dt, default=None):
"""Call the Django ``timesince`` filter or a given default string.
It returns the string *default* if *dt* is not a valid ``date``
or ``datetime`` object.
When *default* is None, "Never" is returned.
"""
if default is None:
default = _("Never")
if isinstance(dt, datetime.date):
return timesince(dt)
return default
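# Template usage sketch (hypothetical template snippet):
#   {{ instance.updated_at|timesince_or_never }}
#   {{ instance.deleted_at|timesince_or_never:"Not deleted" }}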
@register.filter
def timesince_sortable(dt):
delta = timezone.now() - dt
# timedelta.total_seconds() not supported on python < 2.7
seconds = delta.seconds + (delta.days * 24 * 3600)
return mark_safe("<span data-seconds=\"%d\">%s</span>" %
(seconds, timesince(dt)))
|
{
"content_hash": "fcc76d114d83c8a1724d25109f3e2f7f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 70,
"avg_line_length": 28.693877551020407,
"alnum_prop": 0.689900426742532,
"repo_name": "ChameleonCloud/horizon",
"id": "3fcb29c19ae5ffa25cf3bab852180ec97419a700",
"size": "2011",
"binary": false,
"copies": "1",
"ref": "refs/heads/chameleoncloud/xena",
"path": "horizon/utils/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601681"
},
{
"name": "JavaScript",
"bytes": "2486133"
},
{
"name": "Python",
"bytes": "5346021"
},
{
"name": "SCSS",
"bytes": "129668"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
}
|
import os
from tests.base_case import ChatBotTestCase
from chatterbot.trainers import ChatterBotCorpusTrainer
class ChatterBotCorpusTrainingTestCase(ChatBotTestCase):
def setUp(self):
super(ChatterBotCorpusTrainingTestCase, self).setUp()
self.chatbot.set_trainer(ChatterBotCorpusTrainer)
def test_train_with_english_greeting_corpus(self):
self.chatbot.train('chatterbot.corpus.english.greetings')
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
def test_train_with_multiple_corpora(self):
self.chatbot.train(
'chatterbot.corpus.english.greetings',
'chatterbot.corpus.english.conversations',
)
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
def test_train_with_english_corpus(self):
self.chatbot.train('chatterbot.corpus.english')
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
class ChatterBotCorpusFilePathTestCase(ChatBotTestCase):
def setUp(self):
super(ChatterBotCorpusFilePathTestCase, self).setUp()
from chatterbot_corpus import corpus
self.chatbot.set_trainer(ChatterBotCorpusTrainer)
corpus_data_directory = os.path.dirname(corpus.__file__)
self.corpus_directory = os.path.join(corpus_data_directory, 'data')
def test_train_with_english_greeting_corpus(self):
file_path = os.path.join(self.corpus_directory, 'english', 'greetings.corpus.json')
self.chatbot.train(file_path)
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
def test_train_with_multiple_corpora(self):
self.chatbot.train(
os.path.join(self.corpus_directory, 'english', 'greetings.corpus.json'),
os.path.join(self.corpus_directory, 'english', 'conversations.corpus.json')
)
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
def test_train_with_english_corpus(self):
file_path = os.path.join(self.corpus_directory, 'english')
self.chatbot.train(file_path)
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
def test_train_with_english_corpus_training_slash(self):
file_path = os.path.join(self.corpus_directory, 'english') + '/'
self.chatbot.train(file_path)
statement = self.chatbot.storage.find('Hello')
self.assertIsNotNone(statement)
|
{
"content_hash": "ec40f622b22bdd892f2a017b6316aa0c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 91,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.690234375,
"repo_name": "Gustavo6046/ChatterBot",
"id": "5e2d08f4b32c024a5d283d96f2f777d260cdd53d",
"size": "2560",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/training_tests/test_chatterbot_corpus_training.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "345611"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Apr 09 09:45:06 2013
@author: Juergen Hasch, [email protected]
Distributed under the terms of the BSD License.
"""
"""
Tornado websocket server
Store cell text under unique id in dict
*This is a proof of concept only*
TODO:
- multiple connects
- persistent storage
"""
import json
import tornado.web
import tornado.websocket
import tornado.ioloop
pushaddress = "tcp://127.0.0.1:5555"
webport = 8889 # port address for web client
GLOBALS={
'sockets': []
}
HISTORY= {}
POSITION = {}
class WebSocketHandler(tornado.websocket.WebSocketHandler):
def open(self):
GLOBALS['sockets'].append(self)
def on_close(self):
GLOBALS['sockets'].remove(self)
def on_message(self, message):
# print 'message received %s' % message
x=json.loads(message)
id = x['id']
if ('action' in x) and (id in POSITION):
if x['action'] == 'forward':
idx = POSITION[id]
imax = len(HISTORY[id])
if idx < (imax-1):
idx += 1
POSITION[id] = idx
reply_str = HISTORY[id][idx]
reply = {"text": reply_str, "id" : id, "idx" : idx, "imax": imax }
self.write_message(json.dumps(reply))
elif x['action'] == 'back':
idx = POSITION[id]
imax = len(HISTORY[id])
if idx > 0:
idx -= 1
POSITION[id] = idx
reply_str = HISTORY[id][idx]
reply = {"text": reply_str, "id" : id, "idx" : idx, "imax": imax }
self.write_message(json.dumps(reply))
elif x['action'] == 'latest':
if id in HISTORY:
imax = len(HISTORY[id])
idx = imax-1
POSITION[id] = idx
reply_str = HISTORY[id][idx]
reply = {"text": reply_str, "id" : id, "idx" : idx, "imax": imax }
self.write_message(json.dumps(reply))
if 'text' in x:
# push in list
if id in HISTORY:
HISTORY[id].append(x['text'])
POSITION[id] = len(HISTORY[id])-1
else:
HISTORY[id] = [x['text']]
POSITION[id] = 0
application = tornado.web.Application([
(r"/websocket", WebSocketHandler),
])
if __name__ == "__main__":
application.listen(webport)
main_loop = tornado.ioloop.IOLoop.instance()
main_loop.start()
|
{
"content_hash": "bebce9082058122eb8f315756efd11f3",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 86,
"avg_line_length": 28.630434782608695,
"alnum_prop": 0.49316628701594534,
"repo_name": "andyneff/IPython-notebook-extensions",
"id": "e9c828f7b41d4674c864e81d6ac207f709ae9ce3",
"size": "2658",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nbextensions/testing/history/ipy_nb_history_websocket.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38375"
},
{
"name": "HTML",
"bytes": "1134905"
},
{
"name": "JavaScript",
"bytes": "503232"
},
{
"name": "Jupyter Notebook",
"bytes": "296013"
},
{
"name": "Python",
"bytes": "65298"
},
{
"name": "Shell",
"bytes": "3154"
},
{
"name": "Smarty",
"bytes": "8096"
},
{
"name": "TeX",
"bytes": "104015"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import platform
extra = True
if platform.system() == 'Linux':
libraries = ["maxwellsdk"]
extra_compile_args=['-D_LINUX']
extra_link_args=['-Llib']
elif platform.system() == 'Darwin': # OS x
libraries=["mxs"]
extra_compile_args=['-D_MACOSX','-D_LINUX','-DCOMPILER_GCC']
extra_link_args=['-Llib/','-framework CoreGraphics']
else:
libraries=["maxwell_plugins"]
extra_compile_args=['/Zp8']
extra_link_args=['/LIBPATH:Lib']
extra = False
if extra:
extra_compile_args.append('-g')
extra_compile_args.append('-ggdb')
extra_link_args.append('-g')
extra_link_args.append('-ggdb')
setup(
name = "Maxwell",
cmdclass = {"build_ext": build_ext},
ext_modules = [Extension(
"maxwell", # name of extension
["maxwell.pyx"], # filename of our Pyrex/Cython source
language="c++", # this causes Pyrex/Cython to create C++ source
include_dirs=["h"], # usual stuff
libraries=libraries, # ditto
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
embedsignature=True
)]
)
|
{
"content_hash": "4d89819eb8b674014386482ae58c7a8c",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 30.75609756097561,
"alnum_prop": 0.6209357652656622,
"repo_name": "martijnberger/cython-maxwell",
"id": "b4f06bd92af443916671738560a2130e954a9a93",
"size": "1286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81935"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext as _
from django.core.validators import RegexValidator
from sanitizer.models import SanitizedCharField
from config.settings import SANITIZER_ALLOWED_TAGS, SANITIZER_ALLOWED_ATTRIBUTES
from apps.account.models import Account
class Page(models.Model):
team = models.ForeignKey("team.Team", related_name="pages")
link = models.SlugField(unique=True)
name = models.CharField(max_length=1024)
content = SanitizedCharField(
max_length=50000, allowed_tags=SANITIZER_ALLOWED_TAGS,
allowed_attributes=SANITIZER_ALLOWED_ATTRIBUTES, strip=False
)
order = models.IntegerField(blank=True, null=True)
# meta
created_by = models.ForeignKey(Account, related_name="pages_created")
created_on = models.DateTimeField(auto_now_add=True)
updated_by = models.ForeignKey(Account, related_name="pages_updated")
updated_on = models.DateTimeField(auto_now=True)
def __unicode__(self):
return u"%s: %s" % (self.team.name, self.name)
class Meta:
unique_together = (("team", "name"), ("team", "link"))
ordering = ["order", "name"]
|
{
"content_hash": "24cc8ab409440805957cdf0120127ec0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 112,
"avg_line_length": 41.74285714285714,
"alnum_prop": 0.5742642026009582,
"repo_name": "F483/bikesurf.org",
"id": "098afaf2bb5f570bc449f79bd3372b1e2bc3af33",
"size": "1607",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/page/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1950764"
},
{
"name": "HTML",
"bytes": "6079063"
},
{
"name": "JavaScript",
"bytes": "284942"
},
{
"name": "Makefile",
"bytes": "4210"
},
{
"name": "Python",
"bytes": "215658"
},
{
"name": "Ruby",
"bytes": "4418"
},
{
"name": "Shell",
"bytes": "553"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from models import CheckoutPlugin, ProductPlugin
class CheckoutPlugin(CMSPluginBase):
model = CheckoutPlugin
module = _('stripe shop')
name = _("Checkout")
render_template = "aldryn_stripe_shop/checkout.html"
def render(self, context, instance, placeholder):
stripe = instance.stripe
context.update({
'instance': instance,
'placeholder': placeholder,
'stripe': stripe,
})
return context
class ProductPlugin(CMSPluginBase):
model = ProductPlugin
module = _('stripe shop')
name = _("Product")
render_template = "aldryn_stripe_shop/product.html"
allow_children = True
def render(self, context, instance, placeholder):
products = instance.product
context.update({
'instance': instance,
'placeholder': placeholder,
'product': products,
})
return context
plugin_pool.register_plugin(CheckoutPlugin)
plugin_pool.register_plugin(ProductPlugin)
|
{
"content_hash": "6e0c7b7ec485e63076b5213622b416a2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 56,
"avg_line_length": 28.21951219512195,
"alnum_prop": 0.6568712186689715,
"repo_name": "Lionardo/aldryn-stripe-shop",
"id": "1528f38d1748ebee62cd98e9d67847785f6bf30c",
"size": "1157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_stripe_shop/cms_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3856"
},
{
"name": "JavaScript",
"bytes": "26916"
},
{
"name": "Python",
"bytes": "22323"
}
],
"symlink_target": ""
}
|
""" Testing array coords
"""
import numpy as np
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nipy.testing import parametric
from nipy.core.api import Affine
import nipy.core.reference.array_coords as acs
@parametric
def test_array_coord_map():
# array coord map recreates the affine when you slice an image. In
# general, if you take an integer slice in some dimension, the
# corresponding column of the affine will go, leaving a row for the
    # lost dimension, with all zeros, except for the translation in the
# now-removed dimension, encoding the position of that particular
# slice
xz = 1.1; yz = 2.3; zz = 3.5
xt = 10.0; yt = 11; zt = 12
aff = np.diag([xz, yz, zz, 1])
aff[:3,3] = [xt, yt, zt]
shape = (2,3,4)
cmap = Affine.from_params('ijk', 'xyz', aff)
acm = acs.ArrayCoordMap(cmap, shape)
# slice the coordinate map for the first axis
sacm = acm[1]
# The affine has lost the first column, but has a remaining row (the
# first) encoding the translation to get to this slice
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[0, 0, xz+xt],
[yz, 0, yt],
[0, zz, zt],
[0, 0, 1]]))
sacm = acm[:,1]
# lost second column, remaining second row with translation
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[xz, 0, xt],
[0, 0, yz+yt],
[0, zz, zt],
[0, 0, 1]]))
sacm = acm[:,:,2]
# ditto third column and row
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[xz, 0, xt],
[0, yz, yt],
[0, 0, 2*zz+zt],
[0, 0, 1]]))
# check ellipsis slicing is the same as [:,: ...
sacm = acm[...,2]
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[xz, 0, xt],
[0, yz, yt],
[0, 0, 2*zz+zt],
[0, 0, 1]]))
# that ellipsis can follow other slice types
sacm = acm[:,...,2]
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[xz, 0, xt],
[0, yz, yt],
[0, 0, 2*zz+zt],
[0, 0, 1]]))
# that there can be only one ellipsis
yield assert_raises(ValueError, acm.__getitem__, (
(Ellipsis,Ellipsis,2)))
# that you can integer slice in all three dimensions, leaving only
# the translation column
sacm = acm[1,0,2]
yield assert_array_almost_equal(sacm.coordmap.affine,
np.array([
[xz+xt],
[yt],
[2*zz+zt],
[1]]))
# that anything other than an int, slice or Ellipsis is an error
yield assert_raises(ValueError, acm.__getitem__, ([0,2],))
yield assert_raises(ValueError, acm.__getitem__, (np.array([0,2]),))
|
{
"content_hash": "265e116b64c31b616309f128aa5f9e12",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 72,
"avg_line_length": 35.24175824175824,
"alnum_prop": 0.5291549734954787,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "4fffac8a7fc985122f669c2a84f3f240e8faaaa1",
"size": "3207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipy/core/reference/tests/test_array_coords.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
}
|
import base64
import io
import json
import os
import subprocess
import sys
BASEDIR = os.path.expanduser('~/git/aur-pkgbuilds')
def commitaur(msg):
with open('.SRCINFO', 'wb') as fh:
fh.write(subprocess.check_output(['makepkg', '--printsrcinfo']))
subprocess.check_call(['git', 'add', '.'])
subprocess.check_call(['git', 'commit', '-asm', msg])
subprocess.check_call(['git', 'push', '-u', 'origin', 'master'])
data = base64.b64decode(sys.stdin.read().encode('utf-8'))
data = json.loads(data.decode('utf-8'))
if data['_api'] != 2:
print("API version does not match")
msg = data['project'] + ' v' + data['version']
sys.stderr.write("[host] Updating AUR packages...\n")
sys.stderr.flush()
os.chdir(BASEDIR)
os.chdir(data['aur_pkgname'])
with io.open('PKGBUILD', 'w', encoding='utf-8') as fh:
fh.write(data['pkgbuild'])
commitaur(msg)
os.chdir(BASEDIR)
if data['use_git']:
os.chdir(data['aur_pkgname_git'])
subprocess.check_call(["sed", "s/pkgver=.*/pkgver=" + data['gitver'] + "/", "PKGBUILD", "-i"])
commitaur(msg)
os.chdir(BASEDIR)
subprocess.check_call(['./UPDATE-REQUIREMENTS.py'])
subprocess.check_call(['git', 'commit', '-asm', msg])
subprocess.check_call(['git', 'push'])
sys.stderr.write("[host] Done!\n")
sys.stderr.flush()
|
{
"content_hash": "89079639685949ee1ae2cd73d769998d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 98,
"avg_line_length": 28.57777777777778,
"alnum_prop": 0.6469673405909798,
"repo_name": "Kwpolska/cheqlist",
"id": "607f20b09f4d87c27029c8ef0947cb6971bc2ceb",
"size": "2988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".pypt/aurvm_host.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "88070"
},
{
"name": "Shell",
"bytes": "14402"
}
],
"symlink_target": ""
}
|
'''Train the model.'''
|
{
"content_hash": "85a5bf9e9cd4bbb6a515e0de27009183",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 23,
"alnum_prop": 0.5652173913043478,
"repo_name": "cavestruz/StrongCNN",
"id": "c0e2a3a25b2f60581b5e06a5a9b7b89f426abf70",
"size": "23",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/training/train_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7822"
},
{
"name": "Python",
"bytes": "94637"
},
{
"name": "Shell",
"bytes": "4089"
}
],
"symlink_target": ""
}
|
""" a text adventure game
"""
import sys
from random import randint, random
from random import choice as randchoice
from collections import defaultdict
from copy import copy
from utils import Loop, Container, TextInput, first, sjoin, nl, space
from board import StackableBoard, Loc, BaseTile
commands = dict(a="left", s="back", w="forward", d="right", p="pickup", i="inventory", m="map", l="look")
roomchance = Container(door=0.8, shaky_floor=0.1)
itemchance = Container(Gem=0.1, Key=0.05, Gold=0.25, Anvil=0.01)
itemchance = Container(Gem=0.1, Key=0.1, Gold=0.1, Anvil=0.2)
crystalchance = 0.1
size = 15
lhoriz = '─'
lvertical = '│'
doorchar = '⌺'
roomchar = '▢'
player_char = '☺'
absdirs = range(4)
class Item(BaseTile):
gem = key = gold = anvil = crystal = False
def __init__(self):
super().__init__(self)
self.name = self.__class__.__name__
def __eq__(self, other):
return self.__class__ == other.__class__
def __str__(self):
if self.gold : return "a piece of gold"
else : return a_an(self.name)
def __hash__(self):
return hash(self.name)
class Gem(Item) : pass
class Key(Item) : pass
class Gold(Item) : pass
class Anvil(Item) : pass
class Crystal(Item) : pass
class DirLoop(Loop):
def cw(self, n=1) : return super(DirLoop, self).next(n)
def ccw(self, n=1) : return super(DirLoop, self).prev(n)
def rotate_cw(self, n=1):
for _ in range(n):
self.items.append(self.items.pop(0))
self.update_attr()
class AdvBoard(StackableBoard):
def nlocs(self, loc):
x, y = loc
locs = ((x, y-1), (x+1, y), (x, y+1), (x-1, y))
locs = [Loc(*tup) for tup in locs]
return [(loc if self.valid(loc) else None) for loc in locs]
def center(self):
return Loc(self.width // 2, self.height // 2)
class Room(object):
def __init__(self, loc):
self.loc = loc
self.doors = list(self.gendoors())
self.item = genitem()
self.shaky_floor = bool(random() < roomchance.shaky_floor)
board[loc] = self
def __str__(self):
return roomchar
def gendoors(self):
inverse_dirs = (2,3,0,1)
for rd, nd, nloc in zip(absdirs, inverse_dirs, board.nlocs(self.loc)):
if not nloc:
yield False
else:
room = board.get_instance(Room, nloc)
yield bool( (random()<roomchance.door or room and room.doors[nd]) )
def show_doors(self, doors):
d = "%s"
h, v = lhoriz, lvertical
walls = ''.join([h*13, nl, v, space, d, space, v, space, d, space, v, space, d, space, v, nl])
return walls % tuple((doorchar if d else space) for d in doors)
class PlayerDir(object):
dir = DirLoop(absdirs, name="dir")
bearings = "North East South West".split()
def __init__(self, player):
self.player = player
self.update()
self.update_doors()
def update(self, dirnum=0):
self.dir.cw(dirnum)
self.absdir = DirLoop(board.dirlist).cw(self.dir.dir)
def update_doors(self):
self.doors = DirLoop(copy(self.player.room.doors))
self.doors.rotate_cw(self.dir.dir)
self.viewdoors = [self.doors[d] for d in (3,0,1)]
descdoors = ["on the left", "in front", "on the right"]
self.descdoors = [d[1] for d in zip(self.viewdoors, descdoors) if d[0]]
def bearing(self):
return "Bearing: " + self.bearings[self.dir.dir]
class Msg(object):
bump_wall = "You bump into the wall."
item_tpl = "You see %s lying on the floor."
pickedup = "You pick up %s."
shfloor = "This room appears to have a shaky floor."
ent_room = "You enter a room."
fall_through = "The floor can no longer hold your weight and starts breaking up into pieces; " \
"you fall down through the floor."
win = "You have found the goal of your journey: the Crystal of Light and Darkness! " \
"You activate its teleportation ability and it brings you back home, safe and sound." \
"\n\nThe End."
class Player(object):
items = defaultdict(int)
invtpl = "%20s %4d"
def __init__(self, room):
self.room = room
self.loc = room.loc
board[self.loc] = self
self.dir = PlayerDir(self)
def __str__(self):
return player_char
def move(self, ndir):
if not self.dir.doors[ndir]:
print(Msg.bump_wall)
return
M = [Msg.ent_room] # messages for the player
self.dir.update(ndir)
newloc = board.nextloc(self, self.dir.absdir)
self.room = Room(newloc) if board.empty(newloc) else board[newloc]
board.move(self, newloc)
self.dir.update_doors()
self.roomview(M)
if self.room.shaky_floor and any(i.anvil for i in self.items):
self.next_level(M)
print(nl.join(M))
def next_level(self, messages):
messages.append(Msg.fall_through)
board.reset()
self.room = Room(board.center())
self.loc = self.room.loc
board[self.loc] = self
itemchance["Crystal"] = crystalchance
def forward(self) : self.move(0)
def right(self) : self.move(1)
def back(self) : self.move(2)
def left(self) : self.move(3)
def pickup(self):
item = self.room.item
if item:
self.items[item] += 1
print(Msg.pickedup % item)
self.room.item = None
if item.crystal:
print(Msg.win)
sys.exit()
def inventory(self):
for item in self.items.items():
print(self.invtpl % item)
def roomview(self, M=None):
M = M or []
room = self.room
M[:] = [nl*5, room.show_doors(self.dir.viewdoors) + nl] + M + [self.dir.bearing()]
if room.item : M.append(Msg.item_tpl % room.item)
if room.shaky_floor : M.append(Msg.shfloor)
self.doormsg(M)
return M
def look(self):
print(nl*5)
print(nl.join(self.roomview()))
def doormsg(self, messages):
descdoors = copy(self.dir.descdoors)
if descdoors:
msg = "You see a door "
end = " of you."
_and = " and "
if len(descdoors) == 1:
msg += first(descdoors) + end
elif len(descdoors) == 2:
msg += sjoin(descdoors, _and) + end
else:
last = descdoors.pop()
msg += sjoin(descdoors, ", ") + _and + last + end
messages.append(msg)
def map(self):
board.draw()
class Adv(object):
pass
class BasicInterface(object):
def run(self):
player.look()
while True:
cmd = TextInput("(a|s|w|d|p|i|m|l)").getval()
getattr(player, commands[cmd])()
def genitem():
for name, chance in itemchance.items():
if chance >= random():
return globals()[name]()
def a_an(item):
return "an " + item if item.startswith('A') else "a " + item
if __name__ == "__main__":
board = AdvBoard(size, space, screen_sep=0)
room = Room(board.center())
player = Player(room)
BasicInterface().run()
|
{
"content_hash": "ed8bb4a83a89a07adcb84542c67ee085",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 110,
"avg_line_length": 28.570342205323193,
"alnum_prop": 0.5489752462070802,
"repo_name": "pythonbyexample/PBE",
"id": "9cf496264282c2f234d25d1d46734ac2afa85a97",
"size": "7574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter01/adv.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "251897"
},
{
"name": "CSS",
"bytes": "90956"
},
{
"name": "JavaScript",
"bytes": "85995"
},
{
"name": "Python",
"bytes": "1255167"
},
{
"name": "Shell",
"bytes": "82"
},
{
"name": "VimL",
"bytes": "46347"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Feb 16 13:41:19 2016
Simple Lofted Wing between airconics primitive classes using the AddSurfaceLoft
airconics tool
@author: pchambers
"""
if __name__ == "__main__":
import airconics
import airconics.AirCONICStools as act
# Initialise the display
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
# Add NACA 4 digit airfoils to loft between:
Af2 = airconics.primitives.Airfoil([0., 0., 0.], ChordLength=3.,
Naca4Profile='2412')
display.DisplayShape(Af2.Curve, update=True, color='GREEN')
Af3 = airconics.primitives.Airfoil([0., 5., 0.], ChordLength=1.,
Naca4Profile='0012')
display.DisplayShape(Af3.Curve, update=True, color='GREEN')
Af4 = airconics.primitives.Airfoil([0., 6., 0.2], ChordLength=0.2,
Naca4Profile='0012')
display.DisplayShape(Af4.Curve, update=True, color='GREEN')
surf = act.AddSurfaceLoft([Af2, Af3, Af4])
# Note that surf is a TOPO_DS Shape, and hence no surf.Shape is required for display
display.DisplayShape(surf, update=True)
start_display()
|
{
"content_hash": "8880c15e13184cdb97059c1dd10212a5",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 88,
"avg_line_length": 36.55882352941177,
"alnum_prop": 0.6395816572807723,
"repo_name": "p-chambers/occ_airconics",
"id": "a61b5a37775a1fe70f85618e2ab890f9053a8583",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/core/simple_wing_loft.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Groovy",
"bytes": "308"
},
{
"name": "Python",
"bytes": "250769"
},
{
"name": "Shell",
"bytes": "4599"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('campaign', '0004_newsletter_from_email'),
('fluentcms_emailtemplates', '__first__'),
]
operations = [
migrations.AlterField(
model_name='campaign',
name='template',
field=models.ForeignKey(verbose_name='Template', to='fluentcms_emailtemplates.EmailTemplate'),
preserve_default=True,
),
migrations.DeleteModel(
name='MailTemplate',
),
]
|
{
"content_hash": "861c15206ed8c3d66c86ff1a1808924a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 106,
"avg_line_length": 26.26086956521739,
"alnum_prop": 0.5993377483443708,
"repo_name": "bashu/fluentcms-campaign",
"id": "62505ac3f6103ab145a35a0aa13d73c7a0903e3e",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "campaign/migrations/0005_auto_20160128_0634.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3428"
},
{
"name": "Python",
"bytes": "42319"
}
],
"symlink_target": ""
}
|
from django.db import models
from individuals.models import Individual
class Variant(models.Model):
individual = models.ForeignKey(Individual, on_delete=models.CASCADE)
index = models.TextField(db_index=True)#ex. 1-2387623-G-T
pos_index = models.TextField(db_index=True)#ex. 1-326754756
#First save all 9 VCF columns
chr = models.TextField(verbose_name="Chr", db_index=True)
pos = models.IntegerField(db_index=True)
variant_id = models.TextField(verbose_name="ID", db_index=True)
ref = models.TextField(null=True, blank=True, db_index=True)
alt = models.TextField(null=True, blank=True, db_index=True)
qual = models.FloatField(db_index=True)
filter = models.TextField(db_index=True)
info = models.TextField(null=True, blank=True)
format = models.TextField(null=True, blank=True, db_index=True)
genotype_col = models.TextField(null=True, blank=True, db_index=True)
genotype = models.TextField(db_index=True)
#metrics from genotype_info DP field
read_depth = models.IntegerField()
gene = models.TextField(null=True, blank=True, db_index=True)
mutation_type = models.TextField(null=True, db_index=True)
vartype = models.TextField(null=True, db_index=True)
#Annotation From 1000genomes
genomes1k_maf = models.FloatField(null=True, blank=True, verbose_name="1000 Genomes Frequency", db_index=True)
dbsnp_maf = models.FloatField(null=True, blank=True, verbose_name="dbSNP Frequency", db_index=True)
esp_maf = models.FloatField(null=True, blank=True, verbose_name="ESP6500 Frequency", db_index=True)
#dbsnp
# dbsnp_pm = models.TextField(null=True, blank=True)
# dbsnp_clnsig = models.TextField(null=True, blank=True)
dbsnp_build = models.IntegerField(null=True, db_index=True)
#VEP
sift = models.FloatField(null=True, blank=True, db_index=True)
sift_pred = models.TextField(null=True, blank=True, db_index=True)
polyphen2 = models.FloatField(null=True, blank=True, db_index=True)
polyphen2_pred = models.TextField(null=True, blank=True, db_index=True)
condel = models.FloatField(null=True, blank=True, db_index=True)
condel_pred = models.TextField(null=True, blank=True, db_index=True)
dann = models.FloatField(null=True, blank=True, db_index=True)
cadd = models.FloatField(null=True, blank=True, db_index=True)
#hi_index
hi_index_str = models.TextField(null=True, blank=True, db_index=True)
hi_index = models.FloatField(null=True, blank=True, db_index=True)
hi_index_perc = models.FloatField(null=True, blank=True, db_index=True)
#OMIM
is_at_omim = models.BooleanField(default=False, db_index=True)
#HGMD
is_at_hgmd = models.BooleanField(default=False, db_index=True)
hgmd_entries = models.TextField(null=True, blank=True, db_index=True)
#snpeff annotation
snpeff_effect = models.TextField(null=True, blank=True, db_index=True)
snpeff_impact = models.TextField(null=True, blank=True, db_index=True)
snpeff_func_class = models.TextField(null=True, blank=True, db_index=True)
snpeff_codon_change = models.TextField(null=True, blank=True, db_index=True)
snpeff_aa_change = models.TextField(null=True, blank=True, db_index=True)
# snpeff_aa_len = models.TextField(null=True, blank=True)
snpeff_gene_name = models.TextField(null=True, blank=True, db_index=True)
snpeff_biotype = models.TextField(null=True, blank=True, db_index=True)
snpeff_gene_coding = models.TextField(null=True, blank=True, db_index=True)
snpeff_transcript_id = models.TextField(null=True, blank=True, db_index=True)
snpeff_exon_rank = models.TextField(null=True, blank=True, db_index=True)
# snpeff_genotype_number = models.TextField(null=True, blank=True)
#vep annotation
vep_allele = models.TextField(null=True, blank=True, db_index=True)
vep_gene = models.TextField(null=True, blank=True, db_index=True)
vep_feature = models.TextField(null=True, blank=True, db_index=True)
vep_feature_type = models.TextField(null=True, blank=True, db_index=True)
vep_consequence = models.TextField(null=True, blank=True, db_index=True)
vep_cdna_position = models.TextField(null=True, blank=True, db_index=True)
vep_cds_position = models.TextField(null=True, blank=True, db_index=True)
vep_protein_position = models.TextField(null=True, blank=True, db_index=True)
vep_amino_acids = models.TextField(null=True, blank=True, db_index=True)
vep_codons = models.TextField(null=True, blank=True, db_index=True)
vep_existing_variation = models.TextField(null=True, blank=True, db_index=True)
vep_distance = models.TextField(null=True, blank=True, db_index=True)
vep_strand = models.TextField(null=True, blank=True, db_index=True)
vep_symbol = models.TextField(null=True, blank=True, db_index=True)
vep_symbol_source = models.TextField(null=True, blank=True, db_index=True)
vep_sift = models.TextField(null=True, blank=True, db_index=True)
vep_polyphen = models.TextField(null=True, blank=True, db_index=True)
vep_condel = models.TextField(null=True, blank=True, db_index=True)
#new annotations
ensembl_clin_HGMD = models.BooleanField(default=False, db_index=True)
clinvar_CLNSRC = models.TextField(null=True, blank=True, db_index=True)
# ensembl_phen.CLIN_pathogenic
#ensembl_phen.CLIN_likely_pathogenic
# ensembl_clin.CLIN_pathogenic
#DBNFSP
SIFT_score = models.TextField(null=True, blank=True, db_index=True)
SIFT_converted_rankscore = models.TextField(null=True, blank=True, db_index=True)
# SIFT_pred = models.TextField(null=True, blank=True)
Uniprot_acc_Polyphen2 = models.TextField(null=True, blank=True, db_index=True)
Uniprot_id_Polyphen2 = models.TextField(null=True, blank=True, db_index=True)
Uniprot_aapos_Polyphen2 = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HDIV_score = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HDIV_rankscore = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HDIV_pred = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HVAR_score = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HVAR_rankscore = models.TextField(null=True, blank=True, db_index=True)
Polyphen2_HVAR_pred = models.TextField(null=True, blank=True, db_index=True)
LRT_score = models.TextField(null=True, blank=True, db_index=True)
LRT_converted_rankscore = models.TextField(null=True, blank=True, db_index=True)
LRT_pred = models.TextField(null=True, blank=True, db_index=True)
LRT_Omega = models.TextField(null=True, blank=True, db_index=True)
MutationTaster_score = models.TextField(null=True, blank=True, db_index=True)
MutationTaster_converted_rankscore = models.TextField(null=True, blank=True, db_index=True)
MutationTaster_pred = models.TextField(null=True, blank=True, db_index=True)
MutationTaster_model = models.TextField(null=True, blank=True, db_index=True)
MutationTaster_AAE = models.TextField(null=True, blank=True, db_index=True)
MutationAssessor_UniprotID = models.TextField(null=True, blank=True, db_index=True)
MutationAssessor_variant = models.TextField(null=True, blank=True, db_index=True)
MutationAssessor_score = models.TextField(null=True, blank=True, db_index=True)
MutationAssessor_rankscore = models.TextField(null=True, blank=True, db_index=True)
MutationAssessor_pred = models.TextField(null=True, blank=True, db_index=True)
FATHMM_score = models.TextField(null=True, blank=True, db_index=True)
FATHMM_converted_rankscore = models.TextField(null=True, blank=True, db_index=True)
FATHMM_pred = models.TextField(null=True, blank=True, db_index=True)
PROVEAN_score = models.TextField(null=True, blank=True, db_index=True)
PROVEAN_converted_rankscore = models.TextField(null=True, blank=True, db_index=True)
PROVEAN_pred = models.TextField(null=True, blank=True, db_index=True)
Transcript_id_VEST3 = models.TextField(null=True, blank=True, db_index=True)
Transcript_var_VEST3 = models.TextField(null=True, blank=True, db_index=True)
VEST3_score = models.TextField(null=True, blank=True, db_index=True)
VEST3_rankscore = models.TextField(null=True, blank=True, db_index=True)
MetaSVM_score = models.TextField(null=True, blank=True, db_index=True)
MetaSVM_rankscore = models.TextField(null=True, blank=True, db_index=True)
MetaSVM_pred = models.TextField(null=True, blank=True, db_index=True)
MetaLR_score = models.TextField(null=True, blank=True, db_index=True)
MetaLR_rankscore = models.TextField(null=True, blank=True, db_index=True)
MetaLR_pred = models.TextField(null=True, blank=True, db_index=True)
Reliability_index = models.TextField(null=True, blank=True, db_index=True)
CADD_raw = models.TextField(null=True, blank=True, db_index=True)
CADD_raw_rankscore = models.TextField(null=True, blank=True, db_index=True)
CADD_phred = models.TextField(null=True, blank=True, db_index=True)
DANN_score = models.TextField(null=True, blank=True, db_index=True)
DANN_rankscore = models.TextField(null=True, blank=True, db_index=True)
fathmm_MKL_coding_score = models.TextField(null=True, blank=True, db_index=True)
fathmm_MKL_coding_rankscore = models.TextField(null=True, blank=True, db_index=True)
fathmm_MKL_coding_pred = models.TextField(null=True, blank=True, db_index=True)
fathmm_MKL_coding_group = models.TextField(null=True, blank=True, db_index=True)
Eigen_raw = models.TextField(null=True, blank=True, db_index=True)
Eigen_phred = models.TextField(null=True, blank=True, db_index=True)
Eigen_raw_rankscore = models.TextField(null=True, blank=True, db_index=True)
Eigen_PC_raw = models.TextField(null=True, blank=True, db_index=True)
Eigen_PC_raw_rankscore = models.TextField(null=True, blank=True, db_index=True)
GenoCanyon_score = models.TextField(null=True, blank=True, db_index=True)
GenoCanyon_score_rankscore = models.TextField(null=True, blank=True, db_index=True)
integrated_fitCons_score = models.TextField(null=True, blank=True, db_index=True)
integrated_fitCons_rankscore = models.TextField(null=True, blank=True, db_index=True)
integrated_confidence_value = models.TextField(null=True, blank=True, db_index=True)
GM12878_fitCons_score = models.TextField(null=True, blank=True, db_index=True)
GM12878_fitCons_rankscore = models.TextField(null=True, blank=True, db_index=True)
GM12878_confidence_value = models.TextField(null=True, blank=True, db_index=True)
H1_hESC_fitCons_score = models.TextField(null=True, blank=True, db_index=True)
H1_hESC_fitCons_rankscore = models.TextField(null=True, blank=True, db_index=True)
H1_hESC_confidence_value = models.TextField(null=True, blank=True, db_index=True)
HUVEC_fitCons_score = models.TextField(null=True, blank=True, db_index=True)
HUVEC_fitCons_rankscore = models.TextField(null=True, blank=True, db_index=True)
HUVEC_confidence_value = models.TextField(null=True, blank=True, db_index=True)
GERP_NR = models.TextField(null=True, blank=True, db_index=True)
GERP_RS = models.TextField(null=True, blank=True, db_index=True)
GERP_RS_rankscore = models.TextField(null=True, blank=True, db_index=True)
phyloP100way_vertebrate = models.TextField(null=True, blank=True, db_index=True)
phyloP100way_vertebrate_rankscore = models.TextField(null=True, blank=True, db_index=True)
phyloP20way_mammalian = models.TextField(null=True, blank=True, db_index=True)
phyloP20way_mammalian_rankscore = models.TextField(null=True, blank=True, db_index=True)
phastCons100way_vertebrate = models.TextField(null=True, blank=True, db_index=True)
phastCons100way_vertebrate_rankscore = models.TextField(null=True, blank=True, db_index=True)
phastCons20way_mammalian = models.TextField(null=True, blank=True, db_index=True)
phastCons20way_mammalian_rankscore = models.TextField(null=True, blank=True, db_index=True)
SiPhy_29way_pi = models.TextField(null=True, blank=True, db_index=True)
SiPhy_29way_logOdds = models.TextField(null=True, blank=True, db_index=True)
SiPhy_29way_logOdds_rankscore = models.TextField(null=True, blank=True, db_index=True)
clinvar_rs = models.TextField(null=True, blank=True, db_index=True)
clinvar_clnsig = models.TextField(null=True, blank=True, db_index=True)
clinvar_trait = models.TextField(null=True, blank=True, db_index=True)
clinvar_golden_stars = models.TextField(null=True, blank=True, db_index=True)
mcap_score = models.FloatField(null=True, blank=True, db_index=True)
mcap_rankscore = models.FloatField(null=True, blank=True, db_index=True)
mcap_pred = models.TextField(null=True, blank=True, db_index=True)
revel_score = models.TextField(null=True, blank=True, db_index=True)
def get_fields(self):
return [(field.name, field.verbose_name.title().replace('_', ' ')) for field in Variant._meta.fields]
|
{
"content_hash": "a3e4a52d7244c74213308f5826a4045e",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 114,
"avg_line_length": 63.28846153846154,
"alnum_prop": 0.7342752962625342,
"repo_name": "raonyguimaraes/mendelmd",
"id": "b5738cb6a0a789893a4b5cb18f60b5156ba912b6",
"size": "13164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "variants/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1089"
},
{
"name": "C++",
"bytes": "748"
},
{
"name": "CSS",
"bytes": "159812"
},
{
"name": "Dockerfile",
"bytes": "3164"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "HTML",
"bytes": "889609"
},
{
"name": "JavaScript",
"bytes": "471651"
},
{
"name": "PHP",
"bytes": "52678"
},
{
"name": "Python",
"bytes": "754270"
},
{
"name": "Shell",
"bytes": "14174"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'books'
    verbose_name = '書籍管理'  # "Book management"
|
{
"content_hash": "8eb17f75c4d6414976bd17a46e31c81d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 18.5,
"alnum_prop": 0.7027027027027027,
"repo_name": "djangogirlstaipei/eshop",
"id": "30a7697d75ee47b8efc0f592eaae425d667000b3",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookshop/books/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "870"
},
{
"name": "HTML",
"bytes": "6442"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Nginx",
"bytes": "1222"
},
{
"name": "Python",
"bytes": "17935"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
}
|
import re
from migrate.changeset import UniqueConstraint
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from payload.openstack.common.gettextutils import _ # noqa
from payload.openstack.common import log as logging
from payload.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
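# e.g. sanitize_db_url("mysql://user:secret@localhost/db")
#      -> "mysql://****:****@localhost/db"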
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
        results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(0, len(sort_keys)):
crit_attrs = []
for j in range(0, i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
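# Hedged usage sketch (table names are hypothetical): combined with get_table()
# above, InsertFromSelect compiles to a plain INSERT ... SELECT statement, e.g.
#
#     src = get_table(migrate_engine, 'volumes')
#     dst = get_table(migrate_engine, 'volumes__tmp__')
#     migrate_engine.execute(InsertFromSelect(dst, src.select()))
#
# which emits roughly: INSERT INTO volumes__tmp__ SELECT ... FROM volumes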
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an instance "
                "of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
:param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
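# Hedged usage sketch (table, constraint and column names are hypothetical):
#
#     drop_unique_constraint(migrate_engine, 'services', 'uniq_services0host',
#                            'host',
#                            counter=Column('counter', BigInteger()))
#
# The keyword argument is only needed on sqlite, where a column such as the
# hypothetical BigInteger `counter` would otherwise be reflected as NullType.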
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = select(columns_for_select,
group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
    raise ColumnError(_("Unsupported id column type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
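# Net effect of the conversions above (summary, not part of the original
# module): with the boolean scheme a live row has deleted=False and a removed
# row deleted=True; with the id-type scheme a live row stores 0 (or '' for
# string ids) and a soft-deleted row stores its own id in `deleted`, so unique
# constraints that include the `deleted` column do not collide when several
# rows with the same values are soft-deleted.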
|
{
"content_hash": "f208620b702a7e8e1b20cee8702688e7",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 79,
"avg_line_length": 37.767151767151766,
"alnum_prop": 0.6178575360563691,
"repo_name": "kickstandproject/payload",
"id": "e5bc72681e5a5b23d1ed0b763993b5dfa6ba6337",
"size": "19025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "payload/openstack/common/db/sqlalchemy/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "348293"
}
],
"symlink_target": ""
}
|
import unittest
from ..validators import Validate
class TestValidate(unittest.TestCase):
def setUp(self):
self.validate = Validate(lambda x: x == 0, "must be 0")
def test_it_returns_false_when_validation_fails(self):
self.assertFalse(self.validate(1))
def test_it_returns_true_when_validation_is_ok(self):
self.assertTrue(self.validate(0))
def test_it_raises_exception_if_message_is_none(self):
with self.assertRaises(ValueError):
Validate(lambda x: x == 0, None)
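# A minimal sketch of the Validate callable exercised by these tests, assuming
# only the behaviour visible here (the real implementation lives in
# ..validators and may differ):
#
#     class Validate(object):
#         def __init__(self, predicate, message):
#             if message is None:
#                 raise ValueError("message must not be None")
#             self.predicate = predicate
#             self.message = message
#
#         def __call__(self, value):
#             return bool(self.predicate(value))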
|
{
"content_hash": "a56c9493daf59364d90f26e77f06c997",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 26.36842105263158,
"alnum_prop": 0.7145708582834331,
"repo_name": "mlackman/pyavali",
"id": "f99c9628c8003856fb57592ebff37b9a93c30da8",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyavali/tests/test_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17808"
}
],
"symlink_target": ""
}
|
from . import comparison, pythagorean, rotation, vector
|
{
"content_hash": "dffcccfc7bfbda69b32150f83b7f4589",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 55,
"avg_line_length": 55,
"alnum_prop": 0.8181818181818182,
"repo_name": "tea2code/gamemath",
"id": "9a5429ab97e8124e74334d0d0c326e8c38ca4040",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamemath/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10794"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.model_selection import H2OModelSelectionEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
# test to find out why maxr is slow
def test_glm_backward_compare():
tst_data = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/model_selection/backwardBinomial200C50KRows.csv")
predictors = tst_data.columns[0:-1]
response_col = 'response'
weight = 'wt'
tst_data['wt']=1
tst_data[tst_data['response']==1,'wt'] = 100
tst_data['response']=tst_data['response'].asfactor()
min_predictor_num = 200
backward_model = H2OModelSelectionEstimator(family = 'binomial', weights_column = weight, mode='backward',
min_predictor_number=min_predictor_num)
backward_model.train(predictors, response_col, training_frame=tst_data)
backward_model_coeff = backward_model.coef()[0]
glm_model = H2OGeneralizedLinearEstimator(family = 'binomial',
lambda_ = 0,
compute_p_values = True,
weights_column = weight)
glm_model.train(predictors, response_col, training_frame=tst_data)
glm_coeff = glm_model.coef()
pyunit_utils.assertEqualCoeffDicts(glm_coeff, backward_model_coeff, tol = 1e-6)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_glm_backward_compare)
else:
test_glm_backward_compare()
|
{
"content_hash": "bbfdfa0c74f7206500fe676f65cd41c5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 142,
"avg_line_length": 46.97142857142857,
"alnum_prop": 0.6496350364963503,
"repo_name": "h2oai/h2o-3",
"id": "ce8309a3e16db93138d4ab71050b3303d9479811",
"size": "1644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/modelselection/pyunit_PUBDEV_8655_glm_backward_compare_large.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
}
|
"""
#Downloaded from http://code.activestate.com/recipes/576918/
#Created by Michael Fogleman
#Short URL Generator
"""
#DEFAULT_ALPHABET = 'JedR8LNFY2j6MrhkBSADUyfP5amuH9xQCX4VqbgpsGtnW7vc3TwKE'
#DEFAULT_BLOCK_SIZE = 22
DEFAULT_ALPHABET = 'ed82j6rh1kyfo5almu9x4iqzbgpstn7vc3w'
DEFAULT_BLOCK_SIZE = 18
class UrlEncoder(object):
def __init__(self, alphabet=DEFAULT_ALPHABET, block_size=DEFAULT_BLOCK_SIZE):
self.alphabet = alphabet
self.block_size = block_size
self.mask = (1 << block_size) - 1
self.mapping = range(block_size)
self.mapping.reverse()
def encode_url(self, n, min_length=0):
return self.enbase(self.encode(n), min_length)
def decode_url(self, n):
return self.decode(self.debase(n))
def encode(self, n):
return (n & ~self.mask) | self._encode(n & self.mask)
def _encode(self, n):
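        # Permute the low block_size bits of n by reversing their order (via
        # self.mapping), so consecutive ids map to visually unrelated codes.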
result = 0
for i, b in enumerate(self.mapping):
if n & (1 << i):
result |= (1 << b)
return result
def decode(self, n):
return (n & ~self.mask) | self._decode(n & self.mask)
def _decode(self, n):
result = 0
for i, b in enumerate(self.mapping):
if n & (1 << b):
result |= (1 << i)
return result
def enbase(self, x, min_length=0):
result = self._enbase(x)
padding = self.alphabet[0] * (min_length - len(result))
return '%s%s' % (padding, result)
def _enbase(self, x):
n = len(self.alphabet)
if x < n:
return self.alphabet[x]
return self.enbase(x/n) + self.alphabet[x%n]
def debase(self, x):
n = len(self.alphabet)
result = 0
for i, c in enumerate(reversed(x)):
result += self.alphabet.index(c) * (n**i)
return result
DEFAULT_ENCODER = UrlEncoder()
def encode(n):
return DEFAULT_ENCODER.encode(n)
def decode(n):
return DEFAULT_ENCODER.decode(n)
def enbase(n, min_length=0):
return DEFAULT_ENCODER.enbase(n, min_length)
def debase(n):
return DEFAULT_ENCODER.debase(n)
def encode_url(n, min_length=0):
return DEFAULT_ENCODER.encode_url(n, min_length)
def decode_url(n):
return DEFAULT_ENCODER.decode_url(n)
if __name__ == '__main__':
for a in range(0, 200000, 37):
b = encode(a)
c = enbase(b)
d = debase(c)
e = decode(d)
assert a == e
assert b == d
c = (' ' * (7 - len(c))) + c
#print '%6d %12d %s %12d %6d' % (a, b, c, d, e)
|
{
"content_hash": "04c352824bf1f27a38ea32e8977c924d",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 81,
"avg_line_length": 30.305882352941175,
"alnum_prop": 0.5683229813664596,
"repo_name": "benosteen/RDFDatabank",
"id": "d46e946580d001305235ac0349db77ef7e5d7d55",
"size": "2600",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rdfdatabank/lib/short_pid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8"
},
{
"name": "Python",
"bytes": "1410423"
},
{
"name": "Shell",
"bytes": "201"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hbase import hbase
from hbase_service import hbase_service
class HbaseRegionServer(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env, action = None):
import params
env.set_params(params)
hbase('regionserver', action)
def start(self, env):
import params
env.set_params(params)
self.configure(env, action = 'start') # for security
hbase_service( 'regionserver',
action = 'start'
)
def stop(self, env):
import params
env.set_params(params)
hbase_service( 'regionserver',
action = 'stop'
)
def status(self, env):
import status_params
env.set_params(status_params)
pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
check_process_status(pid_file)
def decommission(self, env):
print "Decommission not yet implemented!"
if __name__ == "__main__":
HbaseRegionServer().execute()
|
{
"content_hash": "8a44fa66acb44c018422fe0f8a5bf3f0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 26.876923076923077,
"alnum_prop": 0.723526044647968,
"repo_name": "arenadata/ambari",
"id": "a5121b85f48e6897259037991979f83023c93c4c",
"size": "1769",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/BigInsights/4.0/services/AMBARI_METRICS/package/scripts/hbase_regionserver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
import ftplib
import socket
import itertools
from yapsy.IPlugin import IPlugin
from time_functions import SpeedBenchmark
class FTP(IPlugin):
time_benchmark = SpeedBenchmark()
list_usrnames = []
list_passwds = []
pass_found = 0
def get_usernames(self, file_path):
try:
self.list_usrnames = [line.strip() for line in open(file_path)]
return self.list_usrnames
except:
self.get_usernames(input("Invalid file. Try again: "))
def get_passwords(self, file_path):
try:
self.list_passwds = [line.strip() for line in open(file_path)]
return self.list_passwds
except:
self.get_passwords(input("Invalid file. Try again: "))
def connection(self, user_host, user_port, user_name, pass_word, ftp_s):
if ftp_s == "y":
ftp = ftplib.FTP_TLS(host=user_host)
else:
ftp = ftplib.FTP(host=user_host)
try:
ftp.login(user=user_name, passwd=pass_word)
return 1
except (socket.error, ftplib.error_reply):
raise SystemExit("\n[!] Could not connect to server!")
except:
return 0
def exec_module(self):
menu_break = False
usr_quit = input("Quit when valid pair found? (y/n) ")
ftp_s = input("Explicit FTP Secure (FTPS, if normal FTP say no!)? (y/n) ")
usr_host = input("Host: ")
usr_port = input("Port: ")
file_usernames = self.get_usernames(input("Path to usernames file: "))
file_passwords = self.get_passwords(input("Path to passwords file: "))
self.time_benchmark.start_benchmark()
for name in self.list_usrnames:
if menu_break == True:
break
print("Trying username {}:".format(name))
for passwd in self.list_passwds:
print(" {}".format(passwd))
code_return = self.connection(usr_host, usr_port, name, passwd, ftp_s)
if code_return == 1:
print("Logged in successfully! Username {} and password {}".format(name, passwd))
self.pass_found += 1
if usr_quit == "y":
menu_break = True
break
self.time_benchmark.stop_benchmark()
print("\nFound {} username/password combos!\n".format(self.pass_found))
        print("Took {} to process {} usernames and {} passwords ({} combinations).".format(
            self.time_benchmark.get_benchmark(),
            len(self.list_usrnames),
            len(self.list_passwds),
            len(list(itertools.product(self.list_usrnames, self.list_passwds)))))
|
{
"content_hash": "e10a1559b1bf5746bb03b4789eccdfe8",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 120,
"avg_line_length": 38.62162162162162,
"alnum_prop": 0.5339398180545836,
"repo_name": "Plasticoo/NNBruteforcer",
"id": "6995c416c186a202a77fbc24c5019bf885fc7257",
"size": "2858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plugins/ftp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10653"
}
],
"symlink_target": ""
}
|
from aql.util_types.aql_list_types import toSequence
__all__ = (
'Builder', 'FileBuilder'
)
import os
import errno
from aql.util_types import FilePath
from aql.utils import simpleObjectSignature, simplifyValue, executeCommand, eventDebug, groupPathsByDir, groupItems, relativeJoin, relativeJoinList
from aql.values import ValueBase, FileChecksumValue, FileTimestampValue, SimpleValue
#//===========================================================================//
@eventDebug
def eventExecCmd( settings, cmd, cwd, env ):
# from aql.utils import logDebug
# cmd = ' '.join( cmd )
# logDebug("EXEC: %s" % (cmd, ) )
pass
#//===========================================================================//
def _makeBuildPath( path_dir, _path_cache = set() ):
if path_dir not in _path_cache:
if not os.path.isdir( path_dir ):
try:
os.makedirs( path_dir )
except OSError as e:
if e.errno != errno.EEXIST:
raise
_path_cache.add( path_dir )
#//===========================================================================//
def _makeBuildPaths( dirnames ):
for dirname in dirnames:
_makeBuildPath( dirname )
#//===========================================================================//
def _splitFileName( file_path, ext = None, prefix = None, suffix = None ):
if isinstance( file_path, ValueBase ):
file_path = file_path.get()
dirname, filename = os.path.split( file_path )
name, path_ext = os.path.splitext( filename )
if ext is None: ext = path_ext
if prefix: name = prefix + name
if suffix: name += suffix
filename = name + ext
return dirname, filename
#//===========================================================================//
def _splitFileNames( file_paths, ext = None, prefix = None, suffix = None ):
dirnames = []
filenames = []
for file_path in file_paths:
dirname, filename = _splitFileName( file_path, ext, prefix, suffix )
dirnames.append( dirname )
filenames.append( filename )
return dirnames, filenames
#//===========================================================================//
def _fileSinature2Type( file_signature_type ):
return FileTimestampValue if file_signature_type == 'timestamp' else FileChecksumValue
#//===========================================================================//
class BuilderInitiator( object ):
__slots__ = ( 'is_initiated', 'builder', 'options', 'args', 'kw' )
def __init__( self, builder, options, args, kw ):
self.is_initiated = False
self.builder = builder
self.options = options
self.args = self.__storeArgs( args )
self.kw = self.__storeKw( kw )
#//=======================================================//
def __storeArgs( self, args ):
return tuple( map( self.options._storeValue, args ) )
#//=======================================================//
def __loadArgs( self ):
return tuple( map( self.options._loadValue, self.args ) )
#//=======================================================//
def __storeKw( self, kw ):
storeValue = self.options._storeValue
return { name: storeValue( value ) for name, value in kw.items() }
#//=======================================================//
def __loadKw( self ):
loadValue = self.options._loadValue
return { name: loadValue( value ) for name, value in self.kw.items() }
#//=======================================================//
def initiate( self ):
if self.is_initiated:
return self.builder
builder = self.builder
kw = self.__loadKw()
args = self.__loadArgs()
options = self.options
builder._initAttrs( options )
builder.__init__( options, *args, **kw )
if not hasattr( builder, 'name' ):
builder.setName()
if not hasattr( builder, 'signature' ):
builder.setSignature()
self.is_initiated = True
return builder
#//=======================================================//
def canBuildBatch(self):
return self.builder.canBuildBatch()
def canBuild(self):
return self.builder.canBuild()
def isBatch(self):
return self.builder.isBatch()
#//===========================================================================//
# noinspection PyAttributeOutsideInit
class Builder (object):
"""
Base class for all builders
'name' - uniquely identifies builder
'signature' - uniquely identifies builder's parameters
"""
NAME_ATTRS = None
SIGNATURE_ATTRS = None
#//-------------------------------------------------------//
def __new__(cls, options, *args, **kw):
self = super(Builder, cls).__new__(cls)
self.makeValue = self.makeSimpleValue
self.default_value_type = SimpleValue
self.__is_batch = (options.batch_build.get() or not self.canBuild()) and self.canBuildBatch()
return BuilderInitiator( self, options, args, kw )
#//-------------------------------------------------------//
def _initAttrs( self, options ):
self.build_dir = options.build_dir.get()
self.build_path = options.build_path.get()
self.relative_build_paths = options.relative_build_paths.get()
self.file_value_type = _fileSinature2Type( options.file_signature.get() )
self.env = options.env.get().dump()
#//-------------------------------------------------------//
def canBuildBatch(self):
return self.__class__.buildBatch != Builder.buildBatch
#//-------------------------------------------------------//
def canBuild(self):
return self.__class__.build != Builder.build
#//-------------------------------------------------------//
def isBatch(self):
return self.__is_batch
#//-------------------------------------------------------//
def initiate( self ):
return self
#//-------------------------------------------------------//
def setName( self ):
cls = self.__class__
name = [ cls.__module__, cls.__name__, simplifyValue( self.build_path ), bool(self.relative_build_paths) ]
if self.NAME_ATTRS:
for attr_name in self.NAME_ATTRS:
value = getattr( self, attr_name )
value = simplifyValue( value )
name.append( value )
self.name = simpleObjectSignature( name )
#//-------------------------------------------------------//
def setSignature( self ):
sign = []
if self.SIGNATURE_ATTRS:
for attr_name in self.SIGNATURE_ATTRS:
value = getattr( self, attr_name )
value = simplifyValue( value )
sign.append( value )
self.signature = simpleObjectSignature( sign )
#//-------------------------------------------------------//
def clear( self, node ):
node.removeTargets()
#//-------------------------------------------------------//
def depends( self, node ):
"""
Could be used to dynamically generate dependency nodes
Returns list of dependency nodes or None
"""
return None
#//-------------------------------------------------------//
def replace( self, node ):
"""
Could be used to dynamically replace sources
Returns list of nodes/values or None (if sources are not changed)
"""
return None
#//-------------------------------------------------------//
def split( self, node ):
"""
Could be used to dynamically split building sources to several nodes
Returns list of nodes or None
"""
return None
#//-------------------------------------------------------//
def splitSingle( self, node ):
"""
Splits each source to separate nodes
Returns list of nodes or None
"""
source_values = node.getSourceValues()
if len(source_values) < 2:
return None
return tuple( node.split( src_value ) for src_value in source_values )
#//-------------------------------------------------------//
def splitBatchByBuildDir( self, node ):
src_groups = self.groupSourcesByBuildDir( node )
if len(src_groups) < 2:
return None
return tuple( node.split( src_group ) for src_group in src_groups )
#//-------------------------------------------------------//
def build( self, node ):
"""
Builds a node
Returns a build output string or None
"""
raise NotImplementedError( "Abstract method. It should be implemented in a child class." )
#//-------------------------------------------------------//
def buildBatch( self, node ):
"""
Builds a node
Returns a build output string or None
"""
raise NotImplementedError( "Abstract method. It should be implemented in a child class." )
#//-------------------------------------------------------//
def getTargetValues( self, source_values ):
"""
If it's possible returns target values of the node, otherwise None
"""
return None
#//-------------------------------------------------------//
def getTraceName(self, brief ):
return self.__class__.__name__
#//-------------------------------------------------------//
def getTraceSources( self, node, brief ):
return node.getSourceValues()
#//-------------------------------------------------------//
def getTraceTargets( self, node, brief ):
return node.getBuildTargetValues()
#//-------------------------------------------------------//
def getBuildStrArgs( self, node, brief ):
try:
name = self.getTraceName( brief )
except Exception:
name = ''
try:
sources = self.getTraceSources( node, brief )
except Exception:
sources = None
try:
targets = self.getTraceTargets( node, brief )
except Exception:
targets = None
return name, sources, targets
#//-------------------------------------------------------//
def getBuildDir( self ):
_makeBuildPath( self.build_dir )
return self.build_dir
#//-------------------------------------------------------//
def getBuildPath( self ):
_makeBuildPath( self.build_path )
return self.build_path
#//-------------------------------------------------------//
def getFileBuildPath( self, file_path, ext = None, prefix = None, suffix = None ):
build_path = self.build_path
dirname, filename = _splitFileName( file_path, ext, prefix, suffix )
if self.relative_build_paths:
build_path = relativeJoin( build_path, dirname )
_makeBuildPath( build_path )
build_path = os.path.join( build_path, filename )
return FilePath( build_path )
#//-------------------------------------------------------//
def getFileBuildPaths( self, file_paths, ext = None, prefix = None, suffix = None ):
build_path = self.build_path
dirnames, filenames = _splitFileNames( file_paths, ext, prefix, suffix )
if self.relative_build_paths:
dirnames = relativeJoinList( build_path, dirnames )
_makeBuildPaths( dirnames )
build_paths = [ FilePath( os.path.join( dirname, filename ) ) for dirname, filename in zip( dirnames, filenames ) ]
else:
_makeBuildPath( build_path )
build_paths = [ FilePath( os.path.join( build_path, filename ) ) for filename in filenames ]
return build_paths
#//-------------------------------------------------------//
def groupSourcesByBuildDir( self, node ):
src_files = node.getSourceValues()
num_groups = node.options.batch_groups.get()
group_size = node.options.batch_size.get()
if self.relative_build_paths:
groups = groupPathsByDir( src_files, num_groups, group_size, pathGetter = lambda value: value.get() )
else:
groups = groupItems( src_files, num_groups, group_size )
return groups
#//-------------------------------------------------------//
def getDefaultValueType( self ):
return self.default_value_type
#//-------------------------------------------------------//
def getFileValueType( self ):
return self.file_value_type
#//-------------------------------------------------------//
def makeSimpleValue(self, value, tags = None, use_cache = False ):
if isinstance( value, ValueBase):
return value
if isinstance( value, FilePath ):
return self.file_value_type( name = value, tags = tags, use_cache = use_cache )
return SimpleValue( value )
#//-------------------------------------------------------//
def makeFileValue( self, value, tags = None, use_cache = False ):
if isinstance( value, ValueBase ):
return value
return self.file_value_type( name = value, tags = tags, use_cache = use_cache )
#//-------------------------------------------------------//
def makeFileValues( self, values, tags = None, use_cache = False ):
make_value = self.makeFileValue
return [ make_value( value, tags = tags, use_cache = use_cache ) for value in toSequence( values ) ]
#//-------------------------------------------------------//
def execCmd(self, cmd, cwd = None, env = None, file_flag = None, stdin = None ):
result = self.execCmdResult( cmd, cwd = cwd, env = env, file_flag = file_flag, stdin = stdin )
if result.failed():
raise result
return result.output
#//-------------------------------------------------------//
def execCmdResult(self, cmd, cwd = None, env = None, file_flag = None, stdin = None ):
if env is None:
env = self.env
if cwd is None:
cwd = self.getBuildPath()
result = executeCommand( cmd, cwd = cwd, env = env, file_flag = file_flag, stdin = stdin )
eventExecCmd( cmd, cwd, env )
return result
#//===========================================================================//
class FileBuilder (Builder):
def _initAttrs( self, options ):
super(FileBuilder,self)._initAttrs( options )
self.makeValue = self.makeFileValue
self.default_value_type = self.file_value_type
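#//===========================================================================//
# Hedged sketch (not part of the original module): a minimal FileBuilder
# subclass wired through the attributes used above; the class name, option
# and command are hypothetical, and registering the target on the node is
# omitted.
#
#     class GzipFilesBuilder (FileBuilder):
#
#       NAME_ATTRS = ('level',)
#       SIGNATURE_ATTRS = ('level',)
#
#       def __init__( self, options, level = 9 ):
#         self.level = level
#
#       def build( self, node ):
#         for src in node.getSourceValues():
#           target = self.getFileBuildPath( src, ext = '.gz' )
#           self.execCmd( ['gzip', '-%s' % self.level, '-c', src.get()] )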
|
{
"content_hash": "fd3fb27cd9472d4ea0b67b8ceb60b792",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 147,
"avg_line_length": 29.64241164241164,
"alnum_prop": 0.48674428391078695,
"repo_name": "menify/sandbox",
"id": "2810cfcbc35170566a59016db32fd48b11cbb446",
"size": "15405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aql/aql/nodes/aql_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "554478"
},
{
"name": "CMake",
"bytes": "13284"
},
{
"name": "Python",
"bytes": "1851627"
},
{
"name": "Shell",
"bytes": "5173"
}
],
"symlink_target": ""
}
|
'''
Created on Oct 9, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from tkinter import *
try:
from tkinter.ttk import *
except ImportError:
from ttk import *
from arelle.CntlrWinTooltip import ToolTip
import os
class ViewTree:
def __init__(self, modelXbrl, tabWin, tabTitle, hasToolTip=False, lang=None):
self.tabWin = tabWin
self.viewFrame = Frame(tabWin)
self.viewFrame.view = self
self.viewFrame.grid(row=0, column=0, sticky=(N, S, E, W))
tabWin.add(self.viewFrame,text=tabTitle)
self.tabTitle = tabTitle # for error messages
vScrollbar = Scrollbar(self.viewFrame, orient=VERTICAL)
hScrollbar = Scrollbar(self.viewFrame, orient=HORIZONTAL)
self.treeView = Treeview(self.viewFrame, xscrollcommand=hScrollbar.set, yscrollcommand=vScrollbar.set)
self.treeView.grid(row=0, column=0, sticky=(N, S, E, W))
self.treeView.tag_configure("ELR", background="#E0F0FF")
self.treeView.tag_configure("even", background="#F0F0F0")
self.treeView.tag_configure("odd", background="#FFFFFF")
if modelXbrl.modelManager.cntlr.isMac or modelXbrl.modelManager.cntlr.isMSW:
highlightColor = "#%04x%04x%04x" % self.treeView.winfo_rgb("SystemHighlight")
else:
highlightColor = "#33339999ffff" # using MSW value for Unix/Linux which has no named colors
self.treeView.tag_configure("selected-ELR", background=highlightColor)
self.treeView.tag_configure("selected-even", background=highlightColor)
self.treeView.tag_configure("selected-odd", background=highlightColor)
self.treeViewSelection = ()
self.treeView.bind("<<TreeviewSelect>>", self.viewSelectionChange, '+')
self.treeView.bind("<1>", self.onViewClick, '+')
hScrollbar["command"] = self.treeView.xview
hScrollbar.grid(row=1, column=0, sticky=(E,W))
vScrollbar["command"] = self.treeView.yview
vScrollbar.grid(row=0, column=1, sticky=(N,S))
self.viewFrame.columnconfigure(0, weight=1)
self.viewFrame.rowconfigure(0, weight=1)
self.modelXbrl = modelXbrl
self.hasToolTip = hasToolTip
self.toolTipText = StringVar()
if hasToolTip:
self.treeView.bind("<Motion>", self.motion, '+')
self.treeView.bind("<Leave>", self.leave, '+')
self.toolTipText = StringVar()
self.toolTip = ToolTip(self.treeView,
textvariable=self.toolTipText,
wraplength=480,
follow_mouse=True,
state="disabled")
self.toolTipColId = None
self.toolTipRowId = None
self.modelXbrl = modelXbrl
self.lang = lang
self.labelrole = None
self.nameIsPrefixed = False
if modelXbrl:
modelXbrl.views.append(self)
if not lang:
self.lang = modelXbrl.modelManager.defaultLang
def clearTreeView(self):
self.treeViewSelection = ()
for node in self.treeView.get_children():
self.treeView.delete(node)
def viewSelectionChange(self, event=None):
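        # Re-tag rows so the highlight survives re-sorting: strip the
        # "selected-" prefix from the previously selected rows, then add it to
        # the tags of the newly selected rows (colors set up in __init__).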
for node in self.treeViewSelection:
if self.treeView.exists(node):
priorTags = self.treeView.item(node)["tags"]
if priorTags:
priorBgTag = priorTags[0]
if priorBgTag.startswith("selected-"):
self.treeView.item(node, tags=(priorBgTag[9:],))
self.treeViewSelection = self.treeView.selection()
for node in self.treeViewSelection:
priorTags = self.treeView.item(node)["tags"]
if priorTags:
self.treeView.item(node, tags=("selected-" + priorTags[0],))
def onViewClick(self, *args):
self.modelXbrl.modelManager.cntlr.currentView = self
def close(self):
del self.viewFrame.view
if self.modelXbrl:
self.tabWin.forget(self.viewFrame)
self.modelXbrl.views.remove(self)
self.modelXbrl = None
self.view = None
def select(self):
self.tabWin.select(self.viewFrame)
def leave(self, *args):
self.toolTipColId = None
self.toolTipRowId = None
def motion(self, *args):
tvColId = self.treeView.identify_column(args[0].x)
tvRowId = self.treeView.identify_row(args[0].y)
if tvColId != self.toolTipColId or tvRowId != self.toolTipRowId:
self.toolTipColId = tvColId
self.toolTipRowId = tvRowId
newValue = self.getToolTip(tvRowId, tvColId)
if newValue is None and tvRowId and len(tvRowId) > 0:
try:
col = int(tvColId[1:])
if col == 0:
newValue = self.treeView.item(tvRowId,"text")
else:
values = self.treeView.item(tvRowId,"values")
if col <= len(values):
newValue = values[col - 1]
except ValueError:
pass
self.setToolTip(newValue, tvColId)
def getToolTip(self, rowId, colId):
return None
def setToolTip(self, text, colId="#0"):
self.toolTip._hide()
if isinstance(text,str) and len(text) > 0:
width = self.treeView.column(colId,"width")
if len(text) * 8 > width or '\n' in text:
self.toolTipText.set(text)
self.toolTip.configure(state="normal")
self.toolTip._schedule()
else:
self.toolTipText.set("")
self.toolTip.configure(state="disabled")
else:
self.toolTipText.set("")
self.toolTip.configure(state="disabled")
def contextMenu(self):
try:
return self.menu
except AttributeError:
try:
self.menu = Menu( self.viewFrame, tearoff = 0 )
self.treeView.bind( self.modelXbrl.modelManager.cntlr.contextMenuClick, self.popUpMenu, '+' )
return self.menu
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
return None
def popUpMenu(self, event):
if self.menu:
self.menuRow = self.treeView.identify_row(event.y)
self.menuCol = self.treeView.identify_column(event.x)
self.menu.post( event.x_root, event.y_root )
def expand(self):
self.setTreeItemOpen(self.menuRow,open=True)
def expandAll(self):
self.setTreeItemOpen("",open=True)
def collapse(self):
self.setTreeItemOpen(self.menuRow,open=False)
def collapseAll(self):
self.setTreeItemOpen("",open=False)
def setTreeItemOpen(self, node, open=True):
if node:
self.treeView.item(node, open=open)
for childNode in self.treeView.get_children(node):
self.setTreeItemOpen(childNode, open)
def menuAddExpandCollapse(self):
if self.menu:
self.menu.add_command(label=_("Expand"), underline=0, command=self.expand)
self.menu.add_command(label=_("Collapse"), underline=0, command=self.collapse)
self.menu.add_command(label=_("Expand all"), underline=0, command=self.expandAll)
self.menu.add_command(label=_("Collapse all"), underline=0, command=self.collapseAll)
def menuAddClipboard(self):
if self.menu and self.modelXbrl.modelManager.cntlr.hasClipboard:
try:
clipboardMenu = Menu(self.viewFrame, tearoff=0)
clipboardMenu.add_command(label=_("Cell"), underline=0, command=self.copyCellToClipboard)
clipboardMenu.add_command(label=_("Row"), underline=0, command=self.copyRowToClipboard)
clipboardMenu.add_command(label=_("Table"), underline=0, command=self.copyTableToClipboard)
self.menu.add_cascade(label=_("Copy to clipboard"), menu=clipboardMenu, underline=0)
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating clipboard menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def menuAddLangs(self):
if self.menu:
try:
langsMenu = Menu(self.viewFrame, tearoff=0)
self.menu.add_cascade(label=_("Language"), menu=langsMenu, underline=0)
for lang in sorted(self.modelXbrl.langs):
langsMenu.add_command(label=lang, underline=0, command=lambda l=lang: self.setLang(l))
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context languages menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def menuAddLabelRoles(self, includeConceptName=False, menulabel=None):
if self.menu:
try:
if menulabel is None: menulabel = _("Label role")
rolesMenu = Menu(self.viewFrame, tearoff=0)
self.menu.add_cascade(label=menulabel, menu=rolesMenu, underline=0)
from arelle.ModelRelationshipSet import labelroles
for x in labelroles(self.modelXbrl, includeConceptName):
rolesMenu.add_command(label=x[0][1:], underline=0, command=lambda a=x[1]: self.setLabelrole(a))
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context label roles menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def menuAddNameStyle(self, menulabel=None):
if self.menu:
try:
if menulabel is None: menulabel = _("Name Style")
nameStyleMenu = Menu(self.viewFrame, tearoff=0)
self.menu.add_cascade(label=menulabel, menu=nameStyleMenu, underline=0)
from arelle.ModelRelationshipSet import labelroles
nameStyleMenu.add_command(label=_("Prefixed"), underline=0, command=lambda a=True: self.setNamestyle(a))
nameStyleMenu.add_command(label=_("No prefix"), underline=0, command=lambda a=False: self.setNamestyle(a))
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context name style menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def menuAddUnitDisplay(self):
if self.menu:
try:
rolesMenu = Menu(self.viewFrame, tearoff=0)
self.menu.add_cascade(label=_("Units"), menu=rolesMenu, underline=0)
rolesMenu.add_command(label=_("Unit ID"), underline=0, command=lambda: self.setUnitDisplay(unitDisplayID=True))
rolesMenu.add_command(label=_("Measures"), underline=0, command=lambda: self.setUnitDisplay(unitDisplayID=False))
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context unit menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def menuAddViews(self, addClose=True, tabWin=None):
if self.menu:
try:
if tabWin is None: tabWin = self.tabWin
viewMenu = Menu(self.viewFrame, tearoff=0)
self.menu.add_cascade(label=_("View"), menu=viewMenu, underline=0)
newViewsMenu = Menu(self.viewFrame, tearoff=0)
if addClose:
viewMenu.add_command(label=_("Close"), underline=0, command=self.close)
viewMenu.add_cascade(label=_("Additional view"), menu=newViewsMenu, underline=0)
newViewsMenu.add_command(label=_("Arcrole group..."), underline=0, command=lambda: self.newArcroleGroupView(tabWin))
from arelle.ModelRelationshipSet import baseSetArcroles
for x in baseSetArcroles(self.modelXbrl) + [( " Role Types","!CustomRoleTypes!"), (" Arcrole Types", "!CustomArcroleTypes!")]:
newViewsMenu.add_command(label=x[0][1:], underline=0, command=lambda a=x[1]: self.newView(a, tabWin))
except Exception as ex: # tkinter menu problem maybe
self.modelXbrl.info("arelle:internalException",
_("Exception creating context add-views menu in %(title)s: %(error)s"),
modelObject=self.modelXbrl.modelDocument, title=self.tabTitle, error=str(ex))
self.menu = None
def newView(self, arcrole, tabWin):
if arcrole in ("!CustomRoleTypes!", "!CustomArcroleTypes!"):
from arelle import ViewWinRoleTypes
ViewWinRoleTypes.viewRoleTypes(self.modelXbrl, tabWin, arcrole=="!CustomArcroleTypes!", lang=self.lang)
else:
from arelle import ViewWinRelationshipSet
ViewWinRelationshipSet.viewRelationshipSet(self.modelXbrl, tabWin, arcrole, lang=self.lang)
def newArcroleGroupView(self, tabWin):
from arelle.DialogArcroleGroup import getArcroleGroup
from arelle import ViewWinRelationshipSet
arcroleGroup = getArcroleGroup(self.modelXbrl.modelManager.cntlr, self.modelXbrl)
if arcroleGroup:
ViewWinRelationshipSet.viewRelationshipSet(self.modelXbrl, tabWin, arcroleGroup, lang=self.lang)
def setLang(self, lang):
self.lang = lang
self.view()
def setLabelrole(self, labelrole):
self.labelrole = labelrole
self.view()
def setNamestyle(self, isPrefixed):
self.nameIsPrefixed = isPrefixed
self.view()
def setUnitDisplay(self, unitDisplayID=False):
self.unitDisplayID = unitDisplayID
self.view()
def setColumnsSortable(self, treeColIsInt=False, startUnsorted=False, initialSortCol="#0", initialSortDirForward=True):
if hasattr(self, 'lastSortColumn') and self.lastSortColumn:
self.treeView.heading(self.lastSortColumn, image=self.sortImages[2])
self.lastSortColumn = None if startUnsorted else initialSortCol
self.lastSortColumnForward = initialSortDirForward
self.treeColIsInt = treeColIsInt
if not hasattr(self, "sortImages"):
self.sortImages = (PhotoImage(file=os.path.join(self.modelXbrl.modelManager.cntlr.imagesDir, "columnSortUp.gif")),
PhotoImage(file=os.path.join(self.modelXbrl.modelManager.cntlr.imagesDir, "columnSortDown.gif")),
PhotoImage())
for col in ("#0",) + self.treeView["columns"]:
self.treeView.heading(col, command=lambda c=col: self.sortColumn(c))
if not startUnsorted:
self.treeView.heading(initialSortCol, image=self.sortImages[not initialSortDirForward])
def colSortVal(self, node, col):
if col == "#0":
treeColVal = self.treeView.item(node)["text"]
if self.treeColIsInt:
return int(treeColVal)
else:
treeColVal = self.treeView.set(node, col)
if col == "sequence":
try:
return int(treeColVal)
except:
return 0
return treeColVal
def sortNestedRows(self, parentNode, col, reverse):
l = [(self.colSortVal(node, col), node) for node in self.treeView.get_children(parentNode)]
l.sort(reverse=reverse)
# rearrange items in sorted positions
for i, (cell, node) in enumerate(l):
self.treeView.move(node, parentNode, i)
# reset even/odd tags
for i, node in enumerate(self.treeView.get_children(parentNode)):
self.treeView.item(node, tags=('even' if i & 1 else 'odd',))
self.sortNestedRows(node, col, reverse)
def sortColumn(self, col):
if col == self.lastSortColumn:
reverse = self.lastSortColumnForward
self.lastSortColumnForward = not reverse
else:
if self.lastSortColumn:
self.treeView.heading(self.lastSortColumn, image=self.sortImages[2])
reverse = False
self.lastSortColumnForward = True
self.lastSortColumn = col
self.treeView.heading(col, image=self.sortImages[reverse])
self.sortNestedRows('', col, reverse)
self.viewSelectionChange() # reselect selected rows
def copyCellToClipboard(self, *ignore):
self.modelXbrl.modelManager.cntlr.clipboardData(
text=self.treeView.item(self.menuRow)['text'] if self.menuCol == '#0' else self.treeView.set(self.menuRow,self.menuCol))
def copyRowToClipboard(self, *ignore):
self.modelXbrl.modelManager.cntlr.clipboardData(
text='\t'.join([self.treeView.item(self.menuRow)['text']] +
[self.treeView.set(self.menuRow,c) for c in self.treeView['columns']]))
def copyTableToClipboard(self, *ignore):
cols = self.treeView['columns']
lines = ['\t'.join([self.treeView.heading('#0')['text']] +
[self.treeView.heading(h)['text'] for h in cols])]
self.tabLines('', '', cols, lines)
self.modelXbrl.modelManager.cntlr.clipboardData(text='\n'.join(lines))
def tabLines(self, parentNode, indent, cols, lines):
for node in self.treeView.get_children(parentNode):
lines.append('\t'.join([indent + self.treeView.item(node)['text']] +
[self.treeView.set(node,c) for c in cols]))
self.tabLines(node, indent+' ', cols, lines)
|
{
"content_hash": "38db1899e0ba5c60ed88b09047a75b67",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 142,
"avg_line_length": 49.984415584415586,
"alnum_prop": 0.592756183745583,
"repo_name": "sternshus/Arelle",
"id": "b7df014853ddc6f25cfce8889c6273fc12278412",
"size": "19244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "arelle/ViewWinTree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31873"
},
{
"name": "C#",
"bytes": "850"
},
{
"name": "HTML",
"bytes": "8640"
},
{
"name": "Java",
"bytes": "4663"
},
{
"name": "Makefile",
"bytes": "5565"
},
{
"name": "NSIS",
"bytes": "9050"
},
{
"name": "PLSQL",
"bytes": "1056360"
},
{
"name": "Python",
"bytes": "5523072"
},
{
"name": "Shell",
"bytes": "13921"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, current_app, Response
from application.database import global_db
from application.helpers import requires_auth
from core.monitoring.models import SENSOR_CLASS_MAP
sensor_heatmap_pages = Blueprint('sensor_heatmap', __name__
, template_folder='templates', static_folder='static')
import time
def __heatmap():
cpu = SENSOR_CLASS_MAP["cpu_user"]
gpu = SENSOR_CLASS_MAP["gpu_load"]
interval = current_app.app_config.monitoring["aggregation_interval"]
time_thr = int(time.time() - interval * 3)
cpu_data = list(global_db.session.query(cpu.node_id, cpu.avg)
.filter(cpu.time > time_thr)
.order_by("time desc").all())
gpu_data = list(global_db.session.query(gpu.node_id, gpu.avg)
.filter(gpu.time > time_thr)
.order_by("time desc").all())
return render_template("cpu.html", cpu_data = cpu_data, gpu_data = gpu_data)
@sensor_heatmap_pages.route("/general")
@requires_auth
def heatmap() -> Response:
return __heatmap()
|
{
"content_hash": "8f312a346d736ef07563a484d91ba607",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 29.757575757575758,
"alnum_prop": 0.7219959266802444,
"repo_name": "srcc-msu/job_statistics",
"id": "9b0f1a13712825631df7188e15d8e692b672191c",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/sensor_heatmap/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19266"
},
{
"name": "HTML",
"bytes": "62218"
},
{
"name": "JavaScript",
"bytes": "97906"
},
{
"name": "Makefile",
"bytes": "342"
},
{
"name": "Python",
"bytes": "102112"
}
],
"symlink_target": ""
}
|
"""Utilities for iotop/top style profiling for android."""
import collections
import json
import os
import subprocess
import sys
import urllib
import constants
import io_stats_parser
class DeviceStatsMonitor(object):
"""Class for collecting device stats such as IO/CPU usage.
Args:
    adb: Instance of AndroidCommands.
hz: Frequency at which to sample device stats.
"""
DEVICE_PATH = '/data/local/tmp/device_stats_monitor'
HOST_PATH = os.path.abspath(os.path.join(
constants.CHROME_DIR, 'out', 'Release', 'device_stats_monitor'))
PROFILE_PATH = '/sdcard/Download/device_stats_monitor.profile'
RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
def __init__(self, adb, hz):
self._adb = adb
self._adb.PushIfNeeded(DeviceStatsMonitor.HOST_PATH,
DeviceStatsMonitor.DEVICE_PATH)
self._hz = hz
def Start(self):
"""Starts device stats monitor on the device."""
self._adb.SetFileContents(DeviceStatsMonitor.PROFILE_PATH, '')
self._process = subprocess.Popen(
['adb', 'shell', '%s --hz=%d %s' % (
DeviceStatsMonitor.DEVICE_PATH, self._hz,
DeviceStatsMonitor.PROFILE_PATH)])
def StopAndCollect(self, output_path):
"""Stops monitoring and saves results.
Args:
output_path: Path to save results.
Returns:
String of URL to load results in browser.
"""
assert self._process
self._adb.KillAll(DeviceStatsMonitor.DEVICE_PATH)
self._process.wait()
profile = self._adb.GetFileContents(DeviceStatsMonitor.PROFILE_PATH)
results = collections.defaultdict(list)
last_io_stats = None
last_cpu_stats = None
for line in profile:
if ' mmcblk0 ' in line:
stats = io_stats_parser.ParseIoStatsLine(line)
if last_io_stats:
results['sectors_read'].append(stats.num_sectors_read -
last_io_stats.num_sectors_read)
results['sectors_written'].append(stats.num_sectors_written -
last_io_stats.num_sectors_written)
last_io_stats = stats
elif line.startswith('cpu '):
stats = self._ParseCpuStatsLine(line)
if last_cpu_stats:
results['user'].append(stats.user - last_cpu_stats.user)
results['nice'].append(stats.nice - last_cpu_stats.nice)
results['system'].append(stats.system - last_cpu_stats.system)
results['idle'].append(stats.idle - last_cpu_stats.idle)
results['iowait'].append(stats.iowait - last_cpu_stats.iowait)
results['irq'].append(stats.irq - last_cpu_stats.irq)
results['softirq'].append(stats.softirq- last_cpu_stats.softirq)
last_cpu_stats = stats
units = {
'sectors_read': 'sectors',
'sectors_written': 'sectors',
'user': 'jiffies',
'nice': 'jiffies',
'system': 'jiffies',
'idle': 'jiffies',
'iowait': 'jiffies',
'irq': 'jiffies',
'softirq': 'jiffies',
}
with open(output_path, 'w') as f:
f.write('display(%d, %s, %s);' % (self._hz, json.dumps(results), units))
return 'file://%s?results=file://%s' % (
DeviceStatsMonitor.RESULT_VIEWER_PATH, urllib.quote(output_path))
@staticmethod
def _ParseCpuStatsLine(line):
"""Parses a line of cpu stats into a CpuStats named tuple."""
# Field definitions: http://www.linuxhowtos.org/System/procstat.htm
cpu_stats = collections.namedtuple('CpuStats',
['device',
'user',
'nice',
'system',
'idle',
'iowait',
'irq',
'softirq',
])
fields = line.split()
return cpu_stats._make([fields[0]] + [int(f) for f in fields[1:8]])
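# Usage sketch (illustrative only; `adb` is assumed to be an AndroidCommands
# style wrapper and the output path below is hypothetical):
#
#   monitor = DeviceStatsMonitor(adb, hz=10)
#   monitor.Start()
#   ...                     # run the workload being profiled
#   url = monitor.StopAndCollect('/tmp/device_stats.html')
#   # `url` points RESULT_VIEWER_PATH at the saved profile for viewing.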
|
{
"content_hash": "d18d9de19ddc5795adae522975971a99",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 80,
"avg_line_length": 36.6875,
"alnum_prop": 0.5750790946702361,
"repo_name": "keishi/chromium",
"id": "79225028baf7c19ac51276b956181e1ab8eb33da",
"size": "4276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/android/pylib/device_stats_monitor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
}
|
"""
Google OpenID and OAuth support
OAuth works out of the box using an anonymous configuration; the username
is generated by requesting the email from the undocumented googleapis.com
service. Registered applications can define the settings GOOGLE_CONSUMER_KEY
and GOOGLE_CONSUMER_SECRET, which will then be used in the auth process.
The setting GOOGLE_OAUTH_EXTRA_SCOPE can be used to access other user
related data, like calendar, contacts, docs, etc.
OAuth2 works similarly to OAuth, but the application must be defined in the
Google APIs console https://code.google.com/apis/console/ under the Identity
option.
OpenID also works out of the box and needs no further configuration.
"""
from urllib import urlencode
from urllib2 import Request, urlopen
from oauth2 import Request as OAuthRequest
from django.utils import simplejson
from social_auth.utils import setting
from social_auth.backends import OpenIdAuth, ConsumerBasedOAuth, BaseOAuth2, \
OAuthBackend, OpenIDBackend, USERNAME
from social_auth.backends.exceptions import AuthFailed
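# Example settings (illustrative values only -- replace with your own):
#
#   GOOGLE_CONSUMER_KEY = 'example.com'            # registered OAuth consumer
#   GOOGLE_CONSUMER_SECRET = '<consumer-secret>'
#   GOOGLE_OAUTH_EXTRA_SCOPE = ['https://www.google.com/m8/feeds/']
#   GOOGLE_OAUTH2_CLIENT_ID = '<client-id>'        # from the APIs console
#   GOOGLE_OAUTH2_CLIENT_SECRET = '<client-secret>'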
# Google OAuth base configuration
GOOGLE_OAUTH_SERVER = 'www.google.com'
AUTHORIZATION_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken'
REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
# Google OAuth2 base configuration
GOOGLE_OAUTH2_SERVER = 'accounts.google.com'
GOOGLE_OAUTH2_AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
# scope for user email, specify extra scopes in settings, for example:
# GOOGLE_OAUTH_EXTRA_SCOPE = ['https://www.google.com/m8/feeds/']
GOOGLE_OAUTH_SCOPE = ['https://www.googleapis.com/auth/userinfo#email']
GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/userinfo.profile']
GOOGLEAPIS_EMAIL = 'https://www.googleapis.com/userinfo/email'
GOOGLEAPIS_PROFILE = 'https://www.googleapis.com/oauth2/v1/userinfo'
GOOGLE_OPENID_URL = 'https://www.google.com/accounts/o8/id'
# Backends
class GoogleOAuthBackend(OAuthBackend):
"""Google OAuth authentication backend"""
name = 'google-oauth'
def get_user_id(self, details, response):
"""Use google email as unique id"""
validate_whitelists(self, details['email'])
return details['email']
    def get_user_details(self, response):
        """Return user details from Google account"""
email = response['email']
return {USERNAME: email.split('@', 1)[0],
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
class GoogleOAuth2Backend(GoogleOAuthBackend):
"""Google OAuth2 authentication backend"""
name = 'google-oauth2'
EXTRA_DATA = [
('refresh_token', 'refresh_token', True),
('expires_in', setting('SOCIAL_AUTH_EXPIRATION', 'expires'))
]
def get_user_id(self, details, response):
"""Use google email or id as unique id"""
user_id = super(GoogleOAuth2Backend, self).get_user_id(details,
response)
if setting('GOOGLE_OAUTH2_USE_UNIQUE_USER_ID', False):
return response['id']
return user_id
def get_user_details(self, response):
email = response['email']
return {USERNAME: email.split('@', 1)[0],
'email': email,
'fullname': response.get('name', ''),
'first_name': response.get('given_name', ''),
'last_name': response.get('family_name', '')}
class GoogleBackend(OpenIDBackend):
"""Google OpenID authentication backend"""
name = 'google'
def get_user_id(self, details, response):
"""
        Return the unique user id provided by the service. For Google, the
        user email is unique enough to identify a single user. Email comes
        from schema:
http://axschema.org/contact/email
"""
validate_whitelists(self, details['email'])
return details['email']
# Auth classes
class GoogleAuth(OpenIdAuth):
"""Google OpenID authentication"""
AUTH_BACKEND = GoogleBackend
def openid_url(self):
"""Return Google OpenID service url"""
return GOOGLE_OPENID_URL
class BaseGoogleOAuth(ConsumerBasedOAuth):
"""Base class for Google OAuth mechanism"""
AUTHORIZATION_URL = AUTHORIZATION_URL
REQUEST_TOKEN_URL = REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = ACCESS_TOKEN_URL
SERVER_URL = GOOGLE_OAUTH_SERVER
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from G service"""
raise NotImplementedError('Implement in subclass')
class GoogleOAuth(BaseGoogleOAuth):
"""Google OAuth authorization mechanism"""
AUTH_BACKEND = GoogleOAuthBackend
SETTINGS_KEY_NAME = 'GOOGLE_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'GOOGLE_CONSUMER_SECRET'
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Google API"""
request = self.oauth_request(access_token, GOOGLEAPIS_EMAIL,
{'alt': 'json'})
url, params = request.to_url().split('?', 1)
return googleapis_email(url, params)
def oauth_authorization_request(self, token):
"""Generate OAuth request to authorize token."""
return OAuthRequest.from_consumer_and_token(self.consumer,
token=token,
http_url=self.AUTHORIZATION_URL)
def oauth_request(self, token, url, extra_params=None):
extra_params = extra_params or {}
scope = GOOGLE_OAUTH_SCOPE + setting('GOOGLE_OAUTH_EXTRA_SCOPE', [])
extra_params.update({
'scope': ' '.join(scope),
})
if not self.registered():
xoauth_displayname = setting('GOOGLE_DISPLAY_NAME', 'Social Auth')
extra_params['xoauth_displayname'] = xoauth_displayname
return super(GoogleOAuth, self).oauth_request(token, url, extra_params)
@classmethod
    def get_key_and_secret(cls):
        """Return Google OAuth Consumer Key and Consumer Secret pair; uses
        'anonymous' by default. Beware that this marks the application as not
        registered and a security badge is displayed on the authorization page.
http://code.google.com/apis/accounts/docs/OAuth_ref.html#SigningOAuth
"""
try:
return super(GoogleOAuth, cls).get_key_and_secret()
except AttributeError:
return 'anonymous', 'anonymous'
@classmethod
def enabled(cls):
"""Google OAuth is always enabled because of anonymous access"""
return True
def registered(self):
"""Check if Google OAuth Consumer Key and Consumer Secret are set"""
return self.get_key_and_secret() != ('anonymous', 'anonymous')
# TODO: Remove this setting name check, keep for backward compatibility
_OAUTH2_KEY_NAME = setting('GOOGLE_OAUTH2_CLIENT_ID') and \
'GOOGLE_OAUTH2_CLIENT_ID' or \
'GOOGLE_OAUTH2_CLIENT_KEY'
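# (Old-style conditional: resolves to 'GOOGLE_OAUTH2_CLIENT_ID' when that
# setting is defined, otherwise falls back to 'GOOGLE_OAUTH2_CLIENT_KEY'.)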
class GoogleOAuth2(BaseOAuth2):
"""Google OAuth2 support"""
AUTH_BACKEND = GoogleOAuth2Backend
AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
ACCESS_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
SETTINGS_KEY_NAME = _OAUTH2_KEY_NAME
SETTINGS_SECRET_NAME = 'GOOGLE_OAUTH2_CLIENT_SECRET'
SCOPE_VAR_NAME = 'GOOGLE_OAUTH_EXTRA_SCOPE'
DEFAULT_SCOPE = GOOGLE_OAUTH2_SCOPE
def user_data(self, access_token, *args, **kwargs):
"""Return user data from Google API"""
return googleapis_profile(GOOGLEAPIS_PROFILE, access_token)
def googleapis_email(url, params):
    """Loads user data from the googleapis service, only email so far, as it's
    described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
    Parameters must be passed in the query string and Authorization header as
    described
on Google OAuth documentation at:
http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
"""
request = Request(url + '?' + params, headers={'Authorization': params})
try:
return simplejson.loads(urlopen(request).read())['data']
except (ValueError, KeyError, IOError):
return None
def googleapis_profile(url, access_token):
"""
Loads user data from googleapis service, such as name, given_name,
family_name, etc. as it's described in:
https://developers.google.com/accounts/docs/OAuth2Login
"""
data = {'access_token': access_token, 'alt': 'json'}
request = Request(url + '?' + urlencode(data))
try:
return simplejson.loads(urlopen(request).read())
except (ValueError, KeyError, IOError):
return None
def validate_whitelists(backend, email):
"""
Validates allowed domains and emails against the following settings:
GOOGLE_WHITE_LISTED_DOMAINS
GOOGLE_WHITE_LISTED_EMAILS
All domains and emails are allowed if setting is an empty list.
"""
emails = setting('GOOGLE_WHITE_LISTED_EMAILS', [])
domains = setting('GOOGLE_WHITE_LISTED_DOMAINS', [])
if emails and email in emails:
return # you're good
if domains and email.split('@', 1)[1] not in domains:
raise AuthFailed(backend, 'Domain not allowed')
# Backend definition
BACKENDS = {
'google': GoogleAuth,
'google-oauth': GoogleOAuth,
'google-oauth2': GoogleOAuth2,
}
|
{
"content_hash": "fae1c259afd4db36114f920fe157ef30",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 37.51372549019608,
"alnum_prop": 0.6660045996236672,
"repo_name": "makinacorpus/django-social-auth",
"id": "295a3db5e4247d7735ef8c7643d736c791aa126b",
"size": "9566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social_auth/backends/google.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from juriscraper.opinions.united_states.federal_special import cit
import time
from datetime import date
from lxml import html
class Site(cit.Site):
def __init__(self):
super(Site, self).__init__()
self.url = 'http://www.cit.uscourts.gov/SlipOpinions/SlipOps-2010.html'
self.court_id = self.__module__
def _get_case_dates(self):
case_dates = []
for e in self.html.xpath('//table[3]/tr/td[3][../td/a]'):
date_string = html.tostring(e, method='text', encoding='unicode').strip()
if date_string == "06/25//2010":
# Special case.
date_string = "06/25/2010"
case_dates.append(date.fromtimestamp(
time.mktime(time.strptime(date_string, '%m/%d/%Y'))))
return case_dates
def _get_download_urls(self):
return [t for t in self.html.xpath('//table[3]//tr/td[1]/a/@href')]
def _get_neutral_citations(self):
return [t for t in self.html.xpath('//table[3]//tr/td[1]/a/text()')]
def _get_case_names(self):
# Exclude confidential rows
case_names = []
for e in self.html.xpath('//table[3]/tr[position() > 1]/td[2]'):
s = html.tostring(e, method='text', encoding='unicode')
if "confidential" in s:
continue
elif 'Errata' in s:
index = s.find('Errata')
elif 'Public version' in s:
index = s.find('Public version')
else:
index = -1
case_names.append(s[:index] if index > -1 else s)
return case_names
def _get_precedential_statuses(self):
statuses = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[2][../td/a]'):
s = html.tostring(e, method='text', encoding='unicode').lower().strip()
if "errata" in s:
statuses.append('Errata')
else:
statuses.append('Published')
return statuses
def _get_docket_numbers(self):
docket_numbers = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[4][../td/a]'):
docket_numbers.append(html.tostring(
e, method='text', encoding='unicode').strip())
return docket_numbers
def _get_judges(self):
judges = []
for e in self.html.xpath('//table[3]//tr[position() > 1]/td[5][../td/a]'):
            s = html.tostring(e, method='text', encoding='unicode')
judges.append(s)
return judges
def _get_nature_of_suit(self):
return [t for t in self.html.xpath('//table[3]//tr/td[6][../td/a]/text()')]
|
{
"content_hash": "6a4440c1f1520f61c091424c1ffafa5a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 85,
"avg_line_length": 36.986301369863014,
"alnum_prop": 0.5348148148148149,
"repo_name": "brianwc/juriscraper",
"id": "f0f6d30c49661b90176518560608fc97cc5db778",
"size": "2700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opinions/united_states_backscrapers/federal_special/cit_2010.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "24111143"
},
{
"name": "Python",
"bytes": "661024"
}
],
"symlink_target": ""
}
|
import sys
import unittest
import maya.cmds as cmds
import pymel.core as pm
import pymel.core.uitypes as ui
import pymel.core.windows as windows
class TestMenu(unittest.TestCase):
def setUp(self):
cmds.setParent(None, menu=1)
self.win = cmds.window()
def tearDown(self):
cmds.deleteUI(self.win, window=True)
def testOptionMenuAsMenu(self):
cmds.formLayout()
om = ui.OptionMenu('someOptionMenu', create=True)
cmds.menuItem( label='Yellow' )
self.assertEqual(windows.menu(om, q=1, numberOfItems=1), 1)
self.assertEqual(windows.menu(om.name(), q=1, numberOfItems=1), 1)
self.assertEqual(windows.menu(om.shortName(), q=1, numberOfItems=1), 1)
ui.Menu(om)
ui.Menu(om.name())
ui.Menu(om.shortName())
if not pm.about(batch=1):
for key, obj in globals().items():
if isinstance(obj, unittest.TestCase):
del globals()[key]
obj.__name__ = '_canceledTest_' + obj.__name__
|
{
"content_hash": "d513e2d8f0549a64c93cc5331a7fe276",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 34.36666666666667,
"alnum_prop": 0.6217264791464597,
"repo_name": "CountZer0/PipelineConstructionSet",
"id": "e1ea9ebd4cd81794008813fee36ceab72ae72173",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/maya/site-packages/pymel-1.0.3/tests/test_windows.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "49130"
},
{
"name": "JavaScript",
"bytes": "21455"
},
{
"name": "Python",
"bytes": "24534027"
},
{
"name": "Shell",
"bytes": "784"
}
],
"symlink_target": ""
}
|
"""Regression test for issue #51."""
import unittest
import os.path
from xml.etree import ElementTree as ET
from statik.generator import generate
class TestStaticPagesFromProjectDynamicContext(unittest.TestCase):
def test_issue(self):
test_path = os.path.dirname(os.path.realpath(__file__))
output_data = generate(
os.path.join(test_path, 'data-non-root-base'),
in_memory=True
)
self.assertIn('index.html', output_data)
self.assertIn('about', output_data)
self.assertIn('index.html', output_data['about'])
self.assertIn('contact', output_data)
self.assertIn('index.html', output_data['contact'])
html = ET.fromstring(output_data['index.html'])
static_page_links = html.findall("body/div[@class='menu']/ul/li/a")
self.assertEqual(2, len(static_page_links))
self.assertEqual('/non/standard/about/', static_page_links[0].attrib['href'])
self.assertEqual('/non/standard/contact/', static_page_links[1].attrib['href'])
self.assert_static_page_compiles(
output_data['about']['index.html'],
"About",
"Here's the About page."
)
self.assert_static_page_compiles(
output_data['contact']['index.html'],
"Contact",
"Here's how to contact us."
)
def assert_static_page_compiles(self, content, expected_title, expected_body):
html = ET.fromstring(content)
title = html.find('head/title')
self.assertEqual(expected_title, title.text.strip())
body = html.find('body/p')
self.assertEqual(expected_body, body.text.strip())
|
{
"content_hash": "97b3d81b81efb7aca7222fc7f7375a03",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 87,
"avg_line_length": 36.80434782608695,
"alnum_prop": 0.6196101594802126,
"repo_name": "thanethomson/statik",
"id": "50c41cee084684043be1851b0c88c7f4c165f18b",
"size": "1717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_issue_51.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8950"
},
{
"name": "Python",
"bytes": "236375"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand, CommandError
from conference import models
from collections import defaultdict
from optparse import make_option
class Command(BaseCommand):
    """
    List purchased tickets, grouped by person (default) or by ticket
    (--by-ticket); staff tickets can be excluded with --no-staff.
    """
option_list = BaseCommand.option_list + (
make_option('--by-ticket',
action='store_true',
dest='by_ticket',
default=False,
help='list by ticket instead of person',
),
make_option('--no-staff',
action='store_true',
dest='no_staff',
default=False,
help='exclude staff tickets',
),
)
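    # Example invocation (hypothetical):
    #   ./manage.py ticket_list --by-ticket --no-staff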
def handle(self, *args, **options):
qs = models.Ticket.objects\
.filter(orderitem__order___complete=True)\
.exclude(fare__ticket_type='partner')\
.select_related('user', 'fare', 'p3_conference')
if options['no_staff']:
qs = qs.exclude(ticket_type='staff')
buyers = defaultdict(list)
names = defaultdict(list)
non_conference_tickets = defaultdict(list)
conference_tickets = []
alien_tickets = []
for t in qs.filter(fare__ticket_type='conference'):
name = t.name or '%s %s' % (t.user.first_name, t.user.last_name)
data = {
'name': name,
'ticket': t,
'additional': [],
}
conference_tickets.append(data)
buyers[t.user_id].append(data)
names[name].append(data)
for t in qs.exclude(fare__ticket_type='conference'):
if t.name:
if t.name in names:
founds = names[t.name]
if len(founds) == 1:
ix = 0
maybe = False
else:
maybe = True
for ix, tdata in enumerate(founds):
if tdata['ticket'].user_id == t.user_id:
maybe = False
break
else:
ix = 0
founds[ix]['additional'].append({
'ticket': t,
'maybe': maybe,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': maybe,
'conference': founds[ix],
})
continue
if t.user_id in buyers:
buyers[t.user_id][0]['additional'].append({
'ticket': t,
'maybe': False,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': False,
'conference': buyers[t.user_id][0],
})
continue
name = t.name or '%s %s' % (t.user.first_name, t.user.last_name)
alien_tickets.append({
'name': name,
'ticket': t,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': False,
'conference': None,
})
conference_tickets.sort(key=lambda x: x['name'].upper())
alien_tickets.sort(key=lambda x: x['name'].upper())
if not options['by_ticket']:
letter = None
for t in conference_tickets:
row = [
t['name'].encode('utf-8'),
'STAFF' if t['ticket'].ticket_type == 'staff' else t['ticket'].fare.name,
t['ticket'].p3_conference.days if t['ticket'].p3_conference and t['ticket'].fare.code[2] == 'D' else '',
]
if row[0][0].upper() != letter:
letter = row[0][0].upper()
print('\n\n')
print('\t\t\t', letter)
print('-' * 80)
print('\n\n')
print('\t'.join(map(str, row)))
for linked in t['additional']:
row = [
'%s%s' % ('(*) ' if linked['maybe'] else '', linked['ticket'].name.encode('utf-8')),
linked['ticket'].fare.code,
linked['ticket'].fare.name,
]
print('\t', '\t'.join(map(str, row)))
if alien_tickets:
print('\n\n')
print('\t\t\t', 'ALIEN')
print('-' * 80)
print('\n\n')
for t in alien_tickets:
row = [
t['name'].encode('utf-8'),
'STAFF' if t['ticket'].ticket_type == 'staff' else t['ticket'].fare.name,
t['ticket'].p3_conference.days if t['ticket'].p3_conference and t['ticket'].fare.code[2] == 'D' else '',
]
print('\t'.join(map(str, row)))
else:
for fare, items in non_conference_tickets.items():
print('\n\n')
print('\t\t\t', fare.code, fare.name.encode('utf-8'))
print('-' * 80)
print('\n\n')
def k(x):
t = x['ticket']
return t.name or '%s %s' % (t.user.first_name, t.user.last_name)
for t in sorted(items, key=k):
if t['maybe']:
print('(*)', end=' ')
print(k(t).encode('utf-8'), '->', end=' ')
if t['conference']:
print(t['conference']['name'].encode('utf-8'))
else:
print('')
|
{
"content_hash": "b028524eef1662e062bb8a8f26e96aa5",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 128,
"avg_line_length": 39.29530201342282,
"alnum_prop": 0.40478223740392827,
"repo_name": "EuroPython/epcon",
"id": "944da163a191be0e343a97e765dd28f6ca37e661",
"size": "5856",
"binary": false,
"copies": "1",
"ref": "refs/heads/ep2021",
"path": "p3/management/commands/ticket_list.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6475"
},
{
"name": "Dockerfile",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "412025"
},
{
"name": "JavaScript",
"bytes": "421281"
},
{
"name": "Makefile",
"bytes": "4679"
},
{
"name": "Python",
"bytes": "991334"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
}
|
import pygame
import sys
from pygame.locals import *
from variables import *
from Player import *
from Card import *
from create_board import *
from create_game_options import *
from create_player_info import *
from handle_mouse_event import *
from update_game_dice import *
from handle_game import *
# roll dice and take action based on the new position of the current player
def handle_dice_roll(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects):
# adding static part
screen.fill(BACKGROUND_COLOR)
create_board(screen,Cards)
create_game_options(screen)
create_player_info(screen,Players,Cards,cur_player)
# rolling dice
no1,no2,a,b = roll_dice(screen)
steps = int(no1) + int(no2)
player = Players[cur_player]
initial_card = get_rect_pressed_index(player.cur_position,Cards_Rects)
final_card = (initial_card + steps)%40
# changing the current players' piece position
final_pos = Cards[final_card].board_pos
next_position = ()
if player.color == "BLUE":
next_position = (final_pos[0] + 19,final_pos[1] + 19)
elif player.color == "RED":
next_position = (final_pos[0] + 59,final_pos[1] + 19)
elif player.color == "GREEN":
next_position = (final_pos[0] + 19,final_pos[1] + 49)
elif player.color == "YELLOW":
next_position = (final_pos[0] + 59,final_pos[1] + 49)
else:
next_position = ()
player.cur_position = next_position
#player.move_player(screen,next_position)
# updating the position of all players' piece on the board
for temp in Players:
temp.move_player(screen,temp.cur_position)
# add the necessary actions to be taken due to dice roll
final_card = update_game_dice(screen,initial_card,final_card,no1,no2,Players,Cards,cur_player)
# changing the position if there is some related instruction in chance or community card
final_pos = Cards[final_card].board_pos
next_position = ()
if player.color == "BLUE":
next_position = (final_pos[0] + 19,final_pos[1] + 19)
elif player.color == "RED":
next_position = (final_pos[0] + 59,final_pos[1] + 19)
elif player.color == "GREEN":
next_position = (final_pos[0] + 19,final_pos[1] + 49)
elif player.color == "YELLOW":
next_position = (final_pos[0] + 59,final_pos[1] + 49)
else:
next_position = ()
player.cur_position = next_position
isBankrupt = False
quit_player = False
# check if user is bankrupt or not
if Players[cur_player].cur_balance < 0:
Players[cur_player].isBankrupt = True
isBankrupt = True
else:
Players[cur_player].isBankrupt = False
# run loop till player removes bankruptcy or declares himself as bankrupt
while not quit_player and isBankrupt:
# show the state after update
screen.fill(BACKGROUND_COLOR)
create_board(screen,Cards)
create_game_options(screen)
for player in Players:
player.move_player(screen,player.cur_position)
create_player_info(screen,Players,Cards,cur_player)
quit_player = display_bankrupt_window(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects)
if Players[cur_player].cur_balance < 0:
Players[cur_player].isBankrupt = True
isBankrupt = True
else:
Players[cur_player].isBankrupt = False
isBankrupt = False
# true implies player declares himself as bankrupt and withdraws from the game
if quit_player == True:
handle_quit_player(screen,Players,Cards,cur_player)
# implies player has removed bankruptcy
else:
end_turn = False
while not end_turn:
# show the state after update
screen.fill(BACKGROUND_COLOR)
create_board(screen,Cards)
create_game_options(screen)
for player in Players:
player.move_player(screen,player.cur_position)
create_player_info(screen,Players,Cards,cur_player)
end_turn = display_end_turn_window(screen,Players,Cards,cur_player,Cards_Rects,Option_Rects,Info_Cards_Rects)
# change turn to next player
screen.fill(BACKGROUND_COLOR)
create_board(screen,Cards)
create_game_options(screen)
roll_dice(screen,4,2)
for player in Players:
player.move_player(screen,player.cur_position)
cur_player = (cur_player+1)%len(Players)
create_player_info(screen,Players,Cards,cur_player)
return cur_player
|
{
"content_hash": "baec2d104fab21a3e305a0357225b128",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 121,
"avg_line_length": 26.292134831460675,
"alnum_prop": 0.6425213675213676,
"repo_name": "idnaninitesh/monopoly_python",
"id": "e8f4f87cf87274425a39e023a46c54c845d85add",
"size": "4680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handle_dice_roll.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "253695"
}
],
"symlink_target": ""
}
|
import shlex, subprocess
from StringIO import StringIO
from django.db import models
from django.db.models.fields import IPAddressField
import paramiko
from paramiko.rsakey import RSAKey
from expedient.clearinghouse.aggregate.models import Aggregate
from expedient.clearinghouse.resources.models import Resource, Sliver
from expedient.common.utils.modelfields import LimitedIntegerField
from expedient.common.middleware import threadlocals
from expedient.clearinghouse.utils import post_message_to_current_user
from expedient.common.messaging.models import DatedMessage
from expedient.clearinghouse.slice.models import Slice
# SSHServer class
class SSHServer(Resource):
# SSHServer fields
ip_address = IPAddressField(
"IP address",
help_text="Specify the server's IP address.",
)
ssh_port = LimitedIntegerField(
"SSH port number",
min_value=1,
max_value=2**16-1,
default=22,
help_text="Specify the SSH port number to use."
)
# end
def is_alive(self):
"""Ping the server and check if it's alive.
@return: True if ping succeeds, False otherwise.
"""
ret = subprocess.call(
shlex.split("ping -c 1 -W 2 %s" % self.ip_address),
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT,
)
if ret == 0:
return True
else:
return False
def exec_command(self, command, **connection_info):
"""Connect to the server using an SSH session and execute a command.
@param command: The command to execute
@type command: C{str}
@param username: The username to use to connect to the server.
@type username: C{str}
@keyword connection_info: A dict of other info to pass to
C{paramiko.SSHClient.exec_command}.
@return: A (out, err) tuple that is the output read on the
stdout and stderr channels.
@rtype: C{tuple(str, str)}
"""
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
str(self.ip_address),
port=int(self.ssh_port),
**connection_info
)
_, sout, serr = client.exec_command(command)
o = sout.read()
e = serr.read()
client.close()
return o, e
def __unicode__(self):
return u"SSH server at IP %s" % self.ip_address
class SSHServerSliver(Sliver):
pass
class SSHSliceInfo(models.Model):
slice = models.OneToOneField(Slice)
public_key = models.TextField()
# SSHAggregate class
class SSHAggregate(Aggregate):
# SSHAggregate information field
information = "An aggregate of SSH servers that are controlled" \
" by a single administrator, to which users can request" \
" access. Once approved, users get SSH access to all" \
" machines using a public key they provide."
# SSHAggregate end information field
# SSHAggregate meta
class Meta:
verbose_name = "SSH Aggregate"
# SSHAggregate end meta
# SSHAggregate required fields
admin_username = models.CharField(max_length=255)
private_key = models.TextField()
# SSHAggregate end required fields
# SSHAggregate optional fields
add_user_command = models.TextField(
default="sh -c 'sudo useradd -m %(username)s'",
help_text="Specify the command to create a new user. " \
"'%(username)s' will be replaced by the user's " \
" username. The command should return non-zero on failure " \
" and 0 on success.",
)
del_user_command = models.TextField(
default="sh -c 'sudo userdel -r -f %(username)s'",
help_text="Specify the command to delete an existing user. " \
"'%(username)s' will be replaced by the user's " \
" username. The command should return non-zero on failure " \
" and 0 on success.",
)
add_pubkey_user_command = models.TextField(
default="sudo -u %(username)s mkdir /home/%(username)s/.ssh; "
"sudo -u %(username)s chmod 700 /home/%(username)s/.ssh; "
"sh -c 'sudo -u %(username)s echo %(pubkey)s >> "
"/home/%(username)s/.ssh/authorized_keys'",
help_text="Specify the command to add a public key to a user's " \
"account. '%(username)s' will be replaced by the user's " \
" username and '%(pubkey)s' will be replaced by the public key." \
" The command should return non-zero on failure " \
" and 0 on success.",
)
# SSHAggregate end optional fields
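    # Example substitution (hypothetical username/key): with the defaults above,
    #   add_user_command % {"username": "alice"}
    #     -> "sh -c 'sudo useradd -m alice'"
    #   add_pubkey_user_command % {"username": "alice", "pubkey": "ssh-rsa AAA..."}
    #     expands every %(username)s / %(pubkey)s occurrence the same way;
    #   _op_user() below appends "; echo $?" and checks the printed exit status.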
def _op_user(self, op, server, cmd_subs, quiet=False):
"""common code for adding/removing users."""
pkey_f = StringIO(self.private_key)
pkey = RSAKey.from_private_key(pkey_f)
pkey_f.close()
cmd = getattr(self, "%s_user_command" % op) % cmd_subs
cmd = cmd + "; echo $?"
out, err = server.exec_command(
cmd,
username=str(self.admin_username),
pkey=pkey,
)
lines = out.strip().split("\n")
ret = int(lines[-1])
if ret != 0:
error = "".join(lines[:-1])
            # msg example
            msg = "Failed to %s user on %s. Output was:\n%s" \
                % (op, server, error)
            if not quiet:
                post_message_to_current_user(
                    msg,
                    msg_type=DatedMessage.TYPE_ERROR,
                )
            # end msg example
            raise Exception(msg)
def add_user(self, server, username, pubkey, quiet=False):
"""Add a user to a server.
Add a user with username C{username} with public key C{pubkey} to
server C{server}.
@param server: The server to add the user to.
@type server: L{SSHServer}
@param username: the new user's username
@type username: C{str}
@param pubkey: The public key to add to the user's account.
@type pubkey: the public key's value a C{str}
@keyword quiet: If True, no messages will be sent on failure.
Defaults to False.
@type quiet: C{boolean}
"""
self._op_user("add", server, {"username": username}, quiet)
self._op_user(
"add_pubkey",
server,
{"username": username, "pubkey": pubkey},
quiet,
)
def del_user(self, server, username, quiet=False):
"""Remove user from a server.
Remove user with username C{username} from server C{server}.
@param server: The server to remove the user from.
@type server: L{SSHServer}
@param username: the user's username
@type username: C{str}
@keyword quiet: If True, no messages will be sent on failure.
Defaults to False.
@type quiet: C{boolean}
"""
self._op_user("del", server, {"username": username}, quiet)
def check_status(self):
return self.available and reduce(
lambda x, y: x and y.is_alive(),
            SSHServer.objects.filter(aggregate__id=self.id),
            True,
        )
# start_slice func
def start_slice(self, slice):
# start_slice call super
super(SSHAggregate, self).start_slice(slice)
# start_slice end call super
# start_slice get info
slice_info = SSHSliceInfo.objects.get(slice=slice)
# start_slice get user
user = slice.owner
# start_slice get slivers
slivers = SSHServerSliver.objects.filter(
slice=slice, resource__aggregate__id=self.id)
# start_slice end info
# start_slice loop
succeeded = []
for sliver in slivers:
# Execute the command on the server and get status
server = sliver.resource.as_leaf_class()
# start_slice add user
try:
self.add_user(server, user.username, slice_info.public_key)
except:
for s in succeeded:
try:
self.del_user(s, user.username)
except:
pass
raise
succeeded.append(server)
# start_slice end loop
def stop_slice(self, slice):
        super(SSHAggregate, self).stop_slice(slice)
user = threadlocals.get_thread_locals()["user"]
for sliver in SSHServerSliver.objects.filter(slice=slice):
server = sliver.resource.as_leaf_class()
try:
self.del_user(server, user.username)
except:
pass
|
{
"content_hash": "f1ff4b26d7f7d34b5e3672eb39ef6197",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 78,
"avg_line_length": 35.85140562248996,
"alnum_prop": 0.5735409432060042,
"repo_name": "dana-i2cat/felix",
"id": "4442c7cdcfc7b4f4455287ef3e1423e0b342cbf3",
"size": "8927",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "expedient/src/doc/expedient/source/developer/sshaggregate/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "DTrace",
"bytes": "370"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11211"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "7875883"
},
{
"name": "Shell",
"bytes": "258079"
}
],
"symlink_target": ""
}
|
import argparse
from flask import Flask, render_template
import app_config
from render_utils import make_context, urlencode_filter
import static
app = Flask(app_config.PROJECT_NAME)
app.jinja_env.filters['urlencode'] = urlencode_filter
# Example application views
@app.route('/')
def index():
"""
Example view demonstrating rendering a simple HTML page.
"""
return render_template('index.html', **make_context())
@app.route('/widget.html')
def widget():
"""
Embeddable widget example page.
"""
return render_template('widget.html', **make_context())
@app.route('/test_widget.html')
def test_widget():
"""
Example page displaying widget at different embed sizes.
"""
return render_template('test_widget.html', **make_context())
@app.route('/test/test.html')
def test_dir():
return render_template('index.html', **make_context())
app.register_blueprint(static.static)
# Boilerplate
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port')
args = parser.parse_args()
server_port = 8000
if args.port:
server_port = int(args.port)
app.run(host='0.0.0.0', port=server_port, debug=app_config.DEBUG)
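# Example (hypothetical): `python app.py --port 8001` serves the example views
# on http://0.0.0.0:8001/, with debug mode taken from app_config.DEBUG.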
|
{
"content_hash": "f38a05bba605eb4c5383f84172cd7fd5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 69,
"avg_line_length": 24.64,
"alnum_prop": 0.6704545454545454,
"repo_name": "nprapps/visits",
"id": "cd46d0cab0ec0ab0f39e923f1e342b710573ea3c",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272045"
},
{
"name": "JavaScript",
"bytes": "358082"
},
{
"name": "Python",
"bytes": "58091"
},
{
"name": "Shell",
"bytes": "83"
}
],
"symlink_target": ""
}
|
import pytest
def test_list_field_no_field():
    """ Assert that an error is raised if a ListField is given without an inner field type, for instance: ListField() """
from graphene_mongodb.fields.special_fields import list_field
from mongoengine import ListField
with pytest.raises(Exception) as e_info:
list_field('test_field', ListField())
assert str(e_info.value) == str(AttributeError('Error in {} field, have sure that this is defined with a '
'mongoengine field'.format('test_field')))
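# For comparison (hypothetical, not exercised here): a properly typed field
# such as ListField(StringField()) carries the inner mongoengine field that
# list_field needs, so no error would be raised.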
|
{
"content_hash": "05aaad4f7ee80b8c057ae3abb7602b5c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 110,
"avg_line_length": 42.53846153846154,
"alnum_prop": 0.6473779385171791,
"repo_name": "joaovitorsilvestre/graphene-mongo",
"id": "9d72a74856c999504d83c2cc1f45ae48fbd8e1c0",
"size": "553",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/fields/test_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62110"
}
],
"symlink_target": ""
}
|
""" QiBuild """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from qidoc.test.conftest import TestDocWorkTree
def test_convert_from_qi2(qidoc_action):
""" Test Convert From Qi 2 """
qidoc_action.add_test_project("qidoc2/with_src")
qidoc_action("convert", "--all")
doc_worktree = TestDocWorkTree()
assert len(doc_worktree.doc_projects) == 3
|
{
"content_hash": "6eb29a4bb8cf473869df0d171cc6fd82",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 31,
"alnum_prop": 0.7050691244239631,
"repo_name": "aldebaran/qibuild",
"id": "3fcfd06b91a4c49233eec97be179db50f983f293",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qidoc/test/test_qidoc_convert.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6892"
},
{
"name": "C++",
"bytes": "23130"
},
{
"name": "CMake",
"bytes": "292637"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1581825"
},
{
"name": "SWIG",
"bytes": "306"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
}
|
import ast
from sys import version_info
from pyflakes import messages as m, checker
from pyflakes.test.harness import TestCase, skipIf, skip
class Test(TestCase):
def test_undefined(self):
self.flakes('bar', m.UndefinedName)
def test_definedInListComp(self):
self.flakes('[a for a in range(10) if a]')
@skipIf(version_info < (3,),
'in Python 2 list comprehensions execute in the same scope')
def test_undefinedInListComp(self):
self.flakes('''
[a for a in range(10)]
a
''',
m.UndefinedName)
@skipIf(version_info < (3,),
'in Python 2 exception names stay bound after the except: block')
def test_undefinedExceptionName(self):
"""Exception names can't be used after the except: block.
The exc variable is unused inside the exception handler."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''', m.UndefinedName, m.UnusedVariable)
def test_namesDeclaredInExceptBlocks(self):
"""Locals declared in except: blocks can be used after the block.
This shows the example in test_undefinedExceptionName is
different."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
e = exc
e
''')
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringLocalVariable(self):
"""Exception names obscure locals, can't be used after.
Last line will raise UnboundLocalError on Python 3 after exiting
the except: block. Note next two examples for false positives to
watch out for."""
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
pass
exc
''',
m.UndefinedName)
@skipIf(version_info < (3,),
'in Python 2 exception names stay bound after the except: block')
def test_undefinedExceptionNameObscuringLocalVariable2(self):
"""Exception names are unbound after the `except:` block.
Last line will raise UnboundLocalError on Python 3 but would print out
've' on Python 2. The exc variable is unused inside the exception
handler."""
self.flakes('''
try:
raise ValueError('ve')
except ValueError as exc:
pass
print(exc)
exc = 'Original value'
''', m.UndefinedName, m.UnusedVariable)
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive1(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because it's only
entered if no exception was raised."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', *expected)
def test_delExceptionInExcept(self):
"""The exception name can be deleted in the except: block."""
self.flakes('''
try:
pass
except Exception as exc:
del exc
''')
def test_undefinedExceptionNameObscuringLocalVariableFalsePositive2(self):
"""Exception names obscure locals, can't be used after. Unless.
Last line will never raise UnboundLocalError because `error` is
only falsy if the `except:` block has not been entered."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', *expected)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable(self):
"""Exception names obscure globals, can't be used after.
Last line will raise UnboundLocalError on both Python 2 and
Python 3 because the existence of that exception name creates
a local scope placeholder for it, obscuring any globals, etc."""
self.flakes('''
exc = 'Original value'
def func():
try:
pass # nothing is raised
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
@skip('error reporting disabled due to false positives below')
def test_undefinedExceptionNameObscuringGlobalVariable2(self):
"""Exception names obscure globals, can't be used after.
Last line will raise NameError on Python 3 because the name is
locally unbound after the `except:` block, even if it's
nonlocal. We should issue an error in this case because code
only working correctly if an exception isn't raised, is invalid.
Unless it's explicitly silenced, see false positives below."""
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
pass # block never entered, exc stays unbound
exc
''',
m.UndefinedLocal)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive1(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because it's only entered
if no exception was raised."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
def func():
global exc
try:
raise ValueError('ve')
except ValueError as exc:
print('exception logged')
raise
exc
''', *expected)
def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive2(self):
"""Exception names obscure globals, can't be used after. Unless.
Last line will never raise NameError because `error` is only
falsy if the `except:` block has not been entered."""
# The exc variable is unused inside the exception handler.
expected = [] if version_info < (3,) else [m.UnusedVariable]
self.flakes('''
exc = 'Original value'
def func():
global exc
error = None
try:
raise ValueError('ve')
except ValueError as exc:
error = 'exception logged'
if error:
print(error)
else:
exc
''', *expected)
def test_functionsNeedGlobalScope(self):
self.flakes('''
class a:
def b():
fu
fu = 1
''')
def test_builtins(self):
self.flakes('range(10)')
def test_builtinWindowsError(self):
"""
C{WindowsError} is sometimes a builtin name, so no warning is emitted
for using it.
"""
self.flakes('WindowsError')
@skipIf(version_info < (3, 6), 'new feature in 3.6')
def test_moduleAnnotations(self):
"""
Use of the C{__annotations__} in module scope should not emit
an undefined name warning when version is greater than or equal to 3.6.
"""
self.flakes('__annotations__')
def test_magicGlobalsFile(self):
"""
Use of the C{__file__} magic global should not emit an undefined name
warning.
"""
self.flakes('__file__')
def test_magicGlobalsBuiltins(self):
"""
Use of the C{__builtins__} magic global should not emit an undefined
name warning.
"""
self.flakes('__builtins__')
def test_magicGlobalsName(self):
"""
Use of the C{__name__} magic global should not emit an undefined name
warning.
"""
self.flakes('__name__')
def test_magicGlobalsPath(self):
"""
Use of the C{__path__} magic global should not emit an undefined name
warning, if you refer to it from a file called __init__.py.
"""
self.flakes('__path__', m.UndefinedName)
self.flakes('__path__', filename='package/__init__.py')
def test_magicModuleInClassScope(self):
"""
Use of the C{__module__} magic builtin should not emit an undefined
name warning if used in class scope.
"""
self.flakes('__module__', m.UndefinedName)
self.flakes('''
class Foo:
__module__
''')
self.flakes('''
class Foo:
def bar(self):
__module__
''', m.UndefinedName)
@skipIf(version_info < (3, 3), "Python >= 3.3 only")
def test_magicQualnameInClassScope(self):
"""
Use of the C{__qualname__} magic builtin should not emit an undefined
name warning if used in class scope.
"""
self.flakes('__qualname__', m.UndefinedName)
self.flakes('''
class Foo:
__qualname__
''')
self.flakes('''
class Foo:
def bar(self):
__qualname__
''', m.UndefinedName)
def test_globalImportStar(self):
"""Can't find undefined names with import *."""
self.flakes('from fu import *; bar',
m.ImportStarUsed, m.ImportStarUsage)
@skipIf(version_info >= (3,), 'obsolete syntax')
def test_localImportStar(self):
"""
A local import * still allows undefined names to be found
in upper scopes.
"""
self.flakes('''
def a():
from fu import *
bar
''', m.ImportStarUsed, m.UndefinedName, m.UnusedImport)
@skipIf(version_info >= (3,), 'obsolete syntax')
def test_unpackedParameter(self):
"""Unpacked function parameters create bindings."""
self.flakes('''
def a((bar, baz)):
bar; baz
''')
def test_definedByGlobal(self):
"""
"global" can make an otherwise undefined name in another function
defined.
"""
self.flakes('''
def a(): global fu; fu = 1
def b(): fu
''')
self.flakes('''
def c(): bar
def b(): global bar; bar = 1
''')
def test_definedByGlobalMultipleNames(self):
"""
"global" can accept multiple names.
"""
self.flakes('''
def a(): global fu, bar; fu = 1; bar = 2
def b(): fu; bar
''')
def test_globalInGlobalScope(self):
"""
A global statement in the global scope is ignored.
"""
self.flakes('''
global x
def foo():
print(x)
''', m.UndefinedName)
def test_global_reset_name_only(self):
"""A global statement does not prevent other names being undefined."""
# Only different undefined names are reported.
# See following test that fails where the same name is used.
self.flakes('''
def f1():
s
def f2():
global m
''', m.UndefinedName)
@skip("todo")
def test_unused_global(self):
"""An unused global statement does not define the name."""
self.flakes('''
def f1():
m
def f2():
global m
''', m.UndefinedName)
def test_del(self):
"""Del deletes bindings."""
self.flakes('a = 1; del a; a', m.UndefinedName)
def test_delGlobal(self):
"""Del a global binding from a function."""
self.flakes('''
a = 1
def f():
global a
del a
a
''')
def test_delUndefined(self):
"""Del an undefined name."""
self.flakes('del a', m.UndefinedName)
def test_delConditional(self):
"""
Ignores conditional bindings deletion.
"""
self.flakes('''
context = None
test = True
if False:
del(test)
assert(test)
''')
def test_delConditionalNested(self):
"""
Ignored conditional bindings deletion even if they are nested in other
blocks.
"""
self.flakes('''
context = None
test = True
if False:
with context():
del(test)
assert(test)
''')
def test_delWhile(self):
"""
Ignore bindings deletion if called inside the body of a while
statement.
"""
self.flakes('''
def test():
foo = 'bar'
while False:
del foo
assert(foo)
''')
def test_delWhileTestUsage(self):
"""
Ignore bindings deletion if called inside the body of a while
statement and name is used inside while's test part.
"""
self.flakes('''
def _worker():
o = True
while o is not True:
del o
o = False
''')
def test_delWhileNested(self):
"""
Ignore bindings deletions if node is part of while's test, even when
del is in a nested block.
"""
self.flakes('''
context = None
def _worker():
o = True
while o is not True:
while True:
with context():
del o
o = False
''')
def test_globalFromNestedScope(self):
"""Global names are available from nested scopes."""
self.flakes('''
a = 1
def b():
def c():
a
''')
def test_laterRedefinedGlobalFromNestedScope(self):
"""
Test that referencing a local name that shadows a global, before it is
defined, generates a warning.
"""
self.flakes('''
a = 1
def fun():
a
a = 2
return a
''', m.UndefinedLocal)
def test_laterRedefinedGlobalFromNestedScope2(self):
"""
Test that referencing a local name in a nested scope that shadows a
global declared in an enclosing scope, before it is defined, generates
a warning.
"""
self.flakes('''
a = 1
def fun():
global a
def fun2():
a
a = 2
return a
''', m.UndefinedLocal)
def test_intermediateClassScopeIgnored(self):
"""
If a name defined in an enclosing scope is shadowed by a local variable
and the name is used locally before it is bound, an unbound local
warning is emitted, even if there is a class scope between the enclosing
scope and the local scope.
"""
self.flakes('''
def f():
x = 1
class g:
def h(self):
a = x
x = None
print(x, a)
print(x)
''', m.UndefinedLocal)
def test_doubleNestingReportsClosestName(self):
"""
Test that referencing a local name in a nested scope that shadows a
variable declared in two different outer scopes before it is defined
in the innermost scope generates an UnboundLocal warning which
refers to the nearest shadowed name.
"""
exc = self.flakes('''
def a():
x = 1
def b():
x = 2 # line 5
def c():
x
x = 3
return x
return x
return x
''', m.UndefinedLocal).messages[0]
# _DoctestMixin.flakes adds two lines preceding the code above.
expected_line_num = 7 if self.withDoctest else 5
self.assertEqual(exc.message_args, ('x', expected_line_num))
def test_laterRedefinedGlobalFromNestedScope3(self):
"""
Test that referencing a local name in a nested scope that shadows a
global, before it is defined, generates a warning.
"""
self.flakes('''
def fun():
a = 1
def fun2():
a
a = 1
return a
return a
''', m.UndefinedLocal)
def test_undefinedAugmentedAssignment(self):
self.flakes(
'''
def f(seq):
a = 0
seq[a] += 1
seq[b] /= 2
c[0] *= 2
a -= 3
d += 4
e[any] = 5
''',
m.UndefinedName, # b
m.UndefinedName, # c
m.UndefinedName, m.UnusedVariable, # d
m.UndefinedName, # e
)
def test_nestedClass(self):
"""Nested classes can access enclosing scope."""
self.flakes('''
def f(foo):
class C:
bar = foo
def f(self):
return foo
return C()
f(123).f()
''')
def test_badNestedClass(self):
"""Free variables in nested classes must bind at class creation."""
self.flakes('''
def f():
class C:
bar = foo
foo = 456
return foo
f()
''', m.UndefinedName)
def test_definedAsStarArgs(self):
"""Star and double-star arg names are defined."""
self.flakes('''
def f(a, *b, **c):
print(a, b, c)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_definedAsStarUnpack(self):
"""Star names in unpack are defined."""
self.flakes('''
a, *b = range(10)
print(a, b)
''')
self.flakes('''
*a, b = range(10)
print(a, b)
''')
self.flakes('''
a, *b, c = range(10)
print(a, b, c)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_usedAsStarUnpack(self):
"""
Star names in unpack are used if RHS is not a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = range(10)
''')
self.flakes('''
def f():
(*a, b) = range(10)
''')
self.flakes('''
def f():
[a, *b, c] = range(10)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_unusedAsStarUnpack(self):
"""
Star names in unpack are unused if RHS is a tuple/list literal.
"""
self.flakes('''
def f():
a, *b = any, all, 4, 2, 'un'
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
(*a, b) = [bool, int, float, complex]
''', m.UnusedVariable, m.UnusedVariable)
self.flakes('''
def f():
[a, *b, c] = 9, 8, 7, 6, 5, 4
''', m.UnusedVariable, m.UnusedVariable, m.UnusedVariable)
@skipIf(version_info < (3,), 'new in Python 3')
def test_keywordOnlyArgs(self):
"""Keyword-only arg names are defined."""
self.flakes('''
def f(*, a, b=None):
print(a, b)
''')
self.flakes('''
import default_b
def f(*, a, b=default_b):
print(a, b)
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_keywordOnlyArgsUndefined(self):
"""Typo in kwonly name."""
self.flakes('''
def f(*, a, b=default_c):
print(a, b)
''', m.UndefinedName)
@skipIf(version_info < (3,), 'new in Python 3')
def test_annotationUndefined(self):
"""Undefined annotations."""
self.flakes('''
from abc import note1, note2, note3, note4, note5
def func(a: note1, *args: note2,
b: note3=12, **kw: note4) -> note5: pass
''')
self.flakes('''
def func():
d = e = 42
def func(a: {1, d}) -> (lambda c: e): pass
''')
@skipIf(version_info < (3,), 'new in Python 3')
def test_metaClassUndefined(self):
self.flakes('''
from abc import ABCMeta
class A(metaclass=ABCMeta): pass
''')
def test_definedInGenExp(self):
"""
Using the loop variable of a generator expression results in no
warnings.
"""
self.flakes('(a for a in [1, 2, 3] if a)')
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if b)')
def test_undefinedInGenExpNested(self):
"""
The loop variables of generator expressions nested together are
not defined in the other generator.
"""
self.flakes('(b for b in (a for a in [1, 2, 3] if b) if b)',
m.UndefinedName)
self.flakes('(b for b in (a for a in [1, 2, 3] if a) if a)',
m.UndefinedName)
def test_undefinedWithErrorHandler(self):
"""
Some compatibility code checks explicitly for NameError.
It should not trigger warnings.
"""
self.flakes('''
try:
socket_map
except NameError:
socket_map = {}
''')
self.flakes('''
try:
_memoryview.contiguous
except (NameError, AttributeError):
raise RuntimeError("Python >= 3.3 is required")
''')
# If NameError is not explicitly handled, generate a warning
self.flakes('''
try:
socket_map
except:
socket_map = {}
''', m.UndefinedName)
self.flakes('''
try:
socket_map
except Exception:
socket_map = {}
''', m.UndefinedName)
def test_definedInClass(self):
"""
Defined name for generator expressions and dict/set comprehension.
"""
self.flakes('''
class A:
T = range(10)
Z = (x for x in T)
L = [x for x in T]
B = dict((i, str(i)) for i in T)
''')
self.flakes('''
class A:
T = range(10)
X = {x for x in T}
Y = {x:x for x in T}
''')
def test_definedInClassNested(self):
"""Defined name for nested generator expressions in a class."""
self.flakes('''
class A:
T = range(10)
Z = (x for x in (a for a in T))
''')
def test_undefinedInLoop(self):
"""
The loop variable is defined after the expression is computed.
"""
self.flakes('''
for i in range(i):
print(i)
''', m.UndefinedName)
self.flakes('''
[42 for i in range(i)]
''', m.UndefinedName)
self.flakes('''
(42 for i in range(i))
''', m.UndefinedName)
def test_definedFromLambdaInDictionaryComprehension(self):
"""
Defined name referenced from a lambda function within a dict/set
comprehension.
"""
self.flakes('''
{lambda: id(x) for x in range(10)}
''')
def test_definedFromLambdaInGenerator(self):
"""
Defined name referenced from a lambda function within a generator
expression.
"""
self.flakes('''
any(lambda: id(x) for x in range(10))
''')
def test_undefinedFromLambdaInDictionaryComprehension(self):
"""
Undefined name referenced from a lambda function within a dict/set
comprehension.
"""
self.flakes('''
{lambda: id(y) for x in range(10)}
''', m.UndefinedName)
def test_undefinedFromLambdaInComprehension(self):
"""
Undefined name referenced from a lambda function within a generator
expression.
"""
self.flakes('''
any(lambda: id(y) for x in range(10))
''', m.UndefinedName)
def test_dunderClass(self):
"""
`__class__` is defined in class scope under Python 3, but is not
in Python 2.
"""
code = '''
class Test(object):
def __init__(self):
print(__class__.__name__)
self.x = 1
t = Test()
'''
if version_info < (3,):
self.flakes(code, m.UndefinedName)
else:
self.flakes(code)
class NameTests(TestCase):
"""
Tests for some extra cases of name handling.
"""
def test_impossibleContext(self):
"""
A Name node with an unrecognized context results in a RuntimeError being
raised.
"""
tree = ast.parse("x = 10")
file_tokens = checker.make_tokens("x = 10")
# Make it into something unrecognizable.
tree.body[0].targets[0].ctx = object()
self.assertRaises(RuntimeError, checker.Checker, tree, file_tokens=file_tokens)
|
{
"content_hash": "d41547043135198fde70a3dc0f7bb11f",
"timestamp": "",
"source": "github",
"line_count": 871,
"max_line_length": 87,
"avg_line_length": 29.62686567164179,
"alnum_prop": 0.5185041658593296,
"repo_name": "sonntagsgesicht/regtest",
"id": "e0e628dc296649f6391d5a5f503c9f53e712514c",
"size": "25805",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/pyflakes/test/test_undefined_names.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse
from django.utils import unittest
from django.test.client import Client
from postleware import PostResponseCachebusterMiddleware
class PostResponseMiddleware(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_header_added_when_necessary(self):
# 'Cache-Control: no-cache' is added to POSTs
response = self.client.post('/test1', {'foo':'bar'})
self.assertEqual(response['Cache-Control'], 'no-cache')
# 'Cache-Control' is NOT added to GETs
response = self.client.get('/test1')
self.assertFalse(response.has_header('Cache-Control'))
def test_header_not_added_when_present(self):
middleware = PostResponseCachebusterMiddleware()
test_header_setting = 'test-setting'
raw_response = HttpResponse()
# 'Cache-Control' header isn't modified when present on POSTs
request = MockRequest('POST')
raw_response['Cache-Control'] = test_header_setting
response = middleware.process_response(request, raw_response)
self.assertEqual(response['Cache-Control'], test_header_setting)
# 'Cache-Control' header isn't modified when present on GETs
request = MockRequest('GET')
raw_response['Cache-Control'] = test_header_setting
response = middleware.process_response(request, raw_response)
self.assertEqual(response['Cache-Control'], test_header_setting)
class MockRequest(object):
def __init__(self, method=None):
self.method = method
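# A minimal sketch (an assumption, not the actual postleware source) of the
# process_response behaviour the tests above expect: add
# 'Cache-Control: no-cache' to POST responses unless the header is already set.
def sketch_process_response(request, response):
    # Only POST responses are touched; an existing header is left alone.
    if request.method == 'POST' and not response.has_header('Cache-Control'):
        response['Cache-Control'] = 'no-cache'
    return response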
|
{
"content_hash": "fe0ffe47f079e465482c7c94f8b08cc5",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 39.025,
"alnum_prop": 0.6860986547085202,
"repo_name": "thurloat/django-postleware",
"id": "f082e268965011ef766791aca3fd9a01d4ef2343",
"size": "1561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postleware/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10102"
}
],
"symlink_target": ""
}
|
"""
Test for the automatic completion text editor.
"""
import unittest, sys
from PyQt4 import QtCore, QtGui
from datafinder.core.search_restriction import SearchRestrictionParser
from datafinder.gui.user.dialogs.search_dialog.search_query_editor import SearchQueryEditor, SearchQueryAnalyzer
__version__ = "$Revision-Id:$"
class SearchQueryEditorTestCase(unittest.TestCase):
"""
Tests the auto completion text edit module.
"""
_availableProperties = ["Content", "Date time", "Content type descriptor"]
_availableComparisonOperators = ["=", "<", ">", ">=", "<=", "is"]
    _availableConjunctionsOperators = ["AND", "OR"]
_application = QtGui.QApplication(sys.argv)
def setUp(self):
""" Setups the test fixture. """
self.autoComplEdit = SearchQueryEditor(None)
self.autoComplEdit.registerCompleter(QtGui.QCompleter(self._availableProperties),
SearchQueryAnalyzer.PROPERTY_TYPE)
completer = QtGui.QCompleter(self._availableComparisonOperators)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.COMPARISON_TYPE)
        completer = QtGui.QCompleter(self._availableConjunctionsOperators)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.autoComplEdit.registerCompleter(completer, SearchQueryAnalyzer.CONJUNCTION_TYPE)
self.autoComplEdit.registerCompleter(QtGui.QCompleter(["''"]),
SearchQueryAnalyzer.LITERAL_TYPE)
self._searchQueryAnalyzer = SearchQueryAnalyzer(SearchRestrictionParser(), dict())
self.autoComplEdit._searchQueryAnalyzer = self._searchQueryAnalyzer
def testPropertyCompletion(self):
""" Tests auto completion for property names. """
self.autoComplEdit.setText("Con")
self._requestAutocompletionAtPosition(3)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
self.autoComplEdit.setText("Conz")
self._requestAutocompletionAtPosition(4)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "")
def _requestAutocompletionAtPosition(self, position):
""" Sets the cursor position in the text editor. """
textCursor = self.autoComplEdit.textCursor()
textCursor.setPosition(position)
self.autoComplEdit.setTextCursor(textCursor)
self._searchQueryAnalyzer.analyze(unicode(self.autoComplEdit.toPlainText()))
self.autoComplEdit.keyPressEvent(QtGui.QKeyEvent(QtCore.QEvent.KeyPress, QtCore.Qt.Key_Space, QtCore.Qt.ControlModifier))
def testConjunctionCompletion(self):
""" Tests the completion of conjunction operators. """
self.autoComplEdit.setText("Content = 'tada' ")
self._requestAutocompletionAtPosition(17)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
def testComparisonCompletion(self):
""" Tests the completion of comparison operators. """
self.autoComplEdit.setText("Content >")
self._requestAutocompletionAtPosition(9)
completions = list()
i = 0
while self.autoComplEdit.completer().setCurrentRow(i):
completions.append(self.autoComplEdit.completer().currentCompletion())
i += 1
self.assertEquals(completions, [">", ">="])
def testPropertyCompletionAfterConjunction(self):
""" Tests auto completion for property names after a conjunction. """
self.autoComplEdit.setText("Content = 'tada' and C")
self._requestAutocompletionAtPosition(22)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
def testMultipleCompletion(self):
""" Tests the multiple use of auto completion in one query. """
self.autoComplEdit.setText("")
self._requestAutocompletionAtPosition(0)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")
self._requestAutocompletionAtPosition(7)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content ")
self._requestAutocompletionAtPosition(8)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = ")
self._requestAutocompletionAtPosition(10)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self._requestAutocompletionAtPosition(0)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self._requestAutocompletionAtPosition(4)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "Content")
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = '' ")
self.autoComplEdit.setText("Content = '' ")
self._requestAutocompletionAtPosition(8)
self.autoComplEdit.insertCompletion(self.autoComplEdit.completer().currentCompletion())
self.assertEquals(self.autoComplEdit.toPlainText(), "Content = = '' ")
def testConjunctionRecognition(self):
""" Tests the recognition of conjunction terms when already a character is typed. """
self.autoComplEdit.setText("Content = 'Peter hhg' o")
self._requestAutocompletionAtPosition(23)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "OR")
def testConjunctionRecognitionWithNoTokenUnderCursor(self):
""" Tests the recognition of conjunction terms with no token under the cursor. """
self.autoComplEdit.setText("Content = 'Peter hhg' AND Content = 'Peter hhg'")
self._requestAutocompletionAtPosition(22)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
def testConjunctionRecognitionWithTokenUnderCursor(self):
""" Tests the recognition of conjunction terms with token under the cursor. """
self.autoComplEdit.setText("Content = 'Peter hhg' AND NOT Content = 'Peter hhg'")
self._requestAutocompletionAtPosition(24)
self.assertEquals(self.autoComplEdit.completer().currentCompletion(), "AND")
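# A standalone sketch (not part of the DataFinder suite) of the PyQt4
# QCompleter API the fixtures above rely on: feed it a word list, set a
# prefix, and read back the current completion. sys, QtCore and QtGui are
# already imported at the top of this module.
if __name__ == "__main__":
    app = QtGui.QApplication.instance() or QtGui.QApplication(sys.argv)
    completer = QtGui.QCompleter(["AND", "OR", "NOT"])
    completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
    completer.setCompletionPrefix("a")
    print(completer.currentCompletion())  # expected: "AND"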
|
{
"content_hash": "9b3852523352d29c61e1f2e1a461540e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 129,
"avg_line_length": 47.35616438356164,
"alnum_prop": 0.689470639282615,
"repo_name": "DLR-SC/DataFinder",
"id": "cc4713fa8c9b2a35b248abc9b57450b114f69d36",
"size": "8610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/gui/user/dialogs/search_dialog/search_query_editor_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Xml;
namespace SeriesNamer.tvdb
{
public class Series
{
private readonly string seriesid;
private readonly string name;
public Series(string id, string name)
{
this.seriesid = id;
this.name = name;
}
public string Id
{
get
{
return seriesid;
}
}
public IEnumerable<Season> Seasons
{
get
{
foreach (KeyValuePair<int, Season> k in seasons)
{
yield return k.Value;
}
}
}
Dictionary<int, Season> seasons = new Dictionary<int, Season>();
public void add(Season s)
{
seasons.Add(s.Index, s);
}
public string Name
{
get
{
return name;
}
}
internal Season getSeason(int s)
{
if (false == seasons.ContainsKey(s)) return null;
return seasons[s];
}
internal void parse(XmlElement root)
{
foreach(XmlElement episode in Xml.ElementsNamed(root, "Episode") )
{
string seasonName = Xml.GetTextOfSubElement(episode, "Combined_season");
Season s = getOrCreateSeason(seasonName);
string episodeName = Xml.GetTextOfSubElement(episode, "EpisodeNumber").Trim();
string title = Xml.GetTextOfSubElementOrNull(episode, "EpisodeName");
string overview = Xml.GetTextOfSubElementOrNull(episode, "Overview");
int episodeId = int.Parse(episodeName);
s.add(new Episode(episodeId, title, overview));
}
}
private Season getOrCreateSeason(string seasonName)
{
int season = int.Parse(seasonName);
Season s = getSeason(season);
if (s != null) return s;
s = new Season(season);
seasons.Add(season, s);
return s;
}
}
}
|
{
"content_hash": "78f65325c296cc4b6144b7489b56b762",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 94,
"avg_line_length": 25.686046511627907,
"alnum_prop": 0.504753282028067,
"repo_name": "madeso/prettygood",
"id": "e47cc8c3386407b70daa42f15e668e1d5fa6c9df",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dotnet/SeriesNamer/tvdb/Series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "384285"
}
],
"symlink_target": ""
}
|
from pyon import loads, dumps
__all__ = ['Node', 'Leaf', 'resolverName']
def resolverName(name):
"""
Return new function for creation
of node or leaf with a given tag name
or return existing one from the cache.
"""
def func(value=None, tag=name):
if value is None:
e = Node(tag)
else:
e = Leaf(tag, value)
return e
func.__name__ = name
func.__module__ = '__cache__'
return func
class Leaf(object):
def __init__(self, _tag, _value, **kw):
self._tag = _tag
self._value = _value
self.__dict__.update(kw)
#
@property
def tag(self):
return self._tag
#
@property
def value(self):
return self._value
#
def __reduce__(self):
_dict = dict(self.__dict__)
_tag = _dict.pop('_tag')
_value = _dict.pop('_value')
return resolverName(_tag), (_value,), _dict
#
def __setstate__(self, state):
self.__dict__.update(state)
class Node(object):
#
def __init__(self, _tag, **kw):
self._tag = _tag
self.__dict__.update(kw)
self._children = []
#
@property
def tag(self):
return self._tag
#
@property
def children(self):
return self._children
#
def append(self, element):
self._children.append(element)
#
def __reduce__(self):
_dict = dict(self.__dict__)
_tag = _dict.pop('_tag')
_children = _dict.pop('_children')
return resolverName(_tag), (), _dict, _children
#
def __setstate__(self, state):
self.__dict__.update(state)
if __name__ == '__main__':
res = loads("""
menu(id='File',
*[ item(id='Open', action='OpenFile'),
item(id='Close', action='CloseFile'),
menu(id='SendTo',
*[ item(id='Desktop', action='SendToDesktop'),
item(id='RemoteDesktop', action='SendToRemoteDesktop')
]
)
]
)
""", resolver=resolverName)
print(dumps(res, pretty=True))
res = loads("""
menu(id='File',
*[ item(id='Open', action='OpenFile'),
item(id='Close', action='CloseFile'),
menu(id='SendTo',
*[ item(id='Desktop', action='SendToDesktop'),
item(id='RemoteDesktop', action='SendToRemoteDesktop')
]
)
]
)
""", safe=True)
print(dumps(res, pretty=True, fast=True))
|
{
"content_hash": "4c1c7b5309cdefcfa84d24b8d983176e",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 64,
"avg_line_length": 24.584158415841586,
"alnum_prop": 0.5058397100281917,
"repo_name": "intellimath/pyon",
"id": "220dadb7810385f0575741826fd530023dccf81d",
"size": "2483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/etree/etree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61881"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest
else:
from test.test_utils import test_not_implemented, unittest
import pygame
import time
Clock = pygame.time.Clock
################################################################################
class ClockTypeTest(unittest.TestCase):
def test_construction(self):
c = Clock()
self.assert_(c, "Clock can be constructed")
def todo_test_get_fps(self):
# __doc__ (as of 2008-08-02) for pygame.time.Clock.get_fps:
# Clock.get_fps(): return float
# compute the clock framerate
#
# Compute your game's framerate (in frames per second). It is computed
# by averaging the last few calls to Clock.tick().
#
self.fail()
# delay_per_frame = 1 / 100.0
#
# c = Clock()
#
# for f in range(100):
# c.tick()
# time.sleep(delay_per_frame)
#
# self.assert_(99.0 < c.get_fps() < 101.0)
def todo_test_get_rawtime(self):
# __doc__ (as of 2008-08-02) for pygame.time.Clock.get_rawtime:
# Clock.get_rawtime(): return milliseconds
# actual time used in the previous tick
#
# Similar to Clock.get_time(), but this does not include any time used
# while Clock.tick() was delaying to limit the framerate.
#
self.fail()
def todo_test_get_time(self):
# __doc__ (as of 2008-08-02) for pygame.time.Clock.get_time:
# Clock.get_time(): return milliseconds
# time used in the previous tick
#
# Returns the parameter passed to the last call to Clock.tick(). It is
# the number of milliseconds passed between the previous two calls to
# Pygame.tick().
#
self.fail()
# c = Clock()
# c.tick() #between here
# time.sleep(0.02)
# #get_time()
# c.tick() # here
#
# time.sleep(0.02)
#
# self.assert_(20 <= c.get_time() <= 30)
def todo_test_tick(self):
# __doc__ (as of 2008-08-02) for pygame.time.Clock.tick:
# Clock.tick(framerate=0): return milliseconds
# control timer events
# update the clock
#
# This method should be called once per frame. It will compute how
# many milliseconds have passed since the previous call.
#
# If you pass the optional framerate argument the function will delay
# to keep the game running slower than the given ticks per second.
# This can be used to help limit the runtime speed of a game. By
# calling Clock.tick(40) once per frame, the program will never run at
# more than 40 frames per second.
#
# Note that this function uses SDL_Delay function which is not
# accurate on every platform, but does not use much cpu. Use
# tick_busy_loop if you want an accurate timer, and don't mind chewing
# cpu.
#
self.fail()
# collection = []
# c = Clock()
#
# c.tick()
# for i in range(100):
# time.sleep(0.005)
# collection.append(c.tick())
#
# for outlier in [min(collection), max(collection)]:
# if outlier != 5: collection.remove(outlier)
#
# self.assert_(sum(collection) / len(collection) == 5)
def todo_test_tick_busy_loop(self):
# __doc__ (as of 2008-08-02) for pygame.time.Clock.tick_busy_loop:
# Clock.tick_busy_loop(framerate=0): return milliseconds
# control timer events
# update the clock
#
# This method should be called once per frame. It will compute how
# many milliseconds have passed since the previous call.
#
# If you pass the optional framerate argument the function will delay
# to keep the game running slower than the given ticks per second.
# This can be used to help limit the runtime speed of a game. By
# calling Clock.tick(40) once per frame, the program will never run at
# more than 40 frames per second.
#
# Note that this function uses pygame.time.delay, which uses lots of
# cpu in a busy loop to make sure that timing is more acurate.
#
# New in pygame 1.8.0.
self.fail()
class TimeModuleTest(unittest.TestCase):
def todo_test_delay(self):
# __doc__ (as of 2008-08-02) for pygame.time.delay:
# pygame.time.delay(milliseconds): return time
# pause the program for an amount of time
#
# Will pause for a given number of milliseconds. This function will
# use the processor (rather than sleeping) in order to make the delay
# more accurate than pygame.time.wait().
#
# This returns the actual number of milliseconds used.
self.fail()
def todo_test_get_ticks(self):
# __doc__ (as of 2008-08-02) for pygame.time.get_ticks:
# pygame.time.get_ticks(): return milliseconds
# get the time in milliseconds
#
# Return the number of millisconds since pygame.init() was called.
# Before pygame is initialized this will always be 0.
#
self.fail()
def todo_test_set_timer(self):
# __doc__ (as of 2008-08-02) for pygame.time.set_timer:
# pygame.time.set_timer(eventid, milliseconds): return None
# repeatedly create an event on the event queue
#
# Set an event type to appear on the event queue every given number of
# milliseconds. The first event will not appear until the amount of
# time has passed.
#
# Every event type can have a separate timer attached to it. It is
# best to use the value between pygame.USEREVENT and pygame.NUMEVENTS.
#
# To disable the timer for an event, set the milliseconds argument to 0.
self.fail()
def todo_test_wait(self):
# __doc__ (as of 2008-08-02) for pygame.time.wait:
# pygame.time.wait(milliseconds): return time
# pause the program for an amount of time
#
# Will pause for a given number of milliseconds. This function sleeps
# the process to share the processor with other programs. A program
# that waits for even a few milliseconds will consume very little
# processor time. It is slightly less accurate than the
# pygame.time.delay() function.
#
# This returns the actual number of milliseconds used.
self.fail()
################################################################################
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "1406dd4d1d86a83c56606c7c1477577d",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 83,
"avg_line_length": 35.19457013574661,
"alnum_prop": 0.5371560812548213,
"repo_name": "JRock007/boxxy",
"id": "c79c4db08cd8e876c600ad45c2acde6181f0a145",
"size": "7860",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dist/Boxxy.app/Contents/Resources/lib/python2.7/pygame/tests/time_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "662738"
},
{
"name": "C++",
"bytes": "119316"
},
{
"name": "FORTRAN",
"bytes": "7414"
},
{
"name": "Java",
"bytes": "11586"
},
{
"name": "Python",
"bytes": "12647184"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
import getpass
import json
import requests
import util
import auth
import compose
from service import Service
from backup import Backup
from organization import Organization
from execute import execute
def login(username, password, cloud, endpoint):
if not username:
username = raw_input('Username: ')
if not password:
password = getpass.getpass()
api_endpoint = endpoint
if api_endpoint is None:
api_endpoint = auth.get_api_endpoint(cloud)
url = api_endpoint + 'generate-api-token/'
payload = {'username': username, 'password': password}
r = requests.post(url, payload)
util.check_response(r)
d = json.loads(r.text)
token = d['token']
auth.save_token(api_endpoint, token, username)
print '[alauda] Successfully logged in as {}.'.format(username)
def logout():
auth.delete_token()
print '[alauda] Bye'
def service_create(image, name, start, target_num_instances, instance_size, run_command, env, ports, exposes,
volumes, links, namespace, scaling_info, custom_domain_name, region_name):
image_name, image_tag = util.parse_image_name_tag(image)
instance_ports, port_list = util.parse_instance_ports(ports)
expose_list = util.merge_internal_external_ports(port_list, exposes)
instance_ports.extend(expose_list)
instance_envvars = util.parse_envvars(env)
links = util.parse_links(links)
volumes = util.parse_volumes(volumes)
scaling_mode, scaling_cfg = util.parse_autoscale_info(scaling_info)
if scaling_mode is None:
scaling_mode = 'MANUAL'
service = Service(name=name,
image_name=image_name,
image_tag=image_tag,
target_num_instances=target_num_instances,
instance_size=instance_size,
run_command=run_command,
instance_ports=instance_ports,
instance_envvars=instance_envvars,
volumes=volumes,
links=links,
namespace=namespace,
scaling_mode=scaling_mode,
autoscaling_config=scaling_cfg,
custom_domain_name=custom_domain_name,
region_name=region_name)
if start:
service.run()
else:
service.create()
def service_inspect(name, namespace):
service = Service.fetch(name, namespace)
result = service.inspect()
util.print_json_result(result)
def service_start(name, namespace):
service = Service.fetch(name, namespace)
service.start()
def service_stop(name, namespace):
service = Service.fetch(name, namespace)
service.stop()
def service_rm(name, namespace):
Service.remove(name, namespace)
def service_ps(namespace, page):
service_list = Service.list(namespace, page)
util.print_ps_output(service_list)
def service_scale(descriptor, namespace):
scale_dict = util.parse_scale(descriptor)
for service_name, service_num in scale_dict.items():
service = Service.fetch(service_name, namespace)
service.scale(service_num)
def service_enable_autoscaling(name, namespace, autoscaling_config):
_, scaling_cfg = util.parse_autoscale_info(('AUTO', autoscaling_config))
service = Service.fetch(name, namespace)
service.enable_autoscaling(scaling_cfg)
def service_disable_autoscaling(name, namespace, target_num_instances):
service = Service.fetch(name, namespace)
service.disable_autoscaling(target_num_instances)
def service_logs(name, namespace, start_time, end_time):
service = Service.fetch(name, namespace)
result = service.logs(start_time, end_time)
util.print_logs(result)
## added by letian
def service_exec(ssh_client, namespace, is_verbose, container, command):
execute(ssh_client, namespace, is_verbose, container, command)
def instance_ps(name, namespace):
service = Service.fetch(name, namespace)
instance_list = service.list_instances()
util.print_instance_ps_output(instance_list)
def instance_inspect(name, uuid, namespace):
service = Service.fetch(name, namespace)
instance = service.get_instance(uuid)
result = instance.inspect()
util.print_json_result(result)
def instance_logs(name, uuid, namespace, start_time=None, end_time=None):
service = Service.fetch(name, namespace)
instance = service.get_instance(uuid)
result = instance.logs(start_time, end_time)
util.print_logs(result)
def compose_up(file, strict, namespace, region):
project = compose.load_project(file, namespace, region)
if strict:
project.strict_up()
else:
project.up()
def compose_ps(file, namespace):
project = compose.load_project(file, namespace, None)
project.ps(namespace)
def compose_start(file, strict, namespace):
project = compose.load_project(file, namespace, None)
if strict:
project.strict_start()
else:
project.start()
def compose_stop(file, namespace):
project = compose.load_project(file, namespace, None)
project.stop()
def compose_restart(file, strict, namespace):
project = compose.load_project(file, namespace, None)
if strict:
project.strict_restart()
else:
project.restart()
def compose_rm(file, namespace):
project = compose.load_project(file, namespace, None)
project.rm(namespace)
def compose_scale(descriptor, file, namespace):
project = compose.load_project(file, namespace, None)
scale_dict = util.parse_scale(descriptor)
project.scale(scale_dict, namespace)
def backup_create(name, service_name, mounted_dir, namespace):
service = Service.fetch(service_name, namespace)
backup = Backup(service=service, name=name, mounted_dir=mounted_dir)
backup.create()
def backup_list(namespace):
backup_list = Backup.list(namespace)
util.print_backup_ps_output(backup_list)
def backup_inspect(id, namespace):
backup = Backup.fetch(id, namespace)
result = backup.inspect()
util.print_json_result(result)
def backup_rm(id, namespace):
Backup.remove(id, namespace)
def organization_create(name, company):
orgs = Organization(name=name, company=company)
orgs.create()
def organization_list():
orgs_list = Organization.list()
util.print_organization_ps_output(orgs_list)
def organization_inspect(name):
orgs = Organization.fetch(name)
result = orgs.inspect()
util.print_json_result(result)
def organization_update(name, company):
orgs = Organization.fetch(name)
orgs.update(company)
|
{
"content_hash": "dfada720e15202170af836252eb767d7",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 109,
"avg_line_length": 29.105263157894736,
"alnum_prop": 0.6767631103074141,
"repo_name": "someus/alauda-CLI",
"id": "2c950cb0c5f1273c91c6c5e5e268eb64009b0d05",
"size": "6636",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alaudacli/commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "972"
},
{
"name": "Python",
"bytes": "104054"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
u"""
.. module:: organizations
"""
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.text import slugify
from django.views.generic import View
from apps.volontulo.forms import VolounteerToOrganizationContactForm
from apps.volontulo.lib.email import send_mail
from apps.volontulo.models import Offer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
from apps.volontulo.utils import correct_slug
def organizations_list(request):
u"""View responsible for listing all organizations.
:param request: WSGIRequest instance
"""
organizations = Organization.objects.all()
return render(
request,
"organizations/list.html",
{'organizations': organizations},
)
class OrganizationsCreate(View):
u"""Class view supporting creation of new organization."""
@staticmethod
@login_required
def get(request):
u"""Method responsible for rendering form for new organization."""
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
@staticmethod
@login_required
def post(request):
u"""Method responsible for saving new organization."""
if not (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
messages.error(
request,
u"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
organization = Organization(
name=request.POST.get('name'),
address=request.POST.get('address'),
description=request.POST.get('description'),
)
organization.save()
request.user.userprofile.organizations.add(organization)
messages.success(
request,
u"Organizacja została dodana."
)
return redirect(
'organization_view',
slug=slugify(organization.name),
id_=organization.id,
)
@correct_slug(Organization, 'organization_form', 'name')
@login_required
# pylint: disable=unused-argument
def organization_form(request, slug, id_):
u"""View responsible for editing organization.
Edition will only work, if logged user has been registered as organization.
"""
org = Organization.objects.get(pk=id_)
users = [profile.user.email for profile in org.userprofiles.all()]
if (
request.user.is_authenticated() and
request.user.email not in users
):
messages.error(
request,
u'Nie masz uprawnień do edycji tej organizacji.'
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
if not (
request.user.is_authenticated() and
UserProfile.objects.get(user=request.user).organizations
):
return redirect('homepage')
if request.method == 'POST':
if (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
org.name = request.POST.get('name')
org.address = request.POST.get('address')
org.description = request.POST.get('description')
org.save()
messages.success(
request,
u'Oferta została dodana/zmieniona.'
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
else:
messages.error(
request,
u"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': org},
)
@correct_slug(Organization, 'organization_view', 'name')
# pylint: disable=unused-argument
def organization_view(request, slug, id_):
u"""View responsible for viewing organization."""
org = get_object_or_404(Organization, id=id_)
offers = Offer.objects.filter(organization_id=id_)
allow_contact = True
allow_edit = False
allow_offer_create = False
if (
request.user.is_authenticated() and
request.user.userprofile in org.userprofiles.all()
):
allow_contact = False
allow_edit = True
allow_offer_create = True
if request.method == 'POST':
form = VolounteerToOrganizationContactForm(request.POST)
if form.is_valid():
# send email to first organization user (I assume it's main user)
profile = Organization.objects.get(id=id_).userprofiles.all()[0]
send_mail(
request,
'volunteer_to_organisation',
[
profile.user.email,
request.POST.get('email'),
],
{k: v for k, v in request.POST.items()},
)
messages.success(request, u'Email został wysłany.')
else:
messages.error(
request,
u"Formularz zawiera nieprawidłowe dane: {}".format(form.errors)
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': form,
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
},
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': VolounteerToOrganizationContactForm(),
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
}
)
|
{
"content_hash": "d34c1a5b4ee183e309244e6563a51b33",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 31.745192307692307,
"alnum_prop": 0.5673178858094805,
"repo_name": "nbbn/volontulo",
"id": "a93d86b4f50cad0d2a7038282d17d4419fc98d97",
"size": "6642",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "apps/volontulo/views/organizations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19780"
},
{
"name": "HTML",
"bytes": "83562"
},
{
"name": "JavaScript",
"bytes": "1714"
},
{
"name": "Python",
"bytes": "185832"
},
{
"name": "Shell",
"bytes": "1067"
}
],
"symlink_target": ""
}
|
"""
Factorial digit sum
Computes the sum of the digits of nbr!
Konstantinos Ameranis 10.9.2013
"""
def factorial( n ):
if n == 1:
return 1
return n * factorial( n - 1 )
nbr = input("Enter the number: ")
nbr = factorial(nbr)
summ = 0
while nbr:
summ += nbr % 10
nbr /= 10
print summ
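# A version-agnostic cross-check (an addition, not part of the original
# solution, which is Python 2 only): Python integers are arbitrary precision,
# so math.factorial plus a digit sum over str() gives the same result without
# recursion or manual division.
from math import factorial
print(sum(int(digit) for digit in str(factorial(100))))  # Project Euler 20: prints 648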
|
{
"content_hash": "630725590c6fd736162ae7d6ea0db146",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 38,
"avg_line_length": 17.11111111111111,
"alnum_prop": 0.6136363636363636,
"repo_name": "kameranis/Project_Euler",
"id": "b7daa521dbaf13701256a0cb2a59a9d0f030f586",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problem 20 - Factorial digit sum/fds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3622"
},
{
"name": "C++",
"bytes": "437"
},
{
"name": "Python",
"bytes": "7057"
}
],
"symlink_target": ""
}
|
"""Fail if the C extension module doesn't exist.
Only really intended to be used by internal build scripts.
"""
import sys
sys.path[0:0] = [""]
import bson
import pymongo
if not pymongo.has_c() or not bson.has_c():
sys.exit("could not load C extensions")
|
{
"content_hash": "72e4d729d68e1fecc69fec12b6f05092",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 20.23076923076923,
"alnum_prop": 0.6996197718631179,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "628e23f1f599555a88a6c21ce15bcba5f1f36ea4",
"size": "840",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dataset/python/fail_if_no_c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
}
|
"""Test runner for COT.commands doctest tests."""
import logging
from logging import NullHandler
from doctest import DocTestSuite
from unittest import TestSuite
logging.getLogger('COT').addHandler(NullHandler())
def load_tests(*_):
"""Load doctests as unittest test suite.
For the parameters, see :mod:`unittest`. The parameters are unused here.
"""
suite = TestSuite()
suite.addTests(DocTestSuite('COT.commands.add_disk'))
suite.addTests(DocTestSuite('COT.commands.deploy'))
suite.addTests(DocTestSuite('COT.commands.edit_hardware'))
suite.addTests(DocTestSuite('COT.commands.edit_properties'))
return suite
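# A self-contained illustration (separate from the COT modules above) of the
# same mechanism: DocTestSuite() with no arguments collects doctests from the
# calling module, so a docstring like the one below would be picked up if
# added to the suite via suite.addTests(DocTestSuite()).
def _twice(x):
    """Return twice the argument.

    >>> _twice(21)
    42
    """
    return x * 2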
|
{
"content_hash": "a3d38f90101ca6a9dfa51c3a61867aaa",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 29.59090909090909,
"alnum_prop": 0.7373271889400922,
"repo_name": "glennmatthews/cot",
"id": "f2fccd58fd2f81616cad0e8291e968c54b99baf2",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "COT/commands/tests/test_doctests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1293158"
},
{
"name": "Roff",
"bytes": "37442"
},
{
"name": "Shell",
"bytes": "3840"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
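# A hedged variant (not part of the original migration) of the same data fix:
# QuerySet.iterator() streams rows instead of loading them all at once, and
# passing migrations.RunPython.noop as the reverse function keeps the
# migration reversible even though there is nothing to undo, e.g.
# migrations.RunPython(convert_lines_data, migrations.RunPython.noop).
def convert_lines_data_streamed(apps, schema_editor):
    CartLine = apps.get_model('cart', 'CartLine')
    # Same full scan as above (None values in the JSONField cannot be
    # filtered reliably here), but with flat memory use on large tables.
    for line in CartLine.objects.all().iterator():
        if line.data is None:
            line.data = {}
            line.save(update_fields=['data'])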
|
{
"content_hash": "fd193b81f55b22482a9a9c82de7d869d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 25,
"alnum_prop": 0.6417391304347826,
"repo_name": "itbabu/saleor",
"id": "7b0c33e2e53e39fd40a651581f82521d11687aae",
"size": "648",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "saleor/cart/migrations/fix_empty_data_in_lines.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "63640"
},
{
"name": "HTML",
"bytes": "381272"
},
{
"name": "JavaScript",
"bytes": "58958"
},
{
"name": "Python",
"bytes": "651031"
}
],
"symlink_target": ""
}
|
import os
from urlparse import urljoin, urlparse
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admin.options import ModelAdmin
from django.contrib.staticfiles import finders
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.http import (HttpResponse, HttpResponseServerError,
HttpResponseNotFound)
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import requires_csrf_token
from mezzanine.conf import settings
from mezzanine.core.forms import get_edit_form
from mezzanine.core.models import Displayable, SitePermission
from mezzanine.utils.cache import add_cache_bypass
from mezzanine.utils.views import is_editable, paginate, render, set_cookie
from mezzanine.utils.sites import has_site_permission
def set_device(request, device=""):
"""
Sets a device name in a cookie when a user explicitly wants to go
to the site for a particular device (eg mobile).
"""
response = redirect(add_cache_bypass(request.GET.get("next") or "/"))
set_cookie(response, "mezzanine-device", device, 60 * 60 * 24 * 365)
return response
@staff_member_required
def set_site(request):
"""
Put the selected site ID into the session - posted to from
the "Select site" drop-down in the header of the admin. The
site ID is then used in favour of the current request's
domain in ``mezzanine.core.managers.CurrentSiteManager``.
"""
site_id = int(request.GET["site_id"])
if not request.user.is_superuser:
try:
SitePermission.objects.get(user=request.user, sites=site_id)
except SitePermission.DoesNotExist:
raise PermissionDenied
request.session["site_id"] = site_id
admin_url = reverse("admin:index")
next = request.GET.get("next") or admin_url
# Don't redirect to a change view for an object that won't exist
# on the selected site - go to its list view instead.
if next.startswith(admin_url):
parts = next.split("/")
if len(parts) > 4 and parts[4].isdigit():
next = "/".join(parts[:4])
return redirect(next)
def direct_to_template(request, template, extra_context=None, **kwargs):
"""
Replacement for Django's ``direct_to_template`` that uses
``TemplateResponse`` via ``mezzanine.utils.views.render``.
"""
context = extra_context or {}
context["params"] = kwargs
for (key, value) in context.items():
if callable(value):
context[key] = value()
return render(request, template, context)
@staff_member_required
def edit(request):
"""
Process the inline editing form.
"""
model = get_model(request.POST["app"], request.POST["model"])
obj = model.objects.get(id=request.POST["id"])
form = get_edit_form(obj, request.POST["fields"], data=request.POST,
files=request.FILES)
if not (is_editable(obj, request) and has_site_permission(request.user)):
response = _("Permission denied")
elif form.is_valid():
form.save()
model_admin = ModelAdmin(model, admin.site)
message = model_admin.construct_change_message(request, form, None)
model_admin.log_change(request, obj, message)
response = ""
else:
response = form.errors.values()[0][0]
return HttpResponse(unicode(response))
def search(request, template="search_results.html"):
"""
    Display search results. Takes an optional "type" GET parameter
in the form "app-name.ModelName" to limit search results to a single model.
"""
settings.use_editable()
query = request.GET.get("q", "")
page = request.GET.get("page", 1)
per_page = settings.SEARCH_PER_PAGE
max_paging_links = settings.MAX_PAGING_LINKS
try:
search_model = get_model(*request.GET.get("type", "").split(".", 1))
if not issubclass(search_model, Displayable):
raise TypeError
except TypeError:
search_model = Displayable
search_type = _("Everything")
else:
search_type = search_model._meta.verbose_name_plural.capitalize()
results = search_model.objects.search(query, for_user=request.user)
paginated = paginate(results, page, per_page, max_paging_links)
context = {"query": query, "results": paginated,
"search_type": search_type}
return render(request, template, context)
@staff_member_required
def static_proxy(request):
"""
Serves TinyMCE plugins inside the inline popups and the uploadify
SWF, as these are normally static files, and will break with
cross-domain JavaScript errors if ``STATIC_URL`` is an external
host. URL for the file is passed in via querystring in the inline
popup plugin template.
"""
# Get the relative URL after STATIC_URL.
url = request.GET["u"]
protocol = "http" if not request.is_secure() else "https"
host = protocol + "://" + request.get_host()
generic_host = "//" + request.get_host()
for prefix in (settings.STATIC_URL, host, generic_host, '/'):
if url.startswith(prefix):
url = url.replace(prefix, "", 1)
response = ""
mimetype = ""
path = finders.find(url)
if path:
if isinstance(path, (list, tuple)):
path = path[0]
with open(path, "rb") as f:
response = f.read()
mimetype = "application/octet-stream"
if url.endswith(".htm"):
# Inject <base href="{{ STATIC_URL }}"> into TinyMCE
# plugins, since the path static files in these won't be
# on the same domain.
mimetype = "text/html"
static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
if not urlparse(static_url).scheme:
static_url = urljoin(host, static_url)
base_tag = "<base href='%s'>" % static_url
response = response.replace("<head>", "<head>" + base_tag)
return HttpResponse(response, mimetype=mimetype)
@requires_csrf_token
def page_not_found(request, template_name="errors/404.html"):
"""
Mimics Django's 404 handler but with a different template path.
"""
context = RequestContext(request, {
"STATIC_URL": settings.STATIC_URL,
"request_path": request.path,
})
t = get_template(template_name)
return HttpResponseNotFound(t.render(context))
@requires_csrf_token
def server_error(request, template_name="errors/500.html"):
"""
Mimics Django's error handler but adds ``STATIC_URL`` to the
context.
"""
context = RequestContext(request, {"STATIC_URL": settings.STATIC_URL})
t = get_template(template_name)
return HttpResponseServerError(t.render(context))
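# A hedged illustration (not taken from mezzanine itself) of how error views
# like the two above are typically wired up in Django: the root URLconf
# exposes module-level handler404/handler500 names pointing at the
# replacement views, e.g. in the project's urls.py:
#
#     handler404 = "mezzanine.core.views.page_not_found"
#     handler500 = "mezzanine.core.views.server_error"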
|
{
"content_hash": "0a972d6f8fb5ab35b4188a51072c4b2f",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 38.130434782608695,
"alnum_prop": 0.6637685290763968,
"repo_name": "stbarnabas/mezzanine",
"id": "325e26b078085302faeb87ffaed9d84ed9554123",
"size": "7016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/core/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "224938"
},
{
"name": "JavaScript",
"bytes": "277433"
},
{
"name": "Python",
"bytes": "1012084"
}
],
"symlink_target": ""
}
|
import pytest
from saltfactories.utils import random_string
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
from tests.support.case import ModuleCase
from tests.support.helpers import runs_on
from tests.support.unit import skipIf
if not salt.utils.platform.is_windows():
import grp
@pytest.mark.skip_if_not_root
@pytest.mark.destructive_test
@pytest.mark.windows_whitelisted
@runs_on(kernel=("Linux", "Windows"))
class GroupModuleTest(ModuleCase):
"""
Validate the linux group system module
"""
def setUp(self):
"""
Get current settings
"""
super().setUp()
self._user = random_string("tg-", uppercase=False)
self._user1 = random_string("tg-", uppercase=False)
self._no_user = random_string("tg-", uppercase=False)
self._group = random_string("tg-", uppercase=False)
self._no_group = random_string("tg-", uppercase=False)
_gid = _new_gid = None
if not salt.utils.platform.is_windows():
_gid = 64989
_new_gid = 64998
self._gid = _gid
self._new_gid = _new_gid
def tearDown(self):
"""
Reset to original settings
"""
self.run_function("user.delete", [self._user])
self.run_function("user.delete", [self._user1])
self.run_function("group.delete", [self._group])
def __get_system_group_gid_range(self):
"""
Returns (SYS_GID_MIN, SYS_GID_MAX)
"""
try:
login_defs = {}
with salt.utils.files.fopen("/etc/login.defs") as defs_fd:
for line in defs_fd:
line = salt.utils.stringutils.to_unicode(line).strip()
if line.startswith("#"):
continue
try:
key, val = line.split()
except ValueError:
pass
else:
login_defs[key] = val
except OSError:
login_defs = {"SYS_GID_MIN": 101, "SYS_GID_MAX": 999}
gid_min = login_defs.get("SYS_GID_MIN", 101)
gid_max = login_defs.get(
"SYS_GID_MAX", int(login_defs.get("GID_MIN", 1000)) - 1
)
return int(gid_min), int(gid_max)
def __get_free_system_gid(self):
"""
Find a free system gid
"""
gid_min, gid_max = self.__get_system_group_gid_range()
busy_gids = [x.gr_gid for x in grp.getgrall() if gid_min <= x.gr_gid <= gid_max]
# find free system gid
for gid in range(gid_min, gid_max + 1):
if gid not in busy_gids:
return gid
@pytest.mark.destructive_test
@pytest.mark.slow_test
def test_add(self):
"""
Test the add group function
"""
# add a new group
self.assertTrue(self.run_function("group.add", [self._group], gid=self._gid))
group_info = self.run_function("group.info", [self._group])
self.assertEqual(group_info["gid"], self._gid)
self.assertEqual(group_info["name"], self._group)
# try adding the group again
self.assertFalse(self.run_function("group.add", [self._group], gid=self._gid))
@pytest.mark.destructive_test
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
@pytest.mark.slow_test
def test_add_system_group(self):
"""
Test the add group function with system=True
"""
gid_min, gid_max = self.__get_system_group_gid_range()
# add a new system group
self.assertTrue(self.run_function("group.add", [self._group, None, True]))
group_info = self.run_function("group.info", [self._group])
self.assertEqual(group_info["name"], self._group)
self.assertTrue(gid_min <= group_info["gid"] <= gid_max)
# try adding the group again
self.assertFalse(self.run_function("group.add", [self._group]))
@pytest.mark.destructive_test
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows")
@pytest.mark.slow_test
def test_add_system_group_gid(self):
"""
Test the add group function with system=True and a specific gid
"""
gid = self.__get_free_system_gid()
# add a new system group
self.assertTrue(self.run_function("group.add", [self._group, gid, True]))
group_info = self.run_function("group.info", [self._group])
self.assertEqual(group_info["name"], self._group)
self.assertEqual(group_info["gid"], gid)
# try adding the group again
self.assertFalse(self.run_function("group.add", [self._group, gid]))
@pytest.mark.destructive_test
@pytest.mark.slow_test
def test_delete(self):
"""
Test the delete group function
"""
self.assertTrue(self.run_function("group.add", [self._group]))
# correct functionality
self.assertTrue(self.run_function("group.delete", [self._group]))
# group does not exist
self.assertFalse(self.run_function("group.delete", [self._no_group]))
@pytest.mark.slow_test
def test_info(self):
"""
Test the info group function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.run_function("user.add", [self._user])
self.run_function("group.adduser", [self._group, self._user])
group_info = self.run_function("group.info", [self._group])
self.assertEqual(group_info["name"], self._group)
self.assertEqual(group_info["gid"], self._gid)
self.assertIn(self._user, str(group_info["members"]))
@skipIf(salt.utils.platform.is_windows(), "gid test skipped on windows")
@pytest.mark.slow_test
def test_chgid(self):
"""
Test the change gid function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.assertTrue(self.run_function("group.chgid", [self._group, self._new_gid]))
group_info = self.run_function("group.info", [self._group])
self.assertEqual(group_info["gid"], self._new_gid)
@pytest.mark.slow_test
def test_adduser(self):
"""
Test the add user to group function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.run_function("user.add", [self._user])
self.assertTrue(self.run_function("group.adduser", [self._group, self._user]))
group_info = self.run_function("group.info", [self._group])
self.assertIn(self._user, str(group_info["members"]))
# try add a non existing user
self.assertFalse(
self.run_function("group.adduser", [self._group, self._no_user])
)
# try add a user to non existing group
self.assertFalse(
self.run_function("group.adduser", [self._no_group, self._user])
)
# try add a non existing user to a non existing group
self.assertFalse(
self.run_function("group.adduser", [self._no_group, self._no_user])
)
@pytest.mark.slow_test
def test_deluser(self):
"""
Test the delete user from group function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.run_function("user.add", [self._user])
self.run_function("group.adduser", [self._group, self._user])
self.assertTrue(self.run_function("group.deluser", [self._group, self._user]))
group_info = self.run_function("group.info", [self._group])
self.assertNotIn(self._user, str(group_info["members"]))
@pytest.mark.slow_test
def test_members(self):
"""
Test the members function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.run_function("user.add", [self._user])
self.run_function("user.add", [self._user1])
m = "{},{}".format(self._user, self._user1)
ret = self.run_function("group.members", [self._group, m])
self.assertTrue(ret)
group_info = self.run_function("group.info", [self._group])
self.assertIn(self._user, str(group_info["members"]))
self.assertIn(self._user1, str(group_info["members"]))
@pytest.mark.slow_test
def test_getent(self):
"""
Test the getent function
"""
self.run_function("group.add", [self._group], gid=self._gid)
self.run_function("user.add", [self._user])
self.run_function("group.adduser", [self._group, self._user])
ginfo = self.run_function("user.getent")
self.assertIn(self._group, str(ginfo))
self.assertIn(self._user, str(ginfo))
self.assertNotIn(self._no_group, str(ginfo))
self.assertNotIn(self._no_user, str(ginfo))
|
{
"content_hash": "56d6b3b46d763cec04deceeb9c425207",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 88,
"avg_line_length": 36.70124481327801,
"alnum_prop": 0.5843979649519503,
"repo_name": "saltstack/salt",
"id": "8cf368e1555e09e976d0728ad0cbe1dc3d904955",
"size": "8845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/modules/test_groupadd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from polltaskclient.http import HTTPClient
from polltaskclient.openstack.common import utils
from polltaskclient.v1.device import DeviceManager
from polltaskclient.v1.storage import StorageManager
from polltaskclient import Logger as LOG
class Client(object):
def __init__(self, endpoint, *args, **kwargs):
self.http_client = HTTPClient(utils.strip_version(endpoint), *args, **kwargs)
self.device = DeviceManager(self.http_client)
self.storage = StorageManager(self.http_client)
# self.session = self.http_client.session
# url = '/'.join([endpoint, 'device/poweroff'])
# data = self.session.request('get', url)
|
{
"content_hash": "dbc495d125a3fccf56d5a2197988c697",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 85,
"avg_line_length": 41.75,
"alnum_prop": 0.7170658682634731,
"repo_name": "xuweiliang/Codelibrary",
"id": "d5099b79a613b1d18e5ce804e6c89bc5aa425624",
"size": "668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polltaskclient/v1/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "134284"
},
{
"name": "HTML",
"bytes": "830844"
},
{
"name": "JavaScript",
"bytes": "2421484"
},
{
"name": "Makefile",
"bytes": "4934"
},
{
"name": "Python",
"bytes": "17185807"
},
{
"name": "Shell",
"bytes": "9144"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('product_id', models.IntegerField(unique=True)),
('description', models.CharField(max_length=255)),
('account_number', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Transaction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('price', models.DecimalField(max_digits=6, decimal_places=2)),
('created_at', models.DateTimeField()),
('exported_at', models.DateTimeField(null=True, editable=False)),
('product', models.ForeignKey(to='product.Product')),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "b79435114fdd7bde28b62e8f85e91e74",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 114,
"avg_line_length": 34.32432432432432,
"alnum_prop": 0.5346456692913386,
"repo_name": "CorbanU/corban-shopify",
"id": "d24c01c880aaba185d22768864de6e4c0d4d129a",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shopify/product/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "435"
},
{
"name": "Python",
"bytes": "49574"
}
],
"symlink_target": ""
}
|