from openinghours.models import OpeningHours, WEEKDAYS
from openinghours.forms import Slot, time_to_str, str_to_time
from openinghours.utils import get_premises_model
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import DetailView
from collections import OrderedDict
class OpeningHoursEditView(DetailView):
"""Powers editing UI supporting up to 2 time slots (sets) per day.
Models still support more slots via shell or admin UI.
This UI will delete and not recreate anything above 2 daily slots.
Inspired by Google local opening hours UI and earlier works.
"""
model = get_premises_model()
template_name = "openinghours/edit_base.html"
def form_prefix(self, day_n, slot_n):
"""Form prefix made up of day number and slot number.
- day number 1-7 for Monday to Sunday
- slot 1-2 typically morning and afternoon
"""
return "day%s_%s" % (day_n, slot_n)
def post(self, request, pk):
""" Clean the data and save opening hours in the database.
Old opening hours are purged before new ones are saved.
"""
location = self.get_object()
# open days, disabled widget data won't make it into request.POST
present_prefixes = [x.split('-')[0] for x in request.POST.keys()]
day_forms = OrderedDict()
for day_no, day_name in WEEKDAYS:
for slot_no in (1, 2):
prefix = self.form_prefix(day_no, slot_no)
# skip closed day as it would be invalid form due to no data
if prefix not in present_prefixes:
continue
day_forms[prefix] = (day_no, Slot(request.POST, prefix=prefix))
if all([day_form[1].is_valid() for pre, day_form in day_forms.items()]):
OpeningHours.objects.filter(company=location).delete()
for prefix, day_form in day_forms.items():
day, form = day_form
opens, shuts = [str_to_time(form.cleaned_data[x])
for x in ('opens', 'shuts')]
if opens != shuts:
OpeningHours(from_hour=opens, to_hour=shuts,
company=location, weekday=day).save()
return redirect(request.path_info)
def get(self, request, pk):
""" Initialize the editing form
1. Build opening_hours, a lookup dictionary to populate the form
slots: keys are day numbers, values are lists of opening
hours for that day.
2. Build days, a list of days with 2 slot forms each.
3. Build form initials for the 2 slots padding/trimming
opening_hours to end up with exactly 2 slots even if it's
just None values.
"""
location = self.get_object()
two_sets = False
closed = None
opening_hours = {}
for o in OpeningHours.objects.filter(company=location):
opening_hours.setdefault(o.weekday, []).append(o)
days = []
for day_no, day_name in WEEKDAYS:
if day_no not in opening_hours.keys():
if opening_hours:
closed = True
ini1, ini2 = [None, None]
else:
closed = False
ini = [{'opens': time_to_str(oh.from_hour),
'shuts': time_to_str(oh.to_hour)}
for oh in opening_hours[day_no]]
ini += [None] * (2 - len(ini[:2])) # pad
ini1, ini2 = ini[:2] # trim
if ini2:
two_sets = True
days.append({
'name': day_name,
'number': day_no,
'slot1': Slot(prefix=self.form_prefix(day_no, 1), initial=ini1),
'slot2': Slot(prefix=self.form_prefix(day_no, 2), initial=ini2),
'closed': closed
})
return render(request, self.template_name, {
'days': days,
'two_sets': two_sets,
'location': location,
})
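# --- Hedged illustration (not part of the original module) ---
# A minimal, framework-free sketch of the pad/trim step described in get():
# however many OpeningHours rows a day has, the edit UI renders exactly two
# slot forms, padding with None and discarding anything beyond the second
# slot. The dicts stand in for the initials built with time_to_str().
def _pad_to_two_slots(slot_initials):
    """Return exactly two slot initials (dicts or None) for one weekday."""
    ini = list(slot_initials)[:2]      # trim anything above 2 daily slots
    ini += [None] * (2 - len(ini))     # pad closed or half days with None
    return ini[0], ini[1]

if __name__ == "__main__":
    print(_pad_to_two_slots([]))  # (None, None) -> closed day
    print(_pad_to_two_slots([{'opens': '09:00', 'shuts': '12:00'},
                             {'opens': '13:00', 'shuts': '17:00'},
                             {'opens': '18:00', 'shuts': '20:00'}]))  # third slot dropped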
| {
"content_hash": "2d1fdd97ed31001713f098d1765616d4",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 41.95918367346939,
"alnum_prop": 0.5634727626459144,
"repo_name": "arteria/django-openinghours",
"id": "0622bf5c9ad99acf6f1f69e57f3539c1088b9420",
"size": "4112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openinghours/views_edit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4873"
},
{
"name": "Makefile",
"bytes": "279"
},
{
"name": "Python",
"bytes": "36705"
}
],
"symlink_target": ""
} |
from JumpScale import j
import JumpScale.baselib.serializers
import JumpScale.grid.serverbase
from JumpScale.grid.serverbase.DaemonClient import Transport
import time
def retry(func):
def wrapper(self, *args, **kwargs):
try:
if j.system.net.tcpPortConnectionTest(*self._connection[:2]):
clientfunc = getattr(self._client, func.__name__)
return clientfunc(*args, **kwargs)
except:
pass # we will execute the reconnect
self._connection[2] = time.time()
self.connect(self._id)
clientfunc = getattr(self._client, func.__name__)
return clientfunc(*args, **kwargs)
return wrapper
class TCPHATransport(Transport):
def __init__(self, connections, clientclass, *args, **kwargs):
self._connections = [ [ip, port, 0] for ip, port in connections ]
self._timeout = 60
self._args = args
self._kwargs = kwargs
self._clientclass = clientclass
self._client = None
self._connection = None
self._id = None
def connect(self, sessionid):
if self._client:
self._client.close()
for attempt in xrange(2):
for connection in sorted(self._connections, key=lambda c: c[-1]):
try:
if j.system.net.tcpPortConnectionTest(*connection[:2]):
self._id = sessionid
ip, port, timestamp = connection
args = list(connection[:-1]) + list(self._args)
client = self._clientclass(*args, **self._kwargs)
client.connect(sessionid)
self._connection = connection
self._client = client
return
except Exception, e:
print "Error occured %s" % e
pass # invalidate the client
if self._client:
self._client.close()
connection[2] = time.time()
ips = [ "%s:%s" % (con[0], con[1]) for con in self._connections ]
msg = "Failed to connect to %s" % (", ".join(ips))
j.events.opserror_critical(msg)
@retry
def sendMsg(self, category, cmd, data, sendformat="", returnformat="",timeout=None):
pass
def close(self):
if self._client:
self._client.close()
def __str__(self):
return "%s %s" % (self.__class__.__name__, self._connections)
| {
"content_hash": "3cfb5c357a679cc0126ca173d36b8fe9",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 88,
"avg_line_length": 38.09090909090909,
"alnum_prop": 0.5389817024661894,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "79dd58bf84f4c4a48b089e39a946773abda45308",
"size": "2514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/grid/serverbase/TCPHATransport.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
from jobsched.watchdog import watchdog
import os
import subprocess
import sys
class RunScript(object):
def __init__(self, script=None):
self.script = os.path.abspath(script)
@watchdog()
def __call__(self, info):
if os.path.exists(self.script):
script = os.path.abspath(self.script)
p = subprocess.Popen([script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while p.poll() is None:
sys.stdout.write(p.stdout.readline())
sys.stdout.write(p.stdout.read())
if p.returncode:
raise RuntimeError("Script %s terminated "
"with status %d" % (script, p.returncode))
else:
raise RuntimeError("No such script, %s" % self.script)
| {
"content_hash": "7c88b4bfa88b4237d3a921d4751bcef2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 92,
"avg_line_length": 33.416666666666664,
"alnum_prop": 0.5810473815461347,
"repo_name": "frenzykryger/jobsched",
"id": "478c8732d6bcbbb32c83f86c2b2ed5a655d89686",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/runscript.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "8452"
},
{
"name": "Shell",
"bytes": "443"
}
],
"symlink_target": ""
} |
from datetime import datetime
from decimal import Decimal
import pytz
import random
import time
from django.conf import settings as django_settings
from django.utils.datastructures import SortedDict
from dolphin import settings
from dolphin.middleware import LocalStoreMiddleware
from dolphin.utils import get_ip, get_geoip_coords, calc_dist
COOKIE_PREFIX = getattr(settings, 'DOLPHIN_COOKIE', 'dolphin_%s')
class Backend(object):
"""A base backend"""
def __init__(self, **kwargs):
self.backend_settings = kwargs
    def _check_maxb(self, flag, request):
        raise NotImplementedError("Must be overridden by backend")
    def _enabled_for_site(self, flag):
        raise NotImplementedError("Must be overridden by backend")
    def _get_flag(self, key):
        raise NotImplementedError("Must be overridden by backend")
    def delete(self, key, *args, **kwargs):
        raise NotImplementedError("Must be overridden by backend")
    def all_flags(self):
        raise NotImplementedError("Must be overridden by backend")
def _get_request(self, **kwargs):
if kwargs.get('request', None) is not None:
return kwargs['request']
return LocalStoreMiddleware.request()
def _in_circle(self, ff, lat, lon):
if isinstance(ff, (tuple, list)):
f_lat = ff[0]
f_lon = ff[1]
else:
f_lat = ff.center.latitude
f_lon = ff.center.longitude
dist = calc_dist(float(f_lat), float(f_lon), lat, lon)
return dist <= ff.radius
def _check_percent(self, flag):
        return False if flag.percent == 0 else random.uniform(0, 100) <= flag.percent
def _limit(self, name, flag, func, request):
"""
        Limits the flag computation to once per request, or once per session
        if session limiting is enabled (requires the session middleware)
"""
if hasattr(request, 'session') and settings.DOLPHIN_LIMIT_TO_SESSION:
d = request.session.setdefault(name, {})
else:
d = LocalStoreMiddleware.local.setdefault(name, {})
if flag.name not in d:
d[flag.name] = func(flag)
return d[flag.name]
def set_cookie(self, request, flag, active):
"""
Set a flag value in dolphin's local store that will
be set as a cookie in the middleware's process response function.
"""
cookie = COOKIE_PREFIX % flag.name
dolphin_cookies = LocalStoreMiddleware.local.setdefault('dolphin_cookies', {})
dolphin_cookies[cookie] = (active, flag.cookie_max_age)
def is_active(self, key, *args, **kwargs):
"""
Checks if a flag exists and is active
"""
overrides = LocalStoreMiddleware.local.setdefault('overrides', {})
if key in overrides:
return overrides[key]
flag = self._get_flag(key)
request = self._get_request(**kwargs)
if flag is None:
return False
#If there is a cookie for this flag, use it
if hasattr(request, 'COOKIES'):
cookie = COOKIE_PREFIX % flag.name
if cookie in request.COOKIES:
return request.COOKIES[cookie]
return self._flag_is_active(flag, request)
def active_flags(self, *args, **kwargs):
"""Returns active flags for the current request"""
request = self._get_request(**kwargs)
return [flag for flag in self.all_flags() if self._flag_is_active(flag, request)]
def _in_group(self, flag, request):
"""Checks if the request's users is in the group specified by flag.group(_id)"""
if isinstance(flag.group_id, int):
group_id = flag.group_id
else:
group_id = flag.group # for the cache objects and redis
return request.user.groups.filter(id=group_id).exists()
def _flag_key(self, ff, request):
"""
This creates a tuple key with various values to uniquely identify the request
and flag
"""
d = SortedDict()
d['name'] = ff.name
d['ip_address'] = get_ip(request)
#avoid fake requests for tests
if hasattr(request, 'user'):
d['user_id'] = request.user.id
else:
d['user_id'] = None
return tuple(d.values())
def _flag_is_active(self, flag, request):
"""
Checks the flag to see if it should be enabled or not.
        Encompasses A/B tests, regional, and user-based flags as well.
Will only calculate random and max flags once per request.
Will store flags for the request if DOLPHIN_STORE_FLAGS is True (default).
"""
key = self._flag_key(flag, request)
flags = LocalStoreMiddleware.local.setdefault('flags', {})
store_flags = settings.DOLPHIN_STORE_FLAGS
if store_flags and key in flags:
return flags[key]
def store(val):
"""quick wrapper to store the flag results if it needs to"""
if flag.cookie_max_age:
self.set_cookie(request, flag, val)
if store_flags:
flags[key] = val
return val
if not flag.enabled:
return store(False)
enabled_for_site = self._limit('enabled_for_site', flag, self._enabled_for_site, request)
if not enabled_for_site:
return store(False)
enabled = True
if flag.registered_only or flag.limit_to_group or flag.staff_only:
#user based flag
if not request: enabled = False
elif not request.user.is_authenticated():
enabled = False
else:
if flag.limit_to_group:
enabled = enabled and self._in_group(flag, request)
if flag.staff_only:
enabled = enabled and request.user.is_staff
if flag.registered_only:
enabled = enabled and True
if enabled == False:
return store(enabled)
if flag.enable_geo:
#distance based
x = get_geoip_coords(get_ip(request))
if x is None or flag.center is None:
enabled = False
else:
enabled = enabled and self._in_circle(flag, x[0], x[1])
if enabled == False:
return store(enabled)
#A/B flags
if flag.random:
#doing this so that the random key is only calculated once per request
def rand_bool(flag):
random.seed(time.time())
return bool(random.randrange(0, 2))
enabled = enabled and self._limit('random', flag, rand_bool, request)
if flag.b_test_start:
#start date
if flag.b_test_start.tzinfo is not None:
now = datetime.utcnow().replace(tzinfo=pytz.UTC)
else:
now = datetime.now()
enabled = enabled and now >= flag.b_test_start
if flag.b_test_end:
#end date
if flag.b_test_end.tzinfo is not None:
now = datetime.utcnow().replace(tzinfo=pytz.UTC)
else:
now = datetime.now()
enabled = enabled and now <= flag.b_test_end
if flag.maximum_b_tests:
#max B tests
enabled = enabled and self._limit('maxb', flag, self._check_maxb, request)
percent_enabled = self._limit('percent', flag, self._check_percent, request)
enabled = enabled and percent_enabled
return store(enabled)
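# --- Hedged illustration (not part of the original module) ---
# _limit() memoizes a per-flag computation either in the session (when
# DOLPHIN_LIMIT_TO_SESSION is on) or in the request-local store, so the random
# and max-B-test checks run at most once per request/session. A framework-free
# sketch of the same memoization pattern:
import random

def _limit_once(store, bucket, flag_name, compute):
    """Evaluate compute() at most once per (bucket, flag_name) within store."""
    d = store.setdefault(bucket, {})
    if flag_name not in d:
        d[flag_name] = compute()
    return d[flag_name]

if __name__ == "__main__":
    local_store = {}  # stand-in for LocalStoreMiddleware.local
    coin = lambda: bool(random.randrange(0, 2))
    first = _limit_once(local_store, "random", "new_homepage", coin)
    second = _limit_once(local_store, "random", "new_homepage", coin)
    assert first == second  # the random draw is reused, not redrawn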
| {
"content_hash": "7e569af62c7c26c7f8b8863ffb278104",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 97,
"avg_line_length": 34.935483870967744,
"alnum_prop": 0.5851470782218705,
"repo_name": "coxmediagroup/dolphin",
"id": "e4c3a2b5ba5fa6ddbf04b161689943806da4e1b7",
"size": "7581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dolphin/backends/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "264"
},
{
"name": "Python",
"bytes": "84795"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class UniformtextValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="uniformtext", parent_name="layout", **kwargs):
super(UniformtextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Uniformtext"),
data_docs=kwargs.pop(
"data_docs",
"""
minsize
Sets the minimum text size between traces of
the same type.
mode
Determines how the font size for various text
elements are uniformed between each trace type.
If the computed text sizes were smaller than
the minimum size defined by
`uniformtext.minsize` using "hide" option hides
the text; and using "show" option shows the
text without further downscaling. Please note
that if the size defined by `minsize` is
greater than the font size defined by trace,
then the `minsize` is used.
""",
),
**kwargs,
)
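# --- Hedged usage sketch (not part of the generated validator module) ---
# The validator above backs the layout.uniformtext compound property. One way
# it is typically exercised from the public API (assuming a recent plotly
# version with plotly.graph_objects available):
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(go.Bar(x=["a", "b", "c"], y=[1, 3, 2],
                           text=["1", "3", "2"], textposition="auto"))
    # Hide any bar label that would have to shrink below 8pt.
    fig.update_layout(uniformtext_minsize=8, uniformtext_mode="hide")
    print(fig.layout.uniformtext)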
| {
"content_hash": "814d4f7c762d4179ab4604ea8814de1b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 82,
"avg_line_length": 41.266666666666666,
"alnum_prop": 0.5605815831987075,
"repo_name": "plotly/plotly.py",
"id": "e768a0c7a0cea06639ae9dab0bcf094a186a0f64",
"size": "1238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/_uniformtext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""
PlexAPI Play PlayQueues
"""
import plexapi, requests
from plexapi import video
from plexapi import utils
class PlayQueue(object):
def __init__(self, server, data, initpath):
self.server = server
self.initpath = initpath
self.identifier = data.attrib.get('identifier')
self.mediaTagPrefix = data.attrib.get('mediaTagPrefix')
self.mediaTagVersion = data.attrib.get('mediaTagVersion')
self.playQueueID = data.attrib.get('playQueueID')
self.playQueueSelectedItemID = data.attrib.get('playQueueSelectedItemID')
self.playQueueSelectedItemOffset = data.attrib.get('playQueueSelectedItemOffset')
self.playQueueTotalCount = data.attrib.get('playQueueTotalCount')
self.playQueueVersion = data.attrib.get('playQueueVersion')
self.items = [video.build_item(server, elem, initpath) for elem in data]
@classmethod
def create(cls, server, video, shuffle=0, continuous=0):
# NOTE: I have not yet figured out what __GID__ is below or where the proper value
# can be obtained. However, the good news is passing anything in seems to work.
path = 'playQueues%s' % utils.joinArgs({
'uri': 'library://__GID__/item/%s' % video.key,
'key': video.key,
'type': 'video',
'shuffle': shuffle,
'continuous': continuous,
'X-Plex-Client-Identifier': plexapi.X_PLEX_IDENTIFIER,
})
data = server.query(path, method=requests.post)
return cls(server, data, initpath=path)
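# --- Hedged usage sketch (not part of the original module) ---
# PlayQueue.create() POSTs a playQueues request for a single video item and
# wraps the XML response. The `server` and `video` arguments below are
# placeholders for whatever this library's server and video lookups return.
def _demo_create_queue(server, video):
    queue = PlayQueue.create(server, video, shuffle=0, continuous=0)
    print("playQueueID=%s items=%d" % (queue.playQueueID, len(queue.items)))
    return queue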
| {
"content_hash": "2cae52a9d6be3d28fcaa16954a767e22",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 42.270270270270274,
"alnum_prop": 0.6547314578005116,
"repo_name": "ayerlock/python-plexcontrol",
"id": "96261c4d1c4819e9bcedc866673df1eda4ada2a4",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/plexapi/playqueue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75752"
}
],
"symlink_target": ""
} |
'''
Classes to solve and simulate consumption-savings model with a discrete, exogenous,
stochastic Markov state. The only solver here extends ConsIndShockModel to
include a Markov state; the interest factor, permanent growth factor, and income
distribution can vary with the discrete state.
'''
import sys
sys.path.insert(0,'../')
from copy import deepcopy
import numpy as np
from ConsIndShockModel import ConsIndShockSolver, ValueFunc, MargValueFunc, ConsumerSolution, IndShockConsumerType
from HARKutilities import warnings # Because of "patch" to warnings modules
from HARKsimulation import drawDiscrete
from HARKinterpolation import CubicInterp, LowerEnvelope, LinearInterp
from HARKutilities import CRRAutility, CRRAutilityP, CRRAutilityPP, CRRAutilityP_inv, \
CRRAutility_invP, CRRAutility_inv, CRRAutilityP_invP
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
class ConsMarkovSolver(ConsIndShockSolver):
'''
A class to solve a single period consumption-saving problem with risky income
and stochastic transitions between discrete states, in a Markov fashion.
Extends ConsIndShockSolver, with identical inputs but for a discrete
Markov state, whose transition rule is summarized in MrkvArray. Markov
states can differ in their interest factor, permanent growth factor, live probability, and
income distribution, so the inputs Rfree, PermGroFac, IncomeDstn, and LivPrb are
now arrays or lists specifying those values in each (succeeding) Markov state.
'''
def __init__(self,solution_next,IncomeDstn_list,LivPrb,DiscFac,
CRRA,Rfree_list,PermGroFac_list,MrkvArray,BoroCnstArt,
aXtraGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver for a one period problem with risky income
and transitions between discrete Markov states. In the descriptions below,
N is the number of discrete states.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn_list : [[np.array]]
A length N list of income distributions in each succeeding Markov
state. Each income distribution contains three arrays of floats,
representing a discrete approximation to the income process at the
beginning of the succeeding period. Order: event probabilities,
permanent shocks, transitory shocks.
LivPrb : np.array
Survival probability; likelihood of being alive at the beginning of
the succeeding period for each Markov state.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree_list : np.array
Risk free interest factor on end-of-period assets for each Markov
state in the succeeding period.
        PermGroFac_list : np.array
Expected permanent income growth factor at the end of this period
for each Markov state in the succeeding period.
MrkvArray : np.array
An NxN array representing a Markov transition matrix between discrete
states. The i,j-th element of MrkvArray is the probability of
moving from state i in period t to state j in period t+1.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
# Set basic attributes of the problem
ConsIndShockSolver.assignParameters(self,solution_next,np.nan,LivPrb,DiscFac,CRRA,np.nan,
np.nan,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
self.defUtilityFuncs()
# Set additional attributes specific to the Markov model
self.IncomeDstn_list = IncomeDstn_list
self.Rfree_list = Rfree_list
self.PermGroFac_list = PermGroFac_list
self.MrkvArray = MrkvArray
self.StateCount = MrkvArray.shape[0]
def solve(self):
'''
Solve the one period problem of the consumption-saving model with a Markov state.
Parameters
----------
none
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marg-
inal value function vPfunc, a minimum acceptable level of normalized
market resources mNrmMin, normalized human wealth hNrm, and bounding
MPCs MPCmin and MPCmax. It might also have a value function vFunc
and marginal marginal value function vPPfunc. All of these attributes
are lists or arrays, with elements corresponding to the current
Markov state. E.g. solution.cFunc[0] is the consumption function
when in the i=0 Markov state this period.
'''
# Find the natural borrowing constraint in each current state
self.defBoundary()
# Initialize end-of-period (marginal) value functions
self.EndOfPrdvFunc_list = []
self.EndOfPrdvPfunc_list = []
self.ExIncNextAll = np.zeros(self.StateCount) + np.nan # expected income conditional on the next state
self.WorstIncPrbAll = np.zeros(self.StateCount) + np.nan # probability of getting the worst income shock in each next period state
# Loop through each next-period-state and calculate the end-of-period
# (marginal) value function
for j in range(self.StateCount):
# Condition values on next period's state (and record a couple for later use)
self.conditionOnState(j)
self.ExIncNextAll[j] = np.dot(self.ShkPrbsNext,self.PermShkValsNext*self.TranShkValsNext)
self.WorstIncPrbAll[j] = self.WorstIncPrb
# Construct the end-of-period marginal value function conditional
# on next period's state and add it to the list of value functions
EndOfPrdvPfunc_cond = self.makeEndOfPrdvPfuncCond()
self.EndOfPrdvPfunc_list.append(EndOfPrdvPfunc_cond)
            # Construct the end-of-period value function conditional on next
# period's state and add it to the list of value functions
if self.vFuncBool:
EndOfPrdvFunc_cond = self.makeEndOfPrdvFuncCond()
self.EndOfPrdvFunc_list.append(EndOfPrdvFunc_cond)
# EndOfPrdvP_cond is EndOfPrdvP conditional on *next* period's state.
# Take expectations to get EndOfPrdvP conditional on *this* period's state.
self.calcEndOfPrdvP()
# Calculate the bounding MPCs and PDV of human wealth for each state
self.calcHumWealthAndBoundingMPCs()
# Find consumption and market resources corresponding to each end-of-period
# assets point for each state (and add an additional point at the lower bound)
aNrm = np.asarray(self.aXtraGrid)[np.newaxis,:] + np.array(self.BoroCnstNat_list)[:,np.newaxis]
self.getPointsForInterpolation(self.EndOfPrdvP,aNrm)
cNrm = np.hstack((np.zeros((self.StateCount,1)),self.cNrmNow))
mNrm = np.hstack((np.reshape(self.mNrmMin_list,(self.StateCount,1)),self.mNrmNow))
# Package and return the solution for this period
self.BoroCnstNat = self.BoroCnstNat_list
solution = self.makeSolution(cNrm,mNrm)
return solution
def defBoundary(self):
'''
Find the borrowing constraint for each current state and save it as an
attribute of self for use by other methods.
Parameters
----------
none
Returns
-------
none
'''
self.BoroCnstNatAll = np.zeros(self.StateCount) + np.nan
# Find the natural borrowing constraint conditional on next period's state
for j in range(self.StateCount):
PermShkMinNext = np.min(self.IncomeDstn_list[j][1])
TranShkMinNext = np.min(self.IncomeDstn_list[j][2])
self.BoroCnstNatAll[j] = (self.solution_next.mNrmMin[j] - TranShkMinNext)*\
(self.PermGroFac_list[j]*PermShkMinNext)/self.Rfree_list[j]
self.BoroCnstNat_list = np.zeros(self.StateCount) + np.nan
self.mNrmMin_list = np.zeros(self.StateCount) + np.nan
self.BoroCnstDependency = np.zeros((self.StateCount,self.StateCount)) + np.nan
# The natural borrowing constraint in each current state is the *highest*
# among next-state-conditional natural borrowing constraints that could
# occur from this current state.
for i in range(self.StateCount):
possible_next_states = self.MrkvArray[i,:] > 0
self.BoroCnstNat_list[i] = np.max(self.BoroCnstNatAll[possible_next_states])
self.mNrmMin_list[i] = np.max([self.BoroCnstNat_list[i],self.BoroCnstArt])
self.BoroCnstDependency[i,:] = self.BoroCnstNat_list[i] == self.BoroCnstNatAll
# Also creates a Boolean array indicating whether the natural borrowing
# constraint *could* be hit when transitioning from i to j.
def conditionOnState(self,state_index):
'''
Temporarily assume that a particular Markov state will occur in the
succeeding period, and condition solver attributes on this assumption.
Allows the solver to construct the future-state-conditional marginal
value function (etc) for that future state.
Parameters
----------
state_index : int
Index of the future Markov state to condition on.
Returns
-------
none
'''
# Set future-state-conditional values as attributes of self
self.IncomeDstn = self.IncomeDstn_list[state_index]
self.Rfree = self.Rfree_list[state_index]
self.PermGroFac = self.PermGroFac_list[state_index]
self.vPfuncNext = self.solution_next.vPfunc[state_index]
self.mNrmMinNow = self.mNrmMin_list[state_index]
self.BoroCnstNat = self.BoroCnstNatAll[state_index]
self.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac)
self.DiscFacEff = self.DiscFac # survival probability LivPrb represents probability from
# *current* state, so DiscFacEff is just DiscFac for now
# These lines have to come after setAndUpdateValues to override the definitions there
self.vPfuncNext = self.solution_next.vPfunc[state_index]
if self.CubicBool:
self.vPPfuncNext= self.solution_next.vPPfunc[state_index]
if self.vFuncBool:
self.vFuncNext = self.solution_next.vFunc[state_index]
def calcEndOfPrdvPP(self):
'''
Calculates end-of-period marginal marginal value using a pre-defined
array of next period market resources in self.mNrmNext.
Parameters
----------
none
Returns
-------
EndOfPrdvPP : np.array
End-of-period marginal marginal value of assets at each value in
the grid of assets.
'''
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*self.PermGroFac**(-self.CRRA-1.0)*\
np.sum(self.PermShkVals_temp**(-self.CRRA-1.0)*self.vPPfuncNext(self.mNrmNext)
*self.ShkPrbs_temp,axis=0)
return EndOfPrdvPP
def makeEndOfPrdvFuncCond(self):
'''
Construct the end-of-period value function conditional on next period's
state. NOTE: It might be possible to eliminate this method and replace
it with ConsIndShockSolver.makeEndOfPrdvFunc, but the self.X_cond
variables must be renamed.
Parameters
----------
none
Returns
-------
EndofPrdvFunc_cond : ValueFunc
The end-of-period value function conditional on a particular state
            occurring in the next period.
'''
VLvlNext = (self.PermShkVals_temp**(1.0-self.CRRA)*
self.PermGroFac**(1.0-self.CRRA))*self.vFuncNext(self.mNrmNext)
EndOfPrdv_cond = self.DiscFacEff*np.sum(VLvlNext*self.ShkPrbs_temp,axis=0)
EndOfPrdvNvrs_cond = self.uinv(EndOfPrdv_cond)
EndOfPrdvNvrsP_cond = self.EndOfPrdvP_cond*self.uinvP(EndOfPrdv_cond)
EndOfPrdvNvrs_cond = np.insert(EndOfPrdvNvrs_cond,0,0.0)
EndOfPrdvNvrsP_cond = np.insert(EndOfPrdvNvrsP_cond,0,EndOfPrdvNvrsP_cond[0])
aNrm_temp = np.insert(self.aNrm_cond,0,self.BoroCnstNat)
EndOfPrdvNvrsFunc_cond = CubicInterp(aNrm_temp,EndOfPrdvNvrs_cond,EndOfPrdvNvrsP_cond)
EndofPrdvFunc_cond = ValueFunc(EndOfPrdvNvrsFunc_cond,self.CRRA)
return EndofPrdvFunc_cond
def calcEndOfPrdvPcond(self):
'''
Calculate end-of-period marginal value of assets at each point in aNrmNow
        conditional on a particular state occurring in the next period.
Parameters
----------
None
Returns
-------
EndOfPrdvP : np.array
A 1D array of end-of-period marginal value of assets.
'''
EndOfPrdvPcond = ConsIndShockSolver.calcEndOfPrdvP(self)
return EndOfPrdvPcond
def makeEndOfPrdvPfuncCond(self):
'''
Construct the end-of-period marginal value function conditional on next
period's state.
Parameters
----------
None
Returns
-------
EndofPrdvPfunc_cond : MargValueFunc
The end-of-period marginal value function conditional on a particular
            state occurring in the succeeding period.
'''
# Get data to construct the end-of-period marginal value function (conditional on next state)
self.aNrm_cond = self.prepareToCalcEndOfPrdvP()
self.EndOfPrdvP_cond= self.calcEndOfPrdvPcond()
EndOfPrdvPnvrs_cond = self.uPinv(self.EndOfPrdvP_cond) # "decurved" marginal value
if self.CubicBool:
EndOfPrdvPP_cond = self.calcEndOfPrdvPP()
EndOfPrdvPnvrsP_cond = EndOfPrdvPP_cond*self.uPinvP(self.EndOfPrdvP_cond) # "decurved" marginal marginal value
# Construct the end-of-period marginal value function conditional on the next state.
if self.CubicBool:
EndOfPrdvPnvrsFunc_cond = CubicInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
EndOfPrdvPnvrsP_cond,lower_extrap=True)
else:
EndOfPrdvPnvrsFunc_cond = LinearInterp(self.aNrm_cond,EndOfPrdvPnvrs_cond,
lower_extrap=True)
EndofPrdvPfunc_cond = MargValueFunc(EndOfPrdvPnvrsFunc_cond,self.CRRA) # "recurve" the interpolated marginal value function
return EndofPrdvPfunc_cond
def calcEndOfPrdvP(self):
'''
Calculates end of period marginal value (and marginal marginal) value
at each aXtra gridpoint for each current state, unconditional on the
future Markov state (i.e. weighting conditional end-of-period marginal
value by transition probabilities).
Parameters
----------
none
Returns
-------
none
'''
# Find unique values of minimum acceptable end-of-period assets (and the
# current period states for which they apply).
aNrmMin_unique, state_inverse = np.unique(self.BoroCnstNat_list,return_inverse=True)
self.possible_transitions = self.MrkvArray > 0
# Calculate end-of-period marginal value (and marg marg value) at each
# asset gridpoint for each current period state
EndOfPrdvP = np.zeros((self.StateCount,self.aXtraGrid.size))
EndOfPrdvPP = np.zeros((self.StateCount,self.aXtraGrid.size))
for k in range(aNrmMin_unique.size):
aNrmMin = aNrmMin_unique[k] # minimum assets for this pass
which_states = state_inverse == k # the states for which this minimum applies
aGrid = aNrmMin + self.aXtraGrid # assets grid for this pass
EndOfPrdvP_all = np.zeros((self.StateCount,self.aXtraGrid.size))
EndOfPrdvPP_all = np.zeros((self.StateCount,self.aXtraGrid.size))
for j in range(self.StateCount):
if np.any(np.logical_and(self.possible_transitions[:,j],which_states)): # only consider a future state if one of the relevant states could transition to it
EndOfPrdvP_all[j,:] = self.EndOfPrdvPfunc_list[j](aGrid)
if self.CubicBool: # Add conditional end-of-period (marginal) marginal value to the arrays
EndOfPrdvPP_all[j,:] = self.EndOfPrdvPfunc_list[j].derivative(aGrid)
# Weight conditional marginal (marginal) values by transition probs
# to get unconditional marginal (marginal) value at each gridpoint.
EndOfPrdvP_temp = np.dot(self.MrkvArray,EndOfPrdvP_all)
EndOfPrdvP[which_states,:] = EndOfPrdvP_temp[which_states,:] # only take the states for which this asset minimum applies
if self.CubicBool:
EndOfPrdvPP_temp = np.dot(self.MrkvArray,EndOfPrdvPP_all)
EndOfPrdvPP[which_states,:] = EndOfPrdvPP_temp[which_states,:]
# Store the results as attributes of self, scaling end of period marginal value by survival probability from each current state
LivPrb_tiled = np.tile(np.reshape(self.LivPrb,(self.StateCount,1)),(1,self.aXtraGrid.size))
self.EndOfPrdvP = LivPrb_tiled*EndOfPrdvP
if self.CubicBool:
self.EndOfPrdvPP = LivPrb_tiled*EndOfPrdvPP
def calcHumWealthAndBoundingMPCs(self):
'''
Calculates human wealth and the maximum and minimum MPC for each current
period state, then stores them as attributes of self for use by other methods.
Parameters
----------
none
Returns
-------
none
'''
# Upper bound on MPC at lower m-bound
WorstIncPrb_array = self.BoroCnstDependency*np.tile(np.reshape(self.WorstIncPrbAll,
(1,self.StateCount)),(self.StateCount,1))
temp_array = self.MrkvArray*WorstIncPrb_array
WorstIncPrbNow = np.sum(temp_array,axis=1) # Probability of getting the "worst" income shock and transition from each current state
ExMPCmaxNext = (np.dot(temp_array,self.Rfree_list**(1.0-self.CRRA)*
self.solution_next.MPCmax**(-self.CRRA))/WorstIncPrbNow)**\
(-1.0/self.CRRA)
DiscFacEff_temp = self.DiscFac*self.LivPrb
self.MPCmaxNow = 1.0/(1.0 + ((DiscFacEff_temp*WorstIncPrbNow)**
(1.0/self.CRRA))/ExMPCmaxNext)
self.MPCmaxEff = self.MPCmaxNow
self.MPCmaxEff[self.BoroCnstNat_list < self.mNrmMin_list] = 1.0
# State-conditional PDV of human wealth
hNrmPlusIncNext = self.ExIncNextAll + self.solution_next.hNrm
self.hNrmNow = np.dot(self.MrkvArray,(self.PermGroFac_list/self.Rfree_list)*
hNrmPlusIncNext)
# Lower bound on MPC as m gets arbitrarily large
temp = (DiscFacEff_temp*np.dot(self.MrkvArray,self.solution_next.MPCmin**
(-self.CRRA)*self.Rfree_list**(1.0-self.CRRA)))**(1.0/self.CRRA)
self.MPCminNow = 1.0/(1.0 + temp)
def makeSolution(self,cNrm,mNrm):
'''
Construct an object representing the solution to this period's problem.
Parameters
----------
cNrm : np.array
Array of normalized consumption values for interpolation. Each row
corresponds to a Markov state for this period.
mNrm : np.array
Array of normalized market resource values for interpolation. Each
row corresponds to a Markov state for this period.
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marg-
inal value function vPfunc, a minimum acceptable level of normalized
market resources mNrmMin, normalized human wealth hNrm, and bounding
MPCs MPCmin and MPCmax. It might also have a value function vFunc
and marginal marginal value function vPPfunc. All of these attributes
are lists or arrays, with elements corresponding to the current
Markov state. E.g. solution.cFunc[0] is the consumption function
when in the i=0 Markov state this period.
'''
solution = ConsumerSolution() # An empty solution to which we'll add state-conditional solutions
# Calculate the MPC at each market resource gridpoint in each state (if desired)
if self.CubicBool:
dcda = self.EndOfPrdvPP/self.uPP(np.array(self.cNrmNow))
MPC = dcda/(dcda+1.0)
self.MPC_temp = np.hstack((np.reshape(self.MPCmaxNow,(self.StateCount,1)),MPC))
interpfunc = self.makeCubiccFunc
else:
interpfunc = self.makeLinearcFunc
# Loop through each current period state and add its solution to the overall solution
for i in range(self.StateCount):
# Set current-period-conditional human wealth and MPC bounds
self.hNrmNow_j = self.hNrmNow[i]
self.MPCminNow_j = self.MPCminNow[i]
if self.CubicBool:
self.MPC_temp_j = self.MPC_temp[i,:]
# Construct the consumption function by combining the constrained and unconstrained portions
self.cFuncNowCnst = LinearInterp([self.mNrmMin_list[i], self.mNrmMin_list[i]+1.0],
[0.0,1.0])
cFuncNowUnc = interpfunc(mNrm[i,:],cNrm[i,:])
cFuncNow = LowerEnvelope(cFuncNowUnc,self.cFuncNowCnst)
# Make the marginal value function and pack up the current-state-conditional solution
vPfuncNow = MargValueFunc(cFuncNow,self.CRRA)
solution_cond = ConsumerSolution(cFunc=cFuncNow, vPfunc=vPfuncNow,
mNrmMin=self.mNrmMinNow)
if self.CubicBool: # Add the state-conditional marginal marginal value function (if desired)
solution_cond = self.addvPPfunc(solution_cond)
# Add the current-state-conditional solution to the overall period solution
solution.appendSolution(solution_cond)
# Add the lower bounds of market resources, MPC limits, human resources,
# and the value functions to the overall solution
solution.mNrmMin = self.mNrmMin_list
solution = self.addMPCandHumanWealth(solution)
if self.vFuncBool:
vFuncNow = self.makevFunc(solution)
solution.vFunc = vFuncNow
# Return the overall solution to this period
return solution
def makeLinearcFunc(self,mNrm,cNrm):
'''
Make a linear interpolation to represent the (unconstrained) consumption
function conditional on the current period state.
Parameters
----------
mNrm : np.array
Array of normalized market resource values for interpolation.
cNrm : np.array
Array of normalized consumption values for interpolation.
Returns
-------
cFuncUnc: an instance of HARKinterpolation.LinearInterp
'''
cFuncUnc = LinearInterp(mNrm,cNrm,self.MPCminNow_j*self.hNrmNow_j,self.MPCminNow_j)
return cFuncUnc
def makeCubiccFunc(self,mNrm,cNrm):
'''
Make a cubic interpolation to represent the (unconstrained) consumption
function conditional on the current period state.
Parameters
----------
mNrm : np.array
Array of normalized market resource values for interpolation.
cNrm : np.array
Array of normalized consumption values for interpolation.
Returns
-------
cFuncUnc: an instance of HARKinterpolation.CubicInterp
'''
cFuncUnc = CubicInterp(mNrm,cNrm,self.MPC_temp_j,self.MPCminNow_j*self.hNrmNow_j,
self.MPCminNow_j)
return cFuncUnc
def makevFunc(self,solution):
'''
Construct the value function for each current state.
Parameters
----------
solution : ConsumerSolution
The solution to the single period consumption-saving problem. Must
have a consumption function cFunc (using cubic or linear splines) as
a list with elements corresponding to the current Markov state. E.g.
solution.cFunc[0] is the consumption function when in the i=0 Markov
state this period.
Returns
-------
vFuncNow : [ValueFunc]
A list of value functions (defined over normalized market resources
m) for each current period Markov state.
'''
vFuncNow = [] # Initialize an empty list of value functions
# Loop over each current period state and construct the value function
for i in range(self.StateCount):
# Make state-conditional grids of market resources and consumption
mNrmMin = self.mNrmMin_list[i]
mGrid = mNrmMin + self.aXtraGrid
cGrid = solution.cFunc[i](mGrid)
aGrid = mGrid - cGrid
# Calculate end-of-period value at each gridpoint
EndOfPrdv_all = np.zeros((self.StateCount,self.aXtraGrid.size))
for j in range(self.StateCount):
if self.possible_transitions[i,j]:
EndOfPrdv_all[j,:] = self.EndOfPrdvFunc_list[j](aGrid)
EndOfPrdv = np.dot(self.MrkvArray[i,:],EndOfPrdv_all)
# Calculate (normalized) value and marginal value at each gridpoint
vNrmNow = self.u(cGrid) + EndOfPrdv
vPnow = self.uP(cGrid)
# Make a "decurved" value function with the inverse utility function
vNvrs = self.uinv(vNrmNow) # value transformed through inverse utility
vNvrsP = vPnow*self.uinvP(vNrmNow)
mNrm_temp = np.insert(mGrid,0,mNrmMin) # add the lower bound
vNvrs = np.insert(vNvrs,0,0.0)
vNvrsP = np.insert(vNvrsP,0,self.MPCmaxEff[i]**(-self.CRRA/(1.0-self.CRRA)))
MPCminNvrs = self.MPCminNow[i]**(-self.CRRA/(1.0-self.CRRA))
vNvrsFunc_i = CubicInterp(mNrm_temp,vNvrs,vNvrsP,MPCminNvrs*self.hNrmNow[i],MPCminNvrs)
# "Recurve" the decurved value function and add it to the list
vFunc_i = ValueFunc(vNvrsFunc_i,self.CRRA)
vFuncNow.append(vFunc_i)
return vFuncNow
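# --- Hedged illustration (not part of the original module; uses the module-level numpy import) ---
# calcEndOfPrdvP() weights the next-state-conditional end-of-period marginal
# values by the rows of MrkvArray to get marginal value conditional on the
# *current* state. With toy numbers (2 states, 3 asset gridpoints):
#   _EndOfPrdvP_demo[0] == 0.9*[3,2,1] + 0.1*[6,4,2] == [3.3, 2.2, 1.1]
_MrkvArray_demo = np.array([[0.9, 0.1],
                            [0.4, 0.6]])              # rows sum to one
_EndOfPrdvP_cond_demo = np.array([[3.0, 2.0, 1.0],    # conditional on next state 0
                                  [6.0, 4.0, 2.0]])   # conditional on next state 1
_EndOfPrdvP_demo = np.dot(_MrkvArray_demo, _EndOfPrdvP_cond_demo)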
def solveConsMarkov(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,PermGroFac,
MrkvArray,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period consumption-saving problem with risky income and
stochastic transitions between discrete states, in a Markov fashion. Has
    identical inputs to solveConsIndShock, except for a discrete
    Markov transition rule MrkvArray. Markov states can differ in their interest
factor, permanent growth factor, and income distribution, so the inputs Rfree,
PermGroFac, and IncomeDstn are arrays or lists specifying those values in each
(succeeding) Markov state.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn_list : [[np.array]]
A length N list of income distributions in each succeeding Markov
state. Each income distribution contains three arrays of floats,
representing a discrete approximation to the income process at the
beginning of the succeeding period. Order: event probabilities,
permanent shocks, transitory shocks.
    LivPrb : np.array
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period for each Markov state.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree_list : np.array
Risk free interest factor on end-of-period assets for each Markov
state in the succeeding period.
    PermGroFac_list : np.array
Expected permanent income growth factor at the end of this period
for each Markov state in the succeeding period.
MrkvArray : numpy.array
An NxN array representing a Markov transition matrix between discrete
states. The i,j-th element of MrkvArray is the probability of
moving from state i in period t to state j in period t+1.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution : ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using cubic or linear splines), a marg-
inal value function vPfunc, a minimum acceptable level of normalized
market resources mNrmMin, normalized human wealth hNrm, and bounding
MPCs MPCmin and MPCmax. It might also have a value function vFunc
and marginal marginal value function vPPfunc. All of these attributes
are lists or arrays, with elements corresponding to the current
Markov state. E.g. solution.cFunc[0] is the consumption function
when in the i=0 Markov state this period.
'''
solver = ConsMarkovSolver(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,MrkvArray,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool)
solution_now = solver.solve()
return solution_now
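# --- Hedged illustration (not part of the original module; uses the module-level numpy import) ---
# defBoundary() makes each current state's natural borrowing constraint the
# *highest* (tightest) of the next-state-conditional constraints that are
# reachable from it (MrkvArray[i,j] > 0); the artificial constraint is applied
# separately. Toy numbers:
_BoroCnstNatAll_demo = np.array([-2.0, -0.5, -1.5])   # conditional on next state j
_MrkvArray_demo2 = np.array([[0.8, 0.2, 0.0],         # state 0 can reach 0 and 1
                             [0.0, 0.0, 1.0],         # state 1 can only reach 2
                             [0.3, 0.3, 0.4]])        # state 2 can reach all three
_BoroCnstNat_list_demo = np.array(
    [np.max(_BoroCnstNatAll_demo[_MrkvArray_demo2[i, :] > 0]) for i in range(3)])
# -> array([-0.5, -1.5, -0.5])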
####################################################################################################
####################################################################################################
class MarkovConsumerType(IndShockConsumerType):
'''
An agent in the Markov consumption-saving model. His problem is defined by a sequence
of income distributions, survival probabilities, discount factors, and permanent
income growth rates, as well as time invariant values for risk aversion, the
interest rate, the grid of end-of-period assets, and how he is borrowing constrained.
'''
time_inv_ = IndShockConsumerType.time_inv_ + ['MrkvArray']
def __init__(self,cycles=1,time_flow=True,**kwds):
        IndShockConsumerType.__init__(self,cycles=cycles,time_flow=time_flow,**kwds)
self.solveOnePeriod = solveConsMarkov
def checkMarkovInputs(self):
"""
Many parameters used by MarkovConsumerType are arrays. Make sure those arrays are the
right shape.
Parameters
----------
none
Returns
-------
none
"""
StateCount = self.MrkvArray.shape[0]
# Check that arrays are the right shape
assert self.MrkvArray.shape == (StateCount,StateCount),'MrkvArray not the right shape!'
assert self.Rfree.shape == (StateCount,),'Rfree not the right shape!'
# Check that arrays in lists are the right shape
for LivPrb_t in self.LivPrb:
assert LivPrb_t.shape == (StateCount,),'Array in LivPrb is not the right shape!'
        for PermGroFac_t in self.PermGroFac:
assert PermGroFac_t.shape == (StateCount,),'Array in PermGroFac is not the right shape!'
# Now check the income distribution.
# Note IncomeDstn is (potentially) time-varying, so it is in time_vary.
        # Therefore it is a list, and each element of that list corresponds to the income distribution
# at a particular point in time. Each income distribution at a point in time should itself
# be a list, with each element corresponding to the income distribution
# conditional on a particular Markov state.
for IncomeDstn_t in self.IncomeDstn:
assert len(IncomeDstn_t) == StateCount,'List in IncomeDstn is not the right length!'
def preSolve(self):
"""
Do preSolve stuff inherited from IndShockConsumerType.
Also, check to make sure that the inputs that are specific to MarkovConsumerType
are of the right shape (if arrays) or length (if lists).
Parameters
----------
none
Returns
-------
none
"""
IndShockConsumerType.preSolve(self)
self.checkMarkovInputs()
def makeIncShkHist(self):
'''
Makes histories of simulated income shocks for this consumer type by
drawing from the discrete income distributions, respecting the Markov
state for each agent in each period. Should be run after makeMrkvHist().
Parameters
----------
none
Returns
-------
none
'''
orig_time = self.time_flow
self.timeFwd()
self.resetRNG()
# Initialize the shock histories
N = self.MrkvArray.shape[0]
PermShkHist = np.zeros((self.sim_periods,self.Nagents)) + np.nan
TranShkHist = np.zeros((self.sim_periods,self.Nagents)) + np.nan
PermShkHist[0,:] = 1.0
TranShkHist[0,:] = 1.0
t_idx = 0
# Draw income shocks for each simulated period, respecting the Markov state
for t in range(1,self.sim_periods):
MrkvNow = self.MrkvHist[t,:]
IncomeDstn_list = self.IncomeDstn[t_idx]
PermGroFac_list = self.PermGroFac[t_idx]
for n in range(N):
these = MrkvNow == n
IncomeDstnNow = IncomeDstn_list[n]
PermGroFacNow = PermGroFac_list[n]
Indices = np.arange(IncomeDstnNow[0].size) # just a list of integers
# Get random draws of income shocks from the discrete distribution
EventDraws = drawDiscrete(N=np.sum(these),X=Indices,P=IncomeDstnNow[0],exact_match=False,seed=self.RNG.randint(0,2**31-1))
PermShkHist[t,these] = IncomeDstnNow[1][EventDraws]*PermGroFacNow
TranShkHist[t,these] = IncomeDstnNow[2][EventDraws]
# Advance the time index, looping if we've run out of income distributions
t_idx += 1
if t_idx >= len(self.IncomeDstn):
t_idx = 0
# Store the results as attributes of self and restore time to its original flow
self.PermShkHist = PermShkHist
self.TranShkHist = TranShkHist
if not orig_time:
self.timeRev()
def makeMrkvHist(self):
'''
Makes a history of simulated discrete Markov states, starting from the
        initial states in Mrkv_init. Assumes that MrkvArray is constant.
Parameters
----------
none
Returns
-------
none
'''
orig_time = self.time_flow
self.timeFwd()
self.resetRNG()
# Initialize the Markov state history
MrkvHist = np.zeros((self.sim_periods,self.Nagents),dtype=int)
MrkvNow = self.Mrkv_init
MrkvHist[0,:] = MrkvNow
base_draws = np.arange(self.Nagents,dtype=float)/self.Nagents + 1.0/(2*self.Nagents)
# Make an array of Markov transition cutoffs
N = self.MrkvArray.shape[0] # number of states
Cutoffs = np.cumsum(self.MrkvArray,axis=1)
# Draw Markov transitions for each period
for t in range(1,self.sim_periods):
draws_now = self.RNG.permutation(base_draws)
MrkvNext = np.zeros(self.Nagents) + np.nan
for n in range(N):
these = MrkvNow == n
MrkvNext[these] = np.searchsorted(Cutoffs[n,:],draws_now[these])
MrkvHist[t,:] = MrkvNext
MrkvNow = MrkvNext
# Store the results and return time to its original flow
self.MrkvHist = MrkvHist
if not orig_time:
self.timeRev()
def simOnePrd(self):
'''
Simulate a single period of a consumption-saving model with permanent
and transitory income shocks.
Parameters
----------
none
Returns
-------
none
'''
# Unpack objects from self for convenience
aPrev = self.aNow
pPrev = self.pNow
TranShkNow = self.TranShkNow
PermShkNow = self.PermShkNow
RfreeNow = self.RfreeNow[self.MrkvNow]
cFuncNow = self.cFuncNow
# Simulate the period
pNow = pPrev*PermShkNow # Updated permanent income level
ReffNow = RfreeNow/PermShkNow # "effective" interest factor on normalized assets
bNow = ReffNow*aPrev # Bank balances before labor income
mNow = bNow + TranShkNow # Market resources after income
N = self.MrkvArray.shape[0]
cNow = np.zeros_like(mNow)
MPCnow = np.zeros_like(mNow)
for n in range(N):
these = self.MrkvNow == n
            cNow[these], MPCnow[these] = cFuncNow[n].eval_with_derivative(mNow[these]) # Consumption and marginal propensity to consume
aNow = mNow - cNow # Assets after all actions are accomplished
# Store the new state and control variables
self.pNow = pNow
self.bNow = bNow
self.mNow = mNow
self.cNow = cNow
self.MPCnow = MPCnow
self.aNow = aNow
def advanceIncShks(self):
'''
Advance the permanent and transitory income shocks to the next period of
the shock history objects.
Parameters
----------
none
Returns
-------
none
'''
self.MrkvNow = self.MrkvHist[self.Shk_idx,:]
IndShockConsumerType.advanceIncShks(self)
def updateSolutionTerminal(self):
'''
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
Parameters
----------
none
Returns
-------
none
'''
IndShockConsumerType.updateSolutionTerminal(self)
# Make replicated terminal period solution: consume all resources, no human wealth, minimum m is 0
StateCount = self.MrkvArray.shape[0]
self.solution_terminal.cFunc = StateCount*[self.cFunc_terminal_]
self.solution_terminal.vFunc = StateCount*[self.solution_terminal.vFunc]
self.solution_terminal.vPfunc = StateCount*[self.solution_terminal.vPfunc]
self.solution_terminal.vPPfunc = StateCount*[self.solution_terminal.vPPfunc]
self.solution_terminal.mNrmMin = np.zeros(StateCount)
self.solution_terminal.hRto = np.zeros(StateCount)
self.solution_terminal.MPCmax = np.ones(StateCount)
self.solution_terminal.MPCmin = np.ones(StateCount)
def calcBoundingValues(self):
'''
Calculate human wealth plus minimum and maximum MPC in an infinite
horizon model with only one period repeated indefinitely. Store results
as attributes of self. Human wealth is the present discounted value of
expected future income after receiving income this period, ignoring mort-
ality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The
minimum MPC is the limit of the MPC as m --> infty. Results are all
np.array with elements corresponding to each Markov state.
NOT YET IMPLEMENTED FOR THIS CLASS
Parameters
----------
None
Returns
-------
None
'''
raise NotImplementedError()
def makeEulerErrorFunc(self,mMax=100,approx_inc_dstn=True):
'''
Creates a "normalized Euler error" function for this instance, mapping
from market resources to "consumption error per dollar of consumption."
Stores result in attribute eulerErrorFunc as an interpolated function.
Has option to use approximate income distribution stored in self.IncomeDstn
or to use a (temporary) very dense approximation.
NOT YET IMPLEMENTED FOR THIS CLASS
Parameters
----------
mMax : float
Maximum normalized market resources for the Euler error function.
approx_inc_dstn : Boolean
Indicator for whether to use the approximate discrete income distri-
bution stored in self.IncomeDstn[0], or to use a very accurate
discrete approximation instead. When True, uses approximation in
IncomeDstn; when False, makes and uses a very dense approximation.
Returns
-------
None
'''
raise NotImplementedError()
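# --- Hedged illustration (not part of the original module; uses the module-level numpy import) ---
# makeMrkvHist() draws each agent's next state by comparing a uniform draw
# against the cumulative transition probabilities of its current state
# (np.searchsorted on the row's cumsum). A minimal stand-alone version:
def _demo_markov_step(MrkvArray, MrkvNow, draws):
    Cutoffs = np.cumsum(MrkvArray, axis=1)
    MrkvNext = np.empty_like(MrkvNow)
    for n in range(MrkvArray.shape[0]):
        these = MrkvNow == n
        MrkvNext[these] = np.searchsorted(Cutoffs[n, :], draws[these])
    return MrkvNext
# _demo_markov_step(np.array([[0.9, 0.1], [0.5, 0.5]]), np.zeros(5, dtype=int),
#                   np.array([0.05, 0.5, 0.91, 0.3, 0.99]))
# -> array([0, 0, 1, 0, 1]); draws above 0.9 leave state 0 for state 1.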
###############################################################################
if __name__ == '__main__':
import ConsumerParameters as Params
from HARKutilities import plotFuncs
from time import clock
from copy import copy
mystr = lambda number : "{:.4f}".format(number)
do_simulation = True
# Define the Markov transition matrix for serially correlated unemployment
    unemp_length = 5 # Average length of unemployment spell
urate_good = 0.05 # Unemployment rate when economy is in good state
urate_bad = 0.12 # Unemployment rate when economy is in bad state
bust_prob = 0.01 # Probability of economy switching from good to bad
    recession_length = 20 # Average length of bad state
p_reemploy =1.0/unemp_length
p_unemploy_good = p_reemploy*urate_good/(1-urate_good)
p_unemploy_bad = p_reemploy*urate_bad/(1-urate_bad)
boom_prob = 1.0/recession_length
MrkvArray = np.array([[(1-p_unemploy_good)*(1-bust_prob),p_unemploy_good*(1-bust_prob),
(1-p_unemploy_good)*bust_prob,p_unemploy_good*bust_prob],
[p_reemploy*(1-bust_prob),(1-p_reemploy)*(1-bust_prob),
p_reemploy*bust_prob,(1-p_reemploy)*bust_prob],
[(1-p_unemploy_bad)*boom_prob,p_unemploy_bad*boom_prob,
(1-p_unemploy_bad)*(1-boom_prob),p_unemploy_bad*(1-boom_prob)],
[p_reemploy*boom_prob,(1-p_reemploy)*boom_prob,
p_reemploy*(1-boom_prob),(1-p_reemploy)*(1-boom_prob)]])
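    # --- Hedged sanity check (not in the original script) ---
    # Each row of the 4-state (employed/unemployed x good/bad) transition
    # matrix built above should sum to one.
    assert np.allclose(MrkvArray.sum(axis=1), 1.0)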
# Make a consumer with serially correlated unemployment, subject to boom and bust cycles
init_serial_unemployment = copy(Params.init_idiosyncratic_shocks)
init_serial_unemployment['MrkvArray'] = MrkvArray
    init_serial_unemployment['UnempPrb'] = 0 # so the default income process is the employed-state distribution (unemployment comes from the Markov states)
SerialUnemploymentExample = MarkovConsumerType(**init_serial_unemployment)
SerialUnemploymentExample.cycles = 0
SerialUnemploymentExample.vFuncBool = False # for easy toggling here
# Replace the default (lognormal) income distribution with a custom one
employed_income_dist = [np.ones(1),np.ones(1),np.ones(1)] # Definitely get income
unemployed_income_dist = [np.ones(1),np.ones(1),np.zeros(1)] # Definitely don't
SerialUnemploymentExample.IncomeDstn = [[employed_income_dist,unemployed_income_dist,employed_income_dist,
unemployed_income_dist]]
# Interest factor, permanent growth rates, and survival probabilities are constant arrays
SerialUnemploymentExample.Rfree = np.array(4*[SerialUnemploymentExample.Rfree])
SerialUnemploymentExample.PermGroFac = [np.array(4*SerialUnemploymentExample.PermGroFac)]
SerialUnemploymentExample.LivPrb = [SerialUnemploymentExample.LivPrb*np.ones(4)]
# Solve the serial unemployment consumer's problem and display solution
SerialUnemploymentExample.timeFwd()
start_time = clock()
SerialUnemploymentExample.solve()
end_time = clock()
print('Solving a Markov consumer took ' + mystr(end_time-start_time) + ' seconds.')
print('Consumption functions for each discrete state:')
plotFuncs(SerialUnemploymentExample.solution[0].cFunc,0,50)
if SerialUnemploymentExample.vFuncBool:
print('Value functions for each discrete state:')
plotFuncs(SerialUnemploymentExample.solution[0].vFunc,5,50)
# Simulate some data; results stored in cHist, mHist, bHist, aHist, MPChist, and pHist
if do_simulation:
SerialUnemploymentExample.sim_periods = 120
SerialUnemploymentExample.Mrkv_init = np.zeros(SerialUnemploymentExample.Nagents,dtype=int)
SerialUnemploymentExample.makeMrkvHist()
SerialUnemploymentExample.makeIncShkHist()
SerialUnemploymentExample.initializeSim()
SerialUnemploymentExample.simConsHistory()
###############################################################################
# Make a consumer who occasionally gets "unemployment immunity" for a fixed period
UnempPrb = 0.05 # Probability of becoming unemployed each period
ImmunityPrb = 0.01 # Probability of becoming "immune" to unemployment
ImmunityT = 6 # Number of periods of immunity
StateCount = ImmunityT+1 # Total number of Markov states
IncomeDstnReg = [np.array([1-UnempPrb,UnempPrb]), np.array([1.0,1.0]), np.array([1.0/(1.0-UnempPrb),0.0])] # Ordinary income distribution
    IncomeDstnImm = [np.array([1.0]), np.array([1.0]), np.array([1.0])] # Income distribution when immune to unemployment
IncomeDstn = [IncomeDstnReg] + ImmunityT*[IncomeDstnImm] # Income distribution for each Markov state, in a list
# Make the Markov transition array. MrkvArray[i,j] is the probability of transitioning
# to state j in period t+1 from state i in period t.
MrkvArray = np.zeros((StateCount,StateCount))
MrkvArray[0,0] = 1.0 - ImmunityPrb # Probability of not becoming immune in ordinary state: stay in ordinary state
MrkvArray[0,ImmunityT] = ImmunityPrb # Probability of becoming immune in ordinary state: begin immunity periods
for j in range(ImmunityT):
MrkvArray[j+1,j] = 1.0 # When immune, have 100% chance of transition to state with one fewer immunity periods remaining
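# Added illustrative note (ImmunityT = 2 is an assumed smaller value for readability;
# the script itself uses ImmunityT = 6): with ImmunityT = 2, so StateCount = 3, and
# ImmunityPrb = 0.01, the assignments above produce the transition matrix
#   [[0.99, 0.00, 0.01],
#    [1.00, 0.00, 0.00],
#    [0.00, 1.00, 0.00]]
# The ordinary state (row 0) jumps to the last immunity state with probability
# ImmunityPrb, and each immunity state counts down deterministically by one period.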
init_unemployment_immunity = copy(Params.init_idiosyncratic_shocks)
init_unemployment_immunity['MrkvArray'] = MrkvArray
ImmunityExample = MarkovConsumerType(**init_unemployment_immunity)
ImmunityExample.assignParameters(Rfree = np.array(np.array(StateCount*[1.03])), # Interest factor same in all states
PermGroFac = [np.array(StateCount*[1.01])], # Permanent growth factor same in all states
LivPrb = [np.array(StateCount*[0.98])], # Same survival probability in all states
BoroCnstArt = None, # No artificial borrowing constraint
cycles = 0) # Infinite horizon
ImmunityExample.IncomeDstn = [IncomeDstn]
# Solve the unemployment immunity problem and display the consumption functions
start_time = clock()
ImmunityExample.solve()
end_time = clock()
print('Solving an "unemployment immunity" consumer took ' + mystr(end_time-start_time) + ' seconds.')
print('Consumption functions for each discrete state:')
mNrmMin = np.min([ImmunityExample.solution[0].mNrmMin[j] for j in range(StateCount)])
plotFuncs(ImmunityExample.solution[0].cFunc,mNrmMin,10)
###############################################################################
# Make a consumer with serially correlated permanent income growth
UnempPrb = 0.05 # Unemployment probability
StateCount = 5 # Number of permanent income growth rates
Persistence = 0.5 # Probability of getting the same permanent income growth rate next period
IncomeDstnReg = [np.array([1-UnempPrb,UnempPrb]), np.array([1.0,1.0]), np.array([1.0,0.0])]
IncomeDstn = StateCount*[IncomeDstnReg] # Same simple income distribution in each state
# Make the state transition array for this type: Persistence probability of remaining in the same state, equiprobable otherwise
MrkvArray = Persistence*np.eye(StateCount) + (1.0/StateCount)*(1.0-Persistence)*np.ones((StateCount,StateCount))
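# Added illustrative check (not part of the original script): with the values set
# above (StateCount = 5, Persistence = 0.5) the expression gives a matrix with
# 0.5 + (1/5)*0.5 = 0.6 on the diagonal and 0.1 off the diagonal, so each row sums
# to 0.6 + 4*0.1 = 1.0, as a proper transition matrix should.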
init_serial_growth = copy(Params.init_idiosyncratic_shocks)
init_serial_growth['MrkvArray'] = MrkvArray
SerialGroExample = MarkovConsumerType(**init_serial_growth)
SerialGroExample.assignParameters(Rfree = np.array(np.array(StateCount*[1.03])), # Same interest factor in each Markov state
PermGroFac = [np.array([0.97,0.99,1.01,1.03,1.05])], # Different permanent growth factor in each Markov state
LivPrb = [np.array(StateCount*[0.98])], # Same survival probability in all states
cycles = 0)
SerialGroExample.IncomeDstn = [IncomeDstn]
# Solve the serially correlated permanent growth shock problem and display the consumption functions
start_time = clock()
SerialGroExample.solve()
end_time = clock()
print('Solving a serially correlated growth consumer took ' + mystr(end_time-start_time) + ' seconds.')
print('Consumption functions for each discrete state:')
plotFuncs(SerialGroExample.solution[0].cFunc,0,10)
###############################################################################
# Make a consumer with serially correlated interest factors
SerialRExample = deepcopy(SerialGroExample) # Same as the last problem...
SerialRExample.assignParameters(PermGroFac = [np.array(StateCount*[1.01])], # ...but now the permanent growth factor is constant...
Rfree = np.array([1.01,1.02,1.03,1.04,1.05])) # ...and the interest factor is what varies across states
# Solve the serially correlated interest rate problem and display the consumption functions
start_time = clock()
SerialRExample.solve()
end_time = clock()
print('Solving a serially correlated interest consumer took ' + mystr(end_time-start_time) + ' seconds.')
print('Consumption functions for each discrete state:')
plotFuncs(SerialRExample.solution[0].cFunc,0,10)
| {
"content_hash": "6f9db1cb974d936eb0a5d9afc73b1599",
"timestamp": "",
"source": "github",
"line_count": 1122,
"max_line_length": 171,
"avg_line_length": 47.889483065953655,
"alnum_prop": 0.6216779572694111,
"repo_name": "bolmez/Class-HARK",
"id": "9b8de5cf83b089a96399657f2e804e13962a4b43",
"size": "53732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ConsumptionSaving/ConsMarkovModel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "855355"
}
],
"symlink_target": ""
} |
import ast
import csv
import logging
import math
import os
from nose_parameterized import parameterized
import numpy
import SimpleITK as sitk
import six
from radiomics import featureextractor, getTestCase, imageoperations
# Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func
logger = logging.getLogger('radiomics.testing')
def custom_name_func(testcase_func, param_num, param):
"""
A custom test name function that ensures tests are batched so that all tests for a given data set are run
together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
tests results in tests running in an order similar to:
test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_*
"""
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num),
testcase_func.__name__, param.args)
return str("%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(str(x) for x in param.args)),
))
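# Added illustrative example (test and case names assumed, not taken from this module):
# for a test function named 'test_scenario' parameterized with
# param.args = ('brain1', 'firstorder'), the returned name would be
# 'test_scenario_brain1_firstorder', so all 'brain1' cases sort next to each other
# and therefore run together.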
class RadiomicsTestUtils:
"""
This utility class reads in and stores the baseline files stored in 'data/baseline' (one per feature class).
It provides utility methods to get the baseline feature value for a feature class and compare it to the result
generated by the test.
"""
def __init__(self):
self._logger = logging.getLogger('radiomics.testing.utils')
self._logger.debug('RadiomicsTestUtils')
# the image and mask volumes
self._image = None
self._mask = None
self._current_image = None
self._current_mask = None
self._bb = None
self._imageType = None
# set up file paths
self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
self._baselineDir = os.path.join(self._dataDir, 'baseline')
self._tests = set()
self._test = None # Test, specifies an image and mask and some configuration (settings)
self._testCase = None # Test image and mask to use in configured test
self._testedSet = set()
self._baseline = {}
self.readBaselineFiles()
self._current_config = {}
self._featureClassName = None
self._results = {}
self._diffs = {}
for test in self.getTests():
self._results[test] = {}
self._diffs[test] = {}
def readBaselineFiles(self):
"""
Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
These files should therefore be named as follows: 'baseline_<className>.csv'.
"""
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
assert len(baselineFiles) > 0
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests
def getTests(self):
"""
Return all the tests for which there are baseline information.
"""
return self._tests
def getFeatureNames(self, className, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
"""
if className not in self._baseline:
raise AssertionError('No baseline available for class %s.' % className)
return self._baseline[className].getTestFeatures(test)
def setFeatureClassAndTestCase(self, className, test):
"""
Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case
is not recognized. These have to be set here together, as the settings with which the test case has to be loaded
are defined per feature class in the baseline (extracted from provenance information).
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
settings.
If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
class or test case is changed, function returns True.
"""
if self._featureClassName == className and self._test == test:
return False
self._test = test
self._testedSet.add(self._test)
# First set featureClass if necessary, because if settings have changed, testCase needs to be reloaded
if self._featureClassName != className:
self._logger.debug('Setting feature class name to %s', className)
assert className in self._baseline.keys() # Check if a baseline has been read for this class
self._featureClassName = className
# Check if test settings have changed
if self._current_config != self._baseline[className].getTestConfig(test):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None # forces image to be reloaded (as settings have changed)
# Next, set testCase if necessary
if self._testCase != self._current_config['TestCase']:
self._testCase = self._current_config['TestCase']
self._logger.info("Reading the image and mask for test case %s", self._testCase)
imageName, maskName = getTestCase(self._testCase) # Throws ValueError if test case is not recognized
assert imageName is not None
assert maskName is not None
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName, sitk.sitkUInt32)
if 'ImageHash' in self._current_config:
assert sitk.Hash(self._image) == self._current_config['ImageHash']
if 'MaskHash' in self._current_config:
assert sitk.Hash(self._mask) == self._current_config['MaskHash']
imageTypes = self._current_config.get('EnabledImageTypes', {'Original': {}})
settings = self._current_config.get('Settings', {})
extractor = featureextractor.RadiomicsFeatureExtractor({'imageType': imageTypes, 'setting': settings})
self._image, self._mask = extractor.loadImage(self._image, self._mask, **settings)
assert self._image is not None
assert self._mask is not None
self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings)
if correctedMask is not None:
self._mask = correctedMask
assert self._bb is not None
resegmentRange = settings.get('resegmentRange', None)
if resegmentRange is not None:
resegmentedMask = imageoperations.resegmentMask(self._image, self._mask, **settings)
# Recheck to see if the mask is still valid
self._bb, correctedMask = imageoperations.checkMask(self._image, resegmentedMask, **settings)
# Update the mask if it had to be resampled
if correctedMask is not None:
resegmentedMask = correctedMask
assert self._bb is not None
# Resegmentation successful
self._mask = resegmentedMask
self._imageType = None
return True
def getImage(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_image
def getMask(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_mask
def _applyFilter(self, imageType):
if imageType == 'original':
self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb)
else:
raise NotImplementedError()
self._imageType = imageType
def getSettings(self):
return self._current_config.get('Settings', {})
def checkResult(self, featureName, value):
"""
Use utility methods to get and test the results against the expected baseline value for this key.
"""
longName = '_'.join(featureName)
if value is None:
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
# save the result using the baseline class and feature names
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert baselineValue is not None
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if baselineValue == 0.0:
# avoid divide by zero, the difference is either 0% if the value is also zero, or 100%
if value - baselineValue == 0.0:
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs(1.0 - (value / baselineValue))
# save the difference
self._diffs[self._test][longName] = percentDiff
# check for a less than three percent difference
if percentDiff >= 0.03:
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName,
float(baselineValue), value, percentDiff * 100)
assert (percentDiff < 0.03)
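# Added illustrative note (numbers assumed): a baseline value of 100.0 and a computed
# value of 102.0 give percentDiff = abs(1.0 - 102.0 / 100.0) = 0.02, which passes the
# 3% tolerance; a computed value of 104.0 gives 0.04 and trips the assert above.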
def getResults(self):
return self._results
def getDiffs(self):
return self._diffs
def getDataDir(self):
return self._dataDir
def writeCSV(self, data, fileName):
"""
Write out data in a csv file.
Assumes a data structure with:
{'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}
"""
# Get the headers from the first testCase in _testedSet
# If no tests were run, the length of _testedSet will be 0, and no files should be written
if len(self._testedSet) > 0:
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = ['testCase'] + header
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = row + [thisCase.get(h, "N/A")]
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName)
def addTest(self, case, configuration, baselines, force=False):
self._results[case] = {}
self._diffs[case] = {}
for featureClass in baselines:
if featureClass not in self._baseline:
self._logger.warning('Feature class %s does not yet have a baseline, creating a new one', featureClass)
self._baseline[featureClass] = PyRadiomicsBaseline(featureClass)
if self._baseline[featureClass].addTest(case, configuration, baselines[featureClass], force):
self._baseline[featureClass].writeBaselineFile(self._baselineDir)
class PyRadiomicsBaseline:
def __init__(self, featureClassName):
self.logger = logging.getLogger('radiomics.testing.baseline')
self.cls = featureClassName
self.configuration = {}
self.baseline = {}
self.tests = set()
self._configKeys = []
self._baselineKeys = []
@classmethod
def readBaselineFile(cls, baselineFile):
featureClassName = os.path.basename(baselineFile)[9:-4]
new_baseline = cls(featureClassName)
new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls)
with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader:
csvReader = csv.reader(baselineReader)
tests = six.next(csvReader)[1:]
for case in tests:
new_baseline.configuration[case] = {}
new_baseline.baseline[case] = {}
for testRow in csvReader:
if 'diagnostics' in testRow[0]:
new_baseline._configKeys.append(testRow[0])
else:
new_baseline._baselineKeys.append(testRow[0])
for case_idx, case in enumerate(tests, start=1):
if 'diagnostics' in testRow[0]:
new_baseline.configuration[case][testRow[0]] = testRow[case_idx]
else:
new_baseline.baseline[case][testRow[0]] = testRow[case_idx]
new_baseline.tests = set(tests)
return new_baseline
def addTest(self, case, configuration, baseline, force=False):
if case not in self.tests:
self.tests.add(case)
elif not force:
self.logger.warning('Test %s already present in the baseline for class %s, skipping addTest', case, self.cls)
return False
self.configuration[case] = configuration
self.baseline[case] = baseline
return True
def getTestConfig(self, test):
if test not in self.configuration:
return {} # This test is not present in the baseline for this class
config = {
'TestCase': self.configuration[test].get('diagnostics_Configuration_TestCase', None),
'Settings': ast.literal_eval(self.configuration[test].get('diagnostics_Configuration_Settings', '{}')),
'EnabledImageTypes': ast.literal_eval(self.configuration[test].get('diagnostics_Configuration_EnabledImageTypes',
'{}'))
}
# ensure resegmentation is disabled for the shape class
if self.cls == 'shape' and 'resegmentRange' in config['Settings']:
config['Settings']['resegmentRange'] = None
if config['TestCase'] is None:
self.logger.error('Missing key "diagnostics_Configuration_TestCase". Cannot configure!')
return None
if 'diagnostics_Image-original_Hash' in self.configuration[test]:
config['ImageHash'] = self.configuration[test]['diagnostics_Image-original_Hash']
if 'diagnostics_Mask-original_Hash' in self.configuration[test]:
config['MaskHash'] = self.configuration[test]['diagnostics_Mask-original_Hash']
return config
def getTestFeatures(self, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names.
"""
if test not in self.baseline:
return None # This test is not present in the baseline for this class
return list(self.baseline[test].keys())
def getBaselineValue(self, test, featureName):
if test not in self.baseline:
return None
return self.baseline[test].get(featureName, None)
def writeBaselineFile(self, baselineDir):
baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls)
cases = sorted(self.tests)
with open(baselineFile, 'w') as baseline:
csvWriter = csv.writer(baseline, lineterminator='\n')
header = ['featureName'] + cases
csvWriter.writerow(header)
config = self.configuration[cases[0]].keys()
self._configKeys += list(set(config) - set(self._configKeys))
for c in self._configKeys:
if c not in config:
continue
row = [c]
for testCase in cases:
row.append(str(self.configuration[testCase].get(c, '')))
csvWriter.writerow(row)
features = self.baseline[cases[0]].keys()
self._baselineKeys += list(set(features) - set(self._baselineKeys))
for f in self._baselineKeys:
if f not in features:
continue
row = [f]
for testCase in cases:
row.append(str(self.baseline[testCase].get(f, '')))
csvWriter.writerow(row)
| {
"content_hash": "984cb917a3e9a7bde9bc8fe2c82bd511",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 119,
"avg_line_length": 36.78132118451025,
"alnum_prop": 0.6734997213104601,
"repo_name": "Radiomics/pyradiomics",
"id": "3b8857822b4e2a3d50de624bf9b9d801b19ee9d3",
"size": "16148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testUtils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "354"
},
{
"name": "C",
"bytes": "101073"
},
{
"name": "C++",
"bytes": "1295"
},
{
"name": "Dockerfile",
"bytes": "7637"
},
{
"name": "Jupyter Notebook",
"bytes": "4347504"
},
{
"name": "Python",
"bytes": "455615"
},
{
"name": "Shell",
"bytes": "600"
}
],
"symlink_target": ""
} |
import vx
from vx.window import window
from .undo import undo_tree, addition, removal
import vx.movement as move
import vx.text as text
from .keybindings import ctrl, keys
from vx.copystack import CopyStack
import re
from contextlib import contextmanager
class buffer(window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.undo_tree = undo_tree(self)
def print_undo():
from .pointer import panes
p = panes.focused.split()
p.buffer.blank()
p.buffer.add_string(self.undo_tree.stringify())
p.buffer.unfocus()
p.buffer.keybinding_table = vx.undo.load(p.buffer, attached_to=self)
p.buffer.focus()
self.keybinding_table.bind(ctrl + keys.y, print_undo)
self.copystack = CopyStack()
self.last_seeked_column = 1
def redo(self):
self.undo_tree.redo()
def undo(self):
self.undo_tree.undo()
def save(self):
super().save()
self.undo_tree.mark_save_point()
@contextmanager
def cursor_wander(self, command=None):
y, x = self.cursor
if command is not None:
command()
yp, xp = self.cursor
yield (yp, xp)
self.cursor = (y, x)
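# Added illustrative usage sketch (caller code assumed, not part of this module;
# 'buf' stands for a buffer instance):
#
#   with buf.cursor_wander(move.eol) as (line, col):
#       ...  # inspect the end-of-line position here
#   # afterwards the cursor is restored to where it started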
@contextmanager
def column_restore(self, column=None):
if not column:
column = self.last_seeked_column
yield
with self.cursor_wander():
move.eol()
_, end = self.cursor
self.cursor = (self.cursor[0], min(end, column))
@contextmanager
def column_save(self):
yield
self.last_seeked_column = self.cursor[1]
@contextmanager
def cursor_jail(self, la, ca, lb, cb):
old_cursor = getattr(buffer, 'cursor')
def _set_cursor(inst, linecol):
if self is inst:
line, col = linecol
if line < la: return
if line > lb: return
if line == la and col < ca: return
if line == lb and col > cb: return
return vx.set_linecol_window(self, line, col)
def _get_cursor(self):
return vx.get_linecol_window(self)
setattr(buffer, 'cursor', property(_get_cursor, _set_cursor))
yield
setattr(buffer, 'cursor', old_cursor)
def backspace(self, track=True):
if track:
self.dirty = True
l, c = self.cursor
lb, cb = l, c
if l > 1 or c > 1:
c = c - 1
if c == 0:
l -= 1
move.up()
move.eol()
_, c = self.cursor
move.down()
move.bol()
ch = vx.get_ch_linecol_window(self, l, c)
if ch == '\t':
c -= 7
self.undo_tree.add(removal(ch, l, c, hold=False, box=(l, c, lb, cb)))
super().backspace()
def delete(self, track=True):
if track:
self.dirty = True
l, c = self.cursor
move.right()
lb, cb = self.cursor
move.left()
ch = vx.get_ch_linecol_window(self, l, c)
self.undo_tree.add(removal(ch, l, c, hold=True, box=(l, c, lb, cb)))
super().delete()
def add_string(self, s, track=True):
if track:
la, ca = self.cursor
self.dirty = True
super().add_string(s)
if track:
lb, cb = self.cursor
self.undo_tree.add(addition(s, lb, cb, (la, ca, lb, cb)))
def remove_text(self, from_line, from_col, to_line, to_col):
"""Removes all the text between from_line/from_col to to_line/to_col"""
with self.cursor_wander():
self.cursor = (to_line, to_col)
line, col = self.cursor
while line != from_line or col != from_col:
self.backspace(track=False)
line, col = self.cursor
def get_text_lines(self):
return len(self.contents.split('\n'))
def get_linecoloffset_of(self, to_find, contents=None, start=0, forward=True):
if forward:
contents = contents or self.get_contents_from_cursor()
index = contents.find(to_find, start)
lines, columns = text.get_linecol_offset(contents[:index])
l, c = self.cursor
columns = c + columns if lines == 0 else columns + 1
lines = l + lines
return lines, columns, index
else:
raise Exception('not implemented')
def get_all_linecoloffsets_of(self, to_find, forward=True):
if not forward:
raise Exception('not implemented')
contents = self.get_contents_from_cursor()
start = 0
while True:
line, col, start = self.get_linecoloffset_of(to_find, contents, start, forward)
if start == -1: break
yield (line, col, start)
start += 1
def get_regex_linecoloffsetlength_of(self, regex, contents=None, start=0, forward=True):
if forward:
contents = contents or self.get_contents_from_cursor()
try:
rgx = re.compile(regex, re.MULTILINE)
except:
return (0, 0, -1, 0)
match = rgx.search(contents, start)
if match is None:
return (0, 0, -1, 0)
index = match.start()
lines, columns = text.get_linecol_offset(contents[:index])
l, c = self.cursor
columns = c + columns if lines == 0 else columns + 1
lines = l + lines
length = match.end() - match.start()
return lines, columns, index, length
else:
contents = contents or self.get_contents_before_cursor()
try:
rgx = re.compile(regex, re.MULTILINE)
except:
return (0, 0, -1, 0)
match = None
for match in rgx.finditer(contents, endpos=len(contents)-start):
pass
if match is None:
return (0, 0, -1, 0)
index = match.start()
lines, columns = text.get_linecol_offset(contents[index:], forward=False)
back = index - contents.rfind('\n', 0, index) - 1
if back == -1: back = 0
columns = back+1
tabs = contents[contents.rfind('\n', 0, index):index].count('\t')
columns += 7*tabs
ol, oc = lines, columns
l, c = self.cursor
lines = l - lines
length = match.end() - match.start()
return lines, columns, index, length
def get_all_regex_linecoloffsetlengths_of(self, regex, forward=True):
if forward:
contents = self.get_contents_from_cursor()
start = 0
while True:
line, col, start, length = self.get_regex_linecoloffsetlength_of(regex, contents, start, forward)
if start == -1: break
yield (line, col, start, length)
start += 1
else:
contents = self.get_contents_before_cursor()
start = 0
while True:
line, col, start, length = self.get_regex_linecoloffsetlength_of(regex, contents, start, forward)
if start == -1:
break
yield (line, col, start, length)
start = len(contents) - start + 1
| {
"content_hash": "205c02e071351e26e04de3958e65c093",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 113,
"avg_line_length": 34.967592592592595,
"alnum_prop": 0.5180722891566265,
"repo_name": "philipdexter/vx",
"id": "9e844a4b6d3e0e124f16feb04e69f550b621b64d",
"size": "7553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vx/buffer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "43085"
},
{
"name": "Makefile",
"bytes": "812"
},
{
"name": "Python",
"bytes": "99474"
}
],
"symlink_target": ""
} |
from openerp import models, fields, api
class Wizard(models.TransientModel):
_name = 'openacademy.wizard'
def _default_sessions(self):
return self.env['openacademy.session'].browse(
self._context.get('active_ids'))
session_ids = fields.Many2many(
'openacademy.session',
string="Sessions",
required=True,
default=_default_sessions)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
@api.multi
def subscribe(self):
for session in self.session_ids:
session.attendee_ids |= self.attendee_ids
return {}
| {
"content_hash": "4a7afbdfb01ede9ee7a433885878b079",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 28.363636363636363,
"alnum_prop": 0.6362179487179487,
"repo_name": "umiphos/vauxooTechnical",
"id": "f97a76918409112673efaf8de98123cba4891aba",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openacademy/wizard/openacademy_wizard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93802"
}
],
"symlink_target": ""
} |
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-g_4rnw%a=v@*ntp%7+mej&sh(6zgi2a1im=_cf##=xx=lav_8#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'redisboard',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'caca.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(BASE_DIR / 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator'},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| {
"content_hash": "daf6abf9a7d3363348715ffc4f9f4533",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 89,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6945592286501377,
"repo_name": "ionelmc/django-redisboard",
"id": "d4c852b7de80f8bda1328f4c57c643ebf581e137",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_project/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1498"
},
{
"name": "HTML",
"bytes": "28774"
},
{
"name": "Python",
"bytes": "70269"
}
],
"symlink_target": ""
} |
import os
def _recursive_update(d, u):
"""
Yanked from ``http://stackoverflow.com/questions/3232943/``
Recursively update dictionary ``d`` with ``u``
"""
for k, v in u.iteritems():
if isinstance(v, dict):
r = _recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
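# Added illustrative example (not part of the original module; assumes Python 2,
# matching the iteritems() call above):
#
#   _recursive_update({'a': {'b': 1, 'c': 2}}, {'a': {'b': 9}, 'd': 3})
#   # -> {'a': {'b': 9, 'c': 2}, 'd': 3}
#
# Nested dicts are merged key by key instead of being replaced wholesale.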
class SuperDict(dict):
def recursive_update(self, updated):
# _recursive_update mutates the dict in place, so no reassignment is needed
_recursive_update(self, updated)
def files_from_dir(directory):
files = []
for filename in os.listdir(directory):
fullname = os.path.join(os.path.abspath(directory), filename)
# resolve links
if os.path.islink(fullname):
fullname = os.readlink(fullname)
# skip non-files
if not os.path.isfile(fullname):
continue
files.append(fullname)
return files
| {
"content_hash": "1258bfc31f1debe9dfca0acfd7c49b58",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 26.78787878787879,
"alnum_prop": 0.5769230769230769,
"repo_name": "jcmcken/pallium",
"id": "b390ba2b214d24a99b925bbacdb448d361459464",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pallium/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19376"
},
{
"name": "Shell",
"bytes": "92"
}
],
"symlink_target": ""
} |
"""Implementations of different data feeders to provide data for TF trainer."""
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import itertools
import math
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from sklearn.utils import check_array
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size):
"""Returns shape for input and output of the data feeder."""
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip the first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape
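# Added illustrative example (shapes assumed): for X of shape (1000, 10) and a
# 1-D y of shape (1000,),
#   _get_in_out_shape((1000, 10), (1000,), n_classes=3, batch_size=32)
#   # -> input_shape = [32, 10], output_shape = [32, 3]
# and for regression (n_classes <= 1) the class dimension is dropped:
#   _get_in_out_shape((1000, 10), (1000,), n_classes=0, batch_size=32)
#   # -> input_shape = [32, 10], output_shape = [32]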
def _data_type_filter(X, y):
"""Filter data types into acceptable format"""
if HAS_DASK:
X = extract_dask_data(X)
y = extract_dask_labels(y)
if HAS_PANDAS:
X = extract_pandas_data(X)
y = extract_pandas_labels(y)
return X, y
def _is_iterable(X):
return hasattr(X, 'next') or hasattr(X, '__next__')
def setup_train_data_feeder(X, y, n_classes, batch_size):
"""Create data feeder, to sample inputs from dataset.
If X and y are iterators, use StreamingDataFeeder.
Args:
X: numpy, pandas or Dask matrix or iterable.
y: numpy, pandas or Dask array or iterable.
n_classes: number of classes.
batch_size: size to split data into parts.
Returns:
DataFeeder object that returns training data.
"""
X, y = _data_type_filter(X, y)
if HAS_DASK:
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
if isinstance(X, allowed_classes) and isinstance(y, allowed_classes):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(X):
if not _is_iterable(y):
raise ValueError("Both X and y should be iterators for "
"streaming learning to work.")
data_feeder_cls = StreamingDataFeeder
return data_feeder_cls(X, y, n_classes, batch_size)
def _batch_data(X, batch_size):
chunk = []
for data in X:
chunk.append(data)
if batch_size > 0 and len(chunk) >= batch_size:
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(X, batch_size=-1):
"""Returns an iterable for feeding into predict step.
Args:
X: numpy, pandas, Dask array or iterable.
batch_size: Size of batches to split data into.
If negative, returns one batch of full size.
Returns:
List or iterator of parts of data to predict on.
"""
if HAS_DASK:
X = extract_dask_data(X)
if HAS_PANDAS:
X = extract_pandas_data(X)
if _is_iterable(X):
return _batch_data(X, batch_size)
if len(X.shape) == 1:
X = np.reshape(X, (-1, 1))
if batch_size > 0:
n_batches = int(math.ceil(float(len(X)) / batch_size))
return [X[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [X]
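# Added illustrative note (sizes assumed): with batch_size=2, a 5-row matrix X is
# split into int(math.ceil(5 / 2.0)) = 3 parts of 2, 2 and 1 rows; with the default
# batch_size=-1 the whole matrix comes back as the single-element list [X].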
def setup_processor_data_feeder(X):
"""Sets up processor iterable.
Args:
X: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
X = extract_pandas_matrix(X)
return X
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
Parameters:
X: feature Nd numpy matrix of shape [n_samples, n_features, ...].
y: target vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence
of targets.
n_classes: number of classes, 0 and 1 are considered regression.
batch_size: mini batch size to accumulate.
random_state: numpy RandomState object to reproduce sampling.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size, random_state=None):
x_dtype = np.int64 if X.dtype == np.int64 else np.float32
y_dtype = np.int64 if n_classes > 1 else np.float32
self.X = check_array(X, ensure_2d=False,
allow_nd=True, dtype=x_dtype)
self.y = check_array(y, ensure_2d=False, dtype=y_dtype)
self.n_classes = n_classes
self.batch_size = batch_size
self.input_shape, self.output_shape = _get_in_out_shape(
self.X.shape, self.y.shape, n_classes, batch_size)
# Input dtype matches dtype of X.
self.input_dtype = self.X.dtype
# Output dtype always float32 (because for classification we use
# one-hot vectors).
self.output_dtype = np.float32
self.random_state = np.random.RandomState(42) if random_state is None else random_state
self.indices = self.random_state.permutation(self.X.shape[0])
self.offset = 0
self.epoch = 0
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self.batch_size
}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to given
placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
def _feed_dict_fn():
# take random indices
batch_indices = self.indices[self.offset: self.offset+self.batch_size]
# assign input features from random indices
inp = np.array(self.X[batch_indices]).reshape((batch_indices.shape[0], 1)) \
if len(self.X.shape) == 1 else self.X[batch_indices]
# assign labels from random indices
self.output_shape[0] = batch_indices.shape[0]
out = np.zeros(self.output_shape, dtype=self.output_dtype)
for i in xrange(out.shape[0]):
sample = batch_indices[i]
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, self.y[sample]), 1.0)
else:
for idx, value in enumerate(self.y[sample]):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = self.y[sample]
# move offset and reset it if necessary
self.offset += self.batch_size
if self.offset >= self.X.shape[0]:
self.indices = self.random_state.permutation(self.X.shape[0])
self.offset = 0
self.epoch += 1
return {input_placeholder.name: inp, output_placeholder.name: out}
return _feed_dict_fn
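# Added illustrative note (values assumed): with n_classes = 3 and a sampled label
# self.y[sample] = 2, the corresponding row of `out` becomes [0., 0., 1.]; for
# regression (n_classes <= 1) the raw target value is copied into `out` instead.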
class StreamingDataFeeder(object):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is customary to have these iterators rotate infinitely over
the dataset, to allow control of how much to learn on the trainer side.
Parameters:
X: iterator that, for each element, returns the features.
y: iterator that, for each element, returns one or many classes /
regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size):
X_first_el = six.next(X)
y_first_el = six.next(y)
self.X = itertools.chain([X_first_el], X)
self.y = itertools.chain([y_first_el], y)
self.n_classes = n_classes
self.batch_size = batch_size
self.input_shape, self.output_shape = _get_in_out_shape(
[1] + list(X_first_el.shape),
[1] + list(y_first_el.shape), n_classes, batch_size)
self.input_dtype = X_first_el.dtype
# Convert float64 to float32, as all the parameters in the model are
# float32 and there are many benefits to using it in NNs.
if self.input_dtype == np.float64:
self.input_dtype = np.float32
# Output types are floats, due to both softmaxes and regression req.
self.output_dtype = np.float32
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self.batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to given
placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
def _feed_dict_fn():
inp = np.zeros(self.input_shape, dtype=self.input_dtype)
out = np.zeros(self.output_shape, dtype=self.output_dtype)
for i in xrange(self.batch_size):
inp[i, :] = six.next(self.X)
y = six.next(self.y)
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, y), 1.0)
else:
for idx, value in enumerate(y):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = y
return {input_placeholder.name: inp, output_placeholder.name: out}
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for TF trainer that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks into them.
DaskDataFeeder removes the requirement to have the full dataset in memory and still allows
random seeks for sampling batches.
Parameters:
X: dask.DataFrame of input features.
y: dask.DataFrame of targets; one or many classes /
regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
random_state: random state for RNG. Note that it will mutate so use an int value
for this if you want consistent sized batches.
Attributes:
X: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
def __init__(self, X, y, n_classes, batch_size, random_state=None):
import dask.dataframe as dd
# TODO: check X and y dtypes in dask_io like pandas
self.X = X
self.y = y
# save column names
self.X_columns = list(X.columns)
if isinstance(y.columns[0], str):
self.y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapping default numeric colnames
self.y_columns = len(self.X_columns)+1
self.y = self.y.rename(columns={y.columns[0]: self.y_columns})
# combine into a data frame
self.df = dd.multi.concat([self.X, self.y], axis=1)
self.n_classes = n_classes
X_count = X.count().compute()[0]
X_shape = (X_count, len(self.X.columns))
y_shape = (X_count, len(self.y.columns))
self.sample_fraction = batch_size/float(X_count)
self.input_shape, self.output_shape = _get_in_out_shape(
X_shape, y_shape, n_classes, batch_size)
# self.X.dtypes[0], self.y.dtypes[self.y_columns]
self.input_dtype, self.output_dtype = np.float32, np.float32
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
self.batch_size = batch_size
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self.batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to given
placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from X and y.
"""
def _feed_dict_fn():
# TODO: option for with/without replacement (dev version of dask)
sample = self.df.random_split([self.sample_fraction, 1-self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self.X_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self.y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self.input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self.y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max+1), dtype=self.output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| {
"content_hash": "eab75bf8018db90ba48c8f79e3e2e048",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 100,
"avg_line_length": 38.64164648910412,
"alnum_prop": 0.6046744783507738,
"repo_name": "panmari/tensorflow",
"id": "3a33dd7a70b456a756cf46f4de2c604dd95f8358",
"size": "15959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/skflow/python/skflow/io/data_feeder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "153226"
},
{
"name": "C++",
"bytes": "7360924"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "683163"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771416"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "102168"
},
{
"name": "Python",
"bytes": "4526515"
},
{
"name": "Shell",
"bytes": "117381"
},
{
"name": "TypeScript",
"bytes": "340911"
}
],
"symlink_target": ""
} |
exec(open('functional/tests/_standalone_rst_defaults.py').read())
# Source and destination file names.
test_source = "standalone_rst_latex.txt"
test_destination = "standalone_rst_latex.tex"
# Keyword parameters passed to publish_file.
writer_name = "latex"
# Settings
# use "smartquotes" transition:
settings_overrides['smart_quotes'] = True
| {
"content_hash": "8b8430eb380060d6ce7a3cc7f151867f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 28.75,
"alnum_prop": 0.7565217391304347,
"repo_name": "kawamon/hue",
"id": "0809fcc939566ddfeba6e96da8eee72e955cad50",
"size": "345",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/docutils-0.14/test/functional/tests/standalone_rst_latex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from datetime import datetime
import logging
import pytz
from website.settings import KEEN as keen_settings
from keen.client import KeenClient
logger = logging.getLogger(__name__)
class DailyReporter:
def report(self, report_date):
"""build reports for the given date
return an iterable of DailyReport (unsaved)
"""
raise NotImplementedError(f'{type(self).__name__} must implement `report`')
def keen_events_from_report(self, report):
"""given one of this reporter's own reports, build equivalent keen events
(for back-compat; to be deleted once we don't need keen anymore)
return a mapping from keen collection name to iterable of events
e.g. {'my_keen_collection': [event1, event2, ...]}
"""
raise NotImplementedError(f'{type(self).__name__} should probably implement keen_events_from_report')
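# Added illustrative sketch of a concrete reporter (all names assumed, not part of
# this module):
#
#   class WidgetCountReporter(DailyReporter):
#       def report(self, report_date):
#           return [WidgetCountDailyReport(report_date=report_date, count=42)]
#
#       def keen_events_from_report(self, report):
#           return {'widget_counts': [{'count': report.count}]}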
def run_and_record_for_date(self, report_date, *, also_send_to_keen=False):
reports = self.report(report_date)
# expecting each reporter to spit out only a handful of reports per day;
# not bothering with bulk-create
for report in reports:
report.save()
if also_send_to_keen:
self.send_to_keen(reports)
def send_to_keen(self, reports):
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if not (keen_project and write_key):
logger.warning(f'keen not configured; not sending events for {self.__class__.__name__}')
return
keen_events_by_collection = defaultdict(list)
for report in reports:
keen_event_timestamp = datetime(
report.report_date.year,
report.report_date.month,
report.report_date.day,
tzinfo=pytz.utc,
)
for collection_name, keen_events in self.keen_events_from_report(report).items():
for event in keen_events:
event['keen'] = {'timestamp': keen_event_timestamp.isoformat()}
keen_events_by_collection[collection_name].extend(keen_events)
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
client.add_events(keen_events_by_collection)
| {
"content_hash": "93ac65658222c33219c666b50054a9f6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 103,
"avg_line_length": 36.015151515151516,
"alnum_prop": 0.6243163651661758,
"repo_name": "cslzchen/osf.io",
"id": "e37a1ee1ac9b972c1f8e0769d412547c34da3e9c",
"size": "2377",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "osf/metrics/reporters/_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11612029"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import six
import yaml
from django import template
from django.template import loader
from django.test.utils import override_settings
from django.urls import reverse
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:api_access:index')
API_URL = "horizon:project:api_access"
EC2_URL = reverse(API_URL + ":ec2")
OPENRC_URL = reverse(API_URL + ":openrc")
OPENRCV2_URL = reverse(API_URL + ":openrcv2")
CREDS_URL = reverse(API_URL + ":view_credentials")
RECREATE_CREDS_URL = reverse(API_URL + ":recreate_credentials")
class APIAccessTests(test.TestCase):
@test.create_mocks({api.keystone: ('create_ec2_credentials',
'list_ec2_credentials')})
def test_ec2_download_view(self):
creds = self.ec2.first()
self.mock_list_ec2_credentials.return_value = []
self.mock_create_ec2_credentials.return_value = creds
res = self.client.get(EC2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(res['content-type'], 'application/zip')
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
self.mock_create_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, self.tenant.id)
def test_openrcv2_credentials(self):
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/api_access/openrc_v2.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
t_id = 'export OS_TENANT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(t_id.encode('utf-8'), res.content)
# domain content should not be present for v2
self.assertNotIn(domain.encode('utf-8'), res.content)
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials(self):
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/api_access/openrc.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
p_id = 'export OS_PROJECT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(p_id.encode('utf-8'), res.content)
self.assertIn(domain.encode('utf-8'), res.content)
@test.create_mocks({api.keystone: ('list_ec2_credentials',)})
def test_credential_api(self):
certs = self.ec2.list()
self.mock_list_ec2_credentials.return_value = certs
res = self.client.get(CREDS_URL)
self.assertEqual(res.status_code, 200)
credentials = 'project/api_access/credentials.html'
self.assertTemplateUsed(res, credentials)
self.assertEqual(self.user.id, res.context['openrc_creds']['user'].id)
self.assertEqual(certs[0].access,
res.context['ec2_creds']['ec2_access_key'])
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
@test.create_mocks({api.keystone: ('create_ec2_credentials',
'list_ec2_credentials',
'delete_user_ec2_credentials')})
def _test_recreate_user_credentials(self, exists_credentials=True):
old_creds = self.ec2.list() if exists_credentials else []
new_creds = self.ec2.first()
self.mock_list_ec2_credentials.return_value = old_creds
if exists_credentials:
self.mock_delete_user_ec2_credentials.return_value = []
self.mock_create_ec2_credentials.return_value = new_creds
res_get = self.client.get(RECREATE_CREDS_URL)
self.assertEqual(res_get.status_code, 200)
credentials = \
'project/api_access/recreate_credentials.html'
self.assertTemplateUsed(res_get, credentials)
res_post = self.client.post(RECREATE_CREDS_URL)
self.assertNoFormErrors(res_post)
self.assertRedirectsNoFollow(res_post, INDEX_URL)
self.mock_list_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id)
if exists_credentials:
self.mock_delete_user_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, old_creds[0].access)
else:
self.mock_delete_user_ec2_credentials.assert_not_called()
self.mock_create_ec2_credentials.assert_called_once_with(
test.IsHttpRequest(), self.user.id, self.tenant.id)
def test_recreate_user_credentials(self):
self._test_recreate_user_credentials()
def test_recreate_user_credentials_with_no_existing_creds(self):
self._test_recreate_user_credentials(exists_credentials=False)
class ASCIITenantNameRCTests(test.TestCase):
TENANT_NAME = 'tenant'
def _setup_user(self, **kwargs):
super(ASCIITenantNameRCTests, self)._setup_user(
tenant_name=self.TENANT_NAME)
def test_openrcv2_credentials_filename(self):
expected = 'attachment; filename="%s-openrc.sh"' % self.TENANT_NAME
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(expected, res['content-disposition'])
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials_filename(self):
expected = 'attachment; filename="%s-openrc.sh"' % self.TENANT_NAME
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(expected, res['content-disposition'])
class UnicodeTenantNameRCTests(test.TestCase):
TENANT_NAME = u'\u043f\u0440\u043e\u0435\u043a\u0442'
def _setup_user(self, **kwargs):
super(UnicodeTenantNameRCTests, self)._setup_user(
tenant_name=self.TENANT_NAME)
def test_openrcv2_credentials_filename(self):
expected = ('attachment; filename="%s-openrc.sh"' %
self.TENANT_NAME).encode('utf-8')
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
result_content_disposition = res['content-disposition']
# we need to encode('latin-1') because django response object
# has custom setter which encodes all values to latin-1 for Python3.
# https://github.com/django/django/blob/1.9.6/django/http/response.py#L142
# see _convert_to_charset() method for details.
if six.PY3:
result_content_disposition = result_content_disposition.\
encode('latin-1')
self.assertEqual(expected,
result_content_disposition)
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials_filename(self):
expected = ('attachment; filename="%s-openrc.sh"' %
self.TENANT_NAME).encode('utf-8')
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
result_content_disposition = res['content-disposition']
if six.PY3:
result_content_disposition = result_content_disposition.\
encode('latin-1')
self.assertEqual(expected,
result_content_disposition)
class FakeUser(object):
username = "cool user"
class TemplateRenderTest(test.TestCase):
"""Tests for templates render."""
def test_openrc_html_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "ENG Perf R&D"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn("&", out)
self.assertIn("ENG Perf R&D", out)
def test_openrc_html_evil_shell_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o"; sudo rm -rf /'}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn('o"', out)
self.assertIn('\"', out)
def test_openrc_html_evil_shell_backslash_escape(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": 'o\"; sudo rm -rf /'}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertNotIn('o\"', out)
self.assertNotIn('o"', out)
self.assertIn('\\"', out)
def test_openrc_set_region(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant",
"region": "Colorado"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertIn("OS_REGION_NAME=\"Colorado\"", out)
def test_openrc_region_not_set(self):
context = {
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://tests.com",
"tenant_name": "Tenant"}
out = loader.render_to_string(
'project/api_access/openrc.sh.template',
context,
template.Context(context))
self.assertIn("OS_REGION_NAME=\"\"", out)
def test_clouds_yaml_set_region(self):
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"region": "Colorado"}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertEqual("Colorado", out['clouds']['openstack']['region_name'])
self.assertNotIn('regions', out['clouds']['openstack'])
def test_clouds_yaml_region_not_set(self):
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant"}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertNotIn('regions', out['clouds']['openstack'])
def test_clouds_yaml_regions(self):
regions = ['region1', 'region2']
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"regions": regions}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertNotIn('profile', out['clouds']['openstack'])
self.assertEqual(
"http://example.com",
out['clouds']['openstack']['auth']['auth_url'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertIn('regions', out['clouds']['openstack'])
self.assertEqual(regions, out['clouds']['openstack']['regions'])
def test_clouds_yaml_profile(self):
regions = ['region1', 'region2']
context = {
"cloud_name": "openstack",
"user": FakeUser(),
"profile": "example",
"tenant_id": "some-cool-id",
"auth_url": "http://example.com",
"tenant_name": "Tenant",
"regions": regions}
out = yaml.safe_load(loader.render_to_string(
'project/api_access/clouds.yaml.template',
context,
template.Context(context)))
self.assertIn('clouds', out)
self.assertIn('openstack', out['clouds'])
self.assertIn('profile', out['clouds']['openstack'])
self.assertEqual('example', out['clouds']['openstack']['profile'])
self.assertNotIn('auth_url', out['clouds']['openstack']['auth'])
self.assertNotIn('region_name', out['clouds']['openstack'])
self.assertNotIn('regions', out['clouds']['openstack'])
| {
"content_hash": "561f852f362cb8c4c49012ed91fcc50c",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 82,
"avg_line_length": 39.37321937321937,
"alnum_prop": 0.597684515195369,
"repo_name": "noironetworks/horizon",
"id": "982e5aac949ccdba2a0c421dc1ed3185cfe64430",
"size": "14423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/api_access/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "129247"
},
{
"name": "HTML",
"bytes": "581169"
},
{
"name": "JavaScript",
"bytes": "2455930"
},
{
"name": "Python",
"bytes": "5190295"
},
{
"name": "Shell",
"bytes": "7108"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for enum_preprocess.py.
This test suite contains various tests for the C++ -> Java enum generator.
"""
import collections
from datetime import date
import unittest
import java_cpp_enum
from java_cpp_enum import EnumDefinition, GenerateOutput
from java_cpp_enum import HeaderParser
from util import java_cpp_utils
class TestPreprocess(unittest.TestCase):
def testOutput(self):
definition = EnumDefinition(original_enum_name='ClassName',
enum_package='some.package',
entries=[('E1', 1), ('E2', '2 << 2')],
comments=[('E2', 'This is a comment.'),
('E1', 'This is a multiple line '
'comment that is really long. '
'This is a multiple line '
'comment that is really '
'really long.')])
output = GenerateOutput('path/to/file', definition)
expected = """
// Copyright %d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is autogenerated by
// %s
// From
// path/to/file
package some.package;
import androidx.annotation.IntDef;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
@IntDef({
ClassName.E1, ClassName.E2
})
@Retention(RetentionPolicy.SOURCE)
public @interface ClassName {
/**
* %s
* really really long.
*/
int E1 = 1;
/**
* This is a comment.
*/
int E2 = 2 << 2;
}
"""
long_comment = ('This is a multiple line comment that is really long. '
'This is a multiple line comment that is')
self.assertEqual(
expected % (date.today().year, java_cpp_utils.GetScriptName(),
long_comment), output)
def testParseSimpleEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
VALUE_ZERO,
VALUE_ONE,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('EnumName', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('VALUE_ZERO', 0),
('VALUE_ONE', 1)]),
definition.entries)
def testParseBitShifts(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
VALUE_ZERO = 1 << 0,
VALUE_ONE = 1 << 1,
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
ENUM_NAME_ZERO = 1 << 0,
ENUM_NAME_ONE = 1 << 1,
ENUM_NAME_TWO = ENUM_NAME_ZERO | ENUM_NAME_ONE,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('EnumName', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('VALUE_ZERO', '1 << 0'),
('VALUE_ONE', '1 << 1')]),
definition.entries)
definition = definitions[1]
expected_entries = collections.OrderedDict([
('ZERO', '1 << 0'),
('ONE', '1 << 1'),
('TWO', 'ZERO | ONE')])
self.assertEqual(expected_entries, definition.entries)
def testParseMultilineEnumEntry(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: bar.namespace
enum Foo {
VALUE_ZERO = 1 << 0,
VALUE_ONE =
SymbolKey | FnKey | AltGrKey | MetaKey | AltKey | ControlKey,
VALUE_TWO = 1 << 18,
};
""".split('\n')
expected_entries = collections.OrderedDict([
('VALUE_ZERO', '1 << 0'),
('VALUE_ONE', 'SymbolKey | FnKey | AltGrKey | MetaKey | AltKey | '
'ControlKey'),
('VALUE_TWO', '1 << 18')])
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('bar.namespace', definition.enum_package)
self.assertEqual(expected_entries, definition.entries)
def testParseEnumEntryWithTrailingMultilineEntry(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: bar.namespace
enum Foo {
VALUE_ZERO = 1,
VALUE_ONE =
SymbolKey | FnKey | AltGrKey | MetaKey |
AltKey | ControlKey | ShiftKey,
};
""".split('\n')
expected_entries = collections.OrderedDict([
('VALUE_ZERO', '1'),
('VALUE_ONE', 'SymbolKey | FnKey | AltGrKey | MetaKey | AltKey | '
'ControlKey | ShiftKey')])
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('bar.namespace', definition.enum_package)
self.assertEqual(expected_entries, definition.entries)
def testParseNoCommaAfterLastEntry(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: bar.namespace
enum Foo {
VALUE_ZERO = 1,
// This is a multiline
//
// comment with an empty line.
VALUE_ONE = 2
};
""".split('\n')
expected_entries = collections.OrderedDict([
('VALUE_ZERO', '1'),
('VALUE_ONE', '2')])
expected_comments = collections.OrderedDict([
('VALUE_ONE', 'This is a multiline comment with an empty line.')])
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('bar.namespace', definition.enum_package)
self.assertEqual(expected_entries, definition.entries)
self.assertEqual(expected_comments, definition.comments)
def testParseClassNameOverride(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OverrideName
enum EnumName {
FOO
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OtherOverride
enum PrefixTest {
PREFIX_TEST_A,
PREFIX_TEST_B,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('OverrideName', definition.class_name)
definition = definitions[1]
self.assertEqual('OtherOverride', definition.class_name)
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1)]),
definition.entries)
def testParsePreservesCommentsWhenPrefixStripping(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumOne {
ENUM_ONE_A = 1,
// Comment there
ENUM_ONE_B = A,
};
enum EnumIgnore {
C, D, E
};
// GENERATED_JAVA_ENUM_PACKAGE: other.package
// GENERATED_JAVA_PREFIX_TO_STRIP: P_
enum EnumTwo {
P_A,
// This comment spans
// two lines.
P_B
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('EnumOne', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', '1'),
('B', 'A')]),
definition.entries)
self.assertEqual(collections.OrderedDict([('B', 'Comment there')]),
definition.comments)
definition = definitions[1]
self.assertEqual('EnumTwo', definition.class_name)
self.assertEqual('other.package', definition.enum_package)
self.assertEqual(collections.OrderedDict(
[('B', 'This comment spans two lines.')]), definition.comments)
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1)]),
definition.entries)
def testParseTwoEnums(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum AnEnum {
ENUM_ONE_A = 1,
ENUM_ONE_B = A,
};
enum EnumIgnore {
C, D, E
};
// GENERATED_JAVA_ENUM_PACKAGE: other.package
enum EnumTwo {
P_A,
P_B
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(2, len(definitions))
definition = definitions[0]
self.assertEqual('AnEnum', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('ENUM_ONE_A', '1'),
('ENUM_ONE_B', 'A')]),
definition.entries)
definition = definitions[1]
self.assertEqual('EnumTwo', definition.class_name)
self.assertEqual('other.package', definition.enum_package)
self.assertEqual(collections.OrderedDict([('P_A', 0),
('P_B', 1)]),
definition.entries)
def testParseSingleLineEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: other.package
// GENERATED_JAVA_PREFIX_TO_STRIP: P_
enum EnumTwo { P_A, P_B };
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
definition = definitions[0]
self.assertEqual('EnumTwo', definition.class_name)
self.assertEqual('other.package', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1)]),
definition.entries)
def testParseWithStrippingAndRelativeReferences(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: other.package
// GENERATED_JAVA_PREFIX_TO_STRIP: P_
enum EnumTwo {
P_A = 1,
// P_A is old-don't use P_A.
P_B = P_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
definition = definitions[0]
self.assertEqual('EnumTwo', definition.class_name)
self.assertEqual('other.package', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', '1'),
('B', 'A')]),
definition.entries)
self.assertEqual(collections.OrderedDict([('B', 'A is old-don\'t use A.')]),
definition.comments)
def testParseSingleLineAndRegularEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumOne {
ENUM_ONE_A = 1,
// Comment there
ENUM_ONE_B = A,
};
// GENERATED_JAVA_ENUM_PACKAGE: other.package
enum EnumTwo { P_A, P_B };
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OverrideName
enum EnumName {
ENUM_NAME_FOO
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
definition = definitions[0]
self.assertEqual(
collections.OrderedDict([('A', '1'), ('B', 'A')]), definition.entries)
self.assertEqual(collections.OrderedDict([('B', 'Comment there')]),
definition.comments)
self.assertEqual(3, len(definitions))
definition = definitions[1]
self.assertEqual(
collections.OrderedDict([('P_A', 0), ('P_B', 1)]), definition.entries)
definition = definitions[2]
self.assertEqual(collections.OrderedDict([('FOO', 0)]), definition.entries)
def testParseWithCamelCaseNames(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumTest {
EnumTestA = 1,
// comment for EnumTestB.
EnumTestB = 2,
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_PREFIX_TO_STRIP: Test
enum AnEnum {
TestHTTPOption,
TestHTTPSOption,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
definition = definitions[0]
self.assertEqual(
collections.OrderedDict([('A', '1'), ('B', '2')]),
definition.entries)
self.assertEqual(
collections.OrderedDict([('B', 'comment for B.')]),
definition.comments)
definition = definitions[1]
self.assertEqual(
collections.OrderedDict([('HTTP_OPTION', 0), ('HTTPS_OPTION', 1)]),
definition.entries)
def testParseWithKCamelCaseNames(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumOne {
kEnumOne = 1,
// comment for kEnumTwo.
kEnumTwo = 2,
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: OverrideName
enum EnumName {
kEnumNameFoo,
kEnumNameBar
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum EnumName {
kEnumNameFoo,
kEnumBar,
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum Keys {
kSymbolKey = 1 << 0,
kAltKey = 1 << 1,
kUpKey = 1 << 2,
kKeyModifiers = kSymbolKey | kAltKey | kUpKey | kKeyModifiers,
};
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum Mixed {
kTestVal,
kCodecMPEG2
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
definition = definitions[0]
self.assertEqual(
collections.OrderedDict([('ENUM_ONE', '1'), ('ENUM_TWO', '2')]),
definition.entries)
self.assertEqual(
collections.OrderedDict([('ENUM_TWO', 'comment for ENUM_TWO.')]),
definition.comments)
definition = definitions[1]
self.assertEqual(
collections.OrderedDict([('FOO', 0), ('BAR', 1)]),
definition.entries)
definition = definitions[2]
self.assertEqual(
collections.OrderedDict([('ENUM_NAME_FOO', 0), ('ENUM_BAR', 1)]),
definition.entries)
definition = definitions[3]
expected_entries = collections.OrderedDict([
('SYMBOL_KEY', '1 << 0'),
('ALT_KEY', '1 << 1'),
('UP_KEY', '1 << 2'),
('KEY_MODIFIERS', 'SYMBOL_KEY | ALT_KEY | UP_KEY | KEY_MODIFIERS')])
self.assertEqual(expected_entries, definition.entries)
definition = definitions[4]
self.assertEqual(
collections.OrderedDict([('TEST_VAL', 0), ('CODEC_MPEG2', 1)]),
definition.entries)
def testParseThrowsOnUnknownDirective(self):
test_data = """
// GENERATED_JAVA_UNKNOWN: Value
enum EnumName {
VALUE_ONE,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseReturnsEmptyListWithoutDirectives(self):
test_data = """
enum EnumName {
VALUE_ONE,
};
""".split('\n')
self.assertEqual([], HeaderParser(test_data).ParseDefinitions())
def testParseEnumClass(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseEnumClassOneValueSubstringOfAnother(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class SafeBrowsingStatus {
kChecking = 0,
kEnabled = 1,
kDisabled = 2,
kDisabledByAdmin = 3,
kDisabledByExtension = 4,
kEnabledStandard = 5,
kEnabledEnhanced = 6,
// New enum values must go above here.
kMaxValue = kEnabledEnhanced,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('SafeBrowsingStatus', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(
collections.OrderedDict([
('CHECKING', '0'),
('ENABLED', '1'),
('DISABLED', '2'),
('DISABLED_BY_ADMIN', '3'),
('DISABLED_BY_EXTENSION', '4'),
('ENABLED_STANDARD', '5'),
('ENABLED_ENHANCED', '6'),
('MAX_VALUE', 'ENABLED_ENHANCED'),
]), definition.entries)
self.assertEqual(
collections.OrderedDict([
('MAX_VALUE', 'New enum values must go above here.')
]), definition.comments)
def testParseEnumStruct(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum struct Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseFixedTypeEnum(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum Foo : int {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual('int', definition.fixed_type)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseFixedTypeEnumClass(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo: unsigned short {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual(1, len(definitions))
definition = definitions[0]
self.assertEqual('Foo', definition.class_name)
self.assertEqual('test.namespace', definition.enum_package)
self.assertEqual('unsigned short', definition.fixed_type)
self.assertEqual(collections.OrderedDict([('A', 0)]),
definition.entries)
def testParseUnknownFixedTypeRaises(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: test.namespace
enum class Foo: foo_type {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseSimpleMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.namespace)
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
self.assertEqual('Bar', definitions[0].class_name)
def testParseMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (te
// st.name
// space)
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
def testParseMultiLineDirectiveWithOtherDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.namespace)
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: (
// Ba
// r
// )
enum Foo {
FOO_A,
};
""".split('\n')
definitions = HeaderParser(test_data).ParseDefinitions()
self.assertEqual('test.namespace', definitions[0].enum_package)
self.assertEqual('Bar', definitions[0].class_name)
def testParseMalformedMultiLineDirectiveWithOtherDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.name
// space
// GENERATED_JAVA_CLASS_NAME_OVERRIDE: Bar
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseMalformedMultiLineDirective(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
// test.name
// space
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseMalformedMultiLineDirectiveShort(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE: (
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testParseMalformedMultiLineDirectiveMissingBrackets(self):
test_data = """
// GENERATED_JAVA_ENUM_PACKAGE:
// test.namespace
enum Foo {
FOO_A,
};
""".split('\n')
with self.assertRaises(Exception):
HeaderParser(test_data).ParseDefinitions()
def testEnumValueAssignmentNoneDefined(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('C', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2)]),
definition.entries)
def testEnumValueAssignmentAllDefined(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', '1')
definition.AppendEntry('B', '2')
definition.AppendEntry('C', '3')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', '1'),
('B', '2'),
('C', '3')]),
definition.entries)
def testEnumValueAssignmentReferences(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'A')
definition.AppendEntry('C', None)
definition.AppendEntry('D', 'C')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 0),
('C', 1),
('D', 1)]),
definition.entries)
def testEnumValueAssignmentSet(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', '2')
definition.AppendEntry('C', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 2),
('C', 3)]),
definition.entries)
def testEnumValueAssignmentSetReferences(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'A')
definition.AppendEntry('C', 'B')
definition.AppendEntry('D', None)
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 0),
('C', 0),
('D', 1)]),
definition.entries)
def testEnumValueAssignmentRaises(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', 'foo')
definition.AppendEntry('C', None)
with self.assertRaises(Exception):
definition.Finalize()
def testExplicitPrefixStripping(self):
definition = EnumDefinition(original_enum_name='c', enum_package='p')
definition.AppendEntry('P_A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('P_C', None)
definition.AppendEntry('P_LAST', 'P_C')
definition.prefix_to_strip = 'P_'
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2),
('LAST', 2)]),
definition.entries)
def testImplicitPrefixStripping(self):
definition = EnumDefinition(original_enum_name='ClassName',
enum_package='p')
definition.AppendEntry('CLASS_NAME_A', None)
definition.AppendEntry('CLASS_NAME_B', None)
definition.AppendEntry('CLASS_NAME_C', None)
definition.AppendEntry('CLASS_NAME_LAST', 'CLASS_NAME_C')
definition.Finalize()
self.assertEqual(collections.OrderedDict([('A', 0),
('B', 1),
('C', 2),
('LAST', 2)]),
definition.entries)
def testImplicitPrefixStrippingRequiresAllConstantsToBePrefixed(self):
definition = EnumDefinition(original_enum_name='Name',
enum_package='p')
definition.AppendEntry('A', None)
definition.AppendEntry('B', None)
definition.AppendEntry('NAME_LAST', None)
definition.Finalize()
self.assertEqual(['A', 'B', 'NAME_LAST'], list(definition.entries.keys()))
def testGenerateThrowsOnEmptyInput(self):
with self.assertRaises(Exception):
original_do_parse = java_cpp_enum.DoParseHeaderFile
try:
java_cpp_enum.DoParseHeaderFile = lambda _: []
for _ in java_cpp_enum.DoGenerate(['file']):
pass
finally:
java_cpp_enum.DoParseHeaderFile = original_do_parse
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0c93905f93d26f277164570b3fbbf8e8",
"timestamp": "",
"source": "github",
"line_count": 783,
"max_line_length": 80,
"avg_line_length": 34.33588761174968,
"alnum_prop": 0.5858285289194718,
"repo_name": "nwjs/chromium.src",
"id": "c14f2a085edbce9037b6a2453b3d687119baae48",
"size": "26885",
"binary": false,
"copies": "11",
"ref": "refs/heads/nw70",
"path": "build/android/gyp/java_cpp_enum_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import app
from splunklib.searchcommands import app_root, execute
from os import environ, path
import sys
pypy_argv = ['pypy', path.join(app_root, 'bin', 'generatetext.py')] + sys.argv[1:]
pypy_environ = dict(environ)
pypy_environ.pop('PYTHONPATH', None) # On Windows Splunk is a 64-bit service, but pypy is a 32-bit program
pypy_environ.pop('DYLD_LIBRARY_PATH', None) # On *nix Splunk includes shared objects that are incompatible with pypy
execute('pypy', pypy_argv, pypy_environ)
| {
"content_hash": "a26791b7343c6e58d6780c4b26a25aed",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 117,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.7517482517482518,
"repo_name": "sullivanmatt/splunk-sdk-python",
"id": "aec19e2f912b6509c0eded1a0be8d33c62f1a62a",
"size": "2763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/searchcommands_app/package/bin/pypygeneratetext.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "727032"
}
],
"symlink_target": ""
} |
n, k = map(int, input().split())
s = input()
# How many of each letter we may keep; remove k letters greedily, smallest letter first.
remaining = dict((letter, s.count(letter)) for letter in set(s))
for item in range(26):
    if k == 0:
        break
    key = chr(item + ord('a'))
    if key in remaining:
        if remaining[key] >= k:
            remaining[key] -= k
            k = 0
            break
        else:
            tmp = remaining[key]
            remaining[key] = 0
            k -= tmp
# Walk the string from the right, keeping the last `remaining[c]` occurrences of each letter,
# which removes the leftmost occurrences of the deleted letters.
ans = list()
for item in reversed(s):
    if item in remaining and remaining[item] > 0:
        remaining[item] -= 1
        ans.append(item)
print("".join(reversed(ans)))
| {
"content_hash": "0dbc32fb9563dafcc116e2aa2866d80a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 23.178571428571427,
"alnum_prop": 0.5069337442218799,
"repo_name": "AHJenin/acm-type-problems",
"id": "0b2a279024ca7a383259f1daf049a7eb153c9d08",
"size": "1488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codeforces/AC/999C.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "76491"
},
{
"name": "C++",
"bytes": "2244531"
},
{
"name": "Java",
"bytes": "6724"
},
{
"name": "Python",
"bytes": "2227"
}
],
"symlink_target": ""
} |
"""
==================
Default Resolvers
==================
This module holds all the default resolvers used by "BaseSettings".
Normally you don't have to import anything from this module; resolvers
are found automatically based on the default type or the name of the key.
We try to find the resolver using the following steps:
1. If the developer provided a type, use it to find the resolver.
2. Try to use the default.__class__, the name of the setting and the default to find the resolver.
3. Return the default "SettingsResolver".
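For example (a sketch; the setting names below are made up, the mapping follows the table below):
.. code-block:: python
    class MySettings(BaseSettings):
        PORT = 8080             # int default          -> IntSettingsResolver
        DEBUG = False           # bool default         -> BoolSettingsResolver
        CACHE_DIR = '/tmp/app'  # key ends with 'dir'  -> DirSettingsResolver
        LOG_FILE = 'app.log'    # key ends with 'file' -> FileSettingsResolver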
The following table is used to map the resolvers.
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| String type          | class         | key startswith | key endswith | Resolver                     | Requires |
+======================+===============+================+==============+==============================+==========+
| 'default'            |               |                |              | `SettingsResolver`           |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'int'                | ``int``       |                |              | `IntSettingsResolver`        |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'float'              | ``float``     |                |              | `FloatSettingsResolver`      |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'bool'               | ``bool``      |                |              | `BoolSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'str'                | ``str``       |                |              | `StrSettingsResolver`        |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'unicode'            | ``unicode``   |                |              | `UnicodeSettingsResolver`    |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'path'               |               |                |              | `PathSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'dir'                |               |                | 'dir'        | `DirSettingsResolver`        |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'file'               |               |                | 'file'       | `FileSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'secret'             |               |                |              | `SecretSettingsResolver`     |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'password', 'pass'   |               |                |              | `PassSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'timedelta'          | ``timedelta`` |                |              | `TimeDeltaSettingsResolver`  |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'datetime'           | ``datetime``  |                |              | `DatetimeSettingsResolver`   |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'time'               | ``time``      |                |              | `TimeSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'date'               | ``date``      |                |              | `DateSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'tuple'              | ``tuple``     |                |              | `TupleSettingsResolver`      |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'namedtuple'         |               |                |              | `NamedTupleSettingsResolver` |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'list'               | ``list``      |                |              | `ListSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
| 'dict'               | ``dict``      |                |              | `DictSettingsResolver`       |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
|                      | ``Section``   |                |              | `SectionSettingsResolver`    |          |
+----------------------+---------------+----------------+--------------+------------------------------+----------+
"""
from __future__ import absolute_import
import re
import sys
import collections
import logging
import os
import hashlib
import base64
import json
import datetime
from . import configfile
from . import basesettings
logger = logging.getLogger(__name__)
__all__ = ['SettingsResolver' , 'IntSettingsResolver', 'FloatSettingsResolver', 'BoolSettingsResolver',
           'StrSettingsResolver', 'UnicodeSettingsResolver', 'PathSettingsResolver', 'DirSettingsResolver',
           'FileSettingsResolver', 'PassSettingsResolver', 'SecretSettingsResolver', 'DateSettingsResolver',
           'TimeSettingsResolver', 'TimeDeltaSettingsResolver', 'DatetimeSettingsResolver',
'TupleSettingsResolver', 'NamedTupleSettingsResolver', 'ListSettingsResolver', 'DictSettingsResolver',
'SectionSettingsResolver', 'ResolveException']
class ResolveException(Exception):
pass
class SettingsResolver(object):
""" Default SettingsResolver
    :param settings: The settings object this resolver belongs to
    :param validate: Must be a callable that accepts the value to validate and
        returns True or False based on that value
    :type settings: `BaseSettings`
    :type validate: Function or Callable
"""
multivalue = False
""" True is the get value can hold childs, else False."""
resolve_types = ('default',)
""" The string types and real type this resolver supports."""
def __init__(self , settings, validate=None):
self.settings = settings
self.validate_fuc = validate
def get(self, value):
""" Coerce the str value to the types used in the app.
:param value: The value to coerce to the final type
        :type value: `str` or the final type
        :return: Returns the same value as passed to the function
:rtype: The same type as passed to the function
"""
return value
def raw(self, value):
""" Turn the value in to a str so it can be saved to a file using the configfile module.
        This function is called when an attribute of `BaseSettings` is set. Its job is to turn the
        value into a string. The function `get` must be able to load this string to the final type.
:param value: The value to coerce to a savable type
:type value: supported type of this resolver
:return: Returns the savable converted value
:rtype: `str`
"""
return str(value)
def validate(self, value):
""" Validate if ``value`` is a validate value for this settings attribute.
This is called before a value is set to a settings attribute to make sure
it is a valid value. If not `BaseSettings` will raise an Exception.
If ``__init__`` was provided with a `validate` keyword, this is used to
validate the value. Else the default is used.
:param value: The value to validate
:type value: supported type of this resolver
        :return: Returns True or False based on the value provided
:rtype: ``bool``
"""
if self.validate_fuc is not None:
return self.validate_fuc(value)
return self._validate(value)
def _validate(self, value):
""" Default validate function.
:param value: The value to validate
:type value: supported type of this resolver
        :return: Returns True or False based on the value provided
:rtype: bool
"""
return True
@classmethod
def supports(cls, type=None, key=None, default=None):
""" Checks of this Resolver supports a settings attribute base on type, attribute key and default value.
We first check if the attribute is supported base on the key and the default. If
that is not the case we check if type is in ``resolve_types``.
:param type: A string descripting a resolver type of the python class.
:param key: The name of settings attribute.
:param default: The default value of the settings attribute.
        :type type: ``type`` (Python Class) or ``str``
:type key: ``str``
:type default: supported final type
:return: Returns True if attribute is supported else False.
:rtype: ``bool``
"""
try:
if cls._supports(key, default):
return True
        except Exception:
pass
return type in cls.resolve_types
@classmethod
def _supports(cls, key, default):
""" Checks of this Resolver supports a settings attribute base on attribute key and default value."""
return False
class IntSettingsResolver(SettingsResolver):
""" Resolver to coerce values to `int`
:param min: the minimum int value allowed
:param max: the maximum int value allowed
:param step: steps just like in xrange, use with `min` and `max`
:param choices: a list of int allowed to be set, use without min, max and step
:type min: ``int``
:type max: ``int``
:type step: ``int``
:type choices: ``list``
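    Example (a sketch; ``settings`` is the owning ``BaseSettings`` instance and the
    resolver is normally constructed for you):
    .. code-block:: python
        resolver = IntSettingsResolver(settings, min=0, max=65535)
        resolver.validate(8080)   # True, 8080 lies between min and max
        resolver.get('8080')      # 8080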
"""
resolve_types = ('int', int)
def __init__(self, settings, validate=None, min=None, max=None, step=None, choices=None):
super(IntSettingsResolver, self).__init__(settings, validate)
self.min = min
self.max = max
if step and min and max:
self.choices = [x for x in xrange(min, max, step)]
self.max = None
self.min = None
else:
self.choices = choices
def get(self, value):
""" Coerce ``value`` to ``int``.
:return: Value as int.
:rtype: ``int``
"""
return int(value)
def _validate(self, value):
""" Validate if value is between min and max or is in choices
:param value: A value to validate
:type value: ``int``
        :return: True or False based on value
:rtype: ``bool``
"""
if self.max or self.min:
if self.max:
return self.min < value < self.max
else:
return self.min < value
if self.choices:
return value in self.choices
return super(IntSettingsResolver, self)._validate(value)
class FloatSettingsResolver(IntSettingsResolver):
""" Resolver to coerce values to ``float`` """
resolve_types = ('float', float)
def get(self, value):
""" Coerce `value` to `float`.
:return: Value as float
:rtype: `float`
"""
return float(value)
class BoolSettingsResolver(IntSettingsResolver):
""" Resolver to coerce values to `bool`.
We are very strict so only the following values are
considered False:
- False, "False", 0, "0", "no", "n"
Only the following values are considered True
- True, "True", 1, "1", "yes", "y"
    We don't allow empty lists and dicts because they don't coerce well
    to string.
No extra validate args are needed.
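    A quick sketch of the coercion (resolver construction omitted):
    .. code-block:: python
        resolver.get('yes')   # True
        resolver.get('0')     # False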
"""
NO_VALUES = [False, "False", 0, "0", "no", "n"]
YES_VALUES = [True, "True", 1, "1", "yes", "y"]
resolve_types = ('bool', bool)
def __init__(self, settings, validate=None):
super(BoolSettingsResolver, self).__init__(settings, validate, choices=self.NO_VALUES + self.YES_VALUES)
def get(self, value):
""" Coerce `value` to `bool`.
:param value: The value to coerce
:type value: `str`, `int` or `bool`
:return: Value as bool
:rtype: `bool`
"""
if value in self.NO_VALUES:
return False
elif value in self.YES_VALUES:
return True
class StrSettingsResolver(SettingsResolver):
""" Resolver to coerce values to `str`.
    This resolver also implements the functionality to
    build strings based on other settings values. This
is done using the `str.format()` function.
Example:
.. code-block:: python
class Settings(BaseSettings):
IP = '127.0.0.1'
PORT = 5589
            HOST = '{IP}:{PORT}'
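            # sketch of the resulting lookup, assuming the Settings class above:
            # settings.HOST  ->  '127.0.0.1:5589'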
:param choices: List of valid strings for this setting
:type choices: ``list``
"""
    SETTING_REGEX = '\{([0-9A-Z_\.]+)\}'
resolve_types = ('str', str)
def __init__(self, settings, validate=None, choices=None):
super(StrSettingsResolver, self).__init__(settings, validate)
self.choices = choices
def get(self, value):
""" Coerce `value` to ``str``.
:param value: The value to coerce
:type value: ``str``
:return: Value as string, value replace is done.
:rtype: ``str``
"""
value = str(value)
if re.search(self.SETTING_REGEX, value):
kwargs = dict((v.split('.')[0] , self.settings.root.get(v.split('.')[0])) for v in re.findall(self.SETTING_REGEX, value))
else:
kwargs = {}
return value.format(**kwargs)
def _validate(self, value):
""" Validate if value is in choices (if choices was supplied)
:param value: A value to validate
:type value: ``str``
        :return: True or False based on value
:rtype: ``bool``
"""
if self.choices and value in self.choices:
return True
elif self.choices:
return False
return super(StrSettingsResolver, self)._validate(value)
class UnicodeSettingsResolver(StrSettingsResolver):
""" Resolver to coerce values to ``unicode``.
Works the same as the ``StrSettingsResolver``.
:param choices: List of valid strings for this setting.
:type choices: ``list``
"""
resolve_types = ('unicode', unicode)
def get(self, value):
""" Coerce `value` to ``unicode``.
:param value: The value to coerce.
:type value: ``str``
:return: Value as string, value replace is done.
:rtype: ``unicode``
"""
return super(UnicodeSettingsResolver, self).get(value).decode('utf8')
def raw(self, value):
        return value.encode('utf8')
class PathSettingsResolver(StrSettingsResolver):
""" Resolver to proper return paths based on platform.
On Unix "\\"-slashes are replaces by "/"-slashes.
On Windows "/"-slashes are replaces by "\\"-slashes.
Formatting like `StrSettingsResolver` is supported.
"""
resolve_types = ('path',)
    def __init__(self, settings, validate=None, choices=None):
        super(PathSettingsResolver, self).__init__(settings, validate, choices)
def get(self, value):
""" Coerce ``value`` to proper path.
:param value: The value to coerce
:type value: ``str``
:return: path as string, value replace is done, slashes are according to platform.
:rtype: ``str``
"""
value = super(PathSettingsResolver, self).get(value)
if sys.platform == 'win32':
value = value.replace('/', '\\') if '/' in value else value
else:
value = value.replace('\\', '/') if '\\' in value else value
return value
class DirSettingsResolver(PathSettingsResolver):
""" Resolver to proper return dir-paths based on platform.
On Unix "\"-slashes are replaces by "/"-slashes.
On Windows "/"-slashes are replaces by "\"-slashes.
Formatting like `StrSettingsResolver` is supported.
Dir can be automatic create when the path is requested.
:param create: Automatic create the dir if is doesn't exists. True is default.
:type create: ``bool``
"""
resolve_types = ('dir',)
def __init__(self, settings, validate=None, create=True):
super(DirSettingsResolver, self).__init__(settings, validate)
self.create = create
def get(self, value):
""" Coerce `value` to proper path.
If path does not exist and create is True the path is created.
:param value: The value to coerce
:type value: ``str``
:return: path as string, value replace is done, slashes are according to platform.
:rtype: ``str``
"""
value = super(DirSettingsResolver, self).get(value)
if self.create and not os.path.isdir(value):
os.makedirs(value)
return value
@classmethod
    def _supports(cls, key=None, default=None):
return key.lower().endswith('dir')
class FileSettingsResolver(PathSettingsResolver):
""" Resolver to proper return dir-paths based on platform.
On Unix "\"-slashes are replaces by "/"-slashes.
On Windows "/"-slashes are replaces by "\"-slashes.
Formatting like `StrSettingsResolver` is supported.
File and dir of file can be automatic create when the path is requested.
:param create: Automatic create the file if is doesn't exists. False is default.
:param create_dir: Automatic create the dir in with the file lives if is doesn't exists. True is default.
:param file_ext: Validate if a file has the correct extension.
:type create: ``bool``
:type create_dir: ``bool``
:type file_ext: ``str``
"""
resolve_types = ('file',)
def __init__(self, settings, validate=None, create=False, create_dir=True, file_ext=None):
super(FileSettingsResolver, self).__init__(settings, validate)
self.create = create
self.create_dir = create_dir
self.file_ext = file_ext
def get(self, value):
""" Coerce `value` to proper path.
If file does not exist and create is True the file is created.
If dir of file does not exist and create_dir is True the dir is created.
:param value: The value to coerce
:type value: `str`
:return: path as string, value replace is done, slashes are according to platform.
:rtype: `str`
"""
value = super(FileSettingsResolver, self).get(value)
        if self.create_dir and not os.path.isdir(os.path.dirname(value)):
            os.makedirs(os.path.dirname(value))
        if self.create and not os.path.isfile(value):
            open(value, 'a').close()
return value
def _validate(self, value):
if self.file_ext and not value.endswith(self.file_ext):
return False
return super(FileSettingsResolver, self)._validate(value)
@classmethod
def _supports(cls, key=None, default=None):
return key.lower().endswith('file')
class SecretSettingsResolver(SettingsResolver):
""" Resolver that encrypts value before storing it.
Formatting like ``StrSettingsResolver`` is supported.
.. warning::
The default must already be encrypted, use SecretSettingsResolver.encrypte(key, value)
to use the default implementation.
This is a basic implementation. It is not 100% save.
:param get_secret: Function to the key, takes must take 1 args, this is the resolver (self),
the default implentation returns settings.SECRET_KEY or raises ResolveException
:param encode: Callable to override de default implementation. Must take 2 args, (key, encrypte_text)
:param decode: Callable to override de default implementation. Must take 2 args, (key, text)
:type get_secret: ``callable``
:type encode: ``callable``
:type decode: ``callable``
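    Example of pre-encrypting a default value (a sketch; how the attribute is bound to this
    resolver depends on ``BaseSettings``):
    .. code-block:: python
        encrypted_default = SecretSettingsResolver.encrypte('my-secret-key', 'plain-token')
        # the resolver later decrypts it with the key returned by get_secret()
        SecretSettingsResolver.decrypte('my-secret-key', encrypted_default)  # 'plain-token'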
"""
resolve_types = ('secret',)
def __init__(self, settings, validate=None, get_secret=None, encode=None, decode=None):
super(SecretSettingsResolver, self).__init__(settings, validate)
self.get_secret = get_secret if get_secret is not None else self.get_secret
        self.encrypte = encode if encode is not None else self.encrypte
        self.decrypte = decode if decode is not None else self.decrypte
def get(self, value):
""" Return decrypted text."""
dec = self.decrypte(self.get_secret(), value)
return super(SecretSettingsResolver , self).get(dec)
def raw(self, value):
""" Return encrypted text."""
return self.encrypte(self.get_secret(), value)
def get_secret(self):
""" Default implementation of `get_key` function.
Default implementation return SECRET_KEY attribute of root settings
"""
try:
return self.settings.root.SECRET_KEY
except AttributeError:
raise ResolveException("SecretSettingsResolver : You must provide a get_key function or set SECRET_KEY on the root settings")
@staticmethod
def encrypte(key, clear):
""" Default encrypt implementation.
        This implementation is not 100% safe.
:param key: Key returned by `get_secret`
        :param clear: Plaintext to encrypt.
:type key: ``str``
:type clear: ``str``
        :return: The encrypted text, base64-encoded.
:rtype: ``str``
"""
enc = []
for i in range(len(clear)):
key_c = key[i % len(key)]
enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)
enc.append(enc_c)
return base64.urlsafe_b64encode("".join(enc))
@staticmethod
def decrypte(key, enc):
""" Default decrypt implementation.
        Only works with the default encrypt implementation.
        :param key: Key returned by `get_secret`
        :param enc: Encrypted text to decrypt.
        :type key: ``str``
        :type enc: ``str``
        :return: The decrypted plaintext.
:rtype: ``str``
"""
dec = []
enc = base64.urlsafe_b64decode(enc)
for i in range(len(enc)):
key_c = key[i % len(key)]
dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)
dec.append(dec_c)
return "".join(dec)
class PassSettingsResolver(SettingsResolver):
""" Resolver returns a password object, use it to compare given password to stored password.
.. warning::
The default must already be hashed, use SecretSettingsResolver.hash(password, SecretSettingsResolver.salt())
to use the default implementation.
This resolver return a special object. It can be used to match plain passwords to
the hashed password sorted in the settings object.
Example:
.. code-block
password = "some plain password"
if settings.PASSWORD == password: # or settings.PASSWORD.equals(password)
# do something special
pass
:param salt: Function to get the salt, or salt as `str`, this is passed to the hasher
:param hasher: Callable to override de default implementation. Must take 2 args, (password, salt)
:type salt: ``callable``
:type hasher: ``callable``
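    To pre-hash a default (a sketch using the default hash and salt implementations shown below):
    .. code-block:: python
        hashed_default = PassSettingsResolver.hash('some plain password', PassSettingsResolver.salt())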
"""
resolve_types = ('pass','password')
class Password(object):
def __init__(self, password, hasher, salt):
self.password = password
self.hasher = hasher
self.salt = salt
def equals(self, other):
            return self.password == self.hasher(other, self.salt)
def __eq__(self, other):
return self.equals(other)
def __str__(self):
return self.password
def __repr__(self):
            return '{name}({password})'.format(name=self.__class__.__name__, password=self.password)
def __init__(self, settings, validate=None, salt=None, hasher=None):
super(PassSettingsResolver, self).__init__(settings, validate)
self.salt = salt if salt is not None else self.salt
self.hash = hasher if hasher is not None else self.hash
def get(self, value):
""" Returns password object.
:return: Special object to match plain text password
:rtype: ``Password``
"""
return self.Password(value, self.hash, self.salt)
def raw(self, value):
""" Hash the value if it is not a Password type.
:return: The hashed value of `value`
:rtype: ``str``
"""
if isinstance(value , self.Password):
return value.password
        return self.hash(value, self.salt)
@staticmethod
def hash(value, salt):
""" Default hash implementation.
:param value: The value to hash.
        :param salt: A salt to use in the hash process
:type value: ``str``
:type salt: ``str``
:return: Return hashed password, (as hexdigest).
:rtype: ``str``
"""
if callable(salt):
salt = salt(value)
return hashlib.sha256('{}.{}'.format(value,salt)).hexdigest()
@staticmethod
def salt(value=None):
""" Return the default salt ('default')
        :param value: The value that is going to be hashed.
:type value: ``str``
:return: The salt to use in the hash, this function returns `'default'`
:rtype: ``str``
"""
return 'default'
@classmethod
    def _supports(cls, key=None, default=None):
return key.lower().endswith('password')
class TimeDeltaSettingsResolver(SettingsResolver):
""" Resolver to coerce value to TimeDelta object.
:param min: The minimum valid timedelta
:param max: The maximum valid timedelta
:type min: ``timedelta``
:type max: ``timedelta``
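    Values are stored as a number of seconds (a sketch; ``resolver`` is an instance of this class):
    .. code-block:: python
        resolver.raw(datetime.timedelta(minutes=3))   # 180.0
        resolver.get('180')                           # timedelta of 180 seconds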
"""
resolve_types = ('timedelta', datetime.timedelta)
def __init__(self, settings, validate=None, min=None, max=None):
super(TimeDeltaSettingsResolver, self).__init__(settings, validate)
self.min = min
self.max = max
def get(self, value):
if isinstance(value, datetime.timedelta):
return value
        return datetime.timedelta(seconds=float(value))
def raw(self, value):
return value.total_seconds()
def _validate(self, value):
if self.max is not None and value > self.max:
return False
if self.min is not None and value < self.min:
return False
return True
class DatetimeSettingsResolver(SettingsResolver):
""" Resolver to coerce value to Datetime object.
:param min: The minimum valid datetime
:param max: The maximum valid datetime
:type min: ``datetime``
:type max: ``datetime``
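    Values are stored using the format ``%Y-%m-%d %H:%M:%S`` (a sketch; ``resolver`` is an instance of this class):
    .. code-block:: python
        resolver.get('2015-06-01 12:30:00')                   # datetime.datetime(2015, 6, 1, 12, 30)
        resolver.raw(datetime.datetime(2015, 6, 1, 12, 30))   # '2015-06-01 12:30:00'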
"""
resolve_types = ('datetime', datetime.datetime)
format = "%Y-%m-%d %H:%M:%S"
def __init__(self, settings, validate=None, min=None, max=None):
super(DatetimeSettingsResolver, self).__init__(settings, validate)
self.min = min
self.max = max
def get(self, value):
if isinstance(value, datetime.datetime):
return value
return datetime.datetime.strptime(value, self.format)
def raw(self, value):
return value.strftime(self.format)
def _validate(self, value):
if self.max is not None and value > self.max:
return False
if self.min is not None and value < self.min:
return False
return True
class TimeSettingsResolver(DatetimeSettingsResolver):
""" Resolver to coerce value to Time objects.
    Takes the same args as `DatetimeSettingsResolver`, only
    with time objects.
"""
resolve_types = ('time', datetime.time)
format = "%H:%M:%S"
def get(self, value):
if isinstance(value, datetime.time):
return value
        return super(TimeSettingsResolver, self).get(value).time()
class DateSettingsResolver(DatetimeSettingsResolver):
""" Resolver to coerce value to Date objects.
    Takes the same args as `DatetimeSettingsResolver`, only
    with date objects.
"""
resolve_types = ('date', datetime.date)
format = "%Y-%m-%d"
def get(self, value):
        if isinstance(value, datetime.date):
return value
return super(DateSettingsResolver, self).get(value).date()
class MultiValueSettingsResolver(SettingsResolver):
""" Baseclass for resolver handing type that can hold other """
multivalue = True
resolvers = False
def has_childs(self):
""" Checks if child resolvers are already set on a multivalue resolver
        :return: True or False based on whether the child resolvers are set on the resolver.
:rtype: `bool`
"""
return bool(self.resolvers)
def set_childs(self, default):
""" If the child resolvers are not set this function called with the default
value. The resolver can use this to set the correct child resolvers.
:param default: The default value for this settings.
"""
pass
class TupleSettingsResolver(MultiValueSettingsResolver):
""" Resolver to coerce value to tulpe.
We expect that the value in tuples are always in the same order
and the same type. If childs was passed this is used to determine
what the type of the childs where, else we use the default value.
example:
.. code-block
#if childs was passed
childs = ['str', 'path', 'int') --> TupleSettingsResolver(StrSettingsResolver, PathSettingsResolver, IntSettingsResolver)
if childs is None:
(1 , 'hello', u'test') --> TupleSettingsResolver(IntSettingsResolver, StrSettingsResolver, UnicodeSettingsResolver)
:param childs: A list of str type or `Resolver` used to determine what the childs types are.
:type chidls: `list`
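    Values are stored as a single delimited string (a sketch, given str and int child resolvers):
    .. code-block:: python
        resolver.get('127.0.0.1,8080')     # ('127.0.0.1', 8080)
        resolver.raw(('127.0.0.1', 8080))  # '127.0.0.1,8080'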
"""
delimiter = ','
resolve_types = ('tuple', tuple)
def __init__(self, settings, validate=None, childs=None):
super(TupleSettingsResolver, self).__init__(settings, validate)
self.resolvers = []
if childs:
for r in childs:
if isinstance(r, basesettings.Resolver):
resolver = settings._get_resolver(r.type, kwargs=r.resolverKwargs)
else:
resolver = settings._get_resolver(r)
self.resolvers.append(resolver)
def get(self, value):
""" Returns tuple using ' , ' to split string in tuple childs
:return: Tulpe, all childs resolvers are applied to the childs
:rtype: ``tuple``
"""
if isinstance(value, basestring):
value = value.split(self.delimiter)
return tuple(self.resolvers[i].get(v) for i,v in enumerate(value))
def raw(self, value):
""" Coerce ``tuple`` to ``str``
Childs are passed to their raw function and joined using a `,`.
:return: Childs as str, joined with a `,`
:rtype: ``str``
"""
l = []
for i,v in enumerate(value):
l.append(self.resolvers[i].raw(v))
if self.delimiter in l[i]:
logger.warning("Delimiter in raw value, key : {key}, value : {value}".format(key=ReferenceResolverMixin.get_key(self), value=value))
return self.delimiter.join(l)
def _validate(self, value):
if len(value) != len(self.resolvers):
return False
for i,v in enumerate(value):
if not self.resolvers[i].validate(v):
return False
return True
def set_childs(self, defaults):
for i,d in enumerate(defaults):
r = self.settings._get_resolver(d.__class__, None, d, kwargs={})
if r.multivalue and not r.has_childs():
r.set_childs(d)
self.resolvers.append(r)
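# Illustrative sketch, not part of the original module: raw()/get() above
# round-trip a tuple through one delimited string, applying one child
# resolver per position. The standalone helpers below mimic that behaviour
# with plain callables (int, str, float) standing in for real resolvers.
def _demo_tuple_raw(values, delimiter=','):
    # join the string form of every child value, as raw() does
    return delimiter.join(str(v) for v in values)

def _demo_tuple_get(raw_value, coercers, delimiter=','):
    # split the stored string and coerce each position, as get() does
    parts = raw_value.split(delimiter)
    return tuple(coerce(part) for coerce, part in zip(coercers, parts))

# _demo_tuple_raw((1, 'hello', 2.5))                 -> '1,hello,2.5'
# _demo_tuple_get('1,hello,2.5', (int, str, float))  -> (1, 'hello', 2.5)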
class NamedTupleSettingsResolver(TupleSettingsResolver):
""" Same as TupleSettingsResolver, but takes a extra
key parameter. This is passed to the namedtuple factory.
:param key: ``list`` of names passed to ``namedtulpe`` factory.
:type key: ``list``
"""
resolve_types = ('namedtuple',)
def __init__(self, settings, validate=None, keys=(), childs=()):
super(NamedTupleSettingsResolver, self).__init__(settings, validate, childs)
self.cls = collections.namedtuple("NamedSettingsTuple", keys)
def get(self, value):
return self.cls(*super(NamedTupleSettingsResolver, self).get(value))
class ReferenceResolverMixin(object):
""" Mixin for object that are mutable
Provide a `get_key` function so the resolver can update the settingsvalue
each time the mutable object changes.
:return: The key this resolver is linked to
:rtype: ``str``
"""
def get_key(self):
for key, resolver in self.settings.resolvers.items():
if self is resolver:
return key
class ListSettingsResolver(MultiValueSettingsResolver, ReferenceResolverMixin):
""" Resolver to coerce value to ``list``
This resolver returns a special SyncList. This acts the same as a list,
but syncs all changes back to the settings object. We expect that a list
only contains values of the same type.
JSON is used to coerce the list to string format.
:param child: string type or resolver used to construct the resolver applied to all childs
:param duplicate: True or False, allowing duplicate entries in the `list`
:param options: A list containing all allowed values an entry in the list may take.
:param sort: A function to sort the list. It is called after each change to the list
and is called with the list as param
:param minLen: the minimum length of the list
:param maxLen: the maximum length of the list
:type child: ``str`` or `Resolver`
:type duplicate: ``bool``
:type options: ``list``
:type sort: ``callable``
:type minLen: ``int``
:type maxLen: ``int``
"""
resolve_types = ('list', list)
class SyncList(list):
def __init__(self, key, l, resolver, settings):
self._l = l
self._key = key
self._resolver = resolver
self._settings = settings
def __sort(self):
if self._resolver.sort is not None:
self._resolver.sort(self._l)
def __sync(self):
" This function makes sure that we write all changes to the list back to the userfile. "
self.__sort()
self._settings.userconfig[self._key.lower()] = self._resolver._raw(self._l)
def append(self, v):
self._l.append(self._resolver.resolver.get(v))
self.__sync()
def count(self, value):
    return self._l.count(self._resolver.resolver.get(value))
def extend(self, l):
self._l.extend(self._resolver.get(l))
self.__sync()
def index(self, obj):
return self._l.index(obj)
def insert(self, i, obj):
self._l.insert(i, self._resolver.resolver.get(obj))
self.__sync()
def pop(self):
v = self._l.pop()
self.__sync()
return v
def remove(self, obj):
self._l.remove(self._resolver.resolver.get(obj))
self.__sync()
def reverse(self):
if self._resolver.sort is not None:
raise Exception("The sorting of this list is solid")
self._l.reverse()
self.__sync()
def sort(self,func):
if self._resolver.sort is not None:
raise Exception("The sorting of this list is solid")
self._l.sort(func)
self.__sync()
def __getitem__(self,i):
return self._l[i]
def __setitem__(self,i, value):
self._l[i] = self._resolver.resolver.get(value)
self.__sync()
def __delitem__(self,i):
del self._l[i]
self.__sync()
def __getslice__(self, start, end):
return self._l[start:end]
def __setslice__(self, start, end, value):
self._l[start:end] = self._resolver.get(value)._l
self.__sync()
def __delslice__(self, start, end):
del self._l[start:end]
self.__sync()
def __eq__(self, other):
return self._l.__eq__(other)
def __ge__(self, other):
return self._l.__ge__(other)
def __gt__(self, other):
return self._l.__gt__(other)
def __lt__(self, other):
return self._l.__lt__(other)
def __ne__(self, other):
return self._l.__ne__(other)
def __add__(self, value):
return self._l + self._resolver.get(value)._l
def __iadd__(self, value):
self._l += self._resolver.get(value)._l
self.__sync()
return self
def __mul__(self, i):
return self._l.__mul__(i)
def __rmul__(self, i):
return self._l.__rmul__(i)
def __imul__(self, i):
    self._l *= i
    self.__sync()
    return self
def __contains__(self, value):
return self._resolver.resolver.get(value) in self._l
def __len__(self):
return len(self._l)
def __iter__(self):
return iter(self._l)
def __format__(self, format_spec):
    return self._l.__format__(format_spec)
def __reversed__(self):
return reversed(self._l)
def __reduce__(self):
return self._l.__reduce__()
def __reduce_ex__(self, protocol):
return self._l.__reduce_ex__(protocol)
def __init__(self, settings, validate=None, child=None, duplicate=False, options = None, sort=None, minLen=None, maxLen=None):
super(ListSettingsResolver, self).__init__(settings, validate)
self.resolver = None
self.resolvers = []
self.duplicate = duplicate
self.options = options
self.minLen = minLen
self.maxLen = maxLen
self.sort = sort
if child:
if isinstance(child, basesettings.Resolver):
self.resolver = settings._get_resolver(child.type, kwargs=child.resolverKwargs)
else:
self.resolver = settings._get_resolver(child)
self.resolvers = (self.resolver,)
def get(self, values):
""" Returns the SyncList based on the list.
:return: Special SyncList to sync back all changes to settings object.
:rtype: ``SyncList``
"""
key = self.get_key()
if isinstance(values, self.SyncList):
raise ResolveException("ListSettingsResolver : We are getting a synclist in the get function, this should not be possible")
elif isinstance(values, list):
values = values[:]
else:
values = self._get(values)
return self.SyncList(key, values, self, self.settings)
def _get(self, values):
""" Internal function to coerce value to `list`"""
l = []
for val in json.loads(values):
l.append(self.resolver.get(val))
return l
def raw(self, value):
if isinstance(value, self.SyncList):
value = value._l
return self._raw(value)
def _raw(self, values):
""" Internal function to coerce list to `str`"""
return json.dumps([self.resolver.raw(value) for value in values])
def _validate(self, values):
for value in values:
if self.options and value not in self.options:
return False
if self.minLen and len(values) < self.minLen:
return False
if self.maxLen and len(values) > self.maxLen:
return False
if not self.duplicate and len(set(values)) != len(values):
return False
return super(ListSettingsResolver, self)._validate(values)
def set_childs(self, defaults):
if defaults:
r = self.settings._get_resolver(defaults[0].__class__, {})
if r.multivalue and not r.has_childs():
r.set_childs(defaults[0])
self.resolver = r
else:
self.resolver = StrSettingsResolver(self.settings)
self.resolvers = (self.resolver,)
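# Illustrative sketch, not part of the original module: the essence of the
# SyncList above is "mutate, then write the raw form back to the settings".
# A minimal standalone version of that pattern, using a plain callback in
# place of the real settings object:
class _DemoSyncList(list):
    def __init__(self, items, on_change):
        super(_DemoSyncList, self).__init__(items)
        self._on_change = on_change

    def append(self, value):
        # mutate first, then notify the owner with the new contents
        super(_DemoSyncList, self).append(value)
        self._on_change(list(self))

# changes = []
# l = _DemoSyncList([1, 2], changes.append)
# l.append(3)        # changes is now [[1, 2, 3]]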
class DictSettingsResolver(MultiValueSettingsResolver, ReferenceResolverMixin):
""" Resolver to load and dump dict.
It is not a good idea to use dicts in your settings; you have sections, so use them!
** Warning: dicts are saved and loaded via json, not resolvers, so string formatting won't work **
This resolver returns a dict-like object (SyncDict) to sync changes back to the settings object.
:param default: value to pass to ``dict.setdefault``.
"""
resolve_types = ('dict', dict)
class SyncDict(dict):
def __init__(self, key, d, resolver, settings):
self._d = d
self._key = key
self._resolver = resolver
self._settings = settings
if resolver.default is not None:
self._d.setdefault(resolver.default)
def __sync(self):
" This function makes sure that we write all changes to the list back to the userfile. "
self._settings.userconfig[self._key.lower()] = self._resolver._raw(self._d)
def copy(self):
return self._d.copy()
def clear(self):
self._d.clear()
self.__sync()
def update(self, d):
self._d.update(d)
self.__sync()
def get(self, key, default=None):
return self._d.get(key, default)
def fromkeys(self, keys, value=None):
self._d = dict.fromkeys(keys, value)
self.__sync()
def setdefault(self, default):
ResolveException("this is not possible, use the default keyword of the resolver")
def has_key(self, key):
return self._d.has_key(key)
def pop(self, key, default=None):
v = self._d.pop(key, default)
self.__sync()
return v
def popitem(self):
v = self._d.popitem()
self.__sync()
return v
def keys(self):
return self._d.keys()
def values(self):
return self._d.values()
def items(self):
return self._d.items()
def iterkeys(self):
return self._d.iterkeys()
def itervalues(self):
return self._d.itervalues()
def iteritems(self):
return self._d.iteritems()
def viewkeys(self):
return self._d.viewkeys()
def viewvalues(self):
return self._d.viewvalues()
def viewitems(self):
return self._d.viewitems()
#def __getattribute__(self, key):
# return self._d.__dict__['__getattribute__'](key)
#def __setattr__(self, key, value):
# self._d.__setattr__(key, value)
#def __delattr__(self, key):
# self._d.__delattr__(key)
def __getitem__(self, key):
    # if defaults are set then __getitem__ may change the dict
    value = self._d[key]
    self.__sync()
    return value
def __setitem__(self, key, value):
self._d[key] = value
self.__sync()
def __delitem__(self, key):
del self._d[key]
self.__sync()
def __cmp__(self, other):
return self._d.__cmp__(other)
def __eq__(self, other):
return self._d.__eq__(other)
def __gt__(self, other):
return self._d.__gt__(other)
def __ge__(self, other):
return self._d.__ge__(other)
def __le__(self, other):
return self._d.__le__(other)
def __lt__(self, other):
return self._d.__lt__(other)
def __ne__(self, other):
return self._d.__ne__(other)
def __format__(self, format_spec):
    return self._d.__format__(format_spec)
def __contains__(self, key):
return key in self._d
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __sizeof__(self):
return self._d.__sizeof__()
def __reduce__(self):
return self._d.__reduce__()
def __reduce_ex__(self, protocol):
return self._d.__reduce_ex__(protocol)
def __init__(self, settings, validate=None, default=None):
super(DictSettingsResolver, self).__init__(settings, validate)
self.default = default
self.resolvers = True
def get(self, values):
key = self.get_key()
if isinstance(values, self.SyncDict):
raise ResolveException("DictSettingsResolver : We are getting a syncdict in the get function, this should not be possible")
elif isinstance(values, dict):
values = dict(values)
else:
values = self._get(values)
return self.SyncDict(key, values, self, self.settings)
def _get(self, value):
""" Internal function to coerce json to `dict`"""
return json.loads(value)
def raw(self,value):
if isinstance(value, self.SyncDict):
value = value._d
return self._raw(value)
def _raw(self, value):
""" Internal function to coerce dict to `str`"""
return json.dumps(value)
def _validate(self, value):
return True
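# Illustrative note, not part of the original module: as the warning in the
# docstring above says, dict values only go through a json round-trip, so
# child resolvers are never applied and only JSON-native types survive:
# json.loads(json.dumps({'retries': 3, 'enabled': True}))
#   -> {'retries': 3, 'enabled': True}
# json.dumps({'when': datetime.datetime(2020, 1, 1)})
#   -> raises TypeError (not JSON serializable)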
class SectionSettingsResolver(SettingsResolver, ReferenceResolverMixin):
""" A special resolver used for Sections.
This resolver makes sure subsections are correctly supported.
"""
resolve_types = (basesettings.Section,)
def __init__(self , settings, validate=None):
super(SectionSettingsResolver, self).__init__(settings, validate)
def get(self, value):
key = self.get_key()
if isinstance(value, basesettings.Section):
# the default is passed
section = value
else:
section = self.settings.defaults[key.lower()]
# try to get the userconfig of this section, if it does not exist we create it.
userconfig = self.settings.userconfig[key.lower()]
# try to pass over the nosave values for this section
try:
nosave = self.settings.nosave[key.lower()]
except KeyError:
nosave = self.settings.nosave[key.lower()] = {}
# try to get this section from the config files; if it does not exist we pass.
fileconfigs = []
for cfg in self.settings.fileconfigs:
try:
fileconfigs.append(cfg[key.lower()])
except KeyError:
pass
try:
options = self.settings.options[key.lower()]
except KeyError:
options = self.settings.options[key.lower()] = {}
try:
envconfig = self.settings.envconfig[key.lower()]
except KeyError:
envconfig = self.settings.envconfig[key.lower()] = {}
return section(self.settings, options, userconfig, nosave, envconfig, fileconfigs)
def raw(self, value):
raise ResolveException("It is not possible to set a section")
@classmethod
def _supports(cls, key=None, default=None):
return issubclass(default, basesettings.Section)
# register all resolvers with base settings cls
basesettings.add_resolver_type(SettingsResolver)
basesettings.add_resolver_type(IntSettingsResolver)
basesettings.add_resolver_type(FloatSettingsResolver)
basesettings.add_resolver_type(BoolSettingsResolver)
basesettings.add_resolver_type(StrSettingsResolver)
basesettings.add_resolver_type(UnicodeSettingsResolver)
basesettings.add_resolver_type(PathSettingsResolver)
basesettings.add_resolver_type(DirSettingsResolver)
basesettings.add_resolver_type(FileSettingsResolver)
basesettings.add_resolver_type(SecretSettingsResolver)
basesettings.add_resolver_type(PassSettingsResolver)
basesettings.add_resolver_type(DatetimeSettingsResolver)
basesettings.add_resolver_type(TimeSettingsResolver)
basesettings.add_resolver_type(DateSettingsResolver)
basesettings.add_resolver_type(TupleSettingsResolver)
basesettings.add_resolver_type(NamedTupleSettingsResolver)
basesettings.add_resolver_type(ListSettingsResolver)
basesettings.add_resolver_type(DictSettingsResolver)
basesettings.add_resolver_type(SectionSettingsResolver)
| {
"content_hash": "b11201367cd3775b4eafe66cbf432cae",
"timestamp": "",
"source": "github",
"line_count": 1349,
"max_line_length": 148,
"avg_line_length": 37.415122312824316,
"alnum_prop": 0.5424088126324966,
"repo_name": "loek17/settingslib",
"id": "62ad42c2c5d7d36e3495946654802ef82f77ac3a",
"size": "51618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settingslib/resolvers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124023"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from services import views
urlpatterns = [
# service configurations
url(r'^(?P<id>\d+)/$', views.profile, name='profile'),
url(r'^create/', views.create, name='create'),
url(r'^(?P<id>\d+)/delete/$', views.delete, name='delete'),
url(r'^(?P<id>\d+)/edit/$', views.update, name='update'),
url(r'^list/', views.list, name='list'),
]
| {
"content_hash": "4e7762092a78b9172559cfc47716f779",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 60,
"avg_line_length": 33.81818181818182,
"alnum_prop": 0.6370967741935484,
"repo_name": "omwomotieno/tunza_v3",
"id": "25ef6301aff431999cf4d51ed37f936327f1f325",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/work_branch",
"path": "services/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5369"
},
{
"name": "HTML",
"bytes": "46161"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "91934"
},
{
"name": "Shell",
"bytes": "7027"
}
],
"symlink_target": ""
} |
import os
import json
import sys
import pydoc
import textwrap
import typing
from datetime import datetime
from coupling import fs
from fnmatch import fnmatch
try:
from os.path import samefile
except ImportError:
from shutil import _samefile as samefile
import logging
logger = logging.getLogger(__name__)
def get_datetime_string(dt: datetime = None, with_timezone: bool = True) -> typing.Optional[str]:
if not dt:
dt = datetime.now()
if with_timezone:
dt = dt.astimezone()
s = dt.isoformat()
return s
def remove_path_illegal_chars(name: str) -> str:
illegal_chars = r'/\*?<>|:"' if sys.platform == "win32" else '/\0'
return name.translate({ord(i): None for i in illegal_chars})
def revise_path(path: str) -> str:
return r"\\?\%s" % path if len(path) > 255 and sys.platform == "win32" else path
def new_object_from_dict(data: dict) -> object:
class_ = locate(data["class"])
args = data.get("args", [])
kwargs = data.get("kwargs", {})
obj = class_(*args, **kwargs)
properties = data.get("properties", {})
for k, v in properties.items():
setattr(obj, k, v)
return obj
def get_module_name_safely(path: str, package_dir: str = None) -> str:
if not package_dir:
package_dir = os.path.dirname(path)
if package_dir not in sys.path:
sys.path.insert(0, package_dir)
path_without_ext = os.path.splitext(path)[0]
return os.path.relpath(path_without_ext, package_dir).replace(os.path.sep, '.')
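# Illustrative example (hypothetical paths): with package_dir "/proj/pkg"
# placed on sys.path, the file "/proj/pkg/sub/test_mod.py" resolves to the
# module name "sub.test_mod" (relative path, extension stripped, path
# separators turned into dots).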
def get_object_name_list_by_path(path: str, package_dir: str = None, pattern: str = "test*.py") -> typing.List[str]:
"""
Base class for handling test event.
Parameters
----------
path : str
path can be a file, dir, link or python path of package, module, function, class and method.
package_dir : bool, optional
package dir for locate path
pattern : bool, optional
used to found matched test file.
"""
names = []
if os.path.isfile(path):
module_name = get_module_name_safely(path, package_dir)
names.append(module_name)
elif os.path.isdir(path):
for found in fs.find(path, top_dir=False):
if os.path.isfile(found) and fnmatch(os.path.basename(found), pattern):
module_name = get_module_name_safely(found, package_dir)
names.append(module_name)
elif os.path.islink(path):
names.extend(get_object_name_list_by_path(os.path.realpath(path), package_dir, pattern))
else:
names.append(path)
return names
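# Illustrative example (hypothetical layout): for a directory containing
#   /proj/pkg/tests/test_login.py
#   /proj/pkg/tests/test_cart.py
# get_object_name_list_by_path("/proj/pkg/tests", package_dir="/proj/pkg")
# would return ["tests.test_login", "tests.test_cart"] (order depends on the
# filesystem walk), while a dotted python path such as
# "tests.test_login.LoginCase" is passed through unchanged.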
def pformat_json(data, ensure_ascii=False, indent=2):
def on_fail(x):
return str(x)
return json.dumps(data, ensure_ascii=ensure_ascii, indent=indent, default=on_fail)
def locate(path):
obj = pydoc.locate(path)
if obj is None:
raise LookupError("Can't locate object with path: {}".format(path))
return obj
def truncate_str(s, length=255):
if not isinstance(s, str):
s = str(s)
return textwrap.shorten(s, length, placeholder='...')
| {
"content_hash": "f2c41ffaa34eeeaa71ac323ec84e322e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 116,
"avg_line_length": 27.936363636363637,
"alnum_prop": 0.6384640416531077,
"repo_name": "yyang179/ngta",
"id": "148ec17928cf427411fc5b12a1c76b074f629653",
"size": "3098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ngta/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3364"
},
{
"name": "HTML",
"bytes": "23836"
},
{
"name": "JavaScript",
"bytes": "14324"
},
{
"name": "Python",
"bytes": "293620"
}
],
"symlink_target": ""
} |
import json
from django.http import HttpResponse
def render_json(view_func):
""" render http response to json decorator
"""
def wrap(request, *args, **kwargs):
retval = view_func(request, *args, **kwargs)
if isinstance(retval, HttpResponse):
retval.mimetype = 'application/json; charset=utf-8'
return retval
else:
js = json.dumps(retval)
return HttpResponse(js, content_type='application/json; charset=utf-8')
return wrap
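# Illustrative usage, not part of the original module (hypothetical view;
# URL wiring assumed elsewhere):
#
# @render_json
# def api_status(request):
#     return {"ok": True, "version": 1}
#
# A dict return value is serialized with json.dumps and wrapped in an
# HttpResponse with an application/json content type; returning an
# HttpResponse directly passes it through with the json mimetype set.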
| {
"content_hash": "657663374df1153649c8beef42fdf3c3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 32,
"alnum_prop": 0.626953125,
"repo_name": "zym1115718204/xspider",
"id": "e7d927c0b25d5296ddd4a10011c01b1afdda32c6",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xspider/libs/decorator/decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "812460"
},
{
"name": "HTML",
"bytes": "705330"
},
{
"name": "JavaScript",
"bytes": "5867"
},
{
"name": "PHP",
"bytes": "2199"
},
{
"name": "Python",
"bytes": "14618343"
},
{
"name": "Shell",
"bytes": "2176"
}
],
"symlink_target": ""
} |
import argparse
import logging
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
def _format_fixed_ips(port):
try:
return '\n'.join([utils.dumps(ip) for ip in port['fixed_ips']])
except Exception:
return ''
class ListPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant."""
resource = 'port'
log = logging.getLogger(__name__ + '.ListPort')
_formatters = {'fixed_ips': _format_fixed_ips, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
class ListRouterPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant, with specified router."""
resource = 'port'
log = logging.getLogger(__name__ + '.ListRouterPort')
_formatters = {'fixed_ips': _format_fixed_ips, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListRouterPort, self).get_parser(prog_name)
parser.add_argument(
'id', metavar='router',
help='ID or name of router to look up')
return parser
def get_data(self, parsed_args):
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.id)
self.values_specs.append('--device_id=%s' % _id)
return super(ListRouterPort, self).get_data(parsed_args)
class ShowPort(neutronV20.ShowCommand):
"""Show information of a given port."""
resource = 'port'
log = logging.getLogger(__name__ + '.ShowPort')
class CreatePort(neutronV20.CreateCommand):
"""Create a port for a given tenant."""
resource = 'port'
log = logging.getLogger(__name__ + '.CreatePort')
def add_known_arguments(self, parser):
parser.add_argument(
'--name',
help='name of this port')
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help='set admin state up to false')
parser.add_argument(
'--admin_state_down',
dest='admin_state', action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--mac-address',
help='mac address of this port')
parser.add_argument(
'--mac_address',
help=argparse.SUPPRESS)
parser.add_argument(
'--device-id',
help='device id of this port')
parser.add_argument(
'--device_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--fixed-ip', metavar='ip_address=IP_ADDR',
action='append',
help='desired IP for this port: '
'subnet_id=<name_or_id>,ip_address=<ip>, '
'(This option can be repeated.)')
parser.add_argument(
'--fixed_ip',
action='append',
help=argparse.SUPPRESS)
parser.add_argument(
'--security-group', metavar='SECURITY_GROUP',
default=[], action='append', dest='security_groups',
help='security group associated with the port '
'(This option can be repeated)')
parser.add_argument(
'network_id', metavar='NETWORK',
help='Network id or name this port belongs to')
def args2body(self, parsed_args):
_network_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'network', parsed_args.network_id)
body = {'port': {'admin_state_up': parsed_args.admin_state,
'network_id': _network_id, }, }
if parsed_args.mac_address:
body['port'].update({'mac_address': parsed_args.mac_address})
if parsed_args.device_id:
body['port'].update({'device_id': parsed_args.device_id})
if parsed_args.tenant_id:
body['port'].update({'tenant_id': parsed_args.tenant_id})
if parsed_args.name:
body['port'].update({'name': parsed_args.name})
ips = []
if parsed_args.fixed_ip:
for ip_spec in parsed_args.fixed_ip:
ip_dict = utils.str2dict(ip_spec)
if 'subnet_id' in ip_dict:
subnet_name_id = ip_dict['subnet_id']
_subnet_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'subnet', subnet_name_id)
ip_dict['subnet_id'] = _subnet_id
ips.append(ip_dict)
if ips:
body['port'].update({'fixed_ips': ips})
_sgids = []
for sg in parsed_args.security_groups:
_sgids.append(neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group', sg))
if _sgids:
body['port']['security_groups'] = _sgids
return body
class DeletePort(neutronV20.DeleteCommand):
"""Delete a given port."""
resource = 'port'
log = logging.getLogger(__name__ + '.DeletePort')
class UpdatePort(neutronV20.UpdateCommand):
"""Update port's information."""
resource = 'port'
log = logging.getLogger(__name__ + '.UpdatePort')
def add_known_arguments(self, parser):
parser.add_argument(
'--no-security-groups',
action='store_true',
help='remove security groups from port')
def args2body(self, parsed_args):
body = {'port': {}}
if parsed_args.no_security_groups:
body['port'].update({'security_groups': None})
return body
| {
"content_hash": "d16de6888df96c25bd709a62ba45842d",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 74,
"avg_line_length": 34.64670658682635,
"alnum_prop": 0.5732803318354649,
"repo_name": "CiscoSystems/python-neutronclient",
"id": "f8ac9a9f49fe093df67e7ec58a0e59bcc42a3c1c",
"size": "6461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/neutron/v2_0/port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "418088"
},
{
"name": "Shell",
"bytes": "5278"
}
],
"symlink_target": ""
} |
class Fail(Exception):
pass
class InvalidArgument(Fail):
pass
| {
"content_hash": "b6533deb596b34ccfbc50ac236a1fb44",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 14.2,
"alnum_prop": 0.704225352112676,
"repo_name": "samuel/kokki",
"id": "1fa8779f78d90051967d0f28441bba24beda1854",
"size": "72",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kokki/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "211476"
}
],
"symlink_target": ""
} |
import os
import re
import sys
from setuptools import setup, find_packages
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
def find_version(*paths):
fname = os.path.join(*paths)
with open(fname) as fhandler:
version_file = fhandler.read()
version_match = re.search(r"^__VERSION__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if not version_match:
raise RuntimeError("Unable to find version string in %s" % (fname,))
version = version_match.group(1)
return version
version = find_version('pymesos', '__init__.py')
setup(
name='pymesos',
version=version,
description="A pure python implementation of Mesos scheduler and executor",
packages=find_packages(),
platforms=['POSIX'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python',
],
author="Zhongbo Tian",
author_email="[email protected]",
url="https://github.com/douban/pymesos",
download_url=(
'https://github.com/douban/pymesos/archive/%s.tar.gz' % version
),
install_requires=['six', 'http-parser', 'addict'],
setup_requires=pytest_runner,
tests_require=[
'pytest-cov',
'pytest-randomly',
'pytest-mock',
'pytest'],
)
| {
"content_hash": "0bdca4f1acb9b8454f2dda77a21a8e85",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 28.764705882352942,
"alnum_prop": 0.6080436264485344,
"repo_name": "yuyang0/pymesos",
"id": "792f7c0f9bfc49a2566523fd13c2b0bbc1fc0956",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111765"
}
],
"symlink_target": ""
} |
import numpy as np
# import py.cat
import re
def drop_brackets(cat):
if cat.startswith('(') and \
cat.endswith(')') and \
find_closing_bracket(cat, 0) == len(cat)-1:
return cat[1:-1]
else:
return cat
def find_closing_bracket(source, start):
open_brackets = 0
for i, c in enumerate(source):
if c == '(':
open_brackets += 1
elif c == ')':
open_brackets -= 1
if open_brackets == 0:
return i
raise Exception("Mismatched brackets in string: " + source)
def find_non_nested_char(haystack, needles):
open_brackets = 0
# for i, c in enumerate(haystack):
for i in range(len(haystack) -1, -1, -1):
c = haystack[i]
if c == '(':
open_brackets += 1
elif c == ')':
open_brackets -= 1
elif open_brackets == 0:
for n in needles:
if n == c: return i
return -1
def get_context_by_window(items, window_size, lpad, rpad):
res = []
for i, item in enumerate(items):
context = []
if window_size - i > 0:
for j in range(window_size - i):
context.append(lpad)
for j in range(i):
context.append(items[j])
else:
for j in range(i - window_size, i):
context.append(items[j])
context.append(item)
if i + window_size >= len(items):
for j in range(i + 1, len(items)):
context.append(items[j])
for j in range(i + window_size - len(items) + 1):
context.append(rpad)
else:
for j in range(i + 1, i + window_size + 1):
context.append(items[j])
assert len(context) == window_size * 2 + 1
res.append(context)
return res
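# Illustrative example of the windowing above (hypothetical tokens):
# get_context_by_window(['a', 'b', 'c'], 1, '<s>', '</s>')
#   -> [['<s>', 'a', 'b'], ['a', 'b', 'c'], ['b', 'c', '</s>']]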
def read_pretrained_embeddings(filepath):
nvocab = 0
io = open(filepath)
dim = len(io.readline().split())
io.seek(0)
for _ in io:
nvocab += 1
io.seek(0)
res = np.empty((nvocab, dim), dtype=np.float32)
for i, line in enumerate(io):
line = line.strip()
if len(line) == 0: continue
res[i] = line.split()
io.close()
return res
import codecs
def read_model_defs(filepath):
res = {}
for i, line in enumerate(codecs.open(filepath, encoding="utf-8")):
word, _ = line.strip().split(" ")
res[word] = i
return res
def load_unary(filename):
res = {}
for line in open(filename):
comment = line.find("#")
if comment > -1:
line = line[:comment]
line = line.strip()
if len(line) == 0:
continue
items = line.split()
assert len(items) == 2
inp = cat.parse(items[0])
out = cat.parse(items[1])
if inp in res:
res[inp].append(out)
else:
res[inp] = [out]
return res
| {
"content_hash": "8260827ce0971bc91c80bd763efc6b68",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 70,
"avg_line_length": 25.938053097345133,
"alnum_prop": 0.5110883657454793,
"repo_name": "masashi-y/myccg",
"id": "efae7fb952a2e822a282b5d00b5f6087f91b376c",
"size": "2932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/py/py_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5099"
},
{
"name": "C++",
"bytes": "114374"
},
{
"name": "Makefile",
"bytes": "979"
},
{
"name": "Python",
"bytes": "179658"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
} |
"""
Defines clients for connecting to different services.
"""
import gmusicapi
import playcl.util
class GooglePlayClient(object):
"""
A client for connecting to Google Play for music.
"""
def __init__(self, email, password):
"""
Creates a client that is logged in using the given credentials.
"""
if not playcl.util.EMAIL_REGEX.match(email):
raise ValueError('Username is not a valid email')
self.play_api = gmusicapi.Mobileclient()
if not self.play_api.login(email, password):
raise RuntimeError('Unable to log in to Google Play. Invalid username or password?')
self.email = email
self.music_library = {}
self.playlists = {}
def __del__(self):
"""
Makes sure to exit the client properly just in case...
"""
self.logout()
def update_local_music_lib(self):
"""
Update the local copy of the user's music library.
"""
# Clear out the library.
self.music_library = {}
self.playlists = {}
songs = self.play_api.get_all_songs()
# Do some formatting on the data that was returned.
for song in songs:
if song['artist'] == '':
song['artist'] = 'Unknown Artist'
if song['album'] == '':
song['album'] = 'Unknown Album'
artist = song['artist']
if artist not in self.music_library:
self.music_library[artist] = {}
artist_albums = self.music_library[artist]
album = song['album']
if album not in artist_albums:
artist_albums[album] = []
album_tracks = artist_albums[album]
album_tracks.append(song)
playlists = self.play_api.get_all_user_playlist_contents()
for playlist in playlists:
self.playlists[playlist['name']] = playlist['tracks']
def logout(self):
"""
Logs the client out.
"""
self.play_api.logout()
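# Illustrative usage, not part of the original module (hypothetical
# credentials; requires network access and a valid Google account):
#
# client = GooglePlayClient('[email protected]', 'app-password')
# client.update_local_music_lib()
# for artist, albums in client.music_library.items():
#     print(artist, list(albums))
# client.logout()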
| {
"content_hash": "46a364d1ba7d08658f23655a88c8c64a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 87,
"avg_line_length": 22.126582278481013,
"alnum_prop": 0.6584668192219679,
"repo_name": "cadyyan/Play-Cl",
"id": "a4eb53db709ce3af343f355e3b0ddad664faf8ed",
"size": "1748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playcl/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4979"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
"""
Build a ply lexer, but without the implicit magic and global
state, just load prebuilt ply parser and lexers at roughly the
same cost of loading a Python module.
"""
import functools
from ply.lex import Lexer, LexerReflect
from ply.yacc import ParserReflect, LRTable, LRParser
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
if args not in cache:
cache[args] = obj(*args, **kwargs)
return cache[args]
return memoizer
# lexer is passed as argument to ensure that memoization is
# unique for parser/lexer pair.
@memoize
def yaccfrom(module, tabmodule, lexer):
# Get the module dictionary used for the parser
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict)
pinfo.get_all()
# Read the tables
lr = LRTable()
lr.read_table(tabmodule)
lr.bind_callables(pinfo.pdict)
return LRParser(lr,pinfo.error_func)
@memoize
def lexfrom(module, lexmodule):
lexobj = Lexer()
lexobj.lexoptimize = 1
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = dict(_items)
# Collect parser information from the dictionary
linfo = LexerReflect(ldict)
linfo.get_all()
lexobj.readtab(lexmodule,ldict)
return lexobj
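# Illustrative usage, not part of the original module (hypothetical module
# names): given a grammar module and the lexer/parser tables that ply
# generated for it ahead of time, the cached constructors above rebuild the
# parser/lexer pair without re-running table generation:
#
# import mygrammar, mygrammar_lextab, mygrammar_parsetab
# lexer = lexfrom(mygrammar, mygrammar_lextab)
# parser = yaccfrom(mygrammar, mygrammar_parsetab, lexer)
# ast = parser.parse("1 + 2", lexer=lexer)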
| {
"content_hash": "71cb0a3c4ae5f5b83d172fe56cc59f6c",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 64,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.6894882434301521,
"repo_name": "zeeshanali/blaze",
"id": "56e33ae5f8f210aa306f4a1189337197bfbe8194",
"size": "1446",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "blaze/plyhacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import shlex
import sys
from pathlib import Path
from subprocess import PIPE, STDOUT, run
ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"])
ENV_PATH = ACTION_PATH / ".black-env"
ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
OPTIONS = os.getenv("INPUT_OPTIONS", default="")
SRC = os.getenv("INPUT_SRC", default="")
JUPYTER = os.getenv("INPUT_JUPYTER") == "true"
BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
VERSION = os.getenv("INPUT_VERSION", default="")
run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True)
version_specifier = VERSION
if VERSION and VERSION[0] in "0123456789":
version_specifier = f"=={VERSION}"
if JUPYTER:
extra_deps = "[colorama,jupyter]"
else:
extra_deps = "[colorama]"
req = f"black{extra_deps}{version_specifier}"
pip_proc = run(
[str(ENV_BIN / "python"), "-m", "pip", "install", req],
stdout=PIPE,
stderr=STDOUT,
encoding="utf-8",
)
if pip_proc.returncode:
print(pip_proc.stdout)
print("::error::Failed to install Black.", flush=True)
sys.exit(pip_proc.returncode)
base_cmd = [str(ENV_BIN / "black")]
if BLACK_ARGS:
# TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS.
proc = run([*base_cmd, *shlex.split(BLACK_ARGS)])
else:
proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)])
sys.exit(proc.returncode)
| {
"content_hash": "85133f0c750e8d3908415bc74e690993",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 30.822222222222223,
"alnum_prop": 0.6705118961788031,
"repo_name": "psf/black",
"id": "ff9d4112aed7f19f14930284ef38ee6134a8bc05",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "action/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "929"
},
{
"name": "Jupyter Notebook",
"bytes": "2848"
},
{
"name": "Python",
"bytes": "4932376"
},
{
"name": "Vim Script",
"bytes": "9445"
}
],
"symlink_target": ""
} |
import json
import os
import time
import traceback
import math
from swift import gettext_ as _
from eventlet import Timeout
import six
from six.moves.urllib.parse import quote
import swift.common.db
from swift.container.sync_store import ContainerSyncStore
from swift.container.backend import ContainerBroker, DATADIR, \
RECORD_TYPE_SHARD, UNSHARDED, SHARDING, SHARDED, SHARD_UPDATE_STATES
from swift.container.replicator import ContainerReplicatorRpc
from swift.common.db import DatabaseAlreadyExists
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.request_helpers import split_and_validate_path, \
is_sys_or_user_meta, validate_internal_container, validate_internal_obj, \
validate_container_params
from swift.common.utils import get_logger, hash_path, public, \
Timestamp, storage_directory, validate_sync_to, \
config_true_value, timing_stats, replication, \
override_bytes_from_content_type, get_log_line, \
config_fallocate_value, fs_has_free_space, list_from_csv, \
ShardRange
from swift.common.constraints import valid_timestamp, check_utf8, \
check_drive, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import HTTP_NO_CONTENT, HTTP_NOT_FOUND, is_success
from swift.common.middleware import listing_formats
from swift.common.storage_policy import POLICIES
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPConflict, \
HTTPCreated, HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPException, HTTPMovedPermanently, \
wsgi_to_str, str_to_wsgi
def gen_resp_headers(info, is_deleted=False):
"""
Convert container info dict to headers.
"""
# backend headers are always included
headers = {
'X-Backend-Timestamp': Timestamp(info.get('created_at', 0)).internal,
'X-Backend-PUT-Timestamp': Timestamp(info.get(
'put_timestamp', 0)).internal,
'X-Backend-DELETE-Timestamp': Timestamp(
info.get('delete_timestamp', 0)).internal,
'X-Backend-Status-Changed-At': Timestamp(
info.get('status_changed_at', 0)).internal,
'X-Backend-Storage-Policy-Index': info.get('storage_policy_index', 0),
}
if not is_deleted:
# base container info on deleted containers is not exposed to client
headers.update({
'X-Container-Object-Count': info.get('object_count', 0),
'X-Container-Bytes-Used': info.get('bytes_used', 0),
'X-Timestamp': Timestamp(info.get('created_at', 0)).normal,
'X-PUT-Timestamp': Timestamp(
info.get('put_timestamp', 0)).normal,
'X-Backend-Sharding-State': info.get('db_state', UNSHARDED),
})
return headers
def get_container_name_and_placement(req):
"""
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container = split_and_validate_path(req, 4)
validate_internal_container(account, container)
return drive, part, account, container
def get_obj_name_and_placement(req):
"""
Split and validate path for an object.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container, obj = split_and_validate_path(
req, 4, 5, True)
validate_internal_obj(account, container, obj)
return drive, part, account, container, obj
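# Illustrative example (hypothetical request): a backend object update such as
# PUT /sdb1/1023/AUTH_test/photos/cat.jpg splits into
# ('sdb1', '1023', 'AUTH_test', 'photos', 'cat.jpg'), i.e.
# drive, partition, account, container and object.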
class ContainerController(BaseStorageServer):
"""WSGI Controller for the container server."""
# Ensure these are all lowercase
save_headers = ['x-container-read', 'x-container-write',
'x-container-sync-key', 'x-container-sync-to']
server_type = 'container-server'
def __init__(self, conf, logger=None):
super(ContainerController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='container-server')
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.root = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = float(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
#: ContainerSyncCluster instance for validating sync-to values.
self.realms_conf = ContainerSyncRealms(
os.path.join(
conf.get('swift_dir', '/etc/swift'),
'container-sync-realms.conf'),
self.logger)
#: The list of hosts we're allowed to send syncs to. This can be
#: overridden by data in self.realms_conf
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()]
self.replicator_rpc = ContainerReplicatorRpc(
self.root, DATADIR, ContainerBroker, self.mount_check,
logger=self.logger)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
self.auto_create_account_prefix = \
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.shards_account_prefix = (
self.auto_create_account_prefix + 'shards_')
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
if 'allow_versions' in conf:
self.logger.warning('Option allow_versions is deprecated. '
'Configure the versioned_writes middleware in '
'the proxy-server instead. This option will '
'be ignored in a future release.')
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
swift.common.db.QUERY_LOGGING = \
config_true_value(conf.get('db_query_logging', 'f'))
self.sync_store = ContainerSyncStore(self.root,
self.logger,
self.mount_check)
self.fallocate_reserve, self.fallocate_is_percent = \
config_fallocate_value(conf.get('fallocate_reserve', '1%'))
def _get_container_broker(self, drive, part, account, container, **kwargs):
"""
Get a DB broker for the container.
:param drive: drive that holds the container
:param part: partition the container is in
:param account: account name
:param container: container name
:returns: ContainerBroker object
"""
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, drive, db_dir, hsh + '.db')
kwargs.setdefault('account', account)
kwargs.setdefault('container', container)
kwargs.setdefault('logger', self.logger)
return ContainerBroker(db_path, **kwargs)
def get_and_validate_policy_index(self, req):
"""
Validate that the index supplied maps to a policy.
:returns: policy index from request, or None if not present
:raises HTTPBadRequest: if the supplied index is bogus
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = req.headers.get(header, None)
if policy_index is None:
return None
try:
policy_index = int(policy_index)
policy = POLICIES.get_by_index(policy_index)
if policy is None:
raise ValueError
except ValueError:
raise HTTPBadRequest(
request=req, content_type="text/plain",
body="Invalid %s %r" % (header, policy_index))
else:
return int(policy)
def account_update(self, req, account, container, broker):
"""
Update the account server(s) with latest container info.
:param req: swob.Request object
:param account: account name
:param container: container name
:param broker: container DB broker object
:returns: if all the account requests return a 404 error code,
HTTPNotFound response object,
if the account cannot be updated due to a malformed header,
an HTTPBadRequest response object,
otherwise None.
"""
account_hosts = [h.strip() for h in
req.headers.get('X-Account-Host', '').split(',')]
account_devices = [d.strip() for d in
req.headers.get('X-Account-Device', '').split(',')]
account_partition = req.headers.get('X-Account-Partition', '')
if len(account_hosts) != len(account_devices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Account update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': req.headers.get('X-Account-Host', ''),
'devices': req.headers.get('X-Account-Device', '')})
return HTTPBadRequest(req=req)
if account_partition:
# zip is lazy on py3, but we need a list, so force evaluation.
# On py2 it's an extra list copy, but the list is so small
# (one element per replica in account ring, usually 3) that it
# doesn't matter.
updates = list(zip(account_hosts, account_devices))
else:
updates = []
account_404s = 0
for account_host, account_device in updates:
account_ip, account_port = account_host.rsplit(':', 1)
new_path = '/' + '/'.join([account, container])
info = broker.get_info()
account_headers = HeaderKeyDict({
'x-put-timestamp': info['put_timestamp'],
'x-delete-timestamp': info['delete_timestamp'],
'x-object-count': info['object_count'],
'x-bytes-used': info['bytes_used'],
'x-trans-id': req.headers.get('x-trans-id', '-'),
'X-Backend-Storage-Policy-Index': info['storage_policy_index'],
'user-agent': 'container-server %s' % os.getpid(),
'referer': req.as_referer()})
if req.headers.get('x-account-override-deleted', 'no').lower() == \
'yes':
account_headers['x-account-override-deleted'] = 'yes'
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(
account_ip, account_port, account_device,
account_partition, 'PUT', new_path, account_headers)
with Timeout(self.node_timeout):
account_response = conn.getresponse()
account_response.read()
if account_response.status == HTTP_NOT_FOUND:
account_404s += 1
elif not is_success(account_response.status):
self.logger.error(_(
'ERROR Account update failed '
'with %(ip)s:%(port)s/%(device)s (will retry '
'later): Response %(status)s %(reason)s'),
{'ip': account_ip, 'port': account_port,
'device': account_device,
'status': account_response.status,
'reason': account_response.reason})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later)'),
{'ip': account_ip, 'port': account_port,
'device': account_device})
if updates and account_404s == len(updates):
return HTTPNotFound(req=req)
else:
return None
def _update_sync_store(self, broker, method):
try:
self.sync_store.update_sync_store(broker)
except Exception:
self.logger.exception('Failed to update sync_store %s during %s' %
(broker.db_file, method))
def _redirect_to_shard(self, req, broker, obj_name):
"""
If the request indicates that it can accept a redirection, look for a
shard range that contains ``obj_name`` and if one exists return a
HTTPMovedPermanently response.
:param req: an instance of :class:`~swift.common.swob.Request`
:param broker: a container broker
:param obj_name: an object name
:return: an instance of :class:`swift.common.swob.HTTPMovedPermanently`
if a shard range exists for the given ``obj_name``, otherwise None.
"""
if not config_true_value(
req.headers.get('x-backend-accept-redirect', False)):
# We want to avoid fetching shard ranges for the (more
# time-sensitive) object-server update, so allow some misplaced
# objects to land between when we've started sharding and when the
# proxy learns about it. Note that this path is also used by old,
# pre-sharding updaters during a rolling upgrade.
return None
shard_ranges = broker.get_shard_ranges(
includes=obj_name, states=SHARD_UPDATE_STATES)
if not shard_ranges:
return None
# note: obj_name may be included in both a created sub-shard and its
# sharding parent. get_shard_ranges will return the created sub-shard
# in preference to the parent, which is the desired result.
containing_range = shard_ranges[0]
location = "/%s/%s" % (containing_range.name, obj_name)
if location != quote(location) and not config_true_value(
req.headers.get('x-backend-accept-quoted-location', False)):
# Sender expects the destination to be unquoted, but it isn't safe
# to send unquoted. Eat the update for now and let the sharder
# move it later. Should only come up during rolling upgrades.
return None
headers = {'Location': quote(location),
'X-Backend-Location-Is-Quoted': 'true',
'X-Backend-Redirect-Timestamp':
containing_range.timestamp.internal}
# we do not want the host added to the location
req.environ['swift.leave_relative_location'] = True
return HTTPMovedPermanently(headers=headers, request=req)
def check_free_space(self, drive):
drive_root = os.path.join(self.root, drive)
return fs_has_free_space(
drive_root, self.fallocate_reserve, self.fallocate_is_percent)
@public
@timing_stats()
def DELETE(self, req):
"""Handle HTTP DELETE request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
req_timestamp = valid_timestamp(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
# policy index is only relevant for delete_obj (and transitively for
# auto create accounts)
obj_policy_index = self.get_and_validate_policy_index(req) or 0
broker = self._get_container_broker(drive, part, account, container)
if obj:
self._maybe_autocreate(broker, req_timestamp, account,
obj_policy_index, req)
elif not os.path.exists(broker.db_file):
return HTTPNotFound()
if obj: # delete object
# redirect if a shard range exists for the object name
redirect = self._redirect_to_shard(req, broker, obj)
if redirect:
return redirect
broker.delete_object(obj, req.headers.get('x-timestamp'),
obj_policy_index)
return HTTPNoContent(request=req)
else:
# delete container
if not broker.empty():
return HTTPConflict(request=req)
existed = Timestamp(broker.get_info()['put_timestamp']) and \
not broker.is_deleted()
broker.delete_db(req_timestamp.internal)
if not broker.is_deleted():
return HTTPConflict(request=req)
self._update_sync_store(broker, 'DELETE')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if existed:
return HTTPNoContent(request=req)
return HTTPNotFound()
def _update_or_create(self, req, broker, timestamp, new_container_policy,
requested_policy_index):
"""
Create new database broker or update timestamps for existing database.
:param req: the swob request object
:param broker: the broker instance for the container
:param timestamp: internalized timestamp
:param new_container_policy: the storage policy index to use
when creating the container
:param requested_policy_index: the storage policy index sent in the
request, may be None
:returns: created, a bool, if database did not previously exist
"""
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, new_container_policy)
except DatabaseAlreadyExists:
pass
else:
return True # created
recreated = broker.is_deleted()
if recreated:
# only set storage policy on deleted containers
broker.set_storage_policy_index(new_container_policy,
timestamp=timestamp)
elif requested_policy_index is not None:
# validate requested policy with existing container
if requested_policy_index != broker.storage_policy_index:
raise HTTPConflict(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
broker.update_put_timestamp(timestamp)
if broker.is_deleted():
raise HTTPConflict(request=req)
if recreated:
broker.update_status_changed_at(timestamp)
return recreated
def _should_autocreate(self, account, req):
auto_create_header = req.headers.get('X-Backend-Auto-Create')
if auto_create_header:
# If the caller included an explicit X-Backend-Auto-Create header,
# assume they know the behavior they want
return config_true_value(auto_create_header)
if account.startswith(self.shards_account_prefix):
# we have to special-case this subset of the
# auto_create_account_prefix because we don't want the updater
# accidentally auto-creating shards; only the sharder creates
# shards and it will explicitly tell the server to do so
return False
return account.startswith(self.auto_create_account_prefix)
def _maybe_autocreate(self, broker, req_timestamp, account,
policy_index, req):
created = False
should_autocreate = self._should_autocreate(account, req)
if should_autocreate and not os.path.exists(broker.db_file):
if policy_index is None:
raise HTTPBadRequest(
'X-Backend-Storage-Policy-Index header is required')
try:
broker.initialize(req_timestamp.internal, policy_index)
except DatabaseAlreadyExists:
pass
else:
created = True
if not os.path.exists(broker.db_file):
raise HTTPNotFound()
return created
def _update_metadata(self, req, broker, req_timestamp, method):
metadata = {
wsgi_to_str(key): (wsgi_to_str(value), req_timestamp.internal)
for key, value in req.headers.items()
if key.lower() in self.save_headers
or is_sys_or_user_meta('container', key)}
if metadata:
if 'X-Container-Sync-To' in metadata:
if 'X-Container-Sync-To' not in broker.metadata or \
metadata['X-Container-Sync-To'][0] != \
broker.metadata['X-Container-Sync-To'][0]:
broker.set_x_container_sync_points(-1, -1)
broker.update_metadata(metadata, validate_metadata=True)
self._update_sync_store(broker, method)
@public
@timing_stats()
def PUT(self, req):
"""Handle HTTP PUT request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
if obj: # put container object
# obj put expects the policy_index header, default is for
# legacy support during upgrade.
obj_policy_index = requested_policy_index or 0
self._maybe_autocreate(
broker, req_timestamp, account, obj_policy_index, req)
# redirect if a shard exists for this object name
response = self._redirect_to_shard(req, broker, obj)
if response:
return response
broker.put_object(obj, req_timestamp.internal,
int(req.headers['x-size']),
wsgi_to_str(req.headers['x-content-type']),
wsgi_to_str(req.headers['x-etag']), 0,
obj_policy_index,
wsgi_to_str(req.headers.get(
'x-content-type-timestamp')),
wsgi_to_str(req.headers.get('x-meta-timestamp')))
return HTTPCreated(request=req)
record_type = req.headers.get('x-backend-record-type', '').lower()
if record_type == RECORD_TYPE_SHARD:
try:
# validate incoming data...
shard_ranges = [ShardRange.from_dict(sr)
for sr in json.loads(req.body)]
except (ValueError, KeyError, TypeError) as err:
return HTTPBadRequest('Invalid body: %r' % err)
created = self._maybe_autocreate(
broker, req_timestamp, account, requested_policy_index, req)
self._update_metadata(req, broker, req_timestamp, 'PUT')
if shard_ranges:
# TODO: consider writing the shard ranges into the pending
# file, but if so ensure an all-or-none semantic for the write
broker.merge_shard_ranges(shard_ranges)
else: # put container
if requested_policy_index is None:
# use the default index sent by the proxy if available
new_container_policy = req.headers.get(
'X-Backend-Storage-Policy-Default', int(POLICIES.default))
else:
new_container_policy = requested_policy_index
created = self._update_or_create(req, broker,
req_timestamp.internal,
new_container_policy,
requested_policy_index)
self._update_metadata(req, broker, req_timestamp, 'PUT')
resp = self.account_update(req, account, container, broker)
if resp:
return resp
if created:
return HTTPCreated(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
else:
return HTTPAccepted(request=req,
headers={'x-backend-storage-policy-index':
broker.storage_policy_index})
@public
@timing_stats(sample_rate=0.1)
def HEAD(self, req):
"""Handle HTTP HEAD request."""
drive, part, account, container, obj = get_obj_name_and_placement(req)
out_content_type = listing_formats.get_listing_content_type(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=headers)
headers.update(
(str_to_wsgi(key), str_to_wsgi(value))
for key, (value, timestamp) in broker.metadata.items()
if value != '' and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)))
headers['Content-Type'] = out_content_type
resp = HTTPNoContent(request=req, headers=headers, charset='utf-8')
resp.last_modified = math.ceil(float(headers['X-PUT-Timestamp']))
return resp
def update_data_record(self, record):
"""
        Performs any mutations to container listing records that are common to
        all serialization formats, and returns the record as a dict.
Converts created time to iso timestamp.
Replaces size with 'swift_bytes' content type parameter.
        :param record: object entry record
:returns: modified record
"""
if isinstance(record, ShardRange):
created = record.timestamp
response = dict(record)
else:
(name, created, size, content_type, etag) = record[:5]
name_ = name.decode('utf8') if six.PY2 else name
if content_type is None:
return {'subdir': name_}
response = {
'bytes': size, 'hash': etag, 'name': name_,
'content_type': content_type}
override_bytes_from_content_type(response, logger=self.logger)
response['last_modified'] = Timestamp(created).isoformat
return response
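    # Hedged illustration (record values are made up): an object row such as
    #   ('photo.jpg', '0000001234.00000', 1024,
    #    'image/jpeg;swift_bytes=2048', 'abc123')
    # would come back from update_data_record() roughly as
    #   {'name': 'photo.jpg', 'bytes': 2048, 'hash': 'abc123',
    #    'content_type': 'image/jpeg',
    #    'last_modified': '1970-01-01T00:20:34.000000'}
    # while a subdir row (content_type of None) collapses to {'subdir': <name>}.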
@public
@timing_stats()
def GET(self, req):
"""
Handle HTTP GET request.
The body of the response to a successful GET request contains a listing
of either objects or shard ranges. The exact content of the listing is
determined by a combination of request headers and query string
parameters, as follows:
* The type of the listing is determined by the
``X-Backend-Record-Type`` header. If this header has value ``shard``
then the response body will be a list of shard ranges; if this header
has value ``auto``, and the container state is ``sharding`` or
``sharded``, then the listing will be a list of shard ranges;
otherwise the response body will be a list of objects.
* Both shard range and object listings may be filtered according to
the constraints described below. However, the
``X-Backend-Ignore-Shard-Name-Filter`` header may be used to override
the application of the ``marker``, ``end_marker``, ``includes`` and
``reverse`` parameters to shard range listings. These parameters will
be ignored if the header has the value 'sharded' and the current db
sharding state is also 'sharded'. Note that this header does not
override the ``states`` constraint on shard range listings.
* The order of both shard range and object listings may be reversed by
using a ``reverse`` query string parameter with a
value in :attr:`swift.common.utils.TRUE_VALUES`.
* Both shard range and object listings may be constrained to a name
range by the ``marker`` and ``end_marker`` query string parameters.
Object listings will only contain objects whose names are greater
than any ``marker`` value and less than any ``end_marker`` value.
Shard range listings will only contain shard ranges whose namespace
is greater than or includes any ``marker`` value and is less than or
includes any ``end_marker`` value.
* Shard range listings may also be constrained by an ``includes`` query
string parameter. If this parameter is present the listing will only
contain shard ranges whose namespace includes the value of the
          parameter; any ``marker`` or ``end_marker`` parameters are ignored.
* The length of an object listing may be constrained by the ``limit``
parameter. Object listings may also be constrained by ``prefix``,
``delimiter`` and ``path`` query string parameters.
* Shard range listings will include deleted shard ranges if and only if
the ``X-Backend-Include-Deleted`` header value is one of
:attr:`swift.common.utils.TRUE_VALUES`. Object listings never
include deleted objects.
* Shard range listings may be constrained to include only shard ranges
whose state is specified by a query string ``states`` parameter. If
present, the ``states`` parameter should be a comma separated list of
either the string or integer representation of
:data:`~swift.common.utils.ShardRange.STATES`.
Two alias values may be used in a ``states`` parameter value:
``listing`` will cause the listing to include all shard ranges in a
state suitable for contributing to an object listing; ``updating``
will cause the listing to include all shard ranges in a state
suitable to accept an object update.
If either of these aliases is used then the shard range listing will
if necessary be extended with a synthesised 'filler' range in order
to satisfy the requested name range when insufficient actual shard
ranges are found. Any 'filler' shard range will cover the otherwise
uncovered tail of the requested name range and will point back to the
same container.
* Listings are not normally returned from a deleted container. However,
the ``X-Backend-Override-Deleted`` header may be used with a value in
:attr:`swift.common.utils.TRUE_VALUES` to force a shard range
listing to be returned from a deleted container whose DB file still
exists.
:param req: an instance of :class:`swift.common.swob.Request`
:returns: an instance of :class:`swift.common.swob.Response`
"""
drive, part, account, container, obj = get_obj_name_and_placement(req)
params = validate_container_params(req)
path = params.get('path')
prefix = params.get('prefix')
delimiter = params.get('delimiter')
marker = params.get('marker', '')
end_marker = params.get('end_marker')
limit = params['limit']
reverse = config_true_value(params.get('reverse'))
out_content_type = listing_formats.get_listing_content_type(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container,
pending_timeout=0.1,
stale_reads_ok=True)
info, is_deleted = broker.get_info_is_deleted()
record_type = req.headers.get('x-backend-record-type', '').lower()
db_state = info.get('db_state')
if record_type == 'auto' and db_state in (SHARDING, SHARDED):
record_type = 'shard'
if record_type == 'shard':
override_deleted = info and config_true_value(
req.headers.get('x-backend-override-deleted', False))
resp_headers = gen_resp_headers(
info, is_deleted=is_deleted and not override_deleted)
if is_deleted and not override_deleted:
return HTTPNotFound(request=req, headers=resp_headers)
resp_headers['X-Backend-Record-Type'] = 'shard'
includes = params.get('includes')
override_filter_hdr = req.headers.get(
'x-backend-override-shard-name-filter', '').lower()
if override_filter_hdr == db_state == 'sharded':
# respect the request to send back *all* ranges if the db is in
# sharded state
resp_headers['X-Backend-Override-Shard-Name-Filter'] = 'true'
marker = end_marker = includes = None
reverse = False
states = params.get('states')
fill_gaps = False
if states:
states = list_from_csv(states)
fill_gaps = any(('listing' in states, 'updating' in states))
try:
states = broker.resolve_shard_range_states(states)
except ValueError:
return HTTPBadRequest(request=req, body='Bad state')
include_deleted = config_true_value(
req.headers.get('x-backend-include-deleted', False))
container_list = broker.get_shard_ranges(
marker, end_marker, includes, reverse, states=states,
include_deleted=include_deleted, fill_gaps=fill_gaps)
else:
resp_headers = gen_resp_headers(info, is_deleted=is_deleted)
if is_deleted:
return HTTPNotFound(request=req, headers=resp_headers)
resp_headers['X-Backend-Record-Type'] = 'object'
# Use the retired db while container is in process of sharding,
# otherwise use current db
src_broker = broker.get_brokers()[0]
container_list = src_broker.list_objects_iter(
limit, marker, end_marker, prefix, delimiter, path,
storage_policy_index=info['storage_policy_index'],
reverse=reverse, allow_reserved=req.allow_reserved_names)
return self.create_listing(req, out_content_type, info, resp_headers,
broker.metadata, container_list, container)
def create_listing(self, req, out_content_type, info, resp_headers,
metadata, container_list, container):
for key, (value, _timestamp) in metadata.items():
if value and (key.lower() in self.save_headers or
is_sys_or_user_meta('container', key)):
resp_headers[str_to_wsgi(key)] = str_to_wsgi(value)
listing = [self.update_data_record(record)
for record in container_list]
if out_content_type.endswith('/xml'):
body = listing_formats.container_to_xml(listing, container)
elif out_content_type.endswith('/json'):
body = json.dumps(listing).encode('ascii')
else:
body = listing_formats.listing_to_text(listing)
ret = Response(request=req, headers=resp_headers, body=body,
content_type=out_content_type, charset='utf-8')
ret.last_modified = math.ceil(float(resp_headers['X-PUT-Timestamp']))
if not ret.body:
ret.status_int = HTTP_NO_CONTENT
return ret
@public
@replication
@timing_stats(sample_rate=0.01)
def REPLICATE(self, req):
"""
        Handle HTTP REPLICATE request (json-encoded RPC calls for replication).
"""
post_args = split_and_validate_path(req, 3)
drive, partition, hash = post_args
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
try:
args = json.load(req.environ['wsgi.input'])
except ValueError as err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
ret = self.replicator_rpc.dispatch(post_args, args)
ret.request = req
return ret
@public
@timing_stats()
def UPDATE(self, req):
"""
        Handle HTTP UPDATE request (merge_items RPCs coming from the proxy).
"""
drive, part, account, container = get_container_name_and_placement(req)
req_timestamp = valid_timestamp(req)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
requested_policy_index = self.get_and_validate_policy_index(req)
broker = self._get_container_broker(drive, part, account, container)
self._maybe_autocreate(broker, req_timestamp, account,
requested_policy_index, req)
try:
objs = json.load(req.environ['wsgi.input'])
except ValueError as err:
return HTTPBadRequest(body=str(err), content_type='text/plain')
broker.merge_items(objs)
return HTTPAccepted(request=req)
@public
@timing_stats()
def POST(self, req):
"""Handle HTTP POST request."""
drive, part, account, container = get_container_name_and_placement(req)
req_timestamp = valid_timestamp(req)
if 'x-container-sync-to' in req.headers:
err, sync_to, realm, realm_key = validate_sync_to(
req.headers['x-container-sync-to'], self.allowed_sync_hosts,
self.realms_conf)
if err:
return HTTPBadRequest(err)
try:
check_drive(self.root, drive, self.mount_check)
except ValueError:
return HTTPInsufficientStorage(drive=drive, request=req)
if not self.check_free_space(drive):
return HTTPInsufficientStorage(drive=drive, request=req)
broker = self._get_container_broker(drive, part, account, container)
if broker.is_deleted():
return HTTPNotFound(request=req)
broker.update_put_timestamp(req_timestamp.internal)
self._update_metadata(req, broker, req_timestamp, 'POST')
return HTTPNoContent(request=req)
def __call__(self, env, start_response):
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(wsgi_to_str(req.path_info), internal=True):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s %(path)s '),
{'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
if self.log_requests:
trans_time = time.time() - start_time
log_message = get_log_line(req, res, trans_time, '',
self.log_format,
self.anonymization_method,
self.anonymization_salt)
if req.method.upper() == 'REPLICATE':
self.logger.debug(log_message)
else:
self.logger.info(log_message)
return res(env, start_response)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf)
| {
"content_hash": "d4b154c2467b9aaf07cbbd470a4edda1",
"timestamp": "",
"source": "github",
"line_count": 898,
"max_line_length": 79,
"avg_line_length": 47.17817371937639,
"alnum_prop": 0.5893641127319077,
"repo_name": "swiftstack/swift",
"id": "0292320fbec899e0599751aed0b993ca063f17a3",
"size": "42961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift/container/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3088"
},
{
"name": "HTML",
"bytes": "625"
},
{
"name": "Python",
"bytes": "12427848"
},
{
"name": "Shell",
"bytes": "8704"
}
],
"symlink_target": ""
} |
import zlib
import base64
def decode_base64_and_inflate( b64string ):
decoded_data = base64.b64decode( b64string )
return zlib.decompress( decoded_data , -15)
def deflate_and_base64_encode( string_val ):
zlibbed_str = zlib.compress( string_val )
compressed_string = zlibbed_str[2:-4]
return base64.b64encode( compressed_string )
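# Hedged usage sketch (not in the original snippet): round-trips a payload
# through the two helpers above. Assumes Python 3, where zlib.compress()
# requires bytes, so the sample payload is a bytes literal.
def _example_roundtrip():
    payload = b'hello hello hello'
    token = deflate_and_base64_encode(payload)
    # decode_base64_and_inflate() passes wbits=-15, i.e. it expects a raw
    # DEFLATE stream -- exactly what remains after the 2-byte header and
    # 4-byte checksum are stripped off above.
    assert decode_base64_and_inflate(token) == payload
    return token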
# uncompressed_string.encode('zlib')[2:-4]
# or
# zlib.compress(uncompressed_string)[2:-4]
# Both forms throw away the 2-byte zlib header and the 4-byte checksum.
# The str.encode('zlib') variant no longer exists in Python 3.x.
# import zlib
def deflate(data, compresslevel=9):
compress = zlib.compressobj(
compresslevel, # level: 0-9
zlib.DEFLATED, # method: must be DEFLATED
-zlib.MAX_WBITS, # window size in bits:
# -15..-8: negate, suppress header
# 8..15: normal
# 16..30: subtract 16, gzip header
zlib.DEF_MEM_LEVEL, # mem level: 1..8/9
0 # strategy:
# 0 = Z_DEFAULT_STRATEGY
# 1 = Z_FILTERED
# 2 = Z_HUFFMAN_ONLY
# 3 = Z_RLE
# 4 = Z_FIXED
)
deflated = compress.compress(data)
deflated += compress.flush()
return deflated
def inflate(data):
decompress = zlib.decompressobj(
-zlib.MAX_WBITS # see above
)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated | {
"content_hash": "675e6dd272739e6d3d7c88ecf5cd0b83",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 35.702127659574465,
"alnum_prop": 0.5214541120381406,
"repo_name": "yv84/pyph",
"id": "031e562d4c0f36c5b9bdf8af4ac422bfa5617a60",
"size": "2309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/aa/compress.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2398"
},
{
"name": "CoffeeScript",
"bytes": "4144"
},
{
"name": "JavaScript",
"bytes": "14256"
},
{
"name": "Python",
"bytes": "419803"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers import detection
from paddle.fluid.framework import Program, program_guard
import unittest
class TestDetection(unittest.TestCase):
def test_detection_output(self):
program = Program()
with program_guard(program):
pb = layers.data(
name='prior_box',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
pbv = layers.data(
name='prior_box_var',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
loc = layers.data(
name='target_box',
shape=[2, 10, 4],
append_batch_size=False,
dtype='float32')
scores = layers.data(
name='scores',
shape=[2, 10, 20],
append_batch_size=False,
dtype='float32')
out = layers.detection_output(
scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
out2, index = layers.detection_output(
scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
self.assertIsNotNone(out)
self.assertIsNotNone(out2)
self.assertIsNotNone(index)
self.assertEqual(out.shape[-1], 6)
print(str(program))
def test_box_coder_api(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[4], dtype='float32')
y = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
bcoder = layers.box_coder(
prior_box=x,
prior_box_var=[0.1, 0.2, 0.1, 0.2],
target_box=y,
code_type='encode_center_size')
self.assertIsNotNone(bcoder)
print(str(program))
def test_detection_api(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[4], dtype='float32')
y = layers.data(name='y', shape=[4], dtype='float32')
z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
iou = layers.iou_similarity(x=x, y=y)
bcoder = layers.box_coder(
prior_box=x,
prior_box_var=y,
target_box=z,
code_type='encode_center_size')
self.assertIsNotNone(iou)
self.assertIsNotNone(bcoder)
matched_indices, matched_dist = layers.bipartite_match(iou)
self.assertIsNotNone(matched_indices)
self.assertIsNotNone(matched_dist)
gt = layers.data(
name='gt', shape=[1, 1], dtype='int32', lod_level=1)
trg, trg_weight = layers.target_assign(
gt, matched_indices, mismatch_value=0)
self.assertIsNotNone(trg)
self.assertIsNotNone(trg_weight)
gt2 = layers.data(
name='gt2', shape=[10, 4], dtype='float32', lod_level=1)
trg, trg_weight = layers.target_assign(
gt2, matched_indices, mismatch_value=0)
self.assertIsNotNone(trg)
self.assertIsNotNone(trg_weight)
print(str(program))
def test_ssd_loss(self):
program = Program()
with program_guard(program):
pb = layers.data(
name='prior_box',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
pbv = layers.data(
name='prior_box_var',
shape=[10, 4],
append_batch_size=False,
dtype='float32')
loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = layers.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = layers.data(
name='gt_label', shape=[1], lod_level=1, dtype='int32')
loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
self.assertIsNotNone(loss)
self.assertEqual(loss.shape[-1], 1)
print(str(program))
class TestPriorBox(unittest.TestCase):
def test_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.prior_box(
input=conv1,
image=images,
min_sizes=[100.0],
aspect_ratios=[1.],
flip=True,
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[3] == 4
class TestDensityPriorBox(unittest.TestCase):
def test_density_prior_box(self):
program = Program()
with program_guard(program):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
box, var = layers.density_prior_box(
input=conv1,
image=images,
densities=[3, 4],
fixed_sizes=[50., 60.],
fixed_ratios=[1.0],
clip=True)
assert len(box.shape) == 4
assert box.shape == var.shape
assert box.shape[-1] == 4
class TestAnchorGenerator(unittest.TestCase):
def test_anchor_generator(self):
data_shape = [3, 224, 224]
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
assert len(anchor.shape) == 4
assert anchor.shape == var.shape
assert anchor.shape[3] == 4
class TestGenerateProposalLabels(unittest.TestCase):
def test_generate_proposal_labels(self):
program = Program()
with program_guard(program):
rpn_rois = layers.data(
name='rpn_rois',
shape=[4, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
gt_classes = layers.data(
name='gt_classes',
shape=[6],
dtype='int32',
lod_level=1,
append_batch_size=False)
is_crowd = layers.data(
name='is_crowd',
shape=[6],
dtype='int32',
lod_level=1,
append_batch_size=False)
gt_boxes = layers.data(
name='gt_boxes',
shape=[6, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
class_nums = 5
outs = fluid.layers.generate_proposal_labels(
rpn_rois=rpn_rois,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_boxes=gt_boxes,
im_info=im_info,
batch_size_per_im=2,
fg_fraction=0.5,
fg_thresh=0.5,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=class_nums)
rois = outs[0]
labels_int32 = outs[1]
bbox_targets = outs[2]
bbox_inside_weights = outs[3]
bbox_outside_weights = outs[4]
assert rois.shape[1] == 4
assert rois.shape[0] == labels_int32.shape[0]
assert rois.shape[0] == bbox_targets.shape[0]
assert rois.shape[0] == bbox_inside_weights.shape[0]
assert rois.shape[0] == bbox_outside_weights.shape[0]
assert bbox_targets.shape[1] == 4 * class_nums
assert bbox_inside_weights.shape[1] == 4 * class_nums
assert bbox_outside_weights.shape[1] == 4 * class_nums
class TestGenerateMaskLabels(unittest.TestCase):
def test_generate_mask_labels(self):
program = Program()
with program_guard(program):
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
gt_classes = layers.data(
name='gt_classes',
shape=[2, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
is_crowd = layers.data(
name='is_crowd',
shape=[2, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
gt_segms = layers.data(
name='gt_segms',
shape=[20, 2],
dtype='float32',
lod_level=3,
append_batch_size=False)
rois = layers.data(
name='rois',
shape=[4, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
labels_int32 = layers.data(
name='labels_int32',
shape=[4, 1],
dtype='int32',
lod_level=1,
append_batch_size=False)
num_classes = 5
resolution = 14
outs = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_segms,
rois=rois,
labels_int32=labels_int32,
num_classes=num_classes,
resolution=resolution)
mask_rois, roi_has_mask_int32, mask_int32 = outs
assert mask_rois.shape[1] == 4
assert mask_int32.shape[1] == num_classes * resolution * resolution
class TestMultiBoxHead(unittest.TestCase):
def test_multi_box_head(self):
data_shape = [3, 224, 224]
mbox_locs, mbox_confs, box, var = self.multi_box_head_output(data_shape)
assert len(box.shape) == 2
assert box.shape == var.shape
assert box.shape[1] == 4
assert mbox_locs.shape[1] == mbox_confs.shape[1]
def multi_box_head_output(self, data_shape):
images = fluid.layers.data(
name='pixel', shape=data_shape, dtype='float32')
conv1 = fluid.layers.conv2d(images, 3, 3, 2)
conv2 = fluid.layers.conv2d(conv1, 3, 3, 2)
conv3 = fluid.layers.conv2d(conv2, 3, 3, 2)
conv4 = fluid.layers.conv2d(conv3, 3, 3, 2)
conv5 = fluid.layers.conv2d(conv4, 3, 3, 2)
mbox_locs, mbox_confs, box, var = layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv5],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
return mbox_locs, mbox_confs, box, var
class TestDetectionMAP(unittest.TestCase):
def test_detection_map(self):
program = Program()
with program_guard(program):
detect_res = layers.data(
name='detect_res',
shape=[10, 6],
append_batch_size=False,
dtype='float32')
label = layers.data(
name='label',
shape=[10, 6],
append_batch_size=False,
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
self.assertIsNotNone(map_out)
self.assertEqual(map_out.shape, (1, ))
print(str(program))
class TestRpnTargetAssign(unittest.TestCase):
def test_rpn_target_assign(self):
program = Program()
with program_guard(program):
bbox_pred_shape = [10, 50, 4]
cls_logits_shape = [10, 50, 2]
anchor_shape = [50, 4]
bbox_pred = layers.data(
name='bbox_pred',
shape=bbox_pred_shape,
append_batch_size=False,
dtype='float32')
cls_logits = layers.data(
name='cls_logits',
shape=cls_logits_shape,
append_batch_size=False,
dtype='float32')
anchor_box = layers.data(
name='anchor_box',
shape=anchor_shape,
append_batch_size=False,
dtype='float32')
anchor_var = layers.data(
name='anchor_var',
shape=anchor_shape,
append_batch_size=False,
dtype='float32')
gt_boxes = layers.data(
name='gt_boxes', shape=[4], lod_level=1, dtype='float32')
is_crowd = layers.data(
name='is_crowd',
shape=[1, 10],
dtype='int32',
lod_level=1,
append_batch_size=False)
im_info = layers.data(
name='im_info',
shape=[1, 3],
dtype='float32',
lod_level=1,
append_batch_size=False)
outs = layers.rpn_target_assign(
bbox_pred=bbox_pred,
cls_logits=cls_logits,
anchor_box=anchor_box,
anchor_var=anchor_var,
gt_boxes=gt_boxes,
is_crowd=is_crowd,
im_info=im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=False)
pred_scores = outs[0]
pred_loc = outs[1]
tgt_lbl = outs[2]
tgt_bbox = outs[3]
bbox_inside_weight = outs[4]
self.assertIsNotNone(pred_scores)
self.assertIsNotNone(pred_loc)
self.assertIsNotNone(tgt_lbl)
self.assertIsNotNone(tgt_bbox)
self.assertIsNotNone(bbox_inside_weight)
assert pred_scores.shape[1] == 1
assert pred_loc.shape[1] == 4
assert pred_loc.shape[1] == tgt_bbox.shape[1]
print(str(program))
class TestGenerateProposals(unittest.TestCase):
def test_generate_proposals(self):
program = Program()
with program_guard(program):
data_shape = [20, 64, 64]
images = fluid.layers.data(
name='images', shape=data_shape, dtype='float32')
im_info = fluid.layers.data(
name='im_info', shape=[3], dtype='float32')
anchors, variances = fluid.layers.anchor_generator(
name='anchor_generator',
input=images,
anchor_sizes=[32, 64],
aspect_ratios=[1.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
num_anchors = anchors.shape[2]
scores = fluid.layers.data(
name='scores', shape=[num_anchors, 8, 8], dtype='float32')
bbox_deltas = fluid.layers.data(
name='bbox_deltas',
shape=[num_anchors * 4, 8, 8],
dtype='float32')
rpn_rois, rpn_roi_probs = fluid.layers.generate_proposals(
name='generate_proposals',
scores=scores,
bbox_deltas=bbox_deltas,
im_info=im_info,
anchors=anchors,
variances=variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0)
self.assertIsNotNone(rpn_rois)
self.assertIsNotNone(rpn_roi_probs)
print(rpn_rois.shape)
class TestYoloDetection(unittest.TestCase):
def test_yolov3_loss(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32')
gt_label = layers.data(name='gt_label', shape=[10], dtype='int32')
gt_score = layers.data(name='gt_score', shape=[10], dtype='float32')
loss = layers.yolov3_loss(
x,
gt_box,
gt_label, [10, 13, 30, 13], [0, 1],
10,
0.7,
32,
gt_score=gt_score,
use_label_smooth=False)
self.assertIsNotNone(loss)
def test_yolo_box(self):
program = Program()
with program_guard(program):
x = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
img_size = layers.data(name='img_size', shape=[2], dtype='int32')
boxes, scores = layers.yolo_box(x, img_size, [10, 13, 30, 13], 10,
0.01, 32)
self.assertIsNotNone(boxes)
self.assertIsNotNone(scores)
class TestBoxClip(unittest.TestCase):
def test_box_clip(self):
program = Program()
with program_guard(program):
input_box = layers.data(
name='input_box', shape=[7, 4], dtype='float32', lod_level=1)
im_info = layers.data(name='im_info', shape=[3], dtype='float32')
out = layers.box_clip(input_box, im_info)
self.assertIsNotNone(out)
class TestMulticlassNMS(unittest.TestCase):
def test_multiclass_nms(self):
program = Program()
with program_guard(program):
bboxes = layers.data(
name='bboxes', shape=[-1, 10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
output = layers.multiclass_nms(bboxes, scores, 0.3, 400, 200, 0.7)
self.assertIsNotNone(output)
class TestMulticlassNMS2(unittest.TestCase):
def test_multiclass_nms2(self):
program = Program()
with program_guard(program):
bboxes = layers.data(
name='bboxes', shape=[-1, 10, 4], dtype='float32')
scores = layers.data(name='scores', shape=[-1, 10], dtype='float32')
output = layers.multiclass_nms2(bboxes, scores, 0.3, 400, 200, 0.7)
output2, index = layers.multiclass_nms2(
bboxes, scores, 0.3, 400, 200, 0.7, return_index=True)
self.assertIsNotNone(output)
self.assertIsNotNone(output2)
self.assertIsNotNone(index)
class TestCollectFpnPropsals(unittest.TestCase):
def test_collect_fpn_proposals(self):
program = Program()
with program_guard(program):
multi_bboxes = []
multi_scores = []
for i in range(4):
bboxes = layers.data(
name='rois' + str(i),
shape=[10, 4],
dtype='float32',
lod_level=1,
append_batch_size=False)
scores = layers.data(
name='scores' + str(i),
shape=[10, 1],
dtype='float32',
lod_level=1,
append_batch_size=False)
multi_bboxes.append(bboxes)
multi_scores.append(scores)
fpn_rois = layers.collect_fpn_proposals(multi_bboxes, multi_scores,
2, 5, 10)
self.assertIsNotNone(fpn_rois)
class TestDistributeFpnProposals(unittest.TestCase):
def test_distribute_fpn_proposals(self):
program = Program()
with program_guard(program):
fpn_rois = fluid.layers.data(
name='data', shape=[4], dtype='float32', lod_level=1)
multi_rois, restore_ind = layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
self.assertIsNotNone(multi_rois)
self.assertIsNotNone(restore_ind)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "6b34058d35eb344fa0692849fac76673",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 80,
"avg_line_length": 36.97923875432526,
"alnum_prop": 0.4956021334331431,
"repo_name": "tensor-tang/Paddle",
"id": "d4151428c8a61e976fb593c8cf9363d6cfccdebf",
"size": "21987",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/test_detection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10161819"
},
{
"name": "CMake",
"bytes": "290828"
},
{
"name": "Cuda",
"bytes": "1183095"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7082088"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
} |
"Core classes and exceptions for lacrm"
from __future__ import print_function
import logging
import requests
import json
from lacrm.utils import LacrmArgumentError, BaseLacrmError
from os.path import expanduser
LOGGER = logging.getLogger(__name__)
class Lacrm(object):
"""Less Annoying CRM Instance
An instance of Lacrm wraps a LACRM REST API session.
"""
def __init__(self, user_code=None, api_token=None):
creds = self._parse_creds()
if user_code is None and api_token is None:
self.user_code = creds[0]
self.api_token = creds[1]
else:
self.user_code = user_code
self.api_token = api_token
self.payload = {'UserCode': self.user_code,
'APIToken': self.api_token}
self.endpoint_url = 'https://api.lessannoyingcrm.com'
# Mapping that allows us to parse different API methods' response
# meaningfully
self.api_method_responses = {'CreateContact': 'ContactId',
'CreateNote': 'NoteId',
'CreateTask': 'TaskId',
'CreateEvent': 'EventId',
'GetContact': 'Contact',
'CreatePipeline': 'PipelineItemId',
'SearchContacts': 'Result',
'GetPipelineReport': 'Result'}
def _parse_creds(self, filename='.lacrm'):
""" Parses dot file for lacrm credentials """
creds = None
try:
file_path = expanduser('~') + '/' + filename
with open(file_path, 'r') as credfile:
for line in credfile:
if line.strip()[0] == '#':
pass
elif ':' in line:
user_code = line.strip().split(':')[0]
api_token = line.strip().split(':')[1]
creds = user_code, api_token
break
return creds
# Fail silently as most people will not have creds file
except IOError:
return None
except (UnboundLocalError, IndexError):
print('Attempted to use a credentials dotfile ({}) but '
'it is either empty or malformed. Credentials should be in '
'the form USER_CODE:API_TOKEN.'.format(file_path))
raise
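    # Hedged illustration (made-up values): the ~/.lacrm dotfile parsed above
    # is expected to hold a single USER_CODE:API_TOKEN line, for example:
    #
    #   # Less Annoying CRM credentials
    #   1234567:EXAMPLE-API-TOKEN
    #
    # Lines starting with '#' are skipped and the first line containing ':'
    # is used.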
def api_call(func):
""" Decorator calls out to the API for specifics API methods """
def make_api_call(self, *args, **kwargs):
api_method, data, expected_parameters = func(self, *args)
parameters = {}
for key, value in data.items():
parameters[key] = value
if expected_parameters:
self.__validator(parameters.keys(), expected_parameters)
method_payload = self.payload
method_payload['Function'] = api_method
method_payload['Parameters'] = json.dumps(parameters)
response = requests.post(self.endpoint_url, data=method_payload)
status_code = response.status_code
if status_code != 200:
raise BaseLacrmError(content='Unknown error occurred -- check'
'https://www.lessannoyingcrm.com/account/'
'api/ for more detailed information.')
elif kwargs.get('raw_response'):
return response.json()
else:
response = response.json()
return response.get(self.api_method_responses.get(api_method), status_code)
return make_api_call
@api_call
def search_contacts(self, term, params=None, raw_response=False):
""" Searches LACRM contacts for a given term """
api_method = 'SearchContacts'
if not params:
params = {'SearchTerms': term}
else:
params.update({'SearchTerms': term})
return api_method, params, None
def get_all_contacts(self, params=None):
""" Searches and returns all LACRM contacts """
defaults = {'NumRows': 500,
'Page': 1,
'Sort': 'DateEntered'}
if not params:
params = defaults
else:
            defaults.update(params)
            params = defaults  # dict.update() returns None, so keep the merged dict
all_contacts = []
while True:
page_of_contacts = self.search_contacts("", params)
all_contacts = all_contacts + page_of_contacts
if len(page_of_contacts) < 500:
break
else:
params['Page'] = params['Page'] + 1
return all_contacts
@api_call
def add_contact_to_group(self, contact_id, group_name, raw_response=False):
""" Adds a contact to a group in LACRM """
data = {}
data['ContactId'] = contact_id
data['GroupName'] = group_name
if group_name.find(' ') > 0:
raise LacrmArgumentError(
content='The group name you passed "{0}" contains spaces. '
                'Spaces should be replaced with underscores (e.g. "cool '
'group" should be "cool_group"). See '
'https://www.lessannoyingcrm.com/help/topic/API_Function_Definitions/8/AddContactToGroup+Function+Definition '
'for more details.'.format(group_name))
api_method = 'AddContactToGroup'
return api_method, data, None
@api_call
def delete_contact(self, contact_id, raw_response=False):
""" Deletes a given contact from LACRM """
data = {}
data['ContactId'] = contact_id
api_method = 'DeleteContact'
return api_method, data, None
@api_call
def get_contact(self, contact_id, raw_response=False):
""" Get all information in LACRM for given contact """
data = {}
data['ContactId'] = contact_id
api_method = 'GetContact'
return api_method, data, None
@api_call
def create_contact(self, data):
""" Creates a new contact in LACRM """
api_method = 'CreateContact'
expected_parameters = ['FullName',
'Salutation',
'FirstName',
'MiddleName',
'LastName',
'Suffix',
'CompanyName',
'CompanyId',
'Title',
'Industry',
'NumEmployees',
'BackgroundInfo',
'Email',
'Phone',
'Address',
'Website',
'Birthday',
'CustomFields',
'assignedTo']
return api_method, data, expected_parameters
@api_call
def edit_contact(self, contact_id, data, raw_response=False):
""" Edits a contact in LACRM for given """
data['ContactId'] = contact_id
api_method = 'EditContact'
expected_parameters = ['FullName',
'Salutation',
'FirstName',
'MiddleName',
'LastName',
'Suffix',
'CompanyName',
'ContactId',
'CompanyId',
'Title',
'Industry',
'NumEmployees',
'BackgroundInfo',
'Email',
'Phone',
'Address',
'Website',
'Birthday',
'CustomFields',
'assignedTo']
return api_method, data, expected_parameters
@api_call
def create_pipeline(self, contact_id, data, raw_response=False):
""" Creates a new pipeline in LACRM for given contactid """
data['ContactId'] = contact_id
api_method = 'CreatePipeline'
expected_parameters = ['ContactId',
'Note',
'PipelineId',
'StatusId',
'Priority',
'CustomFields']
return api_method, data, expected_parameters
@api_call
def update_pipeline(self, pipeline_item_id, data, raw_response=False):
""" Update a pipeline in LACRM """
data['PipelineItemId'] = pipeline_item_id
api_method = 'UpdatePipelineItem'
expected_parameters = ['PipelineItemId',
'Note',
'StatusId',
'Priority',
'CustomFields']
return api_method, data, expected_parameters
@api_call
def create_note(self, contact_id, note, raw_response=False):
""" Creates a new note in LACRM for a given contactid """
data = {}
data['ContactId'] = contact_id
data['Note'] = note
api_method = 'CreateNote'
expected_parameters = ['ContactId', 'Note']
return api_method, data, expected_parameters
@api_call
def create_task(self, data, raw_response=False):
""" Creates a new task in LACRM """
api_method = 'CreateTask'
expected_parameters = ['ContactId',
'DueDate', # YYYY-MM-DD
'Description',
'ContactId',
'AssignedTo']
return api_method, data, expected_parameters
@api_call
def create_event(self, data, raw_response=False):
""" Creates a new event in LACRM """
api_method = 'CreateEvent'
expected_parameters = ['Date',
'StartTime', # 24:00
'EndTime', # 24:00
'Name',
'Description',
'Contacts',
'Users']
return api_method, data, expected_parameters
@api_call
def get_pipeline_report(self, pipeline_id, data, raw_response=False):
""" Grabs a pipeline_report in LACRM """
data['PipelineId'] = pipeline_id
api_method = 'GetPipelineReport'
expected_parameters = ['PipelineId',
'SortBy',
'NumRows',
'Page',
'SortDirection',
'UserFilter',
'StatusFilter']
return api_method, data, expected_parameters
def get_all_pipeline_report(self, pipeline_id, status=None):
""" Grabs a pipeline_report in LACRM """
continue_flag = True
page = 1
output = []
while continue_flag:
params = {'NumRows': 500,
'Page': page,
'SortBy': 'Status'}
if status in ['all', 'closed']:
params['StatusFilter'] = status
else:
print('That status code is not recognized via the API.')
respjson = self.get_pipeline_report(pipeline_id, params)
for i in respjson:
output.append(i)
if len(respjson) == 500:
page += 1
else:
continue_flag = False
return output
def __validator(self, parameters, known_parameters):
for param in parameters:
if param not in known_parameters:
raise LacrmArgumentError(content='The provided parameter "{}" '
'cannot be recognized by the '
'API'.format(param))
return
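# Hedged usage sketch (not part of the original module): the user code, API
# token, and contact fields below are made-up placeholders, and every call
# hits the live LACRM endpoint, so treat this purely as an illustration.
def _example_usage():  # pragma: no cover
    crm = Lacrm(user_code='EXAMPLE-USER-CODE', api_token='EXAMPLE-API-TOKEN')
    contact_id = crm.create_contact({'FullName': 'Jane Doe',
                                     'CompanyName': 'Acme Widgets'})
    crm.create_note(contact_id, 'Met at the trade show')
    return crm.search_contacts('Jane Doe')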
| {
"content_hash": "aa45e3a6819b4e1bc14830b24e3ba23a",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 130,
"avg_line_length": 34.33240997229917,
"alnum_prop": 0.469985476843634,
"repo_name": "HighMileage/lacrm",
"id": "f13eb056eaa301db1388887bb89ea73174e2dc4a",
"size": "12394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lacrm/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21915"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pika, uuid
SERVER = 'gose.fiehnlab.ucdavis.edu'
FOUT = open(str(uuid.uuid1()) +'.csv', 'wb')
def callback(ch, method, properties, body):
FOUT.write(body)
FOUT.write(b'\n')
FOUT.flush()
if __name__ == '__main__':
credentials = pika.PlainCredentials('sajjan', 'fiehnlab2015')
parameters = pika.ConnectionParameters(host = SERVER, credentials = credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue = 'splash_aggregation')
channel.basic_consume(callback, queue = 'splash_aggregation', no_ack = True)
channel.start_consuming() | {
"content_hash": "40df1c7528ae11b9288f068304321bd2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 27.541666666666668,
"alnum_prop": 0.7155824508320726,
"repo_name": "berlinguyinca/spectra-hash",
"id": "6840a6e9cd1b6e44f71d3b7eac6219256538295d",
"size": "684",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utilities/splash-analysis/scripts/aggregation_consumer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "74466"
},
{
"name": "C++",
"bytes": "399366"
},
{
"name": "CMake",
"bytes": "482"
},
{
"name": "Dockerfile",
"bytes": "2647"
},
{
"name": "Groovy",
"bytes": "6901"
},
{
"name": "HTML",
"bytes": "18834"
},
{
"name": "Java",
"bytes": "112064"
},
{
"name": "JavaScript",
"bytes": "10205"
},
{
"name": "Makefile",
"bytes": "1367"
},
{
"name": "Python",
"bytes": "38411"
},
{
"name": "R",
"bytes": "7262"
},
{
"name": "Scala",
"bytes": "17205"
},
{
"name": "Shell",
"bytes": "1665"
}
],
"symlink_target": ""
} |
""" Contains the base classes for mechanical EM-style score reels."""
# score_reel.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
from collections import deque
from mpf.system.device import Device
from mpf.system.tasks import DelayManager
from mpf.system.timing import Timing
from mpf.system.config import Config
# Known limitations of this module:
# Assumes all score reels include a zero value
# Assumes all score reels count up or down by one
# Assumes all score reels map their displayed value to their stored value
# in a 1:1 way (i.e. value[0] displays 0, value[5] displays 5, etc.)
# Note, currently this module only supports "incrementing" reels (i.e. counting
# up). Decrementing support will be added in the future.
class ScoreReelController(object):
"""The overall controller that is in charge of and manages the score reels
in a pinball machine.
The main thing this controller does is keep track of how many
ScoreReelGroups there are in the machine and how many players there are,
as well as maps the current player to the proper score reel.
This controller is also responsible for working around broken
ScoreReelGroups and "stacking" and switching out players when there are
multiple players per ScoreReelGroup.
"""
def __init__(self, machine):
self.machine = machine
self.log = logging.getLogger("ScoreReelController")
self.log.debug("Loading the ScoreReelController")
self.active_scorereelgroup = None
"""Pointer to the active ScoreReelGroup for the current player.
"""
self.player_to_scorereel_map = []
"""This is a list of ScoreReelGroup objects which corresponds to player
indexes. The first element [0] in this list is the first player (which
        is player index [0]); the next one is the next player, etc.
"""
self.reset_queue = []
"""List of score reel groups that still need to be reset"""
self.queue = None
"""Holds any active queue event queue objects"""
# register for events
# switch the active score reel group and reset it (if needed)
self.machine.events.add_handler('player_turn_start',
self.rotate_player)
# receive notification of score changes
self.machine.events.add_handler('player_score', self.score_change)
# receives notifications of game starts to reset the reels
self.machine.events.add_handler('game_starting', self.game_starting)
def rotate_player(self, **kwargs):
"""Called when a new player's turn starts.
The main purpose of this method is to map the current player to their
        ScoreReelGroup in the backbox. It will do this by comparing the length
        of the list which holds those mappings (`player_to_scorereel_map`) to
        the length of the list of players. If the player list is longer, that
means we don't have a ScoreReelGroup for that player.
In that case it will check the tags of the ScoreReelGroups to see if
one of them is tagged with playerX which corresponds to this player.
If not then it will pick the next free one. If there are none free,
then it will "double up" that player on an existing one which means
the same Score Reels will be used for both players, and they will
reset themselves automatically between players.
"""
# if our player to reel map is less than the number of players, we need
# to create a new mapping
if (len(self.player_to_scorereel_map) <
len(self.machine.game.player_list)):
self.map_new_score_reel_group()
self.active_scorereelgroup = self.player_to_scorereel_map[
self.machine.game.player.index]
self.log.debug("Mapping Player %s to ScoreReelGroup '%s'",
self.machine.game.player.number,
self.active_scorereelgroup.name)
# Make sure this score reel group is showing the right score
self.log.debug("Current player's score: %s",
self.machine.game.player.score)
self.log.debug("Score displayed on reels: %s",
self.active_scorereelgroup.assumed_value_int)
if (self.active_scorereelgroup.assumed_value_int !=
self.machine.game.player.score):
self.active_scorereelgroup.set_value(
self.machine.game.player.score)
# light up this group
for group in self.machine.score_reel_groups:
group.unlight()
self.active_scorereelgroup.light()
def map_new_score_reel_group(self):
"""Creates a mapping of a player to a score reel group."""
# do we have a reel group tagged for this player?
for reel_group in self.machine.score_reel_groups.items_tagged(
"player" + str(self.machine.game.player.number)):
self.player_to_scorereel_map.append(reel_group)
self.log.debug("Found a mapping to add: %s", reel_group.name)
return
# if we didn't find one, then we'll just use the first player's group
# for all the additional ones.
# todo maybe we should get fancy with looping through? Meh... we'll
# cross that bridge when we get to it.
self.player_to_scorereel_map.append(self.player_to_scorereel_map[0])
def score_change(self, value, change, **kwargs):
"""Called whenever the score changes and adds the score increase to the
current active ScoreReelGroup.
This method is the handler for the score change event, so it's called
automatically.
Args:
            value: Integer value of the new total score. This is passed on to
                the active ScoreReelGroup as the target it should end up
                displaying.
            change: Integer value of the change to the score.
"""
self.active_scorereelgroup.add_value(value=change, target=value)
def game_starting(self, queue, game):
"""Resets the score reels when a new game starts.
This is a queue event so it doesn't allow the game start to continue
until it's done.
Args:
queue: A reference to the queue object for the game starting event.
game: A reference to the main game object. This is ignored and only
included because the game_starting event passes it.
"""
self.queue = queue
# tell the game_starting event queue that we have stuff to do
self.queue.wait()
# populate the reset queue
self.reset_queue = []
for player, score_reel_group in self.machine.score_reel_groups.iteritems():
self.reset_queue.append(score_reel_group)
self.reset_queue.sort(key=lambda x: x.name)
# todo right now this sorts by ScoreGroupName. Need to change to tags
self._reset_next_group() # kick off the reset process
def _reset_next_group(self, value=0):
# param `value` since that's what validate passes. Dunno if we need it.
if self.reset_queue: # there's still more to reset
next_group = self.reset_queue.pop(0)
self.log.debug("Resetting ScoreReelGroup %s", next_group.name)
# add the handler to know when this group is reset
self.machine.events.add_handler('scorereelgroup_' +
next_group.name +
'_valid', self._reset_next_group)
next_group.set_value(value)
else: # no more to reset
# clear the event queue
self.queue.clear()
self.queue = None
# remove all these handlers watching for 0
self.machine.events.remove_handler(self._reset_next_group)
class ScoreReelGroup(Device):
"""Represents a logical grouping of score reels in a pinball machine, where
multiple individual ScoreReel object make up the individual digits of this
group. This group also has support for the blank zero "inserts" that some
machines use. This is a subclass of mpf.system.device.Device.
"""
config_section = 'score_reel_groups'
collection = 'score_reel_groups'
class_label = 'score_reel_group'
@classmethod
def device_class_init(cls, machine):
# If we have at least one score reel group, we need a
# ScoreReelController
machine.score_reel_controller = ScoreReelController(machine)
def __init__(self, machine, name, config, collection=None, validate=True):
super(ScoreReelGroup, self).__init__(machine, name, config, collection,
validate=validate)
self.wait_for_valid_queue = None
self.valid = True # Confirmed reels are showing the right values
self.lit = False # This group has its lights on
self.unlight_on_resync_key = None
self.light_on_valid_key = None
self.reels = []
# A list of individual ScoreReel objects that make up this
        # ScoreReelGroup. The number of items in the list corresponds to the
# number of digits that can be displayed. A value of `None` indicates a
# position that is not controlled by a moving reel (like a fake ones
# digit).
# Note that this is "backwards," with element 0 representing the ones
        # digit, element 1 representing the tens, etc.
self.desired_value_list = []
# A list of what values the machine desires to have the score reel
# group set to.
self.reset_pulses_per_round = 5
        # Integer value of how many "pulses" should be done per reset round.
        # This is used to simulate the actual mechanical resets the way a classic
# EM machine would do it. If you watch an EM game reset, you'll notice
# they pulse the reels in groups, like click-click-click-click-click..
# pause.. click-click-click-click-click.. pause.. etc. Once each reel
# gets to zero, it stops advancing.
# If you don't want to emulate this behavior, set this to 0. The default
# is 5.
# TODO / NOTE: This feature is not yet implemented.
self.advance_queue = deque()
        # Holds a list of the next reels to advance for step advances.
self.jump_in_progress = False
# Boolean attribute that is True when a jump advance is in progress.
# convert self.config['reels'] from strings to objects
for reel in self.config['reels']:
# find the object
if reel:
reel = self.machine.score_reels[reel]
self.reels.append(reel)
self.reels.reverse() # We want our smallest digit in the 0th element
# ---- temp chimes code. todo move this --------------------
self.config['chimes'].reverse()
for i in range(len(self.config['chimes'])):
if self.config['chimes'][i]:
self.machine.events.add_handler(event='reel_' +
self.reels[
i].name + '_advance',
handler=self.chime,
chime=self.config['chimes'][i])
# ---- temp chimes code end --------------------------------
# register for events
self.machine.events.add_handler('init_phase_4',
self.initialize)
self.machine.events.add_handler('timer_tick', self.tick)
# Need to hook this in case reels aren't done when ball ends
self.machine.events.add_handler('ball_ending', self._ball_ending, 900)
# ----- temp method for chime ------------------------------------
def chime(self, chime):
self.machine.coils[chime].pulse()
# ---- temp chimes code end --------------------------------------
@property
def assumed_value_list(self):
# List that holds the values of the reels in the group
value_list = []
for reel in self.reels:
if reel:
value_list.append(reel.assumed_value)
# Used lambda above so this list will always lookup the latest
else:
value_list.append(None)
return value_list
@property
def assumed_value_int(self):
# Integer representation of the value we assume is shown on this
# ScoreReelGroup. A value of -999 means the value is unknown.
return self.reel_list_to_int(self.assumed_value_list)
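    # Hedged illustration (made-up reel positions): because the reel list is
    # ordered ones digit first, an assumed_value_list of [0, 5, 2, 1] maps to
    # an assumed_value_int of 1250; when the group's value cannot be
    # determined, the integer is reported as -999 (see the note above).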
def initialize(self):
"""Initialized the score reels by reading their current physical values
and setting each reel's rollover reel. This is a separate method since
it can't run int __iniit__() because all the other reels have to be
setup first.
"""
self.get_physical_value_list()
self.set_rollover_reels()
def set_rollover_reels(self):
"""Calls each reel's `_set_rollover_reel` method and passes it a
pointer to the next higher up reel. This is how we know whether we're
able to advance the next higher up reel when a particular reel rolls
over during a step advance.
"""
for reel in range(len(self.reels)):
if self.reels[reel] and (reel < len(self.reels) - 1):
self.reels[reel]._set_rollover_reel(self.reels[reel + 1])
def tick(self):
"""Automatically called once per machine tick and checks to see if there
are any jumps or advances in progress, and, if so, calls those methods.
"""
if self.jump_in_progress:
self._jump_advance_step()
elif self.advance_queue:
self._step_advance_step()
elif not self.valid:
self.validate()
def is_desired_valid(self, notify_event=False):
"""Tests to see whether the machine thinks the ScoreReelGroup is
currently showing the desired value. In other words, is the
ScoreReelGroup "done" moving?
Note this ignores placeholder non-controllable digits.
Returns: True or False
"""
for i in range(len(self.reels)):
if self.reels[i]:
if self.assumed_value_list[i] != self.desired_value_list[i]:
if notify_event:
self.machine.events.post('scorereel_' +
self.reels[i].name +
'_resync')
return False
return True
def get_physical_value_list(self):
"""Queries all the reels in the group and builds a list of their actual
current physical state, with either the value of the current switch
or -999 if no switch is active.
This method also updates each reel's physical value.
Returns: List of physical reel values.
"""
output_list = []
for reel in self.reels:
if reel:
output_list.append(reel.check_hw_switches())
return output_list
def validate(self, value=None):
"""Called to validate that this score reel group is in the position
the machine wants it to be in.
If lazy or strict confirm is enabled, this method will also make sure
the reels are in their proper physical positions.
Args:
value (ignored): This method takes an argument of `value`, but
it's not used. It's only there because when reels post their
events after they're done moving, they include a parameter of
`value` which is the position they're in. So we just need to
have this argument listed so we can use this method as an event
handler for those events.
"""
self.log.debug("Checking to see if score reels are valid.")
# Can't validate until the reels are done moving. This shouldn't happen
# but we look for it just in case.
if self.jump_in_progress or self.advance_queue:
return False
# If any reels are set to lazy or strict confirm, we're only going to
# validate if they've hw_confirmed
for reel in self.reels:
if (reel and
(reel.config['confirm'] == 'lazy' or
reel.config['confirm'] == 'strict') and
not reel.hw_sync):
return False # need hw_sync to proceed
self.log.debug("Desired list: %s", self.desired_value_list)
self.log.debug("Assumed list: %s", self.assumed_value_list)
self.log.debug("Assumed integer: %s", self.assumed_value_int)
try:
self.log.debug("Player's Score: %s",
self.machine.game.player.score)
except:
pass
# todo if confirm is set to none, should we at least wait until the
# coils are not energized to validate?
if not self.is_desired_valid(notify_event=True):
# FYI each reel will hw check during hw_sync, so if there's a
# misfire that we can know about then it will be caught here
self.machine.events.post('scorereelgroup_' + self.name + '_resync')
self.set_value(value_list=self.desired_value_list)
return False
self.valid = True
self.machine.events.post('scorereelgroup_' + self.name + '_valid',
value=self.assumed_value_int)
if self.wait_for_valid_queue:
self.log.debug("Found a wait queue. Clearing now.")
self.wait_for_valid_queue.clear()
self.wait_for_valid_queue = None
return True
def add_value(self, value, jump=False, target=None):
"""Adds value to a ScoreReelGroup.
You can also pass a negative value to subtract points.
You can control the logistics of how these pulses are applied via the
`jump` parameter. If jump is False (default), then this method will
respect the proper "sequencing" of reel advances. For example, if the
current value is 1700 and the new value is 2200, this method will fire
the hundreds reel twice (to go to 1800 then 1900), then on the third
pulse it will fire the thousands and hundreds (to go to 2000), then do
the final two pulses to land at 2200.
Args:
value: The integer value you'd like to add to (or subtract
from) the current value
jump: Optional boolean value which controls whether the reels should
"count up" to the new value in the classic EM way (jump=False)
or whether they should just jump there as fast as they can
(jump=True). Default is False.
target: Optional integer that's the target for where this reel group
should end up after it's done advancing. If this is not
specified then the target value will be calculated based on the
                current reel positions, though sometimes this gets wonky if the
reel is jumping or moving, so it's best to specify the target if
you can.
"""
self.log.debug("Adding '%s' to the displayed value. Jump=%s", value,
jump)
# As a starting point, we'll base our assumed current value of the reels
# based on whatever the machine thinks they are. This is subject to
# change, which is why we use our own variable here.
current_reel_value = self.assumed_value_int
if self.jump_in_progress:
self.log.debug("There's a jump in progress, so we'll just change "
"the target of the jump to include our values.")
# We'll base our desired value off whatever the reel is advancing
# plus our value, because since there's a jump in progress we have
# no idea where the reels are at this exact moment
current_reel_value = self.reel_list_to_int(self.desired_value_list)
jump = True
if current_reel_value == - 999:
            self.log.debug("Current displayed value is unknown, "
"so we're jumping to the new value.")
current_reel_value = 0
jump = True
# If we have a target, yay! (And thank you to whatever called this!!)
# If not we have to use our current_reel_value as the baseline which is
# fine, but it makes a lot of assumptions
if target is None:
target = current_reel_value + value
elif value < 0:
self.log.debug("add_value is negative, so we're subtracting this "
"value. We will do this via a jump.")
jump = True
# If we have to jump to this new value (for whatever reason), go for it
if jump:
self.set_value(target)
# Otherwise we can do the cool step-wise advance
else:
self.desired_value_list = self.int_to_reel_list(target)
self._step_advance_add_steps(value)
def set_value(self, value=None, value_list=None):
"""Resets the score reel group to display the value passed.
This method will "jump" the score reel group to display the value
        that's passed to it. (Note this "jump" technique means it will just
move the reels as fast as it can, and nonsensical values might show up
on the reel while the movement is in progress.)
This method is used to "reset" a reel group to all zeros at the
beginning of a game, and can also be used to reset a reel group that is
confused or to switch a reel to the new player's score if multiple
        players are sharing the same reel group.
Note you can choose to pass either an integer representation of the
value, or a value list.
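        For illustration (example values only): on a four-reel group with all
        positions physical, set_value(2100) is equivalent to
        set_value(value_list=[0, 0, 1, 2]), since int_to_reel_list() returns
        the digits least-significant first.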
Args:
value: An integer value of what the new displayed value (i.e. score)
should be. This is the default option if you only pass a single
positional argument, e.g. `set_value(2100)`.
value_list: A list of the value you'd like the reel group to
display.
"""
if value is None and value_list is None:
return # we can't do anything here if we don't get a new value
if value_list is None:
value_list = self.int_to_reel_list(value)
self.log.debug("Jumping to %s.", value_list)
# set the new desired value which we'll use to verify the reels land
# where we want them to.
self.desired_value_list = value_list
self.log.debug("set_value() just set DVL to: %s",
self.desired_value_list)
self._jump_advance_step()
def _jump_advance_step(self):
# Checks the assumed values of the reels in the group, and if they're
# off will automatically correct them.
self.jump_in_progress = True
self.valid = False
if self.is_desired_valid():
self.log.debug("They match! Jump is done.")
self._jump_advance_complete()
return
reels_needing_advance = [] # reels that need to be advanced
num_energized = 0 # count of the number of coils currently energized
current_time = time.time() # local reference for speed
# loop through the reels one by one
for i in range(len(self.reels)):
this_reel = self.reels[i] # local reference for speed
if this_reel:
# While we're in here let's get a count of the total number
# of reels that are energized
if (this_reel.config['coil_inc'].
time_when_done > current_time):
num_energized += 1
# Does this reel want to be advanced, and is it ready?
if (self.desired_value_list[i] !=
self.assumed_value_list[i] and
this_reel.ready):
# Do we need (and have) hw_sync to advance this reel?
if (self.assumed_value_list[i] == -999 or
this_reel.config['confirm'] == 'strict'):
if this_reel.hw_sync:
reels_needing_advance.append(this_reel)
elif this_reel.ready:
reels_needing_advance.append(this_reel)
# How many reels can we advance now?
coils_this_round = (self.config['max_simultaneous_coils'] -
num_energized)
# sort by last firing time, oldest first (so those are fired first)
reels_needing_advance.sort(key=lambda x: x.next_pulse_time)
if len(reels_needing_advance) < coils_this_round:
coils_this_round = len(reels_needing_advance)
for i in range(coils_this_round):
reels_needing_advance[i].advance(direction=1)
# Any leftover reels that don't get fired this time will get picked up
# whenever the next reel changes state and this method is called again.
def _jump_advance_complete(self):
# Called when a jump advance routine is complete and the score reel
# group has been validated.
self.log.debug("Jump complete")
self.log.debug("Assumed values: %s", self.assumed_value_list)
self.log.debug("Desired values: %s", self.desired_value_list)
self.jump_in_progress = False
def _step_advance_add_steps(self, value):
# Receives an integer value, converts it to steps, adds them to the
# step queue, and kicks off the step advance process. For example,
        # adding a value of 210 would result in the following items added to the
# advance queue: [coil_10, coil_100, coil_100]
value_list = self.int_to_reel_list(value)
self.log.debug("Will add '%s' to this reel group", value)
for position in range(len(value_list)):
if value_list[position]:
for num in range(value_list[position]):
self.advance_queue.append(self.reels[position])
# if there's a jump in progress we don't want to step on it, so we'll
# just do nothing more here and _step_advance_step will be called when
# the jump is done since we have entries in the advance queue
if not self.jump_in_progress:
self._step_advance_step()
def _step_advance_step(self):
# Attempts to kick off any advances that are in the advance_queue, but
        # that's not always possible. (For example, all the reels might be busy.)
# todo if reel status is bad, do something
if not self.advance_queue:
self.validate()
return
self.valid = False
# set our working reel to be the next one in the queue
reel = self.advance_queue[0]
# Check to see if this reel is ready. "Ready" depends on whether we're
# using strict confirmation or not.
# todo what if the hw is -999. Then we should return if we don't have
# hw_sync also, right?
if reel.config['confirm'] == 'strict' and not reel.hw_sync:
return
elif not reel.ready:
return
        # is this reel going to need a buddy pulse?
self.log.debug("Reel: %s, Limit: %s, Current assumed value: %s",
reel.name, reel.config['limit_hi'], reel.assumed_value)
if (reel.config['limit_hi'] == reel.assumed_value and
not reel.rollover_reel_advanced):
buddy_pulse = True
# track that we've already ordered the buddy pulse so it doesn't
# happen twice if this reel can't fire now for some reason
reel.rollover_reel_advanced = True
self.log.debug("Setting buddy pulse")
else:
buddy_pulse = False
# todo we may not need the rollover_reel_advanced tracker anymore since
# we wrapped the reel.advance below in an if block.
        # remove this reel from the advance queue
self.advance_queue.popleft()
        # try to advance the reel. We use `if` here so this code block only runs
# if the reel accepted our advance request
if reel.advance(direction=1):
self.log.debug("Reel '%s' accepted advance", reel.name)
self.log.debug("Reels (assumed): %s", self.assumed_value_int)
try:
self.log.debug("Score: %s",
self.machine.game.player.score)
except:
pass
self.machine.events.post('reel_' + reel.name + "_advance")
# todo should this advance event be posted here? Or by the reel?
# Add the reel's buddy to the advance queue
if buddy_pulse:
# insert the rollover reel
if reel.rollover_reel:
self.advance_queue.appendleft(reel.rollover_reel)
# run through this again now so we pulse the buddy reel
# immediately (assuming we don't have too many pulsing
# currently, etc.)
self._step_advance_step()
else:
# whoops, we don't have a rollover reel. Yay for player!
self.machine.events.post('scorereelgroup_' + self.name +
'_rollover')
else: # the reel did not accept the advance. Put it back in the queue
self.advance_queue.appendleft(reel)
self.log.debug("Reel '%s' rejected advance. We'll try again.",
reel.name)
def int_to_reel_list(self, value):
"""Converts an integer to a list of integers that represent each
positional digit in this ScoreReelGroup.
The list returned is in reverse order. (See the example below.)
The list returned is customized for this ScoreReelGroup both in terms
of number of elements and values of `None` used to represent blank
plastic zero inserts that are not controlled by a score reel unit.
For example, if you have a 5-digit score reel group that has 4
        physical reels in the tens through ten-thousands position and a fake
plastic "0" insert for the ones position, if you pass this method a
value of `12300`, it will return `[None, 0, 3, 2, 1]`
This method will pad shorter ints with zeros, and it will chop off
leading digits for ints that are too long. (For example, if you pass a
value of 10000 to a ScoreReelGroup which only has 4 digits, the
        returned list would correspond to 0000, since your score reel unit has
rolled over.)
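        More worked examples for the five-position group described above
        (illustrative only):
            int_to_reel_list(12300)  -> [None, 0, 3, 2, 1]
            int_to_reel_list(123450) -> [None, 5, 4, 3, 2]  (leading '1' chopped)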
Args:
            value: The integer value you'd like to convert.
Returns:
A list containing the values for each corresponding score reel,
with the lowest reel digit position in list position 0.
"""
if value == -999:
value = 0
# todo hack
output_list = []
# convert our number to a string
value = str(value)
# pad the string with leading zeros
value = value.zfill(len(self.reels))
# slice off excess characters if the value is longer than num of reels
# how many digits do we have to slice?
trim = len(value) - len(self.reels)
# and... slice!
value = value[trim:]
# todo if we don't do the above trim then it will just show the highest
        # digits, effectively "shifting" the score by one. Might be a fun feature?
# generate our list with one digit per item
for digit in value:
output_list.append(int(digit))
# reverse the list so the least significant is first
output_list.reverse()
# replace fake position digits with `None`
for i in range(len(output_list)):
if not self.reels[i]:
output_list[i] = None
return output_list
def reel_list_to_int(self, reel_list):
"""Converts an list of integers to a single integer.
This method is like `int_to_reel_list` except that it works in the
opposite direction.
The list inputted is expected to be in "reverse" order, with the ones
digit in the [0] index position. Values of `None` are converted to
zeros. For example, if you pass `[None, 0, 3, 2, 1]`, this method will
return an integer value of `12300`.
Note this method does not take into consideration how many reel
positions are in this ScoreReelGroup. It just converts whatever you
pass it.
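        Worked examples (illustrative only):
            reel_list_to_int([None, 0, 3, 2, 1]) -> 12300
            reel_list_to_int([5, -999, 2])       -> -999  (any unknown reel)
        Note that the list passed in is reversed in place as a side effect.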
Args:
            reel_list: The list containing the values for each score reel
position.
Returns:
The resultant integer based on the list passed.
"""
# reverse the list so it's in normal order
reel_list.reverse()
output = ""
for item in reel_list:
if type(item) is int:
if item == -999: # if any reels are unknown, then our int
                    return -999 # is unknown too.
else:
output += str(item)
elif type(item) is str and item.isdigit():
                # Just in case we have a number that's a string
output += str(int(item)) # ensure no leading zeros
else:
output += "0"
return int(output)
    def light(self, relight_on_valid=False, **kwargs):
        """Lights up this ScoreReelGroup based on the 'lights_tag' in its
config.
"""
self.log.debug("Turning on Lights")
for light in self.machine.lights.items_tagged(
self.config['lights_tag']):
light.on()
self.lit = True
# Watch for these reels going out of sync so we can turn off the lights
# while they're resyncing
self.unlight_on_resync_key = self.machine.events.add_handler(
'scorereelgroup_' + self.name + '_resync',
self.unlight,
relight_on_valid=True)
if relight_on_valid:
self.machine.events.remove_handler_by_key(self.light_on_valid_key)
def unlight(self, relight_on_valid=False, **kwargs):
"""Turns off the lights for this ScoreReelGroup based on the
        'lights_tag' in its config.
"""
self.log.debug("Turning off Lights")
for light in self.machine.lights.items_tagged(
self.config['lights_tag']):
light.off()
self.lit = False
if relight_on_valid:
self.light_on_valid_key = self.machine.events.add_handler(
'scorereelgroup_' + self.name + '_valid',
self.light,
relight_on_valid=True)
else:
self.machine.events.remove_handler_by_key(
self.unlight_on_resync_key)
def _ball_ending(self, queue=None):
# We need to hook the ball_ending event in case the ball ends while the
# score reel is still catching up.
# only do this if this is the active group
if self.machine.score_reel_controller.active_scorereelgroup != self:
return
if not self.valid:
self.log.debug("Score reel group is not valid. Setting a queue")
self.wait_for_valid_queue = queue
self.wait_for_valid_queue.wait()
else:
self.log.debug("Score reel group is valid. No queue needed.")
class ScoreReel(Device):
"""Represents an individual electro-mechanical score reel in a pinball
machine.
    Multiple reels of this class can be grouped together into ScoreReelGroups
which collectively make up a display like "Player 1 Score" or "Player 2
card value", etc.
This device class is used for all types of mechanical number reels in a
machine, including reels that have more than ten numbers and that can move
in multiple directions (such as the credit reel).
"""
config_section = 'score_reels'
collection = 'score_reels'
class_label = 'score_reel'
def __init__(self, machine, name, config, collection=None, validate=True):
super(ScoreReel, self).__init__(machine, name, config, collection,
validate=validate)
self.delay = DelayManager()
self.rollover_reel_advanced = False
# True when a rollover pulse has been ordered
self.value_switches = []
# This is a list with each element corresponding to a value on the
# reel. An entry of None means there's no value switch there. An entry
# of a reference to a switch object (todo or switch name?) means there
# is a switch there.
self.num_values = 0
# The number of values on this wheel. This starts with zero, so a
# wheel with 10 values will have this value set to 9. (This actually
# makes sense since most (all?) score reels also have a zero value.)
self.physical_value = -999
# The physical confirmed value of this reel. This will always be the
# value of whichever switch is active or -999. This differs from
# `self.assumed_value` in that assumed value will make assumptions about
        # where the reel is as it pulses through values with no switches,
# whereas this physical value will always be -999 if there is no switch
# telling it otherwise.
# Note this value will be initialized via self.check_hw_switches()
# below.
self.hw_sync = False
        # Specifies whether this reel has verified its positions via the
        # switches since it was last advanced.
self.ready = True
# Whether this reel is ready to advance. Typically used to make sure
# it's not trying to re-fire a stuck position.
self.assumed_value = -999
self.assumed_value = self.check_hw_switches()
# The assumed value the machine thinks this reel is showing. A value
# of -999 indicates that the value is unknown.
self.next_pulse_time = 0
# The time when this reel next wants to be pulsed. The reel will set
# this on its own (based on its own attribute of how fast pulses can
# happen). If the ScoreReelController is ready to pulse this reel and
# the value is in the past, it will do a pulse. A value of 0 means this
# reel does not currently need to be pulsed.
self.rollover_reel = None
# A reference to the ScoreReel object of the next higher reel in the
# group. This is used so the reel can notify its neighbor that it needs
# to advance too when this reel rolls over.
self.misfires = dict()
# Counts the number of "misfires" this reel has, which is when we
# advanced a reel to a value where we expected a switch to activate but
# didn't receive that activation as expected. This is a dictionary with
# the key equal to the switch position and the value is a tuple with
# the first entry being the number of misfires this attempt, and the
# second value being the number of misfires overall.
self._destination_index = 0
# Holds the index of the destination the reel is trying to advance to.
# todo add some kind of status for broken?
self.log.debug("Configuring score reel with: %s", self.config)
# figure out how many values we have
# Add 1 so range is inclusive of the lower limit
self.num_values = self.config['limit_hi'] - \
self.config['limit_lo'] + 1
self.log.debug("Total reel values: %s", self.num_values)
for value in range(self.num_values):
self.value_switches.append(self.config.get('switch_' + str(value)))
@property
def pulse_ms(self, direction=1):
"""Returns an integer representing the number of milliseconds the coil
will pulse for.
This method is used by the jump and step advances so they know when a
reel's coil is done firing so they can fire the next reel in the group.
Args:
direction (int, optional): Lets you specify which coil you want to
get the time for. Default is 1 (up), but you can also specify -1 (
down).
        Returns: Integer of the coil pulse time. If there is no coil for the
direction you specify, returns 0.
"""
if direction == 1:
return self.config['coil_inc'].config['pulse_ms']
elif self.config['coil_dec']:
return self.config['coil_dec'].config['pulse_ms']
else:
return 0
def logical_to_physical(self, value):
"""Converts a logical reel displayed value to what the physical switch
value should be.
For example, if a reel has switches for the 0 and 9 values, then an
input of 0 will return 0 (since that's what the physical value should
be for that logical value). In that case it will return 9 for an input
of 9, but it will return -999 for any input value of 1 through 8 since
there are no switches for those values.
Note this method does not perform any physical or logical check against
the reel's actual position, rather, it's only used to indicate what
hardware switch value should be expected for the display value passed.
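        Worked examples for the 0-and-9-switch reel described above
        (illustrative only):
            logical_to_physical(0) -> 0
            logical_to_physical(5) -> -999  (no switch at position 5)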
Args:
value (int): The value you want to check.
Returns:
            The physical switch value, which is the same as the input value if
there's a switch there, or -999 if not.
"""
if value != -999:
if self.value_switches[value]:
return value
else:
return -999
return -999
def _set_rollover_reel(self, reel):
        # Sets this reel's rollover_reel to the object of the next higher
# reel
self.log.debug("Setting rollover reel: %s", reel.name)
self.rollover_reel = reel
def advance(self, direction=None):
"""Performs the coil firing to advance this reel one position (up or
down).
This method also schedules delays to post the following events:
`reel_<name>_pulse_done`: When the coil is done pulsing
`reel_<name>_ready`: When the config['repeat_pulse_time'] time is up
`reel_<name>_hw_value`: When the config['hw_confirm_time'] time is up
Args:
direction (int, optional): If direction is 1, advances the reel
to the next higher position. If direction is -1, advances the
reel down one position (if the reel has a decrement coil). If
direction is not passed, this method will compare the reel's
`_destination_index` to its `assumed_value` and will advance it
in the direction it needs to go if those values do not match.
Returns: If this method is unable to advance the reel (either because
it's not ready, because it's at its maximum value and does not have
rollover capabilities, or because you're trying to advance it in a
direction but it doesn't have a coil for that direction), it will
return `False`. If it's able to pulse the advance coil, it returns
`True`.
"""
self.log.debug("Received command advance Reel in direction: '%s'",
direction)
if not direction:
# A direction wasn't specified, but let's see if this reel wants
# to be in another position and fire it if so
if (self._destination_index != self.assumed_value and
self.config['rollover']):
direction = 1
elif (self._destination_index < self.assumed_value and
self.config['coil_dec']):
direction = -1
else: # no direction specified and everything seems ok
return
self.set_destination_value(direction)
# above line also sets self._destination_index
if self.next_pulse_time > time.time():
# This reel is not ready to pulse again
# Note we don't allow this to be overridden. Figure the
# recycle time is there for a reason and we don't want to
# potentially break an old delicate mechanism
self.log.debug("Received advance request but this reel is not "
"ready")
return False # since we didn't advance...in case anyone cares?
if direction == 1:
# Ensure we're not at the limit of a reel that can't roll over
if not ((self.physical_value == self.config['limit_hi']) and
not self.config['rollover']):
self.log.debug("Ok to advance")
# Since we're firing, assume we're going to make it
self.assumed_value = self._destination_index
self.log.debug("+++Setting assumed value to: %s",
self.assumed_value)
# Reset our statuses (stati?) :)
self.ready = False
self.hw_sync = False
# fire the coil
self.config['coil_inc'].pulse()
# set delay to notify when this reel can be fired again
self.delay.add(name='ready_to_fire',
ms=self.config['repeat_pulse_time'],
callback=self._ready_to_fire)
self.next_pulse_time = (time.time() +
(self.config['repeat_pulse_time'] /
1000.0))
self.log.debug("@@@ New Next pulse ready time: %s",
self.next_pulse_time)
# set delay to check the hw switches
self.delay.add(name='hw_switch_check',
ms=self.config['hw_confirm_time'],
callback=self.check_hw_switches)
return True
else:
self.log.warning("Received command to increment reel, but "
"we're at the max limit and this reel "
"cannot roll over")
return False
# if direction is not 1 we'll assume down, but only if we have
# the ability to decrement this reel
elif 'coil_dec' in self.config:
return False # since we haven't written this yet todo
# todo log else error?
def _pulse_done(self):
# automatically called (via a delay) after the reel fires to post an
# event that the reel's coil is done pulsing
self.machine.events.post('reel_' + self.name + "_pulse_done")
def _ready_to_fire(self):
# automatically called (via a delay) after the reel fires to post an
# event that the reel is ready to fire again
self.ready = True
self.machine.events.post('reel_' + self.name + "_ready")
def check_hw_switches(self, no_event=False):
"""Checks all the value switches for this score reel.
This check only happens if `self.ready` is `True`. If the reel is not
ready, it means another advance request has come in after the initial
        one. In that case, the subsequent advance will call this method
        again after that advance is done.
If this method finds an active switch, it sets `self.physical_value` to
that. Otherwise it sets it to -999. It will also update
`self.assumed_value` if it finds an active switch. Otherwise it leaves
that value unchanged.
This method is automatically called (via a delay) after the reel
advances. The delay is based on the config value
`self.config['hw_confirm_time']`.
TODO: What happens if there are multiple active switches? Currently it
will return the highest one. Is that ok?
Args:
no_event: A boolean switch that allows you to suppress the event
posting from this call if you just want to update the values.
Returns: The hardware value of the switch, either the position or -999.
If the reel is not ready, it returns `False`.
"""
# check to make sure the 'hw_confirm_time' time has passed. If not then
# we cannot trust any value we read from the switches
if (self.config['coil_inc'].time_last_changed +
(self.config['hw_confirm_time'] / 1000.0) <= time.time()):
self.log.debug("Checking hw switches to determine reel value")
value = -999
for i in range(len(self.value_switches)):
if self.value_switches[i]: # not all values have a switch
if self.machine.switch_controller.is_active(
self.value_switches[i].name):
value = i
self.log.debug("+++Setting hw value to: %s", value)
self.physical_value = value
self.hw_sync = True
# only change this if we know where we are or can confirm that
# we're not in the right position
if value != -999:
self.assumed_value = value
# if value is -999, but we have a switch for the assumed value,
# then we're in the wrong position because our hw_value should be
# at the assumed value
elif (self.assumed_value != -999 and
self.value_switches[self.assumed_value]):
self.assumed_value = -999
if not no_event:
self.machine.events.post('reel_' + self.name + "_hw_value",
value=value)
return value
else:
return False
    def set_destination_value(self, direction=1):
        """Sets and returns the integer value of the destination this reel
        is moving to.
Args:
direction (int, optional): The direction of the reel movement this
                method should get the value for. Default is 1, which means
                'up'. You can pass -1 for the next lower value.
Returns: The value of the destination. If the current
`self.assumed_value` is -999, this method will always return -999
since it doesn't know where the reel is and therefore doesn't know
what the destination value would be.
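        Worked examples for a ten-value reel, i.e. num_values == 10
        (illustrative only):
            assumed_value == 5, direction == 1  -> destination 6
            assumed_value == 9, direction == 1  -> destination 0 (rollover)
            assumed_value == -999               -> destination -999 (unknown)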
"""
# We can only know if we have a destination if we know where we are
self.log.debug("@@@ set_destination_value")
self.log.debug("@@@ old destination_index: %s",
self._destination_index)
if self.assumed_value != -999:
if direction == 1:
self._destination_index = self.assumed_value + 1
if self._destination_index > (self.num_values - 1):
self._destination_index = 0
if self._destination_index == 1:
self.rollover_reel_advanced = False
self.log.debug("@@@ new destination_index: %s",
self._destination_index)
return self._destination_index
elif direction == -1:
self._destination_index = self.assumed_value - 1
if self._destination_index < 0:
self._destination_index = (self.num_values - 1)
self.log.debug("@@@ new destination_index: %s",
self._destination_index)
return self._destination_index
else:
self.log.debug("@@@ new destination_index: -999")
self._destination_index = -999
return -999
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| {
"content_hash": "f47708b3ee47a8edba375faaf5b5c24e",
"timestamp": "",
"source": "github",
"line_count": 1267,
"max_line_length": 83,
"avg_line_length": 42.42620363062352,
"alnum_prop": 0.6001599880939092,
"repo_name": "spierepf/mpf",
"id": "d071bcc72ecb3cb327a8b3df2dc88f9946568bd6",
"size": "53754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mpf/devices/score_reel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1839"
},
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "1685472"
},
{
"name": "Shell",
"bytes": "729"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topics', '0022_auto_20170306_0242'),
]
operations = [
migrations.AddField(
model_name='programmingexerciselanguage',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
]
| {
"content_hash": "9a69269531ab27c08165c3b43a7fcc14",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 22.473684210526315,
"alnum_prop": 0.5971896955503513,
"repo_name": "uccser/cs-unplugged",
"id": "acf88f1882b36425727dcd71150192796178178a",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/topics/migrations/0023_programmingexerciselanguage_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
import six
from solum.common import exception
from solum.tests import base
class ExceptionTestCase(base.BaseTestCase):
"""Test cases for exception code."""
def test_with_kwargs(self):
exc = exception.ResourceNotFound(name='application', id='green_paint')
self.assertIn('green_paint could not be found.',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_with_kwargs_ru(self):
exc = exception.ResourceNotFound(name='application',
id=u'зеленой_краской')
self.assertIn(u'зеленой_краской could not be found',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_bad_kwargs_exception(self):
cfg.CONF.set_override('fatal_exception_format_errors', True)
self.assertRaises(KeyError,
exception.ResourceNotFound, a_field='green')
def test_bad_kwargs(self):
cfg.CONF.set_override('fatal_exception_format_errors', False)
exc = exception.ResourceNotFound(a_field='green')
self.assertIn('An unknown exception occurred', six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_resource_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_application_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_not_implemented(self):
exc = exception.NotImplemented()
self.assertIn("The requested operation is not implemented.",
six.text_type(exc))
self.assertEqual(exc.code, 501)
| {
"content_hash": "8aff2a21ebe56ad829d5d310f39c47f6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 37.86274509803921,
"alnum_prop": 0.6240290005178664,
"repo_name": "jamesyli/solum",
"id": "e7e31d97599667eb51ef14ee0b9f01b7d8dd677b",
"size": "2567",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "solum/tests/common/test_exception_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import mock
from oslo_utils import importutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
import testtools
from heat_docker.resources import docker_container
from heat_docker.tests import fake_docker_client as fakeclient
docker = importutils.try_import('docker')
template = '''
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Test template",
"Parameters": {},
"Resources": {
"Blog": {
"Type": "DockerInc::Docker::Container",
"Properties": {
"image": "samalba/wordpress",
"env": [
"FOO=bar"
]
}
}
}
}
'''
class DockerContainerTest(common.HeatTestCase):
def setUp(self):
super(DockerContainerTest, self).setUp()
for res_name, res_class in docker_container.resource_mapping().items():
resource._register_class(res_name, res_class)
self.addCleanup(self.m.VerifyAll)
def create_container(self, resource_name):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
resource = docker_container.DockerContainer(
resource_name,
self.stack.t.resource_definitions(self.stack)[resource_name],
self.stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(
fakeclient.FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
return resource
def get_container_state(self, resource):
client = resource.get_client()
return client.inspect_container(resource.resource_id)['State']
def test_resource_create(self):
container = self.create_container('Blog')
self.assertTrue(container.resource_id)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
client = container.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertIsNone(client.container_create[0]['name'])
def test_create_with_name(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['name'] = 'super-blog'
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(
fakeclient.FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual('super-blog', client.container_create[0]['name'])
@mock.patch.object(docker_container.DockerContainer, 'get_client')
def test_create_failed(self, test_client):
mock_client = mock.Mock()
mock_client.inspect_container.return_value = {
"State": {
"ExitCode": -1
}
}
mock_client.logs.return_value = "Container startup failed"
test_client.return_value = mock_client
mock_stack = mock.Mock()
mock_stack.has_cache_data.return_value = False
mock_stack.db_resource_get.return_value = None
res_def = mock.Mock(spec=rsrc_defn.ResourceDefinition)
docker_res = docker_container.DockerContainer("test", res_def,
mock_stack)
exc = self.assertRaises(exception.ResourceInError,
docker_res.check_create_complete,
'foo')
self.assertIn("Container startup failed", six.text_type(exc))
def test_start_with_bindings_and_links(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['port_bindings'] = {
'80/tcp': [{'HostPort': '80'}]}
definition['Properties']['links'] = {'db': 'mysql'}
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
self.m.StubOutWithMock(resource, 'get_client')
resource.get_client().MultipleTimes().AndReturn(
fakeclient.FakeDockerClient())
self.assertIsNone(resource.validate())
self.m.ReplayAll()
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'db': 'mysql'}, client.container_start[0]['links'])
self.assertEqual(
{'80/tcp': [{'HostPort': '80'}]},
client.container_start[0]['port_bindings'])
def test_resource_attributes(self):
container = self.create_container('Blog')
# Test network info attributes
self.assertEqual('172.17.42.1', container.FnGetAtt('network_gateway'))
self.assertEqual('172.17.0.3', container.FnGetAtt('network_ip'))
self.assertEqual('1080', container.FnGetAtt('network_tcp_ports'))
self.assertEqual('', container.FnGetAtt('network_udp_ports'))
# Test logs attributes
self.assertEqual('---logs_begin---', container.FnGetAtt('logs_head'))
self.assertEqual('---logs_end---', container.FnGetAtt('logs_tail'))
# Test a non existing attribute
self.assertRaises(exception.InvalidTemplateAttribute,
container.FnGetAtt, 'invalid_attribute')
def test_resource_delete(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
self.assertEqual((container.DELETE, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
def test_resource_already_deleted(self):
container = self.create_container('Blog')
scheduler.TaskRunner(container.delete)()
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
@testtools.skipIf(docker is None, 'docker-py not available')
def test_resource_delete_exception(self):
response = mock.MagicMock()
response.status_code = 404
response.content = 'some content'
container = self.create_container('Blog')
self.m.StubOutWithMock(container.get_client(), 'kill')
container.get_client().kill(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.StubOutWithMock(container, '_get_container_status')
container._get_container_status(container.resource_id).AndRaise(
docker.errors.APIError('Not found', response))
self.m.ReplayAll()
scheduler.TaskRunner(container.delete)()
self.m.VerifyAll()
def test_resource_suspend_resume(self):
container = self.create_container('Blog')
# Test suspend
scheduler.TaskRunner(container.suspend)()
self.assertEqual((container.SUSPEND, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(False, running)
# Test resume
scheduler.TaskRunner(container.resume)()
self.assertEqual((container.RESUME, container.COMPLETE),
container.state)
running = self.get_container_state(container)['Running']
self.assertIs(True, running)
def test_start_with_restart_policy_no(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['restart_policy'] = {
'Name': 'no', 'MaximumRetryCount': 0}
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(resource.validate())
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'Name': 'no', 'MaximumRetryCount': 0},
client.container_start[0]['restart_policy'])
def test_start_with_restart_policy_on_failure(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['restart_policy'] = {
'Name': 'on-failure', 'MaximumRetryCount': 10}
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(resource.validate())
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'Name': 'on-failure', 'MaximumRetryCount': 10},
client.container_start[0]['restart_policy'])
def test_start_with_restart_policy_always(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['restart_policy'] = {
'Name': 'always', 'MaximumRetryCount': 0}
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(resource.validate())
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual({'Name': 'always', 'MaximumRetryCount': 0},
client.container_start[0]['restart_policy'])
def test_start_with_caps(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['cap_add'] = ['NET_ADMIN']
definition['Properties']['cap_drop'] = ['MKNOD']
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(resource.validate())
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual(['NET_ADMIN'], client.container_start[0]['cap_add'])
self.assertEqual(['MKNOD'], client.container_start[0]['cap_drop'])
def test_start_with_read_only(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['read_only'] = True
resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
get_client_mock.return_value.set_api_version('1.17')
self.assertIsNone(resource.validate())
scheduler.TaskRunner(resource.create)()
self.assertEqual((resource.CREATE, resource.COMPLETE),
resource.state)
client = resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertIs(True, client.container_start[0]['read_only'])
def arg_for_low_api_version(self, arg, value, low_version):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties'][arg] = value
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
get_client_mock.return_value.set_api_version(low_version)
msg = self.assertRaises(docker_container.InvalidArgForVersion,
my_resource.validate)
min_version = docker_container.MIN_API_VERSION_MAP[arg]
args = dict(arg=arg, min_version=min_version)
expected = _('"%(arg)s" is not supported for API version '
'< "%(min_version)s"') % args
self.assertEqual(expected, six.text_type(msg))
def test_start_with_read_only_for_low_api_version(self):
self.arg_for_low_api_version('read_only', True, '1.16')
def test_compare_version(self):
self.assertEqual(docker_container.compare_version('1.17', '1.17'), 0)
self.assertEqual(docker_container.compare_version('1.17', '1.16'), -1)
self.assertEqual(docker_container.compare_version('1.17', '1.18'), 1)
def test_create_with_cpu_shares(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['cpu_shares'] = 512
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(my_resource.validate())
scheduler.TaskRunner(my_resource.create)()
self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
my_resource.state)
client = my_resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual(512, client.container_create[0]['cpu_shares'])
def test_create_with_cpu_shares_for_low_api_version(self):
self.arg_for_low_api_version('cpu_shares', 512, '1.7')
def test_start_with_mapping_devices(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['devices'] = (
[{'path_on_host': '/dev/sda',
'path_in_container': '/dev/xvdc',
'permissions': 'r'},
{'path_on_host': '/dev/mapper/a_bc-d',
'path_in_container': '/dev/xvdd',
'permissions': 'rw'}])
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(my_resource.validate())
scheduler.TaskRunner(my_resource.create)()
self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
my_resource.state)
client = my_resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual(['/dev/sda:/dev/xvdc:r',
'/dev/mapper/a_bc-d:/dev/xvdd:rw'],
client.container_start[0]['devices'])
def test_start_with_mapping_devices_also_with_privileged(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['devices'] = (
[{'path_on_host': '/dev/sdb',
'path_in_container': '/dev/xvdc',
'permissions': 'r'}])
definition['Properties']['privileged'] = True
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(my_resource.validate())
scheduler.TaskRunner(my_resource.create)()
self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
my_resource.state)
client = my_resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertNotIn('devices', client.container_start[0])
def test_start_with_mapping_devices_for_low_api_version(self):
value = ([{'path_on_host': '/dev/sda',
'path_in_container': '/dev/xvdc',
'permissions': 'rwm'}])
self.arg_for_low_api_version('devices', value, '1.13')
def test_start_with_mapping_devices_not_set_path_in_container(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['devices'] = (
[{'path_on_host': '/dev/sda',
'permissions': 'rwm'}])
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(my_resource.validate())
scheduler.TaskRunner(my_resource.create)()
self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
my_resource.state)
client = my_resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual(['/dev/sda:/dev/sda:rwm'],
client.container_start[0]['devices'])
def test_create_with_cpu_set(self):
t = template_format.parse(template)
self.stack = utils.parse_stack(t)
definition = self.stack.t.resource_definitions(self.stack)['Blog']
definition['Properties']['cpu_set'] = '0-8,16-24,28'
my_resource = docker_container.DockerContainer(
'Blog', definition, self.stack)
get_client_mock = self.patchobject(my_resource, 'get_client')
get_client_mock.return_value = fakeclient.FakeDockerClient()
self.assertIsNone(my_resource.validate())
scheduler.TaskRunner(my_resource.create)()
self.assertEqual((my_resource.CREATE, my_resource.COMPLETE),
my_resource.state)
client = my_resource.get_client()
self.assertEqual(['samalba/wordpress'], client.pulled_images)
self.assertEqual('0-8,16-24,28',
client.container_create[0]['cpuset'])
def test_create_with_cpu_set_for_low_api_version(self):
self.arg_for_low_api_version('cpu_set', '0-8,^2', '1.11')
| {
"content_hash": "b51e53fb2753b3d836281d9874ff1633",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 79,
"avg_line_length": 46.48299319727891,
"alnum_prop": 0.6263720181472267,
"repo_name": "gonzolino/heat",
"id": "5b3bd9ee7e7d5794e115e563c57cebb65f1cf6fe",
"size": "21133",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "contrib/heat_docker/heat_docker/tests/test_docker_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7214144"
},
{
"name": "Shell",
"bytes": "32170"
}
],
"symlink_target": ""
} |
from __future__ import print_function
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
ROM_SIZE = 2 * 1024 * 1024
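# Illustrative note (example figures only): with the 2 MiB ROM_SIZE above, a
# 512 KiB (524288-byte) image would be padded with
# 2097152 - 524288 = 1572864 bytes of 0xFF before being written out.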
src, dest = sys.argv[1:]
print("Padding %s with 0xFF to create a ROM file %s" % (src, dest))
data = open(src, "rb").read()
print(len(data), ROM_SIZE - len(data))
data += b"\xFF" * (ROM_SIZE - len(data))
print(len(data))
open(dest, "wb").write(data)
| {
"content_hash": "e0ed50c52d7c2804b2851e3164b140d7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 29.225806451612904,
"alnum_prop": 0.7141280353200883,
"repo_name": "google/myelin-acorn-electron-hardware",
"id": "87e74e90aef954236ae9389fe4c28d6e99847afa",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "a3000_rom_emulator/os_switcher_bootloader/pad_to_rom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "35755"
},
{
"name": "Batchfile",
"bytes": "2930"
},
{
"name": "C",
"bytes": "10872"
},
{
"name": "C++",
"bytes": "182764"
},
{
"name": "HTML",
"bytes": "7245"
},
{
"name": "Java",
"bytes": "1186"
},
{
"name": "Makefile",
"bytes": "39894"
},
{
"name": "Processing",
"bytes": "4821"
},
{
"name": "Python",
"bytes": "399952"
},
{
"name": "Shell",
"bytes": "2572"
},
{
"name": "Tcl",
"bytes": "512"
},
{
"name": "TeX",
"bytes": "13986"
},
{
"name": "VHDL",
"bytes": "166794"
},
{
"name": "Verilog",
"bytes": "182557"
}
],
"symlink_target": ""
} |
"""
sentry.utils.data_filters.py
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import fnmatch
import ipaddress
import six
from django.utils.encoding import force_text
from sentry import tsdb
class FilterStatKeys(object):
IP_ADDRESS = 'ip-address'
RELEASE_VERSION = 'release-version'
ERROR_MESSAGE = 'error-message'
BROWSER_EXTENSION = 'browser-extensions'
LEGACY_BROWSER = 'legacy-browsers'
LOCALHOST = 'localhost'
WEB_CRAWLER = 'web-crawlers'
INVALID_CSP = 'invalid-csp'
CORS = 'cors'
DISCARDED_HASH = 'discarded-hash'
FILTER_STAT_KEYS_TO_VALUES = {
FilterStatKeys.IP_ADDRESS: tsdb.models.project_total_received_ip_address,
FilterStatKeys.RELEASE_VERSION: tsdb.models.project_total_received_release_version,
FilterStatKeys.ERROR_MESSAGE: tsdb.models.project_total_received_error_message,
FilterStatKeys.BROWSER_EXTENSION: tsdb.models.project_total_received_browser_extensions,
FilterStatKeys.LEGACY_BROWSER: tsdb.models.project_total_received_legacy_browsers,
FilterStatKeys.LOCALHOST: tsdb.models.project_total_received_localhost,
FilterStatKeys.WEB_CRAWLER: tsdb.models.project_total_received_web_crawlers,
FilterStatKeys.INVALID_CSP: tsdb.models.project_total_received_invalid_csp,
FilterStatKeys.CORS: tsdb.models.project_total_received_cors,
FilterStatKeys.DISCARDED_HASH: tsdb.models.project_total_received_discarded,
}
class FilterTypes(object):
ERROR_MESSAGES = 'error_messages'
RELEASES = 'releases'
def is_valid_ip(project, ip_address):
"""
Verify that an IP address is not being blacklisted
for the given project.
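    For illustration (hypothetical values): with a blacklist of
    ['127.0.0.1', '10.0.0.0/8'], '127.0.0.1' is rejected by the exact match,
    '10.1.2.3' is rejected by the CIDR check, and '192.168.0.1' passes.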
"""
blacklist = project.get_option('sentry:blacklisted_ips')
if not blacklist:
return True
for addr in blacklist:
# We want to error fast if it's an exact match
if ip_address == addr:
return False
        # Check to make sure it's actually a range before testing membership
try:
if '/' in addr and (
ipaddress.ip_address(six.text_type(ip_address)) in ipaddress.ip_network(
six.text_type(addr), strict=False
)
):
return False
except ValueError:
# Ignore invalid values here
pass
return True
def is_valid_release(project, release):
"""
Verify that a release is not being filtered
for the given project.
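    For illustration (hypothetical values): with the 'sentry:releases' option
    set to ['1.2.*', 'dev-*'], release '1.2.3' is filtered out (returns False)
    via the fnmatch check, while '1.3.0' passes.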
"""
invalid_versions = project.get_option('sentry:{}'.format(FilterTypes.RELEASES))
if not invalid_versions:
return True
release = force_text(release).lower()
for version in invalid_versions:
if fnmatch.fnmatch(release, version.lower()):
return False
return True
def is_valid_error_message(project, message):
"""
Verify that an error message is not being filtered
for the given project.
"""
filtered_errors = project.get_option('sentry:{}'.format(FilterTypes.ERROR_MESSAGES))
if not filtered_errors:
return True
message = force_text(message).lower()
for error in filtered_errors:
if fnmatch.fnmatch(message, error.lower()):
return False
return True
| {
"content_hash": "92cba4bb4489c72b94e8ac8d47af6e13",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 92,
"avg_line_length": 29.758928571428573,
"alnum_prop": 0.675967596759676,
"repo_name": "gencer/sentry",
"id": "2b675273597a7a1263d43a963bddbccc6c38f333",
"size": "3333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/utils/data_filters.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
} |
"""
A linked list is said to contain a cycle if any node is visited more than once while traversing the list.
Complete the function provided for you in your editor. It has one parameter: a pointer to a Node object named head that points to the head of a linked list. Your function must return a boolean denoting whether or not there is a cycle in the list. If there is a cycle, return true; otherwise, return false.
Note: If the list is empty, will be null.
Input Format
Our hidden code checker passes the appropriate argument to your function. You are not responsible for reading any input from stdin.
Constraints
Output Format
If the list contains a cycle, your function must return true. If the list does not contain a cycle, it must return false. The binary integer corresponding to the boolean value returned by your function is printed to stdout by our hidden code checker.
Sample Input
The following linked lists are passed as arguments to your function:
Sample Inputs
Sample Output
0
1
Explanation
The first list has no cycle, so we return false and the hidden code checker prints 0 to stdout.
The second list has a cycle, so we return true and the hidden code checker prints 1 to stdout.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the has_cycle function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
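# Illustrative note: the function below uses Floyd's "tortoise and hare"
# detection -- a slow pointer advancing one node per step and a fast pointer
# advancing two. If the list is cyclic the fast pointer eventually meets the
# slow one; if it reaches None there is no cycle. This runs in O(n) time with
# O(1) extra space.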
def has_cycle(head) -> bool:
    if head is None:
        return False
    slow = head
    fast = head.next
    while slow != fast:
        if fast is None or fast.next is None:
            return False
        slow = slow.next
        fast = fast.next.next
    return True
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
index = int(input())
llist_count = int(input())
llist = SinglyLinkedList()
for _ in range(llist_count):
llist_item = int(input())
llist.insert_node(llist_item)
extra = SinglyLinkedListNode(-1);
temp = llist.head;
for i in range(llist_count):
if i == index:
extra = temp
if i != llist_count-1:
temp = temp.next
temp.next = extra
result = has_cycle(llist.head)
fptr.write(str(int(result)) + '\n')
fptr.close() | {
"content_hash": "5df06a695946d738c6a2698c5f6a14f6",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 301,
"avg_line_length": 25.05426356589147,
"alnum_prop": 0.6036509900990099,
"repo_name": "lemming52/white_pawn",
"id": "369160fe1e7c8d308c7cf0c7935eab5e8733570c",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackerrank/listCycle/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3728"
},
{
"name": "Go",
"bytes": "438183"
},
{
"name": "TeX",
"bytes": "79678"
}
],
"symlink_target": ""
} |
"""
couchbasekit.fields
~~~~~~~~~~~~~~~~~~~
:website: http://github.com/kirpit/couchbasekit
:copyright: Copyright 2012, Roy Enjoy <kirpit *at* gmail.com>, see AUTHORS.txt.
:license: MIT, see LICENSE.txt for details.
* :class:`couchbasekit.fields.CustomField`
* :class:`couchbasekit.fields.ChoiceField`
* :class:`couchbasekit.fields.EmailField`
* :class:`couchbasekit.fields.PasswordField`
"""
import warnings
from abc import ABCMeta
class CustomField(object):
"""The abstract custom field to be extended by all other field classes.
.. note::
You can also create your own custom field types by implementing this
class. All you have to do is to assign your final (that is calculated
and ready to be saved) value to the :attr:`value` property. Please
note that it should also accept unicode raw values, which are fetched
and returned from couchbase server. See :class:`PasswordField` source
code as an example.
Please contribute back if you create a generic and useful custom field.
"""
__metaclass__ = ABCMeta
_value = None
def __init__(self):
raise NotImplementedError()
def __repr__(self):
return repr(self.value)
def __eq__(self, other):
if type(self) is type(other) and self.value==other.value:
return True
return False
@property
def value(self):
"""Property to be used when saving a custom field into
:class:`couchbasekit.document.Document` instance.
:returns: The value to be saved for the field within
:class:`couchbasekit.document.Document` instances.
:rtype: unicode
"""
if not isinstance(self._value, basestring):
raise ValueError("%s's 'value' is not set." % type(self).__name__)
return self._value
@value.setter
    def value(self, value):
        """Property setter that should be used to assign the final (calculated)
value.
"""
self._value = value
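# Illustrative sketch (not part of couchbasekit): a hypothetical custom field
# written the way the CustomField docstring above describes: override
# __init__, compute the final value and assign it to the `value` property.
# The class name and the slug rule are assumptions made for this example only.
class SlugField(CustomField):
    def __init__(self, raw):
        if not isinstance(raw, basestring):
            raise ValueError("SlugField requires a string or unicode value.")
        # an already-slugified unicode value coming back from couchbase
        # passes through unchanged, since the transformation is idempotent
        self.value = raw.strip().lower().replace(u' ', u'-')
# e.g. SlugField(u'Hello World').value == u'hello-world'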
class ChoiceField(CustomField):
    """The custom field to be used for multi-choice options such as gender,
    static category lists, etc. This class can't be used directly; it has to
    be extended by your choice list class. Thankfully, it's easy::
class Gender(ChoiceField):
CHOICES = {
'M': 'Male',
'F': 'Female',
}
and all you have to do is to pass the current value to create your choice
object:
>>> choice = Gender('F')
>>> choice.value
'F'
>>> choice.text
'Female'
:param choice: The choice value.
:type choice: basestring
"""
__metaclass__ = ABCMeta
CHOICES = {}
def __eq__(self, other):
if super(ChoiceField, self).__eq__(other) and \
self.CHOICES==other.CHOICES:
return True
return False
def __init__(self, choice):
if not isinstance(self.CHOICES, dict) or not len(self.CHOICES):
raise AttributeError("ChoiceFields must have dictionary 'CHOICES' "
"attribute and cannot be empty.")
if choice not in self.CHOICES:
raise ValueError("Default choice for %s must be "
"within the 'CHOICES' attribute."
% type(self).__name__)
self.value = choice
@property
def text(self):
"""Returns the text of the current choice, object property.
:rtype: unicode
"""
return self.CHOICES.get(self.value)
def iteritems(self):
return self.CHOICES.iteritems()
class EmailField(CustomField):
"""The custom field to be used for email addresses and intended to validate
them as well.
.. warning::
Email address validation is NOT implemented yet. A best practice
validation that doesn't depend on any other package is much appreciated.
:param email: Email address to be saved.
:type email: basestring
"""
def __init__(self, email):
if not self.is_valid(email):
raise ValueError("Email address is invalid.")
self.value = email
@staticmethod
def is_valid(email):
"""Email address validation method.
:param email: Email address to be saved.
:type email: basestring
:returns: Always True, as it's not implemented yet.
:rtype: bool
"""
# TODO: validate email address
        warnings.warn('EmailField.is_valid() method is not implemented yet.',
                      RuntimeWarning)
return True
class PasswordField(CustomField):
"""The custom field to be used for password types.
It encrypts the raw passwords on-the-fly and depends on
`py-bcrypt` library for such encryption.
:param password: Raw or encrypted password value.
:type password: unicode
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
LOG_ROUNDS = 12
def __init__(self, password):
if not isinstance(password, basestring):
raise ValueError("Password must be a string or unicode.")
# do the encryption if raw password provided
if not password.startswith(('$2a$', '$2y$')):
bcrypt = self.get_bcrypt()
password = bcrypt.hashpw(password, bcrypt.gensalt(self.LOG_ROUNDS))
self.value = password
@staticmethod
def get_bcrypt():
"""Returns the `py-bcrypt` library for internal usage.
:returns: `py-bcrypt` package.
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
        try:
            import bcrypt
        except ImportError:
            raise ImportError("PasswordField requires 'py-bcrypt' "
                              "library to hash the passwords.")
        return bcrypt
    def check_password(self, raw_password):
        """Validates the given raw password against the instance's encrypted one.
:param raw_password: Raw password to be checked against.
:type raw_password: unicode
:returns: True if comparison was successful, False otherwise.
:rtype: bool
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
bcrypt = self.get_bcrypt()
return bcrypt.hashpw(raw_password, self.value)==self.value | {
"content_hash": "cd18ef724decfbbd4dea764a01ccb5aa",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 80,
"avg_line_length": 32.963157894736845,
"alnum_prop": 0.617276065783171,
"repo_name": "rbin/Django-Unchained",
"id": "bc5a8f88041137bd78670c790de9914d7bcace3a",
"size": "6286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-couchbase-engine/fields.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "1318"
},
{
"name": "Python",
"bytes": "50266"
}
],
"symlink_target": ""
} |
import os, sys
from identifiers import Identifiers
# add python module logger to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'logger'))
from logger import Logger
# add python module device files to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'device_files'))
from device_identifier import DeviceIdentifier
class Property:
""" Property
Encapsulates a device property.
This manages merging and accessing of properties.
"""
def __init__(self, id, name, value, logger=None):
        if logger is None:
self.log = Logger()
else:
self.log = logger
self.name = name
self.values = [PropertyValue(id, value, self.log)]
def addValue(self, other):
if isinstance(other.value, list):
for prop in self.values:
# create intersection of both lists
intersection_value = [val for val in other.value if val in prop.value]
# what are the differences in both lists?
self_diff = [val for val in prop.value if val not in intersection_value]
other_diff = [val for val in other.value if val not in intersection_value]
# if there is an intersection, we can add the other.ids here
if len(intersection_value) > 0:
# if this value has more items than the intersection
if len(self_diff) > 0:
# set it to only the intersecting items
prop.value = intersection_value
# and add a new PropertyValue with only the differences
self.values.append(PropertyValue(prop.ids, self_diff, self.log))
# add the other.ids to this value
prop.ids.extend(other.ids)
# continue looking with the differences of other
other.value = other_diff
# order is important
prop.value.sort()
# no more values to add, we can stop looking now
if len(other.value) == 0:
return
else:
for prop in self.values:
if (prop.value == other.value):
prop.ids.extend(other.ids)
return
# apparently this value does not exist yet, so add it
self.values.append(other)
def getMergedProperty(self, other):
assert isinstance(other, Property)
assert other.name == self.name
for value in other.values:
self.addValue(value)
self.values.sort(key=lambda k : k.value)
return self
def getValues(self):
value_list = []
for value in self.values:
value_list.append(value.value)
return value_list
def __repr__(self):
return self.__str__()
def __str__(self):
return ("Property(name='%s', values=[\n%s ])" % (self.name, \
",\n".join([str(value) for value in self.values]))) \
.replace('\n', '\n\t')
class PropertyValue:
""" PropertyValue
Encapsulates a device property value with ids.
"""
def __init__(self, id, value, logger=None):
        if logger is None:
self.log = Logger()
else:
self.log = logger
if isinstance(id, list):
self.ids = list(id)
else:
self.ids = Identifiers(id, self.log)
self.value = value
if isinstance(self.value, list):
self.value.sort()
@property
def id(self):
return self.ids.intersection
def __repr__(self):
return self.__str__()
def __str__(self):
return ("PropertyValue(value='%s',\nids= %s )" % (self.value, self.ids)).replace('\n', '\n\t')
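# Illustrative sketch (not part of the original file): the list-splitting rule
# that Property.addValue applies, reproduced on plain Python lists so it can
# be run without the Logger/Identifiers modules. The helper name and the
# sample data are assumptions made for this example only.
def _illustrate_value_split(existing, incoming):
    # values shared by both lists end up holding both sets of ids
    intersection = sorted(val for val in incoming if val in existing)
    # leftovers on either side stay attached to their original ids only
    existing_only = sorted(val for val in existing if val not in intersection)
    incoming_only = sorted(val for val in incoming if val not in intersection)
    return intersection, existing_only, incoming_only
# e.g. _illustrate_value_split(['32', '64', '128'], ['16', '32', '64'])
# returns (['32', '64'], ['128'], ['16'])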
| {
"content_hash": "d2d71939232b6f329b9693355a659b7e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 96,
"avg_line_length": 27.059322033898304,
"alnum_prop": 0.6617601002192296,
"repo_name": "chrism333/xpcc",
"id": "ddcf99f15c32f7b9d82c9c7de3684076080245a2",
"size": "3524",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/device_file_generator/property.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "30882959"
},
{
"name": "C++",
"bytes": "3321062"
},
{
"name": "Gnuplot",
"bytes": "823"
},
{
"name": "Python",
"bytes": "175242"
},
{
"name": "Shell",
"bytes": "51"
}
],
"symlink_target": ""
} |
__author__ = 'darshanhegde'
"""
Reads in all the chunks of paragraph vectors generated and puts them in a single
numpy array. Finds the nearest neighbour sentences to given sentences.
"""
import os
import sys
import numpy as np
import pickle
def pickle_load(in_file_path):
in_file = open(in_file_path, "rU")
data = pickle.load(in_file)
in_file.close()
return data
def pickle_dump(data, out_file_path):
out_file = open(out_file_path, "w")
pickle.dump(data, out_file)
out_file.close()
class SentIter:
def __init__(self, review_sent_file_path):
self.review_sent_file_path = review_sent_file_path
def __iter__(self):
review_sent_file = open(self.review_sent_file_path, "rU")
for sentence in review_sent_file:
yield sentence.strip().split()
review_sent_file.close()
def collect_model_files(model_folder_path, model_file_name, model_size, sentence_file_path, out_file_path):
#count number of sentences
SI = SentIter(sentence_file_path)
num_sents = 0
for sentence in SI:
num_sents += 1
model = np.zeros((num_sents, model_size), dtype=np.float32)
all_files = os.listdir(model_folder_path)
model_files = filter(lambda f_name: f_name.find(model_file_name) != -1, all_files)
model_files_aug = [(model_file, int(model_file.split("_")[-1])) for model_file in model_files]
model_files_aug = sorted(model_files_aug, key=lambda x: x[-1])
model_files = [model_file for model_file, idx in model_files_aug]
for m_idx, model_file in enumerate(model_files):
model_chunk = pickle_load(os.path.join(model_folder_path, model_file))
if model_chunk.shape[0] == 1024:
model[1024*m_idx:1024*(m_idx+1), :] = model_chunk
else:
model[1024*m_idx:, :] = model_chunk
pickle_dump(model, out_file_path)
class ParagraphNearestNeaighbour:
def __init__(self, model_file_path, sentence_file_path):
self.model = pickle_load(model_file_path)
self.SI = SentIter(sentence_file_path)
self.sentences = []
for sentence in self.SI:
self.sentences.append(sentence)
def find_nearest_neighbours(self, test_sentece_file, out_file_path, topn):
norm = np.linalg.norm(self.model, axis=1)
self.model = self.model / norm[:, np.newaxis]
test_senteces = open(test_sentece_file, "rU")
out_file = open(out_file_path, "w")
for test_sentence in test_senteces:
sent_idx, sentence = test_sentence.strip().split("\t")
print " Given sentence: ", sentence
out_file.write(" Given: " + sentence + "\n")
sent_idx = int(sent_idx)-1
sent_rep = self.model[sent_idx]
dists = np.dot(self.model, sent_rep)
best = np.argsort(dists)[::-1][:topn+1]
results = [(self.sentences[sim], float(dists[sim])) for sim in best]
for sentence, score in results[1:]:
print " ".join(sentence), score
out_file.write(" ".join(sentence) + str(score) + "\n")
out_file.write("\n\n")
print "\n"
test_senteces.close()
out_file.close()
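# Illustrative sketch (not part of the original script): the cosine-similarity
# lookup that find_nearest_neighbours performs, shown as a standalone helper
# on an arbitrary matrix. The function name and parameters are assumptions
# made for this example only.
def _demo_cosine_topn(model, query_idx, topn=5):
    # normalise every row, then a dot product against the query row gives
    # cosine similarities; sort descending and drop the query row itself
    norms = np.linalg.norm(model, axis=1)
    unit = model / norms[:, np.newaxis]
    dists = np.dot(unit, unit[query_idx])
    return np.argsort(dists)[::-1][1:topn + 1]
# Called on the pickled paragraph matrix this mirrors what
# find_nearest_neighbours computes before mapping indices back to sentences.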
def main():
collect_model_files("../model_sub/", "paragraph_model", 96, "../data/paragraph_data.txt", "../model/paragraph_model.pkl")
PVNN = ParagraphNearestNeaighbour("../model/paragraph_model.pkl", "../data/paragraph_data.txt")
PVNN.find_nearest_neighbours("../data/nn_sentences.txt", "../results/nn_sentences_result.txt", 5)
if __name__ == '__main__':
main() | {
"content_hash": "a35cca0709abf21bd1296ab9cb9879f0",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 125,
"avg_line_length": 36.58163265306123,
"alnum_prop": 0.6178521617852162,
"repo_name": "darshanhegde/ParagraphVec",
"id": "ef254600bb4fed318ead4c85411a8fe3af4f9324",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/find_nearest_neighbour.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15451"
}
],
"symlink_target": ""
} |
import base64
import copy
import datetime
import functools
import os
import string
import tempfile
import fixtures
import iso8601
import mock
from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova.network import api as network_api
from nova.network import base_api as base_network_api
from nova.network import model
from nova.network import neutronv2
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import log as logging
from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_neutron_security_groups as test_neutron)
from nova.tests import cast_as_call
from nova.tests import fake_block_device
from nova.tests import fake_network
from nova.tests import fake_notifier
from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_flavor', 'nova.compute.flavors')
CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
HOST = "testhost"
def get_fake_cache(get_floating):
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
if get_floating:
ip_info = [_ip('192.168.0.3',
floats=['1.2.3.4', '5.6.7.8']),
_ip('192.168.0.4')]
else:
ip_info = [_ip('192.168.0.3'),
_ip('192.168.0.4')]
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': ip_info}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return model.NetworkInfo.hydrate(info)
def get_instances_with_cached_ips(orig_func, get_floating,
*args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
if kwargs.get('want_objects', False):
info_cache = objects.InstanceInfoCache()
info_cache.network_info = get_fake_cache(get_floating)
info_cache.obj_reset_changes()
else:
info_cache = {'network_info': get_fake_cache(get_floating)}
if isinstance(instances, (list, obj_base.ObjectListBase)):
for instance in instances:
instance['info_cache'] = info_cache
else:
instances['info_cache'] = info_cache
return instances
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
ec2utils.reset_cache()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
self.useFixture(fixtures.FakeLogger('boto'))
fake_utils.stub_out_utils_spawn_n(self.stubs)
def fake_show(meh, context, id, **kwargs):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(_self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
fake.stub_out_image_service(self.stubs)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
# Stub out the notification service so we use the no-op serializer
# and avoid lazy-load traces with the wrap_exception decorator in
# the compute service.
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.consoleauth = self.start_service('consoleauth')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.volume_api = volume.API()
self.useFixture(cast_as_call.CastAsCall(self.stubs))
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
self.volume_api.reset_fake_api(self.context)
super(CloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def fake_get_target(obj, iqn):
return 1
def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
pass
def _stub_instance_get_with_fixed_ips(self,
func_name, get_floating=True):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, get_floating,
*args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
def test_describe_regions(self):
# Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
# Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.flags(network_api_class='nova.network.api.API')
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_addresses_in_neutron(self):
# Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
self.flags(network_api_class='nova.network.neutronv2.api.API')
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
# Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
result = self.cloud.describe_addresses(self.context)
self.assertEqual(len(result['addressesSet']), 2)
result = self.cloud.describe_addresses(self.context,
public_ip=['10.10.10.10'])
self.assertEqual(len(result['addressesSet']), 1)
for address in addresses:
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_allocate_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.assertEqual(allocate(self.context)['publicIp'], address)
db.floating_ip_destroy(self.context, address)
self.assertRaises(exception.NoMoreFloatingIps,
allocate,
self.context)
def test_release_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova',
'project_id': self.project_id})
result = self.cloud.release_address(self.context, address)
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
# Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
instance_uuid=inst['uuid'],
host=inst['host'],
vpn=None,
rxtx_factor=3,
project_id=project_id,
macs=None)
fixed_ips = nw_info.fixed_ips()
ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
lambda *args: {'fixed_ips': ['10.0.0.1'],
'fixed_ip6s': [],
'floating_ips': []})
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
self.cloud.disassociate_address(self.context,
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
inst['host'])
db.instance_destroy(self.context, inst['uuid'])
db.floating_ip_destroy(self.context, address)
def test_disassociate_auto_assigned_address(self):
"""Verifies disassociating auto assigned floating IP
raises an exception
"""
address = "10.10.10.10"
def fake_get(*args, **kwargs):
pass
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
self.stubs.Set(network_api.API, 'disassociate_floating_ip',
fake_disassociate_floating_ip)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.cloud.disassociate_address,
self.context, public_ip=address)
def test_disassociate_unassociated_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
result = self.cloud.disassociate_address(self.context,
public_ip=address)
self.assertEqual(result['return'], 'true')
db.floating_ip_destroy(self.context, address)
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[sec['name']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
def _check_name(result, i, expected):
self.assertEqual(result['securityGroupInfo'][i]['groupName'],
expected)
# include all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 2)
_check_name(result, 0, 'default')
_check_name(result, 1, sec['name'])
# exclude all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
# default all tenants
result = self.cloud.describe_security_groups(self.context)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context,
group_id=[sec['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
default = db.security_group_get_by_name(self.context,
self.context.project_id,
'default')
result = self.cloud.describe_security_groups(self.context,
group_id=[default['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
'default')
db.security_group_destroy(self.context, sec['id'])
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
def test_security_group_quota_limit(self):
self.flags(quota_security_groups=10)
for i in range(1, CONF.quota_security_groups):
name = 'test name %i' % i
descript = 'test description %i' % i
create = self.cloud.create_security_group
create(self.context, name, descript)
        # the 11th group should fail
self.assertRaises(exception.SecurityGroupLimitExceeded,
create, self.context, 'foo', 'bar')
def test_delete_security_group_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, group_id=sec['id']))
def test_delete_security_group_with_bad_name(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, 'badname')
def test_delete_security_group_with_bad_group_id(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, group_id=999)
def test_delete_security_group_no_params(self):
delete = self.cloud.delete_security_group
self.assertRaises(exception.MissingParameter, delete, self.context)
def test_delete_security_group_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.delete_security_group, self.context,
'fake-name', 'fake-id')
def test_authorize_security_group_ingress_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.authorize_security_group_ingress, self.context,
'fake-name', 'fake-id')
def test_authorize_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges':
{'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_fail_missing_source_group(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}},
'ip_protocol': u'tcp'}]}
self.assertRaises(exception.SecurityGroupNotFound, authz,
self.context, group_name=sec['name'], **kwargs)
def test_authorize_security_group_ingress_ip_permissions_groups(self):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'name': 'test'
}
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'somegroup1'})
sec = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'othergroup2'})
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'},
'2': {'user_id': u'someuser',
'group_name': u'othergroup2'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_describe_security_group_ingress_groups(self):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'name': 'test'
}
sec1 = db.security_group_create(self.context, kwargs)
sec2 = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'somegroup1'})
sec3 = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'othergroup2'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [
{'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}}},
{'ip_protocol': 'tcp',
'from_port': 80,
'to_port': 80,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'othergroup2'}}}]}
self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEqual(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
self.assertEqual(len(actual_rules), 4)
expected_rules = [{'fromPort': -1,
'groups': [{'groupName': 'somegroup1',
'userId': 'someuser'}],
'ipProtocol': 'icmp',
'ipRanges': [],
'toPort': -1},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'tcp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'udp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 80,
'groups': [{'groupName': u'othergroup2',
'userId': u'someuser'}],
'ipProtocol': u'tcp',
'ipRanges': [],
'toPort': 80}]
for rule in expected_rules:
self.assertIn(rule, actual_rules)
db.security_group_destroy(self.context, sec3['id'])
db.security_group_destroy(self.context, sec2['id'])
db.security_group_destroy(self.context, sec1['id'])
def test_revoke_security_group_ingress_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.revoke_security_group_ingress, self.context,
'fake-name', 'fake-id')
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
def test_authorize_revoke_security_group_ingress_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
def test_authorize_security_group_ingress_missing_protocol_params(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.MissingParameter, authz, self.context,
'test')
def test_authorize_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.MissingParameter, authz, self.context,
**kwargs)
def test_authorize_security_group_ingress_already_exists(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_name=sec['name'], **kwargs)
self.assertRaises(exception.SecurityGroupRuleExists, authz,
self.context, group_name=sec['name'], **kwargs)
def test_security_group_ingress_quota_limit(self):
self.flags(quota_security_group_rules=20)
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec_group = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
for i in range(100, 120):
kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec_group['id'], **kwargs)
kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
self.context, group_id=sec_group['id'], **kwargs)
def _test_authorize_security_group_no_ports_with_source_group(self, proto):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'description': '',
'name': 'test'
}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto,
'groups': {'1': {'user_id': self.context.user_id,
'group_name': u'test'}}}
self.assertTrue(authz(self.context, group_name=sec['name'],
**auth_kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEqual(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
expected_rules = [{'groups': [{'groupName': 'test',
'userId': self.context.user_id}],
'ipProtocol': proto,
'ipRanges': []}]
if proto == 'icmp':
expected_rules[0]['fromPort'] = -1
expected_rules[0]['toPort'] = -1
else:
expected_rules[0]['fromPort'] = 1
expected_rules[0]['toPort'] = 65535
self.assertTrue(expected_rules == actual_rules)
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
db.security_group_destroy(self.context, sec['id'])
def _test_authorize_security_group_no_ports_no_source_group(self, proto):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'description': '',
'name': 'test'
}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto}
self.assertRaises(exception.MissingParameter, authz, self.context,
group_name=sec['name'], **auth_kwargs)
db.security_group_destroy(self.context, sec['id'])
def test_authorize_security_group_no_ports_icmp(self):
self._test_authorize_security_group_no_ports_with_source_group('icmp')
self._test_authorize_security_group_no_ports_no_source_group('icmp')
def test_authorize_security_group_no_ports_tcp(self):
self._test_authorize_security_group_no_ports_with_source_group('tcp')
self._test_authorize_security_group_no_ports_no_source_group('tcp')
def test_authorize_security_group_no_ports_udp(self):
self._test_authorize_security_group_no_ports_with_source_group('udp')
self._test_authorize_security_group_no_ports_no_source_group('udp')
def test_revoke_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
revoke = self.cloud.revoke_security_group_ingress
self.assertRaises(exception.MissingParameter, revoke,
self.context, **kwargs)
def test_delete_security_group_in_use_by_group(self):
self.cloud.create_security_group(self.context, 'testgrp1',
"test group 1")
self.cloud.create_security_group(self.context, 'testgrp2',
"test group 2")
kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
'group_name': u'testgrp2'}},
}
self.cloud.authorize_security_group_ingress(self.context,
group_name='testgrp1', **kwargs)
group1 = db.security_group_get_by_name(self.context,
self.project_id, 'testgrp1')
get_rules = db.security_group_rule_get_by_security_group
self.assertTrue(get_rules(self.context, group1['id']))
self.cloud.delete_security_group(self.context, 'testgrp2')
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
# Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active'}
inst = db.instance_create(self.context, args)
args = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'name': 'testgrp',
'description': 'Test group'}
group = db.security_group_create(self.context, args)
db.instance_add_security_group(self.context, inst['uuid'], group['id'])
self.assertRaises(exception.InvalidGroup,
self.cloud.delete_security_group,
self.context, 'testgrp')
db.instance_destroy(self.context, inst['uuid'])
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
# Aggregate based zones
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
agg = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 18)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'second_zone'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 17)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def assertEqualSorted(self, x, y):
self.assertEqual(sorted(x), sorted(y))
def test_describe_instances(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
sys_meta['EC2_client_token'] = "client-token-1"
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
sys_meta['EC2_client_token'] = "client-token-2"
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host2',
'hostname': 'server-4321',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1')
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
agg2 = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg2['id'], 'host2')
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
# Now try filtering.
instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['tagSet'], [])
self.assertEqual(instance['privateDnsName'], 'server-4321')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertEqual(instance['dnsNameV6'],
'fe80:b33f::a8bb:ccff:fedd:eeff')
self.assertEqual(instance['clientToken'], 'client-token-2')
# A filter with even one invalid id should cause an exception to be
# raised
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id, '435679'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
        # Makes sure describe_instances raises when every requested id is invalid.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
instance_id = ec2utils.id_to_ec2_inst_id('435679')
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id])
def test_describe_instances_with_filters(self):
# Makes sure describe_instances works and filters results.
filters = {'filter': [{'name': 'test',
'value': ['a', 'b']},
{'name': 'another_test',
'value': 'a string'}]}
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': []})
def test_describe_instances_with_filters_tags(self):
# Makes sure describe_instances works and filters tag results.
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
utc = iso8601.iso8601.Utc()
# Create some test images
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc),
'system_metadata': sys_meta
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host2',
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
tzinfo=utc),
'system_metadata': sys_meta
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, one overlapping key, and a
# disparate pair
# inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
# inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
md = {'key': 'foo', 'value': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
md2 = {'key': 'baz', 'value': 'wibble'}
md3 = {'key': 'bax', 'value': 'wobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md2, md3])
md4 = {'key': 'baz', 'value': 'quux'}
md5 = {'key': 'zog', 'value': 'bobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md4, md5])
# We should be able to search by:
inst1_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000001',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': u'm1.medium',
'ipAddress': '1.2.3.4',
'keyName': 'None (None, host1)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1111',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'wibble'},
{'key': u'bax',
'value': u'wobble'}]}],
'ownerId': None,
'reservationId': u'a'}
inst2_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000002',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': u'm1.medium',
'ipAddress': '1.2.3.4',
'keyName': u'None (None, host2)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 2,
tzinfo=utc),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1112',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'quux'},
{'key': u'zog',
'value': u'bobble'}]}],
'ownerId': None,
'reservationId': u'b'}
# No filter
result = self.cloud.describe_instances(self.context)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Key search
# Both should have tags with key 'foo' and value 'bar'
filters = {'filter': [{'name': 'tag:foo',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Both should have tags with key 'foo'
filters = {'filter': [{'name': 'tag-key',
'value': ['foo']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Value search
# Only inst2 should have tags with key 'baz' and value 'quux'
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# Only inst2 should have tags with value 'quux'
filters = {'filter': [{'name': 'tag-value',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# Multiple values
# Both should have tags with key 'baz' and values in the set
# ['quux', 'wibble']
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux', 'wibble']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Both should have tags with key 'baz' or tags with value 'bar'
filters = {'filter': [{'name': 'tag-key',
'value': ['baz']},
{'name': 'tag-value',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
# Confirm deletion of tags
# Check for format 'tag:'
self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
filters = {'filter': [{'name': 'tag:foo',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# Check for format 'tag-'
filters = {'filter': [{'name': 'tag-key',
'value': ['foo']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
filters = {'filter': [{'name': 'tag-value',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': [inst2_ret]})
# destroy the test instances
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
def test_describe_instances_sorting(self):
# Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst_base = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'system_metadata': sys_meta,
}
utc = iso8601.iso8601.Utc()
inst1_kwargs = {}
inst1_kwargs.update(inst_base)
inst1_kwargs['host'] = 'host1'
inst1_kwargs['hostname'] = 'server-1111'
inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc)
inst1 = db.instance_create(self.context, inst1_kwargs)
inst2_kwargs = {}
inst2_kwargs.update(inst_base)
inst2_kwargs['host'] = 'host2'
inst2_kwargs['hostname'] = 'server-2222'
inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
tzinfo=utc)
inst2 = db.instance_create(self.context, inst2_kwargs)
inst3_kwargs = {}
inst3_kwargs.update(inst_base)
inst3_kwargs['host'] = 'host3'
inst3_kwargs['hostname'] = 'server-3333'
inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
tzinfo=utc)
inst3 = db.instance_create(self.context, inst3_kwargs)
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]['instancesSet']
self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst3['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
# Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
values = values or {}
values.update({'image_ref': image_uuid, 'instance_type_id': 1,
'power_state': power_state_, 'vm_state': vm_state_,
'system_metadata': sys_meta})
inst = db.instance_create(self.context, values)
instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
result = result['instancesSet'][0]['instanceState']
name = result['name']
code = result['code']
self.assertEqual(code, expected_code)
self.assertEqual(name, expected_name)
db.instance_destroy(self.context, inst['uuid'])
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
power_state.NOSTATE, vm_states.STOPPED,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
# Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['privateDnsName'], 'server-1234')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertNotIn('dnsNameV6', instance)
db.instance_destroy(self.context, inst1['uuid'])
db.service_destroy(self.context, comp1['id'])
def test_describe_instances_deleted(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1['uuid'])
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 1)
result1 = result['reservationSet'][0]['instancesSet']
self.assertEqual(result1[0]['instanceId'],
ec2utils.id_to_ec2_inst_id(inst2['uuid']))
def test_describe_instances_with_image_deleted(self):
image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
db.instance_create(self.context, args2)
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 2)
def test_describe_instances_dnsName_set(self):
# Verifies dnsName doesn't get set if floating IP is set.
self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
self._stub_instance_get_with_fixed_ips('get', get_floating=False)
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
instance = result['instancesSet'][0]
self.assertIsNone(instance['dnsName'])
def test_describe_instances_booting_from_a_volume(self):
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst = objects.Instance(self.context)
inst.reservation_id = 'a'
inst.image_ref = ''
inst.root_device_name = '/dev/sdh'
inst.instance_type_id = 1
inst.vm_state = vm_states.ACTIVE
inst.host = 'host1'
inst.system_metadata = sys_meta
inst.create()
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
instance = result['instancesSet'][0]
self.assertIsNone(instance['imageId'])
def test_describe_images(self):
describe_images = self.cloud.describe_images
def fake_detail(meh, context, **kwargs):
return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}}]
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
def fake_detail_none(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
self.assertEqual(result1['imageId'], 'ami-00000001')
# provided a valid image_id
result2 = describe_images(self.context, ['ami-00000001'])
self.assertEqual(1, len(result2['imagesSet']))
# provide more than 1 valid image_id
result3 = describe_images(self.context, ['ami-00000001',
'ami-00000002'])
self.assertEqual(2, len(result3['imagesSet']))
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertIn(key, d1)
for d2 in L2:
self.assertIn(key, d2)
if d1[key] == d2[key]:
self.assertThat(d1, matchers.DictMatches(d2))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
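        # Helper fixture: stubs the fake image service with two images --
        # one instance-store image carrying an elaborate mappings /
        # block_device_mapping set and one EBS-backed image -- and
        # optionally creates the referenced volumes and snapshots.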
self.flags(max_local_block_devices=-1)
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
{'device_name': '/dev/sdb2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
{'device_name': '/dev/sdc2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
'volume_id': None}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id, **kwargs):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context, **kwargs):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
snap = self._snapshot_create(bdm['snapshot_id'])
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertIn('rootDeviceType', result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertIn('rootDeviceName', result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertIn('blockDeviceMapping', result)
return result
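    # Expected EC2-style root device names and block device mappings for the
    # two images registered by _setUpImageSet().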
_expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning a mapping.
    #                 It only makes sense when the user overrides an
    #                 existing mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00000001'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00000001'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00000002'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00000002'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00000003'}}]
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
# deviceName
# virtualName
# ebs
# snapshotId
# volumeSize
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
# test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
result = describe_images(self.context, ['ami-00000001'])
result = self._assertImageSet(result, 'instance-store',
self._expected_root_device_name1)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_images(self.context, ['ami-00000002'])
result = self._assertImageSet(result, 'ebs',
self._expected_root_device_name2)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'is_public': True}
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
result = describe_image_attribute(self.context, 'ami-00000001',
'kernel')
self.assertEqual('aki-00000001', result['kernel']['value'])
result = describe_image_attribute(self.context, 'ami-00000001',
'ramdisk')
self.assertEqual('ari-00000001', result['ramdisk']['value'])
def test_describe_image_attribute_root_device_name(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name1)
result = describe_image_attribute(self.context, 'ami-00000002',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_image_attribute(self.context, 'ami-00000002',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
fake_metadata = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'is_public': False}
def fake_show(meh, context, id, **kwargs):
return copy.deepcopy(fake_metadata)
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
def fake_update(meh, context, image_id, metadata, data=None):
self.assertEqual(metadata['properties']['kernel_id'],
fake_metadata['properties']['kernel_id'])
self.assertEqual(metadata['properties']['ramdisk_id'],
fake_metadata['properties']['ramdisk_id'])
self.assertTrue(metadata['is_public'])
image = copy.deepcopy(fake_metadata)
image.update(metadata)
return image
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertTrue(result['is_public'])
def test_register_image(self):
register_image = self.cloud.register_image
def fake_create(*args, **kwargs):
# NOTE(vish): We are mocking s3 so make sure we have converted
# to ids instead of uuids.
return {'id': 1,
'name': 'fake_name',
'container_format': 'ami',
'properties': {'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'
},
'is_public': False
}
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
image_location = 'fake_bucket/fake.img.manifest.xml'
result = register_image(self.context, image_location)
self.assertEqual(result['imageId'], 'ami-00000001')
def test_register_image_empty(self):
register_image = self.cloud.register_image
self.assertRaises(exception.MissingParameter, register_image,
self.context, image_location=None)
def test_register_image_name(self):
register_image = self.cloud.register_image
def fake_create(_self, context, metadata, data=None):
self.assertEqual(metadata['name'], self.expected_name)
metadata['id'] = 1
metadata['container_format'] = 'ami'
metadata['is_public'] = False
return metadata
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
self.expected_name = 'fake_bucket/fake.img.manifest.xml'
register_image(self.context,
image_location=self.expected_name,
name=None)
self.expected_name = 'an image name'
register_image(self.context,
image_location='some_location',
name=self.expected_name)
def test_format_image(self):
image = {
'id': 1,
'container_format': 'ami',
'name': 'name',
'owner': 'someone',
'properties': {
'image_location': 'location',
'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'},
'is_public': False}
expected = {'name': 'name',
'imageOwnerId': 'someone',
'isPublic': False,
'imageId': 'ami-00000001',
'imageState': None,
'rootDeviceType': 'instance-store',
'architecture': None,
'imageLocation': 'location',
'kernelId': 'aki-00000001',
'ramdiskId': 'ari-00000001',
'rootDeviceName': '/dev/sda1',
'imageType': 'machine',
'description': None}
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['properties']['image_location'] = None
expected['imageLocation'] = 'None (name)'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['name'] = None
image['properties']['image_location'] = 'location'
expected['imageLocation'] = 'location'
expected['name'] = 'location'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertTrue(result)
# invalid image
self.stubs.UnsetAll()
def fake_detail_empty(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
def test_deregister_image_wrong_container_type(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
self.assertRaises(exception.NotFound, deregister_image, self.context,
'aki-00000001')
def _run_instance(self, **kwargs):
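        # Boot a single instance through the EC2 API and return its
        # EC2-formatted instance id.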
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def test_get_password_data(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_flavor,
max_count=1)
self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
output = self.cloud.get_password_data(context=self.context,
instance_id=[instance_id])
self.assertEqual(output['passwordData'], 'fakepass')
self.cloud.terminate_instances(self.context, [instance_id])
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_flavor,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEqual(base64.b64decode(output['output']),
'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
result, private_key = self._create_key('test')
expected = db.key_pair_get(self.context,
self.context.user_id,
'test')['public_key']
(fd, fname) = tempfile.mkstemp()
os.write(fd, private_key)
public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
os.unlink(fname)
# assert key fields are equal
self.assertEqual(''.join(public_key.split("\n")[2:-2]),
expected.split(" ")[1].strip())
def test_describe_key_pairs(self):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
keys = result["keySet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_describe_bad_key_pairs(self):
self.assertRaises(exception.KeypairNotFound,
self.cloud.describe_key_pairs, self.context,
key_name=['DoesNotExist'])
def test_import_key_pair(self):
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
with open(pubkey_path + '/dummy.pub') as f:
dummypub = f.readline().rstrip()
with open(pubkey_path + '/dummy.fingerprint') as f:
dummyfprint = f.readline().rstrip()
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
result = self.cloud.import_key_pair(self.context,
key_name,
public_key_material)
self.assertEqual(result['keyName'], key_name)
self.assertEqual(result['keyFingerprint'], dummyfprint)
keydata = db.key_pair_get(self.context,
self.context.user_id,
key_name)
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_import_key_pair_quota_limit(self):
self.flags(quota_key_pairs=0)
        pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
        # Read the public key with a context manager so the file handle is
        # actually closed (the fingerprint file is not needed here).
        with open(pubkey_path + '/dummy.pub') as f:
            dummypub = f.readline().rstrip()
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
self.assertRaises(exception.KeypairLimitExceeded,
self.cloud.import_key_pair, self.context, key_name,
public_key_material)
def test_create_key_pair(self):
good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
bad_names = ('', 'a' * 256, '*', '/')
for key_name in good_names:
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
for key_name in bad_names:
self.assertRaises(exception.InvalidKeypair,
self.cloud.create_key_pair,
self.context,
key_name)
def test_create_key_pair_quota_limit(self):
self.flags(quota_key_pairs=10)
for i in range(0, 10):
key_name = 'key_%i' % i
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
        # the 11th key pair should exceed the quota and fail
self.assertRaises(exception.KeypairLimitExceeded,
self.cloud.create_key_pair,
self.context,
'foo')
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'running')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_invalid_maxcount(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 0}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_invalid_mincount(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'min_count': 0}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_invalid_count(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1,
'min_count': 2}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_availability_zone(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1,
'placement': {'availability_zone': 'fake'},
}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
def fake_format(*args, **kwargs):
pass
self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['availability_zone'], 'fake')
return ({'id': 'fake-instance'}, 'fake-res-id')
self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
        # NOTE(vish): the assert for this call is in the fake_create method.
run_instances(self.context, **kwargs)
def test_empty_reservation_id_from_token(self):
client_token = 'client-token-1'
def fake_get_all_system_metadata(context, search_filts):
reference = [{'key': ['EC2_client_token']},
{'value': ['client-token-1']}]
self.assertEqual(search_filts, reference)
return []
self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
fake_get_all_system_metadata)
resv_id = self.cloud._resv_id_from_token(self.context, client_token)
self.assertIsNone(resv_id)
def test_run_instances_idempotent(self):
        # Ensure subsequent run_instances calls with the same client token
        # are idempotent and that calls with a different client_token are not
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
kwargs['client_token'] = 'client-token-1'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000001')
kwargs['client_token'] = 'client-token-2'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000002')
kwargs['client_token'] = 'client-token-2'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000002')
kwargs['client_token'] = 'client-token-1'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000001')
kwargs['client_token'] = 'client-token-3'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000003')
# make sure terminated instances lose their client tokens
self.cloud.stop_instances(self.context,
instance_id=[instance['instanceId']])
self.cloud.terminate_instances(self.context,
instance_id=[instance['instanceId']])
kwargs['client_token'] = 'client-token-3'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000004')
def test_run_instances_image_state_none(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_no_state(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}, 'container_format': 'ami'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ImageNotActive, run_instances,
self.context, **kwargs)
def test_run_instances_image_state_invalid(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_decrypt(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ImageNotActive, run_instances,
self.context, **kwargs)
def test_run_instances_image_status_active(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_stat_active(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
def fake_id_to_glance_id(context, id):
return 'cedef40a-ed67-4d10-800e-17455edce175'
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval_max:
self.compute = self.start_service(
'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
# Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 80,
'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 80,
'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
def test_start_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_start_instances_policy_failed(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
rules = {
"compute:start":
common_policy.parse_rule("project_id:non_fake"),
}
policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.start_instances,
self.context, [instance_id])
self.assertIn("compute:start", exc.format_message())
self._restart_compute_service()
def test_stop_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 80,
'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_stop_instances_policy_failed(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
rules = {
"compute:stop":
common_policy.parse_rule("project_id:non_fake")
}
policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.stop_instances,
self.context, [instance_id])
self.assertIn("compute:stop", exc.format_message())
self._restart_compute_service()
def test_terminate_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_invalid_instance_id(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
self._run_instance(**kwargs)
self.assertRaises(exception.InstanceNotFound,
self.cloud.terminate_instances,
self.context, ['i-2'])
self._restart_compute_service()
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
ec2utils.ec2_id_to_id(instance_id))
db.instance_update(self.context, internal_uuid,
{'disable_terminate': True})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
db.instance_update(self.context, internal_uuid,
{'disable_terminate': False})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_two_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 80,
'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [inst1])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 48,
'name': 'terminated'}},
{'instanceId': 'i-00000002',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context, [inst1, inst2])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_reboot_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
result = self.cloud.reboot_instances(self.context, [instance_id])
self.assertTrue(result)
def _volume_create(self, volume_id=None):
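        # Create a detached test volume through the fake volume API; a
        # specific volume_id can be forced for block device mapping tests.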
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': 'fake',
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
def _snapshot_create(self, snapshot_id=None):
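        # Create an available test snapshot; a specific snapshot_id can be
        # forced for block device mapping tests.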
kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
'status': "available",
'volume_size': 1}
if snapshot_id:
kwargs['snap_id'] = snapshot_id
return self.volume_api.create_snapshot_with_kwargs(self.context,
**kwargs)
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
"""Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_show(meh, context, id, **kwargs):
bdm = [dict(snapshot_id=snapshots[0],
volume_size=1,
device_name='sda1',
delete_on_termination=False)]
props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
root_device_name='/dev/sda1',
block_device_mapping=bdm)
return dict(id=id,
properties=props,
container_format='ami',
status='active',
is_public=True)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'sda1',
'boot_index': 0,
'delete_on_termination': False,
'connection_info': '{"foo":"bar"}',
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
virt_driver = {}
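        # Record power_on/power_off calls from the fake virt driver so the
        # no_reboot behaviour can be asserted on below.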
def fake_power_on(self, context, instance, network_info,
block_device_info):
virt_driver['powered_on'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
def fake_power_off(self, instance):
virt_driver['powered_off'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
result = self.cloud.create_image(self.context, ec2_instance_id,
no_reboot=no_reboot)
ec2_ids = [result['imageId']]
created_image = self.cloud.describe_images(self.context,
ec2_ids)['imagesSet'][0]
self.assertIn('blockDeviceMapping', created_image)
bdm = created_image['blockDeviceMapping'][0]
self.assertEqual(bdm.get('deviceName'), 'sda1')
self.assertIn('ebs', bdm)
self.assertEqual(bdm['ebs'].get('snapshotId'),
ec2utils.id_to_ec2_snap_id(snapshots[0]))
self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
self._restart_compute_service()
def test_create_image_no_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
"""Ensure CreateImage fails as expected for an instance-store-backed
instance
"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
self.assertRaises(exception.InvalidParameterValue,
self.cloud.create_image,
self.context,
ec2_instance_id,
no_reboot=True)
@staticmethod
def _fake_bdm_get(ctxt, id, use_slave=False):
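        # Fake DB lookup returning a representative mix of block device
        # mappings: an attached volume, a snapshot-backed volume, a
        # suppressed device (no_device) and several blank local devices
        # (ephemeral and swap).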
blockdms = [{'volume_id': 87654321,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'no_device': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': 98765432,
'source_type': 'snapshot',
'destination_type': 'volume',
'no_device': None,
'delete_on_termination': True,
'device_name': '/dev/sdi'},
{'volume_id': None,
'snapshot_id': None,
'no_device': True,
'source_type': 'blank',
'destination_type': None,
'delete_on_termination': None,
'device_name': None},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdd'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sd3'},
]
extra = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 0,
'device_type': None,
'disk_bus': None,
'instance_uuid': '',
'image_id': None,
'volume_size': None,
'connection_info': None,
'boot_index': None,
'guest_format': None,
}
for bdm in blockdms:
bdm.update(extra)
return blockdms
def test_describe_instance_attribute(self):
# Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
def fake_get(ctxt, instance_id, want_objects=False):
self.assertTrue(want_objects)
inst_type = flavors.get_default_flavor()
inst_type['name'] = 'fake_type'
sys_meta = flavors.save_flavor_info({}, inst_type)
secgroups = objects.SecurityGroupList()
secgroups.objects.append(
objects.SecurityGroup(name='fake0'))
secgroups.objects.append(
objects.SecurityGroup(name='fake1'))
instance = objects.Instance(ctxt)
instance.id = 0
instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
instance.root_device_name = '/dev/sdh'
instance.security_groups = secgroups
instance.vm_state = vm_states.STOPPED
instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
instance.user_data = 'fake-user data'
instance.shutdown_terminate = False
instance.disable_terminate = False
instance.system_metadata = sys_meta
return instance
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
def fake_ec2_instance_get_by_id(ctxt, int_id):
if int_id == 305419896:
fake_map = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 305419896,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
}
return fake_map
raise exception.InstanceNotFound(instance_id=int_id)
self.stubs.Set(db, 'ec2_instance_get_by_id',
fake_ec2_instance_get_by_id)
get_attribute = functools.partial(
self.cloud.describe_instance_attribute,
self.context, 'i-12345678')
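        # Exercise each supported attribute against the stubbed instance.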
bdm = get_attribute('blockDeviceMapping')
bdm['blockDeviceMapping'].sort()
expected_bdm = {'instance_id': 'i-12345678',
'rootDeviceType': 'ebs',
'blockDeviceMapping': [
{'deviceName': '/dev/sdh',
'ebs': {'status': 'attached',
'deleteOnTermination': True,
'volumeId': 'vol-05397fb1',
'attachTime': '13:56:24'}}]}
expected_bdm['blockDeviceMapping'].sort()
self.assertEqual(bdm, expected_bdm)
groupSet = get_attribute('groupSet')
groupSet['groupSet'].sort()
expected_groupSet = {'instance_id': 'i-12345678',
'groupSet': [{'groupId': 'fake0'},
{'groupId': 'fake1'}]}
expected_groupSet['groupSet'].sort()
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
'instanceInitiatedShutdownBehavior': 'stop'})
self.assertEqual(get_attribute('disableApiTermination'),
{'instance_id': 'i-12345678',
'disableApiTermination': False})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
self.assertEqual(get_attribute('kernel'),
{'instance_id': 'i-12345678',
'kernel': 'aki-00000001'})
self.assertEqual(get_attribute('ramdisk'),
{'instance_id': 'i-12345678',
'ramdisk': 'ari-00000002'})
self.assertEqual(get_attribute('rootDeviceName'),
{'instance_id': 'i-12345678',
'rootDeviceName': '/dev/sdh'})
# NOTE(yamahata): this isn't supported
# get_attribute('sourceDestCheck')
self.assertEqual(get_attribute('userData'),
{'instance_id': 'i-12345678',
'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
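        # Boot instances from images with different 'mappings' and
        # 'block_device_mapping' properties and verify the reported
        # instanceInitiatedShutdownBehavior for each.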
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior
"""
kwargs.update({'instance_type': CONF.default_flavor,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
result = self.cloud.describe_instance_attribute(self.context,
instance_id, 'instanceInitiatedShutdownBehavior')
self.assertEqual(result['instanceInitiatedShutdownBehavior'],
expected_result)
expected = {'instancesSet': [
{'instanceId': instance_id,
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 48,
'name': 'terminated'}}]}
result = self.cloud.terminate_instances(self.context,
[instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
test_dia_iisb('stop', image_id='ami-1')
block_device_mapping = [{'device_name': '/dev/vdb',
'virtual_name': 'ephemeral0'}]
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
def fake_show(self, context, id_, **kwargs):
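            # Fake image lookups for ami-3..ami-7, varying the ephemeral
            # 'mappings' and 'block_device_mapping' properties per image id.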
LOG.debug("id_ %s", id_)
prop = {}
if id_ == 'ami-3':
pass
elif id_ == 'ami-4':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}]}
elif id_ == 'ami-5':
prop = {'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
elif id_ == 'ami-6':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}],
'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}
prop_base.update(prop)
return {
'id': id_,
'name': 'fake_name',
'properties': prop_base,
'container_format': 'ami',
'status': 'active'}
# NOTE(yamahata): create ami-3 ... ami-7
        # ami-1 and ami-2 are already created by setUp()
for i in range(3, 8):
db.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('stop', image_id='ami-3')
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')
test_dia_iisb('terminate', image_id='ami-7',
instance_initiated_shutdown_behavior='terminate')
def test_create_delete_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create a test image
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
# Create some tags
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id],
tag=[md])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
# Delete them
self.cloud.delete_tags(self.context, resource_id=[ec2_id],
tag=[{'key': 'foo', 'value': 'bar'}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, {})
self.assertEqual(meta_changes, [{'foo': ['-']}])
def test_describe_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create some test images
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, and each has a different key value pair
        # inst1 : {'foo': 'bar', 'bax': 'wibble'}
        # inst2 : {'foo': 'bar', 'baz': 'quux'}
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md_result)
md2 = {'key': 'baz', 'value': 'quux'}
md2_result = {'baz': 'quux'}
md2_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md2])
self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md2_result)
md3 = {'key': 'bax', 'value': 'wibble'}
md3_result = {'bax': 'wibble'}
md3_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md3])
self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md3_result)
inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'bar'}
inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'wibble'}
inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'bar'}
inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'quux'}
# We should be able to search by:
# No filter
tags = self.cloud.describe_tags(self.context)['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Resource ID
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource-id',
'value': [ec2_id1]}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
# Resource Type
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource-type',
'value': ['instance']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Key, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['foo']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['ba?']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['b*']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
# Value, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['bar']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['wi*']}])['tagSet']
self.assertEqual(tags, [inst1_key_bax])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['quu?']}])['tagSet']
self.assertEqual(tags, [inst2_key_baz])
# Multiple values
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz', 'bax']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
# Multiple filters (AND): no match
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']},
{'name': 'value',
'value': ['wibble']}])['tagSet']
self.assertEqual(tags, [])
# Multiple filters (AND): match
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']},
{'name': 'value',
'value': ['quux']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz])
        # And we should fail on unsupported resource types
self.assertRaises(exception.InvalidParameterValue,
self.cloud.describe_tags,
self.context,
filter=[{'name': 'resource-type',
'value': ['instance', 'volume']}])
def test_resource_type_from_id(self):
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'i-12345'),
'instance')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'r-12345'),
'reservation')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'vol-12345'),
'volume')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'snap-12345'),
'snapshot')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ami-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ari-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'aki-12345'),
'image')
self.assertIsNone(
ec2utils.resource_type_from_id(self.context, 'x-12345'))
@mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
side_effect=lambda
ec2_volume_id: uuidutils.generate_uuid())
    def test_detach_volume_unattached_error(self, mock_ec2_vol_id_to_uuid):
# Validates that VolumeUnattached is raised if the volume doesn't
# have an instance_uuid value.
ec2_volume_id = 'vol-987654321'
with mock.patch.object(self.cloud.volume_api, 'get',
side_effect=lambda context, volume_id:
{'id': volume_id}) as mock_get:
self.assertRaises(exception.VolumeUnattached,
self.cloud.detach_volume,
self.context,
ec2_volume_id)
mock_get.assert_called_once_with(self.context, mock.ANY)
mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
class CloudTestCaseNeutronProxy(test.NoDBTestCase):
def setUp(self):
super(CloudTestCaseNeutronProxy, self).setUp()
cfg.CONF.set_override('security_group_api', 'neutron')
self.cloud = cloud.CloudController()
self.original_client = neutronv2.get_client
neutronv2.get_client = test_neutron.get_client
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
def tearDown(self):
neutronv2.get_client = self.original_client
test_neutron.get_client()._reset()
super(CloudTestCaseNeutronProxy, self).tearDown()
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[group_name])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_describe_security_groups_by_id(self):
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
neutron = test_neutron.get_client()
# Get id from neutron since cloud.create_security_group
# does not expose it.
search_opts = {'name': group_name}
groups = neutron.list_security_groups(
**search_opts)['security_groups']
result = self.cloud.describe_security_groups(self.context,
group_id=[groups[0]['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
| {
"content_hash": "83f4b9f94e8108a8e592cd11bcb80c78",
"timestamp": "",
"source": "github",
"line_count": 3182,
"max_line_length": 79,
"avg_line_length": 44.83689503456945,
"alnum_prop": 0.5196430949527234,
"repo_name": "srajag/nova",
"id": "d9ff15e2826fd85986f53e668173a10c76aaf583",
"size": "143496",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/api/ec2/test_cloud.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from toee import *
import char_editor
def CheckPrereq(attachee, classLevelled, abilityScoreRaised):
    # Many Shot, Point Blank Shot and Rapid Shot are required, either as standard feats or from the ranger class
if char_editor.has_feat(feat_manyshot) or char_editor.has_feat(feat_ranger_manyshot):
if char_editor.has_feat(feat_rapid_shot) or char_editor.has_feat(feat_ranger_rapid_shot):
if char_editor.has_feat(feat_point_blank_shot):
return 1
return 0
| {
"content_hash": "2d54af71ba3a5bb92d3e5deab0a9115f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 104,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.7672413793103449,
"repo_name": "GrognardsFromHell/TemplePlus",
"id": "223da01cc55aaa5786d5fb71a971e99e8920c4e2",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tpdatasrc/tpgamefiles/scr/feats/feat - Improved Rapid Shot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "629718"
},
{
"name": "C#",
"bytes": "167885"
},
{
"name": "C++",
"bytes": "10018792"
},
{
"name": "CMake",
"bytes": "91980"
},
{
"name": "CSS",
"bytes": "1292"
},
{
"name": "HLSL",
"bytes": "18884"
},
{
"name": "HTML",
"bytes": "433942"
},
{
"name": "PowerShell",
"bytes": "5374"
},
{
"name": "Python",
"bytes": "2850350"
}
],
"symlink_target": ""
} |
import sys
import binascii
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.component import Component, run
from autobahn.twisted.util import sleep
from autobahn.wamp.types import SubscribeOptions
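# The single command-line argument selects the test variant in the form
# AUTHMETHOD-TRANSPORT-SERIALIZER, e.g. "anonymous-websocket-json" (an
# illustrative combination); each part is validated below.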
AUTHMETHOD, TRANSPORT, SERIALIZER = sys.argv[1].split('-')
if AUTHMETHOD not in ['anonymous', 'ticket']:
raise Exception('invalid AUTHMETHOD "{}"'.format(AUTHMETHOD))
if TRANSPORT not in ['websocket', 'rawsocket']:
raise Exception('invalid TRANSPORT "{}"'.format(TRANSPORT))
if SERIALIZER not in ['cbor', 'msgpack', 'json', 'ubjson']:
    raise Exception('invalid SERIALIZER "{}"'.format(SERIALIZER))
if AUTHMETHOD == 'ticket':
AUTHENTICATION = {
'ticket': {
'authid': 'user1',
'ticket': 'secret1'
}
}
elif AUTHMETHOD == 'anonymous':
AUTHENTICATION = None
if TRANSPORT == 'websocket':
comp = Component(
transports=[
{
"type": "websocket",
"url": "ws://localhost:8080/ws",
"endpoint": {
"type": "tcp",
"host": "localhost",
"port": 8080,
},
"serializers": [SERIALIZER],
},
],
realm="realm1",
authentication=AUTHENTICATION
)
elif TRANSPORT == 'rawsocket':
comp = Component(
transports=[
{
"type": "rawsocket",
"url": "rs://localhost:8080",
"endpoint": {
"type": "tcp",
"host": "localhost",
"port": 8080,
},
"serializer": SERIALIZER,
},
],
realm="realm1",
authentication=AUTHENTICATION
)
@comp.on_join
@inlineCallbacks
def _(session, details):
print("joined: {}".format(details))
if details.authmethod == 'anonymous':
topic_name = "io.crossbar.demo.public."
else:
topic_name = "io.crossbar.demo."
def _foo(*args, **kwargs):
print("{}: {} {}".format(topic_name, args, kwargs))
assert 'foo' in kwargs and type(kwargs['foo']) == str and len(kwargs['foo']) == 22
assert 'baz' in kwargs and type(kwargs['baz']) == bytes and len(kwargs['baz']) == 10
assert binascii.a2b_hex(kwargs['foo'][2:]) == kwargs['baz']
yield session.subscribe(_foo, topic_name, options=SubscribeOptions(match='prefix'))
print("subscribed")
while session.is_connected():
print(".")
yield sleep(1)
if __name__ == "__main__":
run([comp], log_level='info')
| {
"content_hash": "4ad56c4a04e83b74835f65695bce1c7e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 92,
"avg_line_length": 30.694117647058825,
"alnum_prop": 0.5415868148715983,
"repo_name": "crossbario/crossbar-examples",
"id": "15004e6c3f116867206b24e3179622362784216a",
"size": "2609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "proxy/client-subscribe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "22931"
},
{
"name": "C++",
"bytes": "77209"
},
{
"name": "CSS",
"bytes": "216506"
},
{
"name": "Dockerfile",
"bytes": "1423"
},
{
"name": "Erlang",
"bytes": "16493"
},
{
"name": "HTML",
"bytes": "4701160"
},
{
"name": "Hack",
"bytes": "4082"
},
{
"name": "Java",
"bytes": "20795"
},
{
"name": "JavaScript",
"bytes": "2989112"
},
{
"name": "Jupyter Notebook",
"bytes": "335655"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "68685"
},
{
"name": "PHP",
"bytes": "45600"
},
{
"name": "PLSQL",
"bytes": "157154"
},
{
"name": "PLpgSQL",
"bytes": "5053"
},
{
"name": "Python",
"bytes": "856797"
},
{
"name": "SCSS",
"bytes": "58669"
},
{
"name": "Shell",
"bytes": "46285"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python import log as txlog
from scrapy.http import Request, Response
from scrapy.spider import BaseSpider
from scrapy.utils.request import request_fingerprint
from scrapy.contrib.pipeline.media import MediaPipeline
from scrapy.utils.signal import disconnect_all
from scrapy import signals
from scrapy import log
def _mocked_download_func(request, info):
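    # Stand-in for MediaPipeline's download function in these tests: the
    # canned response lives in request.meta['response']; callables are
    # invoked so a test can hand back a Deferred or raise lazily.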
response = request.meta.get('response')
return response() if callable(response) else response
class BaseMediaPipelineTestCase(unittest.TestCase):
pipeline_class = MediaPipeline
def setUp(self):
self.spider = BaseSpider('media.com')
self.pipe = self.pipeline_class(download_func=_mocked_download_func)
self.pipe.open_spider(self.spider)
self.info = self.pipe.spiderinfo
def tearDown(self):
for name, signal in vars(signals).items():
if not name.startswith('_'):
disconnect_all(signal)
def test_default_media_to_download(self):
request = Request('http://url')
assert self.pipe.media_to_download(request, self.info) is None
def test_default_get_media_requests(self):
item = dict(name='name')
assert self.pipe.get_media_requests(item, self.info) is None
def test_default_media_downloaded(self):
request = Request('http://url')
response = Response('http://url', body='')
assert self.pipe.media_downloaded(response, request, self.info) is response
def test_default_media_failed(self):
request = Request('http://url')
fail = Failure(Exception())
assert self.pipe.media_failed(fail, request, self.info) is fail
def test_default_item_completed(self):
item = dict(name='name')
assert self.pipe.item_completed([], item, self.info) is item
# Check that failures are logged by default
fail = Failure(Exception())
results = [(True, 1), (False, fail)]
events = []
txlog.addObserver(events.append)
new_item = self.pipe.item_completed(results, item, self.info)
txlog.removeObserver(events.append)
self.flushLoggedErrors()
assert new_item is item
assert len(events) == 1
assert events[0]['logLevel'] == log.ERROR
assert events[0]['failure'] is fail
# disable failure logging and check again
self.pipe.LOG_FAILED_RESULTS = False
events = []
txlog.addObserver(events.append)
new_item = self.pipe.item_completed(results, item, self.info)
txlog.removeObserver(events.append)
self.flushLoggedErrors()
assert new_item is item
assert len(events) == 0
@inlineCallbacks
def test_default_process_item(self):
item = dict(name='name')
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
class MockedMediaPipeline(MediaPipeline):
def __init__(self, *args, **kwargs):
super(MockedMediaPipeline, self).__init__(*args, **kwargs)
self._mockcalled = []
def download(self, request, info):
self._mockcalled.append('download')
return super(MockedMediaPipeline, self).download(request, info)
def media_to_download(self, request, info):
self._mockcalled.append('media_to_download')
if 'result' in request.meta:
return request.meta.get('result')
return super(MockedMediaPipeline, self).media_to_download(request, info)
def get_media_requests(self, item, info):
self._mockcalled.append('get_media_requests')
return item.get('requests')
def media_downloaded(self, response, request, info):
self._mockcalled.append('media_downloaded')
return super(MockedMediaPipeline, self).media_downloaded(response, request, info)
def media_failed(self, failure, request, info):
self._mockcalled.append('media_failed')
return super(MockedMediaPipeline, self).media_failed(failure, request, info)
def item_completed(self, results, item, info):
self._mockcalled.append('item_completed')
item = super(MockedMediaPipeline, self).item_completed(results, item, info)
item['results'] = results
return item
class MediaPipelineTestCase(BaseMediaPipelineTestCase):
pipeline_class = MockedMediaPipeline
@inlineCallbacks
def test_result_succeed(self):
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
rsp = Response('http://url1')
req = Request('http://url1', meta=dict(response=rsp), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_downloaded', 'request_callback', 'item_completed'])
@inlineCallbacks
def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
cb = lambda _: self.pipe._mockcalled.append('request_callback') or _
eb = lambda _: self.pipe._mockcalled.append('request_errback') or _
fail = Failure(Exception())
req = Request('http://url1', meta=dict(response=fail), callback=cb, errback=eb)
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(False, fail)])
self.assertEqual(self.pipe._mockcalled,
['get_media_requests', 'media_to_download',
'media_failed', 'request_errback', 'item_completed'])
@inlineCallbacks
def test_mix_of_success_and_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
fail = Failure(Exception())
req2 = Request('http://url2', meta=dict(response=fail))
item = dict(requests=[req1, req2])
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp1), (False, fail)])
m = self.pipe._mockcalled
# only once
self.assertEqual(m[0], 'get_media_requests') # first hook called
self.assertEqual(m.count('get_media_requests'), 1)
self.assertEqual(m.count('item_completed'), 1)
self.assertEqual(m[-1], 'item_completed') # last hook called
# twice, one per request
self.assertEqual(m.count('media_to_download'), 2)
# one to handle success and other for failure
self.assertEqual(m.count('media_downloaded'), 1)
self.assertEqual(m.count('media_failed'), 1)
@inlineCallbacks
def test_get_media_requests(self):
# returns single Request (without callback)
req = Request('http://url')
item = dict(requests=req) # pass a single item
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req) in self.info.downloaded
# returns iterable of Requests
req1 = Request('http://url1')
req2 = Request('http://url2')
item = dict(requests=iter([req1, req2]))
new_item = yield self.pipe.process_item(item, self.spider)
assert new_item is item
assert request_fingerprint(req1) in self.info.downloaded
assert request_fingerprint(req2) in self.info.downloaded
@inlineCallbacks
def test_results_are_cached_across_multiple_items(self):
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
item = dict(requests=req1)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertTrue(new_item is item)
self.assertEqual(new_item['results'], [(True, rsp1)])
# rsp2 is ignored, rsp1 must be in results because request fingerprints are the same
req2 = Request(req1.url, meta=dict(response=Response('http://donot.download.me')))
item = dict(requests=req2)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertTrue(new_item is item)
self.assertEqual(request_fingerprint(req1), request_fingerprint(req2))
self.assertEqual(new_item['results'], [(True, rsp1)])
@inlineCallbacks
def test_results_are_cached_for_requests_of_single_item(self):
rsp1 = Response('http://url1')
req1 = Request('http://url1', meta=dict(response=rsp1))
req2 = Request(req1.url, meta=dict(response=Response('http://donot.download.me')))
item = dict(requests=[req1, req2])
new_item = yield self.pipe.process_item(item, self.spider)
self.assertTrue(new_item is item)
self.assertEqual(new_item['results'], [(True, rsp1), (True, rsp1)])
@inlineCallbacks
def test_wait_if_request_is_downloading(self):
def _check_downloading(response):
fp = request_fingerprint(req1)
self.assertTrue(fp in self.info.downloading)
self.assertTrue(fp in self.info.waiting)
self.assertTrue(fp not in self.info.downloaded)
self.assertEqual(len(self.info.waiting[fp]), 2)
return response
rsp1 = Response('http://url')
def rsp1_func():
dfd = Deferred().addCallback(_check_downloading)
reactor.callLater(.1, dfd.callback, rsp1)
return dfd
def rsp2_func():
self.fail('it must cache rsp1 result and must not try to redownload')
req1 = Request('http://url', meta=dict(response=rsp1_func))
req2 = Request(req1.url, meta=dict(response=rsp2_func))
item = dict(requests=[req1, req2])
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, rsp1), (True, rsp1)])
@inlineCallbacks
def test_use_media_to_download_result(self):
req = Request('http://url', meta=dict(result='ITSME', response=self.fail))
item = dict(requests=req)
new_item = yield self.pipe.process_item(item, self.spider)
self.assertEqual(new_item['results'], [(True, 'ITSME')])
        self.assertEqual(self.pipe._mockcalled,
                ['get_media_requests', 'media_to_download', 'item_completed'])
| {
"content_hash": "93a1f2f5441ad934985016c03aada2c4",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 92,
"avg_line_length": 41.66015625,
"alnum_prop": 0.6470698546647914,
"repo_name": "nett55/scrapy",
"id": "443894feb828ae8417b7026ba00afd5b02564f9f",
"size": "10665",
"binary": false,
"copies": "3",
"ref": "refs/heads/0.20",
"path": "scrapy/tests/test_pipeline_media.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "2008"
},
{
"name": "HTML",
"bytes": "1809"
},
{
"name": "Python",
"bytes": "1274934"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
} |
from .dev_settings import * # noqa
# These endpoints will throw an error on the django debug panel
EXCLUDED_DEBUG_URLS = [
"/content/storage",
]
DEBUG_PANEL_ACTIVE = True
def custom_show_toolbar(request):
return not any(
request.path.startswith(url) for url in EXCLUDED_DEBUG_URLS
) # noqa F405
# if debug_panel exists, add it to our INSTALLED_APPS
INSTALLED_APPS += ("debug_panel", "debug_toolbar", "pympler") # noqa F405
MIDDLEWARE += ( # noqa F405
"contentcuration.debug.middleware.CustomDebugPanelMiddleware",
)
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": custom_show_toolbar,
}
| {
"content_hash": "eeb01c10842a39ff1d312f1e16f5b0b5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 26.125,
"alnum_prop": 0.7049441786283892,
"repo_name": "DXCanas/content-curation",
"id": "79f9ddac6ee9fef94e49fc7b9c72703b2f4da237",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "contentcuration/contentcuration/debug_panel_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "173955"
},
{
"name": "Dockerfile",
"bytes": "2215"
},
{
"name": "HTML",
"bytes": "503467"
},
{
"name": "JavaScript",
"bytes": "601189"
},
{
"name": "Makefile",
"bytes": "3409"
},
{
"name": "Python",
"bytes": "813881"
},
{
"name": "Shell",
"bytes": "6970"
},
{
"name": "Smarty",
"bytes": "6584"
},
{
"name": "Vue",
"bytes": "21539"
}
],
"symlink_target": ""
} |
"""
Low-level LAPACK functions (:mod:`scipy.linalg.lapack`)
=======================================================
This module contains low-level functions from the LAPACK library.
.. versionadded:: 0.12.0
.. note::
    The common ``overwrite_<>`` option in many routines allows the
    input arrays to be overwritten to avoid extra memory allocation.
    However, this requires that the array satisfy two conditions:
    its memory order and its data type must match exactly the
    order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
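As an illustrative sketch (the arrays and the choice of ``sgetrf`` here are
arbitrary), a single-precision routine can only reuse the caller's memory
when both the dtype and the memory order already match::

    import numpy as np
    from scipy.linalg import lapack
    a32 = np.asfortranarray(np.eye(4), dtype=np.float32)
    lu, piv, info = lapack.sgetrf(a32, overwrite_a=True)     # may reuse a32
    a64 = np.eye(4)                                          # float64, C order
    lu2, piv2, info2 = lapack.sgetrf(a64, overwrite_a=True)  # works on a copy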
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_lapack_funcs
All functions
-------------
.. autosummary::
:toctree: generated/
sgbsv
dgbsv
cgbsv
zgbsv
sgbtrf
dgbtrf
cgbtrf
zgbtrf
sgbtrs
dgbtrs
cgbtrs
zgbtrs
sgebal
dgebal
cgebal
zgebal
sgecon
dgecon
cgecon
zgecon
sgeequ
dgeequ
cgeequ
zgeequ
sgeequb
dgeequb
cgeequb
zgeequb
sgees
dgees
cgees
zgees
sgeev
dgeev
cgeev
zgeev
sgeev_lwork
dgeev_lwork
cgeev_lwork
zgeev_lwork
sgehrd
dgehrd
cgehrd
zgehrd
sgehrd_lwork
dgehrd_lwork
cgehrd_lwork
zgehrd_lwork
sgejsv
dgejsv
sgels
dgels
cgels
zgels
sgels_lwork
dgels_lwork
cgels_lwork
zgels_lwork
sgelsd
dgelsd
cgelsd
zgelsd
sgelsd_lwork
dgelsd_lwork
cgelsd_lwork
zgelsd_lwork
sgelss
dgelss
cgelss
zgelss
sgelss_lwork
dgelss_lwork
cgelss_lwork
zgelss_lwork
sgelsy
dgelsy
cgelsy
zgelsy
sgelsy_lwork
dgelsy_lwork
cgelsy_lwork
zgelsy_lwork
sgeqp3
dgeqp3
cgeqp3
zgeqp3
sgeqrf
dgeqrf
cgeqrf
zgeqrf
sgeqrf_lwork
dgeqrf_lwork
cgeqrf_lwork
zgeqrf_lwork
sgeqrfp
dgeqrfp
cgeqrfp
zgeqrfp
sgeqrfp_lwork
dgeqrfp_lwork
cgeqrfp_lwork
zgeqrfp_lwork
sgerqf
dgerqf
cgerqf
zgerqf
sgesdd
dgesdd
cgesdd
zgesdd
sgesdd_lwork
dgesdd_lwork
cgesdd_lwork
zgesdd_lwork
sgesv
dgesv
cgesv
zgesv
sgesvd
dgesvd
cgesvd
zgesvd
sgesvd_lwork
dgesvd_lwork
cgesvd_lwork
zgesvd_lwork
sgesvx
dgesvx
cgesvx
zgesvx
sgetrf
dgetrf
cgetrf
zgetrf
sgetc2
dgetc2
cgetc2
zgetc2
sgetri
dgetri
cgetri
zgetri
sgetri_lwork
dgetri_lwork
cgetri_lwork
zgetri_lwork
sgetrs
dgetrs
cgetrs
zgetrs
sgesc2
dgesc2
cgesc2
zgesc2
sgges
dgges
cgges
zgges
sggev
dggev
cggev
zggev
sgglse
dgglse
cgglse
zgglse
sgglse_lwork
dgglse_lwork
cgglse_lwork
zgglse_lwork
sgtsv
dgtsv
cgtsv
zgtsv
sgtsvx
dgtsvx
cgtsvx
zgtsvx
chbevd
zhbevd
chbevx
zhbevx
checon
zhecon
cheequb
zheequb
cheev
zheev
cheev_lwork
zheev_lwork
cheevd
zheevd
cheevd_lwork
zheevd_lwork
cheevr
zheevr
cheevr_lwork
zheevr_lwork
cheevx
zheevx
cheevx_lwork
zheevx_lwork
chegst
zhegst
chegv
zhegv
chegv_lwork
zhegv_lwork
chegvd
zhegvd
chegvx
zhegvx
chegvx_lwork
zhegvx_lwork
chesv
zhesv
chesv_lwork
zhesv_lwork
chesvx
zhesvx
chesvx_lwork
zhesvx_lwork
chetrd
zhetrd
chetrd_lwork
zhetrd_lwork
chetrf
zhetrf
chetrf_lwork
zhetrf_lwork
chfrk
zhfrk
slamch
dlamch
slange
dlange
clange
zlange
slarf
dlarf
clarf
zlarf
slarfg
dlarfg
clarfg
zlarfg
slartg
dlartg
clartg
zlartg
slasd4
dlasd4
slaswp
dlaswp
claswp
zlaswp
slauum
dlauum
clauum
zlauum
sorcsd
dorcsd
sorcsd_lwork
dorcsd_lwork
sorghr
dorghr
sorghr_lwork
dorghr_lwork
sorgqr
dorgqr
sorgrq
dorgrq
sormqr
dormqr
sormrz
dormrz
sormrz_lwork
dormrz_lwork
spbsv
dpbsv
cpbsv
zpbsv
spbtrf
dpbtrf
cpbtrf
zpbtrf
spbtrs
dpbtrs
cpbtrs
zpbtrs
spftrf
dpftrf
cpftrf
zpftrf
spftri
dpftri
cpftri
zpftri
spftrs
dpftrs
cpftrs
zpftrs
spocon
dpocon
cpocon
zpocon
spstrf
dpstrf
cpstrf
zpstrf
spstf2
dpstf2
cpstf2
zpstf2
sposv
dposv
cposv
zposv
sposvx
dposvx
cposvx
zposvx
spotrf
dpotrf
cpotrf
zpotrf
spotri
dpotri
cpotri
zpotri
spotrs
dpotrs
cpotrs
zpotrs
sppcon
dppcon
cppcon
zppcon
sppsv
dppsv
cppsv
zppsv
spptrf
dpptrf
cpptrf
zpptrf
spptri
dpptri
cpptri
zpptri
spptrs
dpptrs
cpptrs
zpptrs
sptsv
dptsv
cptsv
zptsv
sptsvx
dptsvx
cptsvx
zptsvx
spttrf
dpttrf
cpttrf
zpttrf
spttrs
dpttrs
cpttrs
zpttrs
spteqr
dpteqr
cpteqr
zpteqr
crot
zrot
ssbev
dsbev
ssbevd
dsbevd
ssbevx
dsbevx
ssfrk
dsfrk
sstebz
dstebz
sstein
dstein
sstemr
dstemr
sstemr_lwork
dstemr_lwork
ssterf
dsterf
sstev
dstev
ssycon
dsycon
csycon
zsycon
ssyconv
dsyconv
csyconv
zsyconv
ssyequb
dsyequb
csyequb
zsyequb
ssyev
dsyev
ssyev_lwork
dsyev_lwork
ssyevd
dsyevd
ssyevd_lwork
dsyevd_lwork
ssyevr
dsyevr
ssyevr_lwork
dsyevr_lwork
ssyevx
dsyevx
ssyevx_lwork
dsyevx_lwork
ssygst
dsygst
ssygv
dsygv
ssygv_lwork
dsygv_lwork
ssygvd
dsygvd
ssygvx
dsygvx
ssygvx_lwork
dsygvx_lwork
ssysv
dsysv
csysv
zsysv
ssysv_lwork
dsysv_lwork
csysv_lwork
zsysv_lwork
ssysvx
dsysvx
csysvx
zsysvx
ssysvx_lwork
dsysvx_lwork
csysvx_lwork
zsysvx_lwork
ssytf2
dsytf2
csytf2
zsytf2
ssytrd
dsytrd
ssytrd_lwork
dsytrd_lwork
ssytrf
dsytrf
csytrf
zsytrf
ssytrf_lwork
dsytrf_lwork
csytrf_lwork
zsytrf_lwork
stbtrs
dtbtrs
ctbtrs
ztbtrs
stfsm
dtfsm
ctfsm
ztfsm
stfttp
dtfttp
ctfttp
ztfttp
stfttr
dtfttr
ctfttr
ztfttr
stgexc
dtgexc
ctgexc
ztgexc
stgsen
dtgsen
ctgsen
ztgsen
stgsen_lwork
dtgsen_lwork
ctgsen_lwork
ztgsen_lwork
stpttf
dtpttf
ctpttf
ztpttf
stpttr
dtpttr
ctpttr
ztpttr
strexc
dtrexc
ctrexc
ztrexc
strsen
dtrsen
ctrsen
ztrsen
strsen_lwork
dtrsen_lwork
ctrsen_lwork
ztrsen_lwork
strsyl
dtrsyl
ctrsyl
ztrsyl
strtri
dtrtri
ctrtri
ztrtri
strtrs
dtrtrs
ctrtrs
ztrtrs
strttf
dtrttf
ctrttf
ztrttf
strttp
dtrttp
ctrttp
ztrttp
stzrzf
dtzrzf
ctzrzf
ztzrzf
stzrzf_lwork
dtzrzf_lwork
ctzrzf_lwork
ztzrzf_lwork
cunghr
zunghr
cunghr_lwork
zunghr_lwork
cungqr
zungqr
cungrq
zungrq
cunmqr
zunmqr
sgeqrt
dgeqrt
cgeqrt
zgeqrt
sgemqrt
dgemqrt
cgemqrt
zgemqrt
sgttrf
dgttrf
cgttrf
zgttrf
sgttrs
dgttrs
cgttrs
zgttrs
stpqrt
dtpqrt
ctpqrt
ztpqrt
stpmqrt
dtpmqrt
ctpmqrt
ztpmqrt
cuncsd
zuncsd
cuncsd_lwork
zuncsd_lwork
cunmrz
zunmrz
cunmrz_lwork
zunmrz_lwork
ilaver
"""
#
# Author: Pearu Peterson, March 2002
#
import numpy as _np
from .blas import _get_funcs, _memoize_get_funcs
from scipy.linalg import _flapack
from re import compile as regex_compile
try:
from scipy.linalg import _clapack
except ImportError:
_clapack = None
try:
from scipy.linalg import _flapack_64
HAS_ILP64 = True
except ImportError:
HAS_ILP64 = False
_flapack_64 = None
# Expose all functions (only flapack --- clapack is an implementation detail)
empty_module = None
from scipy.linalg._flapack import *
del empty_module
__all__ = ['get_lapack_funcs']
# some convenience alias for complex functions
_lapack_alias = {
'corghr': 'cunghr', 'zorghr': 'zunghr',
'corghr_lwork': 'cunghr_lwork', 'zorghr_lwork': 'zunghr_lwork',
'corgqr': 'cungqr', 'zorgqr': 'zungqr',
'cormqr': 'cunmqr', 'zormqr': 'zunmqr',
'corgrq': 'cungrq', 'zorgrq': 'zungrq',
}
# Place guards against docstring rendering issues with special characters
p1 = regex_compile(r'with bounds (?P<b>.*?)( and (?P<s>.*?) storage){0,1}\n')
p2 = regex_compile(r'Default: (?P<d>.*?)\n')
def backtickrepl(m):
if m.group('s'):
return ('with bounds ``{}`` with ``{}`` storage\n'
''.format(m.group('b'), m.group('s')))
else:
return 'with bounds ``{}``\n'.format(m.group('b'))
for routine in [ssyevr, dsyevr, cheevr, zheevr,
ssyevx, dsyevx, cheevx, zheevx,
ssygvd, dsygvd, chegvd, zhegvd]:
if routine.__doc__:
routine.__doc__ = p1.sub(backtickrepl, routine.__doc__)
routine.__doc__ = p2.sub('Default ``\\1``\n', routine.__doc__)
else:
continue
del regex_compile, p1, p2, backtickrepl
@_memoize_get_funcs
def get_lapack_funcs(names, arrays=(), dtype=None, ilp64=False):
"""Return available LAPACK function objects from names.
Arrays are used to determine the optimal prefix of LAPACK routines.
Parameters
----------
names : str or sequence of str
Name(s) of LAPACK functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of LAPACK
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
ilp64 : {True, False, 'preferred'}, optional
Whether to return ILP64 routine variant.
Choosing 'preferred' returns ILP64 routine if available, and
otherwise the 32-bit routine. Default: False
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In LAPACK, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy
types {float32, float64, complex64, complex128} respectively, and
are stored in attribute ``typecode`` of the returned functions.
Examples
--------
    Suppose we would like to use the '?lange' routine, which computes the
    selected norm of an array. We pass our array in order to get the correct
    'lange' flavor.
>>> import numpy as np
>>> import scipy.linalg as LA
>>> rng = np.random.default_rng()
>>> a = rng.random((3,2))
>>> x_lange = LA.get_lapack_funcs('lange', (a,))
>>> x_lange.typecode
'd'
>>> x_lange = LA.get_lapack_funcs('lange',(a*1j,))
>>> x_lange.typecode
'z'
    Several LAPACK routines work best when their internal WORK array has
    the optimal size (big enough for fast computation and small enough to
    avoid wasting memory). This size is determined by a dedicated query to
    the function, which is often wrapped as a standalone function and
    commonly denoted as ``###_lwork``. Below is an example for ``?sysv``
>>> a = rng.random((1000, 1000))
>>> b = rng.random((1000, 1)) * 1j
>>> # We pick up zsysv and zsysv_lwork due to b array
... xsysv, xlwork = LA.get_lapack_funcs(('sysv', 'sysv_lwork'), (a, b))
>>> opt_lwork, _ = xlwork(a.shape[0]) # returns a complex for 'z' prefix
>>> udut, ipiv, x, info = xsysv(a, b, lwork=int(opt_lwork.real))
"""
if isinstance(ilp64, str):
if ilp64 == 'preferred':
ilp64 = HAS_ILP64
else:
raise ValueError("Invalid value for 'ilp64'")
if not ilp64:
return _get_funcs(names, arrays, dtype,
"LAPACK", _flapack, _clapack,
"flapack", "clapack", _lapack_alias,
ilp64=False)
else:
if not HAS_ILP64:
raise RuntimeError("LAPACK ILP64 routine requested, but Scipy "
"compiled only with 32-bit BLAS")
return _get_funcs(names, arrays, dtype,
"LAPACK", _flapack_64, None,
"flapack_64", None, _lapack_alias,
ilp64=True)
_int32_max = _np.iinfo(_np.int32).max
_int64_max = _np.iinfo(_np.int64).max
def _compute_lwork(routine, *args, **kwargs):
"""
Round floating-point lwork returned by lapack to integer.
Several LAPACK routines compute optimal values for LWORK, which
they return in a floating-point variable. However, for large
values of LWORK, single-precision floating point is not sufficient
to hold the exact value --- some LAPACK versions (<= 3.5.0 at
least) truncate the returned integer to single precision and in
some cases this can be smaller than the required value.
Examples
--------
>>> from scipy.linalg import lapack
>>> n = 5000
>>> s_r, s_lw = lapack.get_lapack_funcs(('sysvx', 'sysvx_lwork'))
>>> lwork = lapack._compute_lwork(s_lw, n)
>>> lwork
32000
"""
dtype = getattr(routine, 'dtype', None)
int_dtype = getattr(routine, 'int_dtype', None)
ret = routine(*args, **kwargs)
if ret[-1] != 0:
raise ValueError("Internal work array size computation failed: "
"%d" % (ret[-1],))
if len(ret) == 2:
return _check_work_float(ret[0].real, dtype, int_dtype)
else:
return tuple(_check_work_float(x.real, dtype, int_dtype)
for x in ret[:-1])
def _check_work_float(value, dtype, int_dtype):
"""
Convert LAPACK-returned work array size float to integer,
carefully for single-precision types.
"""
if dtype == _np.float32 or dtype == _np.complex64:
# Single-precision routine -- take next fp value to work
# around possible truncation in LAPACK code
value = _np.nextafter(value, _np.inf, dtype=_np.float32)
value = int(value)
if int_dtype.itemsize == 4:
if value < 0 or value > _int32_max:
raise ValueError("Too large work array required -- computation "
"cannot be performed with standard 32-bit"
" LAPACK.")
elif int_dtype.itemsize == 8:
if value < 0 or value > _int64_max:
raise ValueError("Too large work array required -- computation"
" cannot be performed with standard 64-bit"
" LAPACK.")
return value
| {
"content_hash": "f0e5a1c58ec9048a3019e25606708b97",
"timestamp": "",
"source": "github",
"line_count": 1036,
"max_line_length": 77,
"avg_line_length": 15.083011583011583,
"alnum_prop": 0.6147446563419942,
"repo_name": "anntzer/scipy",
"id": "3da8a03ce5a675bc40015f888eee44bbc6d55d9b",
"size": "15626",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "scipy/linalg/lapack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4615958"
},
{
"name": "C++",
"bytes": "961697"
},
{
"name": "Cython",
"bytes": "1059655"
},
{
"name": "Dockerfile",
"bytes": "10630"
},
{
"name": "Fortran",
"bytes": "5212087"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "151326"
},
{
"name": "Python",
"bytes": "15648288"
},
{
"name": "R",
"bytes": "3059"
},
{
"name": "Shell",
"bytes": "17744"
},
{
"name": "Starlark",
"bytes": "1757"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import with_statement
from mock import Mock
from celery.concurrency.threads import NullDict, TaskPool, apply_target
from celery.tests.utils import Case, mask_modules, mock_module
class test_NullDict(Case):
def test_setitem(self):
x = NullDict()
x["foo"] = 1
with self.assertRaises(KeyError):
x["foo"]
class test_TaskPool(Case):
def test_without_threadpool(self):
with mask_modules("threadpool"):
with self.assertRaises(ImportError):
TaskPool()
def test_with_threadpool(self):
with mock_module("threadpool"):
x = TaskPool()
self.assertTrue(x.ThreadPool)
self.assertTrue(x.WorkRequest)
def test_on_start(self):
with mock_module("threadpool"):
x = TaskPool()
x.on_start()
self.assertTrue(x._pool)
self.assertIsInstance(x._pool.workRequests, NullDict)
def test_on_stop(self):
with mock_module("threadpool"):
x = TaskPool()
x.on_start()
x.on_stop()
x._pool.dismissWorkers.assert_called_with(x.limit, do_join=True)
def test_on_apply(self):
with mock_module("threadpool"):
x = TaskPool()
x.on_start()
callback = Mock()
accept_callback = Mock()
target = Mock()
req = x.on_apply(target, args=(1, 2), kwargs={"a": 10},
callback=callback, accept_callback=accept_callback)
x.WorkRequest.assert_called_with(apply_target, (
target, (1, 2), {"a": 10}, callback, accept_callback))
x._pool.putRequest.assert_called_with(req)
x._pool._results_queue.queue.clear.assert_called_with()
| {
"content_hash": "07c7de5d1cf856b4a0b5a5141bf60b85",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 30.55,
"alnum_prop": 0.5777414075286416,
"repo_name": "ask/celery",
"id": "2b12e18aff2a29ff252a768ca2952212db116ad9",
"size": "1833",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "celery/tests/concurrency/test_threads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1651"
},
{
"name": "Python",
"bytes": "1369873"
},
{
"name": "Racket",
"bytes": "732"
},
{
"name": "Shell",
"bytes": "38949"
}
],
"symlink_target": ""
} |
import datetime
import uuid
from django.utils import datetime_safe
from keystoneauth1.access import access
from keystoneauth1.access import service_catalog
from keystoneclient.v3 import domains
from keystoneclient.v3 import projects
from keystoneclient.v3 import roles
from keystoneclient.v3 import users
import requests
class TestDataContainer(object):
"""Arbitrary holder for test data in an object-oriented fashion."""
pass
class TestResponse(requests.Response):
"""Class used to wrap requests.Response.
It also provides some convenience to initialize with a dict.
"""
def __init__(self, data):
self._text = None
super(TestResponse, self).__init__()
if isinstance(data, dict):
self.status_code = data.get('status_code', 200)
self.headers = data.get('headers', None)
# Fake the text attribute to streamline Response creation
self._text = data.get('text', None)
else:
self.status_code = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def text(self):
return self._text
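# Illustrative sketch (the values below are arbitrary, not real test data):
#   resp = TestResponse({'status_code': 401, 'headers': {}, 'text': 'denied'})
#   resp.status_code  # -> 401
#   resp.text         # -> 'denied'
# Passing a plain integer, e.g. TestResponse(204), sets only the status code.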
def generate_test_data(service_providers=False, endpoint='localhost'):
    '''Builds a set of test data as returned by Keystone V3.'''
test_data = TestDataContainer()
keystone_service = {
'type': 'identity',
'id': uuid.uuid4().hex,
'endpoints': [
{
'url': 'http://admin.%s:5000/v3' % endpoint,
'region': 'RegionOne',
'interface': 'admin',
'id': uuid.uuid4().hex,
},
{
'url': 'http://internal.%s:5000/v3' % endpoint,
'region': 'RegionOne',
'interface': 'internal',
'id': uuid.uuid4().hex
},
{
'url': 'http://public.%s:5000/v3' % endpoint,
'region': 'RegionOne',
'interface': 'public',
'id': uuid.uuid4().hex
}
]
}
# Domains
domain_dict = {'id': uuid.uuid4().hex,
'name': 'domain',
'description': '',
'enabled': True}
test_data.domain = domains.Domain(domains.DomainManager(None),
domain_dict, loaded=True)
# Users
user_dict = {'id': uuid.uuid4().hex,
'name': 'gabriel',
'email': '[email protected]',
'password': 'swordfish',
'domain_id': domain_dict['id'],
'token': '',
'enabled': True}
test_data.user = users.User(users.UserManager(None),
user_dict, loaded=True)
# Projects
project_dict_1 = {'id': uuid.uuid4().hex,
'name': 'tenant_one',
'description': '',
'domain_id': domain_dict['id'],
'enabled': True}
project_dict_2 = {'id': uuid.uuid4().hex,
'name': 'tenant_two',
'description': '',
'domain_id': domain_dict['id'],
'enabled': False}
test_data.project_one = projects.Project(projects.ProjectManager(None),
project_dict_1,
loaded=True)
test_data.project_two = projects.Project(projects.ProjectManager(None),
project_dict_2,
loaded=True)
# Roles
role_dict = {'id': uuid.uuid4().hex,
'name': 'Member'}
test_data.role = roles.Role(roles.RoleManager, role_dict)
nova_service = {
'type': 'compute',
'id': uuid.uuid4().hex,
'endpoints': [
{
'url': ('http://nova-admin.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionOne',
'interface': 'admin',
'id': uuid.uuid4().hex,
},
{
'url': ('http://nova-internal.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionOne',
'interface': 'internal',
'id': uuid.uuid4().hex
},
{
'url': ('http://nova-public.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionOne',
'interface': 'public',
'id': uuid.uuid4().hex
},
{
'url': ('http://nova2-admin.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionTwo',
'interface': 'admin',
'id': uuid.uuid4().hex,
},
{
'url': ('http://nova2-internal.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionTwo',
'interface': 'internal',
'id': uuid.uuid4().hex
},
{
'url': ('http://nova2-public.%s:8774/v2.0/%s'
% (endpoint, project_dict_1['id'])),
'region': 'RegionTwo',
'interface': 'public',
'id': uuid.uuid4().hex
}
]
}
# Tokens
tomorrow = datetime_safe.datetime.now() + datetime.timedelta(days=1)
expiration = datetime_safe.datetime.isoformat(tomorrow)
auth_token = uuid.uuid4().hex
auth_response_headers = {
'X-Subject-Token': auth_token
}
auth_response = TestResponse({
"headers": auth_response_headers
})
scoped_token_dict = {
'token': {
'methods': ['password'],
'expires_at': expiration,
'project': {
'id': project_dict_1['id'],
'name': project_dict_1['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
}
},
'user': {
'id': user_dict['id'],
'name': user_dict['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
}
},
'roles': [role_dict],
'catalog': [keystone_service, nova_service]
}
}
sp_list = None
if service_providers:
test_data.sp_auth_url = 'http://service_provider_endp:5000/v3'
test_data.service_provider_id = 'k2kserviceprovider'
# The access info for the identity provider
# should return a list of service providers
sp_list = [
{'auth_url': test_data.sp_auth_url,
'id': test_data.service_provider_id,
'sp_url': 'https://k2kserviceprovider/sp_url'}
]
scoped_token_dict['token']['service_providers'] = sp_list
test_data.scoped_access_info = access.create(
resp=auth_response,
body=scoped_token_dict
)
domain_token_dict = {
'token': {
'methods': ['password'],
'expires_at': expiration,
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name'],
},
'user': {
'id': user_dict['id'],
'name': user_dict['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
}
},
'roles': [role_dict],
'catalog': [keystone_service, nova_service]
}
}
test_data.domain_scoped_access_info = access.create(
resp=auth_response,
body=domain_token_dict
)
unscoped_token_dict = {
'token': {
'methods': ['password'],
'expires_at': expiration,
'user': {
'id': user_dict['id'],
'name': user_dict['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
}
},
'catalog': [keystone_service]
}
}
if service_providers:
unscoped_token_dict['token']['service_providers'] = sp_list
test_data.unscoped_access_info = access.create(
resp=auth_response,
body=unscoped_token_dict
)
# Service Catalog
test_data.service_catalog = service_catalog.ServiceCatalogV3(
[keystone_service, nova_service])
# federated user
federated_scoped_token_dict = {
'token': {
'methods': ['password'],
'expires_at': expiration,
'project': {
'id': project_dict_1['id'],
'name': project_dict_1['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
}
},
'user': {
'id': user_dict['id'],
'name': user_dict['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
},
'OS-FEDERATION': {
'identity_provider': 'ACME',
'protocol': 'OIDC',
'groups': [
{'id': uuid.uuid4().hex},
{'id': uuid.uuid4().hex}
]
}
},
'roles': [role_dict],
'catalog': [keystone_service, nova_service]
}
}
test_data.federated_scoped_access_info = access.create(
resp=auth_response,
body=federated_scoped_token_dict
)
federated_unscoped_token_dict = {
'token': {
'methods': ['password'],
'expires_at': expiration,
'user': {
'id': user_dict['id'],
'name': user_dict['name'],
'domain': {
'id': domain_dict['id'],
'name': domain_dict['name']
},
'OS-FEDERATION': {
'identity_provider': 'ACME',
'protocol': 'OIDC',
'groups': [
{'id': uuid.uuid4().hex},
{'id': uuid.uuid4().hex}
]
}
},
'catalog': [keystone_service]
}
}
test_data.federated_unscoped_access_info = access.create(
resp=auth_response,
body=federated_unscoped_token_dict
)
return test_data
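# Hedged usage sketch (not part of the original module): a test typically
# pulls what it needs off the returned container, e.g.
#   data = generate_test_data(service_providers=True)
#   access_info = data.scoped_access_info
#   project = data.project_one
# The attribute names above are the ones assigned inside this function.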
| {
"content_hash": "4f93ad76315aacd04653c3def70e31d7",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 75,
"avg_line_length": 32.03225806451613,
"alnum_prop": 0.43916506454270804,
"repo_name": "NeCTAR-RC/horizon",
"id": "81429efbf84f486b9a4363a94600fb0d8519ba7c",
"size": "11469",
"binary": false,
"copies": "2",
"ref": "refs/heads/nectar/train",
"path": "openstack_auth/tests/data_v3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "598098"
},
{
"name": "JavaScript",
"bytes": "2474550"
},
{
"name": "Python",
"bytes": "5323984"
},
{
"name": "SCSS",
"bytes": "132603"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
} |
'''This module exposes the POST bug that Eric Moritz experienced,
where smisk segfaults.
:See: Fixed in 77188bce80d5 <http://hg.hunch.se/smisk/diff/77188bce80d5/src/Stream.c>
'''
from smisk import wsgi
import smisk.core
from StringIO import StringIO
def safe_copyfileobj(fsrc, fdst, length=16*1024, size=0):
'''
A version of shutil.copyfileobj that will not read more than 'size' bytes.
This makes it safe from clients sending more than CONTENT_LENGTH bytes of
data in the body.
'''
if not size:
return
while size > 0:
buf = fsrc.read(min(length, size))
if not buf:
break
fdst.write(buf)
size -= len(buf)
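# Minimal usage sketch (hypothetical in-memory streams, not from the original):
#   src, dst = StringIO("x" * 1000), StringIO()
#   safe_copyfileobj(src, dst, size=16)   # copies at most 16 bytes
#   assert len(dst.getvalue()) == 16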
# I think this is the offender, taken from Django's WSGIRequest object in
# django.core.handlers.wsgi
def _get_raw_post_data(environ):
buf = StringIO()
try:
# CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
content_length = int(environ.get('CONTENT_LENGTH', 0))
except ValueError: # if CONTENT_LENGTH was empty string or not an integer
content_length = 0
if content_length > 0:
safe_copyfileobj(environ['wsgi.input'], buf,
size=content_length)
_raw_post_data = buf.getvalue()
buf.close()
return _raw_post_data
def WSGIPostTest(environ, start_request):
if environ['REQUEST_METHOD'] == 'GET':
fh = file("./html/test_POST.html")
lines = fh.readlines()
fh.close()
start_request("200 OK", [])
return lines
elif environ['REQUEST_METHOD'] == 'POST':
raw_post_data = _get_raw_post_data(environ)
start_request("200 OK", [])
return [raw_post_data]
smisk.core.bind("127.0.0.1:3030")
wsgi.Application(WSGIPostTest).run()
| {
"content_hash": "84b87377e7393c5dacbe3e20e93c21d4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 87,
"avg_line_length": 30.896551724137932,
"alnum_prop": 0.6389508928571429,
"repo_name": "rsms/smisk",
"id": "1ba89b2f9f519b2d9532789da1dc007acd51829e",
"size": "1792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/wsgi/test_POST.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "216703"
},
{
"name": "Python",
"bytes": "435347"
},
{
"name": "Shell",
"bytes": "10500"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Sharecoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist freshly created"
| {
"content_hash": "000877d9f3a73edc98449b658efead6b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.291666666666668,
"alnum_prop": 0.7099236641221374,
"repo_name": "FrancoisJ/ShareCoin",
"id": "92d89ade32b192def287488025236695489a06a8",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "92422"
},
{
"name": "C++",
"bytes": "2554534"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69719"
},
{
"name": "Shell",
"bytes": "13173"
},
{
"name": "TypeScript",
"bytes": "5240524"
}
],
"symlink_target": ""
} |
import ConfigParser
import var
import irc
# Reading from ini files.
# Return dictionary of option:data.
def fill_dict (filename, section, raw_path = False):
if filename.startswith("ini/") or raw_path:
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(filepath)
rd_dict = {}
if config.has_section(section):
for option in config.options(section):
rd_dict[option.replace("~", "[")] = config.get(section, option).split("\n")
return rd_dict
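# Illustrative sketch (file name and contents are made up): if
# ini/<server>/links.ini contains
#   [links]
#   python = https://python.org
# then fill_dict("links.ini", "links") returns
#   {"python": ["https://python.org"]}
# because each option's data is split on newlines into a list.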
# Return list of lines in a file without "\n" at the end.
def fill_list (filename, raw_path = False):
if filename.startswith("ini/") or raw_path:
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
with open(filepath, "a+") as list_file:
rd_list = [line.strip() for line in list_file]
return rd_list
# Making changes to ini files.
# Set an option inside a section on a config(ini) file.
def add_to_ini (section, option, data, filename, raw_path = False):
if filename.startswith("ini/") or raw_path:
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
option = option.replace("[", "~")
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(filepath)
# Check if the section is present and if not, create it.
if not config.has_section(section):
config.add_section(section)
if data:
config.set(section, option, data)
else:
remove_from_ini(section, option, filepath, raw_path = raw_path)
return
with open(filepath, "wb") as ini_file:
config.write(ini_file)
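# Illustrative sketch (section, option and file names are made up):
#   add_to_ini("hello", "deskbot", "Hi there!", "greetings.ini")
# writes the following under ini/<server>/greetings.ini:
#   [hello]
#   deskbot = Hi there!
# Passing empty data removes the option instead (see remove_from_ini below).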
# Remove option from a config(ini) file.
def remove_from_ini (section, option, filename, raw_path = False):
if filename.startswith("ini/") or raw_path:
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
option = option.replace("[", "~")
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(filepath)
try:
config.remove_option(section, option)
except:
print "Not in .ini file: [{}] {}".format(section, option)
with open(filepath, "wb") as ini_file:
config.write(ini_file)
# Add line to a list file.
def add_to_list (line, filename, raw_path = False):
if raw_path or filename.startswith("ini/"):
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
# Write line to file if it isn't already in it.
with open(filepath, "a+") as list_file:
if line + "\n" not in list_file.readlines():
list_file.write(line + "\n")
# Remove line from a list file.
def remove_from_list (line, filename, raw_path = False):
if raw_path or filename.startswith("ini/"):
filepath = filename
else:
filepath = "ini/{}/{}".format(irc.server, filename)
with open(filepath, "r+") as list_file:
# List every line in the file and return to the beginning.
lines = list_file.readlines()
list_file.seek(0)
# Write everything on the file except line.
for curr_line in lines:
if curr_line != (line + "\n"):
list_file.write(curr_line)
list_file.truncate()
| {
"content_hash": "08c273242e0e0da5aa5484c1f389af5e",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 87,
"avg_line_length": 28.84552845528455,
"alnum_prop": 0.5997745208568207,
"repo_name": "mikotohe2/deskbot",
"id": "cbdb6792a97207041d4de29278a74f2ce5d7c78d",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/ini.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "109325"
}
],
"symlink_target": ""
} |
import unittest
import re
import os
from urllib.request import urlopen
import ssl
from themis.modules.general import regex_base_url, get_looker_instance, format_output
class GeneralTestCase(unittest.TestCase):
base_url = os.environ.get('LOOKERSDK_BASE_URL')
def test_legacy_instance_url(self):
self.assertEqual(regex_base_url('https://company.looker.com:19999'), 'https://company.looker.com', msg="Issue formatting url")
def test_legacy_region_url(self):
self.assertEqual(regex_base_url('https://company.eu.looker.com:19999'), 'https://company.eu.looker.com', msg="Issue formatting url")
def test_legacy_api_url(self):
self.assertEqual(regex_base_url('https://company.api.looker.com'), 'https://company.api.looker.com', msg="Issue formatting url")
def test_legacy_op_url(self):
self.assertEqual(regex_base_url('https://looker.company.com:19999'), 'https://looker.company.com', msg="Issue formatting url")
def test_legacy_other_port_url(self):
self.assertEqual(regex_base_url('https://looker.company.com:443'), 'https://looker.company.com', msg="Issue formatting url")
def test_k8s_instance_url(self):
self.assertEqual(regex_base_url('https://company.cloud.looker.com'), 'https://company.cloud.looker.com', msg="Issue formatting url")
def test_k8s_region_url(self):
self.assertEqual(regex_base_url('https://company.cloud.looker.com'), 'https://company.cloud.looker.com', msg="Issue formatting url")
def test_looker_version(self):
'''Pulls instance version to confirm >= 7.10'''
url = regex_base_url(str(self.__class__.base_url))
request_data = urlopen(url + "/version",context=ssl._create_unverified_context()).read()
instance_version = re.search(r'\d{1,2}\.{1}\d{1,2}', str(request_data))
self.assertTrue(float(instance_version[0]) >= 7.10, msg="Issue with instance version")
def test_format_output(self):
        '''Validates that formatted output is capped at 20 items plus a trailing `...`'''
input_array = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w']
self.assertTrue(len(format_output(input_array)) <= 21, msg="Issue formatting number of results of Looker functions")
self.assertTrue(format_output(input_array)[-1] == "...", msg="Issue formatting trailing result of Looker functions")
def test_format_no_output(self):
'''Validates formatting of empty results'''
input_array = []
self.assertTrue(len(format_output(input_array)) == 1, msg="Issue formatting empty results of Looker functions")
self.assertTrue(format_output(input_array)[0] == 'No issues found.', msg="Issue formatting empty results of Looker functions") | {
"content_hash": "3056afa4a4380d297dcfa8b94aa5f5ee",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 140,
"avg_line_length": 53.28846153846154,
"alnum_prop": 0.6726813424756406,
"repo_name": "looker-open-source/themis",
"id": "58ccc113a7287588334b1a2aa8e7ec66191e3d0a",
"size": "2771",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_general.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4611"
},
{
"name": "Python",
"bytes": "47099"
}
],
"symlink_target": ""
} |
"""
Google Web App gateway tests.
@since: 0.3.1
"""
import unittest
from StringIO import StringIO
from google.appengine.ext import webapp
import pyamf
from pyamf import remoting
from pyamf.remoting.gateway import google as _google
class WebAppGatewayTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.gw = _google.WebAppGateway()
self.environ = {
'wsgi.input': StringIO(),
'wsgi.output': StringIO()
}
self.request = webapp.Request(self.environ)
self.response = webapp.Response()
self.gw.initialize(self.request, self.response)
def test_get(self):
self.gw.get()
self.assertEquals(self.response.__dict__['_Response__status'][0], 405)
def test_bad_request(self):
self.environ['wsgi.input'].write('Bad request')
self.environ['wsgi.input'].seek(0, 0)
self.gw.post()
self.assertEquals(self.response.__dict__['_Response__status'][0], 400)
def test_unknown_request(self):
self.environ['wsgi.input'].write(
'\x00\x00\x00\x00\x00\x01\x00\x09test.test\x00\x02/1\x00\x00\x00'
'\x14\x0a\x00\x00\x00\x01\x08\x00\x00\x00\x00\x00\x01\x61\x02\x00'
'\x01\x61\x00\x00\x09')
self.environ['wsgi.input'].seek(0, 0)
self.gw.post()
self.assertEquals(self.response.__dict__['_Response__status'][0], 200)
envelope = remoting.decode(self.response.out.getvalue())
message = envelope['/1']
self.assertEquals(message.status, remoting.STATUS_ERROR)
body = message.body
self.assertTrue(isinstance(body, remoting.ErrorFault))
self.assertEquals(body.code, 'Service.ResourceNotFound')
def test_expose_request(self):
self.executed = False
def test(request):
self.assertEquals(self.request, request)
self.assertTrue(hasattr(self.request, 'amf_request'))
self.executed = True
self.gw.expose_request = True
self.gw.addService(test, 'test.test')
self.environ['wsgi.input'].write('\x00\x00\x00\x00\x00\x01\x00\x09'
'test.test\x00\x02/1\x00\x00\x00\x05\x0a\x00\x00\x00\x00')
self.environ['wsgi.input'].seek(0, 0)
self.gw.post()
self.assertTrue(self.executed)
def test_timezone(self):
import datetime
self.executed = False
td = datetime.timedelta(hours=-5)
now = datetime.datetime.utcnow()
def echo(d):
self.assertEquals(d, now + td)
self.executed = True
return d
self.gw.addService(echo)
self.gw.timezone_offset = -18000
msg = remoting.Envelope(amfVersion=pyamf.AMF0)
msg['/1'] = remoting.Request(target='echo', body=[now])
stream = remoting.encode(msg)
self.environ['wsgi.input'] = stream
self.gw.post()
envelope = remoting.decode(self.response.out.getvalue())
message = envelope['/1']
self.assertEquals(message.body, now)
self.assertTrue(self.executed)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WebAppGatewayTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| {
"content_hash": "4f3f56e9521e729f6aee5c5a20749b1a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 78,
"avg_line_length": 26.56,
"alnum_prop": 0.6147590361445783,
"repo_name": "cardmagic/PyAMF",
"id": "e7f9cff448412baa1c1f85df0f6a1394a6a9209f",
"size": "3423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyamf/tests/gateway/test_google.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "87097"
},
{
"name": "C",
"bytes": "635399"
},
{
"name": "Java",
"bytes": "374"
},
{
"name": "Python",
"bytes": "955083"
}
],
"symlink_target": ""
} |
"""Tests for haiku._src.nets.vqvae."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
from haiku._src import stateful
from haiku._src import test_utils
from haiku._src import transform
from haiku._src.nets import vqvae
import jax
import jax.numpy as jnp
import numpy as np
class VqvaeTest(parameterized.TestCase):
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testConstruct(self, constructor, kwargs):
vqvae_module = constructor(**kwargs)
# Batch of input vectors to quantize
inputs_np = np.random.randn(100, kwargs['embedding_dim']).astype(np.float32)
inputs = jnp.array(inputs_np)
# Set is_training to False, otherwise for the EMA case just evaluating the
# forward pass will change the embeddings, meaning that some of our computed
# closest embeddings will be incorrect.
vq_output = vqvae_module(inputs, is_training=False)
# Output shape is correct
self.assertEqual(vq_output['quantize'].shape, inputs.shape)
vq_output_np = jax.tree_util.tree_map(lambda t: t, vq_output)
embeddings_np = vqvae_module.embeddings
self.assertEqual(embeddings_np.shape,
(kwargs['embedding_dim'], kwargs['num_embeddings']))
# Check that each input was assigned to the embedding it is closest to.
distances = (jnp.square(inputs_np).sum(axis=1, keepdims=True) -
2 * np.dot(inputs_np, embeddings_np) +
jnp.square(embeddings_np).sum(axis=0, keepdims=True))
closest_index = np.argmax(-distances, axis=1)
# On TPU, distances can be different by ~1% due to precision. This can cause
    # the distance to the closest embedding to flip, leading to a difference
    # in the encoding indices tensor. First we check that the continuous
    # distances are reasonably close, and then we only allow N differences in
    # the encodings. For a batch of 100, N == 3 seems okay (passed 1000x tests).
np.testing.assert_allclose(distances, vq_output_np['distances'], atol=5e-2)
num_differences_in_encodings = (closest_index !=
vq_output_np['encoding_indices']).sum()
num_differences_allowed = 3
self.assertLessEqual(num_differences_in_encodings, num_differences_allowed)
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testShapeChecking(self, constructor, kwargs):
vqvae_module = constructor(**kwargs)
wrong_shape_input = np.random.randn(100, kwargs['embedding_dim'] * 2)
with self.assertRaisesRegex(TypeError, 'total size must be unchanged'):
vqvae_module(
jnp.array(wrong_shape_input.astype(np.float32)), is_training=False)
@parameterized.parameters((vqvae.VectorQuantizer, {
'embedding_dim': 4,
'num_embeddings': 8,
'commitment_cost': 0.25
}), (vqvae.VectorQuantizerEMA, {
'embedding_dim': 6,
'num_embeddings': 13,
'commitment_cost': 0.5,
'decay': 0.1
}))
@test_utils.transform_and_run
def testNoneBatch(self, constructor, kwargs):
"""Check that vqvae can be built on input with a None batch dimension."""
vqvae_module = constructor(**kwargs)
inputs = jnp.zeros([0, 5, 5, kwargs['embedding_dim']])
vqvae_module(inputs, is_training=False)
@parameterized.parameters({'use_jit': True, 'dtype': jnp.float32},
{'use_jit': True, 'dtype': jnp.float64},
{'use_jit': False, 'dtype': jnp.float32},
{'use_jit': False, 'dtype': jnp.float64})
@test_utils.transform_and_run
def testEmaUpdating(self, use_jit, dtype):
if jax.local_devices()[0].platform == 'tpu' and dtype == jnp.float64:
self.skipTest('F64 not supported by TPU')
embedding_dim = 6
np_dtype = np.float64 if dtype is jnp.float64 else np.float32
decay = np.array(0.1, dtype=np_dtype)
vqvae_module = vqvae.VectorQuantizerEMA(
embedding_dim=embedding_dim,
num_embeddings=7,
commitment_cost=0.5,
decay=decay,
dtype=dtype)
if use_jit:
vqvae_f = stateful.jit(vqvae_module, static_argnums=1)
else:
vqvae_f = vqvae_module
batch_size = 16
prev_embeddings = vqvae_module.embeddings
# Embeddings should change with every forwards pass if is_training == True.
for _ in range(10):
inputs = np.random.rand(batch_size, embedding_dim).astype(dtype)
vqvae_f(inputs, True)
current_embeddings = vqvae_module.embeddings
self.assertFalse((prev_embeddings == current_embeddings).all())
prev_embeddings = current_embeddings
# Forward passes with is_training == False don't change anything
for _ in range(10):
inputs = np.random.rand(batch_size, embedding_dim).astype(dtype)
vqvae_f(inputs, False)
current_embeddings = vqvae_module.embeddings
self.assertTrue((current_embeddings == prev_embeddings).all())
def testEmaCrossReplica(self):
embedding_dim = 6
batch_size = 16
inputs = np.random.rand(jax.local_device_count(), batch_size, embedding_dim)
embeddings = {}
perplexities = {}
for axis_name in [None, 'i']:
def my_function(x, axis_name):
decay = np.array(0.9, dtype=np.float32)
vqvae_module = vqvae.VectorQuantizerEMA(
embedding_dim=embedding_dim,
num_embeddings=7,
commitment_cost=0.5,
decay=decay,
cross_replica_axis=axis_name,
dtype=jnp.float32)
outputs = vqvae_module(x, is_training=True)
return vqvae_module.embeddings, outputs['perplexity']
vqvae_f = transform.transform_with_state(
functools.partial(my_function, axis_name=axis_name))
rng = jax.random.PRNGKey(42)
rng = jnp.broadcast_to(rng, (jax.local_device_count(), rng.shape[0]))
params, state = jax.pmap(
vqvae_f.init, axis_name='i')(rng, inputs)
update_fn = jax.pmap(vqvae_f.apply, axis_name='i')
for _ in range(10):
outputs, state = update_fn(params, state, None, inputs)
embeddings[axis_name], perplexities[axis_name] = outputs
# In the single-device case, specifying a cross_replica_axis should have
# no effect. Otherwise, it should!
if jax.device_count() == 1:
# Have to use assert_allclose here rather than checking exact matches to
# make the test pass on GPU, presumably because of nondeterministic
# reductions.
np.testing.assert_allclose(
embeddings[None], embeddings['i'], rtol=1e-6, atol=1e-6)
np.testing.assert_allclose(
perplexities[None], perplexities['i'], rtol=1e-6, atol=1e-6)
else:
self.assertFalse((embeddings[None] == embeddings['i']).all())
self.assertFalse((perplexities[None] == perplexities['i']).all())
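# Minimal usage sketch outside the test harness (added for illustration; it
# relies only on names imported above and on the output keys the tests assert
# on, so treat it as an assumption-laden example rather than canonical
# documentation).
def _example_quantize(inputs):
  def forward(x):
    vq = vqvae.VectorQuantizer(
        embedding_dim=4, num_embeddings=8, commitment_cost=0.25)
    return vq(x, is_training=False)
  f = transform.transform_with_state(forward)
  params, state = f.init(jax.random.PRNGKey(0), inputs)
  out, _ = f.apply(params, state, None, inputs)
  return out['quantize'], out['encoding_indices']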
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "cfb306702292b6eae84b6f6a68557713",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 80,
"avg_line_length": 37.738461538461536,
"alnum_prop": 0.6506318793314309,
"repo_name": "deepmind/dm-haiku",
"id": "d8279a581a195929eca68c9c4e7a5b0f52a05fa8",
"size": "8055",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "haiku/_src/nets/vqvae_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1024855"
},
{
"name": "Shell",
"bytes": "1907"
},
{
"name": "Starlark",
"bytes": "31643"
}
],
"symlink_target": ""
} |
import logging
from time import monotonic as monotonic_clock
class _LogRateLimit(logging.Filter):
def __init__(self, burst, interval, except_level=None):
logging.Filter.__init__(self)
self.burst = burst
self.interval = interval
self.except_level = except_level
self.logger = logging.getLogger()
self._reset()
def _reset(self, now=None):
if now is None:
now = monotonic_clock()
self.counter = 0
self.end_time = now + self.interval
self.emit_warn = False
def filter(self, record):
if (self.except_level is not None
and record.levelno >= self.except_level):
# don't limit levels >= except_level
return True
timestamp = monotonic_clock()
if timestamp >= self.end_time:
self._reset(timestamp)
self.counter += 1
return True
self.counter += 1
if self.counter <= self.burst:
return True
if self.emit_warn:
# Allow to log our own warning: self.logger is also filtered by
# rate limiting
return True
if self.counter == self.burst + 1:
self.emit_warn = True
self.logger.error("Logging rate limit: "
"drop after %s records/%s sec",
self.burst, self.interval)
self.emit_warn = False
# Drop the log
return False
def _iter_loggers():
"""Iterate on existing loggers."""
# Sadly, Logger.manager and Manager.loggerDict are not documented,
# but there is no logging public function to iterate on all loggers.
# The root logger is not part of loggerDict.
yield logging.getLogger()
manager = logging.Logger.manager
for logger in manager.loggerDict.values():
if isinstance(logger, logging.PlaceHolder):
continue
yield logger
_LOG_LEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'DEBUG': logging.DEBUG,
}
def install_filter(burst, interval, except_level='CRITICAL'):
"""Install a rate limit filter on existing and future loggers.
Limit logs to *burst* messages every *interval* seconds, except of levels
>= *except_level*. *except_level* is a log level name like 'CRITICAL'. If
*except_level* is an empty string, all levels are filtered.
The filter uses a monotonic clock, the timestamp of log records is not
used.
Raise an exception if a rate limit filter is already installed.
"""
if install_filter.log_filter is not None:
raise RuntimeError("rate limit filter already installed")
try:
except_levelno = _LOG_LEVELS[except_level]
except KeyError:
raise ValueError("invalid log level name: %r" % except_level)
log_filter = _LogRateLimit(burst, interval, except_levelno)
install_filter.log_filter = log_filter
install_filter.logger_class = logging.getLoggerClass()
class RateLimitLogger(install_filter.logger_class):
def __init__(self, *args, **kw):
logging.Logger.__init__(self, *args, **kw)
self.addFilter(log_filter)
# Setup our own logger class to automatically add the filter
# to new loggers.
logging.setLoggerClass(RateLimitLogger)
# Add the filter to all existing loggers
for logger in _iter_loggers():
logger.addFilter(log_filter)
install_filter.log_filter = None
install_filter.logger_class = None
def uninstall_filter():
"""Uninstall the rate filter installed by install_filter().
Do nothing if the filter was already uninstalled.
"""
if install_filter.log_filter is None:
# not installed (or already uninstalled)
return
# Restore the old logger class
logging.setLoggerClass(install_filter.logger_class)
# Remove the filter from all existing loggers
for logger in _iter_loggers():
logger.removeFilter(install_filter.log_filter)
install_filter.logger_class = None
install_filter.log_filter = None
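# Usage sketch (illustrative only; the public contract is just install_filter
# and uninstall_filter as documented above): allow 5 records per 10 seconds,
# never drop ERROR or above, then restore normal logging.
def _example_rate_limited_logging():
    install_filter(burst=5, interval=10, except_level='ERROR')
    log = logging.getLogger(__name__)
    for i in range(20):
        log.info("message %d", i)  # only the first 5 per window pass the filter
    uninstall_filter()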
| {
"content_hash": "9005c64f3621d80fdf598dad1f43c436",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 77,
"avg_line_length": 29.93525179856115,
"alnum_prop": 0.6327805815909637,
"repo_name": "openstack/oslo.log",
"id": "05ad581f28a141dfd910847be2055b3c7f50c756",
"size": "4762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_log/rate_limit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "218052"
}
],
"symlink_target": ""
} |
"""Utils for the booking app."""
from django.contrib.sessions.models import Session
from .models import Booking
def get_booking(request):
"""
Returns the booking that is in progress for the current user or None
We assume that a user can only have one booking that is in-progress.
TODO: This implementation assumes that there is a status called
'inprogress' and that there should only be one such booking for a given
user. We need to see if this can be more generic for future projects.
:param request: The Request object.
"""
booking = None
if request.user.is_authenticated():
try:
booking = Booking.objects.get(
user=request.user,
booking_status__slug='inprogress')
except Booking.DoesNotExist:
# The user does not have any open bookings
pass
else:
session = Session.objects.get(
session_key=request.session.session_key)
try:
booking = Booking.objects.get(session=session)
except Booking.DoesNotExist:
# The user does not have any bookings in his session
pass
return booking
def persist_booking(booking, user):
"""
Ties an in-progress booking from a session to a user when the user logs in.
If we don't do this, the booking will be lost, because on a login, the
old session will be deleted and a new one will be created. Since the
booking has a FK to the session, it would be deleted as well when the user
logs in.
We assume that a user can only have one booking that is in-progress.
Therefore we will delete any existing in-progress bookings of this user
before tying the one from the session to the user.
TODO: Find a more generic solution for this, as this assumes that there is
a status called inprogress and that a user can only have one such booking.
:param booking: The booking that should be tied to the user.
:user: The user the booking should be tied to.
"""
if booking is not None:
existing_bookings = Booking.objects.filter(
user=user, booking_status__slug='inprogress').exclude(
pk=booking.pk)
existing_bookings.delete()
booking.session = None
booking.user = user
booking.save()
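# Usage sketch (illustrative, not part of django-booking itself): fetch the
# session-bound booking *before* authenticating, because logging in replaces
# the session, then re-attach it to the user afterwards. `auth_login` is the
# standard django.contrib.auth.login.
def login_and_keep_booking(request, user):
    from django.contrib.auth import login as auth_login
    booking = get_booking(request)   # session-based lookup while anonymous
    auth_login(request, user)        # this rotates the session key
    persist_booking(booking, user)   # tie the in-progress booking to the user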
| {
"content_hash": "0a5a4cc6270b8923df1904efd344e7c2",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 34.35294117647059,
"alnum_prop": 0.6639554794520548,
"repo_name": "zcqHub/django-booking",
"id": "c2d754b14137c0ecf7cf0383c5cfbb06babaaa0d",
"size": "2336",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "booking/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "951"
},
{
"name": "Python",
"bytes": "140146"
}
],
"symlink_target": ""
} |
import logging
import json
import multiprocessing
import os
import signal
from threading import Lock
import time
import tornado.httpserver
import tornado.netutil
import tornado.web
from zmq.eventloop import ioloop
from threading import Thread
from twisted.internet import reactor
from db_store import Obdb
from market import Market
from transport import CryptoTransportLayer
import upnp
from util import open_default_webbrowser, is_mac
from ws import WebSocketHandler
if is_mac():
from util import osx_check_dyld_library_path
osx_check_dyld_library_path()
ioloop.install()
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/html/index.html")
class OpenBazaarStaticHandler(tornado.web.StaticFileHandler):
def set_extra_headers(self, path):
self.set_header("X-Frame-Options", "DENY")
self.set_header("X-Content-Type-Options", "nosniff")
class OpenBazaarContext(object):
"""
This Object holds all of the runtime parameters
necessary to start an OpenBazaar instance.
This object is convenient to pass on method interfaces,
and reduces issues of API inconsistencies (as in the order
in which parameters are passed, which can cause bugs)
"""
def __init__(self,
nat_status,
server_ip,
server_port,
http_ip,
http_port,
db_path,
log_path,
log_level,
market_id,
bm_user,
bm_pass,
bm_port,
seeds,
seed_mode,
dev_mode,
dev_nodes,
disable_upnp,
disable_stun_check,
disable_open_browser,
disable_sqlite_crypt,
enable_ip_checker):
self.nat_status = nat_status
self.server_ip = server_ip
self.server_port = server_port
self.http_ip = http_ip
self.http_port = http_port
self.db_path = db_path
self.log_path = log_path
self.log_level = log_level
self.market_id = market_id
self.bm_user = bm_user
self.bm_pass = bm_pass
self.bm_port = bm_port
self.seeds = seeds
self.seed_mode = seed_mode
self.dev_mode = dev_mode
self.dev_nodes = dev_nodes
self.disable_upnp = disable_upnp
self.disable_stun_check = disable_stun_check
self.disable_open_browser = disable_open_browser
self.disable_sqlite_crypt = disable_sqlite_crypt
self.enable_ip_checker = enable_ip_checker
        # to deduce up-time, and (TODO) average up-time
# time stamp in (non-local) Coordinated Universal Time format.
self.started_utc_timestamp = long(time.time())
def __repr__(self):
r = {"server_ip": self.server_ip,
"server_port": self.server_port,
"http_ip": self.http_ip,
"http_port": self.http_port,
"log_path": self.log_path,
"market_id": self.market_id,
"bm_user": self.bm_user,
"bm_pass": self.bm_pass,
"bm_port": self.bm_port,
"seeds": self.seeds,
"seed_mode": self.seed_mode,
"dev_mode": self.dev_mode,
"dev_nodes": self.dev_nodes,
"log_level": self.log_level,
"db_path": self.db_path,
"disable_upnp": self.disable_upnp,
"disable_open_browser": self.disable_open_browser,
"disable_sqlite_crypt": self.disable_sqlite_crypt,
"enable_ip_checker": self.enable_ip_checker,
"started_utc_timestamp": self.started_utc_timestamp,
"uptime_in_secs": (long(time.time()) -
long(self.started_utc_timestamp))}
return json.dumps(r).replace(", ", ",\n ")
@staticmethod
def get_defaults():
return {'market_id': 1,
'server_ip': '127.0.0.1',
'server_port': 12345,
'log_dir': 'logs',
'log_file': 'production.log',
'dev_log_file': 'development-{0}.log',
'db_dir': 'db',
'db_file': 'ob.db',
'dev_db_file': 'ob-dev-{0}.db',
'dev_mode': False,
'dev_nodes': 3,
'seed_mode': False,
'seeds': [
'seed.openbazaar.org',
'seed2.openbazaar.org',
'seed.openlabs.co',
'us.seed.bizarre.company',
'eu.seed.bizarre.company'
],
'disable_upnp': False,
'disable_stun_check': False,
'disable_open_browser': False,
'disable_sqlite_crypt': False,
'log_level': 30,
# CRITICAL=50, ERROR=40, WARNING=30, DEBUG=10, DATADUMP=5, NOTSET=0
'http_ip': '127.0.0.1',
'http_port': 0,
'bm_user': None,
'bm_pass': None,
'bm_port': -1,
'enable_ip_checker': False,
'config_file': None}
@staticmethod
def create_default_instance():
defaults = OpenBazaarContext.get_defaults()
return OpenBazaarContext(
None,
server_ip=defaults['server_ip'],
server_port=defaults['server_port'],
http_ip=defaults['http_ip'],
http_port=defaults['http_port'],
db_path=os.path.join(defaults['db_dir'], defaults['db_file']),
log_path=os.path.join(defaults['log_dir'], defaults['log_file']),
log_level=defaults['log_level'],
market_id=defaults['market_id'],
bm_user=defaults['bm_user'],
bm_pass=defaults['bm_pass'],
bm_port=defaults['bm_port'],
seeds=defaults['seeds'],
seed_mode=defaults['seed_mode'],
dev_mode=defaults['dev_mode'],
dev_nodes=defaults['dev_nodes'],
disable_upnp=defaults['disable_upnp'],
disable_stun_check=defaults['disable_stun_check'],
disable_open_browser=defaults['disable_open_browser'],
disable_sqlite_crypt=defaults['disable_sqlite_crypt'],
enable_ip_checker=defaults['enable_ip_checker']
)
class MarketApplication(tornado.web.Application):
def __init__(self, ob_ctx):
self.shutdown_mutex = Lock()
self.ob_ctx = ob_ctx
db = Obdb(ob_ctx.db_path, ob_ctx.disable_sqlite_crypt)
self.transport = CryptoTransportLayer(ob_ctx, db)
self.market = Market(self.transport, db)
self.upnp_mapper = None
Thread(target=reactor.run, args=(False,)).start()
peers = ob_ctx.seeds if not ob_ctx.seed_mode else []
self.transport.join_network(peers)
handlers = [
(r"/", MainHandler),
(r"/main", MainHandler),
(r"/html/(.*)", OpenBazaarStaticHandler, {'path': './html'}),
(r"/ws", WebSocketHandler,
dict(transport=self.transport, market_application=self, db=db))
]
# TODO: Move debug settings to configuration location
settings = dict(debug=True)
super(MarketApplication, self).__init__(handlers, **settings)
def start_app(self):
# If self.ob_ctx.http_port is 0, the kernel is queried for a port.
sockets = tornado.netutil.bind_sockets(
self.ob_ctx.http_port,
address=self.ob_ctx.http_ip
)
server = tornado.httpserver.HTTPServer(self)
server.add_sockets(sockets)
self.ob_ctx.http_port = sockets[0].getsockname()[1]
if not self.ob_ctx.disable_upnp:
self.setup_upnp_port_mappings(self.ob_ctx.server_port)
else:
print "MarketApplication.start_app(): Disabling upnp setup"
def get_transport(self):
return self.transport
def setup_upnp_port_mappings(self, p2p_port):
result = False
if not self.ob_ctx.disable_upnp:
upnp.PortMapper.DEBUG = False
print "Setting up UPnP Port Map Entry..."
self.upnp_mapper = upnp.PortMapper()
self.upnp_mapper.clean_my_mappings(p2p_port)
result_tcp_p2p_mapping = self.upnp_mapper.add_port_mapping(
p2p_port, p2p_port
)
print "UPnP TCP P2P Port Map configuration done ",
print "(%s -> %s) => %s" % (
p2p_port, p2p_port, result_tcp_p2p_mapping
)
result_udp_p2p_mapping = self.upnp_mapper.add_port_mapping(
p2p_port, p2p_port, 'UDP'
)
print "UPnP UDP P2P Port Map configuration done ",
print "(%s -> %s) => %s" % (
p2p_port, p2p_port, result_udp_p2p_mapping
)
result = result_tcp_p2p_mapping and result_udp_p2p_mapping
if not result:
print "Warning: UPnP was not setup correctly. ",
print "Ports could not be automatically mapped."
return result
def cleanup_upnp_port_mapping(self):
if not self.ob_ctx.disable_upnp:
try:
if self.upnp_mapper is not None:
print "Cleaning UPnP Port Mapping -> ", \
self.upnp_mapper.clean_my_mappings(self.transport.port)
except AttributeError:
print (
"[openbazaar] "
"MarketApplication.clean_upnp_port_mapping() failed!"
)
def shutdown(self, x=None, y=None):
self.shutdown_mutex.acquire()
print "MarketApplication.shutdown!"
log = logging.getLogger(
'[%s] %s' % (self.market.market_id, 'root')
)
log.info("Received TERMINATE, exiting...")
self.cleanup_upnp_port_mapping()
tornado.ioloop.IOLoop.instance().stop()
self.transport.shutdown()
self.shutdown_mutex.release()
os._exit(0)
def start_io_loop():
if not tornado.ioloop.IOLoop.instance():
ioloop.install()
try:
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
print "openbazaar::start_io_loop Exception:", e
raise
def create_logger(ob_ctx):
logger = None
try:
logger = logging.getLogger()
logger.setLevel(int(ob_ctx.log_level))
handler = logging.handlers.RotatingFileHandler(
ob_ctx.log_path,
encoding='utf-8',
maxBytes=50000000,
backupCount=1
)
logFormat = logging.Formatter(
u'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(logFormat)
logger.addHandler(handler)
logging.addLevelName(5, "DATADUMP")
def datadump(self, message, *args, **kwargs):
if self.isEnabledFor(5):
self._log(5, message, args, **kwargs)
logging.Logger.datadump = datadump
except Exception as e:
print "Could not setup logger, continuing: ", e.message
return logger
def log_openbazaar_start(log, ob_ctx):
log.info("Started OpenBazaar Web App at http://%s:%s" %
(ob_ctx.http_ip, ob_ctx.http_port))
print "Started OpenBazaar Web App at http://%s:%s" % (ob_ctx.http_ip, ob_ctx.http_port)
def attempt_browser_open(ob_ctx):
if not ob_ctx.disable_open_browser:
open_default_webbrowser(
'http://%s:%s' % (ob_ctx.http_ip, ob_ctx.http_port))
def setup_signal_handlers(application):
try:
signal.signal(signal.SIGTERM, application.shutdown)
except ValueError:
pass
def node_starter(ob_ctxs):
    # This is the target for the Process which
    # will spawn the child processes that run
    # the actual OpenBazaar instances.
for ob_ctx in ob_ctxs:
p = multiprocessing.Process(
target=start_node, args=(ob_ctx,),
name="Process::openbazaar_daemon::target(start_node)")
p.daemon = False # python has to wait for this user thread to end.
p.start()
def start_node(ob_ctx):
logger = create_logger(ob_ctx)
application = MarketApplication(ob_ctx)
setup_signal_handlers(application)
application.start_app()
log_openbazaar_start(logger, ob_ctx)
attempt_browser_open(ob_ctx)
start_io_loop()
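# Usage sketch (added for illustration, not original OpenBazaar wiring): build
# a default context, override a couple of fields and start a single node. All
# names come from the classes and functions defined above in this module.
def _example_start_single_node():
    ctx = OpenBazaarContext.create_default_instance()
    ctx.disable_open_browser = True
    ctx.http_port = 8888  # fixed UI port instead of letting the kernel pick one
    node_starter([ctx])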
| {
"content_hash": "7ae78156a071bf4b4c39242c6aae81c3",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 91,
"avg_line_length": 33.6970509383378,
"alnum_prop": 0.5586761078844776,
"repo_name": "STRML/OpenBazaar",
"id": "c39ef54b1e43240ff11bcb30f81056e597be0908",
"size": "12569",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "node/openbazaar_daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8930"
},
{
"name": "JavaScript",
"bytes": "107310"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "Perl",
"bytes": "655"
},
{
"name": "PowerShell",
"bytes": "6205"
},
{
"name": "Python",
"bytes": "361116"
},
{
"name": "Shell",
"bytes": "16486"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ids", parent_name="contourcarpet", **kwargs):
super(IdsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| {
"content_hash": "b231620e615f5d34d2c8d9eb1e721f1d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 35.81818181818182,
"alnum_prop": 0.6192893401015228,
"repo_name": "plotly/plotly.py",
"id": "b9e45d342c9059f57f596295e16377e809bbd772",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/contourcarpet/_ids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import grizzly_impl
from lazy_op import LazyOpResult, to_weld_type
from weld.weldobject import *
from utils import *
from groupbyweld import GroupByWeld
from seriesweld import SeriesWeld
class DataFrameWeld:
"""Summary
Attributes:
df (TYPE): Description
predicates (TYPE): Description
unmaterialized_cols (TYPE): Description
expr (TYPE): Description
"""
def __init__(self, df, predicates=None, expr=None):
self.df = df
self.unmaterialized_cols = dict()
self.predicates = predicates
self.raw_columns = dict()
for key in self.df:
raw_column = self.df[key].values
if raw_column.dtype == object:
raw_column = np.array(self.df[key], dtype=str)
self.raw_columns[key] = raw_column
def __getitem__(self, key):
"""Summary
Args:
key (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
"""
if isinstance(key, str): # Single-key get
# First check if key corresponds to an un-materialized column
if key in self.unmaterialized_cols:
return self.unmaterialized_cols[key]
raw_column = self.df[key].values
dtype = str(raw_column.dtype)
# If column type is "object", then cast as "vec[char]" in Weld
if dtype == 'object':
raw_column = self.raw_columns[key]
weld_type = WeldVec(WeldChar())
else:
weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
if self.predicates is None:
return SeriesWeld(raw_column, weld_type, self, key)
return SeriesWeld(
grizzly_impl.filter(
raw_column,
self.predicates.expr,
weld_type
),
weld_type,
self,
key
)
elif isinstance(key, list):
# For multi-key get, return type is a dataframe
return DataFrameWeld(self.df[key], self.predicates)
elif isinstance(key, SeriesWeld):
# Can also apply predicate to a dataframe
if self.predicates is not None:
return DataFrameWeld(self.df, key.per_element_and(self.predicates))
return DataFrameWeld(self.df, key)
raise Exception("Invalid type in __getitem__")
def __setitem__(self, key, value):
"""Summary
Args:
key (TYPE): Description
value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(value, np.ndarray):
dtype = str(value.dtype)
weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
self.unmaterialized_cols[key] = SeriesWeld(
value,
weld_type,
self,
key
)
elif isinstance(value, SeriesWeld):
self.unmaterialized_cols[key] = value
elif isinstance(value, LazyOpResult):
self.unmaterialized_cols[key] = SeriesWeld(
value.expr,
value.weld_type,
self,
key
)
@property
def values(self):
if self.predicates is None:
return self.df.values
else:
if isinstance(self.df.values, np.ndarray):
weld_type = grizzly_impl.numpy_to_weld_type_mapping[
str(self.df.values.dtype)]
dim = self.df.values.ndim
return LazyOpResult(
grizzly_impl.filter(
self.df.values,
self.predicates.expr,
weld_type
),
weld_type,
dim
)
def _get_column_names(self):
"""Summary
Returns:
TYPE: Description
"""
column_names = set()
for column in self.df:
column_names.add(column)
for column in self.unmaterialized_cols:
column_names.add(column)
return list(column_names)
def filter(self, predicates):
"""Summary
Args:
            predicates (TYPE): Predicates used to filter rows
Returns:
TYPE: Description
"""
tys = []
for col_name, raw_column in self.raw_columns.items():
dtype = str(raw_column.dtype)
if dtype == 'object' or dtype == '|S64':
weld_type = WeldVec(WeldChar())
else:
weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
tys.append(weld_type)
if len(tys) == 1:
weld_type = tys[0]
else:
weld_type = WeldStruct(tys)
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return DataFrameWeldExpr(
grizzly_impl.filter(
grizzly_impl.zip_columns(
self.raw_columns.values(),
),
predicates
),
self.raw_columns.keys(),
weld_type
)
def pivot_table(self, values, index, columns, aggfunc='sum'):
tys = []
for col_name, raw_column in self.raw_columns.items():
dtype = str(raw_column.dtype)
if dtype == 'object' or dtype == '|S64':
weld_type = WeldVec(WeldChar())
else:
weld_type = grizzly_impl.numpy_to_weld_type_mapping[dtype]
tys.append(weld_type)
if len(tys) == 1:
weld_type = tys[0]
else:
weld_type = WeldStruct(tys)
return DataFrameWeldExpr(
grizzly_impl.zip_columns(
self.raw_columns.values(),
),
self.raw_columns.keys(),
weld_type
).pivot_table(values, index, columns, aggfunc)
def groupby(self, grouping_column_name):
"""Summary
Args:
grouping_column_name (TYPE): Description
Returns:
TYPE: Description
"""
return GroupByWeld(
self,
grouping_column_name
)
def to_pandas(self):
"""Summary
Returns:
TYPE: Description
"""
# TODO: Do more work here (need to materialize all columns as needed)
return self.df
class DataFrameWeldExpr:
# TODO We need to merge this with the original DataFrameWeld class
def __init__(self, expr, column_names, weld_type, is_pivot=False):
if isinstance(weld_type, WeldStruct):
self.column_types = weld_type.field_types
self.weld_type = weld_type
else:
raise Exception("DataFrameWeldExpr can only except struct types")
self.expr = expr
self.column_names = column_names
self.colindex_map = {name:i for i, name in enumerate(column_names)}
self.is_pivot = is_pivot
def __setitem__(self, key, item):
if self.is_pivot:
            # Note that if this is a pivot table, we have to modify its
            # structure: append the item to the nested vector of vectors
            # and update the col_vec field.
# Also setitem appends a new item to the pivot table
# Modifying an existing item is not implemented yet
# TODO if pivot table check that the column being added
# is same type
if isinstance(item, SeriesWeld):
if item.index_type is not None:
item_expr = grizzly_impl.get_field(item.expr, 1)
else:
item_expr = item.expr
self.expr = grizzly_impl.set_pivot_column(
self.expr,
key,
item_expr,
self.column_types[1].elemType,
self.column_types[2].elemType
)
else:
raise Exception("Setitem not implemented for non-pivot table")
def __getitem__(self, item):
if self.is_pivot:
# Note that if this is a pivot table,
# then we can't check if the item is in the column
# since we have not materialized the unique column values
# Note if
return SeriesWeld(
grizzly_impl.get_pivot_column(
self.expr,
item,
self.column_types[2].elemType
),
self.column_types[1].elemType.elemType,
df=None,
column_name=None,
index_type=self.column_types[0].elemType,
index_name=self.column_names[0]
)
@property
def loc(self):
return DataFrameWeldLoc(self)
def merge(self, df2):
if not isinstance(df2, DataFrameWeldExpr):
raise Exception("df2 must be of type DataFrameWeldExpr")
keys_d1 = set(self.colindex_map.keys())
keys_d2 = set(df2.colindex_map.keys())
join_keys = keys_d1 & keys_d2
key_index_d1 = [self.colindex_map[key] for key in join_keys]
key_index_d2 = [df2.colindex_map[key] for key in join_keys]
rest_keys_d1 = keys_d1.difference(join_keys)
rest_keys_d2 = keys_d2.difference(join_keys)
rest_index_d1 = [self.colindex_map[key] for key in rest_keys_d1]
rest_index_d2 = [df2.colindex_map[key] for key in rest_keys_d2]
new_column_names = list(join_keys) + list(rest_keys_d1) + list(rest_keys_d2)
# We add the key column first, followed by those in self, and finally df2
key_index_types = []
for i in key_index_d1:
key_index_types.append(self.column_types[i])
if len(key_index_types) > 1:
join_keys_type = WeldStruct(key_index_types)
else:
join_keys_type = key_index_types[0]
rest_types_d1 = []
for i in rest_index_d1:
rest_types_d1.append(self.column_types[i])
rest_types_d2 = []
for i in rest_index_d2:
rest_types_d2.append(df2.column_types[i])
new_types = key_index_types + rest_types_d1 + rest_types_d2
return DataFrameWeldExpr(
grizzly_impl.join(
self.expr,
df2.expr,
key_index_d1,
key_index_d2,
join_keys_type,
rest_index_d1,
WeldStruct(rest_types_d1),
rest_index_d2,
WeldStruct(rest_types_d2)
),
new_column_names,
WeldStruct(new_types)
)
def pivot_table(self, values, index, columns, aggfunc='sum'):
value_index = self.colindex_map[values]
index_index = self.colindex_map[index]
columns_index = self.colindex_map[columns]
ind_ty = to_weld_type(self.column_types[index_index], 1)
piv_ty = to_weld_type(WeldDouble(), 2)
col_ty = to_weld_type(self.column_types[columns_index], 1)
return DataFrameWeldExpr(
grizzly_impl.pivot_table(
self.expr,
value_index,
self.column_types[value_index],
index_index,
self.column_types[index_index],
columns_index,
self.column_types[columns_index],
aggfunc
),
[index, columns, values],
WeldStruct([ind_ty, piv_ty, col_ty]),
is_pivot=True
)
def sum(self, axis):
if axis == 1:
if self.is_pivot:
if isinstance(self.column_types[1], WeldVec):
elem_type = self.column_types[1].elemType
value_type = elem_type.elemType
return SeriesWeld(
grizzly_impl.pivot_sum(
self.expr,
value_type
),
value_type
)
else:
raise Exception("Sum for non-pivot table data frames not supported")
elif axis == 0:
raise Exception("Sum not implemented yet for axis = 0")
def div(self, series, axis):
if axis == 0:
if self.is_pivot:
if isinstance(self.column_types[1], WeldVec):
elem_type = self.column_types[1].elemType
value_type = elem_type.elemType
ind_ty = self.column_types[0]
piv_ty = to_weld_type(WeldDouble(), 2)
col_ty = self.column_types[2]
return DataFrameWeldExpr(
grizzly_impl.pivot_div(
self.expr,
series.expr,
elem_type,
value_type,
),
self.column_names,
WeldStruct([ind_ty, piv_ty, col_ty]),
is_pivot=True
)
else:
raise Exception("Div for non-pivot table data frames not supported")
elif axis == 1:
raise Exception("Div not implemented yet for axis = 0")
def sort_values(self, by):
if self.is_pivot:
return DataFrameWeldExpr(
grizzly_impl.pivot_sort(
self.expr,
by,
self.column_types[0].elemType, # The index type
self.column_types[2].elemType, # The column name types (usually string)
self.column_types[1].elemType.elemType # The column value type
),
self.column_names,
self.weld_type,
is_pivot=True
)
else:
            raise Exception("sort_values needs to be implemented for non-pivot tables")
def evaluate(self, verbose=True, passes=None):
"""Summary
Returns:
TYPE: Description
"""
if self.is_pivot:
index, pivot, columns = LazyOpResult(
self.expr,
self.weld_type,
0
).evaluate(verbose=verbose, passes=passes)
df_dict = {}
for i, column_name in enumerate(columns):
df_dict[column_name] = pivot[i]
return DataFrameWeld(pd.DataFrame(df_dict, index=index))
else:
df = pd.DataFrame(columns=[])
weldvec_type_list = []
for type in self.column_types:
weldvec_type_list.append(WeldVec(type))
columns = LazyOpResult(
grizzly_impl.unzip_columns(
self.expr,
self.column_types
),
WeldStruct(weldvec_type_list),
0
).evaluate(verbose=verbose, passes=passes)
for i, column_name in enumerate(self.column_names):
df[column_name] = columns[i]
return DataFrameWeld(df)
def get_column(self, column_name, column_type, index, verbose=True):
"""Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.get_column(
self.expr,
self.weld_type,
index
),
column_type,
1
).evaluate(verbose=verbose)
class DataFrameWeldLoc:
"""
Label location based indexer for selection by label for dataframe objects.
Attributes:
df (TYPE): The DataFrame being indexed into.
"""
def __init__(self, df):
self.df = df
def __getitem__(self, key):
if isinstance(key, SeriesWeld):
# We're going to assume that the first column in these dataframes
# is an index column. This assumption does not hold throughout grizzly,
# so we should fix that moving forward.
index_expr = grizzly_impl.get_field(self.df.expr, 0)
if self.df.is_pivot:
index_type, pivot_type, column_type = self.df.column_types
index_elem_type = index_type.elemType
index_expr_predicate = grizzly_impl.isin(index_expr, key.expr, index_elem_type)
return DataFrameWeldExpr(
grizzly_impl.pivot_filter(
self.df.expr,
index_expr_predicate
),
self.df.column_names,
self.df.weld_type,
is_pivot=True
)
# TODO : Need to implement for non-pivot tables
raise Exception("Cannot invoke getitem on an object that is not SeriesWeld")
| {
"content_hash": "dcef529c0c1fb90409f1875a89001e5b",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 95,
"avg_line_length": 33.960629921259844,
"alnum_prop": 0.5103756086250869,
"repo_name": "weld-project/weld",
"id": "ea4b17d01417416220f2317bdf110a78f6b6de34",
"size": "17252",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/grizzly/grizzly/dataframeweld.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "C",
"bytes": "660"
},
{
"name": "C++",
"bytes": "27987"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "367344"
},
{
"name": "Rust",
"bytes": "1126668"
},
{
"name": "Shell",
"bytes": "2090"
}
],
"symlink_target": ""
} |
from nose.tools import assert_raises
from libsheep.protocol import Message, MessagePart, Context, Command
def test_message_args_set_message_parts():
m = Message('H', 'SUP')
assert isinstance(m.context, MessagePart)
assert isinstance(m.command, MessagePart)
def test_message_parts_have_arg_codes():
m = Message('H', 'SUP')
assert m.context.code == 'H'
assert m.command.code == 'SUP'
def test_h_context_creates_h_instance():
from libsheep.protocol import CIH
context = Context('H')
assert isinstance(context, CIH)
def test_unknown_context_creates_generic_instance():
context = Context('Z')
assert type(context) is Context
def test_message_decode_parses_message():
m = Message.decode('HSUP ADBASE ADTIGR')
assert isinstance(m, Message)
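# For reference, an ADC-style message such as 'HSUP ADBASE ADTIGR' splits into
# a one-letter context code ('H'), a three-letter command ('SUP') and
# space-separated arguments; the tests above only exercise the context and
# command parts, so argument handling is left unasserted here.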
| {
"content_hash": "d6099338986c1978e12c0275b1ba985d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 31.6,
"alnum_prop": 0.7075949367088608,
"repo_name": "exogen/80sheep",
"id": "bffa255d1dbcd6c2c304297a12bf26a8019fc2b6",
"size": "790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62616"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_rovim_minnoni_q1_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_nboo_n","rovim_minnoni_q1_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "ee3144c242e25a5e1ae3f0c00d9bc93e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.7053571428571429,
"repo_name": "anhstudios/swganh",
"id": "a2d1340d04a399d0393d4978d95e43d209645d51",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_rovim_minnoni_q1_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
https://leetcode.com/problems/median-of-two-sorted-arrays/
"""
import sys
import random
class Solution:
"""
Wrapper class for Leetcode solution
"""
def findMedianSortedArrays(self, nums1: [int], nums2: [int]) -> float:
"""
Parameters
----------
nums1: [int]
A sorted array of numbers
nums2: [int]
A sorted array of numbers
Returns
-------
int
Median number which partitions both arrays if they were already merged and sorted
"""
if not nums1 and not nums2:
raise "Invalid input"
# Single array (trivial algorithm)
if not nums1 and nums2:
return self._get_median_single_array(nums2)
if not nums2 and nums1:
return self._get_median_single_array(nums1)
# Get the median for two arrays, smallest array first
if len(nums1) < len(nums2):
return self._find_median_sorted_arrays(small_array=nums1, large_array=nums2)
return self._find_median_sorted_arrays(small_array=nums2, large_array=nums1)
@classmethod
def _find_median_sorted_arrays(cls, small_array: [int], large_array: [int]) -> float:
"""
Time complexity O(log(len(small_array)))
Parameters
----------
small_array: [int]
A list of integers sorted
large_array: [int]
A list of integers sorted, must be larger or the same length as small_array
Returns
-------
float
The median that splits the two arrays equally as if they were sorted and merged
"""
total_elements = len(small_array) + len(large_array)
# Begin the search within the entire range of the smaller array
        start_x = 0
end_x = len(small_array)
while (start_x <= end_x):
# Try and find the partition points for the x and y array such that an equal number of elements appear on both sides
partition_x = cls._get_partition_x(start_x, end_x)
partition_y = cls._get_partition_y(total_elements, partition_x)
# Find immediate element to the left and to the right of partition_x in the x array
left_val_x = small_array[partition_x-1] if partition_x > 0 else -sys.maxsize
right_val_x = small_array[partition_x] if partition_x < len(small_array) else sys.maxsize
# Find immediate element to the left and to the right of partitionY in the y array
left_val_y = large_array[partition_y-1] if partition_y > 0 else -sys.maxsize
right_val_y = large_array[partition_y] if partition_y < len(large_array) else sys.maxsize
            # All values to the left of the x partition are less or equal to all the values to the right of the y partition
left_x_less_eq_to_right_y = left_val_x <= right_val_y
# All values to the left of the y partition are less or equal to all the values to the right of the x partition
left_y_less_eq_to_right_x = left_val_y <= right_val_x
# Print information about the current state
print("-- INFORMATION---")
print(f"start_x={start_x} end_x={end_x}")
print(f"partition_x={partition_x} partitionY={partition_y}")
print("-----------------")
print("The 4 values hugging the 2 partition points")
print(f"maxLeftX={left_val_x} minRightX={right_val_x}")
print(f"maxLeftY={left_val_y} minRightY={right_val_y}")
print(f"left_x_less_eq_to_right_y={left_x_less_eq_to_right_y}")
print(f"left_y_less_eq_to_right_x={left_y_less_eq_to_right_x}")
# Partition found where median can be calculated
if left_x_less_eq_to_right_y and left_y_less_eq_to_right_x:
print(f"Found the perfect partition indexes. partition_x={partition_x} partitionY={partition_y}")
print(f"small_array_left={small_array[0:max(0, partition_x)]} small_array_right={small_array[partition_x::]}")
print(f"large_array_left={large_array[0:max(0, partition_y)]} large_array_right={large_array[partition_y::]}")
is_even = total_elements % 2 == 0
if is_even:
median = (max(left_val_x, left_val_y) + min(right_val_x, right_val_y)) / 2
else:
median = int(max(left_val_x, left_val_y))
print(f"Found the perfect median {median}")
return median
# Move search space backward or forward
if left_val_x > right_val_y:
end_x = partition_x - 1
else:
start_x = partition_x + 1
@classmethod
def _get_partition_x(cls, start_x: int, end_x: int):
"""
Parameters
----------
start_x: int
The current start_x
end_x: int
The current end_x
Returns
-------
int
The partition index for the x array, if > len(x) then the median of both arrays is in y
"""
return int((start_x + end_x) / 2)
@classmethod
def _get_partition_y(cls, total_elements: int, partition_x: int):
"""
Parameters
----------
total_elements: int
The total number of elements in both arrays
partition_x: int
The current partition_x
Returns
-------
int
The partition point of y
"""
return int(((total_elements + 1) / 2) - partition_x)
@classmethod
def _get_median_single_array(cls, nums: list) -> float:
"""
Gets the median of the sorted array
Returns
-------
float
Median number which partitions the array such that both sides have equal number of elements.
"""
if len(nums) == 1:
return nums[0]
median = len(nums) / 2
if not median.is_integer():
# Odd number then return the middle
return nums[int(median)]
# Even so must split
median = int(median) - 1
return (nums[median] + nums[median+1]) / 2
def main():
""" The entry point of the Python script """
# Trying to find the median of the combined arrays
sln = Solution()
# We know the median to this is 11
#print(sln.findMedianSortedArrays(nums1=[1,3,8,9,15], nums2=[7,11,18,19,21,25]))
print(sln.findMedianSortedArrays(nums1=[1,1,1,1,1], nums2=[1,1,1,1,1,1]))
# We know the median to this is between 11 and 16 so 13.5
#print(sln.findMedianSortedArrays(nums1=[23,26,31,35], nums2=[3,5,7,9,11,16]))
# The median here is -1
#print(sln.findMedianSortedArrays([3], [-2,-1]))
# The median here is 3
#print(sln.findMedianSortedArrays([1,2,5],[1,3,5,6]))
if __name__ == "__main__":
main()
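# Worked example for the first (commented-out) test case above:
# nums1 = [1,3,8,9,15], nums2 = [7,11,18,19,21,25], 11 elements in total.
# The binary search settles on partition_x = 4, partition_y = 2, i.e.
#   left of the cut:  [1, 3, 8, 9]  and [7, 11]
#   right of the cut: [15]          and [18, 19, 21, 25]
# max(9, 11) <= min(15, 18) holds, and since the total count is odd the median
# is max(left_val_x, left_val_y) = 11, matching the merged array
# [1, 3, 7, 8, 9, 11, 15, 18, 19, 21, 25].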
| {
"content_hash": "cf53832012056903f365d2f6d7bd96f9",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 128,
"avg_line_length": 34.62686567164179,
"alnum_prop": 0.5695402298850575,
"repo_name": "paulness/paulness.github.io",
"id": "1292dd778885668050efde10fa65d342d8df72cd",
"size": "6980",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "leetcode_work_dir/find_median_two_sorted_arrays.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "978"
},
{
"name": "HTML",
"bytes": "61"
},
{
"name": "JavaScript",
"bytes": "122121"
},
{
"name": "Python",
"bytes": "17569"
}
],
"symlink_target": ""
} |
"""
WSGI config for horseapi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "horseapi.settings")
application = get_wsgi_application()
| {
"content_hash": "f84d0da7ff1ad07490b10d10d5399cfe",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.5625,
"alnum_prop": 0.7709923664122137,
"repo_name": "deforestg/Air-Horse-One",
"id": "965b2f77225654f6f21f48c2e794a65314092e83",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/horseapi/horseapi/base/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "915"
},
{
"name": "JavaScript",
"bytes": "382"
},
{
"name": "Python",
"bytes": "3811"
},
{
"name": "TypeScript",
"bytes": "1497"
}
],
"symlink_target": ""
} |
from base import BaseBot
from scraping import general, defense, scraper
import sys
class DefenderBot(BaseBot):
def __init__(self, browser, config, planets):
self.defense_client = defense.Defense(browser, config)
self.general_client = general.General(browser, config)
self.planets = planets
super(DefenderBot, self).__init__(browser, config, planets)
def auto_build_defenses(self):
"""Auto build defenses on all planets"""
planets = self.planets
for planet in planets:
self.auto_build_defenses_to_planet(planet)
def auto_build_defenses_to_planet(self, planet, iteration_budget=.5):
"""
Automatically build defenses to the specified planet
:param planet: planet to build defenses on
        :param iteration_budget: percentage of resources that can be spent at each build iteration
"""
if planet.resources is None:
planet.resources = self.general_client.get_resources(planet)
if planet.defenses is None:
planet.defenses = self.defense_client.get_defenses(planet)
defense_proportion_list = self.parse_defense_proportion(self.config.defense_proportion)
available_defenses_proportion_list = filter(lambda x: x.item.cost < planet.resources.times(iteration_budget),
defense_proportion_list)
if self.check_exit_conditions_for_auto_build_defenses(planet, defense_proportion_list,
available_defenses_proportion_list,
planet.resources):
return
defenses_relation = self.get_defenses_proportion_comparison_table(available_defenses_proportion_list,
planet.defenses)
type_and_amount_to_build = self.get_type_and_amount_to_build(defenses_relation, planet.resources.times(iteration_budget))
self.defense_client.build_defense_to_planet(type_and_amount_to_build.item, type_and_amount_to_build.amount,
planet)
spent_resources = type_and_amount_to_build.item.cost.times(type_and_amount_to_build.amount)
planet.resources -= spent_resources
planet_defense = filter(lambda x: x.item.id == type_and_amount_to_build.item.id, planet.defenses).pop()
planet_defense.amount += type_and_amount_to_build.amount
next_iteration_budget = min(iteration_budget * 1.25, 1)
return self.auto_build_defenses_to_planet(planet, next_iteration_budget)
def get_type_and_amount_to_build(self, defenses_relation, planet_resources):
# the worst defense to build is the defense type that has the highest number of defenses
# built by the desired proportion
worst_defense_to_build = min(defenses_relation, key=lambda x: x.proportion_rate())
# the best defense to build is the defense type that has the lowest number of defenses
# built by the desired proportion
best_defense_to_build = max(defenses_relation, key=lambda x: x.proportion_rate())
if worst_defense_to_build == best_defense_to_build:
target_amount_to_build = sys.maxint
else:
# Get necessary amount to build so that the defense proportion rates are equal
target_amount = worst_defense_to_build.current_amount * best_defense_to_build.target_amount / \
worst_defense_to_build.target_amount
target_amount_to_build = target_amount - best_defense_to_build.current_amount
# Limit amount of the defenses to build according to the planet resources
max_amount_by_budget = self.get_maximum_amount_of_defenses_by_budget(best_defense_to_build.item.cost,
planet_resources)
return scraper.ItemAction(best_defense_to_build.item, min(target_amount_to_build, max_amount_by_budget))
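    # Worked example for the proportion logic above (illustrative numbers):
    # with a desired proportion of RL:4, HL:1 and 400 RL / 50 HL already built,
    # the rates are 4/400 = 0.01 (RL) and 1/50 = 0.02 (HL). HL has the highest
    # rate, so it is picked: target_amount = 400 * 1 / 4 = 100, meaning
    # 100 - 50 = 50 heavy lasers are queued, further capped by
    # get_maximum_amount_of_defenses_by_budget().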
def check_exit_conditions_for_auto_build_defenses(self, planet, defense_proportion_list,
available_defenses_proportion_list,
planet_resources):
if len(available_defenses_proportion_list) == 1 \
and available_defenses_proportion_list[0].item.id == defense.ROCKET_LAUNCHER.id \
and len(defense_proportion_list) > 1:
if self.config.spend_excess_metal_on_rl is False:
self.logger.info("Can only build rocket launchers, and SpendExcessMetalOnRL is False")
return True
else:
planet_resources.energy = 0
self.logger.info("%s left" % planet_resources)
self.logger.info("Spending excess metal on rocket launchers")
if len(available_defenses_proportion_list) == 0:
self.logger.info("No more resources on planet %s to continue building defenses" % planet.name)
return True
return False
def get_least_defended_planet(self):
least_defended_planet = min(self.planets, key=self.get_defense_points_for_planet)
return least_defended_planet
def get_defense_points_for_planet(self, planet):
defenses = self.defense_client.get_defenses(planet)
defense_points = sum(
[defense.DEFENSES_DATA.get(str(x.item.id)).cost.times(x.amount).get_points() for x in defenses])
return defense_points
def get_defenses_proportion_comparison_table(self, defenses_proportion_list, planet_defenses):
defenses_proportion_comparison_table = []
for defense_proportion in defenses_proportion_list:
current_defense_amount = next(x.amount for x in planet_defenses if
x.item.id == defense_proportion.item.id)
defense_proportion_comparison = self.DefenseProportionComparison(defense_proportion.item,
defense_proportion.amount,
current_defense_amount)
defenses_proportion_comparison_table.append(defense_proportion_comparison)
return defenses_proportion_comparison_table
@staticmethod
def parse_defense_proportion(defense_proportion_str):
parsed_defense_proportion = map(lambda x: scraper.ItemAction(defense.DEFENSES_DATA.get(filter(str.isalpha, x)),
int(filter(str.isdigit, x))),
defense_proportion_str)
return filter(lambda x: x.item is not None and x.amount is not None, parsed_defense_proportion)
@staticmethod
def get_maximum_amount_of_defenses_by_budget(defense_type_cost, resources):
max_by_metal = int(resources.metal / defense_type_cost.metal)
max_by_crystal = int(resources.crystal / defense_type_cost.crystal) \
if defense_type_cost.crystal != 0 else sys.maxint
max_by_deuterium = int(resources.deuterium / defense_type_cost.deuterium) \
if defense_type_cost.deuterium != 0 else sys.maxint
return min(max_by_metal, max_by_crystal, max_by_deuterium)
class DefenseProportionComparison:
def __init__(self, defense_item, target_amount, current_amount):
self.item = defense_item
self.target_amount = target_amount
self.current_amount = current_amount
def proportion_rate(self):
return self.target_amount / float(self.current_amount)
| {
"content_hash": "fd4643fbb2f02b52732b188138049856",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 129,
"avg_line_length": 50.72727272727273,
"alnum_prop": 0.6158474142345111,
"repo_name": "winiciuscota/OG-Bot",
"id": "2ca6bd8fb97e2c4d3c2b71b5cec9676fac7a45c5",
"size": "7812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ogbot/core/defender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119264"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import struct
import threading
import time
import logging
import unittest
from vtproto import topodata_pb2
from vtdb import keyrange_constants
import environment
import tablet
import utils
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack
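# pack_keyspace_id turns a 64-bit keyspace id into its big-endian byte form,
# e.g. pack_keyspace_id(0x9000000000000000) == '\x90\x00\x00\x00\x00\x00\x00\x00',
# which is the representation _check_value expects when keyspace_id_type is
# KIT_BYTES.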
# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_ny_rdonly.init_mysql(),
shard_1_master.init_mysql(),
shard_1_slave1.init_mysql(),
shard_1_slave2.init_mysql(),
shard_1_ny_rdonly.init_mysql(),
shard_1_rdonly1.init_mysql(),
shard_2_master.init_mysql(),
shard_2_replica1.init_mysql(),
shard_2_replica2.init_mysql(),
shard_3_master.init_mysql(),
shard_3_replica.init_mysql(),
shard_3_rdonly1.init_mysql(),
]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_ny_rdonly.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_slave1.teardown_mysql(),
shard_1_slave2.teardown_mysql(),
shard_1_ny_rdonly.teardown_mysql(),
shard_1_rdonly1.teardown_mysql(),
shard_2_master.teardown_mysql(),
shard_2_replica1.teardown_mysql(),
shard_2_replica2.teardown_mysql(),
shard_3_master.teardown_mysql(),
shard_3_replica.teardown_mysql(),
shard_3_rdonly1.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_ny_rdonly.remove_tree()
shard_1_master.remove_tree()
shard_1_slave1.remove_tree()
shard_1_slave2.remove_tree()
shard_1_ny_rdonly.remove_tree()
shard_1_rdonly1.remove_tree()
shard_2_master.remove_tree()
shard_2_replica1.remove_tree()
shard_2_replica2.remove_tree()
shard_3_master.remove_tree()
shard_3_replica.remove_tree()
shard_3_rdonly1.remove_tree()
# InsertThread will insert a value into the timestamps table, and then
# every 1/5s will update its value with the current timestamp
class InsertThread(threading.Thread):
def __init__(self, tablet_obj, object_name, user_id, keyspace_id):
threading.Thread.__init__(self)
self.tablet = tablet_obj
self.object_name = object_name
self.user_id = user_id
self.keyspace_id = keyspace_id
self.str_keyspace_id = utils.uint64_to_hex(keyspace_id)
self.done = False
self.tablet.mquery(
'vt_test_keyspace',
['begin',
'insert into timestamps(name, time_milli, keyspace_id) '
"values('%s', %d, 0x%x) "
'/* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
(self.object_name, long(time.time() * 1000), self.keyspace_id,
self.str_keyspace_id, self.user_id),
'commit'],
write=True, user='vt_app')
self.start()
def run(self):
try:
while not self.done:
self.tablet.mquery(
'vt_test_keyspace',
['begin',
'update timestamps set time_milli=%d '
'where name="%s" /* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
(long(time.time() * 1000), self.object_name,
self.str_keyspace_id, self.user_id),
'commit'],
write=True, user='vt_app')
time.sleep(0.2)
except Exception:
logging.exception('InsertThread got exception.')
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
def __init__(self, tablet_obj, object_name):
threading.Thread.__init__(self)
self.tablet = tablet_obj
self.object_name = object_name
self.done = False
self.max_lag = 0
self.lag_sum = 0
self.sample_count = 0
self.start()
def run(self):
try:
while not self.done:
result = self.tablet.mquery(
'vt_test_keyspace',
'select time_milli from timestamps where name="%s"' %
self.object_name)
if result:
lag = long(time.time() * 1000) - long(result[0][0])
logging.debug('MonitorLagThread(%s) got %d', self.object_name, lag)
self.sample_count += 1
self.lag_sum += lag
if lag > self.max_lag:
self.max_lag = lag
time.sleep(1.0)
except Exception:
logging.exception('MonitorLagThread got exception.')
class TestResharding(unittest.TestCase):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
create_table_template = '''create table %s(
id bigint auto_increment,
msg varchar(64),
keyspace_id ''' + t + ''' not null,
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s(id, msg, keyspace_id) as select id, msg, keyspace_id '
'from %s')
create_timestamp_table = '''create table timestamps(
name varchar(64),
time_milli bigint(20) unsigned not null,
keyspace_id ''' + t + ''' not null,
primary key (name)
) Engine=InnoDB'''
create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_timestamp_table,
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_unrelated_table,
'test_keyspace'],
auto_log=True)
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
def _insert_value(self, tablet_obj, table, mid, msg, keyspace_id):
k = utils.uint64_to_hex(keyspace_id)
tablet_obj.mquery(
'vt_test_keyspace',
['begin',
'insert into %s(id, msg, keyspace_id) '
'values(%d, "%s", 0x%x) /* vtgate:: keyspace_id:%s */ '
'/* user_id:%d */' %
(table, mid, msg, keyspace_id, k, mid),
'commit'],
write=True)
def _get_value(self, tablet_obj, table, mid):
return tablet_obj.mquery(
'vt_test_keyspace',
'select id, msg, keyspace_id from %s where id=%d' % (table, mid))
def _check_value(self, tablet_obj, table, mid, msg, keyspace_id,
should_be_here=True):
result = self._get_value(tablet_obj, table, mid)
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
if should_be_here:
self.assertEqual(result, ((mid, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, keyspace_id=' +
fmt + ', row=%s') % (tablet_obj.tablet_alias, mid,
keyspace_id, str(result)))
else:
self.assertEqual(
len(result), 0,
('Extra row in tablet %s for id=%d, keyspace_id=' +
fmt + ': %s') % (tablet_obj.tablet_alias, mid, keyspace_id,
str(result)))
# _is_value_present_and_correct tries to read a value.
# if it is there, it will check it is correct and return True if it is.
# if not correct, it will self.fail.
# if not there, it will return False.
def _is_value_present_and_correct(
self, tablet_obj, table, mid, msg, keyspace_id):
result = self._get_value(tablet_obj, table, mid)
if not result:
return False
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
self.assertEqual(result, ((mid, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, keyspace_id=' + fmt) % (
tablet_obj.tablet_alias, mid, keyspace_id))
return True
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
# check first value is in the right shard
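    # (0x9000000000000000 sorts below 0xc0..., so once shard 80- is split it
    # must only show up in the 80-c0 tablets; 0xD000000000000000 sorts above,
    # so it belongs to c0-.)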
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica1, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica2, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_replica, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_rdonly1, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# check second value is in the right shard too
self._check_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica1, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_replica, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_rdonly1, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
  # _check_lots returns the percentage of the inserted values that are present.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
if timeout == 0:
self.fail('timeout waiting for %d%% of the data' % threshold)
logging.debug('sleeping until we get %d%%', threshold)
time.sleep(1)
timeout -= 1
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def _check_binlog_server_vars(self, tablet_obj):
v = utils.get_vars(tablet_obj.port)
self.assertIn('UpdateStreamKeyRangeStatements', v)
self.assertIn('UpdateStreamKeyRangeTransactions', v)
def _check_binlog_player_vars(self, tablet_obj, seconds_behind_master_max=0):
v = utils.get_vars(tablet_obj.port)
self.assertIn('BinlogPlayerMapSize', v)
self.assertIn('BinlogPlayerSecondsBehindMaster', v)
self.assertIn('BinlogPlayerSecondsBehindMasterMap', v)
self.assertIn('BinlogPlayerSourceShardNameMap', v)
self.assertIn('0', v['BinlogPlayerSourceShardNameMap'])
self.assertEquals(
v['BinlogPlayerSourceShardNameMap']['0'], 'test_keyspace/80-')
self.assertIn('BinlogPlayerSourceTabletAliasMap', v)
self.assertIn('0', v['BinlogPlayerSourceTabletAliasMap'])
if seconds_behind_master_max != 0:
self.assertTrue(
v['BinlogPlayerSecondsBehindMaster'] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMaster is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMaster'],
seconds_behind_master_max))
self.assertTrue(
v['BinlogPlayerSecondsBehindMasterMap']['0'] <
seconds_behind_master_max,
'BinlogPlayerSecondsBehindMasterMap is too high: %d > %d' % (
v['BinlogPlayerSecondsBehindMasterMap']['0'],
seconds_behind_master_max))
def _check_stream_health_equals_binlog_player_vars(self, tablet_obj):
blp_stats = utils.get_vars(tablet_obj.port)
# Enforce health check because it's not running by default as
# tablets are not started with it.
utils.run_vtctl(['RunHealthCheck', tablet_obj.tablet_alias, 'replica'])
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
tablet_obj.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertNotIn('serving', stream_health)
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('health_error', stream_health['realtime_stats'])
# count is > 0 and therefore not omitted by the Go JSON marshaller.
self.assertIn('binlog_players_count', stream_health['realtime_stats'])
self.assertEqual(blp_stats['BinlogPlayerMapSize'],
stream_health['realtime_stats']['binlog_players_count'])
self.assertEqual(blp_stats['BinlogPlayerSecondsBehindMaster'],
stream_health['realtime_stats'].get(
'seconds_behind_master_filtered_replication', 0))
def _test_keyrange_constraints(self):
with self.assertRaisesRegexp(
Exception, '.*enforce keyspace_id range.*'):
shard_0_master.execute(
"insert into resharding1(id, msg, keyspace_id) "
" values(1, 'msg', :keyspace_id)",
bindvars={'keyspace_id': 0x9000000000000000},
)
with self.assertRaisesRegexp(
Exception, '.*enforce keyspace_id range.*'):
shard_0_master.execute(
"update resharding1 set msg = 'msg' where id = 1",
bindvars={'keyspace_id': 0x9000000000000000},
)
with self.assertRaisesRegexp(
Exception, '.*enforce keyspace_id range.*'):
shard_0_master.execute(
'delete from resharding1 where id = 1',
bindvars={'keyspace_id': 0x9000000000000000},
)
def test_resharding(self):
# we're going to reparent and swap these two
global shard_2_master, shard_2_replica1
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'bad_column',
'--sharding_column_type', 'bytes',
'--split_shard_count', '2',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'keyspace_id', 'uint64'], expect_fail=True)
utils.run_vtctl(['SetKeyspaceShardingInfo',
'-force', '-split_shard_count', '4',
'test_keyspace', 'keyspace_id', keyspace_id_type])
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave2.init_tablet('spare', 'test_keyspace', '80-')
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['split_shard_count'], 4)
# we set full_mycnf_args to True as a test in the KIT_BYTES case
full_mycnf_args = keyspace_id_type == keyrange_constants.KIT_BYTES
# create databases so vttablet can start behaving normally
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args)
# wait for the tablets
shard_0_master.wait_for_vttablet_state('SERVING')
shard_0_replica.wait_for_vttablet_state('SERVING')
shard_0_ny_rdonly.wait_for_vttablet_state('SERVING')
shard_1_master.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('SERVING')
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING') # spare
shard_1_ny_rdonly.wait_for_vttablet_state('SERVING')
shard_1_rdonly1.wait_for_vttablet_state('SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
# create the tables
self._create_schema()
self._insert_startup_values()
self._test_keyrange_constraints()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_slave1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'replica'])
for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias, 'rdonly'])
# create the split shards
shard_2_master.init_tablet('master', 'test_keyspace', '80-c0')
shard_2_replica1.init_tablet('spare', 'test_keyspace', '80-c0')
shard_2_replica2.init_tablet('spare', 'test_keyspace', '80-c0')
shard_3_master.init_tablet('master', 'test_keyspace', 'c0-')
shard_3_replica.init_tablet('spare', 'test_keyspace', 'c0-')
shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
shard_3_master.start_vttablet(wait_for_state=None,
target_tablet_type='replica')
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_replica, shard_3_rdonly1]:
t.start_vttablet(wait_for_state=None)
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica, shard_3_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-c0',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/c0-',
shard_3_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
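    # 'Partitions(<type>): -80 80-' means the serving graph still routes that
    # tablet type to the original shards only; 80-c0 and c0- appear per type
    # as each MigrateServedTypes step below is run.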
# the worker will do everything. We test with source_reader_count=10
# (down from default=20) as connection pool is not big enough for 20.
    # min_table_size_for_split is set to 1 to force a split even on the
# small table we have.
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
utils.run_vtworker(['--cell', 'test_nj',
'--command_display_interval', '10ms',
'SplitClone',
'--exclude_tables', 'unrelated',
'--source_reader_count', '10',
'--min_table_size_for_split', '1',
'test_keyspace/80-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
'rdonly'], auto_log=True)
# TODO(alainjobart): experiment with the dontStartBinlogPlayer option
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
'test_keyspace'], auto_log=True)
# check the binlog players are running and exporting vars
shard_2_master.wait_for_binlog_player_count(1)
shard_3_master.wait_for_binlog_player_count(1)
self._check_binlog_player_vars(shard_2_master)
self._check_binlog_player_vars(shard_3_master)
# check that binlog server exported the stats vars
self._check_binlog_server_vars(shard_1_slave1)
self._check_stream_health_equals_binlog_player_vars(shard_2_master)
self._check_stream_health_equals_binlog_player_vars(shard_3_master)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
self._check_binlog_player_vars(shard_2_master, seconds_behind_master_max=30)
self._check_binlog_player_vars(shard_3_master, seconds_behind_master_max=30)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias, 'rdonly'])
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
'unrelated', 'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for a destination master tablet, make sure we have it all
shard_2_master_status = shard_2_master.get_status()
self.assertIn('Binlog player state: Running', shard_2_master_status)
self.assertIn(
'<td><b>All</b>: 6000<br><b>Query</b>: 4000<br>'
'<b>Transaction</b>: 2000<br></td>', shard_2_master_status)
self.assertIn('</html>', shard_2_master_status)
# start a thread to insert data into shard_1 in the background
# with current time, and monitor the delay
insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 10000,
0x9000000000000000)
insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 10001,
0xD000000000000000)
monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low')
monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high')
# tests a failover switching serving to a different replica
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias, 'replica'])
# test data goes through again
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000, base=1000)
logging.debug('Checking 80 percent of data was sent quickly')
self._check_lots_timeout(1000, 80, 5, base=1000)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
expect_fail=True)
# check query service is off on master 2 and master 3, as filtered
    # replication is enabled. Even the health check that is enabled on
    # master 3 should not interfere (we run it to be sure).
utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias, 'replica'],
auto_log=True)
for master in [shard_2_master, shard_3_master]:
utils.check_tablet_query_service(self, master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_3_master.get_healthz()
# now serve rdonly from the split shards, in test_nj only
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'test_keyspace/80-', 'rdonly'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# now serve rdonly from the split shards, everywhere
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# then serve replica from the split shards
destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, True, False)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly.
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
# reparent shard_2 to shard_2_replica1, then insert more data and
# see it flow through still
utils.run_vtctl(['PlannedReparentShard', 'test_keyspace/80-c0',
shard_2_replica1.tablet_alias])
# update our test variables to point at the new master
shard_2_master, shard_2_replica1 = shard_2_replica1, shard_2_master
logging.debug('Inserting lots of data on source shard after reparenting')
self._insert_lots(3000, base=2000)
logging.debug('Checking 80 percent of data was sent fairly quickly')
self._check_lots_timeout(3000, 80, 10, base=2000)
# use vtworker to compare the data again
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', '--exclude_tables',
'unrelated', 'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# going to migrate the master now, check the delays
monitor_thread_1.done = True
monitor_thread_2.done = True
insert_thread_1.done = True
insert_thread_2.done = True
logging.debug('DELAY 1: %s max_lag=%d avg_lag=%d',
monitor_thread_1.object_name,
monitor_thread_1.max_lag,
monitor_thread_1.lag_sum / monitor_thread_1.sample_count)
logging.debug('DELAY 2: %s max_lag=%d avg_lag=%d',
monitor_thread_2.object_name,
monitor_thread_2.max_lag,
monitor_thread_2.lag_sum / monitor_thread_2.sample_count)
# mock with the SourceShard records to test 'vtctl SourceShardDelete'
# and 'vtctl SourceShardAdd'
utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '0'],
auto_log=True)
utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
'test_keyspace/c0-', '0', 'test_keyspace/80-'],
auto_log=True)
# then serve master from the split shards, make sure the source master's
# query service is now turned off
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-c0 c0-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=keyspace_id_type)
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
shard_2_master.wait_for_binlog_player_count(0)
shard_3_master.wait_for_binlog_player_count(0)
# get status for a destination master tablet, make sure it's good
shard_2_master_status = shard_2_master.get_status()
self.assertIn('No binlog player is running', shard_2_master_status)
self.assertIn('</html>', shard_2_master_status)
# delete the original tablets in the original shard
tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1])
for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_1_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(
['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# test RemoveShardCell
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
expect_fail=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertNotIn('cells', shard)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
# kill everything
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica, shard_3_rdonly1])
if __name__ == '__main__':
utils.main()
| {
"content_hash": "6ec42b02a02c08a5bdc4ba5a56a39423",
"timestamp": "",
"source": "github",
"line_count": 857,
"max_line_length": 80,
"avg_line_length": 43.47024504084014,
"alnum_prop": 0.6091963279110968,
"repo_name": "guokeno0/vitess",
"id": "32d43b286a6e91ac21c930236d0f13ef53df2512",
"size": "37254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/resharding.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9588"
},
{
"name": "CSS",
"bytes": "202224"
},
{
"name": "Go",
"bytes": "4798380"
},
{
"name": "HTML",
"bytes": "46179"
},
{
"name": "Java",
"bytes": "203703"
},
{
"name": "JavaScript",
"bytes": "48391"
},
{
"name": "Liquid",
"bytes": "8009"
},
{
"name": "Makefile",
"bytes": "5070"
},
{
"name": "PHP",
"bytes": "932518"
},
{
"name": "Protocol Buffer",
"bytes": "75511"
},
{
"name": "Python",
"bytes": "764036"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "34184"
},
{
"name": "Yacc",
"bytes": "20859"
}
],
"symlink_target": ""
} |
"""
WSGI config for proyecto_eees project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "proyecto_eees.settings")
application = get_wsgi_application()
| {
"content_hash": "8f3591fbb2b31db69f5196d31cb2aee0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.25,
"alnum_prop": 0.7722772277227723,
"repo_name": "cgutierr3z/proyecto-eees",
"id": "5562cff12b94a05938520ecbd2597498089537cd",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proyecto_eees/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12394"
},
{
"name": "HTML",
"bytes": "53032"
},
{
"name": "JavaScript",
"bytes": "82925"
},
{
"name": "Python",
"bytes": "40084"
}
],
"symlink_target": ""
} |
from flask import current_app
from manager_rest import constants
from manager_rest.storage import models
from manager_rest.security.authorization import authorize
from manager_rest.security import MissingPremiumFeatureResource
from manager_rest.manager_exceptions import (
BadParametersError,
MethodNotAllowedError,
)
from .. import rest_decorators, rest_utils
from ..responses_v3 import BaseResponse
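# When the premium multi-tenancy package is not installed, fall back to the
# community stubs so these endpoints degrade to "missing premium feature"
# responses instead of failing at import time.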
try:
from cloudify_premium.multi_tenancy.responses import GroupResponse
from cloudify_premium.multi_tenancy.secured_tenant_resource \
import SecuredMultiTenancyResource
except ImportError:
GroupResponse = BaseResponse
SecuredMultiTenancyResource = MissingPremiumFeatureResource
class UserGroups(SecuredMultiTenancyResource):
@authorize('user_group_list')
@rest_decorators.marshal_with(GroupResponse)
@rest_decorators.create_filters(models.Group)
@rest_decorators.paginate
@rest_decorators.sortable(models.Group)
@rest_decorators.search('name')
def get(self, multi_tenancy, _include=None, filters=None, pagination=None,
sort=None, search=None, **kwargs):
"""
List groups
"""
return multi_tenancy.list_groups(
_include,
filters,
pagination,
sort,
search
)
@authorize('user_group_create')
@rest_decorators.marshal_with(GroupResponse)
def post(self, multi_tenancy):
"""
Create a group
"""
request_dict = rest_utils.get_json_and_verify_params()
group_name = request_dict['group_name']
ldap_group_dn = request_dict.get('ldap_group_dn')
role = request_dict.get('role', constants.DEFAULT_SYSTEM_ROLE)
rest_utils.verify_role(role, is_system_role=True)
rest_utils.validate_inputs({'group_name': group_name})
if group_name == 'users':
raise BadParametersError(
'{0!r} is not allowed as a user group name '
"because it wouldn't be possible to remove it later due to "
'a conflict with the remove {0} from user group endpoint'
.format(str(group_name))
)
return multi_tenancy.create_group(group_name, ldap_group_dn, role)
class UserGroupsId(SecuredMultiTenancyResource):
@authorize('user_group_update')
@rest_decorators.marshal_with(GroupResponse)
def post(self, group_name, multi_tenancy):
"""
Set role for a certain group
"""
request_dict = rest_utils.get_json_and_verify_params()
role_name = request_dict.get('role')
if not role_name:
raise BadParametersError('`role` not provided')
rest_utils.verify_role(role_name, is_system_role=True)
return multi_tenancy.set_group_role(group_name, role_name)
@authorize('user_group_get')
@rest_decorators.marshal_with(GroupResponse)
def get(self, group_name, multi_tenancy):
"""
Get info for a single group
"""
rest_utils.validate_inputs({'group_name': group_name})
return multi_tenancy.get_group(group_name)
@authorize('user_group_delete')
def delete(self, group_name, multi_tenancy):
"""
Delete a user group
"""
rest_utils.validate_inputs({'group_name': group_name})
multi_tenancy.delete_group(group_name)
return None, 204
class UserGroupsUsers(SecuredMultiTenancyResource):
@authorize('user_group_add_user')
@rest_decorators.marshal_with(GroupResponse)
@rest_decorators.no_external_authenticator('add user to group')
def put(self, multi_tenancy):
"""
Add a user to a group
"""
if current_app.external_auth \
and current_app.external_auth.configured():
raise MethodNotAllowedError(
'Explicit group to user association is not permitted when '
'using LDAP. Group association to users is done automatically'
' according to the groups associated with the user in LDAP.')
request_dict = rest_utils.get_json_and_verify_params({'username',
'group_name'})
rest_utils.validate_inputs(request_dict)
return multi_tenancy.add_user_to_group(
request_dict['username'],
request_dict['group_name']
)
@authorize('user_group_remove_user')
def delete(self, multi_tenancy):
"""
Remove a user from a group
"""
request_dict = rest_utils.get_json_and_verify_params({'username',
'group_name'})
multi_tenancy.remove_user_from_group(
request_dict['username'],
request_dict['group_name']
)
return None, 204
| {
"content_hash": "b7e98455df35920b6f0432b9a7ff79d9",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 36.63157894736842,
"alnum_prop": 0.6258210180623974,
"repo_name": "cloudify-cosmo/cloudify-manager",
"id": "3f2f0b4e6751b815cd7bf4e0a0a2113666770b46",
"size": "5510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/rest/resources_v3/user_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Dockerfile",
"bytes": "3843"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PLpgSQL",
"bytes": "119062"
},
{
"name": "Python",
"bytes": "3825971"
},
{
"name": "Shell",
"bytes": "49121"
}
],
"symlink_target": ""
} |
"""Basic tests for gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import custom_gradient
from tensorflow.python.eager import tape
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Importing nn_grad for the registration functions.
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
@custom_gradient.custom_gradient
def two_outputs(a, b):
mm = math_ops.matmul(a, b)
r = math_ops.reduce_sum(mm)
def grad(dmm, dr):
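    # dmm and dr are the upstream gradients for the two outputs (mm and r);
    # each entry below combines both contributions into d/da and d/db.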
return [
math_ops.matmul(dmm, b, transpose_b=True) +
math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True),
math_ops.matmul(a, dmm, transpose_b=True) +
math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True)
]
return [mm, r], grad
@custom_gradient.custom_gradient
def gradient_is_constant(x):
result = x * x
def grad(dr):
return [dr]
return result, grad
class TapeTest(test.TestCase):
def testMultiOutput(self):
def fn(x, y):
c = x + y
# Multiple outputs from split.
d, f = array_ops.split(c, 2)
return d + f
a = constant_op.constant([[1., 0.], [0., 1.]])
b = constant_op.constant([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.test_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_c = tf_a + tf_b
tf_d, tf_f = array_ops.split(tf_c, 2, axis=1)
tf_e = tf_d + tf_f
tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])
self.assertAllEqual(da, tf_da.eval())
self.assertAllEqual(db, tf_db.eval())
def testBasicFunctional(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalPositionalArg(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
da, = backprop.gradients_function(forward, [0])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)).numpy())
def testBasicFunctionalWithValue(self):
def forward(a, b):
mm = math_ops.matmul(a, b)
return math_ops.reduce_sum(mm)
aa = constant_op.constant([[1., 0.], [0., 1.]])
bb = constant_op.constant([[1., 2.], [3., 4.]])
val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb)
self.assertAllEqual(da,
math_ops.matmul(
array_ops.ones_like(aa),
array_ops.transpose(bb)))
self.assertAllEqual(val, forward(aa, bb))
def testTwoOutputs(self):
def fn(x, y):
mm, r = two_outputs(x, y)
return r + math_ops.reduce_sum(mm)
a = constant_op.constant([[1., 0.], [0., 1.]])
b = constant_op.constant([[1., 2.], [3., 4.]])
da, db = backprop.gradients_function(fn, [0, 1])(a, b)
with context.graph_mode(), self.test_session():
tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32)
tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32)
tf_mm = math_ops.matmul(tf_a, tf_b)
tf_rr = 2 * math_ops.reduce_sum(tf_mm)
tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])
self.assertAllEqual(da, tf_da.eval())
self.assertAllEqual(db, tf_db.eval())
def testGcTwoOutputs(self):
def fn(x, y):
return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x,
labels=y)[0]
labels = constant_op.constant([0])
logits = constant_op.constant([[0.0]])
grad, = backprop.gradients_function(fn, [0])(logits, labels)
self.assertAllEqual(grad, [[0.0]])
def testTfTensor(self):
def fn(x):
return x
t = constant_op.constant(1.0)
g, = backprop.gradients_function(fn, [0])(t)
self.assertAllEqual(g, 1.0)
def testTapeGC(self):
# TODO(apassos) figure out how to test this without using tape internal
# APIs.
tape.push_new_tape()
def f():
x = constant_op.constant(1.0)
tape.watch(x)
x = gradient_is_constant(x)
x = gradient_is_constant(x)
x = gradient_is_constant(x)
f()
t = tape.pop_tape()
tensor_tape, op_tape = t.export()
self.assertEqual(len(tensor_tape), 1) # The watched tensor will remain on
# the tape
self.assertEqual(len(op_tape), 0) # No operations should remain on the tape
def testCustomGradientGraphMode(self):
with context.graph_mode(), self.test_session():
@custom_gradient.custom_gradient
def f(x):
def grad(dresult):
return dresult * 10.0
return x, grad
inp = constant_op.constant(1.0)
grad = gradients_impl.gradients(f(inp), inp)
self.assertAllEqual(grad[0].eval(), 10.0)
if __name__ == '__main__':
test.main()
| {
"content_hash": "b7a0e3ed1a39f73e0a5013acb2c1a8ac",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 80,
"avg_line_length": 31.141361256544503,
"alnum_prop": 0.5946536650975117,
"repo_name": "ychfan/tensorflow",
"id": "c97cb62125741ccdec495d925651a3559bd5fb9c",
"size": "6637",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/tape_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "206941"
},
{
"name": "C++",
"bytes": "31053371"
},
{
"name": "CMake",
"bytes": "650214"
},
{
"name": "Go",
"bytes": "1001136"
},
{
"name": "Java",
"bytes": "441709"
},
{
"name": "Jupyter Notebook",
"bytes": "1940755"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38533"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "28819616"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "382741"
}
],
"symlink_target": ""
} |
import socket
from multiprocessing.pool import ThreadPool
from time import sleep
import six
from kombu import Connection, Consumer, Queue
from kombu.exceptions import MessageStateError
from kombu.utils import nested
from microservices.helpers.logs import InstanceLogger
from microservices.utils import get_logger
_logger = get_logger(__name__)
class HandlerError(Exception):
pass
class DeferredMessage(object):
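    """Proxy around a kombu message that can defer acknowledgement calls.
    If ``deferred_callbacks`` is not None, calls to ack/reject/requeue (and
    their *_log_error variants) are appended to that list and executed later
    by the service loop (see ``Microservice.drain_results``) instead of
    running immediately; all other attributes pass through to the message.
    """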
_methods_for_callbacks = {
'ack', 'reject', 'requeue', 'reject_log_error',
'ack_log_error',
}
def __init__(self, message, deferred_callbacks):
self.message = message
self.deferred_callbacks = deferred_callbacks
@property
def with_deferred_callbacks(self):
return self.deferred_callbacks is not None
def __getattr__(self, item):
entity = getattr(self.message, item)
if self.with_deferred_callbacks:
if item in self._methods_for_callbacks:
return lambda *args, **kwargs: self.deferred_callbacks.append(
lambda: entity(*args, **kwargs)
)
else:
return entity
else:
return entity
@six.python_2_unicode_compatible
class Rule(object):
"""Rule"""
def __init__(self, name, handler, logger, autoack=True,
deferred_callbacks=None, pool=None,
**options):
"""Initialization
:param name: name of queue
        :param handler: handler for the queue
:param autoack: if true, call message.ack()
"""
self.handler = handler
self.name = name
self.options = options
self.autoack = autoack
self.logger = InstanceLogger(self, logger)
self._name = '<queue: {}>'.format(self.name)
self.deferred_callbacks = deferred_callbacks
self.pool = pool
def __str__(self):
return self._name
@property
def with_deferred_callbacks(self):
return self.deferred_callbacks is not None
def add_to_pool(self, handler):
self.pool.apply_async(handler)
def callback(self, body, message):
message = DeferredMessage(message, self.deferred_callbacks)
self.logger.debug('Data (len: %s) received', len(body))
def autoack():
try:
self.logger.debug('Ack message via autoack')
message.ack()
except ConnectionError as e: # pragma: no cover
self.logger.error('Connection error: %s when try message.ack',
e.strerror)
except MessageStateError:
self.logger.warning(
'ACK() was called in handler?')
def handler():
try:
self.logger.debug('Call handler...')
self.handler(body, HandlerContext(message, self))
except Exception:
self.logger.exception('Something happened in user handler')
raise HandlerError('Something happened in user handler')
if self.autoack:
autoack()
if self.with_deferred_callbacks:
self.logger.debug('Add handler to pool')
self.add_to_pool(handler)
else:
handler()
class HandlerContext(object):
"""Context for handler function"""
def __init__(self, message, rule):
"""Initialization
:param message: original message from kombu
:type message: kombu.Message
:param rule: rule object
:type rule: Rule
"""
self.message = message
self.rule = rule
@six.python_2_unicode_compatible
class Microservice(object):
"""Microservice for queues"""
connection = 'amqp:///'
def __init__(self, connection='amqp:///', logger=None, timeout=1, name=None,
workers=None, pool_factory=ThreadPool, reconnect_timeout=1,
reconnect_enable=True, workers_override_prefetch=True,
immediate_connect=True):
"""Initialization
        :type pool_factory: callable object; the pool should have a size property
        :param pool_factory: the pool will be configured as pool_factory(workers)
:type workers: int
:param workers: count of workers in pool
:param connection: connection for queues broker
:type connection: str, None, dict or Connection
:param logger: logging instance
:type logger: Logger
        :param timeout: timeout for drain_events in the loop, default = 1
:type timeout: None, int or float
"""
if logger is None:
logger = _logger
self.logger = InstanceLogger(self, logger)
self.connection = self._get_connection(connection)
self.timeout = timeout
self.consumers = []
self.reconnect_timeout = reconnect_timeout
self.reconnect_enable = reconnect_enable
self.workers_override_prefetch = workers_override_prefetch
if name is None:
try:
name = '<microservice: {}>'.format(self.connection.as_uri())
except: # pragma no cover
name = '<microservice: {}>'.format(
self.connection.transport_cls) # pragma: no cover
self.name = name
self._stop = False
self._stopped = False
self.pool = None
self.workers = workers
self.deferred_callbacks = None
if workers:
self.deferred_callbacks = []
self.pool = pool_factory(workers)
if immediate_connect:
self.connect()
def __str__(self):
return self.name
@property
def with_pool(self):
return self.pool is not None
def _get_connection(self, connection):
"""Create connection strategy
:param connection: connection for broker
:type connection: str, None, kombu.connections.Connection, dict
:return: instance of kombu.connections.Connection
:rtype: Connection
"""
if not connection:
connection = self.connection # pragma: no cover
if isinstance(connection, str):
connection = {'hostname': connection}
if isinstance(connection, dict):
connection = Connection(**connection)
return connection
def add_queue_rule(self, handler, name, autoack=True, prefetch_size=0,
prefetch_count=0, **kwargs):
"""Add queue rule to Microservice
        :param prefetch_count: number of messages to prefetch from the broker
        :param prefetch_size: prefetch window size in bytes
:param handler: function for handling messages
:param autoack: if True message.ack() after callback
:type handler: callable object
:param name: name of queue
:type name: str
"""
if self.with_pool:
if self.workers_override_prefetch:
prefetch_count = self.workers
rule = Rule(name, handler, self.logger, autoack=autoack,
deferred_callbacks=self.deferred_callbacks,
pool=self.pool, **kwargs)
else:
rule = Rule(name, handler, self.logger, autoack=autoack, **kwargs)
self.connect()
consumer = Consumer(self.connection, queues=[Queue(rule.name)],
callbacks=[rule.callback], auto_declare=True)
consumer.qos(prefetch_count=prefetch_count, prefetch_size=prefetch_size)
self.consumers.append(consumer)
self.logger.debug('Rule "%s" added!', rule.name)
def _start(self):
self._stopped = False
self._stop = False
self.connect()
def stop(self):
self._stop = True
self.logger.info('Try to stop microservice draining events')
def queue(self, name, autoack=True, prefetch_size=0, prefetch_count=0,
**kwargs):
"""Decorator for handler function
>>>app = Microservice()
>>>
>>>@app.queue('queue')
>>>def function(payload, context):
>>> pass
        :param prefetch_count: number of messages to prefetch from the broker
        :param prefetch_size: prefetch window size in bytes
:param autoack: if True message.ack() after callback
:param name: name of queue
:type name: str
"""
def decorator(f):
self.add_queue_rule(f, name, autoack=autoack,
prefetch_size=prefetch_size,
prefetch_count=prefetch_count,
**kwargs)
return f
return decorator
def connect(self): # pragma no cover
"""Try connect to mq"""
while not self._stop and not self.connection.connected:
try:
self.connection.connect()
self.logger.info('Connected to mq broker')
break
except ConnectionError as e: # pragma: no cover
if self.reconnect_enable:
self.logger.error(
'Connection error, cause: %s. Reconnecting...',
e.strerror
)
else:
self.stop()
break
except Exception: # pragma: no cover
self.logger.exception(
'Error when try to connect') # pragma: no cover
sleep(self.reconnect_timeout)
def revive(self): # pragma no cover
def _revive():
for i, consumer in enumerate(self.consumers):
self.logger.debug('Try revive consumer: %s', i)
consumer.channel = self.connection
consumer.revive(consumer.channel)
self.logger.debug('Consumer: %s was revived', i)
while not self._stop:
try:
_revive()
break
except ConnectionError: # pragma: no cover
if self.reconnect_enable:
self.connect()
else:
self.stop()
break
except Exception: # pragma: no cover
self.logger.exception(
'Error when try to revive') # pragma: no cover
sleep(self.reconnect_timeout)
self.logger.debug('All consumers %s was revived...', len(self.consumers))
@property
def stopped(self):
return self._stopped
def drain_results(self):
while self.deferred_callbacks:
callback = self.deferred_callbacks.pop()
try:
callback()
self.logger.debug('Called callback. All: %s',
len(self.deferred_callbacks))
except ConnectionError as e: # pragma: no cover
self.logger.error(
'Connection error when try callback: %s. Cause: %s. '
'Message will be handled on next iteration',
callback, e.strerror
)
except Exception: # pragma no cover
self.logger.exception(
'Unknown exception when try callback: %s', callback
)
def drain_events(self, infinity=True):
with nested(*self.consumers):
while not self._stop:
try:
self.connection.drain_events(timeout=self.timeout)
except socket.timeout:
if not infinity:
break
except ConnectionError as e: # pragma no cover
self.logger.error(
                        'Connection to mq has broken off because: %s. '
                        'Trying to reconnect...',
                        e)
self.connect()
self.revive()
break
except HandlerError:
self.logger.exception('Handler error')
except Exception as e: # pragma no cover
if not self._stop:
self.logger.exception(
'Something wrong! Try to restart the loop')
self.revive()
break
else: # pragma: no cover
self.logger.exception(
'Something wrong! And stopping...')
break
if self.with_pool:
try:
self.drain_results()
except Exception: # pragma no cover
self.logger.exception('Unknown error when '
'draining results')
if self._stop:
if self.with_pool:
try:
self.pool.join()
self.drain_results() # pragma: no cover
except AssertionError:
pass
except Exception: # pragma: no cover
self.logger.exception(
'Unknown error when '
'draining results'
)
self._stopped = True
self.logger.info('Stopped draining events.')
def run(self, debug=False):
"""Run microservice in loop, where handle connections
:param debug: enable/disable debug mode
:type debug: bool
"""
if debug:
from microservices.utils import set_logging
set_logging('DEBUG')
def _run():
self._start()
self.drain_events(infinity=True)
while not self._stopped:
_run()
def read(self, count=1):
for x in range(count):
self.drain_events(infinity=False)
| {
"content_hash": "721aa05e90d8f54a376b21c0528198af",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 92,
"avg_line_length": 34.31683168316832,
"alnum_prop": 0.5401759953837276,
"repo_name": "viatoriche/microservices",
"id": "2f72b02463230e304247fd07ade57af06a258a1a",
"size": "13864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microservices/queues/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1860"
},
{
"name": "Dockerfile",
"bytes": "67"
},
{
"name": "HTML",
"bytes": "9826"
},
{
"name": "JavaScript",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "80727"
},
{
"name": "Shell",
"bytes": "398"
}
],
"symlink_target": ""
} |
"""
Creates all the various image sizes and file structure
for a cordova project.
`python generate.py`
"""
import os
import sys
import logging
import subprocess
import optparse
from config import PLATFORMS
class Converter(object):
"""
Takes an icon image and splash image and generates a folder that includes
all of the various filenames and resized images necessary for a
multi-platform cordova build.
Edit the platform configuration in config.py to adjust which files
are created for each platform.
TODO:
- support more control over imagemagick settings
- portrait vs landscape (include in naming schemes)
- generate config.xml
"""
def __init__(self, platform_config):
self.platform_config = platform_config
self.parse_command_line()
self.verify_dependencies()
logging.debug("initialized converter")
def verify_dependencies(self):
# http://stackoverflow.com/a/11270665
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
# try to run convert command - if it fails, tell user and bail
if subprocess.call(
'convert -version',
stdout=DEVNULL,
stderr=DEVNULL,
shell=True):
logging.error("could not find ImageMagick " +
"`convert` method. Please install " +
"ImageMagick and/or add it to the path");
sys.exit(1)
def parse_command_line(self):
parser = optparse.OptionParser()
parser.add_option('-v',
dest="verbose",
action="store_true",
default=False,
help="log all the things"
)
parser.add_option('--icon',
dest="icon",
action="store",
default="icon.png",
help="relative path to your icon image"
)
parser.add_option('--splash',
action="store",
dest="splash",
default="splash.png",
help="relative path to your splash image"
)
parser.add_option('--destination',
action="store",
dest="destination",
default=os.path.join('www','res'),
help="relative path where you want the output created"
)
self.settings, args = parser.parse_args(sys.argv)
# set logging verbosity from command line arguments
level = logging.DEBUG if self.settings.verbose else logging.INFO
logging.basicConfig(level=level)
def generate(self):
logging.info("Generating Icons and Splash Screens!")
for platform, settings in self.platform_config.iteritems():
self.generate_platform(platform, settings)
def generate_platform(self, platform_name, settings):
logging.info("processing %s" % platform_name)
self.prep_platform(platform_name)
if 'icon' in settings:
self.generate_platform_icons(platform_name, settings['icon'])
if 'splash' in settings:
self.generate_platform_splashes(platform_name, settings['splash'])
def prep_platform(self, platform_name):
"""Ensure folder is available for platform"""
icon_path = self.get_icon_path(platform_name)
logging.debug("- creating icon path %s if necessary" % icon_path)
try: os.makedirs(icon_path)
except OSError: pass
splash_path = self.get_splash_path(platform_name)
logging.debug("- creating splash path %s if necessary" % splash_path)
try: os.makedirs(splash_path)
except OSError: pass
def get_icon_path(self, platform_name):
return os.path.abspath(
os.path.join(
self.settings.destination,
'icon',
platform_name
)
)
def get_splash_path(self, platform_name):
return os.path.join(
self.settings.destination,
'splash',
platform_name
)
def generate_platform_icons(self, platform_name, icon_settings):
logging.debug("- creating icons")
for icon_type in icon_settings:
for size_config in icon_type['sizes']:
                # parse size config into its standard form
size, dpi_level = self._parse_icon_size_config(size_config)
halfsize = int(size / 2)
# create destination string from filename pattern
filename = icon_type['filename'].format(
size=size,
halfsize=halfsize,
dpi_level=dpi_level
)
destination = os.path.join(
self.get_icon_path(platform_name),
filename
)
# if background is specified send it
background = None
if 'background' in icon_type:
background = icon_type['background']
# resize icon and put it where it belongs
self.resize(
self.settings.icon, destination, size, size, background
)
def _parse_icon_size_config(self, size_config):
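        # size_config is either a bare int (e.g. 57) or a (size, dpi_level)
        # pair such as (72, 'hdpi'); the values here are illustrative.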
dpi_level = 'default'
if type(size_config) == type(0):
size = size_config
elif len(size_config) > 1:
size, dpi_level = size_config
return size, dpi_level
def generate_platform_splashes(self, platform_name, splash_settings):
logging.debug("- creating splash screens")
for splash_type in splash_settings:
for size_config in splash_type['sizes']:
                # parse size config into its standard form
width, height, dpi_level = self._parse_splash_size_config(size_config)
halfwidth = int(width / 2)
halfheight = int(height / 2)
# create destination string from filename pattern
filename = splash_type['filename'].format(
width=width,
height=height,
halfwidth=halfwidth,
halfheight=halfheight,
dpi_level=dpi_level
)
destination = os.path.join(
self.get_splash_path(platform_name),
filename
)
# if background is specified send it
background = None
if 'background' in splash_type:
background = splash_type['background']
                # resize splash and put it where it belongs
self.resize(
self.settings.splash, destination, width, height, background
)
def _parse_splash_size_config(self, size_config):
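        # size_config is an int or a 1-, 2- or 3-tuple: (size,), (width,
        # height) or (width, height, dpi_level), e.g. (1280, 720, 'xhdpi');
        # the values here are illustrative.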
dpi_level = 'default'
if type(size_config) == type(0):
width = height = size_config
else:
if len(size_config) == 1:
width = height = size_config[0]
elif len(size_config) == 2:
width, height = size_config
elif len(size_config) == 3:
width, height, dpi_level = size_config
return width, height, dpi_level
def resize(self, source, destination, width, height, background=None):
logging.debug("- - Creating %s (%d, %d)" % (destination, width, height))
# TODO: support other conversion types if desired (PIL?)
self._resize_imagemagick(source, destination, width, height, background)
def _resize_imagemagick(self, source, destination, width, height, background=None):
# use imagemagick's convert method
raw_command = 'convert -background {background} "{source}" -resize {bigger}x{bigger} -gravity center -extent {width}x{height} "{destination}"'
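        # e.g. (illustrative) for a 57x57 icon with no background:
        #   convert -background none "icon.png" -resize 57x57 -gravity center
        #           -extent 57x57 "www/res/icon/ios/icon-57.png"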
command = raw_command.format(
source=source,
destination=destination,
width=width,
height=height,
bigger=max(width, height),
background=background or 'none'
)
logging.debug(command)
subprocess.call(command, shell=True)
if __name__ == '__main__':
converter = Converter(PLATFORMS)
converter.generate()
| {
"content_hash": "f595e6950265ca97e8f707084944bbe1",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 150,
"avg_line_length": 35.17573221757322,
"alnum_prop": 0.5582252884501011,
"repo_name": "collingreen/cordova-icon-splash-generator",
"id": "873afd47531098941db617e9a0b4c62ce9238921",
"size": "8407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13049"
}
],
"symlink_target": ""
} |
import unittest
from .simple import Base
class CrashTest(Base):
config = 'test/crash1.yaml'
def testBadRequest(self):
conn = self.http()
conn.request('GET', '/')
resp = conn.getresponse()
self.assertTrue(b'Not Found' in resp.read())
conn.close()
def testGoodRequest(self):
conn = self.http('hello')
conn.request('GET', '/')
resp = conn.getresponse()
self.assertTrue(b'Not Found' not in resp.read())
conn.close()
class CrashTest2(Base):
config = 'test/crash2.yaml'
def testBadRequest(self):
conn = self.http()
conn.request('GET', '/test?something', headers={
'Host': 'example.com',
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
'Sec-WebSocket-Origin': 'http://example.com',
'Sec-WebSocket-Version': '13',
})
resp = conn.getresponse()
assert resp.code == 101, resp.code
conn.close()
def testGoodRequest(self):
conn = self.http('hello')
conn.request('GET', '/')
resp = conn.getresponse()
self.assertTrue(b'Not Found' in resp.read())
conn.close()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "966b8ce5c6e3416219e676e426c8c09c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 60,
"avg_line_length": 26.93877551020408,
"alnum_prop": 0.5522727272727272,
"repo_name": "tailhook/zerogw",
"id": "cedc9dc3d933b1551f7cf7a65a4aaeb6f7cc2c95",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/crash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "153900"
},
{
"name": "C++",
"bytes": "457"
},
{
"name": "CSS",
"bytes": "25938"
},
{
"name": "JavaScript",
"bytes": "10600"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Perl",
"bytes": "1683"
},
{
"name": "Python",
"bytes": "75596"
},
{
"name": "Shell",
"bytes": "6728"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spanish_content', '0010_auto_20170304_1848'),
]
operations = [
migrations.AddField(
model_name='eduprogram',
name='video',
field=models.BooleanField(default=False),
),
]
| {
"content_hash": "5541f9b04d7b526c079fdc8055eb951e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6025641025641025,
"repo_name": "DjangoGirlsSeoul/lightandleadership",
"id": "7950c0eeb49041ab8b71decbeb240076f498a55a",
"size": "463",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spanish_content/migrations/0011_eduprogram_video.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13028"
},
{
"name": "HTML",
"bytes": "62184"
},
{
"name": "Python",
"bytes": "110989"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
log = logging.getLogger(__name__)
class URLTests(TestCase):
def setUp(self):
self.client = Client()
self.client.login(
username=settings.TEST_USERNAME,
password=settings.TEST_PASSWORD)
def test_urls(self):
urls = [
"/",
"/contract/",
"/contract/addendum/",
"/contract/addendum/add/",
"/contract/contract/",
"/contract/contract/add/",
"/contract/contract/48/",
"/contract/document/",
"/contract/document/add/",
"/contract/document/2/",
"/contract/ticket/",
"/contract/ticket/add/",
"/contract/print/sample/2/",
"/contract/print/contract/223/"]
for url in urls:
response = self.client.get(url)
log.info("%s, %s" % (response.status_code, url))
self.assertEqual(response.status_code, 200)
| {
"content_hash": "0dd874487dc6140a63170360bf5e99c0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 60,
"avg_line_length": 29.916666666666668,
"alnum_prop": 0.5459610027855153,
"repo_name": "MattAgile/django-spa-crm",
"id": "5b7ec670d7fac97286d064891606262d9b3f957c",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crm/contract/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "2294"
},
{
"name": "CSS",
"bytes": "1977"
},
{
"name": "HTML",
"bytes": "46944"
},
{
"name": "JavaScript",
"bytes": "14998"
},
{
"name": "Nginx",
"bytes": "1255"
},
{
"name": "Puppet",
"bytes": "1990"
},
{
"name": "Python",
"bytes": "93308"
},
{
"name": "Shell",
"bytes": "2809"
}
],
"symlink_target": ""
} |
first = input('write the price of the first product: R$ ')
first = int(first)
second = input('write the price of the second product: R$ ')
second = int(second)
third = input('write the price of the third product: R$ ')
third = int(third)
if first < second < third or first < third < second:
    print('the first is the cheapest product, so buy it')
elif second < first < third or second < third < first:
print('the second is the cheapest product, so buy it')
elif third < first < second or third < second < first:
print('The third is the cheapest product, so buy it')
else:
print('all products have the same price')
| {
"content_hash": "13a37e35de657ff6c1e1ab63a57d7697",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.6676737160120846,
"repo_name": "jucimarjr/IPC_2017-1",
"id": "debacba9b1a87f9cc747c9d94fd1023b2577af73",
"size": "998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lista04/lista04_lista01_questao08.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2978"
},
{
"name": "Python",
"bytes": "525677"
}
],
"symlink_target": ""
} |
""" Example file submission script
Requires the `aws` command line utility: http://aws.amazon.com/cli/
"""
import hashlib
import json
import os
import requests
import subprocess
import sys
import time
host = 'REPLACEME/'
encoded_access_key = 'UISQC32B'
encoded_secret_access_key = 'ikc2wbs27minvwo4'
path = 'test.fastq.gz'
my_lab = '/labs/thomas-gingeras/'
my_award = '/awards/U54HG004557/'
# From http://hgwdev.cse.ucsc.edu/~galt/encode3/validatePackage/validateEncode3-latest.tgz
encValData = 'encValData'
assembly = 'hg19'
# ~2s/GB
print("Calculating md5sum.")
md5sum = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(1024*1024), b''):
md5sum.update(chunk)
data = {
"dataset": "TSTSR771968",
"file_format": "fastq",
"file_size": os.path.getsize(path),
"md5sum": md5sum.hexdigest(),
"output_type": "reads",
"submitted_file_name": path,
"lab": my_lab,
"award": my_award,
"replicate": "bed9349d-d6ad-45aa-b9bb-8351059c1167"
}
####################
# Local validation
gzip_types = [
"CEL",
"bam",
"bed",
"bed_bed3",
"bed_bed6",
"bed_bedLogR",
"bed_bedMethyl",
"bed_bedRnaElements",
"bed_broadPeak",
"bed_gappedPeak",
"bed_narrowPeak",
"bed_peptideMapping",
"csfasta",
"csqual",
"fasta",
"fastq",
"gff",
"gtf",
"tar",
"sam",
"wig"
]
magic_number = open(path, 'rb').read(2)
is_gzipped = magic_number == b'\x1f\x8b'
if data['file_format'] in gzip_types:
assert is_gzipped, 'Expected gzipped file'
else:
assert not is_gzipped, 'Expected un-gzipped file'
chromInfo = '-chromInfo=%s/%s/chrom.sizes' % (encValData, assembly)
validate_map = {
'bam': ['-type=bam', chromInfo],
'bed': ['-type=bed6+', chromInfo], # if this fails we will drop to bed3+
'bedLogR': ['-type=bigBed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
'bed_bedLogR': ['-type=bed9+1', chromInfo, '-as=%s/as/bedLogR.as' % encValData],
'bedMethyl': ['-type=bigBed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
'bed_bedMethyl': ['-type=bed9+2', chromInfo, '-as=%s/as/bedMethyl.as' % encValData],
'bigBed': ['-type=bigBed6+', chromInfo], # if this fails we will drop to bigBed3+
'bigWig': ['-type=bigWig', chromInfo],
'broadPeak': ['-type=bigBed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
'bed_broadPeak': ['-type=bed6+3', chromInfo, '-as=%s/as/broadPeak.as' % encValData],
'fasta': ['-type=fasta'],
'fastq': ['-type=fastq'],
'gappedPeak': ['-type=bigBed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
'bed_gappedPeak': ['-type=bed12+3', chromInfo, '-as=%s/as/gappedPeak.as' % encValData],
'gtf': None,
'idat': ['-type=idat'],
'narrowPeak': ['-type=bigBed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
'bed_narrowPeak': ['-type=bed6+4', chromInfo, '-as=%s/as/narrowPeak.as' % encValData],
'rcc': ['-type=rcc'],
'tar': None,
'tsv': None,
'2bit': None,
'csfasta': ['-type=csfasta'],
'csqual': ['-type=csqual'],
'bedRnaElements': ['-type=bed6+3', chromInfo, '-as=%s/as/bedRnaElements.as' % encValData],
'CEL': None,
'sam': None,
'wig': None,
'hdf5': None,
'gff': None
}
validate_args = validate_map.get(data['file_format'])
if validate_args is not None:
print("Validating file.")
try:
subprocess.check_output(['validateFiles'] + validate_args + [path])
except subprocess.CalledProcessError as e:
print(e.output)
raise
####################
# POST metadata
headers = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
print("Submitting metadata.")
r = requests.post(
host + '/file',
auth=(encoded_access_key, encoded_secret_access_key),
data=json.dumps(data),
headers=headers,
)
try:
r.raise_for_status()
except:
print('Submission failed: %s %s' % (r.status_code, r.reason))
print(r.text)
raise
item = r.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
####################
# POST file to S3
creds = item['upload_credentials']
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
| {
"content_hash": "b27c803f3ecfd84a7556ba142bb366b4",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 94,
"avg_line_length": 28.186046511627907,
"alnum_prop": 0.6206683168316832,
"repo_name": "ENCODE-DCC/pyencoded-tools",
"id": "888ac405b5f9e5bce873bb9b0aa05a3f0c2e9560",
"size": "4848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permissions_qa_scripts/originals/submit_file.E2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "9064"
},
{
"name": "Batchfile",
"bytes": "1411"
},
{
"name": "Jupyter Notebook",
"bytes": "19265564"
},
{
"name": "Python",
"bytes": "939689"
},
{
"name": "Shell",
"bytes": "5829"
}
],
"symlink_target": ""
} |
from dns.resolver import Resolver
import socket
import ipaddress
import os
DEBUG = os.environ.get("EEHDEBUG_SPF")
__version__ = "0.0.1"
PASS = "pass"
FAIL = "fail"
SOFTFAIL = "softfail"
NEUTRAL = "neutral"
PERMERROR = "permerror"
TEMPERROR = "temperror"
NONE = "none"
SMTP_PERM_FAIL = 550
statusmap = {
PASS: (250, "sender SPF authorized"),
FAIL: (SMTP_PERM_FAIL, "SPF fail - not authorized"),
NEUTRAL: (250, "access neither permitted nor denied"),
SOFTFAIL: (250, "domain owner discourages use of this host"),
PERMERROR: (450, "permanent error"),
TEMPERROR: (450, "temporary error"),
NONE: (250, "equivocal SPF header"),
}
def _status(code):
return (code, ) + statusmap[code]
def handle_all(arg, domain):
return lambda c: True
def handle_ip(arg, domain):
def validate(c):
try:
c = socket.gethostbyname(c)
except:
return False
if "/" in arg:
return ipaddress.ip_address(c) in ipaddress.ip_network(
arg, False)
else:
return c == arg
return validate
def handle_a(arg, domain):
if "/" in arg:
arg, length = arg.split("/")
network = True
else:
network = False
if not arg:
arg = domain
ip = socket.gethostbyname(arg)
if network:
return handle_ip("/".join([ip, length]), domain)
else:
return handle_ip(ip, domain)
def handle_mx(arg, domain):
if "/" in arg:
arg, length = arg.split("/")
network = True
else:
network = False
if not arg:
arg = domain
a = Resolver().query(arg, "MX")
ips = map(socket.gethostbyname,
map(lambda c: c.exchange.to_text(True), a))
if network:
def validate(c):
c = ipaddress.ip_address(socket.gethostbyname(c))
o = False
for i in ips:
o |= c in ipaddress.ip_network(i+"/"+length, False)
return o
return validate
else:
return lambda c: socket.gethostbyname(c) in ips
def handle_ptr(arg, domain):
if not arg:
arg = domain
def validate(c):
try:
name, aliases, ip = socket.gethostbyaddr(c)
except OSError:
return False
hostnames = [name] + aliases
for hostname in hostnames:
try:
res = socket.gethostbyname(hostname)
except:
continue
            else:
                if hostname.endswith(arg) and res in ip:
                    return True
        return False
return validate
def handle_include(arg, domain):
return lambda c: spf(arg, c)[1] != SMTP_PERM_FAIL
def handle_exp(arg, domain):
return lambda c: False
def handle_exists(arg, domain):
def validate(c):
try:
            socket.gethostbyname(arg or c)
except:
return False
else:
return True
return validate
MECHANISMS = {
"all": handle_all,
"ip4": handle_ip,
"ip6": handle_ip,
"a": handle_a,
"mx": handle_mx,
"ptr": handle_ptr,
"include": handle_include,
"exists": handle_exists,
"exp": handle_exp,
}
def spf(domain, greeting):
r = Resolver()
answers = r.query(domain, "TXT")
for answer in answers:
if DEBUG:
print(answer.strings[0])
if answer.strings[0].startswith(b"v=spf"):
policy = answer.strings[0]
break
else:
return _status(NEUTRAL)
spfp = policy.decode().lower().split(" ")[1:]
for action in spfp:
if action.startswith("+"):
action = action[1:]
verb = PASS
elif action.startswith("-"):
action = action[1:]
verb = FAIL
elif action.startswith("~"):
action = action[1:]
verb = SOFTFAIL
elif action.startswith("?"):
action = action[1:]
verb = NEUTRAL
else:
verb = PASS
if DEBUG:
print(action)
if ":" in action:
action, _, param = action.partition(":")
elif "=" in action:
action, _, param = action.partition("=")
else:
param = ""
if DEBUG:
print(param)
if action == "redirect":
return spf(param, greeting)
elif action not in MECHANISMS:
            return _status(PERMERROR)
else:
if DEBUG:
print(verb, action, param, MECHANISMS[action](param, domain)(greeting))
if MECHANISMS[action](param, domain)(greeting):
return _status(verb)
else:
return _status(NONE)
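
# A minimal usage sketch (the domain and client address below are hypothetical
# placeholders) showing how a caller is expected to consume the
# (result, smtp_code, message) tuple returned by spf(); for example, an MTA
# hook could compare smtp_code against SMTP_PERM_FAIL, as handle_include does.
if __name__ == "__main__":
    result, smtp_code, message = spf("example.com", "192.0.2.1")
    print("SPF result: %s (%d %s)" % (result, smtp_code, message))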
| {
"content_hash": "4b7e7d0fb434f64023349a36f55e4d73",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 87,
"avg_line_length": 22.31132075471698,
"alnum_prop": 0.5317124735729387,
"repo_name": "Varbin/EEH",
"id": "be265ac2319e4c2406a07ec4162963ae837401f4",
"size": "4730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/EEHlib/spam/SPF.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "497"
},
{
"name": "Python",
"bytes": "75615"
},
{
"name": "Shell",
"bytes": "532"
}
],
"symlink_target": ""
} |
def main():
    total = 0.0
    count = 0
    print ("Press [Enter] to exit.")
    temp = input("What's the temperature? ")
    while temp != "":
        total += float(temp)
        count += 1
        temp = input("What's the temperature? ")
    if count:
        print("The average temperature is:", total / count)
    else:
        print("No temperatures were entered.")
if __name__ == '__main__':
print ("Get the average temperature.")
main()
| {
"content_hash": "4c2aaf284b0bae99a1b9a91e1e02da12",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.055555555555557,
"alnum_prop": 0.5303430079155673,
"repo_name": "SeattleCentral/ITC110",
"id": "6f6607c5a692f9a3c7362078465c6f237016bd87",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lecture14d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63718"
}
],
"symlink_target": ""
} |
'''
Copyright 2014-2015 Teppo Perä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import inspect
import itertools
from pytraits.core.errors import TypeConversionError
__all__ = ["type_safe", "type_converted"]
class ErrorMessage:
"""
Encapsulates building of error message.
"""
def __init__(self, main_msg, repeat_msg, get_func_name):
self.__errors = []
self.__get_func_name = get_func_name
self.__main_msg = main_msg
self.__repeat_msg = repeat_msg
def __bool__(self):
return bool(self.__errors)
def __str__(self):
msg = [self.__main_msg.format(self.__get_func_name())]
for error in self.__errors:
msg.append(" - " + self.__repeat_msg.format(**error))
return "\n".join(msg)
    def set_main_message(self, msg):
self.__main_msg = msg
def set_repeat_message(self, msg):
self.__repeat_msg = msg
def add(self, **kwargs):
self.__errors.append(kwargs)
def reset(self):
self.__errors = []
class type_safe:
"""
Decorator to enforce type safety. It certainly kills some ducks
but allows us also to fail fast.
>>> @type_safe
... def check(value: int, answer: bool, anything):
... return value, answer, anything
...
>>> check("12", "false", True)
Traceback (most recent call last):
...
TypeError: While calling check(value:int, answer:bool, anything):
- parameter 'value' had value '12' of type 'str'
- parameter 'answer' had value 'false' of type 'str'
>>> check(1000, True)
Traceback (most recent call last):
...
TypeError: check() missing 1 required positional argument: 'anything'
"""
def __init__(self, function):
self._function = function
self._specs = inspect.getfullargspec(self._function)
self._self = None
self._errors = ErrorMessage(
'While calling {}:',
"parameter '{name}' had value '{value}' of type '{typename}'",
self.function_signature)
def __get__(self, instance, clazz):
"""
Stores calling instances and returns this decorator object as function.
"""
# In Python, every function is a property. Before Python invokes function,
# it must access the function using __get__, where it can deliver the calling
# object. After the __get__, function is ready for being invoked by __call__.
self._self = instance
return self
def iter_positional_args(self, args):
"""
Yields type, name, value combination of function arguments.
"""
# specs.args contains all arguments of the function. Loop here all
# argument names and their values putting them together. If there
# are arguments missing values, fill them with None.
for name, val in itertools.zip_longest(self._specs.args, args, fillvalue=None):
# __annotations__ is a dictionary of argument name and annotation.
# We accept empty annotations, in which case the argument has no
# type requirement.
yield self._function.__annotations__.get(name, None), name, val
def function_signature(self):
"""
Returns signature and class of currently invoked function.
>>> @type_converted
... def test(value: int, answer: bool): pass
>>> test.function_signature()
'test(value:int, answer:bool)'
"""
sig = str(inspect.signature(self._function))
name = self._function.__name__
if self._self:
return "%s.%s%s" % (self._self.__class__.__name__, name, sig)
else:
return "%s%s" % (name, sig)
def _analyze_args(self, args):
"""
Invoked by __call__ in order to work with positional arguments.
This function does the actual work of evaluating arguments against
their annotations. Any deriving class can override this function
to do different kind of handling for the arguments. Overriding function
must return list of arguments that will be used to call the decorated
function.
@param args: Arguments given for the function.
@return same list of arguments given in parameter.
"""
for arg_type, arg_name, arg_value in self.iter_positional_args(args):
if not arg_type or isinstance(arg_value, arg_type):
continue
self._errors.add(
typename=type(arg_value).__name__,
name=arg_name,
value=arg_value)
if self._errors:
raise TypeError(str(self._errors))
return args
def __match_arg_count(self, args):
"""
Verifies that proper number of arguments are given to function.
"""
# With default values this verification is bit tricky. In case
# given arguments match with number of arguments in function signature,
# we can proceed.
if len(args) == len(self._specs.args):
return True
# It's possible to have less arguments given than defined in function
# signature in case any default values exist.
        if 0 <= len(self._specs.args) - len(args) <= len(self._specs.defaults or []):
            return True
# When exceeding number of args, also check if function accepts
# indefinite number of positional arguments.
if len(args) > len(self._specs.args) and self._specs.varargs:
return True
# We got either too many arguments or too few.
return False
def __call__(self, *args, **kwargs):
"""
Converts annotated types into proper type and calls original function.
"""
self._errors.reset()
# Methods require instance of the class to be first argument. We
# stored it in __get__ and now add it to argument list so that
# function can be invoked correctly.
if self._self:
args = (self._self, ) + args
# Before doing any type checks, make sure argument count matches.
if self.__match_arg_count(args):
args = self._analyze_args(args)
return self._function(*args, **kwargs)
class type_converted(type_safe):
"""
Decorator to enforce types and do auto conversion to values.
>>> @type_converted
... def convert(value: int, answer: bool, anything):
... return value, answer, anything
...
>>> convert("12", "false", None)
(12, False, None)
>>> class Example:
... @type_converted
... def convert(self, value: int, answer: bool, anything):
... return value, answer, anything
...
>>> Example().convert("12", 0, "some value")
(12, False, 'some value')
>>> Example().convert(None, None, None)
Traceback (most recent call last):
...
pytraits.core.errors.TypeConversionError: While calling Example.convert(self, value:int, answer:bool, anything):
- got arg 'value' as 'None' of type 'NoneType' which cannot be converted to 'int'
- got arg 'answer' as 'None' of type 'NoneType' which cannot be converted to 'bool'
"""
def __init__(self, function):
super().__init__(function)
self.__converters = {bool: self.boolean_conversion}
self._errors = ErrorMessage(
'While calling {}:',
"got arg '{name}' as '{value}' of type '{typename}' "
"which cannot be converted to '{expectedtype}'",
self.function_signature)
def convert(self, arg_type, arg_name, arg_value):
"""
Converts argument to given type.
"""
# If no type required, return value as is.
if arg_type is None:
return arg_value
try:
return self.__converters[arg_type](arg_value)
except KeyError:
return arg_type(arg_value)
def boolean_conversion(self, value):
"""
Convert given value to boolean.
>>> conv = type_converted(lambda self: None)
>>> conv.boolean_conversion("True"), conv.boolean_conversion("false")
(True, False)
>>> conv.boolean_conversion(1), conv.boolean_conversion(0)
(True, False)
"""
if isinstance(value, bool):
return value
elif isinstance(value, str):
if value.lower() == "true":
return True
if value.lower() == "false":
return False
elif isinstance(value, int):
if not value:
return False
if value == 1:
return True
raise TypeConversionError() # This will be caught by convert method.
def _analyze_args(self, args):
"""
Converts annotated types into proper type and calls original function.
"""
self._errors.reset()
new_args = []
for arg_type, arg_name, arg_value in self.iter_positional_args(args):
try:
new_args.append(self.convert(arg_type, arg_name, arg_value))
except (TypeConversionError, TypeError):
self._errors.add(
name=arg_name,
value=arg_value,
typename=type(arg_value).__name__,
expectedtype=arg_type.__name__)
if self._errors:
raise TypeConversionError(str(self._errors))
return new_args
if __name__ == "__main__":
import doctest
doctest.testmod()
| {
"content_hash": "e25237b4de9cf8dcc3203f49ce4ff8e1",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 116,
"avg_line_length": 33.81879194630873,
"alnum_prop": 0.5906925977376464,
"repo_name": "justanr/py3traits",
"id": "3451d3716d54e18f20805a9c0aba0303b9831d37",
"size": "10125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytraits/core/magic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1489"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "96649"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('invoices', '0005_auto_20160109_1041'),
]
operations = [
migrations.AddField(
model_name='invoiceposition',
name='money_gross',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=32, verbose_name='Price gross'),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceposition',
name='money_net',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=32, verbose_name='Price net'),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceposition',
name='tax',
field=models.DecimalField(blank=True, decimal_places=4, default=0, max_digits=10, verbose_name='Price gross'),
preserve_default=False,
),
migrations.AlterField(
model_name='invoice',
name='date_delivered',
field=models.DateField(default=django.utils.timezone.now, verbose_name='Delivery date'),
),
migrations.AlterField(
model_name='invoice',
name='date_issued',
field=models.DateField(default=django.utils.timezone.now, verbose_name='Issue date'),
),
migrations.AlterField(
model_name='invoice',
name='date_payment',
field=models.DateField(blank=True, help_text='If you leave this field blank, it will be filled in automatically.', null=True, verbose_name='Payment date'),
),
migrations.AlterField(
model_name='invoice',
name='invoice_number',
field=models.CharField(blank=True, help_text='If you leave this field blank, it will be filled in automatically.', max_length=128, verbose_name='Invoice number'),
),
migrations.AlterField(
model_name='invoice',
name='number',
field=models.IntegerField(blank=True, help_text='If you leave this field blank, it will be filled in automatically.', verbose_name='Invoice internal number'),
),
]
| {
"content_hash": "8abf8003ee1d8544be834757c2c9db8a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 174,
"avg_line_length": 40.64912280701754,
"alnum_prop": 0.6098403107466551,
"repo_name": "samupl/simpleERP",
"id": "56d87c9f69108c8ef28a9e3f8ead6995edae39ac",
"size": "2389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/invoices/migrations/0006_auto_20160109_1239.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4622"
},
{
"name": "HTML",
"bytes": "15437"
},
{
"name": "JavaScript",
"bytes": "628"
},
{
"name": "Python",
"bytes": "96558"
}
],
"symlink_target": ""
} |
import random
class Guid:
    def __init__(self):
        # Generate a random 128-bit identifier rendered as 32 hex characters.
        self.value = "%032x" % random.getrandbits(128)
| {
"content_hash": "d8182b17894cd285fbfc804d727b18f3",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 21,
"avg_line_length": 8.833333333333334,
"alnum_prop": 0.6226415094339622,
"repo_name": "SeVenOPS/DevChat",
"id": "7a3f5c35cfbc72686e9c780687c6235ce35c9e9a",
"size": "54",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Guid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2488"
},
{
"name": "JavaScript",
"bytes": "1573"
},
{
"name": "Python",
"bytes": "29446"
}
],
"symlink_target": ""
} |
import pytest
from {{ cookiecutter.project_slug }}.users.models import User
from {{ cookiecutter.project_slug }}.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user(db) -> User:
return UserFactory()
| {
"content_hash": "d624a69b6bff429e984fedd698e9345e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.7545454545454545,
"repo_name": "trungdong/cookiecutter-django",
"id": "7095a4714b216ee3cc67248eeff51da72db96d06",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1037"
},
{
"name": "CSS",
"bytes": "228"
},
{
"name": "Dockerfile",
"bytes": "8132"
},
{
"name": "HTML",
"bytes": "21569"
},
{
"name": "JavaScript",
"bytes": "5218"
},
{
"name": "Makefile",
"bytes": "1124"
},
{
"name": "Procfile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "126082"
},
{
"name": "SCSS",
"bytes": "662"
},
{
"name": "Shell",
"bytes": "16009"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class UValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="u", parent_name="cone", **kwargs):
super(UValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
| {
"content_hash": "554785f3c99980725761c72b94d1242d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 70,
"avg_line_length": 35.25,
"alnum_prop": 0.5910165484633569,
"repo_name": "plotly/python-api",
"id": "9d58adf8ef8e67ec6d0256641ee4222597f49538",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/cone/_u.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from django import forms
from cropimg.widgets import CIImgWidget, CIThumbnailWidget
class CIImageField(forms.ImageField):
widget = CIImgWidget
class CIThumbnailField(forms.CharField):
widget = CIThumbnailWidget
def __init__(self, image_field, size, *args, **kwargs):
"""
:param image_field: name of the image field you are generating thumbnail for
:type image_field: str
:param size:
:type size: None | tuple | list
"""
assert isinstance(size, (list, tuple)), "Size must be either tuple or list of two items"
self.image_field = image_field
self.size = size
self.name = kwargs.pop("name", "")
defaults = {"widget": self.widget}
defaults.update(kwargs)
super(CIThumbnailField, self).__init__(*args, **defaults)
def widget_attrs(self, widget):
attrs = super(CIThumbnailField, self).widget_attrs(widget)
attrs.update({
"data-image-field": self.image_field,
"data-thumb-field": self.name,
"data-thumb-size": "%d,%d" % self.size,
"data-type": "thumbnail_field",
"class": "cropimg-field"
})
return attrs
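
# A minimal usage sketch (the form and field names below are hypothetical) of
# how the two fields are meant to be combined: the thumbnail field references
# the image field by name and declares the crop size it should produce.
class ExamplePhotoForm(forms.Form):
    photo = CIImageField(required=False)
    photo_thumb = CIThumbnailField("photo", (200, 200), name="photo_thumb",
                                   required=False)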
| {
"content_hash": "704f36c94813ad87ccca475102ac748e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 96,
"avg_line_length": 32.026315789473685,
"alnum_prop": 0.6055875102711585,
"repo_name": "rewardz/cropimg-django",
"id": "9bbdfcdf26c19c26b24b35a2fd90d910f59b654b",
"size": "1217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cropimg/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "64"
},
{
"name": "JavaScript",
"bytes": "46358"
},
{
"name": "Python",
"bytes": "9668"
}
],
"symlink_target": ""
} |
from tkinter import *
# Window
# create the window and assign it to the variable ventana
ventana = Tk()
# set the title the window will have
ventana.title("Computadores CE")
# define the minimum size of the window, given as width x height
ventana.minsize(300, 300)
# define the maximum size of the window, given as width x height
ventana.maxsize(600, 700)
# Print the coordinates of each click, in x,y format
def mostrarCoordenada(event):
    x = event.x
    y = event.y
    print(x, y)
# bind the mostrarCoordenada function to the window through the <Button-1> event
ventana.bind("<Button-1>", mostrarCoordenada)
# this displays the window on screen (starts the event loop)
ventana.mainloop()
| {
"content_hash": "859364eca34dbeda00c83556e9756f0f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 103,
"avg_line_length": 34.625,
"alnum_prop": 0.7460890493381468,
"repo_name": "baiper06/tutorial-tkinter",
"id": "db3a81e1ce42321cb130ca5a161e02e3f2a3b140",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ventana/ventana.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11683"
}
],
"symlink_target": ""
} |
import os
import sys
from stat import ST_SIZE
from logging import debug, info, warning, error
from Utils import getTextFromXml, getTreeFromXml, formatSize, unicodise, calculateChecksum, parseNodes
from Exceptions import S3UploadError
from collections import defaultdict
class MultiPartUpload(object):
MIN_CHUNK_SIZE_MB = 5 # 5MB
MAX_CHUNK_SIZE_MB = 5120 # 5GB
MAX_FILE_SIZE = 42949672960 # 5TB
def __init__(self, s3, file, uri, headers_baseline = {}):
self.s3 = s3
self.file = file
self.uri = uri
self.parts = {}
self.headers_baseline = headers_baseline
self.upload_id = self.initiate_multipart_upload()
def get_parts_information(self, uri, upload_id):
multipart_response = self.s3.list_multipart(uri, upload_id)
tree = getTreeFromXml(multipart_response['data'])
parts = defaultdict(lambda: None)
for elem in parseNodes(tree):
try:
parts[int(elem['PartNumber'])] = {'checksum': elem['ETag'], 'size': elem['Size']}
except KeyError:
pass
return parts
def get_unique_upload_id(self, uri):
upload_id = None
multipart_response = self.s3.get_multipart(uri)
tree = getTreeFromXml(multipart_response['data'])
for mpupload in parseNodes(tree):
try:
mp_upload_id = mpupload['UploadId']
mp_path = mpupload['Key']
info("mp_path: %s, object: %s" % (mp_path, uri.object()))
if mp_path == uri.object():
if upload_id is not None:
raise ValueError("More than one UploadId for URI %s. Disable multipart upload, or use\n %s multipart %s\nto list the Ids, then pass a unique --upload-id into the put command." % (uri, sys.argv[0], uri))
upload_id = mp_upload_id
except KeyError:
pass
return upload_id
def initiate_multipart_upload(self):
"""
Begin a multipart upload
http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadInitiate.html
"""
if self.s3.config.upload_id is not None:
self.upload_id = self.s3.config.upload_id
elif self.s3.config.put_continue:
self.upload_id = self.get_unique_upload_id(self.uri)
else:
self.upload_id = None
if self.upload_id is None:
request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = self.headers_baseline, extra = "?uploads")
response = self.s3.send_request(request)
data = response["data"]
self.upload_id = getTextFromXml(data, "UploadId")
return self.upload_id
def upload_all_parts(self):
"""
Execute a full multipart upload on a file
Returns the seq/etag dict
TODO use num_processes to thread it
"""
if not self.upload_id:
raise RuntimeError("Attempting to use a multipart upload that has not been initiated.")
self.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024
if self.file.name != "<stdin>":
size_left = file_size = os.stat(self.file.name)[ST_SIZE]
nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)
debug("MultiPart: Uploading %s in %d parts" % (self.file.name, nr_parts))
else:
debug("MultiPart: Uploading from %s" % (self.file.name))
remote_statuses = defaultdict(lambda: None)
if self.s3.config.put_continue:
remote_statuses = self.get_parts_information(self.uri, self.upload_id)
seq = 1
if self.file.name != "<stdin>":
while size_left > 0:
offset = self.chunk_size * (seq - 1)
current_chunk_size = min(file_size - offset, self.chunk_size)
size_left -= current_chunk_size
labels = {
'source' : unicodise(self.file.name),
'destination' : unicodise(self.uri.uri()),
'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
}
try:
self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses[seq])
except:
error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort the upload, or\n %s --upload-id %s put ...\nto continue the upload."
% (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
raise
seq += 1
else:
while True:
buffer = self.file.read(self.chunk_size)
offset = self.chunk_size * (seq - 1)
current_chunk_size = len(buffer)
labels = {
'source' : unicodise(self.file.name),
'destination' : unicodise(self.uri.uri()),
'extra' : "[part %d, %s]" % (seq, "%d%sB" % formatSize(current_chunk_size, human_readable = True))
}
if len(buffer) == 0: # EOF
break
try:
self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses[seq])
except:
error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort, or\n %s --upload-id %s put ...\nto continue the upload."
                          % (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id))
raise
seq += 1
debug("MultiPart: Upload finished: %d parts", seq - 1)
def upload_part(self, seq, offset, chunk_size, labels, buffer = '', remote_status = None):
"""
Upload a file chunk
http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadUploadPart.html
"""
# TODO implement Content-MD5
debug("Uploading part %i of %r (%s bytes)" % (seq, self.upload_id, chunk_size))
if remote_status is not None:
if int(remote_status['size']) == chunk_size:
checksum = calculateChecksum(buffer, self.file, offset, chunk_size, self.s3.config.send_chunk)
remote_checksum = remote_status['checksum'].strip('"')
if remote_checksum == checksum:
warning("MultiPart: size and md5sum match for %s part %d, skipping." % (self.uri, seq))
self.parts[seq] = remote_status['checksum']
return
else:
warning("MultiPart: checksum (%s vs %s) does not match for %s part %d, reuploading."
% (remote_checksum, checksum, self.uri, seq))
else:
warning("MultiPart: size (%d vs %d) does not match for %s part %d, reuploading."
% (int(remote_status['size']), chunk_size, self.uri, seq))
headers = { "content-length": chunk_size }
query_string = "?partNumber=%i&uploadId=%s" % (seq, self.upload_id)
request = self.s3.create_request("OBJECT_PUT", uri = self.uri, headers = headers, extra = query_string)
response = self.s3.send_file(request, self.file, labels, buffer, offset = offset, chunk_size = chunk_size)
self.parts[seq] = response["headers"]["etag"]
return response
def complete_multipart_upload(self):
"""
Finish a multipart upload
http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadComplete.html
"""
debug("MultiPart: Completing upload: %s" % self.upload_id)
parts_xml = []
part_xml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
for seq, etag in self.parts.items():
parts_xml.append(part_xml % (seq, etag))
body = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>" % ("".join(parts_xml))
headers = { "content-length": len(body) }
request = self.s3.create_request("OBJECT_POST", uri = self.uri, headers = headers, extra = "?uploadId=%s" % (self.upload_id))
response = self.s3.send_request(request, body = body)
return response
def abort_upload(self):
"""
Abort multipart upload
http://docs.amazonwebservices.com/AmazonS3/latest/API/index.html?mpUploadAbort.html
"""
debug("MultiPart: Aborting upload: %s" % self.upload_id)
#request = self.s3.create_request("OBJECT_DELETE", uri = self.uri, extra = "?uploadId=%s" % (self.upload_id))
#response = self.s3.send_request(request)
response = None
return response
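
# A minimal call-sequence sketch (the s3 connection, file object and uri are
# assumed to be supplied by the calling code) of how this class is typically
# driven: initiate on construction, upload the parts, then complete.
#
#   upload = MultiPartUpload(s3, open("big.file", "rb"), uri)
#   upload.upload_all_parts()
#   upload.complete_multipart_upload()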
# vim:et:ts=4:sts=4:ai
| {
"content_hash": "7ac244df7a18ef6ac1f75342bfa1e3af",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 227,
"avg_line_length": 45.58673469387755,
"alnum_prop": 0.5647453833240067,
"repo_name": "sharethis-github/OpenSource",
"id": "ed671301369d4762ccc117950a40fb46861caae9",
"size": "9057",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "s3cmd/S3/MultiPart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "16641"
},
{
"name": "Perl",
"bytes": "9199"
},
{
"name": "Python",
"bytes": "306914"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.items():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.items():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
has_all_defaults = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(None)
params[col.key] = val
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif col.server_default is not None and \
mapper.base_mapper.eager_defaults:
has_all_defaults = False
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
if mapper.version_id_generator is not False:
val = mapper.version_id_generator(params[col._label])
params[col.key] = val
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.values():
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
if hasdata:
if hasnull:
raise orm_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
if mapper.base_mapper.eager_defaults:
stmt = stmt.return_defaults()
elif mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
list(rec[2].keys()),
bool(rec[5]),
rec[6], rec[7])
):
if \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper_rec._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], list(rec[2].keys()))
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.items():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
if need_version_id:
# TODO: need test coverage for this [ticket:1761]
if connection.dialect.supports_sane_rowcount:
rows = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows += c.rowcount
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
prop = mapper._columntoproperty[mapper.version_id_col]
if prop.key in state.unloaded:
toload_now.extend([prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.prefetch_cols
postfetch_cols = result.context.postfetch_cols
returning_cols = result.context.returning_cols
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
mapper._set_state_attr_by_column(state, dict_, col, row[col])
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
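# A small illustration of the ordering above (hypothetical objects, not part
# of this module): two pending instances with insert_order 1 and 2 come out
# first, in that order, followed by persistent instances sorted by their
# primary-key tuple, so INSERTs run in creation order and UPDATEs/DELETEs in a
# deterministic key order.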
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _do_pre(self):
query = self.query
self.context = context = query._compile_context()
if len(context.statement.froms) != 1 or \
not isinstance(context.statement.froms[0], schema.Table):
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
else:
self.primary_table = context.statement.froms[0]
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
try:
evaluator_compiler = evaluator.EvaluatorCompiler()
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
target_cls = query._mapper_zero().class_
#TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
select_stmt = self.context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
@classmethod
def factory(cls, query, synchronize_session, values):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values)
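    # For illustration: Query.update(values, synchronize_session='evaluate')
    # is routed to BulkUpdateEvaluate, 'fetch' to BulkUpdateFetch, and False
    # to this plain BulkUpdate, which performs no in-session synchronization.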
def _do_exec(self):
update_stmt = sql.update(self.primary_table,
self.context.whereclause, self.values)
self.result = self.query.session.execute(
update_stmt, params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
self.query._no_select_modifiers("delete")
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(delete_stmt,
params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
for key, value in self.values.items():
key = _attr_as_key(key)
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
| {
"content_hash": "cb41d059f040459daab4fd0106112977",
"timestamp": "",
"source": "github",
"line_count": 1079,
"max_line_length": 78,
"avg_line_length": 37.77386468952734,
"alnum_prop": 0.5300309141763581,
"repo_name": "FRC-Team-3140/north-american-happiness",
"id": "35631988bba6dfd9736c0d3e129cdbcf41d268c4",
"size": "40995",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sqlalchemy/orm/persistence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6472"
},
{
"name": "JavaScript",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "6901716"
},
{
"name": "Shell",
"bytes": "3801"
}
],
"symlink_target": ""
} |
class Solution:
def minMeetingRooms(self, intervals: 'List[Interval]') -> 'int':
START = 1
END = 0 # Sort by placing end first
points = []
for i in intervals:
points.append((i.start,START))
points.append((i.end,END))
points = sorted(points)
maxrooms = 0
currooms = 0
for p in points:
ptime, ptype = p
if ptype == END:
currooms -= 1
else:
currooms += 1
maxrooms = max(currooms, maxrooms)
return maxrooms
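# A minimal usage sketch: every start is a +1 event and every end a -1 event,
# and because END (0) sorts before START (1) at equal times, a meeting ending
# at t frees its room before one starting at t claims it. The Interval class
# below is only a stand-in for LeetCode's interval type, assumed here for the
# demo.
if __name__ == '__main__':
    class Interval:
        def __init__(self, start, end):
            self.start = start
            self.end = end
    rooms = Solution().minMeetingRooms(
        [Interval(0, 30), Interval(5, 10), Interval(15, 20)])
    assert rooms == 2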
| {
"content_hash": "be1905d82ccc2d48cd95d171379183f5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 30.94736842105263,
"alnum_prop": 0.4846938775510204,
"repo_name": "akras14/cs-101",
"id": "5e1c91ece48ad899339fbe0fae768a8c11c9925b",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/253.meeting-rooms-ii/253.sorted-points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2656"
},
{
"name": "HTML",
"bytes": "662"
},
{
"name": "JavaScript",
"bytes": "20934"
},
{
"name": "Python",
"bytes": "32433"
}
],
"symlink_target": ""
} |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SITE_ROOT = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'django.contrib.sites', # for sitemaps
'django.contrib.sitemaps',
# third party apps
'django_extensions',
'django_forms_bootstrap',
'rules',
'social_django',
'stdimage',
# project apps
'communities',
'events',
'locations',
'main',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'letsmeet.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.normpath(os.path.join(SITE_ROOT, 'templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'letsmeet.context_processors.environment',
],
},
},
]
WSGI_APPLICATION = 'letsmeet.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Database in Docker container
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'letsmeet',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = False
TIME_FORMAT = "H:i"
DATE_FORMAT = "Y-m-d"
DATETIME_FORMAT = "Y-m-d H:i"
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = "login"
SOCIAL_AUTH_FORCE_EMAIL_VALIDATION = True # ??
AUTHENTICATION_BACKENDS = (
'rules.permissions.ObjectPermissionBackend',
'social_core.backends.github.GithubOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.steam.SteamOpenId',
'social_core.backends.google.GoogleOAuth2',
# 'social_core.backends.email.EmailAuth',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
SOCIAL_AUTH_PROTECTED_USER_FIELDS = (
'first_name',
'last_name'
)
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_SUBJECT_PREFIX = '[letsmeet.click] '
SITE_ID = 1
| {
"content_hash": "89671cebabde57b0e27dd610e6b2a2c6",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 91,
"avg_line_length": 28.508196721311474,
"alnum_prop": 0.6760590377611654,
"repo_name": "letsmeet-click/letsmeet.click",
"id": "469f19724d0a2527f5ea212fa490c7c18b6895bc",
"size": "5217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsmeet/letsmeet/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "425"
},
{
"name": "Dockerfile",
"bytes": "766"
},
{
"name": "HTML",
"bytes": "67033"
},
{
"name": "JavaScript",
"bytes": "3716"
},
{
"name": "Makefile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "98881"
},
{
"name": "Shell",
"bytes": "1027"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_IBM_static_data'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_IBM_static_data',error_checker=_errors._error_checker)
GL_ALL_STATIC_DATA_IBM=_C('GL_ALL_STATIC_DATA_IBM',103060)
GL_STATIC_VERTEX_ARRAY_IBM=_C('GL_STATIC_VERTEX_ARRAY_IBM',103061)
@_f
@_p.types(None,_cs.GLenum)
def glFlushStaticDataIBM(target):pass
| {
"content_hash": "9ce4baa8578ec32bad8820e25e3da0d2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 112,
"avg_line_length": 38.111111111111114,
"alnum_prop": 0.7478134110787172,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "d7162d34e2a614a4745fdd8621b7fb270ec6e3ec",
"size": "686",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/IBM/static_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import time
import array
import itertools
import math
import platform
import json
import collections
import argparse
import arrayfunc
##############################################################################
########################################################
def InitOptionData(arraycode, arraysize, funcname):
"""Initialise the data used only for some tests.
"""
odata = collections.namedtuple('optiondata', ['truediv_type', 'ldexp_y',
'compval', 'pycomp', 'startcycle', 'endcycle',
'invertmaxval', 'invertop', 'fidataout'])
optiondata = odata
# Ensure the data is in the right format for the array type.
if arraycode in ('f', 'd'):
optiondata.truediv_type = float
else:
optiondata.truediv_type = int
# Function ldexp needs a specific array type as the second parameter.
if funcname == 'ldexp':
ydata = [0,1,2,3,4,5]
optiondata.ldexp_y = int(ydata[-1])
else:
optiondata.ldexp_y = None
# This is used for some tests.
if arraycode in ('f', 'd'):
optiondata.compval = float(0)
else:
optiondata.compval = int(0)
# Used for compress.
if 'xor' == 'compress':
optiondata.compdata = array.array(arraycode, [1,0,1,0])
optiondata.pycomp = array.array(arraycode, (x for x,y in zip(itertools.cycle(optiondata.compdata), itertools.repeat(0, arraysize))))
else:
optiondata.compdata = None
optiondata.pycomp = None
# Used for cycle.
if 'xor' == 'cycle':
optiondata.startcycle = comptype(arraycode, 0)
optiondata.endcycle = comptype(arraycode, 127)
else:
optiondata.startcycle = None
optiondata.endcycle = None
# Used for invert.
if 'xor' == 'invert':
optiondata.invertmaxval = allinvertlimits[arraycode]
if arraycode in ('b', 'h', 'i', 'l', 'q'):
optiondata.invertop = invertpysigned
else:
optiondata.invertop = invertpyunsigned
else:
optiondata.invertmaxval = None
optiondata.invertop = None
# Used for findindices.
if 'fidataout' in ('dataout'):
optiondata.fidataout = array.array('q', itertools.repeat(0, arraysize))
else:
optiondata.fidataout = None
return optiondata
########################################################
def InitDataArrays(arraycode, arraysize):
"""Initialise the data arrays used to run the tests.
"""
adata = collections.namedtuple('arraydata', ['datax', 'dataout',
'yvalue', 'zvalue', 'arraylength'])
arraydata = adata
# Ensure the data is in the right format for the array type.
if arraycode in ('f', 'd'):
xdata = [float(x) for x in [100,101,102,103,104,105,106,107,108,109]]
else:
xdata = [int(x) for x in [100,101,102,103,104,105,106,107,108,109]]
arraydata.datax = array.array(arraycode, (x for x,y in zip(itertools.cycle(xdata), itertools.repeat(0, arraysize))))
assert len(arraydata.datax) == arraysize, 'datax is not expected length %d' % len(arraydata.datax)
arraydata.arraylength = len(arraydata.datax)
# Y data.
ydata = [0,1,2,3,4,5]
if len(ydata) > 0:
yvalue = abs(ydata[-1])
if arraycode in ('f', 'd'):
arraydata.yvalue = float(yvalue)
else:
arraydata.yvalue = int(yvalue)
else:
arraydata.yvalue = None
# Z data.
zdata = []
if len(zdata) > 0:
zvalue = abs(zdata[-1])
if arraycode in ('f', 'd'):
arraydata.zvalue = float(zvalue)
else:
arraydata.zvalue = int(zvalue)
else:
arraydata.zvalue = None
# Output array.
if 'dataout' in ('dataout'):
arraydata.dataout = array.array(arraycode, itertools.repeat(0, arraydata.arraylength))
assert len(arraydata.dataout) == arraysize, 'dataout is not expected length %d' % len(arraydata.dataout)
else:
arraydata.dataout = None
return arraydata
########################################################
def calibrateruntime(arraycode, arraysize, arraydata, optiondata, runtimetarget):
"""Calibrate the run time for Python and default ArrayFunc.
"""
pyitercounts = 1
afitercounts = 50
# First, do a timing calibration run.
# Python native time.
pytime = BenchmarkPython(pyitercounts, arraycode, arraysize, arraydata, optiondata)
# Arrayfunc time.
aftime = BenchmarkAF(afitercounts, arraycode, arraydata, optiondata)
# Now calculate the average execution time and adjust the iterations
# so that the tests will take approximately 0.1 seconds.
# The time returned by the benchmark function is per iteration, so
# we don't need to adjust for this again.
pyitercounts = int(runtimetarget / pytime)
afitercounts = int(runtimetarget / aftime)
# Make sure the iteration count is at least 1.
if pyitercounts < 1:
pyitercounts = 1
if afitercounts < 1:
afitercounts = 1
return pyitercounts, afitercounts
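# Worked example of the calibration arithmetic above (illustrative numbers,
# not measured): if one timed Python pass takes roughly 0.002 seconds and
# runtimetarget is 0.1, the run settles on int(0.1 / 0.002) == 50 iterations.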
########################################################
def calibratesimdruntime(arraycode, arraydata, optiondata, runtimetarget):
"""Calibrate the run time with SIMD disabled.
"""
afitersidmcounts = 50
# Arrayfunc time without SIMD for functions with SIMD.
aftimenosimd = BenchmarkAFErrTrueSimdFalse(afitersidmcounts, arraycode, arraydata, optiondata)
afitersidmcounts = int(runtimetarget / aftimenosimd)
if afitersidmcounts < 1:
afitersidmcounts = 1
return afitersidmcounts
########################################################
def BenchmarkPython(pyitercounts, arraycode, arraysize, arraydata, optiondata):
"""Measure execution time of native Python code.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
arraylength = arraydata.arraylength
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
truediv_type = optiondata.truediv_type
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
invertmaxval = optiondata.invertmaxval
invertop = optiondata.invertop
# Time for python.
starttime = time.perf_counter()
if True:
for x in range(pyitercounts):
for i in range(arraylength):
dataout[i] = datax[i] ^ yvalue
else:
for x in range(pyitercounts):
dataout[i] = datax[i] ^ yvalue
endtime = time.perf_counter()
pythontime = (endtime - starttime) / pyitercounts
return pythontime
########################################################
def BenchmarkAF(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with defaults.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.xor(datax, yvalue, dataout)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with MathErrors ignored and SIMD turned off.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.xor(datax, yvalue, dataout, nosimd=True)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with SIMD turned off.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.xor(datax, yvalue, dataout, nosimd=True)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
########################################################
def BenchmarkAFErrTrueSimdFalse(afitercounts, arraycode, arraydata, optiondata):
"""Measure execution time for arrayfunc with matherrors=True.
"""
# This is used for some tests only.
result = True
# We provide a local reference to the arrays to make the representation simpler.
datax = arraydata.datax
dataout = arraydata.dataout
yvalue = arraydata.yvalue
zvalue = arraydata.zvalue
# Used for ldexp only.
ldexp_y = optiondata.ldexp_y
compval = optiondata.compval
fidataout = optiondata.fidataout
startcycle = optiondata.startcycle
endcycle = optiondata.endcycle
pycomp = optiondata.pycomp
compdata = optiondata.compdata
# Time for arrayfunc version.
starttime = time.perf_counter()
for i in range(afitercounts):
arrayfunc.xor(datax, yvalue, dataout)
endtime = time.perf_counter()
aftime = (endtime - starttime) / afitercounts
return aftime
##############################################################################
def GetCmdArguments():
""" Get any command line arguments. These modify the operation of the program.
rawoutput = If specified, will output raw data instead of a report.
mintest = If specified, will do a minimal test.
arraysize = Size of the array in elements.
runtimetarget = The target length of time in seconds to run a benchmark for.
"""
arraysize = 100000
runtimetarget = 0.1
# Get any command line arguments.
parser = argparse.ArgumentParser()
# Output just the raw data.
parser.add_argument('--rawoutput', action = 'store_true', help = 'Output raw data.')
# Do a minimal test. This will save time when full results are not required.
parser.add_argument('--mintest', action = 'store_true', help = 'Do minimal test.')
# Size of the test arrays.
parser.add_argument('--arraysize', type = int, default = arraysize,
help='Size of test arrays in number of elements.')
# The length of time to run each benchmark.
parser.add_argument('--runtimetarget', type = float, default = runtimetarget,
help='Target length of time to run each benchmark for.')
args = parser.parse_args()
return args
##############################################################################
CmdArgs = GetCmdArguments()
ArraySize = CmdArgs.arraysize
RunTimeTarget = CmdArgs.runtimetarget
##############################################################################
# Run the benchmarks.
funcname = 'xor'
supportedarrays = ('b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q')
# True if platform supports SIMD.
PlatformHasSIMD = arrayfunc.simdsupport.hassimd
# Detect the hardware platform, and assign the correct platform data table to it.
def platformdetect():
""" Return a string containing the array codes if the machine supports
SIMD for this function. The results will vary depending upon which platform
it is running on.
"""
# These are the supported options for SIMD. The values depend on
# the particular function in question.
# i686 = 32 bit x86, this never has SIMD.
# x86_64 = 64 bit x86, supported on Linux with GCC only.
# armv7l = 32 bit ARM, for Raspberry Pi 3 with 32 bit Linux.
# aarch64 = 64 bit ARM, for Raspberry Pi 3 or 4 with 64 bit Linux.
# These values were derived from the platform data reported by the benchmark.
signatures = {
'i686' : '',
'x86_64' : 'bBhHiI',
'armv7l' : 'bBhHiI',
'aarch64' : 'bBhHiI',
}
return signatures.get(platform.machine(), '')
if PlatformHasSIMD:
SIMDArrays = platformdetect()
else:
SIMDArrays = ''
# Uses SIMD on at least one array type.
HasSIMDOption = len(SIMDArrays) > 0
##############################################################################
# True if this benchmark allows math error detection to be turned off.
# We inspect a copy of the equation from the template in order to determine this.
# Note: Need double quotes around the equation because some functions contain
# a string with single quotes, and this would cause a conflict if we used single
# quotes to enclose this.
HasMathErrorOption = 'matherrors' in "arrayfunc.xor(datax, yvalue, dataout, nosimd=True)"
##############################################################################
# Used to collect the results.
PyData = {}
AfData = {}
AfDataErrTrueSimdTrue = {}
AfDataErrFalseSimdTrue = {}
AfDataErrTrueSimdFalse = {}
# Test using each array type.
for arraycode in supportedarrays:
# This array type supports SIMD. Some functions do not support SIMD at all,
# while others support it only for some array types on some platforms.
ArrayHasSIMD = arraycode in SIMDArrays
# Initialise the data arrays.
ArrayData = InitDataArrays(arraycode, ArraySize)
# Initialise the optional data elements that are only used for some tests.
OptionData = InitOptionData(arraycode, ArraySize, funcname)
# Calibrate the test runtime targets.
pyitercounts, afitercounts = calibrateruntime(arraycode, ArraySize, ArrayData, OptionData, RunTimeTarget)
if ArrayHasSIMD:
afitersidmcounts = calibratesimdruntime(arraycode, ArrayData, OptionData, RunTimeTarget)
# Benchmark the Python implementation.
PyData[arraycode] = BenchmarkPython(pyitercounts, arraycode, ArraySize, ArrayData, OptionData)
# Benchmark the Arrayfunc implementation with default parameters.
# This covers user requested minimal tests, plus functions which do not
# have either error checking or SIMD.
AfData[arraycode] = BenchmarkAF(afitercounts, arraycode, ArrayData, OptionData)
# A minimal test only involves the default parameters.
if not CmdArgs.mintest:
# Function has error checking but not SIMD. Test error checking turned off.
# The default case covers with error checking turned on.
if HasMathErrorOption and not ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function does not have error checking but does have SIMD.
# Test SIMD turned off. The default case covers with SIMD turned on.
if (not HasMathErrorOption) and ArrayHasSIMD:
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
# Function has both error checking and SIMD. Check for:
# error checking on and SIMD off,
# error checking off and SIMD off,
# error checking off and SIMD on
if HasMathErrorOption and ArrayHasSIMD:
AfDataErrFalseSimdTrue[arraycode] = BenchmarkAFErrFalseSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdTrue[arraycode] = BenchmarkAFErrTrueSimdTrue(afitercounts, arraycode, ArrayData, OptionData)
AfDataErrTrueSimdFalse[arraycode] = BenchmarkAFErrTrueSimdFalse(afitersidmcounts, arraycode, ArrayData, OptionData)
##############################################################################
##############################################################################
# Report the benchmarks.
# The format string used to print out results in stand alone mode.
def sformatter(pos, val):
if val is None:
return 17 * ' '
elif (val is not None) and (1.0 <= val < 10.0):
return '{%d:>8.1f} ' % (pos + 1)
elif (val is not None) and (val < 1.0):
return '{%d:>8.2f} ' % (pos + 1)
else:
return '{%d:>8.0f} ' % (pos + 1)
def printline(label1, col2, col3, col4, col5):
lineresult = [col2, col3, col4, col5]
standformat = '{0:^7}' + ''.join([sformatter(x,y) for x,y in enumerate(lineresult)])
print(standformat.format(label1, col2, col3, col4, col5))
# Report labels will vary depending on the options available with this function.
if HasMathErrorOption and HasSIMDOption:
theaderlabels = 'Err on SIMD off Err off SIMD off Err off SIMD on'
elif HasMathErrorOption and (not HasSIMDOption):
theaderlabels = ' Error check off'
elif (not HasMathErrorOption) and HasSIMDOption:
theaderlabels = ' SIMD off'
else:
theaderlabels = ''
theader = """
Function = {0}
======= ================ ================ ================ ================
Array AF vs Python {1}
======= ================ ================ ================ ================""".format(funcname, theaderlabels)
tfooter = '======= ================ ================ ================ ================'
def calcstats(statscolumn):
"""Calculate the states for a column of data.
Return the average, max, and min.
If the data column is empty, return None for each value.
"""
if len(statscolumn) > 0:
return sum(statscolumn) / len(statscolumn), max(statscolumn), min(statscolumn)
else:
return None, None, None
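# For example (illustrative): calcstats([2.0, 4.0]) gives (3.0, 4.0, 2.0),
# while an empty column gives (None, None, None) so printline() leaves those
# report cells blank.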
########################################################
def outputstandalone():
"""Output the results for when the benchmark is run in standalone mode.
This outputs whatever data is present, and so inherently adapts
itself to functions which have varying test options.
"""
totalpyrel = []
totalmathnosimdrel = []
totalsimdvsnosimd = []
totalnoerrwithsimd = []
print(theader)
for x in supportedarrays:
# Default versus native Python.
pyafrel = PyData[x] / AfData[x]
totalpyrel.append(pyafrel)
# Default versus math error checking on and no SIMD.
# If the function doesn't use SIMD then comparing it with SIMD off
# is pointless. Also skip for array types which don't use SIMD or
# for minimal tests.
if x in AfDataErrFalseSimdTrue:
mathnosimdrel = AfData[x] / AfDataErrFalseSimdTrue[x]
totalmathnosimdrel.append(mathnosimdrel)
else:
mathnosimdrel = None
# Default versus no error checking and no SIMD.
# If the function doesn't use math error checking then comparing it
# with math error off is pointless. Also skip for minimal tests.
if x in AfDataErrTrueSimdTrue:
simdnoerrnosimdrel = AfData[x] / AfDataErrTrueSimdTrue[x]
totalsimdvsnosimd.append(simdnoerrnosimdrel)
else:
simdnoerrnosimdrel = None
# No data exists if SIMD is not available.
if x in AfDataErrTrueSimdFalse:
# Default versus error checking turned off but SIMD enabled.
noerrwithsimd = AfData[x] / AfDataErrTrueSimdFalse[x]
totalnoerrwithsimd.append(noerrwithsimd)
else:
noerrwithsimd = None
printline(x, pyafrel, mathnosimdrel, simdnoerrnosimdrel, noerrwithsimd)
print(tfooter)
print()
print(tfooter)
# Calculate stats.
# Default versus native Python.
col2avg, col2max, col2min = calcstats(totalpyrel)
# Default versus math error checking on and no SIMD.
col3avg, col3max, col3min = calcstats(totalmathnosimdrel)
# Default versus no error checking and no SIMD.
col4avg, col4max, col4min = calcstats(totalsimdvsnosimd)
# Default versus error checking turned off but SIMD enabled.
col5avg, col5max, col5min = calcstats(totalnoerrwithsimd)
printline('avg', col2avg, col3avg, col4avg, col5avg)
printline('max', col2max, col3max, col4max, col5max)
printline('min', col2min, col3min, col4min, col5min)
print(tfooter)
########################################################
# If raw data is requested, output the raw numbers as JSON.
# This will normally be used by a parent process which called this
# benchmark as a child process.
if CmdArgs.rawoutput:
# Called by another process, return data as json.
testresults = {'pydata' : PyData,
'afdata' : AfData,
'afdataerrtruesimdtrue' : AfDataErrTrueSimdTrue,
'afdataerrtruesimdfalse' : AfDataErrTrueSimdFalse,
'afdataerrfalsesimdtrue' : AfDataErrFalseSimdTrue,
'benchname' : 'arrayfunc',
}
print(json.dumps(testresults))
else:
# If standalone, print out data in readable format.
outputstandalone()
##############################################################################
| {
"content_hash": "0058319aa842b1cc4c45b5daa4ba20c4",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 134,
"avg_line_length": 30.113702623906704,
"alnum_prop": 0.6804143673153258,
"repo_name": "m1griffin/arrayfunc",
"id": "f4f91dd688834ad43787412df8b32bb0093b9ef0",
"size": "21822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarks/benchmark_xor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3084"
},
{
"name": "C",
"bytes": "6063930"
},
{
"name": "Python",
"bytes": "42119174"
},
{
"name": "Shell",
"bytes": "4004"
}
],
"symlink_target": ""
} |
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo.config import cfg
from cinder.brick import exception as brick_exception
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('volume_group',
default='cinder-volumes',
help='Name for the VG that will contain exported volumes'),
cfg.IntOpt('lvm_mirrors',
default=0,
help='If >0, create LVs with multiple mirrors. Note that '
'this requires lvm_mirrors + 2 PVs with available space'),
cfg.StrOpt('lvm_type',
default='default',
help='Type of LVM volumes to deploy; (default or thin)'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
"""Executes commands relating to Volumes."""
VERSION = '2.0.0'
def __init__(self, vg_obj=None, *args, **kwargs):
super(LVMVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.hostname = socket.gethostname()
self.vg = vg_obj
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM'
self.protocol = 'local'
def set_execute(self, execute):
self._execute = execute
def check_for_setup_error(self):
"""Verify that requirements are in place to use LVM driver."""
if self.vg is None:
root_helper = utils.get_root_helper()
try:
self.vg = lvm.LVM(self.configuration.volume_group,
root_helper,
lvm_type=self.configuration.lvm_type,
executor=self._execute)
except brick_exception.VolumeGroupNotFound:
message = ("Volume Group %s does not exist" %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
vg_list = volutils.get_all_volume_groups(
self.configuration.volume_group)
            vg_dict = next(
                (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
if vg_dict is None:
message = ("Volume Group %s does not exist" %
self.configuration.volume_group)
raise exception.VolumeBackendAPIException(data=message)
if self.configuration.lvm_type == 'thin':
# Specific checks for using Thin provisioned LV's
if not volutils.supports_thin_provisioning():
message = ("Thin provisioning not supported "
"on this version of LVM.")
raise exception.VolumeBackendAPIException(data=message)
pool_name = "%s-pool" % self.configuration.volume_group
if self.vg.get_volume(pool_name) is None:
try:
self.vg.create_thin_pool(pool_name)
except processutils.ProcessExecutionError as exc:
exception_message = ("Failed to create thin pool, "
"error message was: %s"
% exc.stderr)
raise exception.VolumeBackendAPIException(
data=exception_message)
def _sizestr(self, size_in_g):
if int(size_in_g) == 0:
return '100m'
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
def _clear_volume(self, volume, is_snapshot=False):
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
if is_snapshot:
# if the volume to be cleared is a snapshot of another volume
# we need to clear out the volume using the -cow instead of the
            # direct volume path. We need to skip this if we are using
# thin provisioned LVs.
# bug# lp1191812
dev_path = self.local_path(volume) + "-cow"
else:
dev_path = self.local_path(volume)
# TODO(jdg): Maybe we could optimize this for snaps by looking at
# the cow table and only overwriting what's necessary?
# for now we're still skipping on snaps due to hang issue
if not os.path.exists(dev_path):
msg = (_('Volume device file path %s does not exist.')
% dev_path)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
size_in_g = volume.get('size', volume.get('volume_size', None))
if size_in_g is None:
msg = (_("Size for volume: %s not found, "
"cannot secure delete.") % volume['id'])
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
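    # For illustration: 'snapshot-1234' is mangled to '_snapshot-1234', while
    # a name such as 'volume-1234' is returned unchanged.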
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
            LOG.error(_('Unable to delete due to existing snapshot '
'for volume: %s') % volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_("snapshot: %s not found, "
"skipping delete operations") % snapshot['name'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
volume_name = src_vref['name']
temp_id = 'tmp-snap-%s' % volume['id']
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % volume['id'],
'id': temp_id}
self.create_snapshot(temp_snapshot)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
try:
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:
self.delete_snapshot(temp_snapshot)
def clone_image(self, volume, image_location, image_id, image_meta):
return None, False
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
volume = self.db.volume_get(context, backup['volume_id'])
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path) as volume_file:
backup_service.backup(backup, volume_file)
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with fileutils.file_open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
if self.vg is None:
LOG.warning(_('Unable to update stats on non-initialized '
'Volume Group: %s'), self.configuration.volume_group)
return
self.vg.update_volume_group_info()
data = {}
# Note(zhiteng): These information are driver/backend specific,
# each driver may define these values in its own config options
# or fetch from driver specific configuration file.
data["volume_backend_name"] = self.backend_name
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
if self.configuration.lvm_mirrors > 0:
data['total_capacity_gb'] =\
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
data['free_capacity_gb'] =\
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
elif self.configuration.lvm_type == 'thin':
data['total_capacity_gb'] = self.vg.vg_thin_pool_size
data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space
else:
data['total_capacity_gb'] = self.vg.vg_size
data['free_capacity_gb'] = self.vg.vg_free_space
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
data['location_info'] =\
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'vg': self.configuration.volume_group,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
self._stats = data
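        # For illustration: with default settings location_info comes out as
        # something like 'LVMVolumeDriver:myhost:cinder-volumes:default:0'
        # (hostname assumed), which migrate_volume() on the destination splits
        # back into (driver, hostname, vg, lvm_type, lvm_mirrors).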
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
def manage_existing(self, volume, existing_ref):
"""Manages an existing LV.
Renames the LV to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated.
"""
lv_name = existing_ref['source-name']
self.vg.get_volume(lv_name)
# Attempt to rename the LV to match the OpenStack internal name.
try:
self.vg.rename_volume(lv_name, volume['name'])
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to rename logical volume %(name)s, "
"error message was: %(err_msg)s")
% {'name': lv_name,
'err_msg': exc.stderr})
raise exception.VolumeBackendAPIException(
data=exception_message)
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of LV>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lv_name = existing_ref['source-name']
lv = self.vg.get_volume(lv_name)
# Raise an exception if we didn't find a suitable LV.
if not lv:
kwargs = {'existing_ref': lv_name,
'reason': 'Specified logical volume does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
# LV size is returned in gigabytes. Attempt to parse size as a float
# and round up to the next integer.
try:
lv_size = int(math.ceil(float(lv['size'])))
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size %(size)s "
"was not a floating-point number.")
% {'name': lv_name,
'size': lv['size']})
raise exception.VolumeBackendAPIException(
data=exception_message)
return lv_size
class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
self.db = kwargs.get('db')
self.target_helper = self.get_target_helper(self.db)
super(LVMISCSIDriver, self).__init__(*args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM_iSCSI'
self.protocol = 'iSCSI'
def set_execute(self, execute):
super(LVMISCSIDriver, self).set_execute(execute)
if self.target_helper is not None:
self.target_helper.set_execute(execute)
def _create_target(self, iscsi_name, iscsi_target,
volume_path, chap_auth, lun=0,
check_exit_code=False, old_name=None):
# NOTE(jdg): tgt driver has an issue where with a lot of activity
# (or sometimes just randomly) it will get *confused* and attempt
# to reuse a target ID, resulting in a target already exists error
# Typically a simple retry will address this
# For now we have this while loop, might be useful in the
# future to throw a retry decorator in common or utils
attempts = 2
while attempts > 0:
attempts -= 1
try:
# NOTE(jdg): For TgtAdm case iscsi_name is all we need
# should clean this all up at some point in the future
tid = self.target_helper.create_iscsi_target(
iscsi_name,
iscsi_target,
0,
volume_path,
chap_auth,
check_exit_code=check_exit_code,
old_name=old_name)
break
except brick_exception.ISCSITargetCreateFailed:
if attempts == 0:
raise
else:
LOG.warning(_('Error creating iSCSI target, retrying '
'creation for target: %s') % iscsi_name)
return tid
def ensure_export(self, context, volume):
volume_name = volume['name']
iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
volume_name)
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume_name)
# NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
# should clean this all up at some point in the future
model_update = self.target_helper.ensure_export(
context, volume,
iscsi_name,
volume_path,
self.configuration.volume_group,
self.configuration)
if model_update:
self.db.volume_update(context, volume['id'], model_update)
def create_export(self, context, volume):
return self._create_export(context, volume)
def _create_export(self, context, volume, vg=None):
"""Creates an export for a logical volume."""
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
data = self.target_helper.create_export(context,
volume,
volume_path,
self.configuration)
return {
'provider_location': data['location'],
'provider_auth': data['auth'],
}
def remove_export(self, context, volume):
self.target_helper.remove_export(context, volume)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
"""Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
"""
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
try:
(dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
info.split(':')
lvm_mirrors = int(lvm_mirrors)
except ValueError:
return false_ret
if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
return false_ret
if dest_vg != self.vg.vg_name:
vg_list = volutils.get_all_volume_groups()
try:
(vg for vg in vg_list if vg['name'] == dest_vg).next()
except StopIteration:
message = (_("Destination Volume Group %s does not exist") %
dest_vg)
LOG.error(message)
return false_ret
helper = utils.get_root_helper()
dest_vg_ref = lvm.LVM(dest_vg, helper,
lvm_type=lvm_type,
executor=self._execute)
self.remove_export(ctxt, volume)
self._create_volume(volume['name'],
self._sizestr(volume['size']),
lvm_type,
lvm_mirrors,
dest_vg_ref)
volutils.copy_volume(self.local_path(volume),
self.local_path(volume, vg=dest_vg),
volume['size'],
self.configuration.volume_dd_blocksize,
execute=self._execute)
self._delete_volume(volume)
model_update = self._create_export(ctxt, volume, vg=dest_vg)
return (True, model_update)
def _iscsi_location(self, ip, target, iqn, lun=None):
return "%s:%s,%s %s %s" % (ip, self.configuration.iscsi_port,
target, iqn, lun)
def _iscsi_authentication(self, chap, name, password):
return "%s %s %s" % (chap, name, password)
class LVMISERDriver(LVMISCSIDriver, driver.ISERDriver):
"""Executes commands relating to ISER volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSER target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
def __init__(self, *args, **kwargs):
self.target_helper = self.get_target_helper(kwargs.get('db'))
LVMVolumeDriver.__init__(self, *args, **kwargs)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'LVM_iSER'
self.protocol = 'iSER'
| {
"content_hash": "ccc9f58aeb4f7731f009ed082f377327",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 79,
"avg_line_length": 40.701130856219706,
"alnum_prop": 0.5619591966341192,
"repo_name": "github-borat/cinder",
"id": "f984a506c4786b0fc0b5afd95292facca00bd15d",
"size": "25925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/lvm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6575951"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
} |
def break_words(stuff):
"""This function breaks stuff into words"""
words = stuff.split(' ')
return words
def sort_words(words):
"""Sorts list of words alphabetically"""
return sorted(words)
def print_first_word(words):
"""removed and prints first word in a list"""
first = words.pop(0)
print first
def print_last_word(words):
"""removes and prints last word in a list"""
last = words.pop(-1)
print last
def sort_sentence(sentence):
"""takes a string sentence and returns sorted list of words"""
words = break_words(sentence)
return sort_words(words)
def print_first_and_last(sentence):
"""prints first and last word from string sentence"""
words = break_words(sentence)
print_first_word(words)
print_last_word(words)
# dreload(ex25) in ipython is very useful
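# Editor's usage sketch (hedged); the sample sentence is arbitrary:
#   >>> words = break_words("all good things come to those who wait")
#   >>> sort_words(words)
#   ['all', 'come', 'good', 'things', 'those', 'to', 'wait', 'who']
#   >>> print_first_word(words)
#   all
#   >>> print_last_word(words)
#   wait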
| {
"content_hash": "2e03f2b3aab4bf2f14545fecf66a0241",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 24.53125,
"alnum_prop": 0.7197452229299363,
"repo_name": "cohadar/learn-python-the-hard-way",
"id": "15891d32ef19ef111324c88f8b73201ae742efde",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex25.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30545"
}
],
"symlink_target": ""
} |
"""Unit tests for classroom_domain.py"""
from __future__ import annotations
from core.domain import classroom_domain
from core.tests import test_utils
class ClassroomDomainTests(test_utils.GenericTestBase):
def test_that_domain_object_is_created_correctly(self) -> None:
classroom_data = classroom_domain.Classroom(
'exp', 'exp/', [], 'general details', 'general intro')
self.assertEqual(classroom_data.name, 'exp')
self.assertEqual(classroom_data.url_fragment, 'exp/')
self.assertEqual(classroom_data.topic_ids, [])
self.assertEqual(classroom_data.course_details, 'general details')
self.assertEqual(classroom_data.topic_list_intro, 'general intro')
| {
"content_hash": "32aeb94e046635bb300d809171afe220",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 40,
"alnum_prop": 0.7055555555555556,
"repo_name": "oppia/oppia",
"id": "ba6624c21e1b89dfc3521b8ed154e0a90606eddb",
"size": "1343",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/classroom_domain_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
session=None):
self.find_links = find_links
self.index_urls = index_urls
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# The Session we'll use to make requests
self.session = session or PipSession()
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
              with the same version would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_link_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._link_sort_key,
reverse=True
)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
# This will also cache the page, so it's okay that we get it again
# later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
url_name, req
) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (
location,
parsed.scheme,
", ".join(secure_schemes),
parsed.netloc,
)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links],
req.name.lower()
)
)
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(
self._package_versions(page.links, req.name.lower())
)
finally:
logger.indent -= 2
file_versions = list(
self._package_versions(
[Link(url) for url in file_locations],
req.name.lower()
)
)
if (not found_versions
and not page_versions
and not file_versions):
logger.fatal(
'Could not find any downloads that satisfy the requirement'
' %s' % req
)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound(
'No distributions at all found for %s' % req
)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(
req.satisfied_by.parsed_version,
INSTALLED_VERSION,
req.satisfied_by.version,
)]
if file_versions:
file_versions.sort(reverse=True)
logger.info(
'Local files found: %s' %
', '.join([
url_to_path(link.url)
for _, link, _ in file_versions
])
)
# this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions \
+ page_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info(
"Ignoring link %s, version %s doesn't match %s" %
(
link,
version,
','.join([''.join(s) for s in req.req.specs])
)
)
continue
elif (is_prerelease(version)
and not (self.allow_all_prereleases or req.prereleases)):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info(
"Ignoring link %s, version %s is a pre-release (use "
"--pre to allow)." % (link, version)
)
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([
link
for parsed_version, link, version in applicable_versions
if link is INSTALLED_VERSION
])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement' % req.satisfied_by.version
)
else:
logger.info(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)' %
(req.satisfied_by.version, applicable_versions[0][2])
)
return None
if not applicable_versions:
logger.fatal(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)' %
(
req,
', '.join(
sorted(set([
version
for parsed_version, link, version in all_versions
])))
)
)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound(
'No distributions matching the version for %s' % req
)
if applicable_versions[0][1] is INSTALLED_VERSION:
            # We have an existing version, and it's the best version
logger.info(
'Installed version (%s) is most up-to-date (past versions: '
'%s)' % (
req.satisfied_by.version,
', '.join([
version for parsed_version, link, version
in applicable_versions[1:]
]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info(
'Using version %s (newest of versions: %s)' %
(
applicable_versions[0][2],
', '.join([
version for parsed_version, link, version
in applicable_versions
])
)
)
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify(
'Real name of requirement %s is %s' % (url_name, base)
)
return base
return None
def _get_pages(self, locations, req):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (normalized not in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and normalized not in self.allow_unverified):
logger.debug(
"Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link
)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug(
'Skipping link %s; unknown archive format: %s' %
(link, ext)
)
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug(
'Skipping %s because the wheel filename is invalid' %
link
)
return []
if wheel.name.lower() != search_name.lower():
logger.debug(
'Skipping link %s; wrong project name (not %s)' %
(link, search_name)
)
return []
if not wheel.supported():
logger.debug(
'Skipping %s because it is not compatible with this '
'Python' % link
)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(
comes_from.url
).netloc.endswith("pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug(
'Skipping link %s; wrong project name (not %s)' %
(link, search_name)
)
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower()
in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug(
'Skipping %s because Python version is incorrect' % link
)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(
pkg_resources.parse_version(version),
link,
version,
)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
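    # Editor's illustration (hedged): for any link,
    #   self._egg_info_matches('pip-1.5.2', 'pip', link)  -> '1.5.2'
    #   self._egg_info_matches('pip-1.5.2', 'pipx', link) -> None
    # because the name portion must match search_name followed by a dash.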
def _get_page(self, link, req):
return HTMLPage.get_page(
link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0) + level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
# FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile(
'href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))',
re.I | re.S
)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug(
'Cannot look at %(scheme)s URL %(link)s' % locals()
)
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: '
'%s' % (link, content_type)
)
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
                # requirement of a url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s' %
(link, content_type)
)
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(
req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if scheme not in ('http', 'https', 'ftp', 'ftps'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [
x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"
]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(
anchor.get("rel")
and "internal" in anchor.get("rel").split()
)
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urlparse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = (
href_match.group(1)
or href_match.group(2)
or href_match.group(3)
)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
            lambda match: '%%%02x' % ord(match.group(0)), url)
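    # Editor's illustration (hedged):
    #   self.clean_link('https://example.com/a b#x') -> 'https://example.com/a%20b#x'
    # Characters already in the allowed set (including '%') are left alone, so
    # pre-quoted links pass through unchanged.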
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted
                # source; however, it is not operating under API version 2,
                # so we can't make any claims about whether it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
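# Editor's illustration (hedged); the URLs and names below are made up.
#   get_requirement_from_url('http://example.com/archive.zip#egg=Foo') -> 'Foo'
#   package_to_requirement('Foo-1.2')  -> 'Foo==1.2'
#   package_to_requirement('Foo-dev')  -> 'Foo==dev'
#   package_to_requirement('Foo')      -> 'Foo'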
| {
"content_hash": "1c6dfad3b1f4ce2454d2619668e0a9ef",
"timestamp": "",
"source": "github",
"line_count": 1107,
"max_line_length": 79,
"avg_line_length": 37.15356820234869,
"alnum_prop": 0.5148921685428773,
"repo_name": "blueyed/pip",
"id": "295246a7d56111b5ce0dd04e6f995dbb702f0c14",
"size": "41129",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pip/index.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from json import JSONEncoder
import random
import os
import uuid
import json
from .config import Config
# for function serialization
class HCEncoder(JSONEncoder):
def default(self, o):
if(hasattr(o, '__call__')): # is function
return "function:" +o.__module__+"."+o.__name__
else:
try:
return o.__dict__
except AttributeError:
try:
return str(o)
except AttributeError:
                    return super(HCEncoder, self).default(o)
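# Editor's usage sketch (hedged): serializing a store that holds a callable;
# random.random is just a convenient stand-in for any function value.
#   json.dumps({'activation': random.random}, cls=HCEncoder)
#   -> '{"activation": "function:random.random"}'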
class Selector:
def __init__(self, initialStore = {}):
self.store = initialStore
self.results = []
def set(self, key, value):
"""Sets a hyperparameter. Can be used to set an array of hyperparameters."""
self.store[key]=value
return self.store
def count_configs(self):
count = 1
for key in self.store:
value = self.store[key]
if(isinstance(value,list)):
count *= len(value)
return count
def get_config_value(self, k, i):
"""Gets the ith config value for k. e.g. get_config_value('x', 1)"""
if(not isinstance(self.store[k], list)):
return self.store[k]
else:
return self.store[k][i]
def configs(self, max_configs=1, offset=None, serial=False, create_uuid=True):
"""Generate max configs, each one a dictionary. e.g. [{'x': 1}]
Will also add a config UUID, useful for tracking configs.
You can turn this off by passing create_uuid=False.
"""
if len(self.store)==0:
return []
configs = []
if(offset is None):
offset = max(0, random.randint(0, self.count_configs()))
for i in range(max_configs):
# get an element to index over
config = self.config_at(offset)
if(create_uuid):
config["uuid"]=uuid.uuid4().hex
configs.append(config)
if(serial):
offset+=1
else:
offset = max(0, random.randint(0, self.count_configs()))
return configs
def config_at(self, i):
"""Gets the ith config"""
selections = {}
for key in self.store:
value = self.store[key]
if isinstance(value, list):
selected = i % len(value)
i = i // len(value)
selections[key]= value[selected]
else:
selections[key]= value
return Config(selections)
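        # Editor's illustration (hedged, relies on dict insertion order): with
        # store = {'lr': [0.1, 0.01], 'units': [32, 64], 'opt': 'sgd'},
        # config_at(3) picks lr=0.01 (3 % 2), then units=64 (3 // 2 % 2), and
        # keeps opt='sgd' -- the index is decoded mixed-radix across list keys.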
def random_config(self):
offset = max(0, random.randint(0, self.count_configs()))
return self.config_at(offset)
def reset(self):
"""Reset the hyperchamber variables"""
self.store = {}
self.results = []
return
def top(self, sort_by):
"""Get the best results according to your custom sort method."""
sort = sorted(self.results, key=sort_by)
return sort
def record(self, config, result):
"""Record the results of a config."""
self.results.append((config, result))
def load(self, filename, load_toml=False):
"""Loads a config from disk"""
content = open(filename)
if load_toml:
import toml
return Config(toml.load(content))
else:
return Config(json.load(content))
def load_or_create_config(self, filename, config=None):
"""Loads a config from disk. Defaults to a random config if none is specified"""
os.makedirs(os.path.dirname(os.path.expanduser(filename)), exist_ok=True)
if os.path.exists(filename):
return self.load(filename)
if(config == None):
config = self.random_config()
self.save(filename, config)
return config
def save(self, filename, config):
"""Loads a config from disk"""
return open(os.path.expanduser(filename), 'w').write(json.dumps(config, cls=HCEncoder, sort_keys=True, indent=2, separators=(',', ': ')))
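# Editor's end-to-end sketch (hedged): the hyperparameter names, values and the
# train() callable below are hypothetical and only illustrate the call pattern.
#   hc = Selector()
#   hc.set('lr', [0.1, 0.01, 0.001])
#   hc.set('batch_size', 32)
#   for config in hc.configs(max_configs=2):
#       loss = train(config)              # hypothetical objective
#       hc.record(config, loss)
#   best_config, best_loss = hc.top(sort_by=lambda r: r[1])[0]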
| {
"content_hash": "7ee8e91333e2f6ccdaa5538b500ceab1",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 148,
"avg_line_length": 29.992481203007518,
"alnum_prop": 0.5632990724492354,
"repo_name": "255BITS/hyperchamber",
"id": "129e36f21bd86dea89e8963756759dd433df4a06",
"size": "3989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperchamber/selector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16723"
},
{
"name": "Shell",
"bytes": "30"
}
],
"symlink_target": ""
} |