text (string, lengths 4 to 1.02M) | meta (dict) |
---|---|
from electrum_ltc.i18n import _
fullname = 'Trezor Wallet'
description = _('Provides support for Trezor hardware wallet')
requires = [('trezorlib','pypi.org/project/trezor/')]
registers_keystore = ('hardware', 'trezor', _("Trezor wallet"))
available_for = ['qt', 'cmdline']
| {
"content_hash": "03d3d62d6f23a0753e148f82c263617f",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 34.5,
"alnum_prop": 0.7065217391304348,
"repo_name": "vialectrum/vialectrum",
"id": "fdb0a91bfc0caec612869d1ec00402914f23fa6d",
"size": "276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "electrum_ltc/plugins/trezor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "839"
},
{
"name": "NSIS",
"bytes": "7496"
},
{
"name": "Python",
"bytes": "1895270"
},
{
"name": "Shell",
"bytes": "16219"
}
],
"symlink_target": ""
} |
"""drive_pot_fr
Drive the landlab potentiality flow routing component.
Created on Wed Mar 4 2015
@author: danhobley
"""
from __future__ import print_function
from landlab import RasterModelGrid, ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
import numpy as np
from pylab import imshow, show, contour, figure, clabel, quiver, plot, close
from landlab.components.potentiality_flowrouting.route_flow_by_boundary import PotentialityFlowRouter
from landlab.components.flow_routing.route_flow_dn import FlowRouter
from landlab.components.stream_power.fastscape_stream_power import SPEroder
from landlab.grid.mappers import map_link_end_node_max_value_to_link
inputs = ModelParameterDictionary('./pot_fr_params.txt')
nrows = 50  # inputs.read_int('nrows')
ncols = 50  # inputs.read_int('ncols')
dx = inputs.read_float('dx')
init_elev = inputs.read_float('init_elev')
mg = RasterModelGrid(nrows, ncols, dx)
# attempt to implement diffusion with flow routing...
#modify the fields in the grid
z = mg.create_node_array_zeros() + init_elev
mg.at_node['topographic__elevation'] = z + np.random.rand(len(z))/1000.
mg.create_node_array_zeros('water__volume_flux_in')
#Set boundary conditions
mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
mg.set_fixed_value_boundaries_at_grid_edges(False, False, False, True)
inlet_node = np.array((mg.number_of_node_columns + 1))
mg.at_node['water__volume_flux_in'].fill(0.)
mg.at_node['water__volume_flux_in'][inlet_node] = 1.
pfr = PotentialityFlowRouter(mg, 'pot_fr_params.txt')
interior_nodes = mg.core_nodes
# do the loop
for i in xrange(2000):
if i%50==0:
print('loop '+str(i))
mg.at_node['topographic__elevation'][inlet_node] = 1.
pfr.route_flow(route_on_diagonals=True)
#imshow(mg, 'water__volume_flux_magnitude')
#show()
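# Explicit diffusion step: use the routed water flux magnitude as the
# diffusivity, map it onto the active links, form a downslope flux
# qs = -kd*grad(z), take its divergence, and update the elevations of the
# core nodes with dz/dt = -div(qs).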
kd = mg.at_node['water__volume_flux_magnitude'] # 0.01 m2 per year
# dt = np.nanmin(0.2*mg.dx*mg.dx/kd) # CFL condition
dt = 0.5
g = mg.calculate_gradients_at_active_links(mg.at_node['topographic__elevation'])
map_link_end_node_max_value_to_link(mg, 'water__volume_flux_magnitude')
kd_link = 1.e6*mg.at_link['water__volume_flux_magnitude'][mg.active_links]
qs = -kd_link*g
dqsdx = mg.calculate_flux_divergence_at_nodes(qs)
dzdt = -dqsdx
mg.at_node['topographic__elevation'][interior_nodes] += dzdt[interior_nodes]*dt
figure(1)
imshow_node_grid(mg, 'topographic__elevation')
figure(2)
imshow_node_grid(mg, 'water__depth')
figure(3)
imshow_node_grid(mg, 'water__volume_flux_magnitude')
| {
"content_hash": "5723d12d20f6696c1fab919877c7e07b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 101,
"avg_line_length": 37.072463768115945,
"alnum_prop": 0.7267396403440187,
"repo_name": "decvalts/landlab",
"id": "2a1dcafb84f02756eed70206790bcb27aa9ea550",
"size": "2582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/potentiality_flowrouting/examples/drive_pot_fr_coupled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1840"
},
{
"name": "PowerShell",
"bytes": "5599"
},
{
"name": "Python",
"bytes": "2464679"
},
{
"name": "Shell",
"bytes": "2255"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Mortar Data Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pig_util import outputSchema
@outputSchema('mongo_data:bag{t:(keyname:chararray, type:chararray, val:chararray)}')
def mongo_map(d, prefix=""):
"""
Go through a dictionary and for every key record the key name, type, and data value.
Recursively goes through embedded lists/dictionaries and prepends parent keys to the key name.
"""
output = []
for k,v in d.iteritems():
key_name = "%s%s" % (prefix, k)
if type(v) == list:
output.append( (key_name, type(v).__name__, type(v).__name__) )
for t in v:
for t_item in t:
if type(t_item) == dict:
output += mongo_map(t_item, "%s." % key_name)
elif type(v) == dict:
output.append( (key_name, type(v).__name__, type(v).__name__) )
output += mongo_map(v, "%s." % key_name)
else:
#For simple types, keep example values
output.append( (key_name, type(v).__name__, "%s" % v) )
return output
@outputSchema('schema:chararray')
def create_mongo_schema(results, prefix_to_remove=""):
"""
Create a schema string that can be used by the MongoLoader for this collection.
results: List of keyname with ordered counts of the type:
[ (keyname, [ (type1, count1), (type2, count2) ]),
(keyname, [ (type1, count1), (type2, count2) ]), ... ]
prefix_to_remove: String to remove from keyname.
"""
params = []
index = 0
while index < len(results):
t = results[index]
full_key_name = t[0]
short_key_name = t[0].replace(prefix_to_remove, "")
key_type_counts = t[1]
key_type_counts.sort(key=lambda x: x[1])
key_type = key_type_counts[0][1]
if key_type == 'NoneType':
if len(key_type_counts) > 1:
key_type = key_type_counts[1][1]
else:
#Default to loading field as a string
key_type = "unicode"
if key_type == 'list':
inner_params = []
index += 1
while index < len(results) and results[index][0].startswith(full_key_name):
inner_params.append(results[index])
index += 1
inner_schema = create_mongo_schema(inner_params, "%s%s." % (prefix_to_remove, short_key_name))
param = "%s:bag{t:tuple(%s)}" % (short_key_name, inner_schema)
elif key_type == 'dict':
inner_params = []
index += 1
while index < len(results) and results[index][0].startswith(full_key_name):
inner_params.append(results[index])
index += 1
inner_schema = create_mongo_schema(inner_params, "%s%s." % (prefix_to_remove, short_key_name))
param = "%s:tuple(%s)" % (short_key_name, inner_schema)
else:
pig_key_type = _get_pig_type(key_type)
param = "%s:%s" % (short_key_name, pig_key_type)
index += 1
params.append(param)
depth = "\t" * prefix_to_remove.count(".")
join_str = ",\n%s" % depth
schema = "\n%s%s" % (depth, join_str.join(params))
#Print out final schema but not intermediate ones.
#This allows a schema to be printed from a single document with illustrate
if not prefix_to_remove:
print schema
return schema
def _get_pig_type(python_type):
if python_type == 'unicode':
return 'chararray'
elif python_type == 'bytearray':
return 'bytearray'
elif python_type == 'long':
return 'long'
elif python_type == 'int':
return 'int'
elif python_type == 'float':
return 'double'
else:
return 'unknown'
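# --- Hedged usage sketch (not part of the original UDF module) ---
# Illustrates the (keyname, type, value) tuples that mongo_map() emits for a
# small nested document. The sample document is hypothetical; running this
# needs the Python 2 environment Pig provides, since the module imports
# pig_util and relies on dict.iteritems().
if __name__ == "__main__":
    sample = {u'name': u'bob', u'age': 42, u'address': {u'city': u'NYC'}}
    for keyname, typename, value in mongo_map(sample):
        print "%s\t%s\t%s" % (keyname, typename, value)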
| {
"content_hash": "71486d59d29b7ac778d5180d770434aa",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 106,
"avg_line_length": 37.37068965517241,
"alnum_prop": 0.5737024221453287,
"repo_name": "brekru212/mongo-pig-examples",
"id": "a89222a36c9cf6c9febb932d144793173ea07e2d",
"size": "4335",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "udfs/python/mongo_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PigLatin",
"bytes": "8243"
},
{
"name": "Python",
"bytes": "6213"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import zlib
import json
import click
import logging
import six
from datetime import datetime
from subprocess import Popen, PIPE
from contextlib import contextmanager
from six.moves.urllib.parse import urlparse
HERE = os.path.abspath(os.path.dirname(__file__))
SENTRY_CONFIG = os.environ['SENTRY_CONF'] = os.path.join(HERE, 'sentry.conf.py')
os.environ['SENTRY_SKIP_BACKEND_VALIDATION'] = '1'
# No sentry or django imports before that point
from sentry.runner import configure
configure()
from django.conf import settings
# Fair game from here
from django.core.management import call_command
from sentry.utils.apidocs import Runner, MockUtils, iter_scenarios, \
iter_endpoints, get_sections
OUTPUT_PATH = os.path.join(HERE, 'cache')
HOST = urlparse(settings.SENTRY_OPTIONS['system.url-prefix']).netloc
# We don't care about you, go away
_logger = logging.getLogger('sentry.events')
_logger.disabled = True
def color_for_string(s):
colors = ('red', 'green', 'yellow', 'blue', 'cyan', 'magenta')
return colors[zlib.crc32(s) % len(colors)]
def report(category, message, fg=None):
if fg is None:
fg = color_for_string(category)
click.echo('[%s] %s: %s' % (
six.text_type(datetime.utcnow()).split('.')[0],
click.style(category, fg=fg),
message
))
def launch_redis():
report('redis', 'Launching redis server')
cl = Popen(['redis-server', '-'], stdin=PIPE, stdout=open(os.devnull, 'r+'))
cl.stdin.write('''
port %(port)s
databases %(databases)d
save ""
''' % {
'port': six.text_type(settings.SENTRY_APIDOCS_REDIS_PORT),
'databases': 4,
})
cl.stdin.flush()
cl.stdin.close()
return cl
def spawn_sentry():
report('sentry', 'Launching sentry server')
cl = Popen(['sentry', '--config=' + SENTRY_CONFIG, 'run', 'web',
'-w', '1', '--bind', '127.0.0.1:%s' % settings.SENTRY_APIDOCS_WEB_PORT])
return cl
@contextmanager
def management_connection():
from sqlite3 import connect
cfg = settings.DATABASES['default']
con = connect(cfg['NAME'])
try:
con.cursor()
yield con
finally:
con.close()
def init_db():
drop_db()
report('db', 'Migrating database (this can take some time)')
call_command('syncdb', migrate=True, interactive=False,
traceback=True, verbosity=0)
def drop_db():
report('db', 'Dropping database')
try:
os.remove(settings.DATABASES['default']['NAME'])
except (OSError, IOError):
pass
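# Context manager that brings up a throwaway environment for doc generation:
# launches redis and a local sentry web server and migrates a fresh database
# on entry, then shuts both servers down and drops the database on exit.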
class SentryBox(object):
def __init__(self):
self.redis = None
self.sentry = None
self.task_runner = None
def __enter__(self):
self.redis = launch_redis()
self.sentry = spawn_sentry()
init_db()
return self
def __exit__(self, exc_type, exc_value, tb):
if self.sentry is not None:
report('sentry', 'Shutting down sentry server')
self.sentry.kill()
self.sentry.wait()
if self.redis is not None:
report('redis', 'Stopping redis server')
self.redis.kill()
self.redis.wait()
drop_db()
def dump_json(path, data):
path = os.path.join(OUTPUT_PATH, path)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
with open(path, 'w') as f:
for line in json.dumps(data, indent=2, sort_keys=True).splitlines():
f.write(line.rstrip() + '\n')
def run_scenario(vars, scenario_ident, func):
runner = Runner(scenario_ident, func, **vars)
report('scenario', 'Running scenario "%s"' % scenario_ident)
func(runner)
dump_json('scenarios/%s.json' % scenario_ident, runner.to_json())
@click.command()
@click.option('--output-path', type=click.Path())
def cli(output_path):
"""API docs dummy generator."""
global OUTPUT_PATH
if output_path is not None:
OUTPUT_PATH = os.path.abspath(output_path)
with SentryBox():
utils = MockUtils()
report('org', 'Creating user and organization')
user = utils.create_user('[email protected]')
org = utils.create_org('The Interstellar Jurisdiction',
owner=user)
api_key = utils.create_api_key(org)
report('org', 'Creating team')
team = utils.create_team('Powerful Abolitionist',
org=org)
projects = []
for project_name in 'Pump Station', 'Prime Mover':
report('project', 'Creating project "%s"' % project_name)
project = utils.create_project(project_name, teams=[team], org=org)
release = utils.create_release(project=project, user=user)
report('event', 'Creating event for "%s"' % project_name)
event1 = utils.create_event(project=project, release=release,
platform='python')
event2 = utils.create_event(project=project, release=release,
platform='java')
projects.append({
'project': project,
'release': release,
'events': [event1, event2],
})
vars = {
'org': org,
'me': user,
'api_key': api_key,
'teams': [{
'team': team,
'projects': projects,
}],
}
for scenario_ident, func in iter_scenarios():
run_scenario(vars, scenario_ident, func)
section_mapping = {}
report('docs', 'Exporting endpoint documentation')
for endpoint in iter_endpoints():
report('endpoint', 'Exporting docs for "%s"' %
endpoint['endpoint_name'])
section_mapping.setdefault(endpoint['section'], []) \
.append((endpoint['endpoint_name'],
endpoint['title']))
dump_json('endpoints/%s.json' % endpoint['endpoint_name'], endpoint)
report('docs', 'Exporting sections')
dump_json('sections.json', {
'sections': dict((section, {
'title': title,
'entries': dict(section_mapping.get(section, ())),
}) for section, title in six.iteritems(get_sections()))
})
if __name__ == '__main__':
cli()
| {
"content_hash": "941ac43e90f801d72d5eafff7ea8f232",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 88,
"avg_line_length": 29.53211009174312,
"alnum_prop": 0.5792171481826655,
"repo_name": "ifduyue/sentry",
"id": "7ab746e31d74944b9e6b6f17a3c1582cae71b812",
"size": "6438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api-docs/generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import powerdns.models.powerdns
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('powerdns', '0017_auto_20160104_0642'),
]
operations = [
migrations.AlterField(
model_name='domain',
name='name',
field=models.CharField(validators=[django.core.validators.RegexValidator('^(\\*\\.)?([_A-Za-z0-9-]+\\.)*([A-Za-z0-9])+$'), powerdns.models.powerdns.SubDomainValidator()], verbose_name='name', max_length=255, unique=True),
),
migrations.AlterField(
model_name='recordrequest',
name='type',
field=models.CharField(verbose_name='type', default='TXT', choices=[('A', 'A'), ('AAAA', 'AAAA'), ('AFSDB', 'AFSDB'), ('CERT', 'CERT'), ('CNAME', 'CNAME'), ('DNSKEY', 'DNSKEY'), ('DS', 'DS'), ('HINFO', 'HINFO'), ('KEY', 'KEY'), ('LOC', 'LOC'), ('MX', 'MX'), ('NAPTR', 'NAPTR'), ('NS', 'NS'), ('NSEC', 'NSEC'), ('PTR', 'PTR'), ('RP', 'RP'), ('RRSIG', 'RRSIG'), ('SOA', 'SOA'), ('SPF', 'SPF'), ('SRV', 'SRV'), ('SSHFP', 'SSHFP'), ('TXT', 'TXT')], max_length=6, help_text='Record qtype'),
preserve_default=False,
),
]
| {
"content_hash": "cd3cd1a05899d84f719b99dd7385ea55",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 497,
"avg_line_length": 48.57692307692308,
"alnum_prop": 0.5581947743467933,
"repo_name": "dominikkowalski/django-powerdns-dnssec",
"id": "578f3c294a10fbcec643f59c202e674e6908169f",
"size": "1287",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "powerdns/migrations/0018_auto_20160105_0824.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "956"
},
{
"name": "HTML",
"bytes": "26522"
},
{
"name": "JavaScript",
"bytes": "4419"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "277740"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "TypeScript",
"bytes": "51692"
}
],
"symlink_target": ""
} |
from pythonz.commands import Command, command_map
from pythonz.log import logger
class HelpCommand(Command):
name = "help"
usage = "%prog [COMMAND]"
summary = "Show available commands"
def run_command(self, options, args):
if args:
command = args[0]
if command not in command_map:
self.parser.error("Unknown command: `%s`" % command)
return
command = command_map[command]
command.parser.print_help()
return
self.parser.print_help()
logger.log("\nCommands available:")
commands = [command_map[key] for key in sorted(command_map.keys())]
for command in commands:
logger.log(" %s: %s" % (command.name, command.summary))
logger.log("\nFurther Instructions:")
logger.log(" https://github.com/saghul/pythonz")
HelpCommand()
| {
"content_hash": "49a54a6fb8e37fb83763bb7fabd6032f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 32.17857142857143,
"alnum_prop": 0.5937846836847946,
"repo_name": "Mitali-Sodhi/CodeLingo",
"id": "bafae83cf59d23ea2140875bf3744c10e47af3d6",
"size": "902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dataset/python/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9681846"
},
{
"name": "C#",
"bytes": "1741915"
},
{
"name": "C++",
"bytes": "5686017"
},
{
"name": "HTML",
"bytes": "11812193"
},
{
"name": "Java",
"bytes": "11198971"
},
{
"name": "JavaScript",
"bytes": "21693468"
},
{
"name": "M",
"bytes": "61627"
},
{
"name": "Objective-C",
"bytes": "4085820"
},
{
"name": "Perl",
"bytes": "193472"
},
{
"name": "Perl6",
"bytes": "176248"
},
{
"name": "Python",
"bytes": "10296284"
},
{
"name": "Ruby",
"bytes": "1050136"
}
],
"symlink_target": ""
} |
import time
import aiohttp
from datetime import timedelta
import json
import encryption
from secret import *
from AList_ProfileProcessor import profile_preprocessor
import random
class AList:
def __init__(self, client):
self.apiurl = "https://anilist.co/api"
self.commands = [['awaifu', self.waifu], ['ahusbando', self.husbando], ['acharacter', self.searchcharacter],
['acurrent', self.currentanime], ['aanime', self.searchanime], ['amanga', self.searchmanga],
['auser', self.get_user], ['apeople', self.user_search],
['afollow', self.follow_user], ['anilist', self.get_anime_list], ['amangalist', self.get_manga_list],
['awatch', self.mark_to_watch], ['anext', self.mark_one_up], ['awatching', self.get_watching],
['areading', self.get_reading], ['aread', self.mark_to_read]]
self.pools = client.redis
self.bot = client
self.enc = encryption.AESCipher(cryptokey)
self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'AngelBot (aiohttp 0.21.6 python 3.5.1)'}
if self.bot.shard_id == 0:
self.bot.loop.call_soon_threadsafe(self.get_readonly, self.bot.loop)
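# Keeps a read-only client_credentials token fresh: get_readonly() re-runs
# itself roughly once an hour (every 3500 seconds) and caches the token in
# redis under ALReadOnly/AccessToken for commands that do not need a
# user-specific OAuth grant.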
def get_readonly(self, loop):
loop.create_task(self._get_readonly())
loop.call_later(3500, self.get_readonly, loop)
async def _get_readonly(self):
async with self.pools.get() as pool:
cid = await pool.hget("AniList", "ClientID")
csecret = await pool.hget("AniList", "ClientSecret")
data = {'grant_type': 'client_credentials', 'client_id': cid,
'client_secret': csecret}
url = self.apiurl + "/auth/access_token"
with aiohttp.ClientSession() as session:
async with session.post(url, data=data) as response:
jsd = await response.json()
await pool.hset("ALReadOnly", "AccessToken", jsd['access_token'])
async def get_oauth(self, id):
async with self.pools.get() as dbp:
test = await dbp.exists(id)
if test:
test = await dbp.hexists(id, "Anilist_Expires")
if test:
expiration = await dbp.hget(id, "Anilist_Expires")
if int(expiration) < time.time():
refresh = await dbp.hget(id, "Anilist_Refresh")
cid = await dbp.hget("AniList", "ClientID")
csec = await dbp.hget("AniList", "ClientSecret")
params = {'grant_type': 'refresh_token', 'client_id': cid, 'client_secret': csec, 'refresh_token': refresh}
with aiohttp.ClientSession() as session:
async with session.post("https://anilist.co/api/auth/access_token", data=params) as response:
text = await response.text()
if text == "\n" or response.status == 404:
return 0
else:
jsd = json.loads(text)
await dbp.hset(id, "Anilist_Expires", int(time.time())+3600)
await dbp.hset(id, "Anilist_Token", self.enc.encrypt(jsd['access_token']))
return jsd['access_token']
else:
atoken = await dbp.hget(id, "Anilist_Token")
return self.enc.decrypt(atoken).decode()
else:
return 0
else:
return 0
async def waifu(self, message):
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
if name.isdigit():
url = self.apiurl + "/character/" + str(name)
else:
url = self.apiurl + "/character/search/" + name
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if response.status == 404 or text == "\n":
await self.bot.send_message(message.channel, "What character? You don't even know the name of your waifu? The shame.")
jsd = json.loads(text)
if 'error' in jsd:
await self.bot.send_message(message.channel, "What character? You don't even know the name of your waifu? The shame.")
else:
if isinstance(jsd, list) and len(jsd) > 0:
jsd = jsd[0]
elif isinstance(jsd, list) and len(jsd) == 0:
print("[" + jsd + "\n" + response.status + "]")
whc = "{0} confesses their undying devotion to their waifu {1}{2}!\n{3}".format(message.author.name,
jsd['name_first'],
' ' + jsd['name_last'] if jsd['name_last'] is not None else '',
jsd['image_url_med'])
await self.bot.send_message(message.channel, whc)
async def husbando(self, message):
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
if name.isdigit():
url = self.apiurl + "/character/" + str(name)
else:
url = self.apiurl + "/character/search/" + name
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if text == "\n" or response.status == 404:
await self.bot.send_message(message.channel, "What character? You don't even know the name of your husbando? The shame.")
jsd = json.loads(text)
if 'error' in jsd:
await self.bot.send_message(message.channel, "What character? You don't even know the name of your husbando? The shame.")
else:
if isinstance(jsd, list) and len(jsd) > 0:
jsd = jsd[0]
elif isinstance(jsd, list) and len(jsd) == 0:
print("[" + jsd + "\n" + response.status + "]")
whc = "{0} confesses their undying devotion to their husbando {1}{2}!\n{3}".format(message.author.name,
jsd['name_first'],
' ' + jsd['name_last'] if jsd['name_last'] is not None else '',
jsd['image_url_med'])
await self.bot.send_message(message.channel, whc)
async def searchcharacter(self, message):
name = "%20".join(message.content.split(" ")[1:])
if name.isdigit():
await self.bot.send_message(message.channel, await self.parsecharacter(name))
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/character/search/" + name.replace(' ', '%20')
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if text == '\n' or response.status == 404:
await self.bot.send_message(message.channel, "[ANILIST] No results for a character named {0} in Anilist.".format(name))
else:
jsd = json.loads(text)
if 'error' in jsd:
await self.bot.send_message(message.channel, "[ANILIST] No results for a character named {0} in Anilist.".format(name))
else:
if len(jsd) > 1:
msg = "Found these characters ->\n"
for i in jsd:
msg += " {0}{1} (ID: {2})\n".format(i['name_first'], '\b' + i.get('name_last', ''), i['id'])
await self.bot.send_message(message.channel, msg)
elif len(jsd) == 1:
await self.bot.send_message(message.channel, await self.parsecharacter(jsd[0]['id']))
async def parsecharacter(self, id):
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/character/" + str(id)
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
jsd = await response.json()
return "{0} {1}\nInfo: {2}\n{3}".format(jsd['name_first'], jsd.get('name_last', ''),
jsd['info'], jsd['image_url_med'])
async def searchanime(self, message):
name = "%20".join(message.content.split(" ")[1:])
if name.isdigit():
await self.bot.send_message(message.channel, await self.parseanime(name))
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/anime/search/" + name.replace(' ', '%20')
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if text == '\n' or response.status == 404:
await self.bot.send_message(message.channel, "[ANILIST] No results found on Anilist for Anime {0}".format(name.replace("%20", " ")))
else:
jsd = json.loads(text)
if 'error' in jsd:
await self.bot.send_message(message.channel, "[ANILIST] No results found on Anilist for Anime {0}".format(name.replace("%20", " ")))
else:
if len(jsd) > 1:
msg = "Found these Anime ->\n"
for i in jsd:
msg += " {0} (ID: {1})\n".format(i['title_english'], i['id'])
await self.bot.send_message(message.channel, msg)
elif len(jsd) == 1:
await self.bot.send_message(message.channel, await self.parseanime(jsd[0]['id']))
async def parseanime(self, id):
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/anime/" + str(id)
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
jsd = await response.json()
return "Titles\n English: {0}\n Romaji: {1}\n Japanese: {2}\nStatus: {3}\n{4}\nAverage Score: {5}\nGenres: {6}\nDescriptions: {7}\n{8}".format(
jsd['title_english'], jsd['title_romaji'], jsd['title_japanese'],
jsd['airing_status'], 'Episode {0} in {1}'.format(jsd['airing']['next_episode'], str(timedelta(seconds=jsd['airing']['countdown']))) if jsd['airing_status'].lower() == 'currently airing' else 'Episodes: {0}'.format(jsd['total_episodes']),
jsd['average_score'], ', '.join(jsd['genres']), jsd['description'].replace('<br>', '\n'),
jsd['image_url_med'])
async def currentanime(self, message):
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token, 'status': 'Currently Airing',
'sort': 'popularity-desc', 'year': '2016'}
url = self.apiurl + "/browse/anime"
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
jsd = await response.json()
msg = "Currently Airing Popular Anime ->\n"
for item in jsd[0:10]:
msg += " {0}: {1}\n".format(item['title_english'], item['id'])
await self.bot.send_message(message.channel, msg)
async def searchmanga(self, message):
name = "%20".join(message.content.split(" ")[1:])
if name.isdigit():
await self.bot.send_message(message.channel, await self.parsemanga(name))
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/manga/search/" + name.replace(' ', '%20')
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if response.text() == "\n" or response.status == 404:
await self.bot.send_message(message.channel, "[ANILIST] No results found for {0} in Manga.".format(name))
else:
jsd = json.loads(text)
if 'error' in jsd:
await self.bot.send_message(message.channel, "[ANILIST] No results found for {0} in Manga.".format(name))
else:
if len(jsd) == 1:
await self.bot.send_message(message.channel, await self.parsemanga(jsd[0]['id']))
elif len(jsd) > 1:
msg = "Found these Manga ->\n"
for i in jsd:
msg += " {0} (ID: {1})\n".format(i['title_english'], i['id'])
await self.bot.send_message(message.channel, msg)
async def parsemanga(self, id):
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/manga/" + str(id)
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
jsd = await response.json()
return "Titles\n English: {0}\n Romaji: {1}\n Japanese: {2}\nStatus: {3}\nLength: {4} volumes and {5} chapters\nAverage Score: {6}\nGenres: {7}\nDescriptions: {8}\n{9}".format(
jsd['title_english'], jsd['title_romaji'], jsd['title_japanese'],
jsd['publishing_status'], jsd['total_volumes'], jsd['total_chapters'],
jsd['average_score'], ','.join(jsd['genres']), jsd['description'].replace('<br>', '\n'),
jsd['image_url_med'])
async def get_user(self, message):
url = self.apiurl + "/user"
if len(message.content.split(" ")) == 1:
key = await self.get_oauth(message.author.id)
if key == 0:
await self.bot.send_message(message.channel, "I can't pull your details from AniList because you haven't verified your account. PM me about anilist to do that.")
else:
header = self.headers
header['Authorization'] = 'Bearer {0}'.format(key)
with aiohttp.ClientSession() as session:
async with session.get(url, headers=header) as response:
text = await response.text()
if response.status == 404 or text == "\n":
await self.bot.send_message(message.channel, "Anilist says you don't exist.")
else:
jsd = json.loads(text)
if 'about' in jsd and jsd['about']:
about = await profile_preprocessor(jsd['about'])
else:
about = "No about for this user."
ret = "{0} ({1})\n{2} Pending Notifications.\n{3}\n\nI've spent {4} on Anime and read {5} Manga Chapters.\n{6}".format(jsd['display_name'], jsd['id'], jsd['notifications'], about, str(timedelta(minutes=jsd['anime_time'])), jsd['manga_chap'], jsd['image_url_lge'])
if len(ret) > 2000:
await self.bot.send_message(message.channel, "{0} ({1})\n{2} Pending Notifications.\n{3}\n\nI've spent {4} on Anime and read {5} Manga Chapters.\n{6}".format(jsd['display_name'], jsd['id'], jsd['notifications'], "Attempt to parse novel failed. Visit <http://anilist.co/user/{0}> to view about section.".format(jsd['display_name']), str(timedelta(minutes=jsd['anime_time'])), jsd['manga_chap'], jsd['image_url_lge']))
else:
await self.bot.send_message(message.channel, ret)
else:
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = url + "/" + name
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
if response.status in [403, 401]:
await self.bot.send_message(message.channel, "Your profile is private.")
elif response.status == 404:
await self.bot.send_message(message.channel, "No user found by name {0}".format(name))
else:
text = await response.text()
if text == "\n":
await self.bot.send_message(message.channel, "No user found by name {0}".format(name))
else:
jsd = json.loads(text)
if 'about' in jsd and jsd['about']:
about = await profile_preprocessor(jsd['about'])
else:
about = "No about for this user."
ret = "{0} ({1})\n{2}\n\nI've spent {3} on Anime and read {4} Manga Chapters.\n{5}".format(jsd['display_name'], jsd['id'], about, str(timedelta(minutes=jsd['anime_time'])), jsd['manga_chap'], jsd['image_url_lge'])
if len(ret) > 2000:
await self.bot.send_message(message.channel, "{0} ({1})\n{2} Pending Notifications.\n{3}\n\nI've spent {4} on Anime and read {5} Manga Chapters.\n{6}".format(jsd['display_name'], jsd['id'], jsd['notifications'], "Attempt to parse novel failed. Visit <http://anilist.co/user/{0}> to view about section.".format(jsd['display_name']), str(timedelta(minutes=jsd['anime_time'])), jsd['manga_chap'], jsd['image_url_lge']))
else:
await self.bot.send_message(message.channel, ret)
async def get_notifications(self, message):
url = self.apiurl + "/user/notifications"
key = await self.get_oauth(message.author.id)
if key == 0:
await self.bot.send_message(message.channel, "Notifications require you to verify your account with Oauth. PM me about anilist to do that.")
else:
header = self.headers
header['Authorization'] = 'Bearer {0}'.format(key)
with aiohttp.ClientSession() as session:
async with session.get(url, headers=header) as response:
text = await response.text()
if text == "\n" or response.status == 404:
await self.bot.send_message(message.channel, "Something went wrong. I wasn't able to get your notifications.")
else:
jsd = json.loads(text)
msg = "Notifications ->\n"
for item in jsd:
print(item.keys())
msg += "{0}({1}) {2}".format(item['user']['display_name'], item['user']['id'], item['value'])
if 'thread' in item:
msg += " {0}({1})".format(item['thread']['title'], item['thread']['id'])
msg += "\n"
await self.bot.send_message(message.channel, msg)
async def follow_user(self, message):
url = self.apiurl + "/user/follow"
key = await self.get_oauth(message.author.id)
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need a user id.")
else:
uid = "%20".join(message.content.split(" ")[1:])
if not uid.isdigit():
uid = await self.get_user_id(uid)
if uid == 0:
await self.bot.send_message(message.channel, "Couldn't narrow that down to one user.")
header = self.headers
header['Authorization'] = 'Bearer {0}'.format(key)
with aiohttp.ClientSession() as session:
async with session.post(url, headers=header, data=json.dumps({'id': uid})) as response:
text = await response.text()
if response.text == "\n" or response.status == 404:
await self.bot.send_message(message.channel, "Encountered an error following that user.")
elif response.status in [401, 403]:
await self.bot.send_message(message.channel, "I'm not authorized to follow that user for you.")
else:
await self.bot.send_message(message.channel, "You are now following that user.")
async def user_search(self, message):
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/user/search/{0}".format(" ".join(message.content.split(" ")[1:])).replace(" ", "%20")
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if response.status == 404 or text == "\n":
await self.bot.send_message(message.channel, "No users found.")
elif response.status in [403, 401]:
await self.bot.send_message(message.channel, "Bot is not authorized.")
else:
jsd = json.loads(text)
if isinstance(jsd, list):
msg = "Found {0} Users. Here are the first few. =>\n".format(len(jsd))
for x in jsd[0:10]:
msg += " {0} ({1})\n".format(x['display_name'], x['id'])
await self.bot.send_message(message.channel, msg)
else:
await self.bot.send_message(message.channel, "{0} ({1})".format(jsd['display_name'], jsd['id']))
async def get_user_id(self, id):
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
url = self.apiurl + "/user/search/{0}".format(id)
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
text = await response.text()
if response.status == 404 or text == "\n":
return 0
elif response.status in [403, 401]:
return 0
else:
jsd = json.loads(text)
if isinstance(jsd, list) and len(jsd) == 1:
return jsd[0]['id']
elif isinstance(jsd, list) and len(jsd) > 1:
return 0
else:
return jsd['id']
async def get_anime_list(self, message):
url = self.apiurl + "/user/{0}/animelist"
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "You didn't provide a username.")
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(url.format(name), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "User not found")
elif response.status in [403, 401]:
await self.bot.send_message(message.channel, "Access denied for that user's AnimeList. Must be private.")
jsd = await response.json()
jsd = jsd['lists'].get('completed', None)
if not jsd:
await self.bot.send_message(message.channel, "That user has no completed anime.")
msg = "{} has watched ->\n".format(name.replace("%20", " ").capitalize())
if len(jsd) > 20:
pids = []
while len(pids) < 20:
randid = random.randint(0, len(jsd)-1)
if randid not in pids:
pids.append(randid)
for x in pids:
msg += " {}({}) - {}/{} Episodes Watched and {}\n".format(jsd[x]['anime']['title_english'],
jsd[x]['anime']['id'],
jsd[x]['episodes_watched'],
jsd[x]['anime']['total_episodes'],
'scored it {}'.format(jsd[x]['score_raw']) if jsd[x]['score_raw'] != 0 else "not scored.")
else:
for x in jsd:
msg += " {}({}) - {}/{} Chapters Read and {}\n".format(x['manga']['title_english'],
x['manga']['id'],
x['chapters_read'],
x['manga']['total_chapters'],
"scored it {}".format(jsd[x]['score_raw']) if jsd[x]['score_raw'] != 0 else 'not scored.')
await self.bot.send_message(message.channel, msg)
async def get_manga_list(self, message):
url = self.apiurl + "/user/{0}/mangalist"
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "You didn't provide a username.")
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(url.format(name), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "User not found.")
elif response.status in [403, 401]:
await self.bot.send_message(message.channel, "Permission denied for that user's MangaList.")
jsd = await response.json()
jsd = jsd['lists'].get('completed', None)
if not jsd:
await self.bot.send_message(message.channel, "That user has no completed Manga.")
msg = "{} has read ->\n".format(name.replace("%20", " ").capitalize())
if len(jsd) > 20:
pids = []
while len(pids) < 20:
randid = random.randint(0, len(jsd)-1)
if randid not in pids:
pids.append(randid)
for x in pids:
msg += " {}({}) - {}/{} Chapters Read and {}\n".format(jsd[x]['manga']['title_english'],
jsd[x]['manga']['id'],
jsd[x]['chapters_read'],
jsd[x]['manga']['total_chapters'],
"scored it {}".format(jsd[x]['score_raw']) if jsd[x]['score_raw'] != 0 else 'not scored.')
else:
for x in jsd:
msg += " {}({}) - {}/{} Chapters Read and {}\n".format(x['manga']['title_english'],
x['manga']['id'],
x['chapters_read'],
x['manga']['total_chapters'],
"scored it {}".format(x['score_raw']) if x['score_raw'] != 0 else 'not scored.')
await self.bot.send_message(message.channel, msg)
async def mark_to_watch(self, message):
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need an anime.")
name = "%20".join(message.content.split(" ")[1:])
if name.isdigit():
url = self.apiurl + "/anime/{}".format(name)
else:
url = self.apiurl + "/anime/search/{}".format(name)
key = await self.get_oauth(message.author.id)
if key == 0:
await self.bot.send_message(message.channel, "This requires OAuth permission for your account. PM me about Anilist to start that.")
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "No anime by that name.")
jsd = await response.json()
if isinstance(jsd, list):
jsd = jsd[0]
payload = {'id': jsd['id'], 'list_status': 'plan to watch'}
header = self.headers
header['Authorization'] = 'Bearer {0}'.format(key)
async with session.post(self.apiurl+"/animelist", headers=header, params=payload) as response:
if response.status in [403, 401]:
await self.bot.send_message(message.channel, "The bot wasn't authorized to take that action for you.")
elif response.status == 200:
await self.bot.send_message(message.channel, "I marked {} as Plan to Watch for you.".format(jsd['title_english']))
async def mark_to_read(self, message):
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need a manga.")
name = "%20".join(message.content.split(" ")[1:])
if name.isdigit():
url = self.apiurl + "/manga/{}".format(name)
else:
url = self.apiurl + "/manga/search/{}".format(name)
key = await self.get_oauth(message.author.id)
if key == 0:
await self.bot.send_message(message.channel, "This requires OAuth permission for your account. PM me about Anilist to start that.")
async with self.pools.get() as pool:
token = await pool.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(url, params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "No manga by that name.")
jsd = await response.json()
if isinstance(jsd, list):
jsd = jsd[0]
payload = {'id': jsd['id'], 'list_status': 'plan to read'}
header = self.headers
header['Authorization'] = 'Bearer {0}'.format(key)
async with session.post(self.apiurl+"/mangalist", headers=header, params=payload) as response:
if response.status in [403, 401]:
await self.bot.send_message(message.channel, "The bot wasn't authorized to take that action for you.")
elif response.status == 200:
await self.bot.send_message(message.channel, "I marked {} as Plan to Read for you.".format(jsd['title_english']))
async def mark_one_up(self, message):
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need to tell me what you read or watched.")
type = message.content.split(" ")[1].split(":")[0]
what = message.content.split(":")[1].replace(" ", "%20")
key = await self.get_oauth(message.author.id)
if key == 0:
await self.bot.send_message(message.channel, "This requires you to authenticate your account. PM me about anilist to do that.")
header = self.headers
header['Authorization'] = 'Bearer {}'.format(key)
uid = 0
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(self.apiurl+"/user", headers=header) as userdata:
jsd = await userdata.json()
uid = jsd['id']
if what.isdigit():
if type == "m":
async with session.get(self.apiurl+"/user/{}/mangalist".format(uid), params=data) as response:
if response.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access your manga list.")
current, maxc = 0, 0
jsd = await response.json()
title = 0
for x in jsd['lists']['reading']:
if str(x['manga']['id']) == what:
current = x['chapters_read']
maxc = x['manga']['total_chapters'] if x['manga']['publishing_status'] in ['finished', 'cancelled'] else 0
title = x['manga']['title_english']
break
payload = {'id': what, 'list_status': 'reading' if current+1 < maxc or maxc == 0 else 'completed', 'chapters_read': current+1}
async with session.put(self.apiurl+"/mangalist", headers=header, params=payload) as mangalist:
if mangalist.status in [403, 401]:
await self.bot.send_message(message.channel, "I wasn't authorized to modify that list item.")
elif mangalist.status == 200:
await self.bot.send_message(message.channel, "+1'd {}. Now {}.".format(title, "at {} Chapters read.".format(current+1) if payload['list_status'] == 'reading' else 'Completed!'))
elif type == "a":
async with session.get(self.apiurl + "/user/{}/animelist".format(uid), params=data) as response:
if response.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access your anime list.")
current, maxc = 0, 0
jsd = await response.json()
title = 0
for x in jsd['lists']['watching']:
if str(x['anime']['id']) == what:
current = x['episodes_watched']
maxc = x['anime']['total_episodes'] if x['anime']['airing_status'] in ['finished airing', 'cancelled'] else 0
title = x['anime']['title_english']
break
payload = {'id': what, 'list_status': 'watching' if current + 1 < maxc or maxc == 0 else 'completed', 'episodes_watched': current + 1}
async with session.put(self.apiurl + "/animelist", headers=header, params=payload) as animelist:
if animelist.status in [403, 401]:
await self.bot.send_message(message.channel, "I wasn't authorized to modify that list item.")
elif animelist.status == 200:
await self.bot.send_message(message.channel, "+1'd {}. Now {}.".format(title, "at {} Episodes watched".format(current+1) if payload['list_status'] == 'watching' else "Completed"))
else:
await self.bot.send_message(message.channel, "Unknown type {}. Must be a(nime) or m(anga).".format(type))
else:
if type == 'm':
async with session.get(self.apiurl+"/manga/search/{}".format(what), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "Couldn't find a manga by the name {}".format(what.replace("%20", " ")))
else:
jsd = await response.json()
if len(jsd) > 1:
await self.bot.send_message(message.channel, "That search matched {} results. Please use an ID or add more terms to narrow the result.".format(len(jsd)))
else:
mid = jsd[0]['id']
maxc = jsd[0]['total_chapters'] if jsd[0]['publishing_status'] in ['finished', 'cancelled'] else 0
title = jsd[0]['title_english']
current = 0
async with session.get(self.apiurl+"/user/{}/mangalist".format(uid), headers=header) as mangalist:
if mangalist.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access your manga list.")
jsd = await mangalist.json()
for x in jsd['lists']['reading']:
if x['manga']['id'] == mid:
current = x['chapters_read']
break
payload = {'id': mid, 'list_status': 'reading' if current + 1 < maxc or maxc == 0 else 'completed', 'chapters_read': current + 1}
async with session.put(self.apiurl + "/mangalist", headers=header, params=payload) as markup:
if markup.status in [403, 401]:
await self.bot.send_message(message.channel, "I wasn't authorized to modify that list item.")
elif markup.status == 200:
await self.bot.send_message(message.channel, "+1'd {}. Now {}.".format(title, "at {} Chapters read".format(current + 1) if payload['list_status'] == 'reading' else "Completed"))
elif type == 'a':
async with session.get(self.apiurl + "/anime/search/{}".format(what), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "Couldn't find an anime by the name {}".format(what.replace("%20", " ")))
else:
jsd = await response.json()
if len(jsd) > 1:
await self.bot.send_message(message.channel, "That search matched {} results. Please use an ID or add more terms to narrow the result.".format(len(jsd)))
else:
mid = jsd[0]['id']
maxc = jsd[0]['total_episodes'] if jsd[0]['airing_status'] in ['finished airing', 'cancelled'] else 0
title = jsd[0]['title_english']
current = 0
async with session.get(self.apiurl + "/user/{}/animelist".format(uid), headers=header) as animelist:
if animelist.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access your anime list.")
jsd = await animelist.json()
for x in jsd['lists']['watching']:
if x['anime']['id'] == mid:
current = x['episodes_watched']
break
payload = {'id': mid, 'list_status': 'watching' if current + 1 < maxc or maxc == 0 else 'completed', 'episodes_watched': current + 1}
async with session.put(self.apiurl + "/animelist", headers=header, params=payload) as markup:
if markup.status in [403, 401]:
await self.bot.send_message(message.channel, "I wasn't authorized to modify that list item.")
elif markup.status == 200:
await self.bot.send_message(message.channel, "+1'd {}. Now {}.".format(title, "at {} Episodes watched".format(current + 1) if payload['list_status'] == 'watching' else "Completed"))
else:
await self.bot.send_message(message.channel, "Unknown type {}. Must be a(nime) or m(anga).".format(type))
async def get_watching(self, message):
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need a username.")
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(self.apiurl+"/user/{}/animelist".format(name), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "No user by that name or ID.")
elif response.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access that user's list.")
else:
jsd = await response.json()
jsd = jsd['lists']['watching']
pids = []
msg = "{} is currently watching ->\n".format(name.replace("%20", " "))
if len(jsd) > 20:
while len(pids) < 20:
randid = random.randint(0, len(jsd)-1)
if randid not in pids:
pids.append(randid)
for x in pids:
msg += " {}({}) - Last Episode watched was {}\n".format(jsd[x]['anime']['title_english'], jsd[x]['anime']['id'], jsd[x]['episodes_watched'])
await self.bot.send_message(message.channel, msg)
elif len(jsd) == 0:
await self.bot.send_message(message.channel, "Not watching any Anime.")
else:
for x in jsd:
msg += " {}({}) - Last Episode watched was {}\n".format(x['anime']['title_english'], x['anime']['id'], x['episodes_watched'])
await self.bot.send_message(message.channel, msg)
async def get_reading(self, message):
if len(message.content.split(" ")) == 1:
await self.bot.send_message(message.channel, "Need a username.")
name = "%20".join(message.content.split(" ")[1:])
async with self.pools.get() as dbp:
token = await dbp.hget("ALReadOnly", "AccessToken")
data = {'access_token': token}
with aiohttp.ClientSession() as session:
async with session.get(self.apiurl + "/user/{}/mangalist".format(name), params=data) as response:
if response.status == 404:
await self.bot.send_message(message.channel, "No user by that name or ID.")
elif response.status in [403, 401]:
await self.bot.send_message(message.channel, "Not authorized to access that user's list.")
else:
jsd = await response.json()
jsd = jsd['lists']['reading']
pids = []
msg = "{} is currently reading ->\n".format(name.replace("%20", " "))
if len(jsd) > 20:
while len(pids) < 20:
randid = random.randint(0, len(jsd) - 1)
if randid not in pids:
pids.append(randid)
for x in pids:
msg += " {}({}) - Last Chapter read was {}\n".format(jsd[x]['manga']['title_english'], jsd[x]['manga']['id'], jsd[x]['chapters_read'])
await self.bot.send_message(message.channel, msg)
elif len(jsd) == 0:
await self.bot.send_message(message.channel, "Not reading any Manga.")
else:
for x in jsd:
msg += " {}({}) - Last Chapter read was {}\n".format(x['manga']['title_english'], x['manga']['id'], x['chapters_read'])
await self.bot.send_message(message.channel, msg)
| {
"content_hash": "8cbdf4205806abff87a07249b258ddf9",
"timestamp": "",
"source": "github",
"line_count": 724,
"max_line_length": 452,
"avg_line_length": 68.0207182320442,
"alnum_prop": 0.4691859402603204,
"repo_name": "ccubed/AngelBot",
"id": "3ec0f9c19a1e18e785d729734c215e2880c430f6",
"size": "49247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "169432"
},
{
"name": "HTML",
"bytes": "24325"
},
{
"name": "Python",
"bytes": "174747"
}
],
"symlink_target": ""
} |
from common_fixtures import * # NOQA
import os
if_upgrade_testing = pytest.mark.skipif(
os.environ.get("UPGRADE_TESTING") != "true",
reason='UPGRADE_TESTING is not true')
pre_upgrade_namespace = ""
post_upgrade_namespace = ""
pre_port_ext = ""
post_port_ext = ""
@pytest.fixture(scope='session')
def get_env():
global pre_upgrade_namespace
global post_upgrade_namespace
global pre_port_ext
global post_port_ext
pre_upgrade_namespace = os.environ.get("PRE_UPGRADE_NAMESPACE")
post_upgrade_namespace = os.environ.get("POST_UPGRADE_NAMESPACE")
pre_port_ext = os.environ.get("PRE_PORT_EXT")
post_port_ext = os.environ.get("POST_PORT_EXT")
@if_upgrade_testing
def test_pre_upgrade_validate_stack(kube_hosts, get_env):
input_config = {
"namespace": pre_upgrade_namespace,
"port_ext": pre_port_ext
}
k8s_create_stack(input_config)
k8s_validate_stack(input_config)
@if_upgrade_testing
def test_post_upgrade_validate_stack(kube_hosts, get_env):
# Validate pre upgrade stack after the upgrade
input_config = {
"namespace": pre_upgrade_namespace,
"port_ext": pre_port_ext
}
k8s_validate_stack(input_config)
k8s_modify_stack(input_config)
# Create and validate new stack on the upgraded setup
input_config = {
"namespace": post_upgrade_namespace,
"port_ext": post_port_ext
}
k8s_create_stack(input_config)
k8s_validate_stack(input_config)
| {
"content_hash": "cae2511a23f21106d272c9592797716b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 69,
"avg_line_length": 28.48076923076923,
"alnum_prop": 0.6772451046590142,
"repo_name": "rancherio/validation-tests",
"id": "0f27f70f7cc3b87ac20af21aea9d62bcb3c1352a",
"size": "1481",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/v2_validation/cattlevalidationtest/core/test_k8s_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1383015"
},
{
"name": "Shell",
"bytes": "4069"
}
],
"symlink_target": ""
} |
class Order:
"""Allows ordering arbitrary objects with regard to arbitrary binary relation.
The primary use case is the gcc toolset, which is sensitive to
library order: if library 'a' uses symbols from library 'b',
then 'a' must be present before 'b' on the linker's command line.
This requirement can be lifted for gcc with GNU ld, but for gcc with
Solaris LD (and for Solaris toolset as well), the order always matters.
So, we need to store order requirements and then order libraries
according to them. It is not possible to use the dependency graph as
order requirements: what we need is a "use symbols" relationship,
while the dependency graph provides a "needs to be updated" relationship.
For example::
lib a : a.cpp b;
lib b ;
For static linking, the 'a' library need not depend on 'b'. However, it
still should come before 'b' on the command line.
"""
def __init__ (self):
self.constraints_ = []
def add_pair (self, first, second):
""" Adds the constraint that 'first' should precede 'second'.
"""
self.constraints_.append ((first, second))
def order (self, objects):
""" Given a list of objects, reorder them so that the constains specified
by 'add_pair' are satisfied.
The algorithm was adopted from an awk script by Nikita Youshchenko
(yoush at cs dot msu dot su)
"""
# The algorithm used is the same is standard transitive closure,
# except that we're not keeping in-degree for all vertices, but
# rather removing edges.
result = []
if not objects:
return result
constraints = self.__eliminate_unused_constraits (objects)
# Find some library that nobody depends upon and add it to
# the 'result' array.
obj = None
while objects:
new_objects = []
while objects:
obj = objects [0]
if self.__has_no_dependents (obj, constraints):
# Emulate break ;
new_objects.extend (objects [1:])
objects = []
else:
new_objects.append (obj)
obj = None
objects = objects [1:]
if not obj:
raise BaseException ("Circular order dependencies")
# No problem with placing first.
result.append (obj)
# Remove all constraints where 'obj' comes first,
# since they are already satisfied.
constraints = self.__remove_satisfied (constraints, obj)
# Add the remaining objects for further processing
# on the next iteration
objects = new_objects
return result
def __eliminate_unused_constraits (self, objects):
""" Eliminate constraints which mention objects not in 'objects'.
In graph-theory terms, this is finding subgraph induced by
ordered vertices.
"""
result = []
for c in self.constraints_:
if c [0] in objects and c [1] in objects:
result.append (c)
return result
def __has_no_dependents (self, obj, constraints):
""" Returns true if there's no constraint in 'constraints' where
'obj' comes second.
"""
failed = False
while constraints and not failed:
c = constraints [0]
if c [1] == obj:
failed = True
constraints = constraints [1:]
return not failed
def __remove_satisfied (self, constraints, obj):
result = []
for c in constraints:
if c [0] != obj:
result.append (c)
return result
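# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the Order API described in the class docstring:
# add_pair() records "first must precede second" constraints and order()
# returns the objects rearranged so that every constraint is satisfied.
if __name__ == "__main__":
    o = Order()
    o.add_pair("a", "b")   # library 'a' uses symbols from 'b', so 'a' comes first
    o.add_pair("b", "c")
    print(o.order(["c", "b", "a"]))   # prints ['a', 'b', 'c']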
| {
"content_hash": "ca3cf88f2b50b4b6f8120a1da39ba80f",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 82,
"avg_line_length": 34.99145299145299,
"alnum_prop": 0.538104543234001,
"repo_name": "MisterTea/HyperNEAT",
"id": "576229fe90f9a26b47bbdbbcfb0285dc313723e9",
"size": "4322",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "boost_1_57_0/tools/build/src/util/order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Ada",
"bytes": "91920"
},
{
"name": "Assembly",
"bytes": "324212"
},
{
"name": "Batchfile",
"bytes": "32748"
},
{
"name": "Bison",
"bytes": "10393"
},
{
"name": "C",
"bytes": "4172510"
},
{
"name": "C#",
"bytes": "97576"
},
{
"name": "C++",
"bytes": "163699928"
},
{
"name": "CLIPS",
"bytes": "7056"
},
{
"name": "CMake",
"bytes": "92433"
},
{
"name": "CSS",
"bytes": "248695"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "Fortran",
"bytes": "1387"
},
{
"name": "Gnuplot",
"bytes": "2361"
},
{
"name": "Groff",
"bytes": "15745"
},
{
"name": "HTML",
"bytes": "145331688"
},
{
"name": "IDL",
"bytes": "15"
},
{
"name": "JFlex",
"bytes": "1290"
},
{
"name": "JavaScript",
"bytes": "134468"
},
{
"name": "Makefile",
"bytes": "1053202"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Module Management System",
"bytes": "1593"
},
{
"name": "Objective-C",
"bytes": "33988"
},
{
"name": "Objective-C++",
"bytes": "214"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Pascal",
"bytes": "41721"
},
{
"name": "Perl",
"bytes": "30505"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "PostScript",
"bytes": "81121"
},
{
"name": "Python",
"bytes": "1943687"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "7148"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "SAS",
"bytes": "1776"
},
{
"name": "Scilab",
"bytes": "107733"
},
{
"name": "Shell",
"bytes": "394881"
},
{
"name": "Tcl",
"bytes": "29403"
},
{
"name": "TeX",
"bytes": "1196144"
},
{
"name": "XSLT",
"bytes": "770994"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("/vagrant/dockerEE/service")
import os
from environment_emulation_runner import EnvironmentEmulationRunner
from service_daemon import ServiceDaemon
## TestService
#
# The test service daemon class
class TestService(ServiceDaemon):
## constructor
def __init__(self):
ServiceDaemon.__init__(self, "~/.dockerEE/test_service_service_daemon.pid")
## check counter for _getInstance
self.__counter = None
## check file whether _delApp is called
self.__check_del_app_file = None
## the implementation of application specific initialization before service loop
# @param self The object pointer
def _initApp(self):
# initialize check variable
self.__counter = 0
self.__check_del_app_file = file(os.path.expanduser("~/.dockerEE/test_service_service_daemon.check"), "w")
## the implementation of application specific destruction before service stop
# @param self The object pointer
def _delApp(self):
os.remove(self.__check_del_app_file.name)
## exposed method of getting counter
# @param self The object pointer
# @return counter
def getCount(self):
return self.__counter
## exposed method of counting up counter
# @param self The object pointer
def countUp(self):
self.__counter += 1
## the implementation of displaying status
# @param self The object pointer
# @return The status message
def getStatus(self):
return "counter = " + str(self._getInstance().getCount())
## the implementation of reloading
# @param self The object pointer
def reload(self):
self._getInstance().countUp()
if __name__ == "__main__":
service = EnvironmentEmulationRunner(TestService())
service.do_action()
| {
"content_hash": "3ccdbab856adf904e7f4103d10f22aba",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 114,
"avg_line_length": 35.372549019607845,
"alnum_prop": 0.6751662971175166,
"repo_name": "ynaka81/dockerEE",
"id": "e5db775d534541fb0a11ad8db674e33592d9f51b",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_case/service_service_daemon_stub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75324"
},
{
"name": "Ruby",
"bytes": "667"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from ceilometer import nova_client
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer import plugin
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('url_scheme',
default='snmp://',
help='URL scheme to use for hardware nodes'),
cfg.StrOpt('readonly_user_name',
default='ro_snmp_user',
help='SNMPd user name of all nodes running in the cloud.'),
cfg.StrOpt('readonly_user_password',
default='password',
help='SNMPd password of all the nodes running in the cloud.',
secret=True),
]
cfg.CONF.register_opts(OPTS, group='hardware')
class NodesDiscoveryTripleO(plugin.DiscoveryBase):
def __init__(self):
super(NodesDiscoveryTripleO, self).__init__()
self.nova_cli = nova_client.Client()
@staticmethod
def _address(instance, field):
return instance.addresses['ctlplane'][0].get(field)
def discover(self, manager, param=None):
"""Discover resources to monitor."""
instances = self.nova_cli.instance_get_all()
resources = []
for instance in instances:
try:
ip_address = self._address(instance, 'addr')
final_address = (
cfg.CONF.hardware.url_scheme +
cfg.CONF.hardware.readonly_user_name + ':' +
cfg.CONF.hardware.readonly_user_password + '@' +
ip_address)
resource = {
'resource_id': instance.id,
'resource_url': final_address,
'mac_addr': self._address(instance,
'OS-EXT-IPS-MAC:mac_addr'),
'image_id': instance.image['id'],
'flavor_id': instance.flavor['id']
}
resources.append(resource)
except KeyError:
LOG.error(_("Couldn't obtain IP address of"
"instance %s") % instance.id)
return resources
| {
"content_hash": "4c1fd0ce336f6b4410547ca4ae47a1cb",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 34.38095238095238,
"alnum_prop": 0.5433979686057249,
"repo_name": "luogangyi/Ceilometer-oVirt",
"id": "44a156824a98bb5a62b2e4f903d2be52825609e4",
"size": "2740",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/juno",
"path": "build/lib/ceilometer/hardware/discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5438675"
},
{
"name": "Shell",
"bytes": "1304"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('autotrader', '0016_markethistory'),
]
operations = [
migrations.RemoveField(
model_name='markethistory',
name='left',
),
migrations.RemoveField(
model_name='markethistory',
name='right',
),
migrations.AddField(
model_name='markethistory',
name='market',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='autotrader.Market'),
preserve_default=False,
),
]
| {
"content_hash": "697f07dfc95335183d7586d0ec9f4f13",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 123,
"avg_line_length": 26.428571428571427,
"alnum_prop": 0.5878378378378378,
"repo_name": "scealiontach/cryptotrading",
"id": "6dbf4ef8670efdbc61986e388316db3e00dadc7f",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/autotrader/migrations/0017_auto_20171019_1841.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14379"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Nix",
"bytes": "17356"
},
{
"name": "Python",
"bytes": "109439"
}
],
"symlink_target": ""
} |
import binascii
from os import urandom
from urllib.parse import urljoin
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from shop.models import order
class Order(order.BaseOrder):
"""Default materialized model for Order"""
number = models.PositiveIntegerField(
_("Order Number"),
null=True,
default=None,
unique=True,
)
shipping_address_text = models.TextField(
_("Shipping Address"),
blank=True,
null=True,
help_text=_("Shipping address at the moment of purchase."),
)
billing_address_text = models.TextField(
_("Billing Address"),
blank=True,
null=True,
help_text=_("Billing address at the moment of purchase."),
)
token = models.CharField(
_("Token"),
max_length=40,
editable=False,
null=True,
help_text=_("Secret key to verify ownership on detail view without requiring authentication."),
)
class Meta:
verbose_name = pgettext_lazy('order_models', "Order")
verbose_name_plural = pgettext_lazy('order_models', "Orders")
def get_or_assign_number(self):
"""
Set a unique number to identify this Order object. The first 4 digits represent the
current year. The last five digits represent a zero-padded incremental counter.
"""
if self.number is None:
epoch = timezone.now()
epoch = epoch.replace(epoch.year, 1, 1, 0, 0, 0, 0)
aggr = Order.objects.filter(number__isnull=False, created_at__gt=epoch).aggregate(models.Max('number'))
try:
epoc_number = int(str(aggr['number__max'])[4:]) + 1
self.number = int('{0}{1:05d}'.format(epoch.year, epoc_number))
except (KeyError, ValueError):
# the first order this year
self.number = int('{0}00001'.format(epoch.year))
return self.get_number()
def get_number(self):
number = str(self.number)
return '{}-{}'.format(number[:4], number[4:])
@classmethod
def resolve_number(cls, number):
bits = number.split('-')
return dict(number=''.join(bits))
def assign_secret(self):
self.token = binascii.hexlify(urandom(20)).decode()
return self.token
@property
def secret(self):
return self.token
def get_absolute_url(self):
url = super().get_absolute_url()
if self.token:
if not url.endswith('/'):
url += '/'
url = urljoin(url, self.token)
return url
def populate_from_cart(self, cart, request):
self.shipping_address_text = cart.shipping_address.as_text() if cart.shipping_address else ''
self.billing_address_text = cart.billing_address.as_text() if cart.billing_address else ''
# in case one of the addresses was None, the customer presumably intended the other one.
if not self.shipping_address_text:
self.shipping_address_text = self.billing_address_text
if not self.billing_address_text:
self.billing_address_text = self.shipping_address_text
super().populate_from_cart(cart, request)
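# Illustrative note (an addition, not part of the shipped model): get_or_assign_number()
# stores e.g. 201800001 for the first order of 2018, get_number() renders it as
# '2018-00001', and resolve_number() undoes that formatting:
#
#     Order.resolve_number('2018-00001')  # -> {'number': '201800001'}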
| {
"content_hash": "3710ec7a9bc55c779d549e78137d5769",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 115,
"avg_line_length": 34.134020618556704,
"alnum_prop": 0.6097855632739354,
"repo_name": "awesto/django-shop",
"id": "54c809d690afb934d3444cabb1538e367f398dca",
"size": "3311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/models/defaults/order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "107122"
},
{
"name": "JavaScript",
"bytes": "51946"
},
{
"name": "Python",
"bytes": "588560"
}
],
"symlink_target": ""
} |
import bob.ip.base
import numpy
import math
# create test image
image = numpy.zeros((21,21))
image[5:16, 5:16] = 1
# perform Sobel filtering
sobel = bob.ip.base.sobel(image)
# compute direction-independent and direction-dependent results
abs_sobel = numpy.sqrt(numpy.square(sobel[0]) + numpy.square(sobel[1]))
angle = 45.
rot_sobel = math.sin(angle*math.pi/180) * sobel[0] + math.cos(angle*math.pi/180) * sobel[1]
# plot results
from matplotlib import pyplot
pyplot.figure(figsize=(20,4))
pyplot.subplot(151) ; pyplot.imshow(image, cmap='gray') ; pyplot.title('Image')
pyplot.subplot(152) ; pyplot.imshow(sobel[0], cmap='gray') ; pyplot.title('Sobel - Y')
pyplot.subplot(153) ; pyplot.imshow(sobel[1], cmap='gray') ; pyplot.title('Sobel - X')
pyplot.subplot(154) ; pyplot.imshow(abs_sobel, cmap='gray') ; pyplot.title('Sobel - Abs')
pyplot.subplot(155) ; pyplot.imshow(rot_sobel, cmap='gray') ; pyplot.title('Sobel - %3.0f$^\circ$'%angle)
pyplot.show()
| {
"content_hash": "81a8ae8f264af0bb51d416537b655047",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 105,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.7064315352697096,
"repo_name": "tiagofrepereira2012/bob.ip.base",
"id": "dbd69fd0c2fbdfb63cb445a4dedbc6903dbbcede",
"size": "964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/plot/sobel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5269"
},
{
"name": "C++",
"bytes": "798081"
},
{
"name": "Python",
"bytes": "131858"
}
],
"symlink_target": ""
} |
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('ck.sitetheme')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| {
"content_hash": "904c7c24a1d887075aacbef7293b309a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.71875,
"repo_name": "a25kk/ck",
"id": "ebe3e6540e64f085139cdb3fc030148def179c09",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ck.sitetheme/ck/sitetheme/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "184097"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "289504"
},
{
"name": "JavaScript",
"bytes": "97539"
},
{
"name": "Makefile",
"bytes": "4773"
},
{
"name": "Python",
"bytes": "63764"
},
{
"name": "Shell",
"bytes": "3746"
}
],
"symlink_target": ""
} |
import IECore
import Gaffer
import GafferScene
## \todo Nice geometry
class ShaderBall( GafferScene.SceneNode ) :
def __init__( self, name = "ShaderBall" ) :
GafferScene.SceneNode.__init__( self, name )
# Public plugs
self["shader"] = GafferScene.ShaderPlug()
self["resolution"] = Gaffer.IntPlug( defaultValue = 512, minValue = 0 )
# Private internal network
self["__sphere"] = GafferScene.Sphere()
self["__sphere"]["type"].setValue( GafferScene.Sphere.Type.Primitive )
self["__camera"] = GafferScene.Camera()
self["__camera"]["transform"]["translate"]["z"].setValue( 3.5 )
self["__group"] = GafferScene.Group()
self["__group"]["in"][0].setInput( self["__sphere"]["out"] )
self["__group"]["in"][1].setInput( self["__camera"]["out"] )
self["__subTree"] = GafferScene.SubTree()
self["__subTree"]["in"].setInput( self["__group"]["out"] )
self["__subTree"]["root"].setValue( "/group" )
self["__shaderAssignment"] = GafferScene.ShaderAssignment()
self["__shaderAssignment"]["in"].setInput( self["__subTree"]["out"] )
self["__shaderAssignment"]["shader"].setInput( self["shader"] )
self["__options"] = GafferScene.StandardOptions()
self["__options"]["in"].setInput( self["__shaderAssignment"]["out"] )
self["__options"]["options"]["renderCamera"]["enabled"].setValue( True )
self["__options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
self["__options"]["options"]["renderResolution"]["enabled"].setValue( True )
self["__options"]["options"]["renderResolution"]["value"][0].setInput( self["resolution"] )
self["__options"]["options"]["renderResolution"]["value"][1].setInput( self["resolution"] )
self["__emptyScene"] = GafferScene.ScenePlug()
self["__enabler"] = GafferScene.SceneSwitch()
self["__enabler"]["in"][0].setInput( self["__emptyScene"] )
self["__enabler"]["in"][1].setInput( self["__options"]["out"] )
self["__enabler"]["enabled"].setInput( self["enabled"] )
self["__enabler"]["index"].setValue( 1 )
self["out"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
self["out"].setInput( self["__enabler"]["out"] )
## Internal plug which the final scene is connected into.
# Derived classes may insert additional nodes between this
# plug and its input to modify the scene.
def _outPlug( self ) :
return self["__enabler"]["in"][1]
IECore.registerRunTimeTyped( ShaderBall, typeName = "GafferScene::ShaderBall" )
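# Minimal usage sketch (assumed, not part of the module; "myShaderNode" is a
# hypothetical shader node):
#
#     ball = ShaderBall()
#     ball["shader"].setInput( myShaderNode["out"] )
#     ball["resolution"].setValue( 256 )
#     scene = ball["out"]  # renderable scene with the shader assigned to the sphere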
| {
"content_hash": "614a20ab41b355c2787f210e8ad6958d",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 93,
"avg_line_length": 37.246153846153845,
"alnum_prop": 0.6534489880214788,
"repo_name": "chippey/gaffer",
"id": "28d621a7447dd35fc2312de3e6ebb1b99804e970",
"size": "4224",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/GafferScene/ShaderBall.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2258"
},
{
"name": "C++",
"bytes": "5420141"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Objective-C",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "5348174"
},
{
"name": "Shell",
"bytes": "8370"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
} |
import logging
import time
import os
import sys
from ConfigParser import ConfigParser
from subprocess import Popen, PIPE
class JavaConfig:
    '''Enable access to properties in a java siteConfig file'''
def __init__(self, fname):
self.prop_d = {}
for line in open(fname):
line = line.strip();
if line.startswith('#') or len(line) == 0:
continue
pair = line.split('=')
if len(pair) != 2:
log.error("Invalid property (%s)" % line)
continue
self.prop_d[pair[0].strip()] = pair[1].strip()
def get(self, prop):
return self.prop_d[prop]
def file_len(fname):
i=0
for line in open(fname):
i += 1
return i
def runTest(testName, siteConfig, testDir, numNodes, fdata):
log('Stopping accumulo')
syscall('$ACCUMULO_HOME/bin/stop-all.sh')
log('Creating slaves file for this test')
slavesPath = siteConfig.get('SLAVES')
nodesPath = testDir+'/nodes/%d' % numNodes
syscall('head -n %d %s > %s' % (numNodes,slavesPath,nodesPath))
log('Copying slaves file to accumulo config')
syscall('cp '+nodesPath+' $ACCUMULO_CONF_DIR/slaves');
log('Removing /accumulo directory in HDFS')
syscall("hadoop fs -rmr /accumulo")
log('Initializing new Accumulo instance')
instance = siteConfig.get('INSTANCE_NAME')
passwd = siteConfig.get('PASSWORD')
syscall('printf "%s\nY\n%s\n%s\n" | $ACCUMULO_HOME/bin/accumulo init' % (instance, passwd, passwd))
log('Starting new Accumulo instance')
syscall('$ACCUMULO_HOME/bin/start-all.sh')
sleepTime = 30
if numNodes > 120:
sleepTime = int(numNodes / 4)
log('Sleeping for %d seconds' % sleepTime)
time.sleep(sleepTime)
log('Setting up %s test' % testName)
syscall('$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s setup %s' % (testName, numNodes))
log('Sleeping for 5 seconds')
time.sleep(5)
log('Starting %s clients' % testName)
numThreads = numNodes
if int(numNodes) > 128:
numThreads='128'
syscall('pssh -P -h %s -p %s "$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s client %s >/tmp/scale.out 2>/tmp/scale.err &" < /dev/null' % (nodesPath, numThreads, testName, numNodes))
log('Sleeping for 30 sec before checking how many clients started...')
time.sleep(30)
output = Popen(["hadoop fs -ls /accumulo-scale/clients"], stdout=PIPE, shell=True).communicate()[0]
num_clients = int(output.split()[1])
log('%s clients started!' % num_clients)
log('Waiting until %d clients finish.' % num_clients)
last = 0
done = 0
while done < num_clients:
time.sleep(5)
output = Popen(["hadoop fs -ls /accumulo-scale/results"], stdout=PIPE, shell=True).communicate()[0]
if not output:
sys.stdout.write('.')
sys.stdout.flush()
continue
done = int(output.split()[1])
if done != last:
sys.stdout.write('.%s' % done)
else:
sys.stdout.write('.')
sys.stdout.flush()
last = done
sys.stdout.flush()
log('\nAll clients are finished!')
log('Copying results from HDFS')
resultsDir = "%s/results/%s" % (testDir, numNodes)
syscall('hadoop fs -copyToLocal /accumulo-scale/results %s' % resultsDir)
log('Calculating results from clients')
times = []
totalMs = 0L
totalEntries = 0L
totalBytes = 0L
for fn in os.listdir(resultsDir):
for line in open('%s/%s' % (resultsDir,fn)):
words = line.split()
if words[0] == 'ELAPSEDMS':
ms = long(words[1].strip())
totalMs += ms
times.append(ms)
totalEntries += long(words[2].strip())
totalBytes += long(words[3].strip())
times.sort()
print times
numClients = len(times)
min = times[0] / 1000
avg = (float(totalMs) / numClients) / 1000
median = times[int(numClients/2)] / 1000
max = times[numClients-1] / 1000
log('Tservs\tClients\tMin\tAvg\tMed\tMax\tEntries\tMB')
log('%d\t%d\t%d\t%d\t%d\t%d\t%dM\t%d' % (numNodes, numClients, min, avg, median, max, totalEntries / 1000000, totalBytes / 1000000))
fdata.write('%d\t%d\t%d\t%d\t%d\t%d\t%dM\t%d\n' % (numNodes, numClients, min, avg, median, max, totalEntries / 1000000, totalBytes / 1000000))
fdata.flush()
time.sleep(5)
log('Tearing down %s test' % testName)
syscall('$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s teardown %s' % (testName, numNodes))
time.sleep(10)
def syscall(cmd):
log('> %s' % cmd)
os.system(cmd)
def run(cmd, **kwargs):
log.debug("Running %s", ' '.join(cmd))
handle = Popen(cmd, stdout=PIPE, **kwargs)
out, err = handle.communicate()
log.debug("Result %d (%r, %r)", handle.returncode, out, err)
return handle.returncode
def log(msg):
print msg
sys.stdout.flush()
def main():
if not os.getenv('ACCUMULO_HOME'):
        raise Exception('ACCUMULO_HOME needs to be set!')
if not os.getenv('ACCUMULO_CONF_DIR'):
os.environ['ACCUMULO_CONF_DIR'] = os.path.join(os.getenv('ACCUMULO_HOME'), 'conf')
if not os.getenv('HADOOP_HOME'):
        raise Exception('HADOOP_HOME needs to be set!')
if len(sys.argv) != 2:
log('Usage: run.py <testName>')
sys.exit()
testName = sys.argv[1]
logging.basicConfig(level=logging.DEBUG)
log('Creating test directory structure')
testDir = 'test-%d' % time.time()
nodesDir = testDir+'/nodes'
resultsDir = testDir+'/results'
syscall('mkdir %s' % testDir)
syscall('mkdir %s' % nodesDir)
syscall('mkdir %s' % resultsDir)
log('Removing current /accumulo-scale directory')
syscall('hadoop fs -rmr /accumulo-scale')
log('Creating new /accumulo-scale directory structure')
syscall('hadoop fs -mkdir /accumulo-scale')
syscall('hadoop fs -mkdir /accumulo-scale/clients')
syscall('hadoop fs -mkdir /accumulo-scale/results')
syscall('hadoop fs -chmod -R 777 /accumulo-scale')
log('Copying config to HDFS')
syscall('hadoop fs -copyFromLocal ./conf /accumulo-scale/conf')
siteConfig = JavaConfig('conf/site.conf');
slavesPath = siteConfig.get('SLAVES')
maxNodes = file_len(slavesPath)
fdata = open('%s/scale.dat' % testDir, 'w')
fdata.write('Tservs\tClients\tMin\tAvg\tMed\tMax\tEntries\tMB\n')
for numNodes in siteConfig.get('TEST_CASES').split(','):
log('Running %s test with %s nodes' % (testName, numNodes))
if int(numNodes) > maxNodes:
logging.error('Skipping %r test case as slaves file %r contains only %r nodes', numNodes, slavesPath, maxNodes)
continue
runTest(testName, siteConfig, testDir, int(numNodes), fdata)
sys.stdout.flush()
if __name__ == '__main__':
main()
| {
"content_hash": "d5c1b07bbf7ea4ed6dbbe3610ed47472",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 211,
"avg_line_length": 33.65217391304348,
"alnum_prop": 0.615848406546081,
"repo_name": "joshelser/accumulo",
"id": "df8654501ef2529555ebcb591b7a4a58d9265348",
"size": "7768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/system/scalability/run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17503"
},
{
"name": "C++",
"bytes": "1390022"
},
{
"name": "CSS",
"bytes": "8269"
},
{
"name": "Groovy",
"bytes": "1385"
},
{
"name": "Java",
"bytes": "16665425"
},
{
"name": "JavaScript",
"bytes": "249600"
},
{
"name": "Perl",
"bytes": "28118"
},
{
"name": "Python",
"bytes": "724582"
},
{
"name": "Ruby",
"bytes": "209507"
},
{
"name": "Shell",
"bytes": "167654"
},
{
"name": "TeX",
"bytes": "170280"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('openravepy'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig','sphinx.ext.todo','sphinx.ext.ifconfig','sphinx.ext.viewcode','sphinx.ext.autosummary','only_directives','gen_gallery','shellblock_directive','exampleblock_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'OpenRAVE'
copyright = u'2006-2012, Rosen Diankov and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
__builtins__['__openravepy_build_doc__'] = True
import openravepy
version = unicode(openravepy.__version__)
# The full version, including alpha/beta/rc tags.
#import pysvn
#entry=pysvn.Client().info('..')
#release = 'r%d'%entry.revision.number
release = ''
# relative to the source directory?
locale_dirs = ['../locale']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# exclude all svn cache
exclude_patterns=['**/.svn']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'#'trac' # sphinx
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['openravepy.','openravepy.databases.','openravepy.examples.','openravepy.interfaces.']
# for internationalization
# docs: http://sphinx.pocoo.org/latest/intl.html
# japanese tutorial: http://d.hatena.ne.jp/tk0miya/20111203
needs_sphinx='1.1'
autodoc_default_flags = ['members','show-inheritance','undoc-members']
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
#html_style='openrave_sphinx.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u'OpenRAVE ' + version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
favicon = 'openrave_icon_32.png'
html_favicon = 'openrave_icon_32.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# HTML translator class for the builder
#html_translator_class = "openravedocs.OpenRAVEHTMLTranslator"
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index':['localtoc.html','mailinglist.html','resources.html','sourcelink.html'],
'**':['localtoc.html','mailinglist.html','resources.html','sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}#'databases':'databases.html' }#,'examples':'examples.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'openravedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('openravepy', 'openrave.tex', u'OpenRAVE Documentation',
u'author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
pngmath_latex_preamble = """
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'sphinx': ('http://sphinx.pocoo.org/', None),
'numpy':('http://docs.scipy.org/doc/numpy',None),
}
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
import gettext
try:
if 'SPHINXLANGUAGE' in os.environ:
t = gettext.translation('openravesphinx','locale',languages=[os.environ['SPHINXLANGUAGE']])
else:
t = gettext.translation('openravesphinx','locale')
except IOError:
# no translation file found?
t=gettext.NullTranslations()
# added to the end of every file
rst_epilog = u"""
.. role:: red
"""
| {
"content_hash": "e92fca20de2f586544df8a3c28ac53a0",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 314,
"avg_line_length": 34.3034188034188,
"alnum_prop": 0.7106017191977078,
"repo_name": "jdsika/TUM_HOly",
"id": "7566f69a313285f542b937a20befc90aed9dc047",
"size": "8446",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openrave/docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "361"
},
{
"name": "C",
"bytes": "111973"
},
{
"name": "C#",
"bytes": "24641"
},
{
"name": "C++",
"bytes": "11966748"
},
{
"name": "CMake",
"bytes": "212392"
},
{
"name": "CSS",
"bytes": "2102"
},
{
"name": "HTML",
"bytes": "16213"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Matlab",
"bytes": "198171"
},
{
"name": "Modelica",
"bytes": "621"
},
{
"name": "Objective-C",
"bytes": "51576"
},
{
"name": "Python",
"bytes": "10053508"
},
{
"name": "Shell",
"bytes": "11963"
},
{
"name": "XSLT",
"bytes": "471726"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import acads.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Files',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_name', models.CharField(max_length=100, verbose_name='Topic')),
('files', models.FileField(upload_to=acads.models.get_file_path)),
('verified', models.BooleanField(default=False)),
('uploader_roll_no', models.CharField(max_length=100)),
('uploader_email', models.EmailField(max_length=254)),
('description', models.TextField()),
],
options={
'verbose_name': 'File',
'verbose_name_plural': 'Files',
},
),
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('department_code', models.CharField(max_length=2)),
('subject_code', models.CharField(max_length=7)),
('subject_name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='files',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='acads.Subject'),
),
]
| {
"content_hash": "d3d2850858ea0ce1f7d22beff963984d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 114,
"avg_line_length": 35.95652173913044,
"alnum_prop": 0.5532043530834341,
"repo_name": "Vipul999ujawane/acad-search",
"id": "ac7c97cd22d72c06f959d17caa95e9d4772b958d",
"size": "1725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acads/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1486"
},
{
"name": "HTML",
"bytes": "8338"
},
{
"name": "JavaScript",
"bytes": "404"
},
{
"name": "Python",
"bytes": "14193"
}
],
"symlink_target": ""
} |
from PIL.Image import Image
class VideoEncoder:
""" Video encoder interface. Implemented to provide a means of encoding frames to a video file. """
def get_options(self):
return None
def open(self, path, width, height):
""" Opens a stream to a file"""
        raise NotImplementedError
def write_frame(self, bitmap: Image, duration):
""" Writes a frame to the stream for the duration, in seconds """
        raise NotImplementedError
def finalize(self):
""" Finalize/close the stream """
        raise NotImplementedError
def set_framerate(self, fps):
        raise NotImplementedError
def is_ok(self):
""" Returns True if this stream is open and is not in an error state """
        raise NotImplementedError
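# A minimal sketch of an implementer (illustrative only; "ImageSequenceEncoder" and its
# behaviour are assumptions, not part of the package): it writes each frame as a numbered
# still image instead of encoding an actual video stream.
class ImageSequenceEncoder(VideoEncoder):
    def __init__(self):
        self.path = None
        self.frame = 0
        self.fps = 30
        self._ok = False
    def open(self, path, width, height):
        self.path = path
        self._ok = True
    def set_framerate(self, fps):
        self.fps = fps
    def write_frame(self, bitmap: Image, duration):
        # Persist one frame; duration is ignored because stills carry no timing.
        bitmap.save('{}_{:05d}.png'.format(self.path, self.frame))
        self.frame += 1
    def finalize(self):
        self._ok = False
    def is_ok(self):
        return self._ok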
| {
"content_hash": "3601a55a61ffa35f414874dd66f3321f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 24.677419354838708,
"alnum_prop": 0.6339869281045751,
"repo_name": "VISTAS-IVES/pyvistas",
"id": "48c64de650967b1cb23dbb0bbe469b9cc8f806cc",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/vistas/core/encoders/interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GLSL",
"bytes": "13189"
},
{
"name": "NSIS",
"bytes": "1775"
},
{
"name": "Python",
"bytes": "652431"
}
],
"symlink_target": ""
} |
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan ([email protected])'
from concurrent.futures.multithreaded import Future
from concurrent.futures.old.executor import Executor
import atexit
import queue
import threading
import weakref
import logging
logger = logging.getLogger(__package__)
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if self.future.cancelled():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as e:
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
logger.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def __call__(self, fn, *args, **kwargs):
"""Allows using as callback executor for futures."""
self.submit(fn, *args, **kwargs)
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = Executor.shutdown.__doc__
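# Usage sketch (an illustration added here, not part of the original module; assumes the
# Future class imported above exposes the usual result() accessor):
if __name__ == '__main__':
    executor = ThreadPoolExecutor(max_workers=4)
    futures = [executor.submit(pow, 2, n) for n in range(8)]
    print([f.result() for f in futures])
    executor.shutdown(wait=True)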
| {
"content_hash": "88c265a8b52cb6a6c9406cb23b5bf29d",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 81,
"avg_line_length": 32.034246575342465,
"alnum_prop": 0.5976053025443661,
"repo_name": "mikhtonyuk/rxpython",
"id": "dc494cb2e09d7a763547954973d637b37b43fff4",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concurrent/executors/thread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303672"
}
],
"symlink_target": ""
} |
from ec2.base.action import Action
from ec2.error import UndefinedError, MissingParameter
from ec2.helpers.parse import parseFilters
from ec2.helpers.filters import applyEc2Filters
"""@package src.ec2.region
EC2 actions for regions
@copyright Copyright (c) 2012 Institute of Nuclear Physics PAS <http://www.ifj.edu.pl/>
@author Oleksandr Gituliar <[email protected]>
@author Łukasz Chrząszcz <[email protected]>
"""
def getClusterManagers(endpoint, cluster_manager):
try:
base_URL = endpoint.split('.', 1)[1]
except IndexError:
raise UndefinedError
cluster_managers = cluster_manager.cloud_manager.cluster_managers()
return {
'cms': [{
'address': "%s.%s" % (cm.name, base_URL),
'name': cm.name,
} for cm in cluster_managers]
}
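# Illustrative result (made-up values): for endpoint 'cm1.cloud.example.org' and cluster
# managers named 'cm1' and 'cm2', this returns
#     {'cms': [{'address': 'cm1.cloud.example.org', 'name': 'cm1'},
#              {'address': 'cm2.cloud.example.org', 'name': 'cm2'}]}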
class DescribeRegions(Action):
def _execute(self):
try:
filters = parseFilters(self.parameters)
endpoint = self.parameters['Endpoint']
except KeyError:
raise MissingParameter(parameter='Endpoint')
cms = getClusterManagers(endpoint, self.cluster_manager)
cms = applyEc2Filters(cms, filters)
return cms
class DescribeAvailabilityZones(Action):
    def _execute(self):  # TODO: support filters
try:
endpoint = self.parameters['Endpoint']
except KeyError:
raise MissingParameter(parameter='Endpoint')
return getClusterManagers(endpoint, self.cluster_manager)
| {
"content_hash": "5e105ea08b1ed127181f3945503de89a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 27.436363636363637,
"alnum_prop": 0.6686547382372432,
"repo_name": "cc1-cloud/cc1",
"id": "8ea5a8d3ca42df6a62824c0e24fb39ddc74c0429",
"size": "2209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ec2/region.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "63829"
},
{
"name": "HTML",
"bytes": "323260"
},
{
"name": "JavaScript",
"bytes": "458924"
},
{
"name": "Python",
"bytes": "1466456"
},
{
"name": "Shell",
"bytes": "14317"
}
],
"symlink_target": ""
} |
import time  # imports the time module which allows for a pause in between printing
score = 0 #defines the user's score before starting
name = input("What is your name? ") #takes the user's name so it can be used elsewhere
print("Welcome to The Quiz, ",name,"\nWe'll be asking a series of general knowledge questions, \nplease answer all questions in lower case.") #introduces the user to the quiz on two seperate lines
time.sleep(1) #stops the program for one second
print("Question 1:\nWhich country features a maple leaf on its flag? ")
time.sleep(1)
print("A: Japan")
time.sleep(1)
print("B: Canada")
time.sleep(1)
print("C: France")
time.sleep(1)
answerOne = input("Make your choice: ")
if answerOne == "canada" or answerOne == "b":
print("Well done! You scored a point! ")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score ")
time.sleep(1)
answerTwo = input("Question 2:\nWhich planet did Superman come from?")
time.sleep(1)
if answerTwo == "krypton":
print("Well done! You scored a point! ")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score")
time.sleep(1)
answerThree = input("Question 3:\nHow many syllables make up a haiku? ")
time.sleep(1)
if answerThree == "five" or answerThree == "5":
print("Well done! You scored a point! ")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score")
time.sleep(1)
answerFour = input("Question4:\nWhen was IRN BRU first produced? ")
time.sleep(1)
if answerFour == "1901":
print("Well done! You scored a point!")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score")
time.sleep(1)
answerFive = input("Question 5:\nWhat is the Capital of Turkey? ")
time.sleep(1)
if answerFour == "ankara":
print("Correct! You scored a point!")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score")
time.sleep(1)
print("Question6:\nWho created facebook? ")
time.sleep(1)
print("A: Mark Zuckerberg")
time.sleep(1)
print("B: Steve Jobs")
time.sleep(1)
print("C: Bill Gates")
time.sleep(1)
answerSix = input("Make your choice: ")
if answerSix == "A" or answerSix == "mark zuckerberg" :
print ("Well done! You scored a point! ")
time.sleep(1)
score = score + 1
else:
print("Unlucky! You didnt score")
time.sleep(1)
print("Thats all the questions! your total score was...,")
time.sleep(2)
print(score,"!!!\nAMAZING!")
| {
"content_hash": "91850bc781a6a61fb2abd7a77ba94729",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 199,
"avg_line_length": 29.127906976744185,
"alnum_prop": 0.6622754491017964,
"repo_name": "AlexEaton1105/Computer-Science",
"id": "c9205c3cb4721ff4301c48ce35148c32323f1e16",
"size": "2604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "General Knowledge quiz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8816"
}
],
"symlink_target": ""
} |
from imports import *
def get_user_id(nick):
"""Get userid by nickname
Args:
nick: the nickname of the user
Returns:
the user id of the nickname
"""
user_id = db_session.query(User.user_id).filter_by(nick=nick).first()
return user_id[0]
def getNick():
"""Get nickname
Returns:
The nickname of current User
"""
nick = None
if 'user_id' in session:
result = db_session.query(User.nick).filter_by(
user_id=int(session['user_id'])).first()
nick = result[0]
return nick
def get_avatar():
"""Get avatar
Returns:
THe avatar of current user
"""
nick = getNick()
return db_session.query(User.photo).filter_by(nick=nick).first()[0] if nick is not None else None
# if nick is None:
# return None
# avatar = db_session.query(User.photo).filter_by(nick=nick).first()
# return avatar[0]
def get_role(user_id):
"""Get role by id
Args:
user_id: the id of the user
Returns:
The role of the user
"""
result = db_session.query(User.role).filter_by(user_id=user_id).first()
return result[0]
def get_user_by_nick(nick):
"""Get user by nickname
Args:
nick: the nickname of the user
Returns:
The user object
"""
result = db_session.query(User).filter_by(nick=nick).first()
return result
def examine_user_id(user_id):
"""Examine if user of user_id exists
Args:
user_id: the id of the user
Returns:
Whether user exists
"""
result = db_session.query(User).filter_by(user_id=user_id).all()
if len(result) > 0:
return True
else:
return False
| {
"content_hash": "707a32635b86384d3b0e47353637b411",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 101,
"avg_line_length": 19.22222222222222,
"alnum_prop": 0.5878612716763005,
"repo_name": "NewBeeStudio/xichao-new",
"id": "14a351818c70ce250f8d93b0c66017e3002c919d",
"size": "1754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xichao/packages/function/userinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "304421"
},
{
"name": "HTML",
"bytes": "614986"
},
{
"name": "JavaScript",
"bytes": "1455427"
},
{
"name": "PHP",
"bytes": "18819"
},
{
"name": "Python",
"bytes": "222994"
}
],
"symlink_target": ""
} |
"""Add room principals table
Revision ID: cbe630695800
Revises: 252c0015c9a0
Create Date: 2018-12-13 11:10:12.684382
"""
from __future__ import print_function
import json
import sqlalchemy as sa
from alembic import context, op
from sqlalchemy.dialects import postgresql
from indico.core.auth import multipass
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.db.sqlalchemy.protection import ProtectionMode
# revision identifiers, used by Alembic.
revision = 'cbe630695800'
down_revision = '252c0015c9a0'
branch_labels = None
depends_on = None
def _create_acl_entry(conn, room_id, user_id=None, group_id=None, mp_group_provider=None, mp_group_name=None,
full_access=False, permissions=()):
permissions = list(permissions)
if user_id is not None:
conn.execute('''
INSERT INTO roombooking.room_principals
(room_id, user_id, type, read_access, full_access, permissions) VALUES
(%s, %s, %s, false, %s, %s)
''', (room_id, user_id, PrincipalType.user.value, full_access, permissions))
elif group_id is not None:
conn.execute('''
INSERT INTO roombooking.room_principals
(room_id, local_group_id, type, read_access, full_access, permissions) VALUES
(%s, %s, %s, false, %s, %s)
''', (room_id, group_id, PrincipalType.local_group.value, full_access, permissions))
else:
conn.execute('''
INSERT INTO roombooking.room_principals
(room_id, mp_group_provider, mp_group_name, type, read_access, full_access, permissions) VALUES
(%s, %s, %s, %s, false, %s, %s)
''', (room_id, mp_group_provider, mp_group_name, PrincipalType.multipass_group.value, full_access, permissions))
def _get_attr_value(conn, room_id, attr_id):
query = 'SELECT value FROM roombooking.room_attribute_values WHERE room_id = %s AND attribute_id = %s'
return conn.execute(query, (room_id, attr_id)).scalar()
def _group_to_kwargs(group):
if multipass.default_group_provider:
return {'mp_group_provider': multipass.default_group_provider.name, 'mp_group_name': group}
else:
try:
return {'local_group_id': int(group)}
except ValueError:
# non-numeric group id
return None
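# For example (illustrative values): with a default multipass group provider named
# 'my-ldap', _group_to_kwargs('admins') returns
# {'mp_group_provider': 'my-ldap', 'mp_group_name': 'admins'}; without a provider,
# _group_to_kwargs('42') returns {'local_group_id': 42}, and a non-numeric group id
# returns None.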
def _upgrade_permissions():
conn = op.get_bind()
booking_group_attr_id = conn.execute('SELECT id FROM roombooking.room_attributes WHERE name = %s',
('allowed-booking-group',)).scalar()
manager_group_attr_id = conn.execute('SELECT id FROM roombooking.room_attributes WHERE name = %s',
('manager-group',)).scalar()
query = 'SELECT id, owner_id, reservations_need_confirmation, is_reservable FROM roombooking.rooms'
for room_id, owner_id, reservations_need_confirmation, is_reservable in conn.execute(query):
booking_group = manager_group = None
if booking_group_attr_id is not None:
booking_group = _get_attr_value(conn, room_id, booking_group_attr_id)
if manager_group_attr_id is not None:
manager_group = _get_attr_value(conn, room_id, manager_group_attr_id)
if not booking_group and is_reservable:
conn.execute('UPDATE roombooking.rooms SET protection_mode = %s WHERE id = %s',
(ProtectionMode.public.value, room_id))
if booking_group:
group_kwargs = _group_to_kwargs(booking_group)
if group_kwargs is None:
print('WARNING: Invalid booking group: {}'.format(booking_group))
else:
permission = 'prebook' if reservations_need_confirmation else 'book'
_create_acl_entry(conn, room_id, permissions={permission}, **group_kwargs)
if manager_group:
group_kwargs = _group_to_kwargs(manager_group)
if group_kwargs is None:
print('WARNING: Invalid manager group: {}'.format(manager_group))
else:
_create_acl_entry(conn, room_id, full_access=True, **group_kwargs)
# is_reservable==false used allow the room owner to book the room, which
# isn't the case anymore, so we mark all rooms as reservable.
# above we already kept non-reservable rooms as protected
conn.execute('UPDATE roombooking.rooms SET is_reservable = true')
def _create_attribute(conn, name, title):
res = conn.execute('''
INSERT INTO roombooking.room_attributes (name, title, is_hidden)
VALUES (%s, %s, false)
RETURNING id
''', (name, title))
return res.fetchone()[0]
def _set_attribute_value(conn, room_id, attribute_id, value):
res = conn.execute('SELECT value FROM roombooking.room_attribute_values WHERE room_id = %s AND attribute_id = %s',
(room_id, attribute_id))
if not res.rowcount:
conn.execute('''
INSERT INTO roombooking.room_attribute_values
(room_id, attribute_id, value) VALUES
(%s, %s, %s)
''', (room_id, attribute_id, json.dumps(value)))
elif res.scalar() != value:
conn.execute('''
UPDATE roombooking.room_attribute_values
SET value = %s
WHERE room_id = %s AND attribute_id = %s
''', (json.dumps(value), room_id, attribute_id))
def _downgrade_permissions():
conn = op.get_bind()
booking_group_attr_id = conn.execute('SELECT id FROM roombooking.room_attributes WHERE name = %s',
('allowed-booking-group',)).scalar()
manager_group_attr_id = conn.execute('SELECT id FROM roombooking.room_attributes WHERE name = %s',
('manager-group',)).scalar()
if booking_group_attr_id is None:
booking_group_attr_id = _create_attribute(conn, 'allowed-booking-group', 'Allowed Booking Group')
if manager_group_attr_id is None:
manager_group_attr_id = _create_attribute(conn, 'manager-group', 'Manager Group')
query = 'SELECT id, owner_id, protection_mode FROM roombooking.rooms'
for room_id, owner_id, protection_mode in conn.execute(query):
res = conn.execute('SELECT * FROM roombooking.room_principals WHERE room_id = %s', (room_id,))
if not res.rowcount and protection_mode == ProtectionMode.protected:
conn.execute('UPDATE roombooking.rooms SET is_reservable = false WHERE id = %s', (room_id,))
for row in res:
if row.type == PrincipalType.user and row.user_id == owner_id:
continue
if row.type == PrincipalType.local_group and not multipass.default_group_provider:
if row.full_access:
_set_attribute_value(conn, room_id, manager_group_attr_id, unicode(row.local_group_id))
if 'book' in row.permissions or 'prebook' in row.permissions:
_set_attribute_value(conn, room_id, booking_group_attr_id, unicode(row.local_group_id))
elif (row.type == PrincipalType.multipass_group and multipass.default_group_provider and
row.mp_group_provider == multipass.default_group_provider.name):
if row.full_access:
_set_attribute_value(conn, room_id, manager_group_attr_id, row.mp_group_name)
if 'book' in row.permissions or 'prebook' in row.permissions:
_set_attribute_value(conn, room_id, booking_group_attr_id, row.mp_group_name)
def upgrade():
if context.is_offline_mode():
raise Exception('This upgrade is only possible in online mode')
op.create_table(
'room_principals',
sa.Column('read_access', sa.Boolean(), nullable=False),
sa.Column('full_access', sa.Boolean(), nullable=False),
sa.Column('permissions', postgresql.ARRAY(sa.String()), nullable=False),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('room_id', sa.Integer(), nullable=False, index=True),
sa.Column('local_group_id', sa.Integer(), nullable=True, index=True),
sa.Column('mp_group_provider', sa.String(), nullable=True),
sa.Column('mp_group_name', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True, index=True),
sa.Column('type', PyIntEnum(PrincipalType, exclude_values={PrincipalType.email, PrincipalType.network,
PrincipalType.event_role}), nullable=False),
sa.CheckConstraint('NOT read_access', name='no_read_access'),
sa.CheckConstraint('read_access OR full_access OR array_length(permissions, 1) IS NOT NULL', name='has_privs'),
sa.CheckConstraint('type != 1 OR (local_group_id IS NULL AND mp_group_name IS NULL AND '
'mp_group_provider IS NULL AND user_id IS NOT NULL)', name='valid_user'),
sa.CheckConstraint('type != 2 OR (mp_group_name IS NULL AND mp_group_provider IS NULL AND user_id IS NULL AND '
'local_group_id IS NOT NULL)', name='valid_local_group'),
sa.CheckConstraint('type != 3 OR (local_group_id IS NULL AND user_id IS NULL AND mp_group_name IS NOT NULL AND '
'mp_group_provider IS NOT NULL)', name='valid_multipass_group'),
sa.ForeignKeyConstraint(['local_group_id'], ['users.groups.id']),
sa.ForeignKeyConstraint(['room_id'], ['roombooking.rooms.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
sa.PrimaryKeyConstraint('id'),
schema='roombooking'
)
op.create_index(None, 'room_principals', ['mp_group_provider', 'mp_group_name'], schema='roombooking')
op.create_index('ix_uq_room_principals_user', 'room_principals', ['user_id', 'room_id'], unique=True,
schema='roombooking', postgresql_where=sa.text('type = 1'))
op.create_index('ix_uq_room_principals_local_group', 'room_principals', ['local_group_id', 'room_id'], unique=True,
schema='roombooking', postgresql_where=sa.text('type = 2'))
op.create_index('ix_uq_room_principals_mp_group', 'room_principals',
['mp_group_provider', 'mp_group_name', 'room_id'], unique=True, schema='roombooking',
postgresql_where=sa.text('type = 3'))
op.add_column('rooms', sa.Column('protection_mode',
PyIntEnum(ProtectionMode, exclude_values={ProtectionMode.inheriting}),
nullable=False, server_default=unicode(ProtectionMode.protected.value)),
schema='roombooking')
_upgrade_permissions()
op.alter_column('rooms', 'protection_mode', server_default=None, schema='roombooking')
def downgrade():
if context.is_offline_mode():
raise Exception('This downgrade is only possible in online mode')
_downgrade_permissions()
op.drop_column('rooms', 'protection_mode', schema='roombooking')
op.drop_table('room_principals', schema='roombooking')
| {
"content_hash": "165419879890496c21faa3cfa3a14e36",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 120,
"avg_line_length": 52.61214953271028,
"alnum_prop": 0.6203037569944044,
"repo_name": "mvidalgarcia/indico",
"id": "7addcba8575254c522730836f989807af7a39efa",
"size": "11259",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "indico/migrations/versions/20181213_1110_cbe630695800_add_room_principals_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "538590"
},
{
"name": "HTML",
"bytes": "1345380"
},
{
"name": "JavaScript",
"bytes": "1781971"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4381847"
},
{
"name": "Shell",
"bytes": "3568"
},
{
"name": "TeX",
"bytes": "22182"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from kubernetes import simplejson
class TypeMeta(object):
'''A Class representing the TypeMeta structure used by the kubernetes API
TypeMeta is shared by all objects sent to, or returned from the client.
The TypeMeta structure exposes the following properties:
TypeMeta.Kind
TypeMeta.ID
TypeMeta.UID
TypeMeta.CreationTimestamp
TypeMeta.SelfLink
TypeMeta.ResourceVersion
TypeMeta.APIVersion
TypeMeta.Namespace
TypeMeta.Annotations
'''
def __init__(self, **kwargs):
'''An object to hold a Kubernetes TypeMeta.
Args:
Kind:
ID:
UID:
CreationTimestamp:
SelfLink:
ResourceVersion:
APIVersion:
Namespace:
Annotations:
Annotations are unstructured key value data stored with a resource that may be set by
external tooling. They are not queryable and should be preserved when modifying
objects.
'''
param_defaults = {
'Kind': None,
'ID': None,
'UID': None,
'CreationTimestamp': None,
'SelfLink': None,
'ResourceVersion': None,
'APIVersion': None,
'Namespace': None,
'Annotations': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.Kind == other.Kind and \
self.ID == other.ID and \
self.UID == other.UID and \
self.CreationTimestamp == other.CreationTimestamp and \
self.SelfLink == other.SelfLink and \
self.ResourceVersion == other.ResourceVersion and \
self.APIVersion == other.APIVersion and \
self.Namespace == other.Namespace and \
self.Annotations == other.Annotations
except AttributeError:
return False
def __str__(self):
'''A string representation of this kubernetes.TypeMeta instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.TypeMeta instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.TypeMeta instance.
Returns:
A JSON string representation of this kubernetes.TypeMeta instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.TypeMeta instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.TypeMeta instance
'''
data = {}
if self.Kind:
data['kind'] = self.Kind
if self.ID:
data['id'] = self.ID
if self.UID:
data['uid'] = self.UID
if self.CreationTimestamp:
data['creationTimestamp'] = self.CreationTimestamp
if self.SelfLink:
data['selfLink'] = self.SelfLink
if self.ResourceVersion:
data['resourceVersion'] = self.ResourceVersion
if self.APIVersion:
data['apiVersion'] = self.APIVersion
if self.Namespace:
data['namespace'] = self.Namespace
if self.Annotations:
data['annotations'] = self.Annotations
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.TypeMeta instance
'''
    return TypeMeta(Kind=data.get('kind', None),
                    ID=data.get('id', None),
                    UID=data.get('uid', None),
                    CreationTimestamp=data.get('creationTimestamp', None),
                    SelfLink=data.get('selfLink', None),
                    ResourceVersion=data.get('resourceVersion', None),
                    APIVersion=data.get('apiVersion', None),
                    Namespace=data.get('namespace', None),
                    Annotations=data.get('annotations', None))
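# Illustrative usage sketch (not part of the original bindings): it shows how a
# TypeMeta can be round-tripped between a JSON-style dict and the wrapper
# object. The field values below are made-up examples.
def _example_typemeta_roundtrip():
  '''Build a TypeMeta from a dict and serialize it back to a JSON string.'''
  data = {'kind': 'Pod', 'id': 'example-pod', 'apiVersion': 'v1beta1'}
  meta = TypeMeta.NewFromJsonDict(data)
  # AsJsonString() only emits the fields that are set, using JSON key names.
  return meta.AsJsonString()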
class PodStatus(object):
"""PodStatus represents a status of a pod.
"""
'''PodWaiting means that we're waiting for the pod to begin running.
'''
PodWaiting = "Waiting"
'''PodRunning means that the pod is up and running.
'''
PodRunning = "Running"
'''PodTerminated means that the pod has stopped.
'''
PodTerminated = "Terminated"
class ContainerStateWaiting(object):
'''A Class representing the ContainerStateWaiting structure used by the kubernetes API
The ContainerStateWaiting structure exposes the following properties:
ContainerStateWaiting.Reason
'''
def __init__(self, **kwargs):
'''An object to hold a Kubernetes ContainerStateWaiting.
Args:
      Reason:
        Reason for waiting, for example the image is still being pulled.
'''
param_defaults = {
'Reason': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
             self.Reason == other.Reason
except AttributeError:
return False
def __str__(self):
'''A string representation of this kubernetes.ContainerStateWaiting instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.ContainerStateWaiting instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.ContainerStateWaiting instance.
Returns:
A JSON string representation of this kubernetes.ContainerStateWaiting instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.ContainerStateWaiting instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.ContainerStateWaiting instance
'''
data = {}
if self.Reason:
data['reason'] = self.Reason
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.ContainerStateWaiting instance
'''
return ContainerStateWaiting(Reason=data.get('reason', None))
class ContainerStateRunning(object):
'''A Class representing the ContainerStateRunning structure used by the kubernetes API
The ContainerStateRunning structure exposes the following properties:
ContainerStateRunning.StartedAt
'''
def __init__(self, **kwargs):
'''An object to hold a Kubernetes ContainerStateRunning.
Args:
StartedAt:
'''
param_defaults = {
'StartedAt': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.StartedAt == other.StartedAt
except AttributeError:
return False
def __str__(self):
'''A string representation of this kubernetes.ContainerStateRunning instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.ContainerStateRunning instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.ContainerStateRunning instance.
Returns:
A JSON string representation of this kubernetes.ContainerStateRunning instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.ContainerStateRunning instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.ContainerStateRunning instance
'''
data = {}
if self.StartedAt:
data['startedAt'] = self.StartedAt
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.ContainerStateRunning instance
'''
return ContainerStateRunning(StartedAt=data.get('startedAt', None))
class ContainerStateTerminated(object):
'''A Class representing the ContainerStateTerminated structure used by the kubernetes API
ContainerStateTerminated is shared by all objects sent to, or returned from the client.
The ContainerStateTerminated structure exposes the following properties:
ContainerStateTerminated.ExitCode
ContainerStateTerminated.Signal
ContainerStateTerminated.Reason
ContainerStateTerminated.StartedAt
ContainerStateTerminated.FinishedAt
'''
def __init__(self, **kwargs):
'''An object to hold a Kubernetes ContainerStateTerminated.
Args:
ExitCode:
Signal:
Reason:
StartedAt:
FinishedAt:
'''
param_defaults = {
'ExitCode': None,
'Signal': None,
'Reason': None,
'StartedAt': None,
'FinishedAt': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.ExitCode == other.ExitCode and \
self.Signal == other.Signal and \
self.Reason == other.Reason and \
self.StartedAt == other.StartedAt and \
self.FinishedAt == other.FinishedAt
except AttributeError:
return False
def __str__(self):
'''A string representation of this kubernetes.ContainerStateTerminated instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.ContainerStateTerminated instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.ContainerStateTerminated instance.
Returns:
A JSON string representation of this kubernetes.ContainerStateTerminated instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.ContainerStateTerminated instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.ContainerStateTerminated instance
'''
data = {}
if self.ExitCode:
data['exitCode'] = self.ExitCode
if self.Signal:
data['signal'] = self.Signal
if self.Reason:
data['reason'] = self.Reason
if self.StartedAt:
data['startedAt'] = self.StartedAt
if self.FinishedAt:
data['finishedAt'] = self.FinishedAt
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.ContainerStateTerminated instance
'''
return ContainerStateTerminated(ExitCode=data.get('exitCode', None),
Signal=data.get('signal', None),
Reason=data.get('reason', None),
StartedAt=data.get('startedAt', None),
FinishedAt=data.get('finishedAt', None))
class ContainerState(object):
"""A Class representing the ContainerState structure used by the kubernetes API
The ContainerState structure exposes the following properties:
ContainerState.Waiting
ContainerState.Running
ContainerState.Termination
"""
def __init__(self, **kwargs):
    '''An object to hold a Kubernetes ContainerState.
    Args:
Only one of the following ContainerState may be specified.
If none of them is specified, the default one is ContainerStateWaiting.
Waiting:
Running:
Termination:
'''
param_defaults = {
'Waiting': None,
'Running': None,
'Termination': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.Waiting == other.Waiting and \
self.Running == other.Running and \
self.Termination == other.Termination
except AttributeError:
return False
def __str__(self):
'''A string representation of this Kubernetes.ContainerState instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.ContainerState instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.ContainerState instance.
Returns:
A JSON string representation of this kubernetes.ContainerState instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.ContainerState instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.ContainerState instance
'''
data = {}
if self.Waiting:
data['waiting'] = self.Waiting.AsDict()
if self.Running:
data['running'] = self.Running.AsDict()
if self.Termination:
data['termination'] = self.Termination.AsDict()
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.ContainerState instance
'''
waiting = None
running = None
termination = None
if 'waiting' in data:
from kubernetes import ContainerStateWaiting
waiting = ContainerStateWaiting.NewFromJsonDict(data['waiting'])
if 'running' in data:
from kubernetes import ContainerStateRunning
running = ContainerStateRunning.NewFromJsonDict(data['running'])
if 'termination' in data:
from kubernetes import ContainerStateTerminated
termination = ContainerStateTerminated.NewFromJsonDict(data['termination'])
return ContainerState(
Waiting=waiting,
Running=running,
Termination=termination)
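# Illustrative usage sketch (not part of the original bindings): a running
# container state built directly from the wrapper classes above, avoiding the
# package-level imports used by NewFromJsonDict. The timestamp is a made-up
# example value.
def _example_running_container_state():
  '''Return the dict form of a ContainerState that is in the running state.'''
  running = ContainerStateRunning(StartedAt='2015-01-01T00:00:00Z')
  state = ContainerState(Running=running)
  # Only the 'running' key is emitted because the other states are unset.
  return state.AsDict()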
class ContainerStatus(object):
"""A Class representing the ContainerStatus structure used by the kubernetes API
The ContainerStatus structure exposes the following properties:
ContainerStatus.State
ContainerStatus.RestartCount
ContainerStatus.PodIP
ContainerStatus.Image
"""
def __init__(self, **kwargs):
    '''An object to hold a Kubernetes ContainerStatus.
    Args:
      State:
        TODO(dchen1107): Should we rename PodStatus to a more generic name or have
        separate states defined for containers?
      RestartCount:
      PodIP:
        TODO(dchen1107): Deprecate this soon once we pull the entire PodStatus from
        the node, not just PodInfo. For now we need this to remove docker.Container
        from the API.
      Image:
        TODO(dchen1107): Once the integration with cadvisor is done, resource
        usage should be included.
    '''
param_defaults = {
'State': None,
'RestartCount': None,
'PodIP': None,
'Image': None}
for (param, default) in param_defaults.iteritems():
setattr(self, param, kwargs.get(param, default))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.State == other.State and \
self.RestartCount == other.RestartCount and \
self.PodIP == other.PodIP and \
self.Image == other.Image
except AttributeError:
return False
def __str__(self):
'''A string representation of this Kubernetes.ContainerStatus instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this kubernetes.ContainerStatus instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this kubernetes.ContainerStatus instance.
Returns:
A JSON string representation of this kubernetes.ContainerStatus instance.
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
    ''' A dict representation of this kubernetes.ContainerStatus instance.
    The return value uses the same key names as the JSON representation.
Returns:
A dict representing this kubernetes.ContainerStatus instance
'''
data = {}
if self.State:
data['state'] = self.State.AsDict()
if self.RestartCount:
data['restartCount'] = self.RestartCount
if self.PodIP:
data['podIP'] = self.PodIP
if self.Image:
data['image'] = self.Image
return data
@staticmethod
def NewFromJsonDict(data):
    '''Create a new instance based on a JSON dict
Args:
data: A JSON dict, as converted from the JSON in the kubernetes API
Returns:
A kubernetes.ContainerStatus instance
'''
state = None
if 'state' in data:
from kubernetes import ContainerState
state = ContainerState.NewFromJsonDict(data['state'])
return ContainerStatus(
State=state,
RestartCount=data.get('restartCount', None),
PodIP=data.get('podIP', None),
Image=data.get('image', None))
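# Illustrative usage sketch (not part of the original bindings): parsing the
# kind of JSON dict the API returns for a container into a ContainerStatus.
# The literal values are made-up examples.
def _example_container_status_from_json():
  '''Build a ContainerStatus from a JSON-style dict.'''
  data = {'restartCount': 2,
          'podIP': '10.0.0.5',
          'image': 'nginx'}
  return ContainerStatus.NewFromJsonDict(data)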
| {
"content_hash": "7c6dea1c26f8ec1e1e42b6b166b206ee",
"timestamp": "",
"source": "github",
"line_count": 626,
"max_line_length": 96,
"avg_line_length": 26.517571884984026,
"alnum_prop": 0.7137349397590361,
"repo_name": "pkilambi/python-kubernetes",
"id": "f564725317c323eb85dfeae37fe0cf68a9d916d6",
"size": "17198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/container_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "177876"
}
],
"symlink_target": ""
} |
from .partition_safety_check import PartitionSafetyCheck
class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck):
"""Safety check that waits for the primary replica that was moved out of the
node due to upgrade to be placed back again on that node.
:param Kind: Polymorphic Discriminator
:type Kind: str
:param partition_id: Id of the partition which is undergoing the safety
check.
:type partition_id: str
"""
_validation = {
'Kind': {'required': True},
}
def __init__(self, partition_id=None):
super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id)
self.Kind = 'WaitForPrimaryPlacement'
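# Illustrative usage sketch (not part of the generated client): constructing
# the safety check sets the polymorphic discriminator automatically. The
# partition id below is a made-up example value.
def _example_wait_for_primary_placement_check():
    check = WaitForPrimaryPlacementSafetyCheck(partition_id='1234')
    return check.Kind  # 'WaitForPrimaryPlacement'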
| {
"content_hash": "f85ce8b454030bb11e3ec097f2e810fe",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 91,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.7014285714285714,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "216fc1af569b4e99c8f8a494841f2f413a0d56ae",
"size": "1174",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import unittest
import imp
import operator
import sys
is_pypy = '__pypy__' in sys.builtin_module_names
import wrapt
from compat import PY2, PY3, exec_
OBJECTS_CODE = """
class TargetBaseClass(object):
"documentation"
class Target(TargetBaseClass):
"documentation"
def target():
"documentation"
pass
"""
objects = imp.new_module('objects')
exec_(OBJECTS_CODE, objects.__dict__, objects.__dict__)
class TestAttributeAccess(unittest.TestCase):
def test_init_not_called(self):
a = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
b = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
a.__wrapped__
except ValueError:
pass
try:
a + b
except ValueError:
pass
def test_attributes(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
self.assertEqual(function2.__wrapped__, function1)
def test_get_wrapped(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
self.assertEqual(function2.__wrapped__, function1)
function3 = wrapt.ObjectProxy(function2)
self.assertEqual(function3.__wrapped__, function1)
def test_set_wrapped(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
self.assertEqual(function2, function1)
self.assertEqual(function2.__wrapped__, function1)
self.assertEqual(function2.__name__, function1.__name__)
if PY3:
self.assertEqual(function2.__qualname__, function1.__qualname__)
function2.__wrapped__ = None
self.assertFalse(hasattr(function1, '__wrapped__'))
self.assertEqual(function2, None)
self.assertEqual(function2.__wrapped__, None)
self.assertFalse(hasattr(function2, '__name__'))
if PY3:
self.assertFalse(hasattr(function2, '__qualname__'))
def function3(*args, **kwargs):
return args, kwargs
function2.__wrapped__ = function3
self.assertEqual(function2, function3)
self.assertEqual(function2.__wrapped__, function3)
self.assertEqual(function2.__name__, function3.__name__)
if PY3:
self.assertEqual(function2.__qualname__, function3.__qualname__)
def test_delete_wrapped(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
def run(*args):
del function2.__wrapped__
self.assertRaises(TypeError, run, ())
def test_proxy_attribute(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
function2._self_variable = True
self.assertFalse(hasattr(function1, '_self_variable'))
self.assertTrue(hasattr(function2, '_self_variable'))
self.assertEqual(function2._self_variable, True)
del function2._self_variable
self.assertFalse(hasattr(function1, '_self_variable'))
self.assertFalse(hasattr(function2, '_self_variable'))
self.assertEqual(getattr(function2, '_self_variable', None), None)
def test_wrapped_attribute(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
function2.variable = True
self.assertTrue(hasattr(function1, 'variable'))
self.assertTrue(hasattr(function2, 'variable'))
self.assertEqual(function2.variable, True)
del function2.variable
self.assertFalse(hasattr(function1, 'variable'))
self.assertFalse(hasattr(function2, 'variable'))
self.assertEqual(getattr(function2, 'variable', None), None)
class TestNamingObjectProxy(unittest.TestCase):
def test_class_object_name(self):
# Test preservation of class __name__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__name__, target.__name__)
def test_class_object_qualname(self):
# Test preservation of class __qualname__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
try:
__qualname__ = target.__qualname__
except AttributeError:
pass
else:
self.assertEqual(wrapper.__qualname__, __qualname__)
def test_class_module_name(self):
# Test preservation of class __module__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__module__, target.__module__)
def test_class_doc_string(self):
# Test preservation of class __doc__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__doc__, target.__doc__)
def test_instance_module_name(self):
# Test preservation of instance __module__ attribute.
target = objects.Target()
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__module__, target.__module__)
def test_instance_doc_string(self):
# Test preservation of instance __doc__ attribute.
target = objects.Target()
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__doc__, target.__doc__)
def test_function_object_name(self):
# Test preservation of function __name__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__name__, target.__name__)
def test_function_object_qualname(self):
# Test preservation of function __qualname__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
try:
__qualname__ = target.__qualname__
except AttributeError:
pass
else:
self.assertEqual(wrapper.__qualname__, __qualname__)
def test_function_module_name(self):
# Test preservation of function __module__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__module__, target.__module__)
def test_function_doc_string(self):
# Test preservation of function __doc__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__doc__, target.__doc__)
class TestTypeObjectProxy(unittest.TestCase):
def test_class_of_class(self):
# Test preservation of class __class__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__class__, target.__class__)
self.assertTrue(isinstance(wrapper, type(target)))
def test_class_of_instance(self):
# Test preservation of instance __class__ attribute.
target = objects.Target()
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__class__, target.__class__)
self.assertTrue(isinstance(wrapper, objects.Target))
self.assertTrue(isinstance(wrapper, objects.TargetBaseClass))
def test_class_of_function(self):
# Test preservation of function __class__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(wrapper.__class__, target.__class__)
self.assertTrue(isinstance(wrapper, type(target)))
class TestDirObjectProxy(unittest.TestCase):
def test_dir_of_class(self):
# Test preservation of class __dir__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(dir(wrapper), dir(target))
def test_vars_of_class(self):
# Test preservation of class __dir__ attribute.
target = objects.Target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(vars(wrapper), vars(target))
def test_dir_of_instance(self):
# Test preservation of instance __dir__ attribute.
target = objects.Target()
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(dir(wrapper), dir(target))
def test_vars_of_instance(self):
# Test preservation of instance __dir__ attribute.
target = objects.Target()
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(vars(wrapper), vars(target))
def test_dir_of_function(self):
# Test preservation of function __dir__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(dir(wrapper), dir(target))
def test_vars_of_function(self):
# Test preservation of function __dir__ attribute.
target = objects.target
wrapper = wrapt.ObjectProxy(target)
self.assertEqual(vars(wrapper), vars(target))
class TestCallingObject(unittest.TestCase):
def test_function_no_args(self):
_args = ()
_kwargs = {}
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_function_args(self):
_args = (1, 2)
_kwargs = {}
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_function_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_function_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_via_class_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(Class())
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_via_class_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(Class(), *_args)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_via_class_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(Class(), **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_instancemethod_via_class_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
def function(self, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(Class(), *_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_via_class_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_via_class_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_via_class_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_classmethod_via_class_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
@classmethod
def function(cls, *args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class().function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_via_class_no_args(self):
_args = ()
_kwargs = {}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper()
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_via_class_args(self):
_args = (1, 2)
_kwargs = {}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(*_args)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_via_class_kwargs(self):
_args = ()
_kwargs = {"one": 1, "two": 2}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(**_kwargs)
self.assertEqual(result, (_args, _kwargs))
def test_staticmethod_via_class_args_plus_kwargs(self):
_args = (1, 2)
_kwargs = {"one": 1, "two": 2}
class Class(object):
@staticmethod
def function(*args, **kwargs):
return args, kwargs
wrapper = wrapt.CallableObjectProxy(Class.function)
result = wrapper(*_args, **_kwargs)
self.assertEqual(result, (_args, _kwargs))
class TestIterObjectProxy(unittest.TestCase):
def test_iteration(self):
items = [1, 2]
wrapper = wrapt.ObjectProxy(items)
result = [x for x in wrapper]
self.assertEqual(result, items)
class TestContextManagerObjectProxy(unittest.TestCase):
def test_context_manager(self):
class Class(object):
def __enter__(self):
return self
def __exit__(*args, **kwargs):
return
instance = Class()
wrapper = wrapt.ObjectProxy(instance)
with wrapper:
pass
class TestEqualityObjectProxy(unittest.TestCase):
def test_object_hash(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
self.assertEqual(hash(function2), hash(function1))
def test_mapping_key(self):
def function1(*args, **kwargs):
return args, kwargs
function2 = wrapt.ObjectProxy(function1)
table = dict()
table[function1] = True
self.assertTrue(table.get(function2))
table = dict()
table[function2] = True
self.assertTrue(table.get(function1))
def test_comparison(self):
one = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertTrue(two > 1)
self.assertTrue(two >= 1)
self.assertTrue(two < 3)
self.assertTrue(two <= 3)
self.assertTrue(two != 1)
self.assertTrue(two == 2)
self.assertTrue(two != 3)
self.assertTrue(2 > one)
self.assertTrue(2 >= one)
self.assertTrue(2 < three)
self.assertTrue(2 <= three)
self.assertTrue(2 != one)
self.assertTrue(2 == two)
self.assertTrue(2 != three)
self.assertTrue(two > one)
self.assertTrue(two >= one)
self.assertTrue(two < three)
self.assertTrue(two <= three)
self.assertTrue(two != one)
self.assertTrue(two == two)
self.assertTrue(two != three)
class TestAsNumberObjectProxy(unittest.TestCase):
def test_nonzero(self):
true = wrapt.ObjectProxy(True)
false = wrapt.ObjectProxy(False)
self.assertTrue(true)
self.assertFalse(false)
self.assertTrue(bool(true))
self.assertFalse(bool(false))
self.assertTrue(not false)
self.assertFalse(not true)
def test_int(self):
one = wrapt.ObjectProxy(1)
self.assertEqual(int(one), 1)
if not PY3:
self.assertEqual(long(one), 1)
def test_float(self):
one = wrapt.ObjectProxy(1)
self.assertEqual(float(one), 1.0)
def test_add(self):
one = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
self.assertEqual(one+two, 1+2)
self.assertEqual(1+two, 1+2)
self.assertEqual(one+2, 1+2)
def test_add_uninitialized_args(self):
result = object()
one = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
two = wrapt.ObjectProxy(2)
try:
assert one + two == result
except ValueError:
pass
one = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert one + two == result
except ValueError:
pass
def test_sub(self):
one = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
self.assertEqual(one-two, 1-2)
self.assertEqual(1-two, 1-2)
self.assertEqual(one-2, 1-2)
def test_sub_uninitialized_args(self):
result = object()
one = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
two = wrapt.ObjectProxy(2)
try:
assert one - two == result
except ValueError:
pass
one = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert one - two == result
except ValueError:
pass
def test_mul(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(two*three, 2*3)
self.assertEqual(2*three, 2*3)
self.assertEqual(two*3, 2*3)
def test_mul_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert two * three == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert two * three == result
except ValueError:
pass
def test_div(self):
# On Python 2 this will pick up div and on Python
# 3 it will pick up truediv.
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(two/three, 2/3)
self.assertEqual(2/three, 2/3)
self.assertEqual(two/3, 2/3)
def test_div_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert two / three == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert two / three == result
except ValueError:
pass
def test_floordiv(self):
two = wrapt.ObjectProxy(2)
four = wrapt.ObjectProxy(4)
self.assertEqual(four//two, 4//2)
self.assertEqual(4//two, 4//2)
self.assertEqual(four//2, 4//2)
def test_floordiv_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
four = wrapt.ObjectProxy(4)
try:
assert two // four == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
four = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert two // four == result
except ValueError:
pass
def test_mod(self):
two = wrapt.ObjectProxy(2)
four = wrapt.ObjectProxy(4)
self.assertEqual(four % two, 4 % 2)
self.assertEqual(4 % two, 4 % 2)
self.assertEqual(four % 2, 4 % 2)
def test_mod_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
four = wrapt.ObjectProxy(4)
try:
assert two % four == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
four = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert two % four == result
except ValueError:
pass
def test_divmod(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(divmod(three, two), divmod(3, 2))
self.assertEqual(divmod(3, two), divmod(3, 2))
self.assertEqual(divmod(three, 2), divmod(3, 2))
def test_divmod_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert divmod(two, three) == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert divmod(two, three) == result
except ValueError:
pass
def test_pow(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three**two, pow(3, 2))
self.assertEqual(3**two, pow(3, 2))
self.assertEqual(three**2, pow(3, 2))
self.assertEqual(pow(three, two), pow(3, 2))
self.assertEqual(pow(3, two), pow(3, 2))
self.assertEqual(pow(three, 2), pow(3, 2))
# Only PyPy implements __rpow__ for ternary pow().
if is_pypy:
self.assertEqual(pow(three, two, 2), pow(3, 2, 2))
self.assertEqual(pow(3, two, 2), pow(3, 2, 2))
self.assertEqual(pow(three, 2, 2), pow(3, 2, 2))
def test_pow_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three**two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three**two == result
except ValueError:
pass
def test_lshift(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three << two, 3 << 2)
self.assertEqual(3 << two, 3 << 2)
self.assertEqual(three << 2, 3 << 2)
def test_lshift_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three << two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three << two == result
except ValueError:
pass
def test_rshift(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three >> two, 3 >> 2)
self.assertEqual(3 >> two, 3 >> 2)
self.assertEqual(three >> 2, 3 >> 2)
def test_rshift_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three >> two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three >> two == result
except ValueError:
pass
def test_and(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three & two, 3 & 2)
self.assertEqual(3 & two, 3 & 2)
self.assertEqual(three & 2, 3 & 2)
def test_and_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three & two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three & two == result
except ValueError:
pass
def test_xor(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three ^ two, 3 ^ 2)
self.assertEqual(3 ^ two, 3 ^ 2)
self.assertEqual(three ^ 2, 3 ^ 2)
def test_xor_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three ^ two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three ^ two == result
except ValueError:
pass
def test_or(self):
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy(3)
self.assertEqual(three | two, 3 | 2)
self.assertEqual(3 | two, 3 | 2)
self.assertEqual(three | 2, 3 | 2)
def test_or_uninitialized_args(self):
result = object()
two = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
three = wrapt.ObjectProxy(3)
try:
assert three | two == result
except ValueError:
pass
two = wrapt.ObjectProxy(2)
three = wrapt.ObjectProxy.__new__(wrapt.ObjectProxy)
try:
assert three | two == result
except ValueError:
pass
def test_iadd(self):
value = wrapt.ObjectProxy(1)
one = wrapt.ObjectProxy(1)
value += 1
self.assertEqual(value, 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value += one
self.assertEqual(value, 3)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_isub(self):
value = wrapt.ObjectProxy(1)
one = wrapt.ObjectProxy(1)
value -= 1
self.assertEqual(value, 0)
self.assertEqual(type(value), wrapt.ObjectProxy)
value -= one
self.assertEqual(value, -1)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_imul(self):
value = wrapt.ObjectProxy(2)
two = wrapt.ObjectProxy(2)
value *= 2
self.assertEqual(value, 4)
self.assertEqual(type(value), wrapt.ObjectProxy)
value *= two
self.assertEqual(value, 8)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_idiv(self):
# On Python 2 this will pick up div and on Python
# 3 it will pick up truediv.
value = wrapt.ObjectProxy(2)
two = wrapt.ObjectProxy(2)
value /= 2
self.assertEqual(value, 2/2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value /= two
self.assertEqual(value, 2/2/2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ifloordiv(self):
value = wrapt.ObjectProxy(2)
two = wrapt.ObjectProxy(2)
value //= 2
self.assertEqual(value, 2//2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value //= two
self.assertEqual(value, 2//2//2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_imod(self):
value = wrapt.ObjectProxy(10)
two = wrapt.ObjectProxy(2)
value %= 2
self.assertEqual(value, 10 % 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value %= two
self.assertEqual(value, 10 % 2 % 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ipow(self):
value = wrapt.ObjectProxy(10)
two = wrapt.ObjectProxy(2)
value **= 2
self.assertEqual(value, 10**2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value **= two
self.assertEqual(value, 10**2**2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ilshift(self):
value = wrapt.ObjectProxy(256)
two = wrapt.ObjectProxy(2)
value <<= 2
self.assertEqual(value, 256 << 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value <<= two
self.assertEqual(value, 256 << 2 << 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_irshift(self):
value = wrapt.ObjectProxy(2)
two = wrapt.ObjectProxy(2)
value >>= 2
self.assertEqual(value, 2 >> 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value >>= two
self.assertEqual(value, 2 >> 2 >> 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_iand(self):
value = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
value &= 2
self.assertEqual(value, 1 & 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value &= two
self.assertEqual(value, 1 & 2 & 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ixor(self):
value = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
value ^= 2
self.assertEqual(value, 1 ^ 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value ^= two
self.assertEqual(value, 1 ^ 2 ^ 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ior(self):
value = wrapt.ObjectProxy(1)
two = wrapt.ObjectProxy(2)
value |= 2
self.assertEqual(value, 1 | 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
value |= two
self.assertEqual(value, 1 | 2 | 2)
self.assertEqual(type(value), wrapt.ObjectProxy)
def test_ior_list_self(self):
value = wrapt.ObjectProxy([])
try:
value |= value
except TypeError:
pass
def test_neg(self):
value = wrapt.ObjectProxy(1)
self.assertEqual(-value, -1)
def test_pos(self):
value = wrapt.ObjectProxy(1)
self.assertEqual(+value, 1)
def test_abs(self):
value = wrapt.ObjectProxy(-1)
self.assertEqual(abs(value), 1)
def test_invert(self):
value = wrapt.ObjectProxy(1)
self.assertEqual(~value, ~1)
def test_oct(self):
value = wrapt.ObjectProxy(20)
self.assertEqual(oct(value), oct(20))
def test_hex(self):
value = wrapt.ObjectProxy(20)
self.assertEqual(hex(value), hex(20))
def test_index(self):
class Class(object):
def __index__(self):
return 1
value = wrapt.ObjectProxy(Class())
items = [0, 1, 2]
self.assertEqual(items[value], items[1])
class TestAsSequenceObjectProxy(unittest.TestCase):
def test_length(self):
value = wrapt.ObjectProxy(list(range(3)))
self.assertEqual(len(value), 3)
def test_contains(self):
value = wrapt.ObjectProxy(list(range(3)))
self.assertTrue(2 in value)
self.assertFalse(-2 in value)
def test_getitem(self):
value = wrapt.ObjectProxy(list(range(3)))
self.assertEqual(value[1], 1)
def test_setitem(self):
value = wrapt.ObjectProxy(list(range(3)))
value[1] = -1
self.assertEqual(value[1], -1)
def test_delitem(self):
value = wrapt.ObjectProxy(list(range(3)))
self.assertEqual(len(value), 3)
del value[1]
self.assertEqual(len(value), 2)
self.assertEqual(value[1], 2)
def test_getslice(self):
value = wrapt.ObjectProxy(list(range(5)))
self.assertEqual(value[1:4], [1, 2, 3])
def test_setslice(self):
value = wrapt.ObjectProxy(list(range(5)))
value[1:4] = reversed(value[1:4])
self.assertEqual(value[1:4], [3, 2, 1])
def test_delslice(self):
value = wrapt.ObjectProxy(list(range(5)))
del value[1:4]
self.assertEqual(len(value), 2)
self.assertEqual(value, [0, 4])
class TestAsMappingObjectProxy(unittest.TestCase):
def test_length(self):
value = wrapt.ObjectProxy(dict.fromkeys(range(3), False))
self.assertEqual(len(value), 3)
def test_contains(self):
value = wrapt.ObjectProxy(dict.fromkeys(range(3), False))
self.assertTrue(2 in value)
self.assertFalse(-2 in value)
def test_getitem(self):
value = wrapt.ObjectProxy(dict.fromkeys(range(3), False))
self.assertEqual(value[1], False)
def test_setitem(self):
value = wrapt.ObjectProxy(dict.fromkeys(range(3), False))
value[1] = True
self.assertEqual(value[1], True)
def test_delitem(self):
value = wrapt.ObjectProxy(dict.fromkeys(range(3), False))
self.assertEqual(len(value), 3)
del value[1]
self.assertEqual(len(value), 2)
class TestObjectRepresentationObjectProxy(unittest.TestCase):
def test_str(self):
value = wrapt.ObjectProxy(10)
self.assertEqual(str(value), str(10))
value = wrapt.ObjectProxy((10,))
self.assertEqual(str(value), str((10,)))
value = wrapt.ObjectProxy([10])
self.assertEqual(str(value), str([10]))
value = wrapt.ObjectProxy({10: 10})
self.assertEqual(str(value), str({10: 10}))
def test_repr(self):
number = 10
value = wrapt.ObjectProxy(number)
self.assertNotEqual(repr(value).find('ObjectProxy at'), -1)
class TestDerivedClassCreation(unittest.TestCase):
def test_derived_new(self):
class DerivedObjectProxy(wrapt.ObjectProxy):
def __new__(cls, wrapped):
instance = super(DerivedObjectProxy, cls).__new__(cls)
instance.__init__(wrapped)
def __init__(self, wrapped):
super(DerivedObjectProxy, self).__init__(wrapped)
def function():
pass
obj = DerivedObjectProxy(function)
def test_derived_setattr(self):
class DerivedObjectProxy(wrapt.ObjectProxy):
def __init__(self, wrapped):
self._self_attribute = True
super(DerivedObjectProxy, self).__init__(wrapped)
def function():
pass
obj = DerivedObjectProxy(function)
def test_derived_missing_init(self):
class DerivedObjectProxy(wrapt.ObjectProxy):
def __init__(self, wrapped):
self.__wrapped__ = wrapped
def function():
pass
obj = DerivedObjectProxy(function)
self.assertEqual(function, obj)
self.assertEqual(function, obj.__wrapped__)
class DerivedClassAttributes(unittest.TestCase):
def test_setup_class_attributes(self):
def function():
pass
class DerivedObjectProxy(wrapt.ObjectProxy):
pass
obj = DerivedObjectProxy(function)
DerivedObjectProxy.ATTRIBUTE = 1
self.assertEqual(obj.ATTRIBUTE, 1)
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
del DerivedObjectProxy.ATTRIBUTE
self.assertFalse(hasattr(DerivedObjectProxy, 'ATTRIBUTE'))
self.assertFalse(hasattr(obj, 'ATTRIBUTE'))
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
def test_override_class_attributes(self):
def function():
pass
class DerivedObjectProxy(wrapt.ObjectProxy):
ATTRIBUTE = 1
obj = DerivedObjectProxy(function)
self.assertEqual(DerivedObjectProxy.ATTRIBUTE, 1)
self.assertEqual(obj.ATTRIBUTE, 1)
obj.ATTRIBUTE = 2
self.assertEqual(DerivedObjectProxy.ATTRIBUTE, 1)
self.assertEqual(obj.ATTRIBUTE, 2)
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
del DerivedObjectProxy.ATTRIBUTE
self.assertFalse(hasattr(DerivedObjectProxy, 'ATTRIBUTE'))
self.assertEqual(obj.ATTRIBUTE, 2)
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
def test_class_properties(self):
def function():
pass
class DerivedObjectProxy(wrapt.ObjectProxy):
def __init__(self, wrapped):
super(DerivedObjectProxy, self).__init__(wrapped)
self._self_attribute = 1
@property
def ATTRIBUTE(self):
return self._self_attribute
@ATTRIBUTE.setter
def ATTRIBUTE(self, value):
self._self_attribute = value
@ATTRIBUTE.deleter
def ATTRIBUTE(self):
del self._self_attribute
obj = DerivedObjectProxy(function)
self.assertEqual(obj.ATTRIBUTE, 1)
obj.ATTRIBUTE = 2
self.assertEqual(obj.ATTRIBUTE, 2)
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
del obj.ATTRIBUTE
self.assertFalse(hasattr(obj, 'ATTRIBUTE'))
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
obj.ATTRIBUTE = 1
self.assertEqual(obj.ATTRIBUTE, 1)
obj.ATTRIBUTE = 2
self.assertEqual(obj.ATTRIBUTE, 2)
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
del obj.ATTRIBUTE
self.assertFalse(hasattr(obj, 'ATTRIBUTE'))
self.assertFalse(hasattr(function, 'ATTRIBUTE'))
class OverrideAttributeAccess(unittest.TestCase):
def test_attr_functions(self):
def function():
pass
proxy = wrapt.ObjectProxy(function)
self.assertTrue(hasattr(proxy, '__getattr__'))
self.assertTrue(hasattr(proxy, '__setattr__'))
self.assertTrue(hasattr(proxy, '__delattr__'))
def test_override_getattr(self):
def function():
pass
accessed = []
class DerivedObjectProxy(wrapt.ObjectProxy):
def __getattr__(self, name):
accessed.append(name)
try:
__getattr__ = super(DerivedObjectProxy, self).__getattr__
except AttributeError as e:
raise RuntimeError(str(e))
return __getattr__(name)
function.attribute = 1
proxy = DerivedObjectProxy(function)
self.assertEqual(proxy.attribute, 1)
self.assertTrue('attribute' in accessed)
class CallableFunction(unittest.TestCase):
def test_proxy_hasattr_call(self):
proxy = wrapt.ObjectProxy(None)
self.assertFalse(hasattr(proxy, '__call__'))
def test_proxy_getattr_call(self):
proxy = wrapt.ObjectProxy(None)
self.assertEqual(getattr(proxy, '__call__', None), None)
def test_proxy_is_callable(self):
proxy = wrapt.ObjectProxy(None)
self.assertFalse(callable(proxy))
def test_callable_proxy_hasattr_call(self):
proxy = wrapt.CallableObjectProxy(None)
self.assertTrue(hasattr(proxy, '__call__'))
def test_callable_proxy_getattr_call(self):
proxy = wrapt.CallableObjectProxy(None)
self.assertTrue(getattr(proxy, '__call__', None), None)
def test_callable_proxy_is_callable(self):
proxy = wrapt.CallableObjectProxy(None)
self.assertTrue(callable(proxy))
class SpecialMethods(unittest.TestCase):
def test_class_bytes(self):
if PY3:
class Class(object):
def __bytes__(self):
return b'BYTES'
instance = Class()
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(bytes(instance), bytes(proxy))
def test_str_format(self):
instance = 'abcd'
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(format(instance, ''), format(proxy, ''))
def test_list_reversed(self):
instance = [1, 2]
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(list(reversed(instance)), list(reversed(proxy)))
def test_complex(self):
instance = 1.0+2j
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(complex(instance), complex(proxy))
def test_decimal_complex(self):
import decimal
instance = decimal.Decimal(123)
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(complex(instance), complex(proxy))
def test_fractions_round(self):
import fractions
instance = fractions.Fraction('1/2')
proxy = wrapt.ObjectProxy(instance)
self.assertEqual(round(instance), round(proxy))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d1d91d14e8a0cf51843bb3bb4d848392",
"timestamp": "",
"source": "github",
"line_count": 1795,
"max_line_length": 77,
"avg_line_length": 25.95431754874652,
"alnum_prop": 0.5805786897913626,
"repo_name": "GrahamDumpleton/wrapt",
"id": "e2493063f0af060655cb335f47a9cd9cc69fa6af",
"size": "46588",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_object_proxy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "95815"
},
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "263681"
}
],
"symlink_target": ""
} |
import os
import sys
from conary import conarycfg
from conary.deps import deps
from conary.lib import log
from conary.lib import options
from rmake import compat, errors
from rmake.compat import cvccmd as cvc
from rmake.cmdline import query
(NO_PARAM, ONE_PARAM) = (options.NO_PARAM, options.ONE_PARAM)
(OPT_PARAM, MULT_PARAM) = (options.OPT_PARAM, options.MULT_PARAM)
(NORMAL_HELP, VERBOSE_HELP) = (options.NORMAL_HELP, options.VERBOSE_HELP)
CG_MISC = 'Miscellaneous Commands'
CG_BUILD = 'Job Manipulation'
CG_INFO = 'Information Display'
# helper function to get list of commands we support
_commands = []
def register(cmd):
_commands.append(cmd)
class rMakeCommand(options.AbstractCommand):
defaultGroup = 'Common Options'
commandGroup = CG_MISC
docs = {'config' : (VERBOSE_HELP,
"Set config KEY to VALUE", "'KEY VALUE'"),
'server-config' : (VERBOSE_HELP,
"Set server config KEY to VALUE", "'KEY VALUE'"),
'config-file' : (VERBOSE_HELP,
"Read PATH config file", "PATH"),
'context' : (VERBOSE_HELP,
"Set the configuration context to use"),
'server-config-file' : (VERBOSE_HELP,
"Read PATH config file", "PATH"),
'conary-config-file' : (VERBOSE_HELP,
"Read PATH conary config file", "PATH"),
'build-config-file' : (VERBOSE_HELP,
"Read PATH config file", "PATH"),
'rmake-config-file' : (VERBOSE_HELP,
"Read PATH config file", "PATH"),
'skip-default-config': (VERBOSE_HELP,
"Don't read default configs"),
'verbose' : (VERBOSE_HELP,
"Display more detailed information where available") }
def addParameters(self, argDef):
d = {}
d["context"] = ONE_PARAM
d["config"] = MULT_PARAM
d["server-config"] = MULT_PARAM
d["server-config-file"] = MULT_PARAM
d["build-config-file"] = MULT_PARAM
d["conary-config-file"] = MULT_PARAM
d["skip-default-config"] = NO_PARAM
d["verbose"] = NO_PARAM
argDef[self.defaultGroup] = d
def _getContext(self, buildConfig, conaryConfig, argSet):
context = conaryConfig.context
if buildConfig.context:
context = buildConfig.context
if os.path.exists('CONARY'):
conaryState = compat.ConaryVersion().ConaryStateFromFile('CONARY',
parseSource=False)
if conaryState.hasContext():
context = conaryState.getContext()
context = os.environ.get('CONARY_CONTEXT', context)
context = argSet.pop('context', context)
return context
def _setContext(self, buildConfig, conaryConfig, argSet):
context = self._getContext(buildConfig, conaryConfig, argSet)
usedContext = False
if conaryConfig and context:
if conaryConfig.hasSection(context):
usedContext = True
conaryConfig.setContext(context)
buildConfig.useConaryConfig(conaryConfig)
if context and buildConfig.hasSection(context):
buildConfig.setContext(context)
usedContext = True
if not usedContext and context:
raise errors.RmakeError('No such context "%s"' % context)
def processConfigOptions(self, (buildConfig, conaryConfig, pluginManager),
cfgMap, argSet):
"""
        Manage any config maps we've set up, converting and
        assigning them to the config object.
"""
configFileList = argSet.pop('build-config-file', [])
if not isinstance(configFileList, list):
configFileList = list(configFileList)
configFileList.extend(argSet.pop('config-file', []))
for path in configFileList:
buildConfig.read(path, exception=True)
configFileList = argSet.pop('conary-config-file', [])
if not isinstance(configFileList, list):
configFileList = list(configFileList)
if configFileList and not conaryConfig:
conaryConfig = conarycfg.ConaryConfiguration(readConfigFiles=False)
for path in configFileList:
conaryConfig.read(path, exception=True)
self._setContext(buildConfig, conaryConfig, argSet)
for (arg, data) in cfgMap.items():
cfgName, paramType = data[0:2]
value = argSet.pop(arg, None)
if value is not None:
if arg.startswith('no-'):
value = not value
buildConfig.configLine("%s %s" % (cfgName, value))
for line in argSet.pop('config', []):
buildConfig.configLine(line)
        for line in argSet.pop('server-config', []):
            # Note: no serverConfig object is defined in this scope, so a
            # --server-config option that reaches this loop raises a NameError.
            serverConfig.configLine(line)
if argSet.pop('verbose', False):
log.setVerbosity(log.DEBUG)
def requireParameters(self, args, expected=None, allowExtra=False,
appendExtra=False, maxExtra=None):
args = args[1:] # cut off argv[0]
command = repr(args[0])
if isinstance(expected, str):
expected = [expected]
if expected is None:
expected = ['command']
else:
expected = ['command'] + expected
if expected:
missing = expected[len(args):]
if missing:
raise errors.BadParameters('%s missing %s command'
' parameter(s): %s' % (
command, len(missing),
', '.join(missing)))
extra = len(args) - len(expected)
if not allowExtra and not appendExtra:
maxExtra = 0
if maxExtra is not None and extra > maxExtra:
if maxExtra:
numParams = '%s-%s' % (len(expected)-1,
len(expected) + maxExtra - 1)
else:
numParams = '%s' % (len(expected)-1)
raise errors.BadParameters('%s takes %s arguments, received %s' % (command, numParams, len(args)-1))
if appendExtra:
# final parameter is list
return args[:len(expected)-1] + [args[len(expected)-1:]]
elif allowExtra:
return args[:len(expected)] + [args[len(expected):]]
else:
return args
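    # A hedged sketch of how requireParameters() slices argv (the command lines
    # shown are illustrative, not taken from a real invocation):
    #   requireParameters(['rmake', 'build', 'foo', 'bar'], 'troveSpec',
    #                     appendExtra=True)  ->  ['build', ['foo', 'bar']]
    #   requireParameters(['rmake', 'stop', '12'], 'jobId')  ->  ['stop', '12']
    # Missing parameters, or extras beyond maxExtra, raise errors.BadParameters.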
def _getJobIdOrUUIds(val):
return [ _getJobIdOrUUId(x) for x in val ]
def _getJobIdOrUUId(val):
try:
return int(val)
except (ValueError, TypeError):
if isinstance(val, str) and len(val) == 32:
return val
else:
raise errors.ParseError, 'Not a valid jobId or UUID: %s' % val
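# Expected parsing behaviour, sketched for reference (values are illustrative):
#   _getJobIdOrUUId('42')      -> 42             (plain integer job id)
#   _getJobIdOrUUId('a' * 32)  -> 'aaa...a'      (32-character UUID string)
#   anything else raises errors.ParseError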
class BuildCommand(rMakeCommand):
'''Builds the specified packages or recipes. '''
commands = ['build']
commandGroup = CG_BUILD
paramHelp = '<troveSpec>[{context}] [<troveSpec>][{context}]*'
help = 'Build packages or recipes'
docs = {'flavor' : "flavor to build with",
'host' : "host to limit build to",
'label' : "label to limit build to",
'match' : (options.VERBOSE_HELP,
"Only build troves that match the given specification"),
'no-watch' : "do not show build status",
'poll' : (options.VERBOSE_HELP, 'backwards compatibility option'),
'prep' : (options.VERBOSE_HELP,
'do not build package, only create chroot'),
'quiet' : "show less build info - don't tail logs",
'commit' : "commit job when it is done",
'message' : "Message to assign to troves upon commit",
'macro' : ('set macro NAME to VALUE', "'NAME VALUE'"),
'no-clean': 'do not remove build directory even if build is'
' successful',
'to-file': (options.VERBOSE_HELP,
'store job in a file instead of sending it'
' to the server. This makes it possible for others'
' to start the job.'),
            'binary-search': (options.VERBOSE_HELP,
                              'Search for the binary '
                              'version of the group and build the latest '
                              'sources on that branch with the same flavor'),
'reuse': ('reuse old chroot if possible instead of removing'
' and recreating'),
'info' : ('Gather and display all the information necessary to perform the build'),
'recurse': ('recurse groups, building all included sources'),
'ignore-rebuild-deps': ('Do not rebuild packages if the only'
' change to them is the packages to be'
' installed in their chroot.'),
'ignore-external-rebuild-deps': ('Do not rebuild packages unless'
' their source has changed or'
' another package in the job will'
' be installed in this package\'s'
' chroot')}
def addParameters(self, argDef):
self.addBuildParameters(argDef)
rMakeCommand.addParameters(self, argDef)
argDef['flavor'] = ONE_PARAM
argDef['host'] = MULT_PARAM
argDef['label'] = MULT_PARAM
argDef['match'] = MULT_PARAM
argDef['binary-search'] = NO_PARAM
argDef['recurse'] = NO_PARAM
def addBuildParameters(self, argDef):
argDef['commit'] = NO_PARAM
argDef['prep'] = NO_PARAM
argDef['macro'] = MULT_PARAM
argDef['message'] = '-m', ONE_PARAM
argDef['no-watch'] = NO_PARAM
argDef['poll'] = NO_PARAM
argDef['no-clean'] = NO_PARAM
argDef['to-file'] = ONE_PARAM
argDef['quiet'] = NO_PARAM
argDef['info'] = NO_PARAM
def addConfigOptions(self, cfgMap, argDef):
cfgMap['reuse'] = 'reuseRoots', NO_PARAM
rMakeCommand.addConfigOptions(self, cfgMap, argDef)
def runCommand(self, client, cfg, argSet, args):
if self.verbose:
log.setVerbosity(log.DEBUG)
else:
log.setVerbosity(log.INFO)
command, troveSpecs = self.requireParameters(args, 'troveSpec',
appendExtra=True)
if command == 'buildgroup':
log.warning('"buildgroup" is deprecated and will be removed in a future release - use "build --recurse" instead')
rebuild = (command == 'rebuild')
flavorSpec = argSet.pop('flavor', None)
if flavorSpec:
flavor = deps.parseFlavor(flavorSpec)
if flavor is None:
raise errors.ParseError("Invalid flavor: '%s'" % flavorSpec)
newFlavor = deps.overrideFlavor(client.buildConfig.buildFlavor,
flavor)
client.buildConfig.buildFlavor = newFlavor
newFlavors = []
for oldFlavor in client.buildConfig.flavor:
newFlavors.append(deps.overrideFlavor(oldFlavor, flavor))
client.buildConfig.flavor = newFlavors
matchSpecs = argSet.pop('match', [])
hosts = argSet.pop('host', [])
labels = argSet.pop('label', [])
recurseGroups = argSet.pop('recurse', False) or command == 'buildgroup'
if recurseGroups:
if argSet.pop('binary-search', False):
recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
elif not compat.ConaryVersion().supportsFindGroupSources():
log.warning('Your conary does not support recursing a group'
' source component, defaulting to searching the'
' binary version')
recurseGroups = client.BUILD_RECURSE_GROUPS_BINARY
else:
recurseGroups = client.BUILD_RECURSE_GROUPS_SOURCE
self._prep(client, argSet)
job = client.createBuildJob(troveSpecs, limitToHosts=hosts,
limitToLabels=labels,
recurseGroups=recurseGroups,
matchSpecs=matchSpecs,
rebuild=rebuild)
return self._build(client, job, argSet)
def _prep(self, client, argSet):
if 'no-clean' in argSet:
client.buildConfig.cleanAfterCook = False
del argSet['no-clean']
if 'prep' in argSet:
client.buildConfig.prepOnly = argSet.pop('prep')
if 'ignore-rebuild-deps' in argSet:
client.buildConfig.ignoreAllRebuildDeps = True
argSet.pop('ignore-rebuild-deps')
if 'ignore-external-rebuild-deps' in argSet:
client.buildConfig.ignoreExternalRebuildDeps = True
argSet.pop('ignore-external-rebuild-deps')
macros = argSet.pop('macro', [])
for macro in macros:
client.buildConfig.configLine('macros ' + macro)
def _build(self, client, job, argSet):
savePath = argSet.pop('to-file', False)
quiet = argSet.pop('quiet', False)
commit = argSet.pop('commit', False)
message = argSet.pop('message', None)
infoOnly = argSet.pop('info', False)
monitorJob = not argSet.pop('no-watch', False)
if infoOnly:
client.displayJob(job, quiet=quiet)
if savePath:
job.writeToFile(savePath, sanitize=True)
if infoOnly or savePath:
return 0
jobId = client.buildJob(job, quiet=quiet)
if monitorJob:
if quiet:
if not client.waitForJob(jobId):
return 1
elif not client.watch(jobId, showTroveLogs=not quiet,
showBuildLogs=not quiet,
commit=commit, message=message):
return 1
elif commit:
if not client.commitJob(jobId, commitWithFailures=False,
waitForJob=True, message=message):
return 1
return 0
register(BuildCommand)
class RebuildCommand(BuildCommand):
'''\
Rebuilds packages whose source or dependencies have changed.
'''
commands = ['rebuild']
commandGroup = CG_BUILD
paramHelp = '<troveSpec>[{context}] [<troveSpec>][{context}]*'
help = 'Rebuild packages or recipes if they\'ve changed'
def addParameters(self, argDef):
BuildCommand.addParameters(self, argDef)
argDef['ignore-rebuild-deps'] = NO_PARAM
argDef['ignore-external-rebuild-deps'] = NO_PARAM
register(RebuildCommand)
class LoadJobCommand(BuildCommand):
'''Loads a job from a file that was created with --to-file'''
commands = ['load']
commandGroup = CG_BUILD
paramHelp = '<path>'
def addParameters(self, argDef):
self.addBuildParameters(argDef)
rMakeCommand.addParameters(self, argDef)
def runCommand(self, client, cfg, argSet, args):
if self.verbose:
log.setVerbosity(log.DEBUG)
else:
log.setVerbosity(log.INFO)
command, loadPath = self.requireParameters(args, 'path')
self._prep(client, argSet)
job = client.loadJobFromFile(loadPath)
return self._build(client, job, argSet)
register(LoadJobCommand)
class RestartCommand(BuildCommand):
'''Restarts the specified job'''
commands = ['restart']
commandGroup = CG_BUILD
paramHelp = '<jobId> [<troveSpec>]*'
help = 'Restart an earlier job'
def addParameters(self, argDef):
self.addBuildParameters(argDef)
rMakeCommand.addParameters(self, argDef)
argDef['exclude'] = MULT_PARAM
argDef['update'] = MULT_PARAM
argDef['update-config'] = MULT_PARAM
argDef['no-update'] = NO_PARAM
argDef['clear-build-list'] = NO_PARAM
argDef['clear-prebuilt-list'] = NO_PARAM
argDef['ignore-rebuild-deps'] = NO_PARAM
argDef['ignore-external-rebuild-deps'] = NO_PARAM
def runCommand(self, client, cfg, argSet, args):
if self.verbose:
log.setVerbosity(log.DEBUG)
else:
log.setVerbosity(log.INFO)
command, jobId, troveSpecs = self.requireParameters(args, 'jobId',
allowExtra=True)
jobId = _getJobIdOrUUId(jobId)
noUpdate = argSet.pop('no-update', False)
clearBuildList = argSet.pop('clear-build-list', False)
clearPrebuiltList = argSet.pop('clear-prebuilt-list', False)
updateConfigKeys = argSet.pop('update-config', None)
if noUpdate:
updateSpecs = ['-*']
else:
updateSpecs = []
updateSpecs.extend(argSet.pop('update', []))
excludeSpecs = argSet.pop('exclude', [])
self._prep(client, argSet)
job = client.createRestartJob(jobId, troveSpecs,
updateSpecs=updateSpecs,
excludeSpecs=excludeSpecs,
updateConfigKeys=updateConfigKeys,
clearBuildList=clearBuildList,
clearPrebuiltList=clearPrebuiltList)
return self._build(client, job, argSet)
register(RestartCommand)
class ChangeSetCommand(rMakeCommand):
commands = ['changeset']
hidden = True
paramHelp = '''\
<jobId> <troveSpec>* <outfile>
Creates a changeset with the troves from the job <jobId> and stores it in <outfile>
'''
help = 'Create a changeset file from the packages in a job'
def runCommand(self, client, cfg, argSet, args):
command, jobId, path = self.requireParameters(args, ['jobId', 'path'],
appendExtra=True)
if len(path) > 1:
troveSpecs = path[:-1]
path = path[-1]
else:
troveSpecs = []
path = path[0]
jobId = _getJobIdOrUUId(jobId)
client.createChangeSetFile(jobId, path, troveSpecs)
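        # Argument handling, sketched with hypothetical command lines:
        #   rmake changeset 12 out.ccs          -> all troves from job 12
        #   rmake changeset 12 foo bar out.ccs  -> only foo and bar
        # The final positional argument is always treated as the output path.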
register(ChangeSetCommand)
class CommitCommand(rMakeCommand):
commands = ['commit', 'ci']
commandGroup = CG_BUILD
paramHelp = '''<jobId> [<jobId>]
Commits the built packages from the jobs, moving them from rMake's internal
repository back into the repository where their source package came from.
'''
help = 'Commit a job'
docs = {'commit-outdated-sources' : ("Allow commits of source components when another"
" commit has been made upstream"),
'source-only' : "Only commit the source changes",
'exclude' : "Do not commit from specified"
" sources",
'message' : "The message to give for all"
" committed sources"}
def addParameters(self, argDef):
argDef['source-only'] = NO_PARAM
argDef['message'] = '-m', ONE_PARAM
argDef['exclude'] = MULT_PARAM
argDef['to-file'] = ONE_PARAM
argDef['commit-outdated-sources'] = NO_PARAM
rMakeCommand.addParameters(self, argDef)
def runCommand(self, client, cfg, argSet, args):
command, jobIds = self.requireParameters(args, ['jobId'],
appendExtra=True)
commitOutdated = argSet.pop('commit-outdated-sources', False)
sourceOnly = argSet.pop('source-only', False)
message = argSet.pop('message', None)
excludeSpecs = argSet.pop('exclude', None)
jobIds = _getJobIdOrUUIds(jobIds)
toFile = argSet.pop('to-file', None)
success = client.commitJobs(jobIds,
commitOutdatedSources=commitOutdated,
commitWithFailures=True, waitForJob=True,
sourceOnly=sourceOnly,
message=message,
excludeSpecs=excludeSpecs,
writeToFile=toFile)
if success:
return 0
else:
return 1
register(CommitCommand)
class ConfigCommand(rMakeCommand):
commands = ['config']
commandGroup = CG_INFO
help = 'Display the current configuration'
docs = {'show-passwords' : 'do not mask passwords'}
def addParameters(self, argDef):
rMakeCommand.addParameters(self, argDef)
argDef["show-passwords"] = NO_PARAM
def runCommand(self, client, cfg, argSet, args):
self.requireParameters(args)
showPasswords = argSet.pop('show-passwords', False)
try:
prettyPrint = sys.stdout.isatty()
except AttributeError:
prettyPrint = False
client.displayConfig(hidePasswords=not showPasswords,
prettyPrint=prettyPrint)
register(ConfigCommand)
class DeleteCommand(rMakeCommand):
commands = ['delete']
commandGroup = CG_BUILD
paramHelp = '<jobId>[-<jobId>]+'
help = 'Delete jobs from rmake\'s history'
def runCommand(self, client, cfg, argSet, args):
toDelete = []
command, jobList = self.requireParameters(args, 'jobId',
appendExtra=True)
for arg in jobList:
values = arg.split(',')
for value in values:
range = value.split('-', 1)
if len(range) == 1:
toDelete.append(_getJobIdOrUUId(value))
else:
fromVal = _getJobIdOrUUId(range[0])
toVal = _getJobIdOrUUId(range[1])
if (not isinstance(fromVal, int)
or not isinstance(toVal, int)):
                    raise errors.ParseError('Must use jobIds when specifying'
                                            ' a range to delete')
toDelete.extend(xrange(fromVal, toVal + 1))
client.deleteJobs(toDelete)
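        # Range syntax, sketched with a hypothetical command line:
        #   "rmake delete 1-3,7" expands to jobIds [1, 2, 3, 7].
        # UUIDs may be deleted individually, but cannot serve as range
        # endpoints; that raises errors.ParseError above.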
register(DeleteCommand)
class HelpCommand(rMakeCommand):
commands = ['help']
help = 'Display help information'
commandGroup = CG_INFO
def runCommand(self, client, cfg, argSet, args):
command, subCommands = self.requireParameters(args, allowExtra=True,
maxExtra=1)
if subCommands:
command = subCommands[0]
commands = self.mainHandler._supportedCommands
if not command in commands:
print "%s: no such command: '%s'" % (self.mainHandler.name,
command)
sys.exit(1)
commands[command].usage()
else:
self.mainHandler.usage(showAll=True)
return 0
register(HelpCommand)
class PollCommand(rMakeCommand):
commands = ['poll', 'watch']
commandGroup = CG_INFO
paramHelp = '''<jobId>
Watch the progress of job <jobId> as it builds its packages
'''
help = 'Watch a job build'
docs = { 'quiet' : 'Only display major job status changes',
'commit' : "Commit job when it is done"}
def addParameters(self, argDef):
rMakeCommand.addParameters(self, argDef)
argDef['quiet'] = NO_PARAM
argDef['commit'] = NO_PARAM
def runCommand(self, client, cfg, argSet, args):
command, jobId = self.requireParameters(args, 'jobId')
log.setVerbosity(log.INFO)
quiet = argSet.pop('quiet', False)
commit = argSet.pop('commit', False)
jobId = _getJobIdOrUUId(jobId)
success = client.watch(jobId, showBuildLogs = not quiet,
showTroveLogs = not quiet,
commit = commit)
if success:
return 0
else:
return 1
register(PollCommand)
class StopCommand(rMakeCommand):
commands = ['stop']
commandGroup = CG_BUILD
help = 'Stop job from building'
paramHelp = '''<jobId>
Stops job <jobId> from building.
'''
def runCommand(self, client, cfg, argSet, args):
command, jobId = self.requireParameters(args, 'jobId')
log.setVerbosity(log.INFO)
jobId = _getJobIdOrUUId(jobId)
client.stopJob(jobId)
register(StopCommand)
class QueryCommand(rMakeCommand):
commands = ['query', 'q']
commandGroup = CG_INFO
help = 'Display information about a job'
paramHelp = '''[<jobId> <troveSpec>*]
Display information about the job <jobId> (limited to <troveSpec>
if specified)
'''
docs = {'troves' : 'Display troves for this job',
'info' : 'Display details',
'logs' : 'Display logs associated with jobs/troves',
'watch' : 'Continually update status while job builds',
'full-versions' : 'Show full versions',
'labels' : 'Show labels',
'flavors' : 'Show full flavors',
'tracebacks' : 'Show tracebacks',
'all' : 'Show all jobs (not just last 20)',
'active' : 'Show only active jobs',
'show-config' : 'Show configuration for this job',
}
def addParameters(self, argDef):
argDef['troves'] = NO_PARAM
argDef['info'] = NO_PARAM
argDef['tracebacks'] = NO_PARAM
argDef['full-versions'] = NO_PARAM
argDef['labels'] = NO_PARAM
argDef['flavors'] = NO_PARAM
argDef['logs'] = NO_PARAM
argDef['watch'] = NO_PARAM
argDef['all'] = NO_PARAM
argDef['active'] = NO_PARAM
argDef['show-config'] = NO_PARAM
rMakeCommand.addParameters(self, argDef)
def runCommand(self, client, cfg, argSet, args):
command, args = self.requireParameters(args, allowExtra=True)
if args:
jobId = _getJobIdOrUUId(args[0])
troveSpecs = args[1:]
try:
jobId = int(jobId)
except ValueError:
self.usage()
log.error("bad jobId '%s'", jobId)
return 1
else:
jobId = None
troveSpecs = []
displayTroves = argSet.pop('troves', False)
displayDetails = argSet.pop('info', False)
showFullVersions = argSet.pop('full-versions', False)
showFullFlavors = argSet.pop('flavors', False)
showLabels = argSet.pop('labels', False)
showTracebacks = argSet.pop('tracebacks', False)
showLogs = argSet.pop('logs', False)
showConfig = argSet.pop('show-config', False)
if argSet.pop('all', False):
limit = None
else:
limit = 20
activeOnly = argSet.pop('active', False)
watchJob = argSet.pop('watch', False)
query.displayJobInfo(client, jobId, troveSpecs,
displayTroves=displayTroves,
displayDetails=displayDetails,
showLogs=showLogs,
showBuildLogs=showLogs,
showFullVersions=showFullVersions,
showFullFlavors=showFullFlavors,
showLabels=showLabels,
showTracebacks=showTracebacks,
showConfig=showConfig,
jobLimit=limit,
activeOnly=activeOnly)
if watchJob:
client.watch(jobId, showBuildLogs = True, showTroveLogs = True)
register(QueryCommand)
class ListCommand(rMakeCommand):
"""\
List information about the given rmake server.
Types Available:
list [ch]roots - lists chroots on this rmake server"""
commands = ['list']
paramHelp = "<type>"
help = 'List various information about this rmake server'
commandGroup = CG_INFO
docs = {'all' : 'Backwards compatibility option',
'active' : 'Display only active items' }
def addParameters(self, argDef):
argDef['all'] = NO_PARAM
argDef['active'] = NO_PARAM
rMakeCommand.addParameters(self, argDef)
def runCommand(self, client, cfg, argSet, args):
command, subCommand = self.requireParameters(args, 'command')
commandFn = getattr(self, 'list%s' % subCommand.title(), None)
if not commandFn:
self.usage()
raise errors.RmakeError('No such list command %s' % subCommand)
commandFn(client, cfg, argSet)
def listChroots(self, client, cfg, argSet):
allChroots = not argSet.pop('active', False)
query.listChroots(client, cfg, allChroots=allChroots)
listRoots = listChroots
register(ListCommand)
class ChrootCommand(rMakeCommand):
"""\
Runs /bin/sh in the given chroot.
This command allows you to debug problems that occur with a build in
rMake. By default, it enters the chroot as the user who built the
trove. With the --super parameter you can cause it to run as the
"rmake" user, who can then run commands like "conary update strace."\
"""
help = 'Run /bin/sh in a given chroot'
paramHelp = "<jobId> <trove>"
commands = ['chroot']
docs = {'super' :
'Run as a user capable of modifying the contents of the root',
'path' : 'Specify the chroot path to use'}
def addParameters(self, argDef):
argDef['super'] = NO_PARAM
argDef['path'] = ONE_PARAM
rMakeCommand.addParameters(self, argDef)
def _getChroot(self, chroot):
return '_local_', chroot
def runCommand(self, client, cfg, argSet, args):
command, jobId, troveSpec = self.requireParameters(args,
['jobId'],
allowExtra=True,
maxExtra=1)
superUser = argSet.pop('super', False)
path = argSet.pop('path', None)
if path:
chrootHost, chrootPath = self._getChroot(path)
else:
chrootHost = chrootPath = None
if not troveSpec:
troveSpec = None
else:
troveSpec = troveSpec[0]
client.startChrootSession(jobId, troveSpec, ['/bin/bash', '-l'],
superUser=superUser,
chrootHost=chrootHost,
chrootPath=chrootPath)
register(ChrootCommand)
class ArchiveCommand(rMakeCommand):
"""\
Archive a chroot so that it will not be overwritten by rmake during the
build process.
    By default, rmake reuses a particular chroot name whenever it builds
    something with that same name. This command can be used to safely move a
    chroot out of the way for further debugging without requiring that normal
    rmake use be stopped."""
commands = ['archive']
paramHelp = '<chrootName> <newName>'
help = 'Archives a chroot for later use'
def addParameters(self, argDef):
rMakeCommand.addParameters(self, argDef)
def _getChroot(self, chroot):
return '_local_', chroot
def runCommand(self, client, cfg, argSet, args):
command, chroot, extra = self.requireParameters(args,
['chrootPath'],
allowExtra=1)
host, chroot = self._getChroot(chroot)
if extra:
newPath = extra[0]
else:
newPath = chroot
client.archiveChroot(host, chroot, newPath)
register(ArchiveCommand)
class CleanCommand(rMakeCommand):
"""\
    Removes the given chroot, freeing its space.
    This command removes the given chroot and everything within it,
    freeing its disk space.
    Specifying --all removes all old chroots instead.
"""
commands = ['clean']
help = 'Deletes a chroot'
paramHelp = '<chroot>'
def addParameters(self, argDef):
argDef['all'] = NO_PARAM
rMakeCommand.addParameters(self, argDef)
def _getChroot(self, chroot):
return '_local_', chroot
def runCommand(self, client, cfg, argSet, args):
if argSet.pop('all', False):
client.deleteAllChroots()
else:
command, chroot = self.requireParameters(args, ['chrootPath'])
client.deleteChroot(*self._getChroot(chroot))
register(CleanCommand)
class CheckoutCommand(cvc.CheckoutCommand, rMakeCommand):
# Move this to the same section as NewPkg
commandGroup = 'Setup Commands'
def processConfigOptions(self, *args, **kw):
return rMakeCommand.processConfigOptions(self, *args, **kw)
def runCommand(self, client, cfg, argSet, args):
return cvc.CheckoutCommand.runCommand(self, cfg, argSet, args,
repos=client.getRepos())
register(CheckoutCommand)
class NewPkgCommand(cvc.NewPkgCommand, rMakeCommand):
commandGroup = 'Setup Commands'
def processConfigOptions(self, *args, **kw):
return rMakeCommand.processConfigOptions(self, *args, **kw)
def runCommand(self, client, cfg, argSet, args):
return cvc.NewPkgCommand.runCommand(self, cfg, argSet, args,
repos=client.getRepos())
register(NewPkgCommand)
class ContextCommand(cvc.ContextCommand, rMakeCommand):
def processConfigOptions(self, *args, **kw):
return rMakeCommand.processConfigOptions(self, *args, **kw)
def runCommand(self, client, cfg, argSet, args):
return cvc.ContextCommand.runCommand(self, cfg, argSet, args,
repos=client.getRepos())
register(ContextCommand)
class BuildImageCommand(BuildCommand):
'''Builds the specified rbuilder image.'''
paramHelp = '<productName> <troveSpec> <imageType>'
docs = {
'option': ('options for the image build, e.g., swapSize=128', 'optionName=value'),
}
commands = ['buildimage']
commandGroup = CG_BUILD
def addParameters(self, argDef):
argDef['option'] = MULT_PARAM
rMakeCommand.addParameters(self, argDef)
def addConfigOptions(self, cfgMap, argDef):
rMakeCommand.addConfigOptions(self, cfgMap, argDef)
def runCommand(self, client, cfg, argSet, args):
(command, product,
troveSpec, imageType) = self.requireParameters(args, ['product',
'troveSpec',
'imageType'])
options = {}
for option in argSet.pop('option', []):
key, value = option.split('=', 1)
options[key] = value
job = client.createImageJob(product, [(troveSpec, imageType, options)])
return self._build(client, job, argSet)
register(BuildImageCommand)
def addCommands(main):
for command in _commands:
main._registerCommand(command)
| {
"content_hash": "d9c6461b82bb99d3a1ab1533c82f833a",
"timestamp": "",
"source": "github",
"line_count": 920,
"max_line_length": 125,
"avg_line_length": 39.10434782608696,
"alnum_prop": 0.5595952857460529,
"repo_name": "fedora-conary/rmake-2",
"id": "39cf0b56366a629fe03ddb2401064915eaaabf9d",
"size": "36563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rmake/cmdline/command.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35796"
},
{
"name": "C++",
"bytes": "3953"
},
{
"name": "Python",
"bytes": "1682020"
},
{
"name": "Shell",
"bytes": "12415"
}
],
"symlink_target": ""
} |
from pgmpy import exceptions
class Independencies(object):
"""
Base class for independencies.
independencies class represents a set of Conditional Independence
assertions (eg: "X is independent of Y given Z" where X, Y and Z
are random variables) or Independence assertions (eg: "X is
independent of Y" where X and Y are random variables).
Initialize the independencies Class with Conditional Independence
assertions or Independence assertions.
Parameters
----------
assertions: Lists or Tuples
Each assertion is a list or tuple of the form: [event1,
event2 and event3]
eg: assertion ['X', 'Y', 'Z'] would be X is independent
of Y given Z.
Examples
--------
Creating an independencies object with one independence assertion:
Random Variable X is independent of Y
    >>> independencies = Independencies(['X', 'Y'])
Creating an independencies object with three conditional
independence assertions:
First assertion is Random Variable X is independent of Y given Z.
    >>> independencies = Independencies(['X', 'Y', 'Z'],
... ['a', ['b', 'c'], 'd'],
... ['l', ['m', 'n'], 'o'])
Public Methods
--------------
add_assertions
get_assertions
get_factorized_product
"""
def __init__(self, *assertions):
self.independencies = []
self.add_assertions(*assertions)
def __str__(self):
string = '\n'.join([str(assertion) for assertion in self.independencies])
return string
__repr__ = __str__
def get_assertions(self):
"""
        Returns the assertions of this Independencies object as a list of IndependenceAssertion objects.
Examples
--------
>>> from pgmpy.independencies import Independencies
>>> independencies = Independencies(['X', 'Y', 'Z'])
>>> independencies.get_assertions()
"""
return self.independencies
def add_assertions(self, *assertions):
"""
Adds assertions to independencies.
Parameters
----------
assertions: Lists or Tuples
Each assertion is a list or tuple of variable, independent_of and given.
Examples
--------
>>> from pgmpy.independencies import Independencies
>>> independencies = Independencies()
>>> independencies.add_assertions(['X', 'Y', 'Z'])
>>> independencies.add_assertions(['a', ['b', 'c'], 'd'])
"""
for assertion in assertions:
if isinstance(assertion, IndependenceAssertion):
self.independencies.append(assertion)
else:
try:
self.independencies.append(IndependenceAssertion(assertion[0], assertion[1], assertion[2]))
except IndexError:
self.independencies.append(IndependenceAssertion(assertion[0], assertion[1]))
# TODO: write reduce function.
def reduce(self):
"""
Add function to remove duplicate Independence Assertions
"""
pass
def latex_string(self):
"""
Returns a list of string.
Each string represents the IndependenceAssertion in latex.
"""
return [assertion.latex_string() for assertion in self.get_assertions()]
def get_factorized_product(self, random_variables=None, latex=False):
# TODO: Write this whole function
#
# The problem right now is that the factorized product for all
# P(A, B, C), P(B, A, C) etc should be same but on solving normally
# we get different results which have to be simplified to a simpler
# form. How to do that ??? and also how to decide which is the most
# simplified form???
#
pass
class IndependenceAssertion(object):
"""
Represents Conditional Independence or Independence assertion.
Each assertion has 3 attributes: event1, event2, event3.
The attributes for
.. math:: U \perp X, Y | Z
is read as: Random Variable U is independent of X and Y given Z would be:
event1 = {U}
event2 = {X, Y}
event3 = {Z}
Parameters
----------
event1: String or List of strings
Random Variable which is independent.
event2: String or list of strings.
Random Variables from which event1 is independent
event3: String or list of strings.
Random Variables given which event1 is independent of event2.
Examples
--------
>>> from pgmpy.independencies import IndependenceAssertion
>>> assertion = IndependenceAssertion('U', 'X')
>>> assertion = IndependenceAssertion('U', ['X', 'Y'])
>>> assertion = IndependenceAssertion('U', ['X', 'Y'], 'Z')
>>> assertion = IndependenceAssertion(['U', 'V'], ['X', 'Y'], ['Z', 'A'])
Public Methods
--------------
get_assertion
set_assertion
"""
def __init__(self, event1=[], event2=[], event3=[]):
"""
Initialize an IndependenceAssertion object with event1, event2 and event3 attributes.
event2
^
event1 / event3
^ / ^
| / |
(U || X, Y | Z) read as Random variable U is independent of X and Y given Z.
---
"""
if event1 and not event2:
raise exceptions.RequiredError('event2 needed')
if any([event2, event3]) and not event1:
raise exceptions.RequiredError('event1')
if event3 and not all([event1, event2]):
raise exceptions.RequiredError('event1' if not event1 else 'event2')
self.event1 = set(self._return_list_if_str(event1))
self.event2 = set(self._return_list_if_str(event2))
self.event3 = set(self._return_list_if_str(event3))
def __str__(self):
if self.event3:
return('({event1} _|_ {event2} | {event3})'.format(event1=', '.join(self.event1),
event2=', '.join(self.event2),
event3=', '.join(self.event3)))
else:
return('({event1} _|_ {event2})'.format(event1=', '.join(self.event1),
event2=', '.join(self.event2)))
__repr__ = __str__
@staticmethod
def _return_list_if_str(event):
"""
If variable is a string returns a list containing variable.
Else returns variable itself.
"""
if isinstance(event, str):
return [event]
else:
return event
def get_assertion(self):
"""
Returns a tuple of the attributes: variable, independent_of, given.
Examples
--------
>>> from pgmpy.independencies import IndependenceAssertion
>>> asser = IndependenceAssertion('X', 'Y', 'Z')
>>> asser.get_assertion()
"""
return self.event1, self.event2, self.event3
def set_assertion(self, event1, event2, event3=[]):
"""
Sets the attributes event1, event2 and event3.
.. math:: U \perp X, Y | Z
event1 = {U}
event2 = {X, Y}
event3 = {Z}
Parameters
----------
event1: String or List
Random Variable which is independent.
event2: String or list of strings.
Random Variables from which event1 is independent
event3: String or list of strings.
Random Variables given which event1 is independent of event2.
Example
-------
For a random variable U independent of X and Y given Z, the function should be called as
>>> from pgmpy.independencies import IndependenceAssertion
>>> asser = IndependenceAssertion()
>>> asser.set_assertion('U', ['X', 'Y'], 'Z')
>>> asser.set_assertion('U', ['X', 'Y'], ['Z', 'A'])
"""
self.__init__(event1, event2, event3)
def latex_string(self):
        return (r'%s \perp %s \mid %s' % (', '.join(self.event1), ', '.join(self.event2),
                                          ', '.join(self.event3)))
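
# Minimal usage sketch (illustrative only; it mirrors the examples in the class
# docstrings above rather than any separate pgmpy test suite):
if __name__ == '__main__':
    independencies = Independencies(['X', 'Y', 'Z'],
                                    ['a', ['b', 'c'], 'd'])
    print(independencies)                 # one assertion per line
    print(independencies.latex_string())  # list of latex strings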
| {
"content_hash": "258e18722b93051a3f3847b1e5abea0c",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 111,
"avg_line_length": 33.032,
"alnum_prop": 0.5635747154274643,
"repo_name": "yashu-seth/pgmpy",
"id": "d5a155571eb5c651e66424c4280b741d2e73cedc",
"size": "8258",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pgmpy/independencies/Independencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "797207"
},
{
"name": "Shell",
"bytes": "1022"
}
],
"symlink_target": ""
} |
class Spam:
def doit(self, message):
print(message)
object1 = Spam()
object1.doit('hello world!')
x = object1.doit
x('hello world!')
t = Spam.doit
t(object1, 'howdy!')
class Eggs:
def m1(self, n):
print(n)
def m2(self):
x = self.m1
x(42)
Eggs().m2()
class Selfless:
def __init__(self, data):
self.data = data
def selfless(args1, args2):
return args1 + args2
def normal(self, args1, args2):
return self.data + args1 + args2
X = Selfless(2)
print(X.normal(3, 4))
print(Selfless.normal(X, 3, 4))
print(Selfless.selfless(3, 4))
# The following two calls raise TypeErrors (the argument counts don't line up)
# X.selfless(3, 4)
# Selfless.normal(3, 4)
class Number:
def __init__(self, base):
self.base = base
def double(self):
return self.base * 2
def triple(self):
return self.base * 3
x = Number(2)
y = Number(3)
z = Number(4)
print(x.double())
acts = [x.double, y.double, y.triple, z.double]
for act in acts:
print(act())
bound = x.double
print(bound.__self__, bound.__func__)
print(bound.__self__.base)
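# Hedged aside: that pair is all a bound method carries, so calling it is the
# same as passing the instance explicitly; both of these print 4 here.
print(bound())
print(bound.__func__(bound.__self__))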
def square(arg):
return arg ** 2
class Sum:
def __init__(self, val):
self.val = val
def __call__(self, arg):
return self.val + arg
class Product:
def __init__(self, val):
self.val = val
def method(self, arg):
return self.val * arg
sobject = Sum(2)
pobject = Product(3)
actions = [square, sobject, pobject.method]
for act in actions:
print(act(5))
print(actions[-1](5))
print([act(5) for act in actions])
print(list(map(lambda act: act(5), actions)))
class Negate:
def __init__(self, val):
self.val = -val
def __repr__(self):
return str(self.val)
actions = [square, sobject, pobject.method, Negate]
print([act(5) for act in actions])
table = {act(5) : act for act in actions}
for (key, value) in table.items():
print('%2s => %s' % (key, value))
# print('{0:2} => {1}'.format(key, value))
| {
"content_hash": "0e23143bea34a3730024730b6333545b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 51,
"avg_line_length": 20.705263157894738,
"alnum_prop": 0.5922724961870869,
"repo_name": "eroicaleo/LearningPython",
"id": "6681478395c5d0c52304669ade34a0a768447917",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch31/bound.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18342"
},
{
"name": "HTML",
"bytes": "95429"
},
{
"name": "Java",
"bytes": "5182"
},
{
"name": "JavaScript",
"bytes": "31062"
},
{
"name": "Jupyter Notebook",
"bytes": "439846"
},
{
"name": "Makefile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "1489221"
},
{
"name": "TeX",
"bytes": "795"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'marathon-client.py'
copyright = u'2014, Emilien Kenler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'marathon-clientpydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'marathon-clientpy.tex', u'marathon-client.py Documentation',
u'Emilien Kenler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'marathon-clientpy', u'marathon-client.py Documentation',
[u'Emilien Kenler'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'marathon-clientpy', u'marathon-client.py Documentation',
u'Emilien Kenler', 'marathon-clientpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'marathon-client.py'
epub_author = u'Emilien Kenler'
epub_publisher = u'Emilien Kenler'
epub_copyright = u'2014, Emilien Kenler'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'marathon-client.py'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| {
"content_hash": "5c03224eae5f2938f1ec860ef28e7eae",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 80,
"avg_line_length": 31.41640378548896,
"alnum_prop": 0.7075007530876594,
"repo_name": "Wizcorp/marathon-client.py",
"id": "c32c0dedf0805269d4f35651d10188a3bb1cf746",
"size": "10390",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13462"
}
],
"symlink_target": ""
} |
"""
    Budgeting Module - Controllers
"""
module = request.controller
resourcename = request.function
# Requires 'project' module too
if module not in deployment_settings.modules or not deployment_settings.has_module("project"):
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
# Options Menu (available in all Functions' Views)
response.menu_options = [
[T("Parameters"), False, URL(r=request, f="parameters")],
[T("Items"), False, URL(r=request, f="item"), [
[T("List"), False, URL(r=request, f="item")],
[T("Add"), False, URL(r=request, f="item", args="create")],
]],
[T("Kits"), False, URL(r=request, f="kit"), [
[T("List"), False, URL(r=request, f="kit")],
[T("Add"), False, URL(r=request, f="kit", args="create")],
]],
[T("Bundles"), False, URL(r=request, f="bundle"), [
[T("List"), False, URL(r=request, f="bundle")],
[T("Add"), False, URL(r=request, f="bundle", args="create")],
]],
[T("Staff"), False, URL(r=request, f="staff"), [
[T("List"), False, URL(r=request, f="staff")],
[T("Add"), False, URL(r=request, f="staff", args="create")],
]],
[T("Locations"), False, URL(r=request, f="location"), [
[T("List"), False, URL(r=request, f="location")],
[T("Add"), False, URL(r=request, f="location", args="create")],
]],
[T("Projects"), False, URL(r=request, f="project"), [
[T("List"), False, URL(r=request, f="project")],
[T("Add"), False, URL(r=request, f="project", args="create")],
]],
[T("Budgets"), False, URL(r=request, f="budget"), [
[T("List"), False, URL(r=request, f="budget")],
[T("Add"), False, URL(r=request, f="budget", args="create")],
]]
]
# Options used in multiple functions
table = db.budget_item
table.code.label = T("Code")
table.description.label = T("Description")
table.unit_cost.label = T("Unit Cost")
table.monthly_cost.label = T("Monthly Cost")
table.minute_cost.label = T("Cost per Minute")
table.megabyte_cost.label = T("Cost per Megabyte")
table.comments.label = T("Comments")
table = db.budget_kit
table.code.label = T("Code")
table.description.label = T("Description")
table.total_unit_cost.label = T("Total Unit Cost")
table.total_monthly_cost.label = T("Total Monthly Cost")
table.total_minute_cost.label = T("Total Cost per Minute")
table.total_megabyte_cost.label = T("Total Cost per Megabyte")
table.comments.label = T("Comments")
table = db.budget_kit_item
table.kit_id.label = T("Kit")
table.kit_id.represent = lambda kit_id: db(db.budget_kit.id == kit_id).select(db.budget_kit.code, limitby=(0, 1)).first().code
table.item_id.label = T("Item")
table.item_id.represent = lambda item_id: db(db.budget_item.id == item_id).select(db.budget_item.description, limitby=(0, 1)).first().description
table.quantity.label = T("Quantity")
table = db.budget_bundle
table.name.label = T("Name")
table.description.label = T("Description")
table.total_unit_cost.label = T("One time cost")
table.total_monthly_cost.label = T("Recurring cost")
table.comments.label = T("Comments")
table = db.budget_bundle_kit
table.bundle_id.label = T("Bundle")
table.bundle_id.represent = lambda bundle_id: db(db.budget_bundle.id == bundle_id).select(db.budget_bundle.description, limitby=(0, 1)).first().description
table.kit_id.label = T("Kit")
table.kit_id.represent = lambda kit_id: db(db.budget_kit.id == kit_id).select(db.budget_kit.code, limitby=(0, 1)).first().code
table.quantity.label = T("Quantity")
table.minutes.label = T("Minutes per Month")
table.megabytes.label = T("Megabytes per Month")
table = db.budget_bundle_item
table.bundle_id.label = T("Bundle")
table.bundle_id.represent = lambda bundle_id: db(db.budget_bundle.id == bundle_id).select(db.budget_bundle.description, limitby=(0, 1)).first().description
table.item_id.label = T("Item")
table.item_id.represent = lambda item_id: db(db.budget_item.id == item_id).select(db.budget_item.description, limitby=(0, 1)).first().description
table.quantity.label = T("Quantity")
table.minutes.label = T("Minutes per Month")
table.megabytes.label = T("Megabytes per Month")
table = db.budget_staff
table.name.label = T("Name")
table.grade.label = T("Grade")
table.salary.label = T("Monthly Salary")
table.travel.label = T("Travel Cost")
table.comments.label = T("Comments")
table = db.budget_location
table.code.label = T("Code")
table.description.label = T("Description")
table.subsistence.label = T("Subsistence Cost")
# UN terminology
#table.subsistence.label = "DSA"
table.hazard_pay.label = T("Hazard Pay")
table.comments.label = T("Comments")
#table = db.budget_project
#table.code.label = T("Code")
#table.title.label = T("Title")
#table.comments.label = T("Comments")
table = db.budget_budget
table.name.label = T("Name")
table.description.label = T("Description")
table.total_onetime_costs.label = T("Total One-time Costs")
table.total_recurring_costs.label = T("Total Recurring Costs")
table.comments.label = T("Comments")
table = db.budget_budget_bundle
table.budget_id.label = T("Budget")
table.budget_id.represent = lambda budget_id: db(db.budget_budget.id == budget_id).select(db.budget_budget.name, limitby=(0, 1)).first().name
#table.project_id.label = T("Project")
#table.project_id.represent = lambda project_id: db(db.budget_project.id == project_id).select(db.budget_project.code, limitby=(0, 1)).first().code
table.location_id.label = T("Location")
table.location_id.represent = lambda location_id: db(db.budget_location.id == location_id).select(db.budget_location.code, limitby=(0, 1)).first().code
table.bundle_id.label = T("Bundle")
table.bundle_id.represent = lambda bundle_id: db(db.budget_bundle.id == bundle_id).select(db.budget_bundle.name, limitby=(0, 1)).first().name
table.quantity.label = T("Quantity")
table.months.label = T("Months")
table = db.budget_budget_staff
table.budget_id.label = T("Budget")
table.budget_id.represent = lambda budget_id: db(db.budget_budget.id == budget_id).select(db.budget_budget.name, limitby=(0, 1)).first().name
#table.project_id.label = T("Project")
#table.project_id.represent = lambda project_id: db(db.budget_project.id == project_id).select(db.budget_project.code, limitby=(0, 1)).first().code
table.location_id.label = T("Location")
table.location_id.represent = lambda location_id: db(db.budget_location.id == location_id).select(db.budget_location.code, limitby=(0, 1)).first().code
table.staff_id.label = T("Staff")
table.staff_id.represent = lambda staff_id: db(db.budget_staff.id == staff_id).select(db.budget_staff.name, limitby=(0, 1)).first().name
table.quantity.label = T("Quantity")
table.months.label = T("Months")
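# Sketch of what the represent lambdas above buy us (the row values here are
# hypothetical): if a budget_kit row has code "COMMS-1", any kit_id foreign key
# pointing at it is displayed as "COMMS-1" in list and read views rather than
# as the raw integer id.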
# S3 framework functions
def index():
"Module's Home Page"
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
def parameters():
"Select which page to go to depending on login status"
table = db.budget_parameter
authorised = s3_has_permission("update", table)
if authorised:
redirect (URL(r=request, f="parameter", args=[1, "update"]))
else:
redirect (URL(r=request, f="parameter", args=[1, "read"]))
def parameter():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
table = db[tablename]
# Model Options
table.shipping.label = "Shipping cost"
table.logistics.label = "Procurement & Logistics cost"
table.admin.label = "Administrative support cost"
table.indirect.label = "Indirect support cost HQ"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_update = T("Edit Parameters"),
title_display = T("Parameters"))
s3xrc.model.configure(table, deletable=False)
return s3_rest_controller(module, resourcename)
def item():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_ITEM = T("Add Item")
LIST_ITEMS = T("List Items")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ITEM,
title_display = T("Item Details"),
title_list = LIST_ITEMS,
title_update = T("Edit Item"),
title_search = T("Search Items"),
subtitle_create = T("Add New Item"),
subtitle_list = T("Items"),
label_list_button = LIST_ITEMS,
label_create_button = ADD_ITEM,
label_delete_button = T("Delete Item"),
label_search_button = T("Search Items"),
msg_record_created = T("Item added"),
msg_record_modified = T("Item updated"),
msg_record_deleted = T("Item deleted"),
msg_list_empty = T("No Items currently registered"))
response.s3.formats.pdf = URL(r=request, f="item_export_pdf")
s3xrc.model.configure(table,
main="code",
extra="description",
orderby=db.budget_item.category_type)
return s3_rest_controller(module, resourcename)
def item_export_pdf():
"""
Export a list of Items in Adobe PDF format
Uses Geraldo Grouping Report
"""
try:
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
except ImportError:
session.error = REPORTLAB_ERROR
redirect(URL(r=request, c="item"))
try:
from geraldo import Report, ReportBand, ReportGroup, Label, ObjectValue, SystemField, landscape, BAND_WIDTH
from geraldo.generators import PDFGenerator
except ImportError:
session.error = GERALDO_ERROR
redirect(URL(r=request, c="item"))
table = db.budget_item
objects_list = db(table.id > 0).select(orderby=table.category_type)
if not objects_list:
session.warning = T("No data in this table - cannot create PDF!")
redirect(URL(r=request, f="item"))
import StringIO
output = StringIO.StringIO()
class MyReport(Report):
def __init__(self, queryset=None, T=None):
" Initialise parent class & make any necessary modifications "
Report.__init__(self, queryset)
self.T = T
def _T(self, rawstring):
return self.T(rawstring)
# can't use T() here!
#title = _T("Items")
title = "Items"
page_size = landscape(A4)
class band_page_header(ReportBand):
height = 1.3*cm
elements = [
SystemField(expression="%(report_title)s", top=0.1*cm,
left=0, width=BAND_WIDTH, style={"fontName": "Helvetica-Bold",
"fontSize": 14, "alignment": TA_CENTER}
),
Label(text="Code", top=0.8*cm, left=0.2*cm),
Label(text="Description", top=0.8*cm, left=3*cm),
Label(text="Unit Cost", top=0.8*cm, left=13*cm),
Label(text="per Month", top=0.8*cm, left=15*cm),
Label(text="per Minute", top=0.8*cm, left=17*cm),
Label(text="per Megabyte", top=0.8*cm, left=19*cm),
Label(text="Comments", top=0.8*cm, left=21*cm),
]
borders = {"bottom": True}
class band_page_footer(ReportBand):
height = 0.5*cm
elements = [
Label(text="%s" % request.utcnow.date(), top=0.1*cm, left=0),
SystemField(expression="Page # %(page_number)d of %(page_count)d", top=0.1*cm,
width=BAND_WIDTH, style={"alignment": TA_RIGHT}),
]
borders = {"top": True}
class band_detail(ReportBand):
height = 0.5*cm
auto_expand_height = True
elements = (
ObjectValue(attribute_name="code", left=0.2*cm, width=2.8*cm),
ObjectValue(attribute_name="description", left=3*cm, width=10*cm),
ObjectValue(attribute_name="unit_cost", left=13*cm, width=2*cm),
ObjectValue(attribute_name="monthly_cost", left=15*cm, width=2*cm),
ObjectValue(attribute_name="minute_cost", left=17*cm, width=2*cm),
ObjectValue(attribute_name="megabyte_cost", left=19*cm, width=2*cm),
ObjectValue(attribute_name="comments", left=21*cm, width=6*cm),
)
groups = [
ReportGroup(attribute_name="category_type",
band_header=ReportBand(
height=0.7*cm,
elements=[
ObjectValue(attribute_name="category_type", left=0, top=0.1*cm,
get_value=lambda instance: instance.category_type and budget_category_type_opts[instance.category_type],
style={"fontName": "Helvetica-Bold", "fontSize": 12})
],
borders={"bottom": True},
),
),
]
#report = MyReport(queryset=objects_list)
report = MyReport(queryset=objects_list, T=T)
report.generate_by(PDFGenerator, filename=output)
output.seek(0)
import gluon.contenttype
response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
filename = "%s_items.pdf" % (request.env.server_name)
response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
return output.read()
def kit():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_KIT = T("Add Kit")
LIST_KITS = T("List Kits")
s3.crud_strings[tablename] = Storage(
title_create = ADD_KIT,
title_display = T("Kit Details"),
title_list = LIST_KITS,
title_update = T("Edit Kit"),
title_search = T("Search Kits"),
subtitle_create = T("Add New Kit"),
subtitle_list = T("Kits"),
label_list_button = LIST_KITS,
label_create_button = ADD_KIT,
label_delete_button = T("Delete Kit"),
msg_record_created = T("Kit added"),
msg_record_modified = T("Kit updated"),
msg_record_deleted = T("Kit deleted"),
msg_list_empty = T("No Kits currently registered"))
response.s3.formats.pdf = URL(r=request, f="kit_export_pdf")
response.s3.formats.xls = URL(r=request, f="kit_export_xls")
if len(request.args) == 2:
s3xrc.model.configure(table,
update_next=URL(r=request, f="kit_item", args=request.args[1]))
return s3_rest_controller(module, resourcename, main="code")
def kit_item():
"Many to Many CRUD Controller"
format = request.vars.get("format", None)
if format:
if format == "xls":
redirect(URL(r=request, f="kit_export_xls"))
elif format == "pdf":
redirect(URL(r=request, f="kit_export_pdf"))
elif format == "csv":
if request.args(0):
if str.lower(request.args(0)) == "create":
return kit_import_csv()
else:
session.error = BADMETHOD
redirect(URL(r=request))
else:
# List
redirect(URL(r=request, f="kit_export_csv"))
else:
session.error = BADFORMAT
redirect(URL(r=request))
try:
kit = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a Kit!")
redirect(URL(r=request, f="kit"))
table = db.budget_kit_item
authorised = s3_has_permission("update", table)
_kit = db.budget_kit[kit]
title = _kit.code
kit_description = _kit.description
kit_total_cost = _kit.total_unit_cost
kit_monthly_cost = _kit.total_monthly_cost
query = (table.kit_id == kit)
# Start building the Return with the common items
output = dict(title=title, description=kit_description, total_cost=kit_total_cost, monthly_cost=kit_monthly_cost)
# Audit
s3_audit("list", module, "kit_item", record=kit, representation="html")
item_list = []
sqlrows = db(query).select()
even = True
if authorised:
# Audit
crud.settings.create_onaccept = lambda form: s3_audit("create", module, "kit_item",
form=form,
representation="html")
# Display a List_Create page with editable Quantities
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.item_id
_item = db.budget_item[id]
description = _item.description
id_link = A(id, _href=URL(r=request, f="item", args=[id, "read"]))
quantity_box = INPUT(_value=row.quantity, _size=4, _name="qty" + str(id))
unit_cost = _item.unit_cost
monthly_cost = _item.monthly_cost
minute_cost = _item.minute_cost
megabyte_cost = _item.megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name=id, _class="remove_item")
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minute_cost), TD(megabyte_cost), TD(total_units), TD(total_monthly), TD(checkbox, _align="center"), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("ID"), TH(table.item_id.label), TH(table.quantity.label), TH(db.budget_item.unit_cost.label), TH(db.budget_item.monthly_cost.label), TH(db.budget_item.minute_cost.label), TH(db.budget_item.megabyte_cost.label), TH(T("Total Units")), TH(T("Total Monthly")), TH(T("Remove"))))
table_footer = TFOOT(TR(TD(B(T("Totals for Kit:")), _colspan=7), TD(B(kit_total_cost)), TD(B(kit_monthly_cost)), TD(INPUT(_id="submit_button", _type="submit", _value=T("Update")))), _align="right")
items = DIV(FORM(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"), _name="custom", _method="post", _enctype="multipart/form-data", _action=URL(r=request, f="kit_update_items", args=[kit])))
subtitle = T("Contents")
crud.messages.submit_button = T("Add")
# Check for duplicates before Item is added to DB
crud.settings.create_onvalidation = lambda form: kit_dupes(form)
# Calculate Totals for the Kit after Item is added to DB
crud.settings.create_onaccept = lambda form: kit_total(form)
crud.messages.record_created = T("Kit Updated")
form = crud.create(table, next=URL(r=request, args=[kit]))
addtitle = T("Add New Item to Kit")
response.view = "%s/kit_item_list_create.html" % module
output.update(dict(subtitle=subtitle, items=items, addtitle=addtitle, form=form, kit=kit))
else:
# Display a simple List page
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.item_id
_item = db.budget_item[id]
description = _item.description
id_link = A(id, _href=URL(r=request, f="item", args=[id, "read"]))
quantity_box = row.quantity
unit_cost = _item.unit_cost
monthly_cost = _item.monthly_cost
minute_cost = _item.minute_cost
megabyte_cost = _item.megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minute_cost), TD(megabyte_cost), TD(total_units), TD(total_monthly), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("ID"), TH(table.item_id.label), TH(table.quantity.label), TH(db.budget_item.unit_cost.label), TH(db.budget_item.monthly_cost.label), TH(db.budget_item.minute_cost.label), TH(db.budget_item.megabyte_cost.label), TH(T("Total Units")), TH(T("Total Monthly"))))
table_footer = TFOOT(TR(TD(B(T("Totals for Kit:")), _colspan=7), TD(B(kit_total_cost)), TD(B(kit_monthly_cost)), _align="right"))
items = DIV(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"))
add_btn = A(T("Edit Contents"), _href=URL(r=request, c="default", f="user", args="login"), _class="action-btn")
response.view = "%s/kit_item_list.html" % module
output.update(dict(items=items, add_btn=add_btn))
return output
def kit_dupes(form):
"Checks for duplicate Item before adding to DB"
kit = form.vars.kit_id
item = form.vars.item_id
table = db.budget_kit_item
query = (table.kit_id == kit) & (table.item_id == item)
items = db(query).select()
if items:
session.error = T("Item already in Kit!")
redirect(URL(r=request, args=kit))
else:
return
def kit_update_items():
"Update a Kit's items (Quantity & Delete)"
try:
kit = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a Kit!")
redirect(URL(r=request, f="kit"))
table = db.budget_kit_item
authorised = s3_has_permission("update", table)
if authorised:
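# Request vars follow a simple convention: quantity boxes are named "qty<item_id>",
# while the delete checkboxes are named by the bare item id, so any var without
# the "qty" prefix is treated as a row to remove.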
for var in request.vars:
if "qty" in var:
item = var[3:]
quantity = request.vars[var]
query = (table.kit_id == kit) & (table.item_id == item)
db(query).update(quantity=quantity)
else:
# Delete
item = var
query = (table.kit_id == kit) & (table.item_id == item)
db(query).delete()
# Update the Total values
kit_totals(kit)
# Audit
s3_audit("update", module, "kit_item", record=kit, representation="html")
session.flash = T("Kit updated")
else:
session.error = T("Not authorised!")
redirect(URL(r=request, f="kit_item", args=[kit]))
def kit_export_xls():
"""
Export a list of Kits in Excel XLS format
Sheet 1 is a list of Kits
Then there is a separate sheet per kit, listing its component items
"""
try:
import xlwt
except ImportError:
session.error = XLWT_ERROR
redirect(URL(r=request, c="kit"))
import StringIO
output = StringIO.StringIO()
book = xlwt.Workbook()
# List of Kits
sheet1 = book.add_sheet("Kits")
# Header row for Kits sheet
row0 = sheet1.row(0)
cell = 0
table = db.budget_kit
kits = db(table.id > 0).select()
fields = [table[f] for f in table.fields if table[f].readable]
for field in fields:
row0.write(cell, field.label, xlwt.easyxf("font: bold True;"))
cell += 1
# For Header row on Items sheets
table = db.budget_item
fields_items = [table[f] for f in table.fields if table[f].readable]
row = 1
for kit in kits:
# The Kit details on Sheet 1
rowx = sheet1.row(row)
row += 1
cell1 = 0
for field in fields:
tab, col = str(field).split(".")
rowx.write(cell1, kit[col])
cell1 += 1
# Sheet per Kit detailing constituent Items
# Replace characters which are illegal in sheetnames
sheetname = kit.code.replace("/","_")
sheet = book.add_sheet(sheetname)
# Header row for Items sheet
row0 = sheet.row(0)
cell = 0
for field_item in fields_items:
row0.write(cell, field_item.label, xlwt.easyxf("font: bold True;"))
cell += 1
# List Items in each Kit
table = db.budget_kit_item
contents = db(table.kit_id == kit.id).select()
rowy = 1
for content in contents:
table = db.budget_item
item = db(table.id == content.item_id).select().first()
rowx = sheet.row(rowy)
rowy += 1
cell = 0
for field_item in fields_items:
tab, col = str(field_item).split(".")
# Do lookups for option fields
if col == "cost_type":
opt = item[col]
value = str(budget_cost_type_opts[opt])
elif col == "category_type":
opt = item[col]
value = str(budget_category_type_opts[opt])
else:
value = item[col]
rowx.write(cell, value)
cell += 1
book.save(output)
output.seek(0)
import gluon.contenttype
response.headers["Content-Type"] = gluon.contenttype.contenttype(".xls")
filename = "%s_kits.xls" % (request.env.server_name)
response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
return output.read()
def kit_export_pdf():
"""
Export a list of Kits in Adobe PDF format
Uses Geraldo SubReport
"""
try:
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
except ImportError:
session.error = REPORTLAB_ERROR
redirect(URL(r=request, c="kit"))
try:
from geraldo import Report, ReportBand, SubReport, Label, ObjectValue, SystemField, landscape, BAND_WIDTH
from geraldo.generators import PDFGenerator
except ImportError:
session.error = GERALDO_ERROR
redirect(URL(r=request, c="kit"))
table = db.budget_kit
objects_list = db(table.id > 0).select()
if not objects_list:
session.warning = T("No data in this table - cannot create PDF!")
redirect(URL(r=request))
import StringIO
output = StringIO.StringIO()
#class MySubReport(SubReport):
# def __init__(self, db=None, **kwargs):
# " Initialise parent class & make any necessary modifications "
# self.db = db
# SubReport.__init__(self, **kwargs)
class MyReport(Report):
def __init__(self, queryset=None, db=None):
" Initialise parent class & make any necessary modifications "
Report.__init__(self, queryset)
self.db = db
# can't use T() here!
title = "Kits"
page_size = landscape(A4)
class band_page_header(ReportBand):
height = 1.3*cm
elements = [
SystemField(expression="%(report_title)s", top=0.1*cm,
left=0, width=BAND_WIDTH, style={"fontName": "Helvetica-Bold",
"fontSize": 14, "alignment": TA_CENTER}
),
Label(text="Code", top=0.8*cm, left=0.2*cm),
Label(text="Description", top=0.8*cm, left=2*cm),
Label(text="Cost", top=0.8*cm, left=10*cm),
Label(text="Monthly", top=0.8*cm, left=12*cm),
Label(text="per Minute", top=0.8*cm, left=14*cm),
Label(text="per Megabyte", top=0.8*cm, left=16*cm),
Label(text="Comments", top=0.8*cm, left=18*cm),
]
borders = {"bottom": True}
class band_page_footer(ReportBand):
height = 0.5*cm
elements = [
Label(text="%s" % request.utcnow.date(), top=0.1*cm, left=0),
SystemField(expression="Page # %(page_number)d of %(page_count)d", top=0.1*cm,
width=BAND_WIDTH, style={"alignment": TA_RIGHT}),
]
borders = {"top": True}
class band_detail(ReportBand):
height = 0.5*cm
auto_expand_height = True
elements = (
ObjectValue(attribute_name="code", left=0.2*cm, width=1.8*cm),
ObjectValue(attribute_name="description", left=2*cm, width=8*cm),
ObjectValue(attribute_name="total_unit_cost", left=10*cm, width=2*cm),
ObjectValue(attribute_name="total_monthly_cost", left=12*cm, width=2*cm),
ObjectValue(attribute_name="total_minute_cost", left=14*cm, width=2*cm),
ObjectValue(attribute_name="total_megabyte_cost", left=16*cm, width=2*cm),
ObjectValue(attribute_name="comments", left=18*cm, width=6*cm),
)
subreports = [
SubReport(
#queryset_string = "db((db.budget_kit_item.kit_id == %(object)s.id) & (db.budget_item.id == db.budget_kit_item.item_id)).select(db.budget_item.code, db.budget_item.description, db.budget_item.unit_cost)",
#queryset_string = "db(db.budget_kit_item.kit_id == %(object)s.id).select()",
band_header = ReportBand(
height=0.5*cm,
elements=[
Label(text="Item ID", top=0, left=0.2*cm, style={"fontName": "Helvetica-Bold"}),
Label(text="Quantity", top=0, left=2*cm, style={"fontName": "Helvetica-Bold"}),
#Label(text="Unit Cost", top=0, left=4*cm, style={"fontName": "Helvetica-Bold"}),
],
borders={"top": True, "left": True, "right": True},
),
detail_band = ReportBand(
height=0.5*cm,
elements=[
ObjectValue(attribute_name="item_id", top=0, left=0.2*cm),
ObjectValue(attribute_name="quantity", top=0, left=2*cm),
#ObjectValue(attribute_name="unit_cost", top=0, left=4*cm),
]
),
),
]
#report = MyReport(queryset=objects_list)
report = MyReport(queryset=objects_list, db=db)
report.generate_by(PDFGenerator, filename=output)
output.seek(0)
import gluon.contenttype
response.headers["Content-Type"] = gluon.contenttype.contenttype(".pdf")
filename = "%s_kits.pdf" % (request.env.server_name)
response.headers["Content-disposition"] = "attachment; filename=\"%s\"" % filename
return output.read()
def kit_export_csv():
"""
Export kits in CSV format
Concatenates: kits, items & kit_item
"""
output = ""
for resourcename in ["kit", "item", "kit_item"]:
_table = module + "_" + resourcename
table = db[_table]
# Filter Search list to just those records which user can read
query = s3_accessible_query("read", table)
# Filter Search List to remove entries which have been deleted
if "deleted" in table:
query = ((table.deleted == False) | (table.deleted == None)) & query # includes None for backward compatibility
output += "TABLE " + _table + "\n"
output += str(db(query).select())
output += "\n\n"
import gluon.contenttype
response.headers["Content-Type"] = gluon.contenttype.contenttype(".csv")
filename = "%s_kits.csv" % (request.env.server_name)
response.headers["Content-disposition"] = "attachment; filename=%s" % filename
return output
def kit_import_csv():
"""
Import kits in CSV format
Assumes concatenated: kits, items & kit_item
"""
# Read in POST
file = request.vars.filename.file
try:
# Assumes that it is a concatenation of tables
shn_import_csv(file)
session.flash = T("Data uploaded")
except:
session.error = T("Unable to parse CSV file!")
redirect(URL(r=request, f="kit"))
def bundle():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_BUNDLE = T("Add Bundle")
LIST_BUNDLES = T("List Bundles")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BUNDLE,
title_display = T("Bundle Details"),
title_list = LIST_BUNDLES,
title_update = T("Edit Bundle"),
title_search = T("Search Bundles"),
subtitle_create = T("Add New Bundle"),
subtitle_list = T("Bundles"),
label_list_button = LIST_BUNDLES,
label_create_button = ADD_BUNDLE,
label_delete_button = T("Delete Bundle"),
msg_record_created = T("Bundle added"),
msg_record_modified = T("Bundle updated"),
msg_record_deleted = T("Bundle deleted"),
msg_list_empty = T("No Bundles currently registered"))
if len(request.args) == 2:
s3xrc.model.configure(table,
update_next=URL(r=request, f="bundle_kit_item", args=request.args[1]))
return s3_rest_controller(module, resourcename)
def bundle_kit_item():
"Many to Many CRUD Controller"
try:
bundle = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a bundle!")
redirect(URL(r=request, f="bundle"))
tables = [db.budget_bundle_kit, db.budget_bundle_item]
authorised = s3_has_permission("update", tables[0]) and s3_has_permission("update", tables[1])
_bundle = db.budget_bundle[bundle]
title = _bundle.name
bundle_description = _bundle.description
bundle_total_cost = _bundle.total_unit_cost
bundle_monthly_cost = _bundle.total_monthly_cost
# Start building the Return with the common items
output = dict(title=title, description=bundle_description, total_cost=bundle_total_cost, monthly_cost=bundle_monthly_cost)
# Audit
s3_audit("list", module, "bundle_kit_item", record=bundle, representation="html")
item_list = []
even = True
if authorised:
# Audit
crud.settings.create_onaccept = lambda form: s3_audit("create", module, "bundle_kit_item",
form=form,
representation="html")
# Display a List_Create page with editable Quantities, Minutes & Megabytes
# Kits
query = (tables[0].bundle_id == bundle)
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.kit_id
_kit = db.budget_kit[id]
description = _kit.description
id_link = A(id, _href=URL(r=request, f="kit", args=[id, "read"]))
quantity_box = INPUT(_value=row.quantity, _size=4, _name="kit_qty_" + str(id))
minute_cost = _kit.total_minute_cost
if minute_cost:
minutes_box = INPUT(_value=row.minutes, _size=4, _name="kit_mins_" + str(id))
else:
minutes_box = INPUT(_value=0, _size=4, _name="kit_mins_" + str(id), _disabled="disabled")
megabyte_cost = _kit.total_megabyte_cost
if megabyte_cost:
megabytes_box = INPUT(_value=row.megabytes, _size=4, _name="kit_mbytes_" + str(id))
else:
megabytes_box = INPUT(_value=0, _size=4, _name="kit_mbytes_" + str(id), _disabled="disabled")
unit_cost = _kit.total_unit_cost
monthly_cost = _kit.total_monthly_cost
minute_cost = _kit.total_minute_cost
megabyte_cost = _kit.total_megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name="kit_" + str(id), _class="remove_item")
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minutes_box), TD(minute_cost), TD(megabytes_box), TD(megabyte_cost), TD(total_units), TD(total_monthly), TD(checkbox, _align="center"), _class=theclass, _align="right"))
# Items
query = tables[1].bundle_id == bundle
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.item_id
_item = db.budget_item[id]
description = _item.description
id_link = A(id, _href=URL(r=request, f="item", args=[id, "read"]))
quantity_box = INPUT(_value=row.quantity, _size=4, _name="item_qty_" + str(id))
minute_cost = _item.minute_cost
if minute_cost:
minutes_box = INPUT(_value=row.minutes, _size=4, _name="item_mins_" + str(id))
else:
minutes_box = INPUT(_value=0, _size=4, _name="item_mins_" + str(id), _disabled="disabled")
megabyte_cost = _item.megabyte_cost
if megabyte_cost:
megabytes_box = INPUT(_value=row.megabytes, _size=4, _name="item_mbytes_" + str(id))
else:
megabytes_box = INPUT(_value=0, _size=4, _name="item_mbytes_" + str(id), _disabled="disabled")
unit_cost = _item.unit_cost
monthly_cost = _item.monthly_cost
minute_cost = _item.minute_cost
megabyte_cost = _item.megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name="item_" + str(id), _class="remove_item")
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minutes_box), TD(minute_cost), TD(megabytes_box), TD(megabyte_cost), TD(total_units), TD(total_monthly), TD(checkbox, _align="center"), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("ID"), TH(T("Description")), TH(tables[0].quantity.label), TH(db.budget_item.unit_cost.label), TH(db.budget_item.monthly_cost.label), TH(tables[0].minutes.label), TH(db.budget_item.minute_cost.label), TH(tables[0].megabytes.label), TH(db.budget_item.megabyte_cost.label), TH(T("Total Units")), TH(T("Total Monthly")), TH(T("Remove"))))
table_footer = TFOOT(TR(TD(B(T("Totals for Bundle:")), _colspan=9), TD(B(bundle_total_cost)), TD(B(bundle_monthly_cost)), TD(INPUT(_id="submit_button", _type="submit", _value=T("Update")))), _align="right")
items = DIV(FORM(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"), _name="custom", _method="post", _enctype="multipart/form-data", _action=URL(r=request, f="bundle_update_items", args=[bundle])))
subtitle = T("Contents")
crud.messages.submit_button = T("Add")
# Check for duplicates before Item is added to DB
crud.settings.create_onvalidation = lambda form: bundle_dupes(form)
# Calculate Totals for the Bundle after Item is added to DB
crud.settings.create_onaccept = lambda form: bundle_total(form)
crud.messages.record_created = T("Bundle Updated")
form1 = crud.create(tables[0], next=URL(r=request, args=[bundle]))
form1[0][0].append(TR(TD(T("Type") + ":"), TD(LABEL(T("Kit"), INPUT(_type="radio", _name="kit_item1", _value="Kit", value="Kit")), LABEL(T("Item"), INPUT(_type="radio", _name="kit_item1", _value="Item", value="Kit")))))
form2 = crud.create(tables[1], next=URL(r=request, args=[bundle]))
form2[0][0].append(TR(TD(T("Type") + ":"), TD(LABEL(T("Kit"), INPUT(_type="radio", _name="kit_item2", _value="Kit", value="Item")), LABEL(T("Item"), INPUT(_type="radio", _name="kit_item2", _value="Item", value="Item")))))
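# Note: in web2py the value= keyword pre-selects the radio whose _value matches,
# so form1 defaults to the "Kit" option and form2 to "Item".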
addtitle = T("Add to Bundle")
response.view = "%s/bundle_kit_item_list_create.html" % module
output.update(dict(subtitle=subtitle, items=items, addtitle=addtitle, form1=form1, form2=form2, bundle=bundle))
else:
# Display a simple List page
# Kits
query = tables[0].bundle_id == bundle
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.kit_id
_kit = db.budget_kit[id]
description = _kit.description
id_link = A(id, _href=URL(r=request, f="kit", args=[id, "read"]))
quantity_box = INPUT(_value=row.quantity, _size=4, _name="kit_qty_" + str(id))
minute_cost = _kit.total_minute_cost
if minute_cost:
minutes_box = INPUT(_value=row.minutes, _size=4, _name="kit_mins_" + str(id))
else:
minutes_box = INPUT(_value=0, _size=4, _name="kit_mins_" + str(id), _disabled="disabled")
megabyte_cost = _kit.total_megabyte_cost
if megabyte_cost:
megabytes_box = INPUT(_value=row.megabytes, _size=4, _name="kit_mbytes_" + str(id))
else:
megabytes_box = INPUT(_value=0, _size=4, _name="kit_mbytes_" + str(id), _disabled="disabled")
unit_cost = _kit.total_unit_cost
monthly_cost = _kit.total_monthly_cost
minute_cost = _kit.total_minute_cost
megabyte_cost = _kit.total_megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name="kit_" + str(id), _class="remove_item")
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minutes_box), TD(minute_cost), TD(megabytes_box), TD(megabyte_cost), TD(total_units), TD(total_monthly), _class=theclass, _align="right"))
# Items
query = tables[1].bundle_id == bundle
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.item_id
_item = db.budget_item[id]
description = _item.description
id_link = A(id, _href=URL(r=request, f="item", args=[id, "read"]))
quantity_box = row.quantity
minute_cost = _item.minute_cost
minutes_box = row.minutes
megabyte_cost = _item.megabyte_cost
megabytes_box = row.megabytes
unit_cost = _item.unit_cost
monthly_cost = _item.monthly_cost
minute_cost = _item.minute_cost
megabyte_cost = _item.megabyte_cost
total_units = unit_cost * row.quantity
total_monthly = monthly_cost * row.quantity
item_list.append(TR(TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(minutes_box), TD(minute_cost), TD(megabytes_box), TD(megabyte_cost), TD(total_units), TD(total_monthly), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("ID"), TH(T("Description")), TH(tables[0].quantity.label), TH(db.budget_item.unit_cost.label), TH(db.budget_item.monthly_cost.label), TH(tables[0].minutes.label), TH(db.budget_item.minute_cost.label), TH(tables[0].megabytes.label), TH(db.budget_item.megabyte_cost.label), TH(T("Total Units")), TH(T("Total Monthly"))))
table_footer = TFOOT(TR(TD(B(T("Totals for Bundle:")), _colspan=9), TD(B(bundle_total_cost)), TD(B(bundle_monthly_cost))), _align="right")
items = DIV(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"))
add_btn = A(T("Edit Contents"), _href=URL(r=request, c="default", f="user", args="login"), _class="action-btn")
response.view = "%s/bundle_kit_item_list.html" % module
output.update(dict(items=items, add_btn=add_btn))
return output
def bundle_dupes(form):
"Checks for duplicate Kit/Item before adding to DB"
bundle = form.vars.bundle_id
if "kit_id" in form.vars:
kit = form.vars.kit_id
table = db.budget_bundle_kit
query = (table.bundle_id == bundle) & (table.kit_id==kit)
elif "item_id" in form.vars:
item = form.vars.item_id
table = db.budget_bundle_item
query = (table.bundle_id == bundle) & (table.item_id==item)
else:
# Something went wrong!
return
items = db(query).select()
if items:
session.error = T("Item already in Bundle!")
redirect(URL(r=request, args=bundle))
else:
return
def bundle_update_items():
"Update a Bundle's items (Quantity, Minutes, Megabytes & Delete)"
try:
bundle = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a bundle!")
redirect(URL(r=request, f="bundle"))
tables = [db.budget_bundle_kit, db.budget_bundle_item]
authorised = s3_has_permission("update", tables[0]) and s3_has_permission("update", tables[1])
if authorised:
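# Field names encode both the table and the column being updated:
# "kit_qty_<id>", "kit_mins_<id>", "kit_mbytes_<id>" (and the item_* equivalents),
# while a bare "kit_<id>" / "item_<id>" checkbox marks the row for deletion.
# The slice offsets below simply strip those prefixes to recover the record id.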
for var in request.vars:
if "kit" in var:
if "qty" in var:
kit = var[8:]
quantity = request.vars[var]
query = (tables[0].bundle_id == bundle) & (tables[0].kit_id == kit)
db(query).update(quantity=quantity)
elif "mins" in var:
kit = var[9:]
minutes = request.vars[var]
query = (tables[0].bundle_id == bundle) & (tables[0].kit_id == kit)
db(query).update(minutes=minutes)
elif "mbytes" in var:
kit = var[11:]
megabytes = request.vars[var]
query = (tables[0].bundle_id == bundle) & (tables[0].kit_id == kit)
db(query).update(megabytes=megabytes)
else:
# Delete
kit = var[4:]
query = (tables[0].bundle_id == bundle) & (tables[0].kit_id == kit)
db(query).delete()
if "item" in var:
if "qty" in var:
item = var[9:]
quantity = request.vars[var]
query = (tables[1].bundle_id == bundle) & (tables[1].item_id == item)
db(query).update(quantity=quantity)
elif "mins" in var:
item = var[10:]
minutes = request.vars[var]
query = (tables[1].bundle_id == bundle) & (tables[1].item_id == item)
db(query).update(minutes=minutes)
elif "mbytes" in var:
item = var[12:]
megabytes = request.vars[var]
query = (tables[1].bundle_id == bundle) & (tables[1].item_id == item)
db(query).update(megabytes=megabytes)
else:
# Delete
item = var[5:]
query = (tables[1].bundle_id == bundle) & (tables[1].item_id == item)
db(query).delete()
# Update the Total values
bundle_totals(bundle)
# Audit
s3_audit("update", module, "bundle_kit_item", record=bundle, representation="html")
session.flash = T("Bundle updated")
else:
session.error = T("Not authorised!")
redirect(URL(r=request, f="bundle_kit_item", args=[bundle]))
def staff():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
#table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_STAFF_TYPE = T("Add Staff Type")
LIST_STAFF_TYPE = T("List Staff Types")
s3.crud_strings[tablename] = Storage(
title_create = ADD_STAFF_TYPE,
title_display = T("Staff Type Details"),
title_list = LIST_STAFF_TYPE,
title_update = T("Edit Staff Type"),
title_search = T("Search Staff Types"),
subtitle_create = T("Add New Staff Type"),
subtitle_list = T("Staff Types"),
label_list_button = LIST_STAFF_TYPE,
label_create_button = ADD_STAFF_TYPE,
label_delete_button = T("Delete Staff Type"),
msg_record_created = T("Staff Type added"),
msg_record_modified = T("Staff Type updated"),
msg_record_deleted = T("Staff Type deleted"),
msg_list_empty = T("No Staff Types currently registered"))
return s3_rest_controller(module, resourcename)
# This should be deprecated & replaced with a link to gis_location
def location():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
#table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_LOCATION = T("Add Location")
LIST_LOCATIONS = T("List Locations")
s3.crud_strings[tablename] = Storage(
title_create = ADD_LOCATION,
title_display = T("Location Details"),
title_list = LIST_LOCATIONS,
title_update = T("Edit Location"),
title_search = T("Search Locations"),
subtitle_create = T("Add New Location"),
subtitle_list = T("Locations"),
label_list_button = LIST_LOCATIONS,
label_create_button = ADD_LOCATION,
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No Locations currently registered"))
return s3_rest_controller(module, resourcename, main="code")
def project():
""" RESTful CRUD controller """
#tablename = "project_%s" % (resourcename)
#table = db[tablename]
tabs = [(T("Basic Details"), None),
(T("Staff"), "staff"),
(T("Tasks"), "task"),
#(T("Donors"), "organisation"),
#(T("Sites"), "site"), # Ticket 195
]
rheader = lambda r: shn_project_rheader(r, tabs=tabs)
output = s3_rest_controller("project", resourcename,
rheader=rheader)
return output
def budget():
""" RESTful CRUD controller """
tablename = module + "_" + resourcename
#table = db[tablename]
# Model options used in multiple controllers so defined at the top of the file
# CRUD Strings
ADD_BUDGET = T("Add Budget")
LIST_BUDGETS = T("List Budgets")
s3.crud_strings[tablename] = Storage(
title_create = ADD_BUDGET,
title_display = T("Budget Details"),
title_list = LIST_BUDGETS,
title_update = T("Edit Budget"),
title_search = T("Search Budgets"),
subtitle_create = T("Add New Budget"),
subtitle_list = T("Budgets"),
label_list_button = LIST_BUDGETS,
label_create_button = ADD_BUDGET,
label_delete_button = T("Delete Budget"),
msg_record_created = T("Budget added"),
msg_record_modified = T("Budget updated"),
msg_record_deleted = T("Budget deleted"),
msg_list_empty = T("No Budgets currently registered"))
return s3_rest_controller(module, resourcename)
def budget_staff_bundle():
"Many to Many CRUD Controller"
try:
budget = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a Budget!")
redirect(URL(r=request, f="budget"))
tables = [db.budget_budget_staff, db.budget_budget_bundle]
authorised = s3_has_permission("update", tables[0]) and s3_has_permission("update", tables[1])
_budget = db.budget_budget[budget]
title = _budget.name
budget_description = _budget.description
budget_onetime_cost = _budget.total_onetime_costs
budget_recurring_cost = _budget.total_recurring_costs
# Start building the Return with the common items
output = dict(title=title, description=budget_description, onetime_cost=budget_onetime_cost, recurring_cost=budget_recurring_cost)
# Audit
s3_audit("list", module, "budget_staff_bundle", record=budget, representation="html")
item_list = []
even = True
if authorised:
# Audit
crud.settings.create_onaccept = lambda form: s3_audit("create", module, "budget_staff_bundle",
form=form,
representation="html")
# Display a List_Create page with editable Quantities & Months
# Staff
query = tables[0].budget_id == budget
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.staff_id
_staff = db.budget_staff[id]
name = _staff.name
id_link = A(name, _href=URL(r=request, f="staff", args=[id, "read"]))
location = db.budget_location[row.location_id].code
location_link = A(location, _href=URL(r=request, f="location", args=[row.location_id, "read"]))
project = db.project_project[row.project_id].code
project_link = A(project, _href=URL(r=request, c="org", f="project", args=[row.project_id, "read"]))
description = _staff.comments
quantity_box = INPUT(_value=row.quantity, _size=4, _name="staff_qty_" + str(id))
months_box = INPUT(_value=row.months, _size=4, _name="staff_months_" + str(id))
salary = _staff.salary
travel = _staff.travel
onetime = travel * row.quantity
recurring = salary * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name="staff_" + str(id), _class="remove_item")
item_list.append(TR(TD(location_link), TD(project_link), TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(travel), TD(salary), TD(months_box), TD(onetime), TD(recurring), TD(checkbox, _align="center"), _class=theclass, _align="right"))
# Bundles
query = tables[1].budget_id==budget
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.bundle_id
_bundle = db.budget_bundle[id]
name = _bundle.name
id_link = A(name, _href=URL(r=request, f="bundle", args=[id, "read"]))
location = db.budget_location[row.location_id].code
location_link = A(location, _href=URL(r=request, f="location", args=[row.location_id, "read"]))
project = db.project_project[row.project_id].code
project_link = A(project, _href=URL(r=request, c="org", f="project", args=[row.project_id, "read"]))
description = _bundle.description
quantity_box = INPUT(_value=row.quantity, _size=4, _name="bundle_qty_" + str(id))
months_box = INPUT(_value=row.months, _size=4, _name="bundle_months_" + str(id))
unit_cost = _bundle.total_unit_cost
monthly_cost = _bundle.total_monthly_cost
onetime = unit_cost * row.quantity
recurring = monthly_cost * row.months
checkbox = INPUT(_type="checkbox", _value="on", _name="bundle_" + str(id), _class="remove_item")
item_list.append(TR(TD(location_link), TD(project_link), TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(months_box), TD(onetime), TD(recurring), TD(checkbox, _align="center"), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("Location"), TH("Project"), TH("Item"), TH(T("Description")), TH(tables[0].quantity.label), TH(T("One-time costs")), TH(T("Recurring costs")), TH(tables[0].months.label), TH(db.budget_budget.total_onetime_costs.label), TH(db.budget_budget.total_recurring_costs.label), TH(T("Remove"))))
table_footer = TFOOT(TR(TD(B(T("Totals for Budget:")), _colspan=8), TD(B(budget_onetime_cost)), TD(B(budget_recurring_cost)), TD(INPUT(_id="submit_button", _type="submit", _value=T("Update")))), _align="right")
items = DIV(FORM(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"), _name="custom", _method="post", _enctype="multipart/form-data", _action=URL(r=request, f="budget_update_items", args=[budget])))
subtitle = T("Contents")
crud.messages.submit_button = T("Add")
# Check for duplicates before Item is added to DB
crud.settings.create_onvalidation = lambda form: budget_dupes(form)
# Calculate Totals for the budget after Item is added to DB
crud.settings.create_onaccept = lambda form: budget_total(form)
crud.messages.record_created = T("Budget Updated")
form1 = crud.create(tables[0], next=URL(r=request, args=[budget]))
form1[0][0].append(TR(TD(T("Type") + ":"), TD(LABEL(T("Staff"), INPUT(_type="radio", _name="staff_bundle1", _value="Staff", value="Staff")), LABEL(T("Bundle"), INPUT(_type="radio", _name="staff_bundle1", _value="Bundle", value="Staff")))))
form2 = crud.create(tables[1], next=URL(r=request, args=[budget]))
form2[0][0].append(TR(TD(T("Type") + ":"), TD(LABEL(T("Staff"), INPUT(_type="radio", _name="staff_bundle2", _value="Staff", value="Bundle")), LABEL(T("Bundle"), INPUT(_type="radio", _name="staff_bundle2", _value="Bundle", value="Bundle")))))
addtitle = T("Add to budget")
response.view = "%s/budget_staff_bundle_list_create.html" % module
output.update(dict(subtitle=subtitle, items=items, addtitle=addtitle, form1=form1, form2=form2, budget=budget))
else:
# Display a simple List page
# Staff
query = tables[0].budget_id==budget
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.staff_id
_staff = db.budget_staff[id]
name = _staff.name
id_link = A(name, _href=URL(r=request, f="staff", args=[id, "read"]))
location = db.budget_location[row.location_id].code
location_link = A(location, _href=URL(r=request, f="location", args=[row.location_id, "read"]))
project = db.project_project[row.project_id].code
project_link = A(project, _href=URL(r=request, c="org", f="project", args=[row.project_id, "read"]))
description = _staff.comments
quantity_box = INPUT(_value=row.quantity, _size=4, _name="staff_qty_" + str(id))
months_box = INPUT(_value=row.months, _size=4, _name="staff_mins_" + str(id))
salary = _staff.salary
travel = _staff.travel
onetime = travel * row.quantity
recurring = salary * row.quantity
checkbox = INPUT(_type="checkbox", _value="on", _name="staff_" + str(id), _class="remove_item")
item_list.append(TR(TD(location_link), TD(project_link), TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(travel), TD(salary), TD(months_box), TD(onetime), TD(recurring), _class=theclass, _align="right"))
# Bundles
query = tables[1].budget_id==budget
sqlrows = db(query).select()
for row in sqlrows:
if even:
theclass = "even"
even = False
else:
theclass = "odd"
even = True
id = row.bundle_id
_bundle = db.budget_bundle[id]
name = _bundle.name
id_link = A(name, _href=URL(r=request, f="bundle", args=[id, "read"]))
location = db.budget_location[row.location_id].code
location_link = A(location, _href=URL(r=request, f="location", args=[row.location_id, "read"]))
project = db.project_project[row.project_id].code
project_link = A(project, _href=URL(r=request, c="org", f="project", args=[row.project_id, "read"]))
description = _bundle.description
quantity_box = row.quantity
months_box = row.months
unit_cost = _bundle.total_unit_cost
monthly_cost = _bundle.total_monthly_cost
onetime = unit_cost * row.quantity
recurring = monthly_cost * row.months
item_list.append(TR(TD(location_link), TD(project_link), TD(id_link), TD(description, _align="left"), TD(quantity_box), TD(unit_cost), TD(monthly_cost), TD(months_box), TD(onetime), TD(recurring), _class=theclass, _align="right"))
table_header = THEAD(TR(TH("Location"),
TH("Project"),
TH("Item"),
TH(T("Description")),
TH(tables[0].quantity.label),
TH(T("One-time costs")),
TH(T("Recurring costs")),
TH(tables[0].months.label),
TH(db.budget_budget.total_onetime_costs.label),
TH(db.budget_budget.total_recurring_costs.label)))
table_footer = TFOOT(TR(TD(B(T("Totals for Budget:")), _colspan=8),
TD(B(budget_onetime_cost)),
TD(B(budget_recurring_cost))), _align="right")
items = DIV(TABLE(table_header, TBODY(item_list), table_footer, _id="table-container"))
add_btn = A(T("Edit Contents"), _href=URL(r=request, c="default", f="user", args="login"), _class="action-btn")
response.view = "%s/budget_staff_bundle_list.html" % module
output.update(dict(items=items, add_btn=add_btn))
return output
def budget_dupes(form):
"Checks for duplicate staff/bundle before adding to DB"
budget = form.vars.budget_id
if "staff_id" in form.vars:
staff = form.vars.staff_id
table = db.budget_budget_staff
query = (table.budget_id == budget) & (table.staff_id == staff)
elif "bundle_id" in form.vars:
bundle = form.vars.bundle_id
table = db.budget_budget_bundle
query = (table.budget_id == budget) & (table.bundle_id == bundle)
else:
# Something went wrong!
return
items = db(query).select()
if items:
session.error = T("Item already in budget!")
redirect(URL(r=request, args=budget))
else:
return
def budget_total(form):
"Calculate Totals for the budget specified by Form"
if "budget_id" in form.vars:
# called by budget_staff_bundle()
budget = form.vars.budget_id
else:
# called by budget()
budget = form.vars.id
budget_totals(budget)
def budget_totals(budget):
"Calculate Totals for a budget"
total_onetime_cost = 0
total_recurring_cost = 0
table = db.budget_budget_staff
query = (table.budget_id == budget)
staffs = db(query).select()
for staff in staffs:
query = (table.budget_id == budget) & (table.staff_id == staff.staff_id)
row = db(query).select(table.quantity, table.months, limitby=(0, 1)).first()
quantity = row.quantity
months = row.months
row2 = db(db.budget_staff.id == staff.staff_id).select(db.budget_staff.travel, db.budget_staff.salary, limitby=(0, 1)).first()
row3 = db(db.budget_location.id == staff.location_id).select(db.budget_location.subsistence, db.budget_location.hazard_pay, limitby=(0, 1)).first()
total_onetime_cost += row2.travel * quantity
total_recurring_cost += row2.salary * quantity * months
total_recurring_cost += row3.subsistence * quantity * months
total_recurring_cost += row3.hazard_pay * quantity * months
table = db.budget_budget_bundle
query = (table.budget_id == budget)
bundles = db(query).select()
for bundle in bundles:
query = (table.budget_id == budget) & (table.bundle_id == bundle.bundle_id)
row = db(query).select(table.quantity, table.months, limitby=(0, 1)).first()
quantity = row.quantity
months = row.months
row2 = db(db.budget_bundle.id == bundle.bundle_id).select(db.budget_bundle.total_unit_cost, db.budget_bundle.total_monthly_cost, limitby=(0, 1)).first()
total_onetime_cost += row2.total_unit_cost * quantity
total_recurring_cost += row2.total_monthly_cost * quantity * months
db(db.budget_budget.id == budget).update(total_onetime_costs=total_onetime_cost, total_recurring_costs=total_recurring_cost)
def budget_update_items():
"Update a Budget's items (Quantity, Months & Delete)"
try:
budget = int(request.args(0))
except (TypeError, ValueError):
session.error = T("Need to specify a Budget!")
redirect(URL(r=request, f="budget"))
tables = [db.budget_budget_staff, db.budget_budget_bundle]
authorised = s3_has_permission("update", tables[0]) and s3_has_permission("update", tables[1])
if authorised:
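# Same naming convention as the other update handlers: "staff_qty_<id>",
# "staff_months_<id>", "bundle_qty_<id>", "bundle_months_<id>", with bare
# "staff_<id>" / "bundle_<id>" checkboxes marking rows for deletion.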
for var in request.vars:
if "staff" in var:
if "qty" in var:
staff = var[10:]
quantity = request.vars[var]
query = (tables[0].budget_id == budget) & (tables[0].staff_id == staff)
db(query).update(quantity=quantity)
elif "months" in var:
staff = var[13:]
months = request.vars[var]
query = (tables[0].budget_id == budget) & (tables[0].staff_id == staff)
db(query).update(months=months)
else:
# Delete
staff = var[6:]
query = (tables[0].budget_id == budget) & (tables[0].staff_id == staff)
db(query).delete()
if "bundle" in var:
if "qty" in var:
bundle = var[11:]
quantity = request.vars[var]
query = (tables[1].budget_id == budget) & (tables[1].bundle_id == bundle)
db(query).update(quantity=quantity)
elif "months" in var:
bundle = var[14:]
months = request.vars[var]
query = (tables[1].budget_id == budget) & (tables[1].bundle_id == bundle)
db(query).update(months=months)
else:
# Delete
bundle = var[7:]
query = (tables[1].budget_id == budget) & (tables[1].bundle_id == bundle)
db(query).delete()
# Update the Total values
budget_totals(budget)
# Audit
s3_audit("update", module, "budget_staff_bundle", record=budget, representation="html")
session.flash = T("Budget updated")
else:
session.error = T("Not authorised!")
redirect(URL(r=request, f="budget_staff_bundle", args=[budget]))
| {
"content_hash": "631ed17be99899ad4c963b021b2a36d9",
"timestamp": "",
"source": "github",
"line_count": 1466,
"max_line_length": 370,
"avg_line_length": 46.10709413369713,
"alnum_prop": 0.5799565043717545,
"repo_name": "ptressel/sahana-eden-madpub",
"id": "d0226e32241f7eaeaa10cd7c47b108345997366c",
"size": "67618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/budget.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14896489"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "14827014"
},
{
"name": "Shell",
"bytes": "1171"
}
],
"symlink_target": ""
} |
import inspect
import mock
import tests.unit.responses as responses
class MockAWS(object):
def __init__(self, profile=None, region=None):
self.response_map = {}
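# Build a {service_name: {request_name: canned_response}} map from the functions
# defined in tests.unit.responses, whose names follow the "<service>_<request>" convention.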
for name, value in inspect.getmembers(responses):
if name.startswith('__'):
continue
if '_' in name:
service_name, request_name = name.split('_', 1)
if service_name not in self.response_map:
self.response_map[service_name] = {}
self.response_map[service_name][request_name] = value
def create_client(self, client_name):
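# Return a Mock whose request methods replay the canned responses for this
# service; unknown service names fall through and return None.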
client = None
if client_name in self.response_map:
client = mock.Mock()
for request in self.response_map[client_name]:
response = self.response_map[client_name][request]
setattr(client, request, mock.Mock(side_effect=response))
return client
def get_aws(context):
return MockAWS()
| {
"content_hash": "02759b3f2cdf04b2df994311c8c72b2c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 73,
"avg_line_length": 30.8125,
"alnum_prop": 0.5821501014198783,
"repo_name": "ryansb/kappa",
"id": "b0da5ab17fccd035af158a13500ce06536edc3de",
"size": "986",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/unit/mock_aws.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "56412"
}
],
"symlink_target": ""
} |
import unittest, random, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_import as h2i
params = {
'nbins': 1000,
'ntrees': 10,
# apparently fails with undecided node assertion if all inputs the same
# 'cols': '0,1,2,3,4,',
# 'cols': '8,9',
'cols': 'C9,C10',
'response': 'C11',
'seed': '19823134',
}
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_params_rand1_fvec(self):
csvPathname = 'poker/poker1000'
for trial in range(10):
# params is mutable. This is default.
kwargs = params.copy()
timeoutSecs = 180
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', timeoutSecs=timeoutSecs)
h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "Trial #", trial, "completed"
if __name__ == '__main__':
h2o.unit_main()
| {
"content_hash": "8aac13f8aa7d6aae1cf48d0d86577253",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 119,
"avg_line_length": 29.097560975609756,
"alnum_prop": 0.5850796311818944,
"repo_name": "100star/h2o",
"id": "338cf17f67063adc09249bf12d4b9a5daa3491cb",
"size": "1193",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_rf_undecided_fail_fvec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "218044"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "Java",
"bytes": "5087375"
},
{
"name": "JavaScript",
"bytes": "92357"
},
{
"name": "Makefile",
"bytes": "50437"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "16170"
},
{
"name": "Python",
"bytes": "3259439"
},
{
"name": "R",
"bytes": "1540554"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "206024"
},
{
"name": "TeX",
"bytes": "189490"
}
],
"symlink_target": ""
} |
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
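# The splice overwrites the first input's scriptSig length byte with 0x01 followed
# by a 0x00 byte, giving it a one-byte scriptSig of "00" (checked below) while
# keeping the transaction decodable.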
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
        #compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.stop_nodes()
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
# Again lock the watchonly UTXO or nodes[0] may spend it, because
# lockunspent is memory-only and thus lost on restart
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
        #empty node1, then send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
        #again, empty node1, then send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
        # output 0 takes at least as much of the fee as outputs 2 and 3, and no more than 2 satoshis more
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| {
"content_hash": "b22262c6bbcdbfb5b03daa9d8768e131",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 223,
"avg_line_length": 45.26897470039947,
"alnum_prop": 0.5635497249757332,
"repo_name": "myriadteam/myriadcoin",
"id": "8aae249362f84f94585fa788dc2f3ec9138faa4e",
"size": "34211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/rpc_fundrawtransaction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1590918"
},
{
"name": "C++",
"bytes": "6467954"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "201405"
},
{
"name": "Makefile",
"bytes": "121719"
},
{
"name": "Objective-C",
"bytes": "6345"
},
{
"name": "Objective-C++",
"bytes": "5378"
},
{
"name": "Python",
"bytes": "1611450"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "92134"
}
],
"symlink_target": ""
} |
"""Install script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from setuptools import find_packages
from setuptools import setup
setup(
name='dm-hard-eight',
version=imp.load_source('_version',
'dm_hard_eight/_version.py').__version__,
description=('DeepMind Hard Eight Tasks, a set of Unity-based machine-'
'learning research tasks.'),
author='DeepMind',
license='Apache License, Version 2.0',
keywords='reinforcement-learning python machine learning',
packages=find_packages(exclude=['examples']),
install_requires=[
'absl-py',
'dm-env',
'dm-env-rpc',
'docker',
'grpcio',
'numpy',
'portpicker',
],
tests_require=['nose'],
python_requires='>=3.6.1',
extras_require={'examples': ['pygame']},
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| {
"content_hash": "61a7c796faedcffbcf2a97ef01bb18c7",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 32.869565217391305,
"alnum_prop": 0.6044973544973545,
"repo_name": "deepmind/dm_hard_eight",
"id": "b76b197f094b0bc1bb69bdb26e287177612b9167",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19389"
}
],
"symlink_target": ""
} |
import random
##################################################################
###################### Testing Tools #############################
##################################################################
'''
Generates inputs for gradient descent.
Inputs:
voters: the number of voters, or n from the pdf
    demographic: the number of demographic attributes
    error: How well our model will fit the data. Below 10 it's pretty good. Above 50 it's pretty bad.
Output:
Theta: n by m matrix from pdf
Y: length n vector of preferences
true_x: x from which we generated Y. If error is low, this will be quite close to optimal.
When testing, you should check if the final x you get has a low mean_square_diff with this
initial_x: perturbed version of true_x. Useful starting point
'''
# Uncomment the block below to generate random parameters. When you submit, comment it out again: You won't be able
# to use numpy on alg.
'''
import numpy as np
def generate_parameters(voters,demographic,error):
#Randomly generate true params Theta, true_x, and Y
Theta = 100*np.random.rand(voters,demographic)
true_x = 10*np.random.rand(1,demographic) - 5
Y = Theta.dot(true_x.transpose())
Y = Y.transpose()
Y = Y + np.random.normal(scale=error,size=voters)
#Perturb the true x to get something close
scaling = 0.5*np.ones((1,demographic))+np.random.rand(1,demographic)
initial_x = np.multiply(true_x,scaling)
#Somewhat hacky way to convert away from np arrays to lists
Theta = Theta.tolist()
Y = [Y[0][i] for i in xrange(voters)]
true_x = [true_x[0][i] for i in xrange(demographic)]
initial_x = [initial_x[0][i] for i in xrange(demographic)]
return Theta,Y,true_x,initial_x
'''
'''
This function is used by the tests and may be useful when deciding whether you should stop.
It takes two vectors as input and computes the mean of their squared error.
Inputs:
v1, a length k vector
v2, a length k vector
Output:
mse, a float for their mean-squared error
'''
def mean_square_diff(v1,v2):
diff_vector = [v1[i]-v2[i] for i in xrange(len(v1))]
mse = 1.0/len(v1)*sum(difference**2 for difference in diff_vector)
return mse
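# Illustrative check of mean_square_diff (hypothetical values, not part of the
# assignment): mean_square_diff([1, 2, 4, 5], [1, 4, 2, 6]) averages the squared
# differences (0 + 4 + 4 + 1) / 4.0 and returns 2.25.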
##################################################################
#################### Part B: Gradient Descent ####################
##################################################################
# GRADIENT DESCENT SPECIFICS
# The stopping condition is given below, namely, when the mean squared diff of the x's
# between iterations is less than some constant. Note, this is not the mean squared diff
# of f(x) but of the vector x itself! For instance
# x_at_iteration_k = [1,2,4,5]
# x_at_iteration_k+1 = [1,4,2,6]
# mean_square_change = mean_square_diff(x_at_iteration_k,x_at_iteration_k+1)
'''
Compute a 'sufficiently close to optimal' x using gradient descent
Inputs:
    Theta - The voting data as an n by m array
    Y - The favorability scores of the voters
initial_x - An initial guess for the optimal parameters provided to you
eta - The learning rate which will be given to you.
Output:
nearly optimal x.
'''
def find_gradient(Theta,Y,X):
n,m = len(Theta),len(Theta[0])
s = [0 for i in xrange(m)]
for i in range(n):
#print Theta[i]
#print X
#print (dot(Theta[i],X))
new_s = scalar(Theta[i],(dot(Theta[i],X)*-1)+Y[i]) #scalar(scalar(Theta[i], -1),(Y[i] - dot(Theta[i],X)))
#return new_s
for j in range(len(new_s)):
s[j] = s[j] + new_s[j]
return scalar(s, -2.0/n)
def gradient_descent(Theta, Y, initial_x, eta):
#We've initialized some variables for you
n,m = len(Theta),len(Theta[0])
current_x = initial_x
mean_square_change = 1
while mean_square_change > 0.0000000001:
old_x = current_x
current_x = add_vectors(old_x, scalar(find_gradient(Theta,Y,old_x), (-1 * eta)))
mean_square_change = mean_square_diff(current_x, old_x)
#print current_x
return current_x
def add_vectors(X,Y):
n = len(X)
Ans = []
for i in range(n):
Z = X[i] + Y[i]
Ans.append(Z)
return Ans
def scalar(X,c):
size= len(X)
Z = []
for i in range(size):
Z.append(X[i]*c)
return Z
def dot(X,Y):
size = len(X)
total = 0
for i in range(size):
total += X[i] * Y[i]
#print X[i], Y[i]
#print total
return total
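# Minimal end-to-end sanity check for Part B (hypothetical data, not part of the
# assignment): with an identity Theta the objective is minimized when x
# reproduces Y, so gradient_descent should approach [2.0, 3.0]. Only runs when
# the file is executed directly.
if __name__ == '__main__':
    demo_Theta = [[1.0, 0.0], [0.0, 1.0]]
    demo_Y = [2.0, 3.0]
    demo_x = gradient_descent(demo_Theta, demo_Y, [0.0, 0.0], 0.1)
    print(demo_x)  # expected to be close to [2.0, 3.0]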
##################################################################
############### Part C: Minibatch Gradient Descent################
##################################################################
################################## ALGORITHM OVERVIEW ###########################################
# Very similar to above but now we are going to take a subset of 10 #
# voters on which to perform our gradient update. We could pick a random set of 10 each time #
# but we're going to do this semi-randomly as follows: #
# -Generate a random permutation of [0,1...,n] (say, [5,11,2,8 . . .]) #
# This permutation allows us to choose a subset of 10 voters to focus on. #
# -Have a sliding window of 10 that chooses the first 10 elements in the permutation #
# then the next 10 and so on, cycling once we reach the end of this permutation #
# -For each group of ten, we perform a subgradient update on x. #
# You can derive this from the J(x)^mini #
# -Lastly, we only update our stopping condition, mean_square_change #
# when we iterate through all n voters. Counter keeps track of this. #
#################################################################################################
'''
Minibatch Gradient Descent
Compute a 'sufficiently close to optimal' x using gradient descent with small batches
Inputs:
    Theta - The voting data as an n by m array
    Y - The favorability scores of the voters
initial_x - An initial guess for the optimal parameters provided to you
eta - The learning rate which will be given to you.
Output:
nearly optimal x.
'''
def minibatch_find_gradient(Theta,Y,X,selected):
n,m = len(Theta),len(Theta[0])
s = [0 for i in xrange(m)]
for i in selected:
new_s = scalar(Theta[i],(dot(Theta[i],X)*-1)+Y[i]) #scalar(scalar(Theta[i], -1),(Y[i] - dot(Theta[i],X)))
for j in range(len(new_s)):
s[j] = s[j] + new_s[j]
return scalar(s, -2.0/10)
def minibatch_gradient_descent(Theta, Y, initial_x, eta):
# We've gotten you started. Voter_ordering is a random permutation.
# Window position can be used to keep track of the sliding window's position
n,m = len(Theta),len(Theta[0])
current_x = initial_x
voter_ordering = range(n)
random.shuffle(voter_ordering)
mean_square_change = 1
window_position = 0
counter = 0
while mean_square_change > 0.000000001:
#TODO: Minibatch updates
old_x = current_x
selected = voter_ordering[window_position: window_position+10]
current_x = add_vectors(old_x, scalar(minibatch_find_gradient(Theta,Y,old_x, selected), (-1 * eta)))
counter+=1
        window_position = (window_position + 10) % n  # wrap around so the window keeps cycling through the permutation
if counter == n/10:
# TODO: stopping condition updates
mean_square_change = mean_square_diff(current_x, old_x)
counter = 0
return current_x
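# Illustrative walk-through of the batching scheme above (hypothetical sizes):
# with n = 30 voters the shuffled ordering is consumed ten indices at a time,
# voter_ordering[0:10], voter_ordering[10:20], voter_ordering[20:30], and the
# window then wraps back to the start; the stopping condition is only
# re-evaluated once counter reaches n/10 = 3 windows, i.e. once per full pass
# over all voters.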
##################################################################
############## Part D: Line search Gradient Descent###############
##################################################################
'''
Compute the mean-squared error between the prediction for Y given Theta and the current parameters x
and the actual voter desires, Y.
Input:
    Theta - The voting data as an n by m array
    Y - The favorability scores of the voters. Length n.
x - The current guess for the optimal parameters. Length m.
Output:
A float for the prediction error.
'''
def prediction_error(Theta,Y,x):
prediction_error = float('inf')
#TODO Compute the MSE between the prediction and Y
prediction = []
for k in range(len(Theta)):
prediction.append(dot(Theta[k], x))
prediction_error = mean_square_diff(prediction, Y)
return prediction_error
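# Illustrative check of prediction_error (hypothetical values): with
# Theta = [[1.0, 0.0], [0.0, 1.0]], Y = [2.0, 3.0] and x = [1.0, 1.0] the
# predictions are [1.0, 1.0], so the MSE returned is ((2-1)**2 + (3-1)**2) / 2.0 = 2.5.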
'''
This function should return the next current_x after doubling the learning rate
until we hit the max or the prediction error increases
Inputs:
    start_x Starting guess for x. Length m.
    gradient Gradient at start_x. Length m.
min_rate Fixed given rate.
max_rate Fixed max rate.
Output:
updated_x Check pseudocode.
'''
def J(X, Theta, Y):
n = len(Theta)
ans = 0
for i in range(n):
ans += (Y[i] - dot(Theta[i],X))**2
return float(ans)/n
def line_search(Theta,Y,start_x,gradient,min_rate=0.0000001,max_rate=0.1):
#TODO Adapt the pseudocode to working python code
n_current = min_rate
current_x = add_vectors(start_x, scalar(gradient, -n_current))
while n_current < max_rate:
x_temp = add_vectors(current_x, scalar(gradient, -n_current))
if J(x_temp, Theta, Y) < J(current_x, Theta, Y):
current_x = x_temp
n_current *= 2
else:
break
return current_x
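# Illustrative trace of line_search (hypothetical numbers): after the initial
# step at min_rate, the candidate rates double each round (min_rate, 2*min_rate,
# 4*min_rate, ...) and the search stops as soon as a doubled step no longer
# lowers J or the rate reaches max_rate, returning the last x that improved the
# objective.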
'''
Inputs:
    Theta The voting data as an n by m array
    Y The favorability scores of the voters. Length n.
x The current guess for the optimal parameters. Length m.
Output:
gradient Length m vector of the gradient.
'''
def compute_gradient(Theta,Y,current_x):
#TODO: Compute the gradient. Should be able to copy paste from part b.
X = current_x
n,m = len(Theta),len(Theta[0])
s = [0 for i in xrange(m)]
for i in range(n):
#print Theta[i]
#print X
#print (dot(Theta[i],X))
new_s = scalar(Theta[i],(dot(Theta[i],X)*-1)+Y[i]) #scalar(scalar(Theta[i], -1),(Y[i] - dot(Theta[i],X)))
#return new_s
for j in range(len(new_s)):
s[j] = s[j] + new_s[j]
return scalar(s, -2.0/n)
#return current_x
def gradient_descent_complete(Theta,Y,initial_x):
n,m = len(Theta),len(Theta[0])
delta = 1
current_x = initial_x
last_error = prediction_error(Theta,Y,current_x)
while delta > 0.1:
gradient = compute_gradient(Theta,Y,current_x)
current_x = line_search(Theta,Y,current_x,gradient,0.0000005,0.1)
current_error = prediction_error(Theta,Y,current_x)
delta = last_error - current_error
last_error = current_error
    return current_x
| {
"content_hash": "783acd340c595dc5403e67b4d126220c",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 115,
"avg_line_length": 37.446308724832214,
"alnum_prop": 0.5660901514472623,
"repo_name": "artuchavez/6006-Projects",
"id": "32fd6b7d494a97f1188113125915469d95f343b3",
"size": "11178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pset6_materials_updated/code/Gradient_Descent_Template.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126153"
},
{
"name": "TeX",
"bytes": "152687"
}
],
"symlink_target": ""
} |
from ..selinux import SELinux
from ...parsers.grub_conf import Grub1Config, Grub2Config
from ...parsers.selinux_config import SelinuxConfig
from ...parsers.sestatus import SEStatus
from ...tests import context_wrap
GRUB_DISABLED = 'grub_disabled'
GRUB_NOT_ENFORCING = 'grub_not_enforcing'
RUNTIME_DISABLED = 'sestatus_disabled'
RUNTIME_NOT_ENFORCING = 'sestatus_not_enforcing'
BOOT_DISABLED = 'selinux_conf_disabled'
BOOT_NOT_ENFORCING = 'selinux_conf_not_enforcing'
SESTATUS_OUT = """
SELinux status: enabled
SELinuxfs mount: /sys/fs/selinux
SELinux root directory: /etc/selinux
Loaded policy name: targeted
Current mode: enforcing
Mode from config file: enforcing
Policy MLS status: enabled
Policy deny_unknown status: allowed
Max kernel policy version: 30
"""
SESTATUS_OUT_DISABLED = """
SELinux status: disabled
SELinuxfs mount: /sys/fs/selinux
SELinux root directory: /etc/selinux
Loaded policy name: targeted
Current mode: enforcing
Mode from config file: enforcing
Policy MLS status: enabled
Policy deny_unknown status: allowed
Max kernel policy version: 30
"""
SESTATUS_OUT_NOT_ENFORCING = """
SELinux status: enabled
SELinuxfs mount: /sys/fs/selinux
SELinux root directory: /etc/selinux
Loaded policy name: targeted
Current mode: permissive
Mode from config file: enforcing
Policy MLS status: enabled
Policy deny_unknown status: allowed
Max kernel policy version: 30
"""
SELINUX_CONF = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=enforcing
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
"""
SELINUX_CONF_DISABLED = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
"""
SELINUX_CONF_NOT_ENFORCING = """
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of these two values:
# targeted - Targeted processes are protected,
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
"""
SELINUX_CONFIGS = [
(
{'SELINUX': 'enforcing', 'SELINUXTYPE': 'targeted'},
{},
),
# Problem.
(
{'SELINUX': 'permissive', 'SELINUXTYPE': 'targeted'},
{BOOT_NOT_ENFORCING: 'permissive'},
),
# Another kind of problem.
(
{'SELINUX': 'disabled', 'SELINUXTYPE': 'targeted'},
{BOOT_DISABLED: 'disabled'},
),
# Changing value of SELINUXTYPE should have no effect.
(
{'SELINUX': 'enforcing', 'SELINUXTYPE': 'mls'},
{},
),
(
{'SELINUX': 'permissive', 'SELINUXTYPE': 'blabla'},
{BOOT_NOT_ENFORCING: 'permissive'},
),
(
{'SELINUX': 'disabled', 'SELINUXTYPE': 'barfoo'},
{BOOT_DISABLED: 'disabled'},
),
]
SESTATUS_TEMPLATE = {
'loaded_policy_name': 'targeted', 'selinux_root_directory': '/etc/selinux',
'selinuxfs_mount': '/sys/fs/selinux', 'mode_from_config_file': 'enforcing',
'policy_mls_status': 'enabled',
'policy_deny_unknown_status': 'allowed', 'max_kernel_policy_version': '30'
}
SESTATUS_OUTPUTS = [
# No problem.
(
{'selinux_status': 'enabled', 'current_mode': 'enforcing'},
{},
),
# Problematic.
(
{'selinux_status': 'disabled', 'current_mode': 'enforcing'},
{RUNTIME_DISABLED: 'disabled'},
),
(
{'selinux_status': 'enabled', 'current_mode': 'permissive'},
{RUNTIME_NOT_ENFORCING: 'permissive'},
),
(
{'selinux_status': 'disabled', 'current_mode': 'permissive'},
{RUNTIME_DISABLED: 'disabled'},
),
]
# rhel-6
GRUB1_TEMPLATE = """
# grub.conf generated by anaconda
#
# Note that you do not have to rerun grub after making changes to this file
# NOTICE: You have a /boot partition. This means that
# all kernel and initrd paths are relative to /boot/, eg.
# root (hd0,0)
# kernel /vmlinuz-version ro root=/dev/mapper/VolGroup-lv_root
# initrd /initrd-[generic-]version.img
#boot=/dev/sda
default=0
timeout=5
splashimage=(hd0,0)/grub/splash.xpm.gz
hiddenmenu
title Red Hat Enterprise Linux 6 (2.6.32-642.el6.x86_64)
root (hd0,0)
kernel /vmlinuz-2.6.32-642.el6.x86_64 {kernel_boot_options} ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet
initrd /initramfs-2.6.32-642.el6.x86_64.img
""" # noqa
GRUB1_OUTPUTS = [
# noqa
# No problem.
(
{'kernel_boot_options': ''},
{},
),
# Problematic.
(
{'kernel_boot_options': 'selinux=0'},
{GRUB_DISABLED: [
'/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet',
]},
),
(
{'kernel_boot_options': 'enforcing=0'},
{GRUB_NOT_ENFORCING: [
'/vmlinuz-2.6.32-642.el6.x86_64 enforcing=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet',
]},
),
(
{'kernel_boot_options': 'selinux=0 enforcing=0'},
{
GRUB_DISABLED: [
'/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 enforcing=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet',
],
GRUB_NOT_ENFORCING: [
'/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 enforcing=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet',
]
},
),
]
# rhel-7
GRUB2_TEMPLATE = """
#
# DO NOT EDIT THIS FILE
#
# It is automatically generated by grub2-mkconfig using templates
# from /etc/grub.d and settings from /etc/default/grub
#
### BEGIN /etc/grub.d/00_header ###
set pager=1
if [ -s $prefix/grubenv ]; then
load_env
fi
if [ "${next_entry}" ] ; then
set default="${next_entry}"
set next_entry=
save_env next_entry
set boot_once=true
else
set default="${saved_entry}"
fi
if [ x"${feature_menuentry_id}" = xy ]; then
menuentry_id_option="--id"
else
menuentry_id_option=""
fi
export menuentry_id_option
if [ "${prev_saved_entry}" ]; then
set saved_entry="${prev_saved_entry}"
save_env saved_entry
set prev_saved_entry=
save_env prev_saved_entry
set boot_once=true
fi
function savedefault {
if [ -z "${boot_once}" ]; then
saved_entry="${chosen}"
save_env saved_entry
fi
}
function load_video {
if [ x$feature_all_video_module = xy ]; then
insmod all_video
else
insmod efi_gop
insmod efi_uga
insmod ieee1275_fb
insmod vbe
insmod vga
insmod video_bochs
insmod video_cirrus
fi
}
terminal_output console
if [ x$feature_timeout_style = xy ] ; then
set timeout_style=menu
set timeout=5
# Fallback normal timeout code in case the timeout_style feature is
# unavailable.
else
set timeout=5
fi
### END /etc/grub.d/00_header ###
### BEGIN /etc/grub.d/00_tuned ###
set tuned_params=""
### END /etc/grub.d/00_tuned ###
### BEGIN /etc/grub.d/01_users ###
if [ -f ${prefix}/user.cfg ]; then
source ${prefix}/user.cfg
if [ -n ${GRUB2_PASSWORD} ]; then
set superusers="root"
export superusers
password_pbkdf2 root ${GRUB2_PASSWORD}
fi
fi
### END /etc/grub.d/01_users ###
### BEGIN /etc/grub.d/10_linux ###
menuentry 'Red Hat Enterprise Linux Server (3.10.0-327.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-327.el7.x86_64-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce' {
load_video
set gfxpayload=keep
insmod gzio
insmod part_msdos
insmod xfs
set root='hd0,msdos1'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,msdos1 --hint-efi=hd0,msdos1 --hint-baremetal=ahci0,msdos1 --hint='hd0,msdos1' 860a7b56-dbdd-498a-b085-53dc93e4650b
else
search --no-floppy --fs-uuid --set=root 860a7b56-dbdd-498a-b085-53dc93e4650b
fi
linux16 /vmlinuz-3.10.0-327.el7.x86_64 %s root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8
initrd16 /initramfs-3.10.0-327.el7.x86_64.img
}
menuentry 'Red Hat Enterprise Linux Server (0-rescue-9f20b35c9faa49aebe171f62a11b236f) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-0-rescue-9f20b35c9faa49aebe171f62a11b236f-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce' {
load_video
insmod gzio
insmod part_msdos
insmod xfs
set root='hd0,msdos1'
if [ x$feature_platform_search_hint = xy ]; then
search --no-floppy --fs-uuid --set=root --hint-bios=hd0,msdos1 --hint-efi=hd0,msdos1 --hint-baremetal=ahci0,msdos1 --hint='hd0,msdos1' 860a7b56-dbdd-498a-b085-53dc93e4650b
else
search --no-floppy --fs-uuid --set=root 860a7b56-dbdd-498a-b085-53dc93e4650b
fi
linux16 /vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f %s root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet
initrd16 /initramfs-0-rescue-9f20b35c9faa49aebe171f62a11b236f.img
}
### END /etc/grub.d/10_linux ###
### BEGIN /etc/grub.d/20_linux_xen ###
### END /etc/grub.d/20_linux_xen ###
### BEGIN /etc/grub.d/20_ppc_terminfo ###
### END /etc/grub.d/20_ppc_terminfo ###
### BEGIN /etc/grub.d/30_os-prober ###
### END /etc/grub.d/30_os-prober ###
### BEGIN /etc/grub.d/40_custom ###
# This file provides an easy way to add custom menu entries. Simply type the
# menu entries you want to add after this comment. Be careful not to change
# the 'exec tail' line above.
### END /etc/grub.d/40_custom ###
### BEGIN /etc/grub.d/41_custom ###
if [ -f ${config_directory}/custom.cfg ]; then
source ${config_directory}/custom.cfg
elif [ -z "${config_directory}" -a -f $prefix/custom.cfg ]; then
source $prefix/custom.cfg;
fi
### END /etc/grub.d/41_custom ###
""" # noqa
GRUB2_OUTPUTS = [
# No problem.
(
{'kernel_boot_options': ''},
{},
),
# Problematic.
(
{'kernel_boot_options': 'selinux=0'},
{GRUB_DISABLED: [
'/vmlinuz-3.10.0-327.el7.x86_64 selinux=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8',
'/vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f selinux=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet',
]},
),
(
{'kernel_boot_options': 'enforcing=0'},
{GRUB_NOT_ENFORCING: [
'/vmlinuz-3.10.0-327.el7.x86_64 enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8',
'/vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet',
]},
),
(
{'kernel_boot_options': 'selinux=0 enforcing=0'},
{
GRUB_DISABLED: [
'/vmlinuz-3.10.0-327.el7.x86_64 selinux=0 enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8',
'/vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f selinux=0 enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet',
],
GRUB_NOT_ENFORCING: [
'/vmlinuz-3.10.0-327.el7.x86_64 selinux=0 enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8',
'/vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f selinux=0 enforcing=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet',
]
},
),
]
TEST_CASES_1 = [
((SESTATUS_OUT, SELINUX_CONF, GRUB1_TEMPLATE),
(True, {})),
((SESTATUS_OUT, SELINUX_CONF, GRUB1_TEMPLATE.format(kernel_boot_options='selinux=0')),
(False, {GRUB_DISABLED: ['/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet']})),
((SESTATUS_OUT, SELINUX_CONF, GRUB1_TEMPLATE.format(kernel_boot_options='enforcing=0')),
(False, {GRUB_NOT_ENFORCING: ['/vmlinuz-2.6.32-642.el6.x86_64 enforcing=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet']})),
((SESTATUS_OUT, SELINUX_CONF_DISABLED, GRUB1_TEMPLATE),
(False, {BOOT_DISABLED: 'disabled'})),
((SESTATUS_OUT, SELINUX_CONF_NOT_ENFORCING, GRUB1_TEMPLATE),
(False, {BOOT_NOT_ENFORCING: 'permissive'})),
((SESTATUS_OUT_DISABLED, SELINUX_CONF_NOT_ENFORCING, GRUB1_TEMPLATE),
(False, {RUNTIME_DISABLED: 'disabled', BOOT_NOT_ENFORCING: 'permissive'})),
((SESTATUS_OUT_NOT_ENFORCING, SELINUX_CONF_DISABLED, GRUB1_TEMPLATE.format(kernel_boot_options='selinux=0')),
(False, {GRUB_DISABLED: ['/vmlinuz-2.6.32-642.el6.x86_64 selinux=0 ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet'],
RUNTIME_NOT_ENFORCING: 'permissive',
BOOT_DISABLED: 'disabled'
})),
]
TEST_CASES_2 = [
((SESTATUS_OUT, SELINUX_CONF, GRUB2_TEMPLATE % ('selinux=0', 'selinux=0')),
(False, {GRUB_DISABLED: ['/vmlinuz-3.10.0-327.el7.x86_64 selinux=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet LANG=en_US.UTF-8',
'/vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f selinux=0 root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet',
]})),
((SESTATUS_OUT_DISABLED, SELINUX_CONF, GRUB2_TEMPLATE),
(False, {RUNTIME_DISABLED: 'disabled'})),
((SESTATUS_OUT_NOT_ENFORCING, SELINUX_CONF, GRUB2_TEMPLATE),
(False, {RUNTIME_NOT_ENFORCING: 'permissive'})),
]
def test_integration():
import pprint
for inputs, outputs in TEST_CASES_1:
sestatus = SEStatus(context_wrap(inputs[0]))
selinux_config = SelinuxConfig(context_wrap(inputs[1]))
grub_config = Grub1Config(context_wrap(inputs[2]))
selinux = SELinux(None,
{SEStatus: sestatus,
SelinuxConfig: selinux_config,
Grub1Config: grub_config}
)
assert selinux.ok() == outputs[0]
assert selinux.problems == outputs[1]
pprint.pprint(selinux.problems)
for inputs, outputs in TEST_CASES_2:
sestatus = SEStatus(context_wrap(inputs[0]))
selinux_config = SelinuxConfig(context_wrap(inputs[1]))
grub_config = Grub2Config(context_wrap(inputs[2]))
selinux = SELinux(None,
{SEStatus: sestatus,
SelinuxConfig: selinux_config,
Grub2Config: grub_config}
)
assert selinux.ok() == outputs[0]
assert selinux.problems == outputs[1]
pprint.pprint(selinux.problems)
| {
"content_hash": "39055a80809be67bc94c2563e462c0db",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 304,
"avg_line_length": 39.300683371298405,
"alnum_prop": 0.6548426360632933,
"repo_name": "PaulWay/insights-core",
"id": "f57db0c2d92b0257cc48f9e153fa48085c00c248",
"size": "17253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/combiners/tests/test_selinux.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Python",
"bytes": "2479830"
},
{
"name": "Shell",
"bytes": "892"
}
],
"symlink_target": ""
} |
from lib.python_actions import PuppetBasePythonAction
__all__ = [
'PuppetCertRevokeAction'
]
class PuppetCertRevokeAction(PuppetBasePythonAction):
def run(self, environment, host):
success = self.client.cert_revoke(environment=environment, host=host)
return success
| {
"content_hash": "34314513311e21f1d22da575351e3010",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 26.636363636363637,
"alnum_prop": 0.7406143344709898,
"repo_name": "lmEshoo/st2contrib",
"id": "97f36c382c40b121bf1687275067b895f868b1c5",
"size": "293",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "packs/puppet/actions/cert_revoke.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8530"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "444890"
},
{
"name": "Shell",
"bytes": "3635"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from glob import glob
setup(name='AjaxTerm2',
version='1.1.0',
py_modules=['ajaxterm2'],
install_requires=['webob','paste','pastedeploy'],
data_files=[('', glob('*.ini')),
('www', glob('www/*.html')),
('www/js', glob('www/js/*.js')),
('www/css', glob('www/css/*.css')),
],
)
| {
"content_hash": "6d40596ba915cc70758f573f29b15641",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 55,
"avg_line_length": 28.933333333333334,
"alnum_prop": 0.4838709677419355,
"repo_name": "selboo/starl-mangle",
"id": "e1ce3eaf54b226912a9c466428909da496583fdd",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ajaxterm2/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1463"
},
{
"name": "CSS",
"bytes": "197524"
},
{
"name": "HTML",
"bytes": "792119"
},
{
"name": "JavaScript",
"bytes": "517786"
},
{
"name": "PHP",
"bytes": "613053"
},
{
"name": "Python",
"bytes": "312293"
},
{
"name": "Shell",
"bytes": "4409"
}
],
"symlink_target": ""
} |
import collections
import inspect
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
def _convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(_convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(_convert, data))
else:
return data
def connect(config, app):
dbconfig = {
'user': config.DB_USERNAME,
'password': config.DB_PASSWORD,
'database': config.DB_DATABASE_NAME,
'host': config.DB_HOST,
'port': config.DB_PORT,
}
dbconfig.update(config.DB_OPTIONS)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://" + str(config.DB_USERNAME) + ":" + str(config.DB_PASSWORD) + "@127.0.0.1:" + str(config.DB_PORT) + "/" + config.DB_DATABASE_NAME
db = SQLAlchemy(app)
def _query_db(query, data=None):
result = db.session.execute(text(query), data)
if query[0:6].lower() == 'select':
# if the query was a select
# convert the result to a list of dictionaries
list_result = [dict(r) for r in result]
# return the results as a list of dictionaries
return list_result
elif query[0:6].lower() == 'insert':
            # if the query was an insert, commit the changes
            app.db.session.commit()
            # and return the id of the row that was inserted
            return result.lastrowid
else:
# if the query was an update or delete, return nothing and commit changes
app.db.session.commit()
def _get_one(query, data=None):
result = db.session.execute(text(query), data).fetchone()
return result
db.query_db = _query_db
db.get_one = _get_one
return db
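# Illustrative usage of the returned handle (hypothetical config/app objects and
# table names; adjust to the real application):
#   db = connect(config, app)
#   rows = db.query_db("SELECT * FROM users WHERE id = :id", {"id": 1})
#   new_id = db.query_db("INSERT INTO users (name) VALUES (:name)", {"name": "a"})
#   row = db.get_one("SELECT name FROM users WHERE id = :id", {"id": new_id})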
| {
"content_hash": "38f367656855842df5a2023b4c73d679",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 182,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.6065839179708581,
"repo_name": "RydrDojo/Ridr_app",
"id": "0d746229532057823acdd89aa7afb4642ab1be0f",
"size": "1853",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "system/db/drivers/_mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39163"
},
{
"name": "CSS",
"bytes": "25375"
},
{
"name": "HTML",
"bytes": "25148"
},
{
"name": "JavaScript",
"bytes": "6362"
},
{
"name": "Mako",
"bytes": "8887"
},
{
"name": "Python",
"bytes": "9971971"
},
{
"name": "Shell",
"bytes": "3371"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
("objects", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("typeclasses", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="ScriptDB",
fields=[
(
"id",
models.AutoField(
verbose_name="ID", serialize=False, auto_created=True, primary_key=True
),
),
("db_key", models.CharField(max_length=255, verbose_name="key", db_index=True)),
(
"db_typeclass_path",
models.CharField(
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
max_length=255,
null=True,
verbose_name="typeclass",
),
),
(
"db_date_created",
models.DateTimeField(auto_now_add=True, verbose_name="creation date"),
),
(
"db_lock_storage",
models.TextField(
help_text="locks limit access to an entity. A lock is defined as a 'lock string' on the form 'type:lockfunctions', defining what functionality is locked and how to determine access. Not defining a lock means no access is granted.",
verbose_name="locks",
blank=True,
),
),
("db_desc", models.CharField(max_length=255, verbose_name="desc", blank=True)),
(
"db_interval",
models.IntegerField(
default=-1,
help_text="how often to repeat script, in seconds. -1 means off.",
verbose_name="interval",
),
),
(
"db_start_delay",
models.BooleanField(
default=False,
help_text="pause interval seconds before starting.",
verbose_name="start delay",
),
),
(
"db_repeats",
models.IntegerField(
default=0, help_text="0 means off.", verbose_name="number of repeats"
),
),
(
"db_persistent",
models.BooleanField(default=False, verbose_name="survive server reboot"),
),
("db_is_active", models.BooleanField(default=False, verbose_name="script active")),
(
"db_attributes",
models.ManyToManyField(
help_text="attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).",
to="typeclasses.Attribute",
null=True,
),
),
(
"db_obj",
models.ForeignKey(
blank=True,
to="objects.ObjectDB",
on_delete=models.CASCADE,
help_text="the object to store this script on, if not a global script.",
null=True,
verbose_name="scripted object",
),
),
(
"db_account",
models.ForeignKey(
blank=True,
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
help_text="the account to store this script on (should not be set if obj is set)",
null=True,
verbose_name="scripted account",
),
),
(
"db_tags",
models.ManyToManyField(
help_text="tags on this object. Tags are simple string markers to identify, group and alias objects.",
to="typeclasses.Tag",
null=True,
),
),
],
options={"verbose_name": "Script"},
bases=(models.Model,),
)
]
| {
"content_hash": "80198854ede94df4e4c30c364edaa85d",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 255,
"avg_line_length": 41.18260869565217,
"alnum_prop": 0.4197635135135135,
"repo_name": "jamesbeebop/evennia",
"id": "fd07a9317133512ee513aab8bf04f11ba4bfea09",
"size": "4762",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/scripts/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
} |
import binascii
from graph.filter_base import FilterBase, FilterState, FilterType
from graph.pad_template import PadTemplate
from graph.pad_capabilities import PadCapabilities
from graph.input_pin import InputPin
from graph.output_pin import OutputPin
class LoggerSink(FilterBase):
"""
A logger sink filter.
Input Pins:
input - Accepts any mime type. Whatever is sent here gets logged.
"""
#########################################################################
# Note - these static methods MUST be implemented by all filters.
print('######## Executing static variable init on LoggerSink')
filter_meta = {}
filter_meta[FilterBase.FILTER_META_FULLY_QUALIFIED] = "com.urbtek.logger_sink"
filter_meta[FilterBase.FILTER_META_NAME] = "LoggerSink"
filter_meta[FilterBase.FILTER_META_DESC] = "Logs whatever data arrives on the sink pad."
filter_meta[FilterBase.FILTER_META_VER] = "0.9.0"
filter_meta[FilterBase.FILTER_META_RANK] = FilterBase.FILTER_RANK_SECONDARY
filter_meta[FilterBase.FILTER_META_ORIGIN_URL] = "https://github.com/koolspin"
filter_meta[FilterBase.FILTER_META_KLASS] = "Sink/Logger"
# Pad templates for this filter
# Note this dictionary is keyed by the actual pad name and not the name template
filter_pad_templates = {}
sink_pad_cap = PadCapabilities.create_caps_any()
sink_pad_template = PadTemplate.create_pad_always_sink([sink_pad_cap])
filter_pad_templates[FilterBase.DEFAULT_SINK_PAD_NAME] = sink_pad_template
# End of filter metadata
#########################################################################
def __init__(self, name, config_dict, graph_manager):
super().__init__(name, config_dict, graph_manager, FilterType.sink)
self._output_pin = OutputPin('output', True)
self._add_output_pin(self._output_pin)
#
mime_type_map = {}
mime_type_map['*'] = self.recv
ipin = InputPin('input', mime_type_map, self)
self._add_input_pin(ipin)
# Make sure to crate the pads that are defined for this filter's template
self._create_always_pads_from_template(LoggerSink.filter_pad_templates)
def run(self):
super().run()
self._set_filter_state(FilterState.running)
def stop(self):
super().stop()
self._set_filter_state(FilterState.stopped)
def recv(self, mime_type, payload, metadata_dict):
print('Mime type: {0}'.format(mime_type))
print('meta-dict: {0}'.format(metadata_dict))
if isinstance(payload, str):
# String format, print directly
print('Payload: {0}'.format(payload))
else:
# Must be a binary format, convert to hex first
print('Payload: {0}'.format(self._stringify_payload(mime_type, payload)))
if self.filter_state == FilterState.running:
self._output_pin.send(mime_type, payload, metadata_dict)
else:
raise RuntimeError('{0} tried to process input while filter state is {1}'.format(self.filter_name, self.filter_state))
def _stringify_payload(self, mime_type, payload):
ret_string = ''
if mime_type == 'application/octet-stream':
ret_string = binascii.hexlify(payload)
else:
ret_string = payload.decode("utf-8")
return ret_string
# Note - these static methods MUST be implemented by all filters.
# TODO: Is there a better way to do this?
@staticmethod
def get_filter_metadata():
return LoggerSink.filter_meta
@staticmethod
def get_filter_pad_templates():
return LoggerSink.filter_pad_templates
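# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# _stringify_payload above hex-encodes octet-streams and decodes everything else
# as UTF-8. The mime types and payloads below are made-up examples that exercise
# the same rule using only the standard library (binascii is imported at the top).
if __name__ == '__main__':
    samples = [
        ('application/octet-stream', b'\x00\xff\x10'),  # binary -> hex string
        ('text/plain', b'hello logger'),                # text   -> UTF-8 string
    ]
    for mime, payload in samples:
        if mime == 'application/octet-stream':
            print(mime, binascii.hexlify(payload))
        else:
            print(mime, payload.decode('utf-8'))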
| {
"content_hash": "55ec964568aef209ae9e9e87753d02c8",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 130,
"avg_line_length": 41.96666666666667,
"alnum_prop": 0.636219221604448,
"repo_name": "koolspin/rosetta",
"id": "a9910750fb76fcea0985b69271e924f8066fb4df",
"size": "3777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elements/logger_sink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86058"
},
{
"name": "Shell",
"bytes": "579"
}
],
"symlink_target": ""
} |
"""
Remove massive tip and internal node labels from Richard Liang's
simulated phylogeny.
While we're at it, we need to rescale the coalescence times
(branch lengths) to confirm to a logistic growth model because
the earliest coalescence events are way too deep.
"""
import sys
import math
from Bio import Phylo
in_tree_file = sys.argv[1]
t = Phylo.read(in_tree_file, 'newick')
nodes = t.get_nonterminals()
for i, node in enumerate(nodes):
node.name = "" # "N" + str(i) # indelible does not like named inner nodes
tips = t.get_terminals()
for i, tip in enumerate(tips):
tip.name = 'otu'+str(i+1)
#t.collapse(t.clade)
# # dictionary of Clade to depth (tree height)
# # depths = t.depths()
#
# #max_height = max(depths.values())
#
# # apply a shrink factor that increases with depth
# # in a logistic manner, mimicking logistic growth
# branches = nodes + tips
#
# kcap = 1000.
# n0 = 30.
# r = 20.
#
# global_scaling_factor = 10000.
#
# # # adjust coalescent times with logistic growth factor
# for i, branch in enumerate(branches):
# cur = branch.branch_length
# #depth = 10.* (max_height - depths[branch]) / max_height # scaled to range from 0 to 10.
# #logistic = kcap * n0 * math.exp(r * depth) / (kcap + n0 * (math.exp(r * depth) - 1))
# #adj = cur / logistic
# #branch.branch_length = adj / global_scaling_factor
# branch.branch_length = cur / global_scaling_factor
# #if i % 500 == 0: print i, cur, depth, logistic, adj
# branches = nodes + tips
# t.clade.branch_length = 0.0
# total_branch_len = t.total_branch_length()
# global_scaling_factor = 100.0
# for i, branch in enumerate(branches):
# branch_scaling_factor = global_scaling_factor/total_branch_len
# branch.branch_length = branch.branch_length * branch_scaling_factor
# Indelible wants beginning of newick to start with (, end of newick to end with );
from cStringIO import StringIO
tree_strio = StringIO()
Phylo.write(t, tree_strio, format="newick", format_branch_length='%1.9f')
tree_strio.flush()
tree_str = tree_strio.getvalue()
tree_strio.close()
tree_str = "(" + tree_str.replace(";", ");")
# tree_str = tree_str.replace(":0.000000000;", ";")
relabeled_nwk = in_tree_file.replace(".nwk", ".rename.nwk")
with open(relabeled_nwk, "w") as fh_out:
fh_out.write(tree_str)
#Phylo.write(t, file=in_tree_file.replace(".nwk", ".rename.nwk"), format='newick', format_branch_length='%1.9f')
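# --- Hedged illustration (added; not part of the original script) ---
# The INDELible-friendly wrapping applied above, shown on a toy Newick string
# (the tree below is a made-up example):
#   before: "(otu1:0.1,otu2:0.2):0.0;"
#   after:  "((otu1:0.1,otu2:0.2):0.0);"
# i.e. tree_str = "(" + tree_str.replace(";", ");")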
| {
"content_hash": "58c61cd86db17bfdc1900a8a71d95b09",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 112,
"avg_line_length": 31.60759493670886,
"alnum_prop": 0.6567881457749299,
"repo_name": "cfe-lab/Umberjack",
"id": "c91a3e61cd9a95556b94e78c2ee595ba38c8e9e0",
"size": "2497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/simulations/relabel_phylogeny.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Brainfuck",
"bytes": "2497"
},
{
"name": "CSS",
"bytes": "1747"
},
{
"name": "Python",
"bytes": "444311"
},
{
"name": "R",
"bytes": "49021"
}
],
"symlink_target": ""
} |
import errno
import logging
import os
import socket
import subprocess
from time import sleep
from ghpythonremote import rpyc
from .helpers import (
get_python_path,
get_extended_env_path_conda,
get_rhino_executable_path,
WINDOWS,
)
logger = logging.getLogger("ghpythonremote.connectors")
class GrasshopperToPythonRemote:
def __init__(
self,
rpyc_server_py,
python_exe=None,
location=None,
timeout=60,
max_retry=3,
port=None,
log_level=logging.WARNING,
working_dir=None,
):
if python_exe is None:
self.python_exe = get_python_path(location)
else:
if location is not None:
logger.debug(
"python_exe and env_name specified at the same time, ignoring "
"env_name."
)
self.python_exe = python_exe
self.env = get_extended_env_path_conda(self.python_exe)
self.rpyc_server_py = rpyc_server_py
self.timeout = timeout
self.retry = 0
self.max_retry = max(0, max_retry)
self.log_level = log_level
self.working_dir = working_dir
if port is None:
self.port = _get_free_tcp_port()
else:
self.port = port
self.python_popen = self._launch_python()
self.connection = self._get_connection()
self.py_remote_modules = self.connection.root.getmodule
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Cleanup the connection on error and exit.
Parameters
----------
exc_type : Error
Type of the exception that caused the __exit__.
exc_val : str
Value of the exception that caused the __exit__.
exc_tb : type
Exception log.
Returns
-------
True if the connection was successfully closed."""
try:
if exc_type:
logger.error("{!s}, {!s}, {!s}".format(exc_type, exc_val, exc_tb))
except NameError:
pass
self.close()
return True
def run_py_function(self, module_name, function_name, *nargs, **kwargs):
"""Run a specific Python function on the remote, with Python crash handling."""
remote_module = self.py_remote_modules(module_name)
function = getattr(remote_module, function_name)
function_output = kwargs.pop("function_output", None)
try:
result = function(*nargs, **kwargs)
except (socket.error, EOFError):
self._rebuild_py_remote()
return self.run_py_function(*nargs, **kwargs)
if function_output is not None:
try:
result = result[function_output]
except NameError:
pass
return result
def close(self):
if not self.connection.closed:
logger.info("Closing connection.")
self.connection.close()
if self.python_popen.poll() is None:
logger.info("Closing Python.")
self.python_popen.terminate()
def _launch_python(self):
logger.debug("Using python executable: {!s}".format(self.python_exe))
logger.debug("Using rpyc_server module: {!s}".format(self.rpyc_server_py))
logger.debug("Using port: {}".format(self.port))
logger.debug("Using log_level: {!s}".format(self.log_level))
logger.debug("Using working_dir: {!s}".format(self.working_dir))
        assert self.python_exe != "" and self.python_exe is not None
        assert self.rpyc_server_py != "" and self.rpyc_server_py is not None
        assert self.port != "" and self.port is not None
        assert self.log_level != "" and self.log_level is not None
python_call = [
self.python_exe,
self.rpyc_server_py,
str(self.port),
self.log_level,
]
cwd = self.working_dir
python_popen = subprocess.Popen(
python_call,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=cwd,
env=self.env,
)
return python_popen
def _get_connection(self):
connection = None
logger.info("Connecting...")
for i in range(self.timeout):
try:
if not connection:
logger.debug(
"Connecting. Timeout in {:d} seconds.".format(self.timeout - i)
)
connection = rpyc.utils.factory.connect(
"localhost",
self.port,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
else:
logger.debug(
"Found connection, testing. Timeout in {:d} seconds.".format(
self.timeout - i
)
)
connection.ping(timeout=1)
logger.debug("Connection ok, returning.")
logger.info("Connected.")
return connection
except socket.error as e:
if self.python_popen.poll() is not None:
raise RuntimeError(
"Remote python {!s} failed on launch. ".format(self.python_exe)
+ "Does the remote python have rpyc installed?"
)
if i == self.timeout - 1 or not e.errno == errno.ECONNREFUSED:
raise RuntimeError(
"Could not connect to remote python {!s}. ".format(
self.python_exe
)
+ "Does the remote python have rpyc installed?"
)
sleep(1)
except (
rpyc.core.protocol.PingError,
rpyc.core.async_.AsyncResultTimeout,
) as e:
logger.debug(str(e))
raise e
def _rebuild_py_remote(self):
if self.retry < self.max_retry:
logger.info("Lost Rhino connection, retrying.")
self.retry += 1
self.close()
[self.rhino_popen, self.connection, self.gh_remote] = [None, None, None]
logger.info("Waiting 10 seconds.")
sleep(10)
self.python_popen = self._launch_python()
self.connection = self._get_connection()
else:
raise RuntimeError(
"Lost connection to Python, and reconnection attempts limit ({:d}) "
"reached. Exiting.".format(self.max_retry)
)
class PythonToGrasshopperRemote:
"""Creates a remote Rhino/IronPython instance (with Grasshopper functions)
connected to a local python engine.
The local instance will be able to import all objects from the Rhino IronPython
engine, as well as Grasshopper components. Rhino will appear frozen on a python
script it is reading.
Parameters
----------
rhino_file_path : str
Absolute file path to a Rhino .3dm file to open in the remote Rhino. Can be
empty.
rpyc_server_py : str
Absolute path to the ghcompservice.py module that launches the server on the
remote.
rhino_ver : int
A Rhino version to use, from 5 to 7. Overridden by rhino_exe. Defaults to 7.
rhino_exe : str
Absolute path to the Rhino executable. By default, fetches from the windows
registry the Rhino install with the same bitness as the platform, and version
given by rhino_ver.
timeout : int
Number of seconds to wait for Rhino and IronPython to startup.
max_retry : int
Number of times Rhino will be restarted if it crashes, before declaring the
connection dead.
Examples
--------
>>> ROOT = os.path.abspath(os.path.join(os.path.curdir, '..'))
>>> rhino_file_path = os.path.join(ROOT, 'examples', 'curves.3dm')
>>> rpyc_server_py = os.path.join(ROOT, 'ghcompservice.py')
>>> with PythonToGrasshopperRemote(
>>> rhino_file_path, rpyc_server_py, rhino_ver=7, timeout=60
>>> ) as py2gh:
>>> rghcomp = py2gh.gh_remote_components
>>> rgh = py2gh.connection
>>> Rhino = rgh.modules.Rhino
>>> rs = rgh.modules.rhinoscriptsyntax
>>> # Do stuff with all this
>>> # See CPython_to_GH.py for a longer example
"""
def __init__(
self,
rhino_file_path,
rpyc_server_py,
rhino_ver=7,
preferred_bitness="same",
rhino_exe=None,
timeout=60,
max_retry=3,
port=None,
log_level=logging.WARNING,
):
if rhino_exe is None:
self.rhino_exe = self._get_rhino_path(
version=rhino_ver, preferred_bitness=preferred_bitness
)
else:
self.rhino_exe = rhino_exe
self.rhino_file_path = rhino_file_path
self.rpyc_server_py = rpyc_server_py
self.timeout = timeout
self.retry = 0
self.max_retry = max(0, max_retry)
if port is None:
self.port = _get_free_tcp_port()
else:
self.port = port
self.log_level = log_level
self.rhino_popen = self._launch_rhino()
self.connection = self._get_connection()
self.gh_remote_components = self.connection.root.ghcomp
self.gh_remote_userobjects = self.connection.root.ghuo
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Cleanup the connection on error and exit.
Parameters
----------
exc_type : Error
Type of the exception that caused the __exit__.
exc_val : str
Value of the exception that caused the __exit__.
exc_tb : type
Exception log.
Returns
-------
True if the connection was successfully closed."""
try:
if exc_type:
logger.error("{!s}, {!s}, {!s}".format(exc_type, exc_val, exc_tb))
except NameError:
pass
self.close()
return True
def run_gh_component(self, component_name, *nargs, **kwargs):
"""Run a specific Grasshopper component on the remote, with Rhino crash
handling.
"""
is_cluster = kwargs.pop("is_cluster", False)
# TODO: improve ghcomp to get clusters the same way we get compiled components,
# thus removing the need for a custom getter
component = self.gh_remote_components(component_name, is_cluster=is_cluster)
component_output = kwargs.pop("component_output", None)
try:
result = component(*nargs, **kwargs)
except (socket.error, EOFError):
self._rebuild_gh_remote()
return self.run_gh_component(*nargs, **kwargs)
if component_output is not None:
try:
result = result[component_output]
except NameError:
pass
return result
def close(self):
if not self.connection.closed:
logger.info("Closing connection.")
self.connection.close()
if self.rhino_popen.poll() is None:
logger.info("Closing Rhino.")
self.rhino_popen.terminate()
@staticmethod
def _get_rhino_path(version, preferred_bitness):
return get_rhino_executable_path(version, preferred_bitness)
def _launch_rhino(self):
        assert self.rhino_exe != "" and self.rhino_exe is not None
        assert self.rpyc_server_py != "" and self.rpyc_server_py is not None
        assert self.port != "" and self.port is not None
if WINDOWS:
rhino_call = [
'"' + self.rhino_exe + '"',
"/nosplash",
"/notemplate",
'/runscript="-_RunPythonScript ""{!s}"" {!s} {!s} -_Exit "'.format(
self.rpyc_server_py, self.port, self.log_level,
),
]
else:
rhino_call = [
self.rhino_exe,
"-nosplash",
"-notemplate",
'-runscript=-_RunPythonScript "{!s}" {!s} {!s} -_Exit'.format(
self.rpyc_server_py, self.port, self.log_level,
),
]
if self.rhino_file_path:
rhino_call.append(self.rhino_file_path)
if WINDOWS:
# Default escaping in subprocess.line2cmd does not work here,
# manually convert to string
rhino_call = " ".join(rhino_call)
rhino_popen = subprocess.Popen(
rhino_call, stdout=subprocess.PIPE, stdin=subprocess.PIPE
)
return rhino_popen
def _get_connection(self):
connection = None
logger.info("Connecting...")
for i in range(self.timeout):
try:
if not connection:
logger.debug(
"Connecting. Timeout in {:d} seconds.".format(self.timeout - i)
)
connection = rpyc.utils.factory.connect(
"localhost",
self.port,
service=rpyc.core.service.ClassicService,
config={"sync_request_timeout": None},
ipv6=False,
keepalive=True,
)
else:
logger.debug(
"Found connection, testing. Timeout in {:d} seconds.".format(
self.timeout - i
)
)
connection.ping(timeout=1)
logger.debug("Connection ok, returning.")
logger.info("Connected.")
return connection
except (
socket.error,
rpyc.core.protocol.PingError,
rpyc.core.async_.AsyncResultTimeout,
) as e:
                if isinstance(e, socket.error) and not e.errno == errno.ECONNREFUSED:
                    raise
                if i == self.timeout - 1:
                    raise
                elif isinstance(e, socket.error):
                    sleep(1)
def _rebuild_gh_remote(self):
if self.retry < self.max_retry:
logger.info("Lost Rhino connection, retrying.")
self.retry += 1
self.close()
[self.rhino_popen, self.connection, self.gh_remote] = [None, None, None]
logger.info("Waiting 10 seconds.")
sleep(10)
self.rhino_popen = self._launch_rhino()
self.connection = self._get_connection()
self.gh_remote_components = self.connection.root.get_component
else:
raise RuntimeError(
"Lost connection to Rhino, and reconnection attempts limit ({:d}) "
"reached. Exiting.".format(self.max_retry)
)
def _get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(("", 0))
addr, port = tcp.getsockname()
tcp.close()
return port
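# --- Hedged usage sketch (added; not part of the original module) ---
# GrasshopperToPythonRemote has no docstring example; a minimal call, with a
# placeholder server-script path and conda env name, might look like:
#   with GrasshopperToPythonRemote("rpyc_server.py", location="my_env") as gh2py:
#       math_remote = gh2py.connection.modules.math        # module on the CPython side
#       result = gh2py.run_py_function("math", "sqrt", 16)  # -> 4.0
# Kept as a comment because instantiating the class launches a subprocess.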
| {
"content_hash": "f5441b29299b76b4b480f94809a0d1c4",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 87,
"avg_line_length": 36.08796296296296,
"alnum_prop": 0.5303399615137909,
"repo_name": "pilcru/ghpythonremote",
"id": "52e0b99ae87495fbcc6d2a3b55e836b4c4a104ca",
"size": "15590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ghpythonremote/connectors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6503"
},
{
"name": "Python",
"bytes": "71570"
}
],
"symlink_target": ""
} |
"""Gradients for operators defined in data_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
"""Gradients for DynamicPartition."""
data = op.inputs[0]
indices = op.inputs[1]
num_partitions = op.get_attr("num_partitions")
prefix_shape = array_ops.shape(indices)
original_indices = array_ops.reshape(
math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
partitioned_indices = data_flow_ops.dynamic_partition(
original_indices, indices, num_partitions)
reconstructed = data_flow_ops.dynamic_stitch(partitioned_indices, grads)
reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
return [reconstructed, None]
@ops.RegisterGradient("DynamicStitch")
def _DynamicStitchGrads(op, grad):
"""Gradients for DynamicStitch."""
num_values = len(op.inputs) // 2
indices_grad = [None] * num_values
def AsInt32(x):
return (x if op.inputs[0].dtype == dtypes.int32 else
math_ops.cast(x, dtypes.int32))
inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
if isinstance(grad, ops.IndexedSlices):
output_shape = array_ops.shape(op.outputs[0])
output_rows = output_shape[0]
grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
values_grad = [array_ops.gather(grad, inp) for inp in inputs]
return indices_grad + values_grad
ops.NoGradient("Queue")
ops.NoGradient("QueueEnqueue")
ops.NoGradient("QueueEnqueueMany")
ops.NoGradient("QueueDequeue")
ops.NoGradient("QueueDequeueMany")
ops.NoGradient("QueueDequeueUpTo")
ops.NoGradient("QueueClose")
ops.NoGradient("QueueSize")
ops.NoGradient("Stack")
ops.NoGradient("StackPush")
ops.NoGradient("StackPop")
ops.NoGradient("StackClose")
ops.NoGradient("GetSessionHandle")
ops.NoGradient("GetSessionTensor")
ops.NoGradient("DeleteSessionTensor")
| {
"content_hash": "ec0e7c1595720b56566aefb034a2d121",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 34.01470588235294,
"alnum_prop": 0.7518374405533939,
"repo_name": "EvenStrangest/tensorflow",
"id": "af04d41f02304f6792ff8099926d6c4f56866184",
"size": "3003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/data_flow_grad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156263"
},
{
"name": "C++",
"bytes": "9372687"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "784316"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1533241"
},
{
"name": "Makefile",
"bytes": "11364"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "112557"
},
{
"name": "Python",
"bytes": "6949434"
},
{
"name": "Shell",
"bytes": "196466"
},
{
"name": "TypeScript",
"bytes": "411503"
}
],
"symlink_target": ""
} |
"""The tests for Media player device conditions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.media_player import DOMAIN
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a media_player."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_idle",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_paused",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_playing",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(hass, "condition", device_entry.id)
assert_lists_same(conditions, expected_conditions)
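# --- Hedged illustration (added; not part of the original tests) ---
# The condition dicts asserted above mirror what a user would put under
# `condition:` in an automation config; the entity and device ids below are
# placeholders:
#   condition: device
#   domain: media_player
#   entity_id: media_player.living_room
#   device_id: abc123
#   type: is_playing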
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
hass.states.async_set("media_player.entity", STATE_ON)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "media_player.entity",
"type": "is_on",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "media_player.entity",
"type": "is_off",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event3"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "media_player.entity",
"type": "is_idle",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_idle - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event4"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "media_player.entity",
"type": "is_paused",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_paused - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event5"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "media_player.entity",
"type": "is_playing",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_playing - {{ trigger.platform }} - {{ trigger.event.event_type }}"
},
},
},
]
},
)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on - event - test_event1"
hass.states.async_set("media_player.entity", STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off - event - test_event2"
hass.states.async_set("media_player.entity", STATE_IDLE)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "is_idle - event - test_event3"
hass.states.async_set("media_player.entity", STATE_PAUSED)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
await hass.async_block_till_done()
assert len(calls) == 4
assert calls[3].data["some"] == "is_paused - event - test_event4"
hass.states.async_set("media_player.entity", STATE_PLAYING)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
hass.bus.async_fire("test_event3")
hass.bus.async_fire("test_event4")
hass.bus.async_fire("test_event5")
await hass.async_block_till_done()
assert len(calls) == 5
assert calls[4].data["some"] == "is_playing - event - test_event5"
| {
"content_hash": "ce0ad309ba5253521ef8b7c8bf73334f",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 106,
"avg_line_length": 35.82716049382716,
"alnum_prop": 0.45945325063174824,
"repo_name": "robbiet480/home-assistant",
"id": "c52daa80320b619df722ac90e1087edb290c96fc",
"size": "8706",
"binary": false,
"copies": "11",
"ref": "refs/heads/dev",
"path": "tests/components/media_player/test_device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import config
from middleware.graphers.turtle_utils import generate_hash, generate_uri as gu, link_uris
from middleware.blazegraph.upload_graph import queue_upload
from middleware.graphers.turtle_utils import actual_filename
from rdflib import Namespace, Graph, Literal, plugin
from Bio import SeqIO
from os.path import basename
def generate_graph(transitive=True):
'''
Parses all the Namespaces defined in the config file and returns a graph
with them bound.
Return:
(rdflib.Graph): a graph with all the defined Namespaces bound to it.
'''
graph = Graph()
for key in config.namespaces.keys():
        if key == 'root':
graph.bind('', config.namespaces['root'])
else:
graph.bind(key, config.namespaces[key])
    # add edge equivalence properties
if transitive:
graph.add((gu(':hasPart'), gu('rdf:type'), gu('owl:TransitiveProperty')))
graph.add((gu(':isFoundIn'), gu('rdf:type'), gu('owl:TransitiveProperty')))
#graph.add((gu(':hasPart'), gu('rdf:type'), gu('owl:SymmetricProperty')))
# make AntimicrobialResistanceGene & VirulenceFactor subclasses of :Marker
graph.add((gu(':AntimicrobialResistanceGene'), gu('rdfs:subClassOf'), gu(':Marker')))
graph.add((gu(':VirulenceFactor'), gu('rdfs:subClassOf'), gu(':Marker')))
graph.add((gu(':PanGenomeRegion'), gu('rdfs:subClassOf'), gu(':Marker')))
# human-readable dc:description for edge types
graph.add((gu('ge:0001076'), gu('dc:description'), Literal('O-Type')))
graph.add((gu('ge:0001077'), gu('dc:description'), Literal('H-Type')))
graph.add((gu('ge:0000024'), gu('dc:description'), Literal('Upload_Date')))
graph.add((gu(':Marker'), gu('dc:description'), Literal('Any_Marker')))
graph.add((gu(':VirulenceFactor'), gu('dc:description'), Literal('Virulence_Factor')))
graph.add((gu(':AntimicrobialResistanceGene'), gu('dc:description'), Literal('AMR_Gene')))
# human-readable dc:description for object types
graph.add((gu('so:0001462'), gu('dc:description'), Literal('Bag_of_Contigs')))
graph.add((gu(':spfyId'), gu('dc:description'), Literal('SpfyId')))
return graph
def generate_turtle_skeleton(query_file):
'''
Handles the main generation of a turtle object.
NAMING CONVENTIONS:
    uriIsolate: this is the top-most entry; a unique id per file is allocated by checking our DB for the greatest existing entry (not done in this file)
ex. :spfy234
uriAssembly: aka. the genome ID, this is a sha1 hash of the file contents
ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba
uriContig: indiv contig ids; from SeqIO.record.id - this should be uniq to a contig (at least within a given file)
ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/FLOF01006689.1
note: the record.id is what RGI uses as a prefix for ORF_ID (ORF_ID has additional _314 or w/e #s)
Args:
query_file(str): path to the .fasta file (this should already incl the directory)
Returns:
graph: the graph with all the triples generated from the .fasta file
'''
# Base graph generation
graph = generate_graph()
# uriGenome generation
file_hash = generate_hash(query_file)
uriGenome = gu(':' + file_hash)
# set the object type for uriGenome
graph.add((uriGenome, gu('rdf:type'), gu('g:Genome')))
# this is used as the human readable display of Genome
graph.add((uriGenome, gu('dc:description'), Literal(actual_filename(query_file))))
# note that timestamps are not added in base graph generation, they are only added during the check for duplicate files in blazegraph
# uri for bag of contigs
# ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/
uriContigs = gu(uriGenome, "/contigs")
# set the object type for uriContigs
graph.add((uriContigs, gu('rdf:type'), gu('so:0001462')))
# link the bag of contigs to the genome
graph = link_uris(graph, uriGenome, uriContigs)
#graph.add((uriGenome, gu(':hasPart'), uriContigs))
for record in SeqIO.parse(open(query_file), "fasta"):
# ex. :4eb02f5676bc808f86c0f014bbce15775adf06ba/contigs/FLOF01006689.1
uriContig = gu(uriContigs, '/' + record.id)
# add the object type to uriContig
graph.add((uriContig, gu('rdf:type'), gu('g:Contig')))
# linking the spec contig and the bag of contigs
graph = link_uris(graph, uriContigs, uriContig)
#graph.add((uriContigs, gu(':hasPart'), uriContig))
# uriContig attributes
graph.add((uriContig, gu('g:DNASequence'), Literal(record.seq)))
graph.add((uriContig, gu('g:Description'),
Literal(record.description)))
graph.add((uriContig, gu('g:Identifier'),
Literal(record.id)))
# human-readable ; the description here is different because
# record.description tends to be rather long
# instead, record.id is the accession eg: FLOF01006689.1
graph.add((uriContig, gu('dc:description'),
Literal(record.description)))
return graph
def turtle_grapher(query_file):
graph = generate_turtle_skeleton(query_file)
queue_upload(graph)
return graph
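# --- Hedged illustration (added; not part of the original module) ---
# URI layout produced by generate_turtle_skeleton, for an assumed file hash
# 'abc123' and contig accession 'FLOF01006689.1':
#   :abc123                          a g:Genome
#   :abc123/contigs                  a so:0001462 (bag of contigs)
#   :abc123/contigs/FLOF01006689.1   a g:Contig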
| {
"content_hash": "78a8f21f68dc4e6f0f58ab05851718c1",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 142,
"avg_line_length": 46.27433628318584,
"alnum_prop": 0.6731688659399503,
"repo_name": "superphy/backend",
"id": "ae72c7ee0e4d722029855b26b44fc5ad3410f7c5",
"size": "5520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/middleware/graphers/turtle_grapher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "392139"
}
],
"symlink_target": ""
} |
import sys
import time
sys.path.append("../config/common/tests")
from test_utils import *
import fixtures
import testtools
import test_common
import test_case
import discoveryclient.client as client
server_list = {}
class DiscoveryServerTestCase(test_case.DsTestCase):
def setUp(self):
extra_config_knobs = [
('foobar', 'policy', 'fixed'),
]
super(DiscoveryServerTestCase, self).setUp(extra_disc_server_config_knobs=extra_config_knobs)
def info_callback(self, info, client_id):
global server_list
print 'In subscribe callback handler for client %s' % client_id
print '%s' % (info)
# [{u'@publisher-id': 'test_discovery-0', u'foobar': 'foobar-0'}, {u'@publisher-id': 'test_discovery-1', u'foobar': 'foobar-1'}]
server_list[client_id] = [entry['@publisher-id'] for entry in info]
pass
def test_fixed_policy(self):
global server_list
# publish 3 instances of service foobar
tasks = []
service_type = 'foobar'
for i in range(3):
client_type = 'test-discovery'
pub_id = 'test_discovery-%d' % i
pub_data = {service_type : '%s-%d' % ('foobar', i)}
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_type, pub_id)
task = disc.publish(service_type, pub_data)
tasks.append(task)
time.sleep(1)
time.sleep(1)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
self.assertEqual(response['services'][0]['service_type'], 'foobar')
# multiple subscribers for 2 instances each
subcount = 3
service_count = 2
tasks = []
for i in range(subcount):
client_id = "test-fixed-policy-%d" % i
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_id, pub_id = client_id)
obj = disc.subscribe(
service_type, service_count, self.info_callback, client_id)
tasks.append(obj.task)
time.sleep(1)
print 'Started %d tasks to subscribe service %s, count %d' \
% (subcount, service_type, service_count)
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), subcount*service_count)
# validate all three clients have the same two servers ... 0 and 1
for cid, slist in server_list.items():
self.assertEqual(slist[0], 'test_discovery-0')
self.assertEqual(slist[1], 'test_discovery-1')
print 'All clients got the same servers in correct order'
# mark server 1 down (foobar-1)
puburl = '/service/test_discovery-1'
payload = {
'service-type' : 'foobar',
'oper-state' : 'down',
}
(code, msg) = self._http_put(puburl, json.dumps(payload))
self.assertEqual(code, 200)
(code, msg) = self._http_get('/services.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 3)
for entry in response['services']:
if entry['service_id'] == 'test_discovery-1:foobar':
break
self.assertEqual(entry['oper_state'], 'down')
# wait for max TTL to expire
time.sleep(60)
# validate all clients have subscribed to foobar-0 and foobar-2
for cid, slist in server_list.items():
self.assertEqual(slist[0], 'test_discovery-0')
self.assertEqual(slist[1], 'test_discovery-2')
print 'All clients got the same servers in correct order'
# bring up foobar-1 server
payload['oper-state'] = 'up'
(code, msg) = self._http_put(puburl, json.dumps(payload))
self.assertEqual(code, 200)
# wait for max TTL to expire
time.sleep(60)
# validate all clients still subscribed to foobar-0 and foobar-2
for cid, slist in server_list.items():
self.assertEqual(slist[0], 'test_discovery-0')
self.assertEqual(slist[1], 'test_discovery-2')
print 'All clients got the same servers in correct order'
# start one more client which should also get foobar-0 and foobar-2
client_id = "test-fixed-policy-3"
disc = client.DiscoveryClient(
self._disc_server_ip, self._disc_server_port,
client_id, pub_id = client_id)
obj = disc.subscribe(
service_type, service_count, self.info_callback, client_id)
tasks.append(obj.task)
time.sleep(1)
print 'Started 1 tasks to subscribe service %s' % service_type
# validate all clients have subscribed
time.sleep(1)
(code, msg) = self._http_get('/clients.json')
self.assertEqual(code, 200)
response = json.loads(msg)
self.assertEqual(len(response['services']), 4*2)
# validate all four clients have the same two servers ... 0 and 2
for cid, slist in server_list.items():
self.assertEqual(slist[0], 'test_discovery-0')
self.assertEqual(slist[1], 'test_discovery-2')
print 'All clients got the same servers in correct order'
| {
"content_hash": "7fce153b80090bc1280e5ee0c3751ec9",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 136,
"avg_line_length": 38.46938775510204,
"alnum_prop": 0.5854995579133511,
"repo_name": "sajuptpm/contrail-controller",
"id": "3f7eb03c12d8e15dc3db495e8e81cadc95ff86af",
"size": "5655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/discovery/tests/test_fixed_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "51767"
},
{
"name": "C++",
"bytes": "19050770"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "36777"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5819"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "6129"
},
{
"name": "Python",
"bytes": "4813021"
},
{
"name": "Shell",
"bytes": "81402"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
""" Example Plugin (dice roller) (botbot plugins.example) """
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
from pyaib.plugins import keyword
from random import SystemRandom
def statsCheck(stats):
total = sum([(s - 10) / 2 for s in stats])
avg = total / 6
return avg > 0 and max(stats) > 13
def statsGen():
rand = SystemRandom()
while True:
stats = []
for s in range(0, 6): # Six Stats
rolls = []
for d in range(0, 4): # Four Dice
roll = rand.randint(1, 6)
if roll == 1: # Reroll 1's once
roll = rand.randint(1, 6)
rolls.append(roll)
rolls.sort()
rolls.reverse()
stats.append(rolls[0] + rolls[1] + rolls[2])
if statsCheck(stats):
return stats
return None
@keyword('stats')
def stats(irc_c, msg, trigger, args, kargs):
msg.reply("%s: Set 1: %r" % (msg.nick, statsGen()))
msg.reply("%s: Set 2: %r" % (msg.nick, statsGen()))
rollRE = re.compile(r'((\d+)?d((?:\d+|%))([+-]\d+)?)', re.IGNORECASE)
modRE = re.compile(r'([+-]\d+)')
def roll(count, sides):
results = []
rand = SystemRandom()
for x in range(count):
if sides == 100 or sides == 1000:
#Special Case for 100 sized dice
results.append(rand.randint(1, 10))
results.append(rand.randrange(0, 100, 10))
if sides == 1000:
results.append(rand.randrange(0, 1000, 100))
else:
results.append(rand.randint(1, sides))
return results
@keyword('roll')
def diceroll(irc_c, msg, trigger, args, kargs):
def help():
txt = ("Dice expected in form [<count>]d<sides|'%'>[+-<modifer>] or "
"+-<modifier> for d20 roll. No argument rolls d20.")
msg.reply(txt)
if 'help' in kargs or 'h' in kargs:
help()
return
rolls = []
if not args:
rolls.append(['d20', 1, 20, 0])
else:
for dice in args:
m = rollRE.match(dice) or modRE.match(dice)
if m:
group = m.groups()
if len(group) == 1:
dice = ['d20%s' % group[0], 1, 20, int(group[0])]
rolls.append(dice)
else:
dice = [group[0], int(group[1] or 1),
100 if group[2] == '%' else int(group[2]),
int(group[3] or 0)]
rolls.append(dice)
if dice[1] > 100 or (dice[2] > 100 and dice[2] != 1000):
msg.reply("%s: I don't play with crazy power gamers!"
% msg.nick)
return
else:
help()
return
for dice in rolls:
results = roll(dice[1], dice[2])
total = sum(results) + int(dice[3])
if len(results) > 10:
srolls = '+'.join([str(x) for x in results[:10]])
srolls += '...'
else:
srolls = '+'.join([str(x) for x in results])
msg.reply("%s: (%s)[%s] = %d" % (
msg.nick, dice[0], srolls, total))
print("Example Plugin Done")
| {
"content_hash": "1f5da20f1aab46eca21cfb8c4e74f5cd",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 77,
"avg_line_length": 32.04132231404959,
"alnum_prop": 0.521279339695641,
"repo_name": "facebook/pyaib",
"id": "ec7cb6b3160f699ef90251a708326b408ffe332d",
"size": "3877",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "example/plugins/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "111714"
}
],
"symlink_target": ""
} |
import os
import re
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.utils import operator_helpers
from airflow.utils.operator_helpers import context_to_airflow_vars
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveOperator(BaseOperator):
"""
Executes hql code or hive script in a specific Hive database.
:param hql: the hql to be executed. Note that you may also use
a relative path from the dag file of a (template) hive
script. (templated)
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`. (templated)
:param hiveconfs: if defined, these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``
:param hiveconf_jinja_translate: when True, hiveconf-type templating
${var} gets translated into jinja-type templating {{ var }} and
${hiveconf:var} gets translated into jinja-type templating {{ var }}.
Note that you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:param script_begin_tag: If defined, the operator will get rid of the
part of the script before the first occurrence of `script_begin_tag`
:param run_as_owner: Run HQL code as a DAG's owner.
:param mapred_queue: queue used by the Hadoop CapacityScheduler. (templated)
:param mapred_queue_priority: priority within CapacityScheduler queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
"""
template_fields: Sequence[str] = (
'hql',
'schema',
'hive_cli_conn_id',
'mapred_queue',
'hiveconfs',
'mapred_job_name',
'mapred_queue_priority',
)
template_ext: Sequence[str] = (
'.hql',
'.sql',
)
template_fields_renderers = {'hql': 'hql'}
ui_color = '#f0e4ec'
def __init__(
self,
*,
hql: str,
hive_cli_conn_id: str = 'hive_cli_default',
schema: str = 'default',
hiveconfs: Optional[Dict[Any, Any]] = None,
hiveconf_jinja_translate: bool = False,
script_begin_tag: Optional[str] = None,
run_as_owner: bool = False,
mapred_queue: Optional[str] = None,
mapred_queue_priority: Optional[str] = None,
mapred_job_name: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.hql = hql
self.hive_cli_conn_id = hive_cli_conn_id
self.schema = schema
self.hiveconfs = hiveconfs or {}
self.hiveconf_jinja_translate = hiveconf_jinja_translate
self.script_begin_tag = script_begin_tag
self.run_as = None
if run_as_owner:
self.run_as = self.dag.owner
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
job_name_template = conf.get(
'hive',
'mapred_job_name_template',
fallback="Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}",
)
if job_name_template is None:
raise ValueError("Job name template should be set !")
self.mapred_job_name_template: str = job_name_template
# assigned lazily - just for consistency we can create the attribute with a
# `None` initial value, later it will be populated by the execute method.
# This also makes `on_kill` implementation consistent since it assumes `self.hook`
# is defined.
self.hook: Optional[HiveCliHook] = None
def get_hook(self) -> HiveCliHook:
"""Get Hive cli hook"""
return HiveCliHook(
hive_cli_conn_id=self.hive_cli_conn_id,
run_as=self.run_as,
mapred_queue=self.mapred_queue,
mapred_queue_priority=self.mapred_queue_priority,
mapred_job_name=self.mapred_job_name,
)
def prepare_template(self) -> None:
if self.hiveconf_jinja_translate:
self.hql = re.sub(r"(\$\{(hiveconf:)?([ a-zA-Z0-9_]*)\})", r"{{ \g<3> }}", self.hql)
if self.script_begin_tag and self.script_begin_tag in self.hql:
self.hql = "\n".join(self.hql.split(self.script_begin_tag)[1:])
def execute(self, context: "Context") -> None:
self.log.info('Executing: %s', self.hql)
self.hook = self.get_hook()
# set the mapred_job_name if it's not set with dag, task, execution time info
if not self.mapred_job_name:
ti = context['ti']
self.hook.mapred_job_name = self.mapred_job_name_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
hostname=ti.hostname.split('.')[0],
)
if self.hiveconf_jinja_translate:
self.hiveconfs = context_to_airflow_vars(context)
else:
self.hiveconfs.update(context_to_airflow_vars(context))
self.log.info('Passing HiveConf: %s', self.hiveconfs)
self.hook.run_cli(hql=self.hql, schema=self.schema, hive_conf=self.hiveconfs)
def dry_run(self) -> None:
# Reset airflow environment variables to prevent
# existing env vars from impacting behavior.
self.clear_airflow_vars()
self.hook = self.get_hook()
self.hook.test_hql(hql=self.hql)
def on_kill(self) -> None:
if self.hook:
self.hook.kill()
def clear_airflow_vars(self) -> None:
"""Reset airflow environment variables to prevent existing ones from impacting behavior."""
blank_env_vars = {
value['env_var_format']: '' for value in operator_helpers.AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
os.environ.update(blank_env_vars)
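# --- Hedged usage sketch (added; not part of the original module) ---
# A minimal task definition using the operator above; the dag object, task id
# and HQL string are placeholders:
#   count_rows = HiveOperator(
#       task_id="count_rows",
#       hql="SELECT COUNT(*) FROM my_table",
#       hive_cli_conn_id="hive_cli_default",
#       schema="default",
#       dag=dag,
#   )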
| {
"content_hash": "bea2ff1f042f2e7d3890b95cccdeb9ba",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 110,
"avg_line_length": 39.65605095541401,
"alnum_prop": 0.6222293607452618,
"repo_name": "danielvdende/incubator-airflow",
"id": "45cae0fa4e31f38d69e19e43f5852010f859221c",
"size": "7013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/providers/apache/hive/operators/hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from django.contrib.auth import get_user_model
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.views import APIView
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
DestroyAPIView,
UpdateAPIView,
CreateAPIView,
RetrieveUpdateAPIView,
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly,
)
from posts.api.permissions import IsOwerOrReadOnly
from posts.api.pagination import PostLimitOffsetPagination, PostPageNumberPagination
from rest_framework.mixins import DestroyModelMixin, UpdateModelMixin
User = get_user_model()
from accounts.api.serializers import (
UserCreateSerializer,
UserLoginSerializer,
)
class UserCreateAPIView(CreateAPIView):
serializer_class = UserCreateSerializer
queryset = User.objects.all()
class UserLoginAPIView(CreateAPIView):
permission_classes = [AllowAny]
serializer_class = UserLoginSerializer
def post(self, request, *args, **kwargs):
data = request.data
serializer = UserLoginSerializer(data=data)
if serializer.is_valid(raise_exception=True):
new_data = serializer.data
            return Response(new_data, status=HTTP_200_OK)
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
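# --- Hedged usage sketch (added; not part of the original module) ---
# These views are assumed to be wired up in a urls.py along these lines
# (paths and names are placeholders):
#   urlpatterns = [
#       url(r'^register/$', UserCreateAPIView.as_view(), name='register'),
#       url(r'^login/$', UserLoginAPIView.as_view(), name='login'),
#   ]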
| {
"content_hash": "c53107da8933fe9a05891ae9b3cdf662",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 24.918032786885245,
"alnum_prop": 0.7519736842105263,
"repo_name": "lichi6174/django-api-lab",
"id": "04619846b7e157e5f422808e9c5fe3d86c8678c6",
"size": "1536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/accounts/api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52162"
},
{
"name": "HTML",
"bytes": "14996"
},
{
"name": "JavaScript",
"bytes": "318231"
},
{
"name": "Python",
"bytes": "54309"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.ClosestPointSampler,
"description",
"""
Samples primitive variables from the closest point on
the surface of a source primitive, and transfers the
values onto new primitive variable on the sampling objects.
""",
plugs = {
"position" : [
"description",
"""
The primitive variable that provides the positions
to find the closest point to. This defaults to "P",
the vertex position of the sampling object.
""",
"layout:section", "Settings.Input",
# Put the Input section before the Output section
"layout:index", 2,
],
}
)
| {
"content_hash": "c64ff70f13e4ee7ce369059129fb60d9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 60,
"avg_line_length": 18.970588235294116,
"alnum_prop": 0.7054263565891473,
"repo_name": "boberfly/gaffer",
"id": "3493917b608188d34c95bc58f0b8ec32a218eae5",
"size": "2441",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/GafferSceneUI/ClosestPointSamplerUI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from thrift.Thrift import *
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except ImportError:
fastbinary = None
class Iface:
def execute(self, request):
"""
Parameters:
- request
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def execute(self, request):
"""
Parameters:
- request
"""
self.send_execute(request)
return self.recv_execute()
def send_execute(self, request):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = execute_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
result.success = self._handler.execute(args.request)
oprot.writeMessageBegin("execute", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args:
"""
Attributes:
- request
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'request', (RestRequest, RestRequest.thrift_spec), None, ), # 1
)
def __init__(self, request=None):
self.request = request
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.request = RestRequest()
self.request.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_args')
if self.request != None:
oprot.writeFieldBegin('request', TType.STRUCT, 1)
self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.request is None:
raise TProtocol.TProtocolException(message='Required field request is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (RestResponse, RestResponse.thrift_spec), None, ), # 0
)
def __init__(self, success=None):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = RestResponse()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| {
"content_hash": "bbef9b8045c280e96c0b89adb1dc7c55",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 188,
"avg_line_length": 29.553488372093025,
"alnum_prop": 0.6487252124645893,
"repo_name": "Yelp/pyes",
"id": "a17cfabe3920e7e1bd7fe21a6a7dc22c0d00f238",
"size": "6453",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyes/pyesthrift/Rest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests for db.api layer."""
import datetime as dt
import ddt
from six import moves
from rally.common import db
from rally.common.db import api as db_api
import rally.common.db.sqlalchemy.api as s_api
from rally import consts
from rally import exceptions
from tests.unit import test
NOW = dt.datetime.now()
class FakeSerializable(object):
def __init__(self, **kwargs):
self.dict = {}
self.dict.update(kwargs)
def _as_dict(self):
return self.dict
@ddt.ddt
class SerializeTestCase(test.DBTestCase):
def setUp(self):
super(SerializeTestCase, self).setUp()
@ddt.data(
{"data": 1, "serialized": 1},
{"data": 1.1, "serialized": 1.1},
{"data": "a string", "serialized": "a string"},
{"data": NOW, "serialized": NOW},
{"data": {"k1": 1, "k2": 2}, "serialized": {"k1": 1, "k2": 2}},
{"data": [1, "foo"], "serialized": [1, "foo"]},
{"data": ["foo", 1, {"a": "b"}], "serialized": ["foo", 1, {"a": "b"}]},
{"data": FakeSerializable(a=1), "serialized": {"a": 1}},
{"data": [FakeSerializable(a=1),
FakeSerializable(b=FakeSerializable(c=1))],
"serialized": [{"a": 1}, {"b": {"c": 1}}]},
)
@ddt.unpack
def test_serialize(self, data, serialized):
@db_api.serialize
def fake_method():
return data
results = fake_method()
self.assertEqual(results, serialized)
def test_serialize_value_error(self):
@db_api.serialize
def fake_method():
class Fake(object):
pass
return Fake()
self.assertRaises(ValueError, fake_method)
class FixDeploymentTestCase(test.DBTestCase):
def setUp(self):
super(FixDeploymentTestCase, self).setUp()
def test_fix_deployment(self):
deployment = {
"credentials": [("bong", {"admin": "foo", "users": "bar"})]}
expected = {
"credentials": [("bong", {"admin": "foo", "users": "bar"})],
"admin": "foo",
"users": "bar"
}
@s_api.fix_deployment
def get_deployment():
return deployment
fixed_deployment = get_deployment()
self.assertEqual(fixed_deployment, expected)
class TasksTestCase(test.DBTestCase):
def setUp(self):
super(TasksTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _get_task(self, uuid):
return db.task_get(uuid)
def _get_task_status(self, uuid):
return db.task_get_status(uuid)
def _create_task(self, values=None):
values = values or {}
if "deployment_uuid" not in values:
values["deployment_uuid"] = self.deploy["uuid"]
return db.task_create(values)
def test_task_get_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_get, "f885f435-f6ca-4f3e-9b3e-aeb6837080f2")
def test_task_get_status_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_get_status,
"f885f435-f6ca-4f3e-9b3e-aeb6837080f2")
def test_task_create(self):
task = self._create_task()
db_task = self._get_task(task["uuid"])
self.assertIsNotNone(db_task["uuid"])
self.assertIsNotNone(db_task["id"])
self.assertEqual(db_task["status"], consts.TaskStatus.INIT)
def test_task_create_without_uuid(self):
_uuid = "19be8589-48b0-4af1-a369-9bebaaa563ab"
task = self._create_task({"uuid": _uuid})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["uuid"], _uuid)
def test_task_update(self):
task = self._create_task({})
db.task_update(task["uuid"], {"status": consts.TaskStatus.FAILED})
db_task = self._get_task(task["uuid"])
self.assertEqual(db_task["status"], consts.TaskStatus.FAILED)
def test_task_update_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_update,
"fake_uuid", {})
def test_task_update_status(self):
self.assertRaises(exceptions.RallyException,
db.task_update_status,
"fake_uuid", consts.TaskStatus.RUNNING,
[consts.TaskStatus.RUNNING])
def test_task_update_all_stats(self):
_uuid = self._create_task({})["uuid"]
for status in consts.TaskStatus:
db.task_update(_uuid, {"status": status})
db_task = self._get_task(_uuid)
self.assertEqual(db_task["status"], status)
def test_task_list_empty(self):
self.assertEqual([], db.task_list())
def test_task_list(self):
INIT = consts.TaskStatus.INIT
task_init = sorted(self._create_task()["uuid"] for i in moves.range(3))
FINISHED = consts.TaskStatus.FINISHED
task_finished = sorted(self._create_task(
{"status": FINISHED,
"deployment_uuid": self.deploy["uuid"]}
)["uuid"] for i in moves.range(3))
task_all = sorted(task_init + task_finished)
def get_uuids(status=None, deployment=None):
tasks = db.task_list(status=status, deployment=deployment)
return sorted(task["uuid"] for task in tasks)
self.assertEqual(task_all, get_uuids(None))
self.assertEqual(task_init, get_uuids(status=INIT))
self.assertEqual(task_finished, get_uuids(status=FINISHED))
self.assertRaises(exceptions.DeploymentNotFound,
get_uuids, deployment="non-existing-deployment")
deleted_task_uuid = task_finished.pop()
db.task_delete(deleted_task_uuid)
self.assertEqual(task_init, get_uuids(INIT))
self.assertEqual(sorted(task_finished), get_uuids(FINISHED))
def test_task_delete(self):
task1, task2 = self._create_task()["uuid"], self._create_task()["uuid"]
db.task_delete(task1)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"da6f820c-b133-4b9f-8534-4c3bcc40724b")
def test_task_delete_with_results(self):
task_id = self._create_task()["uuid"]
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 1)
db.task_delete(task_id)
res = db.task_result_get_all_by_uuid(task_id)
self.assertEqual(len(res), 0)
def test_task_delete_by_uuid_and_status(self):
values = {
"status": consts.TaskStatus.FINISHED,
}
task1 = self._create_task(values=values)["uuid"]
task2 = self._create_task(values=values)["uuid"]
db.task_delete(task1, status=consts.TaskStatus.FINISHED)
self.assertRaises(exceptions.TaskNotFound, self._get_task, task1)
self.assertEqual(task2, self._get_task(task2)["uuid"])
def test_task_delete_by_uuid_and_status_invalid(self):
task = self._create_task(
values={"status": consts.TaskStatus.INIT})["uuid"]
self.assertRaises(exceptions.TaskInvalidStatus, db.task_delete, task,
status=consts.TaskStatus.FINISHED)
def test_task_delete_by_uuid_and_status_not_found(self):
self.assertRaises(exceptions.TaskNotFound,
db.task_delete,
"fcd0483f-a405-44c4-b712-99c9e52254eb",
status=consts.TaskStatus.FINISHED)
def test_task_result_get_all_by_uuid(self):
task1 = self._create_task()["uuid"]
task2 = self._create_task()["uuid"]
for task_id in (task1, task2):
db.task_result_create(task_id,
{task_id: task_id},
{task_id: task_id})
for task_id in (task1, task2):
res = db.task_result_get_all_by_uuid(task_id)
data = {task_id: task_id}
self.assertEqual(len(res), 1)
self.assertEqual(res[0]["key"], data)
self.assertEqual(res[0]["data"], data)
def test_task_get_detailed(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed(task1["uuid"])
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
def test_task_get_detailed_last(self):
task1 = self._create_task()
key = {"name": "atata"}
data = {"a": "b", "c": "d"}
db.task_result_create(task1["uuid"], key, data)
task1_full = db.task_get_detailed_last()
results = task1_full["results"]
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["key"], key)
self.assertEqual(results[0]["data"], data)
class DeploymentTestCase(test.DBTestCase):
def test_deployment_create(self):
deploy = db.deployment_create({"config": {"opt": "val"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy["config"], {"opt": "val"})
def test_deployment_create_several(self):
# Create a deployment
deploys = db.deployment_list()
self.assertEqual(len(deploys), 0)
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploy_one["uuid"], deploys[0]["uuid"])
self.assertEqual(deploy_one["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_one["config"], {"opt1": "val1"})
        # Create another deployment and make sure that they are different
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
deploys = db.deployment_list()
self.assertEqual(len(deploys), 2)
self.assertEqual(set([deploy_one["uuid"], deploy_two["uuid"]]),
set([deploy["uuid"] for deploy in deploys]))
self.assertNotEqual(deploy_one["uuid"], deploy_two["uuid"])
self.assertEqual(deploy_two["status"], consts.DeployStatus.DEPLOY_INIT)
self.assertEqual(deploy_two["config"], {"opt2": "val2"})
def test_deployment_update(self):
deploy = db.deployment_create({})
self.assertEqual(deploy["config"], {})
update_deploy = db.deployment_update(deploy["uuid"],
{"config": {"opt": "val"}})
self.assertEqual(update_deploy["uuid"], deploy["uuid"])
self.assertEqual(update_deploy["config"], {"opt": "val"})
get_deploy = db.deployment_get(deploy["uuid"])
self.assertEqual(get_deploy["uuid"], deploy["uuid"])
self.assertEqual(get_deploy["config"], {"opt": "val"})
def test_deployment_update_several(self):
# Create a deployment and update it
deploy_one = db.deployment_create({})
self.assertEqual(deploy_one["config"], {})
update_deploy_one = db.deployment_update(
deploy_one["uuid"], {"config": {"opt1": "val1"}})
self.assertEqual(update_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(update_deploy_one["config"], {"opt1": "val1"})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
# Create another deployment
deploy_two = db.deployment_create({})
update_deploy_two = db.deployment_update(
deploy_two["uuid"], {"config": {"opt2": "val2"}})
self.assertEqual(update_deploy_two["uuid"], deploy_two["uuid"])
self.assertEqual(update_deploy_two["config"], {"opt2": "val2"})
get_deploy_one_again = db.deployment_get(deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["uuid"], deploy_one["uuid"])
self.assertEqual(get_deploy_one_again["config"], {"opt1": "val1"})
def test_deployment_get(self):
deploy_one = db.deployment_create({"config": {"opt1": "val1"}})
deploy_two = db.deployment_create({"config": {"opt2": "val2"}})
get_deploy_one = db.deployment_get(deploy_one["uuid"])
get_deploy_two = db.deployment_get(deploy_two["uuid"])
self.assertNotEqual(get_deploy_one["uuid"], get_deploy_two["uuid"])
self.assertEqual(get_deploy_one["config"], {"opt1": "val1"})
self.assertEqual(get_deploy_two["config"], {"opt2": "val2"})
def test_deployment_get_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_get,
"852e932b-9552-4b2d-89e3-a5915780a5e3")
def test_deployment_list(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
deploys = db.deployment_list()
self.assertEqual(sorted([deploy_one["uuid"], deploy_two["uuid"]]),
sorted([deploy["uuid"] for deploy in deploys]))
def test_deployment_list_with_status_and_name(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({
"config": {},
"status": consts.DeployStatus.DEPLOY_FAILED,
})
deploy_three = db.deployment_create({"name": "deployment_name"})
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_INIT)
deploys.sort(key=lambda x: x["id"])
self.assertEqual(len(deploys), 2)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
deploys = db.deployment_list(status=consts.DeployStatus.DEPLOY_FAILED)
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_two["uuid"])
deploys = db.deployment_list(
status=consts.DeployStatus.DEPLOY_FINISHED)
self.assertEqual(len(deploys), 0)
deploys = db.deployment_list(name="deployment_name")
self.assertEqual(deploys[0]["uuid"], deploy_three["uuid"])
self.assertEqual(len(deploys), 1)
def test_deployment_list_parent(self):
deploy = db.deployment_create({})
subdeploy1 = db.deployment_create({"parent_uuid": deploy["uuid"]})
subdeploy2 = db.deployment_create({"parent_uuid": deploy["uuid"]})
self.assertEqual(
[deploy["uuid"]], [d["uuid"] for d in db.deployment_list()])
subdeploys = db.deployment_list(parent_uuid=deploy["uuid"])
self.assertEqual(set([subdeploy1["uuid"], subdeploy2["uuid"]]),
set([d["uuid"] for d in subdeploys]))
def test_deployment_delete(self):
deploy_one = db.deployment_create({})
deploy_two = db.deployment_create({})
db.deployment_delete(deploy_two["uuid"])
deploys = db.deployment_list()
self.assertEqual(len(deploys), 1)
self.assertEqual(deploys[0]["uuid"], deploy_one["uuid"])
def test_deployment_delete_not_found(self):
self.assertRaises(exceptions.DeploymentNotFound,
db.deployment_delete,
"5f2883be-46c8-4c4b-a4fe-988ad0c6b20a")
def test_deployment_delete_is_busy(self):
deployment = db.deployment_create({})
db.resource_create({"deployment_uuid": deployment["uuid"]})
db.resource_create({"deployment_uuid": deployment["uuid"]})
self.assertRaises(exceptions.DeploymentIsBusy, db.deployment_delete,
deployment["uuid"])
class ResourceTestCase(test.DBTestCase):
def test_create(self):
deployment = db.deployment_create({})
resource = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "fakeprovider",
"type": "faketype",
})
resources = db.resource_get_all(deployment["uuid"])
self.assertTrue(resource["id"])
self.assertEqual(len(resources), 1)
self.assertTrue(resource["id"], resources[0]["id"])
self.assertEqual(resource["deployment_uuid"], deployment["uuid"])
self.assertEqual(resource["provider_name"], "fakeprovider")
self.assertEqual(resource["type"], "faketype")
def test_delete(self):
deployment = db.deployment_create({})
res = db.resource_create({"deployment_uuid": deployment["uuid"]})
db.resource_delete(res["id"])
resources = db.resource_get_all(deployment["uuid"])
self.assertEqual(len(resources), 0)
def test_delete_not_found(self):
self.assertRaises(exceptions.ResourceNotFound,
db.resource_delete, 123456789)
def test_get_all(self):
deployment0 = db.deployment_create({})
deployment1 = db.deployment_create({})
res0 = db.resource_create({"deployment_uuid": deployment0["uuid"]})
res1 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
res2 = db.resource_create({"deployment_uuid": deployment1["uuid"]})
resources = db.resource_get_all(deployment1["uuid"])
self.assertEqual(sorted([res1["id"], res2["id"]]),
sorted([r["id"] for r in resources]))
resources = db.resource_get_all(deployment0["uuid"])
self.assertEqual(len(resources), 1)
self.assertEqual(res0["id"], resources[0]["id"])
def test_get_all_by_provider_name(self):
deployment = db.deployment_create({})
res_one = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "one",
})
res_two = db.resource_create({
"deployment_uuid": deployment["uuid"],
"provider_name": "two",
})
resources = db.resource_get_all(deployment["uuid"],
provider_name="one")
self.assertEqual(len(resources), 1)
self.assertEqual(res_one["id"], resources[0]["id"])
resources = db.resource_get_all(deployment["uuid"],
provider_name="two")
self.assertEqual(len(resources), 1)
self.assertEqual(res_two["id"], resources[0]["id"])
def test_get_all_by_provider_type(self):
deployment = db.deployment_create({})
res_one = db.resource_create({
"deployment_uuid": deployment["uuid"],
"type": "one",
})
res_two = db.resource_create({
"deployment_uuid": deployment["uuid"],
"type": "two",
})
resources = db.resource_get_all(deployment["uuid"], type="one")
self.assertEqual(len(resources), 1)
self.assertEqual(res_one["id"], resources[0]["id"])
resources = db.resource_get_all(deployment["uuid"], type="two")
self.assertEqual(len(resources), 1)
self.assertEqual(res_two["id"], resources[0]["id"])
class VerificationTestCase(test.DBTestCase):
def setUp(self):
super(VerificationTestCase, self).setUp()
self.deploy = db.deployment_create({})
def _create_verification(self):
deployment_uuid = self.deploy["uuid"]
return db.verification_create(deployment_uuid)
def test_creation_of_verification(self):
verification = self._create_verification()
db_verification = db.verification_get(verification["uuid"])
self.assertEqual(verification["tests"], db_verification["tests"])
self.assertEqual(verification["time"], db_verification["time"])
self.assertEqual(verification["errors"], db_verification["errors"])
self.assertEqual(verification["failures"], db_verification["failures"])
def test_verification_get_not_found(self):
self.assertRaises(exceptions.NotFoundException,
db.verification_get,
"fake_uuid")
def test_verification_result_create_and_get(self):
verification = self._create_verification()
db_verification = db.verification_get(verification["uuid"])
ver_result1 = db.verification_result_create(
db_verification["uuid"], {})
ver_result2 = db.verification_result_get(db_verification["uuid"])
self.assertEqual(ver_result1["verification_uuid"],
ver_result2["verification_uuid"])
class WorkerTestCase(test.DBTestCase):
def setUp(self):
super(WorkerTestCase, self).setUp()
self.worker = db.register_worker({"hostname": "test"})
def test_register_worker_duplicate(self):
self.assertRaises(exceptions.WorkerAlreadyRegistered,
db.register_worker, {"hostname": "test"})
def test_get_worker(self):
worker = db.get_worker("test")
self.assertEqual(self.worker["id"], worker["id"])
self.assertEqual(self.worker["hostname"], worker["hostname"])
def test_get_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "notfound")
def test_unregister_worker(self):
db.unregister_worker("test")
self.assertRaises(exceptions.WorkerNotFound, db.get_worker, "test")
def test_unregister_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound,
db.unregister_worker, "fake")
def test_update_worker(self):
db.update_worker("test")
worker = db.get_worker("test")
self.assertNotEqual(self.worker["updated_at"], worker["updated_at"])
def test_update_worker_not_found(self):
self.assertRaises(exceptions.WorkerNotFound, db.update_worker, "fake")
| {
"content_hash": "14164e17d2da44c6bd20e7a090222f6f",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 79,
"avg_line_length": 40.83639705882353,
"alnum_prop": 0.5944632005401755,
"repo_name": "varuntiwari27/rally",
"id": "a3aa598f47ae30cf61f7d918b8d957e43876d99c",
"size": "22845",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/unit/common/db/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "452687"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "6231"
},
{
"name": "HTML",
"bytes": "51546"
},
{
"name": "JavaScript",
"bytes": "14187"
},
{
"name": "Makefile",
"bytes": "68380"
},
{
"name": "Mako",
"bytes": "17949"
},
{
"name": "Python",
"bytes": "8478187"
},
{
"name": "Shell",
"bytes": "61579"
}
],
"symlink_target": ""
} |
import graph_tool.all as gt
import numpy as np
import itertools
def get_modified_adjacency_matrix(g, k):
# Get regular adjacency matrix
adj = gt.adjacency(g)
# Initialize the modified adjacency matrix
X = np.zeros(adj.shape)
# Loop over nonzero elements
for i, j in zip(*adj.nonzero()):
X[i, j] = 1 / adj[i, j]
adj_max = adj.max()
# Loop over zero elements
for i, j in set(itertools.product(range(adj.shape[0]), range(adj.shape[1]))).difference(zip(*adj.nonzero())):
X[i, j] = k * adj_max
return X
def get_shortest_path_distance_matrix(g, k=2):
    # Used to find which vertices are not connected. This roundabout approach
    # is needed because graph_tool uses maxint for the shortest-path distance
    # between unconnected vertices.
def get_unconnected_distance():
g_mock = gt.Graph()
g_mock.add_vertex(2)
shortest_distances_mock = gt.shortest_distance(g_mock)
unconnected_dist = shortest_distances_mock[0][1]
return unconnected_dist
# Get the value (usually maxint) that graph_tool uses for distances between
# unconnected vertices.
unconnected_dist = get_unconnected_distance()
# Get shortest distances for all pairs of vertices in a NumPy array.
X = gt.shortest_distance(g).get_2d_array(range(g.num_vertices()))
if len(X[X == unconnected_dist]) > 0:
print('[distance_matrix] There were disconnected components!')
# Get maximum shortest-path distance (ignoring maxint)
X_max = X[X != unconnected_dist].max()
# Set the unconnected distances to k times the maximum of the other
# distances.
X[X == unconnected_dist] = k * X_max
return X
# Return the distance matrix of g, with the specified metric.
def get_distance_matrix(g, distance_metric, normalize=True, k=10.0):
print('[distance_matrix] Computing distance matrix (metric: {0})'.format(distance_metric))
if distance_metric == 'shortest_path' or distance_metric == 'spdm':
X = get_shortest_path_distance_matrix(g)
elif distance_metric == 'modified_adjacency' or distance_metric == 'mam':
X = get_modified_adjacency_matrix(g, k)
else:
raise Exception('Unknown distance metric.')
# Just to make sure, symmetrize the matrix.
X = (X + X.T) / 2
# Force diagonal to zero
X[range(X.shape[0]), range(X.shape[1])] = 0
    # Normalize the matrix so that its maximum entry is 1.
if normalize:
X /= np.max(X)
print('[distance_matrix] Done!')
return X
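# Illustrative usage sketch (not part of the original module). It assumes a
# small graph_tool graph, here a 5x5 lattice, purely for demonstration.
def _example_usage():
    g = gt.lattice([5, 5])
    # Normalized shortest-path distance matrix.
    spdm = get_distance_matrix(g, 'spdm')
    # Modified adjacency matrix variant, left unnormalized.
    mam = get_distance_matrix(g, 'mam', normalize=False, k=10.0)
    return spdm, mam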
| {
"content_hash": "e830e15b4fb81ed71f53792d3a911419",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 113,
"avg_line_length": 31.6625,
"alnum_prop": 0.6517962889853928,
"repo_name": "HanKruiger/tsnetwork",
"id": "249a7c78021e76dd4b3eb731970e204bf0bdaad1",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/modules/distance_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61308"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'home'
| {
"content_hash": "3c4c6837ca7816f088b17d38d67cee4b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 18.285714285714285,
"alnum_prop": 0.75,
"repo_name": "jrdbnntt/facetags",
"id": "ab43b6a597a290c9b93ae3111349f8c54fc5b97a",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2584"
},
{
"name": "HTML",
"bytes": "7436"
},
{
"name": "JavaScript",
"bytes": "6104"
},
{
"name": "Python",
"bytes": "13076"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ansible-role-jenkins-plugins'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "985dc15f91eea26df6839c20fcbc73b0",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 31.508474576271187,
"alnum_prop": 0.6750941366325982,
"repo_name": "openstack/ansible-role-jenkins-plugins",
"id": "dbb6a1bf825c68101126522dd71af41fdf9a0fbe",
"size": "2429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "954"
}
],
"symlink_target": ""
} |
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
# #################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"), extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index + 1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen + 1) * (char - oldchar)
while 1:
index, pos = selective_find(str, c, index, pos)
if index == -1:
break
delta += index - oldindex
result.append(delta - 1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points == 0, baselen + points + 1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
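# Illustrative sketch (not part of the original module): a small self-check of
# the encoder. The expected value "bcher-kva" is the Punycode form of the
# sample word u"b\xfccher" ("buecher"): the ASCII letters, a delimiter, then
# the generalized-integer encoding of the inserted non-ASCII character.
def _example_encode():
    encoded = punycode_encode(u"b\xfccher")
    assert encoded == "bcher-kva"
    return encoded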
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
                raise UnicodeError, "incomplete punycode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta + 1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos + 1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
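# Illustrative sketch (not part of the original module): decoding reverses the
# encoder example above, so "bcher-kva" should round-trip back to u"b\xfccher".
def _example_decode():
    decoded = punycode_decode("bcher-kva", "strict")
    assert decoded == u"b\xfccher"
    return decoded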
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self, input, errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling " + errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling " + self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| {
"content_hash": "72ada9de94e353a2bd7de5d010098e3c",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 75,
"avg_line_length": 27.015810276679844,
"alnum_prop": 0.5482077542062912,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "da2daf7edd6cb2bac9141c7453aed72ab0b18d25",
"size": "6864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/encodings/punycode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
} |
import asynqp
from asynqp import spec
from asynqp import frames
from asynqp import amqptypes
from asynqp import message
from .base_contexts import ProtocolContext, MockDispatcherContext
class WhenConnectionStartArrives(MockDispatcherContext):
def given_a_connection_start_method_I_copied_from_the_rabbitmq_server(self):
self.raw = (b"\x01\x00\x00\x00\x00\x01\x50" # type, channel, size
b"\x00\x0A\x00\x0A\x00\t\x00\x00\x01"
b"%\x0ccapabilitiesF\x00\x00\x00X\x12publisher_confirmst\x01\x1aexchange_exchange_bindings"
b"t\x01\nbasic.nackt\x01\x16consumer_cancel_notifyt\x01\tcopyrightS\x00\x00\x00'Copyright "
b"(C) 2007-2013 GoPivotal, Inc.\x0binformationS\x00\x00\x005Licensed under the MPL. "
b" See http://www.rabbitmq.com/\x08platformS\x00\x00\x00\nErlang/OTP\x07productS\x00\x00\x00\x08"
b"RabbitMQ\x07versionS\x00\x00\x00\x053.1.5"
b"\x00\x00\x00\x0eAMQPLAIN PLAIN\x00\x00\x00\x0Ben_US en_GB\xCE")
expected_method = spec.ConnectionStart(0, 9, {
'capabilities': {'publisher_confirms': True,
'exchange_exchange_bindings': True,
'basic.nack': True,
'consumer_cancel_notify': True},
'copyright': 'Copyright (C) 2007-2013 GoPivotal, Inc.',
'information': 'Licensed under the MPL. See http://www.rabbitmq.com/',
'platform': 'Erlang/OTP',
'product': 'RabbitMQ',
'version': '3.1.5'
}, 'AMQPLAIN PLAIN', 'en_US en_GB')
self.expected_frame = asynqp.frames.MethodFrame(0, expected_method)
def when_the_frame_arrives(self):
self.protocol.data_received(self.raw)
self.tick()
def it_should_dispatch_a_correctly_deserialised_ConnectionStart_method(self):
self.dispatcher.dispatch.assert_called_once_with(self.expected_frame)
class WhenSendingConnectionStartOK(ProtocolContext):
def given_a_method_to_send(self):
method = spec.ConnectionStartOK({'somecrap': 'aboutme'}, 'AMQPLAIN', {'auth': 'info'}, 'en_US')
self.frame = asynqp.frames.MethodFrame(0, method)
def when_we_send_the_method(self):
self.protocol.send_frame(self.frame)
def it_should_send_the_correct_bytestring(self):
expected_bytes = (b'\x01\x00\x00\x00\x00\x00>\x00\n\x00\x0b\x00\x00\x00\x15\x08somecrapS'
b'\x00\x00\x00\x07aboutme\x08AMQPLAIN\x00\x00\x00\x0e\x04'
b'authS\x00\x00\x00\x04info\x05en_US\xce')
self.transport.write.assert_called_once_with(expected_bytes)
class WhenConnectionTuneArrives(MockDispatcherContext):
def given_a_connection_tune_method_I_copied_from_the_rabbitmq_server(self):
self.raw = b'\x01\x00\x00\x00\x00\x00\x0C\x00\x0A\x00\x1E\x00\x00\x00\x02\x00\x00\x02\x58\xCE'
expected_method = spec.ConnectionTune(0, 131072, 600)
self.expected_frame = asynqp.frames.MethodFrame(0, expected_method)
def when_the_frame_arrives(self):
self.protocol.data_received(self.raw)
self.tick()
def it_should_dispatch_a_correctly_deserialised_ConnectionTune_method(self):
self.dispatcher.dispatch.assert_called_once_with(self.expected_frame)
class WhenSendingConnectionTuneOK(ProtocolContext):
def given_a_method_to_send(self):
method = spec.ConnectionTuneOK(1024, 131072, 10)
self.frame = asynqp.frames.MethodFrame(0, method)
def when_I_send_the_method(self):
self.protocol.send_frame(self.frame)
def it_should_write_the_correct_bytestring(self):
self.transport.write.assert_called_once_with(b'\x01\x00\x00\x00\x00\x00\x0C\x00\n\x00\x1F\x04\x00\x00\x02\x00\x00\x00\x0A\xCE')
class WhenSendingConnectionOpen(ProtocolContext):
def given_a_method_to_send(self):
method = spec.ConnectionOpen('/', '', False)
self.frame = asynqp.frames.MethodFrame(0, method)
def when_I_send_the_method(self):
self.protocol.send_frame(self.frame)
def it_should_write_the_correct_bytestring(self):
self.transport.write.assert_called_once_with(b'\x01\x00\x00\x00\x00\x00\x08\x00\x0A\x00\x28\x01/\x00\x00\xCE')
class WhenSendingQueueDeclare(ProtocolContext):
def given_a_method_to_send(self):
self.method = spec.QueueDeclare(0, 'a', False, False, False, True, False, {})
def when_I_send_the_method(self):
self.protocol.send_method(1, self.method)
def it_should_write_the_correct_bytestring(self):
self.transport.write.assert_called_once_with(b'\x01\x00\x01\x00\x00\x00\x0D\x00\x32\x00\x0A\x00\x00\x01a\x08\x00\x00\x00\x00\xCE')
class WhenSendingContentHeader(ProtocolContext):
def given_a_content_header_frame(self):
payload = message.ContentHeaderPayload(50, 100, [amqptypes.Octet(3), None, amqptypes.Table({'some': 'value'})])
self.frame = frames.ContentHeaderFrame(1, payload)
def when_I_send_the_frame(self):
self.protocol.send_frame(self.frame)
def it_should_write_the_correct_bytestring(self):
self.transport.write.assert_called_once_with(
b'\x02\x00\x01\x00\x00\x00\x22' # regular frame header
b'\x00\x32\x00\x00' # class id 50; weight is always 0
b'\x00\x00\x00\x00\x00\x00\x00\x64' # body length 100
b'\xA0\x00' # property_flags 0b1010000000000000
b'\x03\x00\x00\x00\x0F\x04someS\x00\x00\x00\x05value' # property list
b'\xCE')
class WhenAContentHeaderArrives(MockDispatcherContext):
def given_a_content_header_frame(self):
self.raw = (
b'\x02\x00\x01\x00\x00\x00\x25' # regular frame header
b'\x00\x32\x00\x00' # class id 50; weight is always 0
b'\x00\x00\x00\x00\x00\x00\x00\x64' # body length 100
b'\xA0\x00' # property_flags 0b1010000000000000
b'\x03yes\x00\x00\x00\x0F\x04someS\x00\x00\x00\x05value' # property list
b'\xCE')
expected_payload = message.ContentHeaderPayload(50, 100, [
amqptypes.ShortStr('yes'), None, amqptypes.Table({'some': 'value'}),
None, None, None, None, None, None, None, None, None, None])
self.expected_frame = frames.ContentHeaderFrame(1, expected_payload)
def when_the_frame_arrives(self):
self.protocol.data_received(self.raw)
self.tick()
def it_should_deserialise_it_to_a_ContentHeaderFrame(self):
self.dispatcher.dispatch.assert_called_once_with(self.expected_frame)
class WhenBasicGetOKArrives(MockDispatcherContext):
def given_a_frame(self):
self.raw = (
b'\x01\x00\x01\x00\x00\x00\x22' # type, channel, size
b'\x00\x3C\x00\x47' # 60, 71
b'\x00\x00\x00\x00\x00\x00\x00\x01' # delivery tag
b'\x00' # not redelivered
b'\x08exchange'
b'\x07routing'
b'\x00\x00\x00\x00' # no more messages
b'\xCE')
expected_method = spec.BasicGetOK(1, False, 'exchange', 'routing', 0)
self.expected_frame = frames.MethodFrame(1, expected_method)
def when_the_frame_arrives(self):
self.protocol.data_received(self.raw)
self.tick()
def it_should_deserialise_it_to_the_correct_method(self):
self.dispatcher.dispatch.assert_called_once_with(self.expected_frame)
| {
"content_hash": "cb2002ba98757982090499bf6eb7d9c1",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 138,
"avg_line_length": 44.92168674698795,
"alnum_prop": 0.6528094407938849,
"repo_name": "benjamin-hodgson/asynqp",
"id": "5c0fc23690dc3b75bff10e49c92f5abcafaad478",
"size": "7457",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/method_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204194"
}
],
"symlink_target": ""
} |
def not_equal_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
"""
Args:
grad_inputs (list of :obj:`nnabla.Variable`): Propagated grads to this backward function.
inputs (list of :obj:`nnabla.Variable` and None): Input Variables of the forward function
if this backward function depends on it. Otherwise, None is set instead.
input_shapes (list of tuple of :obj:`int`): Input shapes of the forward function.
The shapes of the inputs in which None is set can be passed.
outputs (list of :obj:`nnabla.Variable` and None): Output Variables of the forward function
if this backward function depends on it. Otherwise, None is set instead.
output_shapes (list of tuple of :obj:`int`): Output shapes of the forward function.
The shapes of the outputs in which None is set can be passed.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
return [None] * (len(grad_inputs) + len(inputs))
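# Since not_equal is a comparison, it has no meaningful gradient and every
# entry of the returned list is None. An illustrative (hypothetical) call with
# one propagated gradient and two inputs therefore yields three Nones:
#
#   not_equal_backward([g_dy], [x0, x1], [x0.shape, x1.shape], [y], [y.shape])
#   # -> [None, None, None]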
| {
"content_hash": "5496dc672d71ee45780862f96d662f4b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 97,
"avg_line_length": 62.5,
"alnum_prop": 0.6995555555555556,
"repo_name": "sony/nnabla",
"id": "356773fe0e50d29ba672713b5e4c7d8cda8214e2",
"size": "1759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/nnabla/backward_function/not_equal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder import utils
CONF = cfg.CONF
OPTIONAL_FIELDS = []
LOG = logging.getLogger(__name__)
@base.CinderObjectRegistry.register
class Volume(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.UUIDField(),
'_name_id': fields.UUIDField(nullable=True),
'ec2_id': fields.UUIDField(nullable=True),
'user_id': fields.UUIDField(nullable=True),
'project_id': fields.UUIDField(nullable=True),
'snapshot_id': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'size': fields.IntegerField(),
'availability_zone': fields.StringField(),
'status': fields.StringField(),
'attach_status': fields.StringField(),
'migration_status': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'provider_id': fields.UUIDField(nullable=True),
'provider_location': fields.StringField(nullable=True),
'provider_auth': fields.StringField(nullable=True),
'provider_geometry': fields.StringField(nullable=True),
'volume_type_id': fields.UUIDField(nullable=True),
'source_volid': fields.UUIDField(nullable=True),
'encryption_key_id': fields.UUIDField(nullable=True),
'consistencygroup_id': fields.UUIDField(nullable=True),
'deleted': fields.BooleanField(default=False),
'bootable': fields.BooleanField(default=False),
'replication_status': fields.StringField(nullable=True),
'replication_extended_status': fields.StringField(nullable=True),
'replication_driver_data': fields.StringField(nullable=True),
}
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
# usually part of the model
obj_extra_fields = ['name', 'name_id']
@property
def name_id(self):
return self.id if not self._name_id else self._name_id
@name_id.setter
def name_id(self, value):
self._name_id = value
@property
def name(self):
return CONF.volume_name_template % self.name_id
def __init__(self, *args, **kwargs):
super(Volume, self).__init__(*args, **kwargs)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version."""
target_version = utils.convert_version_to_tuple(target_version)
@staticmethod
def _from_db_object(context, volume, db_volume):
for name, field in volume.fields.items():
value = db_volume[name]
if isinstance(field, fields.IntegerField):
value = value or 0
volume[name] = value
volume._context = context
volume.obj_reset_changes()
return volume
@base.remotable_classmethod
def get_by_id(cls, context, id):
db_volume = db.volume_get(context, id)
return cls._from_db_object(context, cls(context), db_volume)
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already created'))
updates = self.obj_get_changes()
db_volume = db.volume_create(self._context, updates)
self._from_db_object(self._context, self, db_volume)
@base.remotable
def save(self):
updates = self.obj_get_changes()
if updates:
db.volume_update(self._context, self.id, updates)
self.obj_reset_changes()
@base.remotable
def destroy(self):
db.volume_destroy(self._context, self.id)
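# Illustrative sketch (not part of the original module): the typical lifecycle
# of this versioned object, with placeholder field values.
#
#   vol = objects.Volume(context=ctxt, size=1, status='creating',
#                        attach_status='detached', availability_zone='nova')
#   vol.create()                  # inserts a row via db.volume_create
#   vol.display_name = 'example'
#   vol.save()                    # persists only the changed fields
#   vol.destroy()                 # removes the row via db.volume_destroy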
@base.CinderObjectRegistry.register
class VolumeList(base.ObjectListBase, base.CinderObject):
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('Volume'),
}
child_versions = {
'1.0': '1.0'
}
@base.remotable_classmethod
def get_all(cls, context, marker, limit, sort_key, sort_dir,
filters=None):
volumes = db.volume_get_all(context, marker, limit, sort_key,
sort_dir, filters=filters)
return base.obj_make_list(context, cls(context), objects.Volume,
volumes)
| {
"content_hash": "344c16f1ee87e7da541392495dd39374",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 77,
"avg_line_length": 33.77777777777778,
"alnum_prop": 0.6371299342105263,
"repo_name": "saeki-masaki/cinder",
"id": "d32aff26af73ce9d18b4e641d71fd901e260e64b",
"size": "5476",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/objects/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11624862"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
"""
:mod:`extn_test` --- lib.packet.spse.scmp_auth.extn unit tests
==============================================================================
"""
# Stdlib
from unittest.mock import patch
import nose
import nose.tools as ntools
from lib.packet.spse.defines import SPSESecModes, SPSEValidationError
from lib.packet.spse.scmp_auth.ext_hashtree import SCMPAuthHashtreeLengths, SCMPAuthHashTreeExtn
from test.testcommon import create_mock
class TestSCMPAuthHashTreeExtnParse(object):
"""
Unit tests for lib.packet.spse.scmp_auth.extn.SCMPAuthHashTreeExtn._parse
"""
@patch("lib.packet.spse.scmp_auth.ext_hashtree."
"SCIONPacketSecurityBaseExtn._parse", autospec=True)
@patch("lib.packet.spse.scmp_auth.ext_hashtree.Raw", autospec=True)
def test(self, raw, super_parse):
inst = SCMPAuthHashTreeExtn()
inst.append_hop = create_mock()
data = create_mock(["pop"])
data.pop.side_effect = ("sec_mode", "height", "res", "order", "sign", "hashes")
raw.return_value = data
arg = bytes(21)
# Call
inst._parse(arg)
# Tests
raw.assert_called_once_with(arg, "SCMPAuthHashTreeExtn")
super_parse.assert_called_once_with(inst, data)
ntools.assert_equal(inst.sec_mode, "sec_mode")
ntools.assert_equal(inst.height, "height")
ntools.assert_equal(inst.order, "order")
ntools.assert_equal(inst.signature, "sign")
ntools.assert_equal(inst.hashes, "hashes")
class TestSCMPAuthHashTreeExtnPack(object):
"""
Unit tests for lib.packet.spse.scmp_auth.extn.SCMPAuthHashTreeExtn.pack
"""
def test(self):
height = 2
order = bytes(range(SCMPAuthHashtreeLengths.ORDER))
signature = bytes(range(SCMPAuthHashtreeLengths.SIGNATURE))
hashes = bytes(range(height * SCMPAuthHashtreeLengths.HASH))
inst = SCMPAuthHashTreeExtn.from_values(
height, order, signature, hashes)
inst._check_len = create_mock()
expected = b"".join((
bytes([SPSESecModes.SCMP_AUTH_HASH_TREE]), bytes((height,)), bytes(1),
order, signature, hashes))
# Call
ntools.eq_(inst.pack(), expected)
# Tests
inst._check_len.assert_called_once_with(expected)
class TestSCMPAuthDRKeyCheckValidity(object):
"""
    Unit tests for lib.packet.spse.scmp_auth.extn.SCMPAuthHashTreeExtn.check_validity
"""
def _setup(self):
return (13, bytes(SCMPAuthHashtreeLengths.ORDER), bytes(SCMPAuthHashtreeLengths.SIGNATURE),
bytes(SCMPAuthHashtreeLengths.HASH * 13))
def test(self):
height, order, signature, hashes = self._setup()
SCMPAuthHashTreeExtn.check_validity(height, order, signature, hashes)
def test_invalid_order_length(self):
height, order, signature, hashes = self._setup()
func = SCMPAuthHashTreeExtn.check_validity
ntools.assert_raises(SPSEValidationError, func, height, order + bytes(1), signature, hashes)
def test_invalid_signature_length(self):
height, order, signature, hashes = self._setup()
func = SCMPAuthHashTreeExtn.check_validity
ntools.assert_raises(SPSEValidationError, func, height, order, signature + bytes(1), hashes)
def test_invalid_hashes_length(self):
height, order, signature, hashes = self._setup()
func = SCMPAuthHashTreeExtn.check_validity
ntools.assert_raises(SPSEValidationError, func, height, order, signature, hashes + bytes(1))
def test_invalid_height(self):
height, order, signature, hashes = self._setup()
func = SCMPAuthHashTreeExtn.check_validity
ntools.assert_raises(SPSEValidationError, func, -1, order, signature, hashes)
ntools.assert_raises(SPSEValidationError, func, 17, order, signature, hashes)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
| {
"content_hash": "7502f1ad286ce5fd3586322e24cdef86",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 100,
"avg_line_length": 39.93877551020408,
"alnum_prop": 0.6584057230454777,
"repo_name": "klausman/scion",
"id": "5cc3f801ff74bb70fa89c6cdae9be2dd664bf75e",
"size": "4486",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/test/lib/packet/spse/scmp_auth/ext_hashtree_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "155250"
},
{
"name": "Cap'n Proto",
"bytes": "13369"
},
{
"name": "Dockerfile",
"bytes": "471"
},
{
"name": "Go",
"bytes": "1831534"
},
{
"name": "Lua",
"bytes": "28950"
},
{
"name": "Makefile",
"bytes": "22469"
},
{
"name": "Python",
"bytes": "1306018"
},
{
"name": "Ruby",
"bytes": "2550"
},
{
"name": "Shell",
"bytes": "56791"
},
{
"name": "Standard ML",
"bytes": "390"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import home, base_files, documentation
urlpatterns = [
url(r'^$', home, name='home'),
url(r'^ayuda/', documentation, name='ayuda'),
url(r'^(?P<filename>(robots.txt)|(humans.txt))$', base_files, name='base_files')
]
| {
"content_hash": "3f35909009ad36b46d72df08171c5367",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 84,
"avg_line_length": 34.125,
"alnum_prop": 0.6556776556776557,
"repo_name": "erikiado/jp2_online",
"id": "40e49617f09ed59df708698a17fd8b5afeddb6c2",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14504"
},
{
"name": "HTML",
"bytes": "146491"
},
{
"name": "JavaScript",
"bytes": "15162"
},
{
"name": "Python",
"bytes": "586358"
}
],
"symlink_target": ""
} |
import warnings
import pymongo
import re
from pymongo.read_preferences import ReadPreference
from bson.dbref import DBRef
from mongoengine import signals
from mongoengine.common import _import_class
from mongoengine.base import (
DocumentMetaclass,
TopLevelDocumentMetaclass,
BaseDocument,
BaseDict,
BaseList,
EmbeddedDocumentList,
ALLOW_INHERITANCE,
get_document
)
from mongoengine.errors import (InvalidQueryError, InvalidDocumentError,
SaveConditionError)
from mongoengine.python_support import IS_PYMONGO_3
from mongoengine.queryset import (OperationError, NotUniqueError,
QuerySet, transform)
from mongoengine.connection import get_db, DEFAULT_CONNECTION_NAME
from mongoengine.context_managers import switch_db, switch_collection
__all__ = ('Document', 'EmbeddedDocument', 'DynamicDocument',
'DynamicEmbeddedDocument', 'OperationError',
'InvalidCollectionError', 'NotUniqueError', 'MapReduceDocument')
def includes_cls(fields):
""" Helper function used for ensuring and comparing indexes
"""
first_field = None
if len(fields):
if isinstance(fields[0], basestring):
first_field = fields[0]
elif isinstance(fields[0], (list, tuple)) and len(fields[0]):
first_field = fields[0][0]
return first_field == '_cls'
class InvalidCollectionError(Exception):
pass
class EmbeddedDocument(BaseDocument):
"""A :class:`~mongoengine.Document` that isn't stored in its own
collection. :class:`~mongoengine.EmbeddedDocument`\ s should be used as
fields on :class:`~mongoengine.Document`\ s through the
:class:`~mongoengine.EmbeddedDocumentField` field type.
A :class:`~mongoengine.EmbeddedDocument` subclass may be itself subclassed,
to create a specialised version of the embedded document that will be
stored in the same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
"""
__slots__ = ('_instance', )
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
__metaclass__ = DocumentMetaclass
def __init__(self, *args, **kwargs):
super(EmbeddedDocument, self).__init__(*args, **kwargs)
self._instance = None
self._changed_fields = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._data == other._data
return False
def __ne__(self, other):
return not self.__eq__(other)
def save(self, *args, **kwargs):
self._instance.save(*args, **kwargs)
def reload(self, *args, **kwargs):
self._instance.reload(*args, **kwargs)
class Document(BaseDocument):
"""The base class used for defining the structure and properties of
collections of documents stored in MongoDB. Inherit from this class, and
add fields as class attributes to define a document's structure.
Individual documents may then be created by making instances of the
:class:`~mongoengine.Document` subclass.
By default, the MongoDB collection used to store documents created using a
:class:`~mongoengine.Document` subclass will be the name of the subclass
converted to lowercase. A different collection may be specified by
providing :attr:`collection` to the :attr:`meta` dictionary in the class
definition.
A :class:`~mongoengine.Document` subclass may be itself subclassed, to
create a specialised version of the document that will be stored in the
same collection. To facilitate this behaviour a `_cls`
    field is added to documents (hidden through the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of
`_cls` set :attr:`allow_inheritance` to ``False`` in the :attr:`meta`
dictionary.
A :class:`~mongoengine.Document` may use a **Capped Collection** by
specifying :attr:`max_documents` and :attr:`max_size` in the :attr:`meta`
dictionary. :attr:`max_documents` is the maximum number of documents that
is allowed to be stored in the collection, and :attr:`max_size` is the
maximum size of the collection in bytes. :attr:`max_size` is rounded up
    to the next multiple of 256 by MongoDB internally (and by mongoengine
    beforehand), so use a multiple of 256 yourself to avoid confusion. If :attr:`max_size` is not
specified and :attr:`max_documents` is, :attr:`max_size` defaults to
10485760 bytes (10MB).
Indexes may be created by specifying :attr:`indexes` in the :attr:`meta`
dictionary. The value should be a list of field names or tuples of field
names. Index direction may be specified by prefixing the field names with
a **+** or **-** sign.
Automatic index creation can be disabled by specifying
:attr:`auto_create_index` in the :attr:`meta` dictionary. If this is set to
False then indexes will not be created by MongoEngine. This is useful in
production systems where index creation is performed as part of a
deployment system.
By default, _cls will be added to the start of every index (that
doesn't contain a list) if allow_inheritance is True. This can be
disabled by either setting cls to False on the specific index or
by setting index_cls to False on the meta dictionary for the document.
By default, any extra attribute existing in stored data but not declared
in your model will raise a :class:`~mongoengine.FieldDoesNotExist` error.
This can be disabled by setting :attr:`strict` to ``False``
in the :attr:`meta` dictionary.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
__metaclass__ = TopLevelDocumentMetaclass
__slots__ = ('__objects',)
def pk():
"""Primary key alias
"""
def fget(self):
if 'id_field' not in self._meta:
return None
return getattr(self, self._meta['id_field'])
def fset(self, value):
return setattr(self, self._meta['id_field'], value)
return property(fget, fset)
pk = pk()
@classmethod
def _get_db(cls):
"""Some Model using other db_alias"""
return get_db(cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME))
@classmethod
def _get_collection(cls):
"""Returns the collection for the document."""
# TODO: use new get_collection() with PyMongo3 ?
if not hasattr(cls, '_collection') or cls._collection is None:
db = cls._get_db()
collection_name = cls._get_collection_name()
# Create collection as a capped collection if specified
if cls._meta.get('max_size') or cls._meta.get('max_documents'):
# Get max document limit and max byte size from meta
max_size = cls._meta.get('max_size') or 10 * 2 ** 20 # 10MB default
max_documents = cls._meta.get('max_documents')
# Round up to next 256 bytes as MongoDB would do it to avoid exception
if max_size % 256:
max_size = (max_size // 256 + 1) * 256
if collection_name in db.collection_names():
cls._collection = db[collection_name]
# The collection already exists, check if its capped
# options match the specified capped options
options = cls._collection.options()
if options.get('max') != max_documents or \
options.get('size') != max_size:
msg = (('Cannot create collection "%s" as a capped '
'collection as it already exists')
% cls._collection)
raise InvalidCollectionError(msg)
else:
# Create the collection as a capped collection
opts = {'capped': True, 'size': max_size}
if max_documents:
opts['max'] = max_documents
cls._collection = db.create_collection(
collection_name, **opts
)
else:
cls._collection = db[collection_name]
if cls._meta.get('auto_create_index', True):
cls.ensure_indexes()
return cls._collection
def modify(self, query={}, **update):
"""Perform an atomic update of the document in the database and reload
the document object using updated version.
Returns True if the document has been updated or False if the document
in the database doesn't match the query.
.. note:: All unsaved changes that have been made to the document are
rejected if the method returns True.
:param query: the update will be performed only if the document in the
database matches the query
:param update: Django-style update keyword arguments
"""
if self.pk is None:
raise InvalidDocumentError("The document does not have a primary key.")
id_field = self._meta["id_field"]
query = query.copy() if isinstance(query, dict) else query.to_query(self)
if id_field not in query:
query[id_field] = self.pk
elif query[id_field] != self.pk:
raise InvalidQueryError("Invalid document modify query: it must modify only this document.")
updated = self._qs(**query).modify(new=True, **update)
if updated is None:
return False
for field in self._fields_ordered:
setattr(self, field, self._reload(field, updated[field]))
self._changed_fields = updated._changed_fields
self._created = False
return True
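    # Illustrative usage of modify() -- a hypothetical sketch, not part of the original
    # source (the Counter model and its fields are assumed for the example):
    #
    #     doc = Counter.objects.first()
    #     # atomically bump the counter only if the stored version still matches
    #     doc.modify(query={'version': doc.version}, inc__count=1, inc__version=1)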
def save(self, force_insert=False, validate=True, clean=True,
write_concern=None, cascade=None, cascade_kwargs=None,
_refs=None, save_condition=None, signal_kwargs=None, **kwargs):
"""Save the :class:`~mongoengine.Document` to the database. If the
document already exists, it will be updated, otherwise it will be
created.
:param force_insert: only try to create a new document, don't allow
updates of existing documents
:param validate: validates the document; set to ``False`` to skip.
:param clean: call the document clean method, requires `validate` to be
True.
:param write_concern: Extra keyword arguments are passed down to
:meth:`~pymongo.collection.Collection.save` OR
:meth:`~pymongo.collection.Collection.insert`
which will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
:param cascade: Sets the flag for cascading saves. You can set a
default by setting "cascade" in the document __meta__
        :param cascade_kwargs: (optional) kwargs dictionary to be passed through
to cascading saves. Implies ``cascade=True``.
:param _refs: A list of processed references used in cascading saves
:param save_condition: only perform save if matching record in db
satisfies condition(s) (e.g. version number).
Raises :class:`OperationError` if the conditions are not satisfied
        :param signal_kwargs: (optional) kwargs dictionary to be passed to
the signal calls.
.. versionchanged:: 0.5
In existing documents it only saves changed fields using
set / unset. Saves are cascaded and any
:class:`~bson.dbref.DBRef` objects that have changes are
saved as well.
.. versionchanged:: 0.6
Added cascading saves
.. versionchanged:: 0.8
Cascade saves are optional and default to False. If you want
            fine-grained control then you can turn it on using document
meta['cascade'] = True. Also you can pass different kwargs to
the cascade save using cascade_kwargs which overwrites the
existing kwargs with custom values.
.. versionchanged:: 0.8.5
Optional save_condition that only overwrites existing documents
if the condition is satisfied in the current db record.
.. versionchanged:: 0.10
:class:`OperationError` exception raised if save_condition fails.
.. versionchanged:: 0.10.1
:class: save_condition failure now raises a `SaveConditionError`
.. versionchanged:: 0.10.7
Add signal_kwargs argument
"""
signal_kwargs = signal_kwargs or {}
signals.pre_save.send(self.__class__, document=self, **signal_kwargs)
if validate:
self.validate(clean=clean)
if write_concern is None:
write_concern = {"w": 1}
doc = self.to_mongo()
created = ('_id' not in doc or self._created or force_insert)
signals.pre_save_post_validation.send(self.__class__, document=self,
created=created, **signal_kwargs)
try:
collection = self._get_collection()
if self._meta.get('auto_create_index', True):
self.ensure_indexes()
if created:
if force_insert:
object_id = collection.insert(doc, **write_concern)
else:
object_id = collection.save(doc, **write_concern)
                    # In PyMongo 3.0, save() internally calls _update() but does not
                    # return the _id value that was passed in, so it is fetched back here.
                    # Behaviour is correct in 2.X and in 3.0.1+ versions.
if not object_id and pymongo.version_tuple == (3, 0):
pk_as_mongo_obj = self._fields.get(self._meta['id_field']).to_mongo(self.pk)
object_id = self._qs.filter(pk=pk_as_mongo_obj).first() and \
self._qs.filter(pk=pk_as_mongo_obj).first().pk
else:
object_id = doc['_id']
updates, removals = self._delta()
# Need to add shard key to query, or you get an error
if save_condition is not None:
select_dict = transform.query(self.__class__,
**save_condition)
else:
select_dict = {}
select_dict['_id'] = object_id
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
path = self._lookup_field(k.split('.'))
actual_key = [p.db_field for p in path]
val = doc
for ak in actual_key:
val = val[ak]
select_dict['.'.join(actual_key)] = val
def is_new_object(last_error):
if last_error is not None:
updated = last_error.get("updatedExisting")
if updated is not None:
return not updated
return created
update_query = {}
if updates:
update_query["$set"] = updates
if removals:
update_query["$unset"] = removals
if updates or removals:
upsert = save_condition is None
last_error = collection.update(select_dict, update_query,
upsert=upsert, **write_concern)
if not upsert and last_error["n"] == 0:
raise SaveConditionError('Race condition preventing'
' document update detected')
created = is_new_object(last_error)
if cascade is None:
cascade = self._meta.get(
'cascade', False) or cascade_kwargs is not None
if cascade:
kwargs = {
"force_insert": force_insert,
"validate": validate,
"write_concern": write_concern,
"cascade": cascade
}
if cascade_kwargs: # Allow granular control over cascades
kwargs.update(cascade_kwargs)
kwargs['_refs'] = _refs
self.cascade_save(**kwargs)
except pymongo.errors.DuplicateKeyError, err:
message = u'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % unicode(err))
except pymongo.errors.OperationFailure, err:
message = 'Could not save document (%s)'
if re.match('^E1100[01] duplicate key', unicode(err)):
# E11000 - duplicate key error index
# E11001 - duplicate key on update
message = u'Tried to save duplicate unique keys (%s)'
raise NotUniqueError(message % unicode(err))
raise OperationError(message % unicode(err))
id_field = self._meta['id_field']
if created or id_field not in self._meta.get('shard_key', []):
self[id_field] = self._fields[id_field].to_python(object_id)
signals.post_save.send(self.__class__, document=self,
created=created, **signal_kwargs)
self._clear_changed_fields()
self._created = False
return self
def cascade_save(self, *args, **kwargs):
"""Recursively saves any references /
generic references on the document"""
_refs = kwargs.get('_refs', []) or []
ReferenceField = _import_class('ReferenceField')
GenericReferenceField = _import_class('GenericReferenceField')
for name, cls in self._fields.items():
if not isinstance(cls, (ReferenceField,
GenericReferenceField)):
continue
ref = self._data.get(name)
if not ref or isinstance(ref, DBRef):
continue
if not getattr(ref, '_changed_fields', True):
continue
ref_id = "%s,%s" % (ref.__class__.__name__, str(ref._data))
if ref and ref_id not in _refs:
_refs.append(ref_id)
kwargs["_refs"] = _refs
ref.save(**kwargs)
ref._changed_fields = []
@property
def _qs(self):
"""
Returns the queryset to use for updating / reloading / deletions
"""
if not hasattr(self, '__objects'):
self.__objects = QuerySet(self, self._get_collection())
return self.__objects
@property
def _object_key(self):
"""Dict to identify object in collection
"""
select_dict = {'pk': self.pk}
shard_key = self.__class__._meta.get('shard_key', tuple())
for k in shard_key:
path = self._lookup_field(k.split('.'))
actual_key = [p.db_field for p in path]
val = self
for ak in actual_key:
val = getattr(val, ak)
select_dict['__'.join(actual_key)] = val
return select_dict
def update(self, **kwargs):
"""Performs an update on the :class:`~mongoengine.Document`
A convenience wrapper to :meth:`~mongoengine.QuerySet.update`.
Raises :class:`OperationError` if called on an object that has not yet
been saved.
"""
if not self.pk:
if kwargs.get('upsert', False):
query = self.to_mongo()
if "_cls" in query:
del query["_cls"]
return self._qs.filter(**query).update_one(**kwargs)
else:
raise OperationError(
'attempt to update a document not yet saved')
# Need to add shard key to query, or you get an error
return self._qs.filter(**self._object_key).update_one(**kwargs)
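    # Illustrative usage of update() -- hypothetical model and field names, not part
    # of the original source:
    #
    #     post.update(set__title='New title', push__tags='mongodb')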
def delete(self, signal_kwargs=None, **write_concern):
"""Delete the :class:`~mongoengine.Document` from the database. This
will only take effect if the document has been previously saved.
        :param signal_kwargs: (optional) kwargs dictionary to be passed to
the signal calls.
:param write_concern: Extra keyword arguments are passed down which
will be used as options for the resultant
``getLastError`` command. For example,
``save(..., write_concern={w: 2, fsync: True}, ...)`` will
wait until at least two servers have recorded the write and
will force an fsync on the primary server.
.. versionchanged:: 0.10.7
Add signal_kwargs argument
"""
signal_kwargs = signal_kwargs or {}
signals.pre_delete.send(self.__class__, document=self, **signal_kwargs)
# Delete FileFields separately
FileField = _import_class('FileField')
for name, field in self._fields.iteritems():
if isinstance(field, FileField):
getattr(self, name).delete()
try:
self._qs.filter(
**self._object_key).delete(write_concern=write_concern, _from_doc_delete=True)
except pymongo.errors.OperationFailure, err:
message = u'Could not delete document (%s)' % err.message
raise OperationError(message)
signals.post_delete.send(self.__class__, document=self, **signal_kwargs)
def switch_db(self, db_alias, keep_created=True):
"""
Temporarily switch the database for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_db('archive-db')
user.save()
:param str db_alias: The database alias to use for saving the document
:param bool keep_created: keep self._created value after switching db, else is reset to True
.. seealso::
Use :class:`~mongoengine.context_managers.switch_collection`
if you need to read from another collection
"""
with switch_db(self.__class__, db_alias) as cls:
collection = cls._get_collection()
db = cls._get_db()
self._get_collection = lambda: collection
self._get_db = lambda: db
self._collection = collection
self._created = True if not keep_created else self._created
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def switch_collection(self, collection_name, keep_created=True):
"""
Temporarily switch the collection for a document instance.
Only really useful for archiving off data and calling `save()`::
user = User.objects.get(id=user_id)
user.switch_collection('old-users')
user.save()
        :param str collection_name: The collection name to use for saving the
document
:param bool keep_created: keep self._created value after switching collection, else is reset to True
.. seealso::
Use :class:`~mongoengine.context_managers.switch_db`
if you need to read from another database
"""
with switch_collection(self.__class__, collection_name) as cls:
collection = cls._get_collection()
self._get_collection = lambda: collection
self._collection = collection
self._created = True if not keep_created else self._created
self.__objects = self._qs
self.__objects._collection_obj = collection
return self
def select_related(self, max_depth=1):
"""Handles dereferencing of :class:`~bson.dbref.DBRef` objects to
        a maximum depth in order to cut down the number of queries to mongodb.
.. versionadded:: 0.5
"""
DeReference = _import_class('DeReference')
DeReference()([self], max_depth + 1)
return self
def reload(self, *fields, **kwargs):
"""Reloads all attributes from the database.
:param fields: (optional) args list of fields to reload
:param max_depth: (optional) depth of dereferencing to follow
.. versionadded:: 0.1.2
.. versionchanged:: 0.6 Now chainable
.. versionchanged:: 0.9 Can provide specific fields to reload
"""
max_depth = 1
if fields and isinstance(fields[0], int):
max_depth = fields[0]
fields = fields[1:]
elif "max_depth" in kwargs:
max_depth = kwargs["max_depth"]
if not self.pk:
raise self.DoesNotExist("Document does not exist")
obj = self._qs.read_preference(ReadPreference.PRIMARY).filter(
**self._object_key).only(*fields).limit(
1).select_related(max_depth=max_depth)
if obj:
obj = obj[0]
else:
raise self.DoesNotExist("Document does not exist")
for field in obj._data:
if not fields or field in fields:
try:
setattr(self, field, self._reload(field, obj[field]))
except (KeyError, AttributeError):
try:
# If field is a special field, e.g. items is stored as _reserved_items,
                        # a KeyError is thrown. So try to retrieve the field from _data
setattr(self, field, self._reload(field, obj._data.get(field)))
except KeyError:
# If field is removed from the database while the object
# is in memory, a reload would cause a KeyError
# i.e. obj.update(unset__field=1) followed by obj.reload()
delattr(self, field)
self._changed_fields = obj._changed_fields
self._created = False
return self
def _reload(self, key, value):
"""Used by :meth:`~mongoengine.Document.reload` to ensure the
correct instance is linked to self.
"""
if isinstance(value, BaseDict):
value = [(k, self._reload(k, v)) for k, v in value.items()]
value = BaseDict(value, self, key)
elif isinstance(value, EmbeddedDocumentList):
value = [self._reload(key, v) for v in value]
value = EmbeddedDocumentList(value, self, key)
elif isinstance(value, BaseList):
value = [self._reload(key, v) for v in value]
value = BaseList(value, self, key)
elif isinstance(value, (EmbeddedDocument, DynamicEmbeddedDocument)):
value._instance = None
value._changed_fields = []
return value
def to_dbref(self):
"""Returns an instance of :class:`~bson.dbref.DBRef` useful in
`__raw__` queries."""
if not self.pk:
msg = "Only saved documents can have a valid dbref"
raise OperationError(msg)
return DBRef(self.__class__._get_collection_name(), self.pk)
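    # Illustrative usage of to_dbref() in a raw query -- hypothetical models, not part
    # of the original source:
    #
    #     ref = author.to_dbref()
    #     books = Book.objects(__raw__={'author': ref})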
@classmethod
def register_delete_rule(cls, document_cls, field_name, rule):
"""This method registers the delete rules to apply when removing this
object.
"""
classes = [get_document(class_name)
for class_name in cls._subclasses
if class_name != cls.__name__] + [cls]
documents = [get_document(class_name)
for class_name in document_cls._subclasses
if class_name != document_cls.__name__] + [document_cls]
for klass in classes:
for document_cls in documents:
delete_rules = klass._meta.get('delete_rules') or {}
delete_rules[(document_cls, field_name)] = rule
klass._meta['delete_rules'] = delete_rules
@classmethod
def drop_collection(cls):
"""Drops the entire collection associated with this
:class:`~mongoengine.Document` type from the database.
Raises :class:`OperationError` if the document has no collection set
        (e.g. if it is `abstract`)
.. versionchanged:: 0.10.7
:class:`OperationError` exception raised if no collection available
"""
col_name = cls._get_collection_name()
if not col_name:
raise OperationError('Document %s has no collection defined '
'(is it abstract ?)' % cls)
cls._collection = None
db = cls._get_db()
db.drop_collection(col_name)
@classmethod
def create_index(cls, keys, background=False, **kwargs):
"""Creates the given indexes if required.
:param keys: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
:param background: Allows index creation in the background
"""
index_spec = cls._build_index_spec(keys)
index_spec = index_spec.copy()
fields = index_spec.pop('fields')
drop_dups = kwargs.get('drop_dups', False)
if IS_PYMONGO_3 and drop_dups:
msg = "drop_dups is deprecated and is removed when using PyMongo 3+."
warnings.warn(msg, DeprecationWarning)
elif not IS_PYMONGO_3:
index_spec['drop_dups'] = drop_dups
index_spec['background'] = background
index_spec.update(kwargs)
if IS_PYMONGO_3:
return cls._get_collection().create_index(fields, **index_spec)
else:
return cls._get_collection().ensure_index(fields, **index_spec)
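    # Illustrative usage of create_index() -- hypothetical document class and field
    # names, not part of the original source:
    #
    #     BlogPost.create_index(['category', '-published'], background=True)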
@classmethod
def ensure_index(cls, key_or_list, drop_dups=False, background=False,
**kwargs):
"""Ensure that the given indexes are in place. Deprecated in favour
of create_index.
:param key_or_list: a single index key or a list of index keys (to
construct a multi-field index); keys may be prefixed with a **+**
or a **-** to determine the index ordering
:param background: Allows index creation in the background
:param drop_dups: Was removed/ignored with MongoDB >2.7.5. The value
will be removed if PyMongo3+ is used
"""
if IS_PYMONGO_3 and drop_dups:
msg = "drop_dups is deprecated and is removed when using PyMongo 3+."
warnings.warn(msg, DeprecationWarning)
elif not IS_PYMONGO_3:
kwargs.update({'drop_dups': drop_dups})
return cls.create_index(key_or_list, background=background, **kwargs)
@classmethod
def ensure_indexes(cls):
"""Checks the document meta data and ensures all the indexes exist.
Global defaults can be set in the meta - see :doc:`guide/defining-documents`
.. note:: You can disable automatic index creation by setting
`auto_create_index` to False in the documents meta data
"""
background = cls._meta.get('index_background', False)
drop_dups = cls._meta.get('index_drop_dups', False)
index_opts = cls._meta.get('index_opts') or {}
index_cls = cls._meta.get('index_cls', True)
if IS_PYMONGO_3 and drop_dups:
msg = "drop_dups is deprecated and is removed when using PyMongo 3+."
warnings.warn(msg, DeprecationWarning)
collection = cls._get_collection()
# 746: when connection is via mongos, the read preference is not necessarily an indication that
# this code runs on a secondary
if not collection.is_mongos and collection.read_preference > 1:
return
# determine if an index which we are creating includes
# _cls as its first field; if so, we can avoid creating
# an extra index on _cls, as mongodb will use the existing
# index to service queries against _cls
cls_indexed = False
# Ensure document-defined indexes are created
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
cls_indexed = cls_indexed or includes_cls(fields)
opts = index_opts.copy()
opts.update(spec)
# we shouldn't pass 'cls' to the collection.ensureIndex options
# because of https://jira.mongodb.org/browse/SERVER-769
if 'cls' in opts:
del opts['cls']
if IS_PYMONGO_3:
collection.create_index(fields, background=background, **opts)
else:
collection.ensure_index(fields, background=background,
drop_dups=drop_dups, **opts)
# If _cls is being used (for polymorphism), it needs an index,
# only if another index doesn't begin with _cls
if (index_cls and not cls_indexed and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
# we shouldn't pass 'cls' to the collection.ensureIndex options
# because of https://jira.mongodb.org/browse/SERVER-769
if 'cls' in index_opts:
del index_opts['cls']
if IS_PYMONGO_3:
collection.create_index('_cls', background=background,
**index_opts)
else:
collection.ensure_index('_cls', background=background,
**index_opts)
@classmethod
def list_indexes(cls):
""" Lists all of the indexes that should be created for given
collection. It includes all the indexes from super- and sub-classes.
"""
if cls._meta.get('abstract'):
return []
# get all the base classes, subclasses and siblings
classes = []
def get_classes(cls):
if (cls not in classes and
isinstance(cls, TopLevelDocumentMetaclass)):
classes.append(cls)
for base_cls in cls.__bases__:
if (isinstance(base_cls, TopLevelDocumentMetaclass) and
base_cls != Document and
not base_cls._meta.get('abstract') and
base_cls._get_collection().full_name == cls._get_collection().full_name and
base_cls not in classes):
classes.append(base_cls)
get_classes(base_cls)
for subclass in cls.__subclasses__():
                if (isinstance(subclass, TopLevelDocumentMetaclass) and
subclass._get_collection().full_name == cls._get_collection().full_name and
subclass not in classes):
classes.append(subclass)
get_classes(subclass)
get_classes(cls)
# get the indexes spec for all of the gathered classes
def get_indexes_spec(cls):
indexes = []
if cls._meta['index_specs']:
index_spec = cls._meta['index_specs']
for spec in index_spec:
spec = spec.copy()
fields = spec.pop('fields')
indexes.append(fields)
return indexes
indexes = []
for klass in classes:
for index in get_indexes_spec(klass):
if index not in indexes:
indexes.append(index)
# finish up by appending { '_id': 1 } and { '_cls': 1 }, if needed
if [(u'_id', 1)] not in indexes:
indexes.append([(u'_id', 1)])
if (cls._meta.get('index_cls', True) and
cls._meta.get('allow_inheritance', ALLOW_INHERITANCE) is True):
indexes.append([(u'_cls', 1)])
return indexes
@classmethod
def compare_indexes(cls):
""" Compares the indexes defined in MongoEngine with the ones existing
in the database. Returns any missing/extra indexes.
"""
required = cls.list_indexes()
existing = [info['key']
for info in cls._get_collection().index_information().values()]
missing = [index for index in required if index not in existing]
extra = [index for index in existing if index not in required]
# if { _cls: 1 } is missing, make sure it's *really* necessary
if [(u'_cls', 1)] in missing:
cls_obsolete = False
for index in existing:
if includes_cls(index) and index not in extra:
cls_obsolete = True
break
if cls_obsolete:
missing.remove([(u'_cls', 1)])
return {'missing': missing, 'extra': extra}
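    # Illustrative usage of compare_indexes() -- hypothetical document class, not part
    # of the original source:
    #
    #     diff = BlogPost.compare_indexes()
    #     assert diff == {'missing': [], 'extra': []}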
class DynamicDocument(Document):
"""A Dynamic Document class allowing flexible, expandable and uncontrolled
schemas. As a :class:`~mongoengine.Document` subclass, acts in the same
way as an ordinary document but has expando style properties. Any data
passed or set against the :class:`~mongoengine.DynamicDocument` that is
not a field is automatically converted into a
:class:`~mongoengine.fields.DynamicField` and data can be attributed to that
field.
.. note::
There is one caveat on Dynamic Documents: fields cannot start with `_`
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = TopLevelDocumentMetaclass
__metaclass__ = TopLevelDocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._dynamic_fields:
setattr(self, field_name, None)
else:
super(DynamicDocument, self).__delattr__(*args, **kwargs)
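# Illustrative usage of DynamicDocument -- a hypothetical sketch, not part of the
# original source (assumes StringField is imported from mongoengine.fields):
#
#     class Page(DynamicDocument):
#         title = StringField()
#
#     page = Page(title='Home')
#     page.views = 42          # undeclared attribute becomes a DynamicField
#     page.save()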
class DynamicEmbeddedDocument(EmbeddedDocument):
"""A Dynamic Embedded Document class allowing flexible, expandable and
uncontrolled schemas. See :class:`~mongoengine.DynamicDocument` for more
information about dynamic documents.
"""
# The __metaclass__ attribute is removed by 2to3 when running with Python3
# my_metaclass is defined so that metaclass can be queried in Python 2 & 3
my_metaclass = DocumentMetaclass
__metaclass__ = DocumentMetaclass
_dynamic = True
def __delattr__(self, *args, **kwargs):
"""Deletes the attribute by setting to None and allowing _delta to unset
it"""
field_name = args[0]
if field_name in self._fields:
default = self._fields[field_name].default
if callable(default):
default = default()
setattr(self, field_name, default)
else:
setattr(self, field_name, None)
class MapReduceDocument(object):
"""A document returned from a map/reduce query.
:param collection: An instance of :class:`~pymongo.Collection`
:param key: Document/result key, often an instance of
:class:`~bson.objectid.ObjectId`. If supplied as
an ``ObjectId`` found in the given ``collection``,
the object can be accessed via the ``object`` property.
:param value: The result(s) for this key.
.. versionadded:: 0.3
"""
def __init__(self, document, collection, key, value):
self._document = document
self._collection = collection
self.key = key
self.value = value
@property
def object(self):
"""Lazy-load the object referenced by ``self.key``. ``self.key``
should be the ``primary_key``.
"""
id_field = self._document()._meta['id_field']
id_field_type = type(id_field)
if not isinstance(self.key, id_field_type):
try:
self.key = id_field_type(self.key)
except:
raise Exception("Could not cast key as %s" %
id_field_type.__name__)
if not hasattr(self, "_key_object"):
self._key_object = self._document.objects.with_id(self.key)
return self._key_object
return self._key_object
| {
"content_hash": "2575d6e9c30f2cf4d8cb82e928b76d1d",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 108,
"avg_line_length": 41.761856710393545,
"alnum_prop": 0.5845454984777461,
"repo_name": "touilleMan/mongoengine",
"id": "52353523eb903361cc4612298eb268120b74f850",
"size": "41386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoengine/document.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1055087"
}
],
"symlink_target": ""
} |
"""
This module contains definitions for functions or methods which are
generally useful for translating instances.
"""
def filter_non_alnum(string):
""" filter_non_alnum is a helper function which, given a string, returns it
    with all non-alphanumeric characters removed. This is required for fields
in ARM which don't accept any special characters.
"""
return ''.join([c for c in string if c.isalnum()])
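# Illustrative usage -- not part of the original source:
#
#     filter_non_alnum("my-vm_01.example")   # -> "myvm01example"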
| {
"content_hash": "3bca9bd938c7758e82206079cb778458",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 79,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.7119815668202765,
"repo_name": "cloudbase/heat2arm",
"id": "2929f33e12273b4445b723f8322805708f702116",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat2arm/translators/instances/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "268087"
}
],
"symlink_target": ""
} |
import logging
import os
import shutil
import subprocess
import tempfile
import textwrap
import fortranformat as ff
import numpy
logger = logging.getLogger(__name__)
INPUT_TEMPLATE = """
DIPOLE CHARACTERISTIC PARAMETERS:
IFACT(Without-1/With-2 Displacement Current Factor) Format(I5)
{calc_disp_currs}
IDIPOL(VMD-1,HMD-2,HED-3)--ICOMP(Hr/Hx-1,Ephai/Hy-2,Hz-3,Ex-4,Ey-5) Format(2I5)
{IDIPOL}{ICOMP}
R(S-R Offset)--HT(Source Height)--Z(Receiver Level)(Format(3F9.2)
{src_rec_offset}{src_height}{rec_level}
FREQ1(Highest Freq.)------FREQL(Lowest Freq) ---Format(2F12.2)
{freq_h}{freq_l}
RI(Current-Ampere)-Area(Dipole Area)-RM(Dipole Moment)-Format(3F9.2)
{rec_curr}{rec_area}{rec_dip_moment}
X (X- HMD & HED)--Y (Y- HMD & HED)--(Receiver position w.r.t. Dipole)--Format(2F9.3)
{hx}{hy}
MODEL PARAMETERS:
NLYR-------Resistivity--and---Thickness----Format(10F8.3)
{nlyr}
{res}{thk}
"""[1:-1]
class results(dict):
def __init__(self, *args, **kwargs):
self.__dict__ = self
def vmd(src_rec_offset, src_height, rec_level,
res, thk=None,
nlayers=None,
freq_h=1e5, freq_l=10,
rec_curr=1, rec_area=1, rec_dip_moment=1,
hx=0, hy=0,
field_components=("Hz", ), calc_displ_currs=False,
emdpler_exe=None, print_input_files=False, print_output=False):
"""Run forward model for vertical magnetic dipole configuration (VMD).
Arguments:
src_rec_offset (float):
src_height (float):
rec_level (float):
res (array of floats): list of N resistivities for N model layers
thk (array of floats): list of N-1 thicknesses for N-1 model layers
(the last resistivity is for the underlying halfspace?)
field_components (list of strings): field components to calculate,
can be a list containing any number of the values
"Hz" (more to follow in the future).
        calc_displ_currs (bool): include displacement currents
emdpler_exe (string): path to emdpler executable
"""
if emdpler_exe is None:
suffix = ""
if os.name == "nt":
suffix = ".exe"
emdpler_exe = os.path.join(os.path.dirname(__file__), "emdpler" + suffix)
assert os.path.isfile(emdpler_exe)
IFACT = {True: 2, False: 1}[calc_displ_currs]
if nlayers is None:
nlayers = len(res)
if thk is None:
thk = []
# TODO: loop and allow multiple runs of Emdpler to calculate more field components.
ICOMP = {"Hz": 3}[field_components[0]]
temp_dir = tempfile.mkdtemp(prefix="tmp_emdpler")
logger.info("Running modelling in %s" % temp_dir)
logger.debug("Creating input file...")
input_template = str(INPUT_TEMPLATE)
res_sl = ["%f" % r for r in res]
res_s = textwrap.wrap(" ".join(res_sl))
input_template = input_template.format(
calc_disp_currs = "%.0f" % IFACT,
IDIPOL = "1",
ICOMP = ff.FortranRecordWriter('(2I5)').write([ICOMP]),
src_rec_offset = ff.FortranRecordWriter('(3F9.2)').write([src_rec_offset]),
src_height = ff.FortranRecordWriter('(3F9.2)').write([src_height]),
rec_level = ff.FortranRecordWriter('(3F9.2)').write([rec_level]),
freq_h = ff.FortranRecordWriter('(2F12.2)').write([freq_h]),
freq_l = ff.FortranRecordWriter('(2F12.2)').write([freq_l]),
rec_curr = ff.FortranRecordWriter('(3F9.2)').write([rec_curr]),
rec_area = ff.FortranRecordWriter('(3F9.2)').write([rec_area]),
rec_dip_moment = ff.FortranRecordWriter('(3F9.2)').write([rec_dip_moment]),
hx = ff.FortranRecordWriter('(2F9.3)').write([hx]),
hy = ff.FortranRecordWriter('(2F9.3)').write([hy]),
nlyr = ff.FortranRecordWriter('(2I5)').write([nlayers]),
res = "\n".join(textwrap.wrap(" ".join([ff.FortranRecordWriter('(10F8.3)').write([r]) for r in res]))),
thk = "\n".join(textwrap.wrap(" ".join([ff.FortranRecordWriter('(10F8.3)').write([t]) for t in thk]))),
)
input_fn = os.path.join(temp_dir, "Input.in")
with open(input_fn, mode="w") as inf:
inf.write(input_template)
logger.debug("Wrote input file at " + input_fn)
if print_input_files:
print input_template
try:
pr_output = subprocess.check_output([emdpler_exe], cwd=temp_dir)
if print_output:
print pr_output
except:
raise
finally:
r1 = numpy.loadtxt(os.path.join(temp_dir, "RESULT1.DAT"))
r2 = numpy.loadtxt(os.path.join(temp_dir, "RESULT2.DAT"))
r3 = numpy.loadtxt(os.path.join(temp_dir, "RESULT3.DAT"))
shutil.rmtree(temp_dir)
logger.info("Finished modelling in %s" % temp_dir)
rfreq = results()
rindn = results()
assert (r1[:,0] == r3[:, 0]).all()
rfreq.freq = r1[:,0]
rfreq.ampl = r1[:,1]
rfreq.phase = fix_phases(r1[:,2])
rfreq.norm_ampl = r3[:,1]
rfreq.norm_phase = fix_phases(r3[:,2])
rindn.ind_num = r2[:,0]
rindn.ampl = r2[:,1]
rindn.phase = fix_phases(r2[:,2])
return rfreq, rindn
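# Illustrative usage of vmd() -- the layer model and geometry below are hypothetical
# values chosen for the example, not part of the original source:
#
#     rfreq, rindn = vmd(src_rec_offset=100.0, src_height=30.0, rec_level=0.0,
#                        res=[100.0, 10.0, 1000.0], thk=[20.0, 50.0])
#     plot_results(rfreq, rindn)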
def plot_results(rfreq, rindn, fig=None, fign=None, figsize=(15, 6),
amplim=(None, None), phaselim=(None, None), gskws={}, pltkws={}):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if fig is None:
fig = plt.figure(fign, figsize=figsize)
pltkws["color"] = pltkws.get("color", "k")
gskws["wspace"] = gskws.get("wspace", 0.3)
gskws["hspace"] = gskws.get("hspace", 0.3)
gs = gridspec.GridSpec(2, 3, **gskws)
ax = fig.add_subplot(gs[0])
ax.plot(rfreq.freq, rfreq.ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*amplim)
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[3])
ax.plot(rfreq.freq, rfreq.phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*phaselim)
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Phase")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[1])
ax.plot(rfreq.freq, rfreq.norm_ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Normalized amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[4])
ax.plot(rfreq.freq, rfreq.norm_phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Normalized phase [deg]")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[2])
ax.plot(rindn.ind_num, rindn.ampl, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*amplim)
ax.set_xlabel("Induction number")
ax.set_ylabel("Amplitude")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
ax = fig.add_subplot(gs[5])
ax.plot(rindn.ind_num, rindn.phase, **pltkws)
ax.set_xscale("log")
ax.set_xlim(*ax.get_xlim()[::-1])
ax.set_ylim(*phaselim)
ax.set_xlabel("Induction number")
ax.set_ylabel("Phase [deg]")
ax.yaxis.get_major_formatter().set_powerlimits((-2, 3))
def fix_phases(arr):
for i in range(len(arr)):
while arr[i] > 180:
arr[i] = arr[i] - 180
while arr[i] < -180:
arr[i] = arr[i] + 180
    return arr
| {
"content_hash": "f1f0e476239bd13603c46fab8753728b",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 112,
"avg_line_length": 36.10096153846154,
"alnum_prop": 0.6091357038220802,
"repo_name": "kinverarity1/emdpler_wrapper",
"id": "008f31c00faec3fee536bd9a9ceb18321fecc679",
"size": "7509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emdpler_wrapper/emdpler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9002"
}
],
"symlink_target": ""
} |
import os
from packaging.version import Version
import pytest
import matplotlib.ft2font
from urllib.request import urlopen, URLError
from oggm import cfg
from oggm.utils import SAMPLE_DATA_COMMIT
# Some logic to see which environment we are running on
# Matplotlib version changes plots, too
HAS_MPL_FOR_TESTS = False
if Version(matplotlib.__version__) >= Version('2'):
HAS_MPL_FOR_TESTS = True
BASELINE_DIR = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'baseline_images', 'freetype_28')
# quick n dirty method to see if internet is on
try:
_ = urlopen('http://www.google.com', timeout=1)
HAS_INTERNET = True
except URLError:
HAS_INTERNET = False
def mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=5, **kwargs):
return pytest.mark.mpl_image_compare(baseline_dir=baseline_dir,
tolerance=tolerance,
**kwargs)
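# Illustrative usage in a test module -- a hypothetical sketch, not part of the
# original source (assumes matplotlib.pyplot is imported as plt):
#
#     @mpl_image_compare()
#     def test_my_plot():
#         fig, ax = plt.subplots()
#         ax.plot([1, 2, 3])
#         return fig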
| {
"content_hash": "f8e2304a526f467b8595c07f826002a7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 31.875,
"alnum_prop": 0.6352941176470588,
"repo_name": "OGGM/oggm",
"id": "c0cdc7af00981ccf4772eba84337a3346511bd07",
"size": "1020",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "oggm/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "384"
},
{
"name": "Python",
"bytes": "1875666"
},
{
"name": "Shell",
"bytes": "1201"
}
],
"symlink_target": ""
} |
from flask import Flask
from .view_classes import DefaultMethodsView, NoDefaultMethodsView
from nose.tools import eq_
app = Flask('default_methods')
DefaultMethodsView.register(app)
NoDefaultMethodsView.register(app)
def test_default_methods():
client = app.test_client()
resp = client.get('/default-methods/foo/')
eq_(b"GET", resp.data)
resp = client.post('/default-methods/foo/')
eq_(b"POST", resp.data)
def test_no_default_methods():
client = app.test_client()
resp = client.get('/no-default-methods/foo/')
eq_(b"GET", resp.data)
resp = client.post('/no-default-methods/foo/')
eq_(resp.status_code, 405)
| {
"content_hash": "e480fba3e8f552f77a1a98a70221ce30",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 27.208333333333332,
"alnum_prop": 0.6921898928024502,
"repo_name": "teracyhq/flask-classy",
"id": "32459f28025cf6aa33e7f52d5a11c2aef9f6760b",
"size": "653",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "test_classful/test_default_methods.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "480"
},
{
"name": "Python",
"bytes": "42187"
},
{
"name": "Shell",
"bytes": "458"
}
],
"symlink_target": ""
} |
import contextlib
import os
import sys
if os.name != "nt":
# Linux/macOS: remove current script directory to avoid importing this file
# as a module; we want to import the installed esptool module instead
with contextlib.suppress(ValueError):
if sys.path[0].endswith("/bin"):
sys.path.pop(0)
sys.path.remove(os.path.dirname(sys.executable))
# Linux/macOS: delete imported module entry to force Python to load
# the module from scratch; this enables importing esptool module in
# other Python scripts
with contextlib.suppress(KeyError):
del sys.modules["esptool"]
import esptool
if __name__ == "__main__":
esptool._main()
| {
"content_hash": "29dfd0391722d73de05d60ba6f1bdea3",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 31.5,
"alnum_prop": 0.683982683982684,
"repo_name": "nabovarme/MeterLogger",
"id": "60e5bd3397d82ea42545adae185fc579807a6080",
"size": "1084",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/esptool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1122"
},
{
"name": "C",
"bytes": "729660"
},
{
"name": "CSS",
"bytes": "639"
},
{
"name": "Dockerfile",
"bytes": "3551"
},
{
"name": "HTML",
"bytes": "484"
},
{
"name": "Makefile",
"bytes": "18879"
},
{
"name": "Perl",
"bytes": "11791"
},
{
"name": "Python",
"bytes": "1084"
},
{
"name": "Shell",
"bytes": "318"
},
{
"name": "Smarty",
"bytes": "6025"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="colorsrc",
parent_name="scatterternary.hoverlabel.font",
**kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "e9cd9d5d5d3cbd64856892a0f160df68",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 29.529411764705884,
"alnum_prop": 0.5756972111553785,
"repo_name": "plotly/python-api",
"id": "a9011be7e3c1e976ac731a6a83368a8670f3a7d6",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/hoverlabel/font/_colorsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""
An app for uploading multimedia content
"""
import mimetypes
import datetime
import os
import logging
logger = logging.getLogger(__name__)
try:
from Pillow import Image
except ImportError:
try:
from PIL import Image
except ImportError:
import Image
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.encoding import force_unicode
from mediacontent.utils import convert_to_rgb, crop_aspect, resize
def_sizes = {
'thumb': (75,75),
'gallery': (250, 250),
'normal': (600,600)
}
CONTENT_MEDIA_SIZE = getattr(settings, 'CONTENT_MEDIA_SIZE', def_sizes)
CONTENT_MEDIA_PATH = getattr(settings, 'CONTENT_MEDIA_PATH', 'mediacontent')
class MediaContentQuerySet(models.QuerySet):
def get_thumbnail(self):
logger.debug('Filtrando solo thumbnail')
return self.filter(thumbnail_only=True)
def get_gallery(self):
logger.debug('Filtrando solo gallery')
return self.filter(gallery_only=True)
class MediaContentManager(models.Manager):
def get_queryset(self):
return MediaContentQuerySet(self.model, using=self._db)
def get_for_model(self, model):
ct = ContentType.objects.get_for_model(model)
qs = self.get_queryset().filter(content_type=ct)
if isinstance(model, models.Model):
qs = qs.filter(object_pk=force_unicode(model._get_pk_val()))
return qs
def get_content_path(instance, filename):
cls = instance._get_ct()
return u'%s/%s/%s/%s' % (CONTENT_MEDIA_PATH, cls._meta.app_label, instance.object_pk, filename)
def get_thumb_path(instance, filename):
cls = instance._get_ct()
return u'%s/%s/%s/thumbnail_%s' % (CONTENT_MEDIA_PATH, cls._meta.app_label, instance.mimetype, filename)
def get_gallery_path(instance, filename):
cls = instance._get_ct()
return u'%s/%s/%s/gallery_%s' % (CONTENT_MEDIA_PATH, cls._meta.app_label, instance.mimetype, filename)
class MediaContent(models.Model):
def _get_ct(self):
mcls = ContentType.objects.get(pk=self.content_type.pk)
        # this fixes the problem with multisite setups and _default_manager
if self.content_object:
return self.content_object
try:
cls = mcls.get_object_for_this_type(pk=self.object_pk)
except mcls.model_class().DoesNotExist:
cls = mcls.model_class()
return cls
def content_path(self, filename):
return self.get_content_path(filename)
def thumb_path(self, filename):
return self.get_thumb_path(filename)
def gallery_path(self, filename):
return self.get_gallery_path(filename)
def get_sizes(self):
return CONTENT_MEDIA_SIZE
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_pk = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_pk')
mimetype = models.CharField(max_length=100, blank=True)
title = models.CharField(max_length=100)
description = models.TextField(blank=True)
thumbnail_only = models.BooleanField(default=False, help_text="Indica si se usa como thumbnail del objeto asociado")
gallery_only = models.BooleanField(default=False, help_text="Indica si se usa para gallery del objeto asociado")
content = models.FileField(upload_to=get_content_path, max_length=300)
thumbnail = models.ImageField(upload_to=get_thumb_path, blank=True, max_length=300)
gallery = models.ImageField(upload_to=get_gallery_path, blank=True, max_length=300)
    # the DB needs to be updated and all default dates generated
pub_date = models.DateTimeField(blank=True)
sort_order = models.IntegerField(null=True, blank=True)
objects = MediaContentManager()
class Meta:
ordering = ('sort_order', 'pub_date')
def __init__(self, *args, **kwargs):
super(MediaContent, self).__init__(*args, **kwargs)
self.__original_image = self.content
def __unicode__(self):
return unicode(self.title)
def save(self, *args, **kwargs):
changed_image = self.content != self.__original_image
if not self.id and not self.pub_date:
self.pub_date = datetime.datetime.today()
crop_original = kwargs.get('crop_original', False)
super(MediaContent, self).save(*args, **kwargs)
self.mimetype = mimetypes.guess_type(self.content.path)[0]
if self.mimetype:
content_type = self.mimetype.replace('/', '_')
else:
# assume everything else is text/plain
content_type = 'text_plain'
i = self.content.name.rindex('/')
thumbnail = u'%sthumbnail_%s' % (unicode(self.content.name[:i+1]), unicode(self.content.name[i+1:]))
gallery = u'%sgallery_%s' % (unicode(self.content.name[:i+1]), unicode(self.content.name[i+1:]))
orig = self.content.name
if (not self.thumbnail or not self.gallery or changed_image) and content_type.split('_')[0]=='image':
img_path = self.content.path
if content_type == 'image_svg+xml':
try:
from nebula.mediacontent import svg_to_png
svg_to_png.convert(img_path, svg_to_png.new_name(img_path))
img_path = svg_to_png.new_name(img_path)
self.content.name = self.content.name[:-3] + self.content.name[-3:].replace('svg', 'png')
except:
pass
image = Image.open(img_path)
image = convert_to_rgb(image)
image = crop_aspect(image, ratio=1.0)
            # build the thumbnail
image_thumb = resize(image.copy(), size=self.get_sizes()['thumb'])
image_thumb.save(os.path.join(settings.MEDIA_ROOT, thumbnail))
self.thumbnail = thumbnail
            # save the gallery-sized image
image_gallery = resize(image.copy(), size=self.get_sizes()['gallery'])
image_gallery.save(os.path.join(settings.MEDIA_ROOT, gallery))
self.gallery = gallery
            # save the image at the maximum size
if crop_original:
image_normal = resize(image.copy(), size=self.get_sizes()['normal'])
image_normal.save(os.path.join(settings.MEDIA_ROOT, orig))
elif (not self.thumbnail or not self.gallery or changed_image) and content_type == 'application_pdf':
            # Create an image of the first page of a PDF
from subprocess import call
cmd = "gs -q -dQUIET -dPARANOIDSAFER -dBATCH -dNOPAUSE -dNOPROMPT \
-dMaxBitmap=500000000 -dLastPage=1 -dAlignToPixels=0 -dGridFitTT=0 \
-sDEVICE=jpeg -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -r150 \
-sOutputFile=%(fileout)s %(filein)s"
filein = os.path.join(settings.MEDIA_ROOT, self.content.name)
filejpg = self.content.name[:-3] + self.content.name[-3:].replace('pdf', 'jpg')
fileout = os.path.join(settings.MEDIA_ROOT, filejpg)
if not os.access(filein, os.R_OK):
raise 'not access %s' % filein
files = {
'filein': filein.replace(' ', '\ '),
'fileout': fileout.replace(' ', '\ '),
}
            # returns 0 if the command succeeded
if not call(cmd % files, shell=True):
i = filejpg.rindex('/')
thumbnail = u'%sthumbnail_%s' % (unicode(filejpg[:i+1]), unicode(filejpg[i+1:]))
gallery = u'%sgallery_%s' % (unicode(filejpg[:i+1]), unicode(filejpg[i+1:]))
image = Image.open(fileout)
image = convert_to_rgb(image)
#image = crop_aspect(image, ratio=1.0)
                # build the thumbnail
image_thumb = resize(image.copy(), size=None, max_width=self.get_sizes()['gallery'][0])
image_thumb.save(os.path.join(settings.MEDIA_ROOT, thumbnail))
self.thumbnail = thumbnail
                # save the gallery-sized image
#image_gallery = image.copy()
image_gallery = resize(image.copy(), size=None, max_width=self.get_sizes()['normal'][0])
image.save(os.path.join(settings.MEDIA_ROOT, gallery))
self.gallery = gallery
                # delete the original because it is a PDF
try:
os.remove(fileout)
except (OSError, ValueError):
pass
super(MediaContent, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
try:
os.remove(self.content.path)
except (OSError, ValueError):
pass
try:
os.remove(self.gallery.path)
except (OSError, ValueError):
pass
try:
os.remove(self.thumbnail.path)
except (OSError, ValueError):
pass
return super(MediaContent, self).delete(*args, **kwargs)
def get_header(self):
return self.mimetype.split('/')[0]
def get_file_name(self):
return self.content.name.split('/')[-1]
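# Illustrative usage -- hypothetical object, not part of the original source:
#
#     thumbs = MediaContent.objects.get_for_model(article).get_thumbnail()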
| {
"content_hash": "6374cd7fba5bc50f637fdfb20b420e9a",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 128,
"avg_line_length": 36.252873563218394,
"alnum_prop": 0.6076939336292538,
"repo_name": "ninjaotoko/django-mediacontent",
"id": "7ccb6af0a68ccf702e55d847240a166a4029e519",
"size": "9487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediacontent/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27234"
}
],
"symlink_target": ""
} |
import cgi
import datetime
import urllib
import urlparse
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template import defaultfilters
from django.utils.encoding import smart_str
from django.utils.html import strip_tags
from jingo import register
import jinja2
from .urlresolvers import reverse
# Yanking filters from Django.
register.filter(strip_tags)
register.filter(defaultfilters.timesince)
register.filter(defaultfilters.truncatewords)
@register.function
def thisyear():
"""The current year."""
return jinja2.Markup(datetime.date.today().year)
@register.function
def url(viewname, *args, **kwargs):
"""Helper for Django's ``reverse`` in templates."""
return reverse(viewname, args=args, kwargs=kwargs)
@register.filter
def urlparams(url_, hash=None, **query):
"""Add a fragment and/or query paramaters to a URL.
New query params will be appended to exising parameters, except duplicate
names, which will be replaced.
"""
url = urlparse.urlparse(url_)
fragment = hash if hash is not None else url.fragment
# Use dict(parse_qsl) so we don't get lists of values.
q = url.query
query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
query_dict.update((k, v) for k, v in query.items())
query_string = _urlencode([(k, v) for k, v in query_dict.items()
if v is not None])
new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
query_string, fragment)
return new.geturl()
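# Illustrative template usage of urlparams -- hypothetical view name and parameters,
# not part of the original source:
#
#     {{ url('search')|urlparams(q='django', page=2, hash='results') }}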
def _urlencode(items):
"""A Unicode-safe URLencoder."""
try:
return urllib.urlencode(items)
except UnicodeEncodeError:
return urllib.urlencode([(k, smart_str(v)) for k, v in items])
@register.filter
def urlencode(txt):
"""Url encode a path."""
if isinstance(txt, unicode):
txt = txt.encode('utf-8')
return urllib.quote_plus(txt)
@register.function
def static(path):
return staticfiles_storage.url(path)
| {
"content_hash": "c8415779110d2d72d59dc67c11e068c7",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 27.91891891891892,
"alnum_prop": 0.6926427879961278,
"repo_name": "hfeeki/djfactory",
"id": "643619c6f7bf54e5bca255185ccf7c141f85c567",
"size": "2066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djfactory/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Parser for various Hi-C data.
"""
import numpy as np
from collections import defaultdict
class HiCData(object):
"""HiCData
Simple class for storing and filtering contact data from single-cell
HiC experiments.
"""
def __init__(self, data):
"""HiCData
This is a list of tuples specifying the indices of the loci that
are in contact.
Parameters
----------
data : list of tuples
"""
self.data = map(tuple, data)
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def add(self, pair):
i, j = pair
self.data.append((i,j))
def remove_self_contacts(self):
"""
Remove contacts between one and the same locus. Self-contacts can
occur due to mapping high-resolution contact data to a low-resolution
representation of the chromatin fiber.
"""
contacts = np.array(self.data)
mask = contacts[:,0] != contacts[:,1]
self.__init__(contacts[mask])
def remove_redundant_contacts(self):
"""
Remove contacts that are duplicated or equivalent (e.g. (1,2) is
equivalent to (2,1)).
"""
unique = []
for i, j in self:
i, j = min(i,j), max(i,j)
if not (i,j) in unique:
unique.append((i,j))
self.__init__(unique)
def coarsen(self, n_beads, chrsize):
scale = n_beads / float(chrsize)
self.__init__((np.array(self.data) * scale).astype('i'))
class HiCParser(object):
"""HiCParser
Parser for text files storing HiC contact data. The parser assumes
that the format is
<chr1>[tab]<coord1>[tab]<chr2>[tab]<coord2>
...
The first line is assumed to be a header and is skipped. The parser
    focuses on specific trans- and cis-contacts as specified in the
constructor of the parser.
"""
def __init__(self, filename, chromosome1=None, chromosome2=None):
"""HiCParser
Instantiates a parser for HiC text files. By specifying one or
two names of chromosomes whose data will be parsed, we can restrict
the parsing to trans- and cis-chromosomal contacts. If both
arguments are 'None', all contacts are read. If 'chromosome1'
is specified and chromosome2 is 'None', all contacts between a
locus on the chosen chromosome and all other chromosomes will be
read.
Parameters
----------
filename :
name of the file storing the HiC contact data
chromosome1 :
optional selector for the first chromosome
chromosome2:
optional string selecting the second interaction partner
"""
self.filename = filename
self.chromosome1 = chromosome1
self.chromosome2 = chromosome2
def parse(self):
"""
Reads contacts from a text file
"""
datasets = defaultdict(list)
with open(self.filename) as f:
header = f.readline().strip().split('\t')
while 1:
line = f.readline()
if line == '': break
chr1, i, chr2, j = line.split('\t')
if self.chromosome1 and str(self.chromosome1) != chr1: continue
if self.chromosome2 and str(self.chromosome2) != chr2: continue
datasets[(chr1,chr2)].append((int(i),int(j)))
for k, v in datasets.items():
datasets[k] = HiCData(v)
return datasets
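# Illustrative usage -- hypothetical file name and sizes, not part of the original source:
#
#     parser = HiCParser('contacts.txt', chromosome1='X', chromosome2='X')
#     datasets = parser.parse()              # dict keyed by (chr1, chr2) tuples
#     data = datasets[('X', 'X')]
#     data.remove_redundant_contacts()
#     data.coarsen(n_beads=500, chrsize=2000000)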
| {
"content_hash": "aa215eb01e8f3b6d82e8ed98270e66a0",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 26.467153284671532,
"alnum_prop": 0.5717043574186431,
"repo_name": "michaelhabeck/isdhic",
"id": "73be4628d408c4b5bac245f00564b452eddc47a6",
"size": "3626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isdhic/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "38564"
},
{
"name": "C++",
"bytes": "390"
},
{
"name": "Python",
"bytes": "125434"
}
],
"symlink_target": ""
} |
import logging
import yaml
import io
from redash.utils.requests_session import requests_or_advocate, UnacceptableAddressException
from redash.query_runner import *
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
import pandas as pd
import numpy as np
enabled = True
except ImportError:
enabled = False
class CSV(BaseQueryRunner):
should_annotate_query = False
@classmethod
def name(cls):
return "CSV"
@classmethod
def enabled(cls):
return enabled
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {},
}
def __init__(self, configuration):
super(CSV, self).__init__(configuration)
self.syntax = "yaml"
def test_connection(self):
pass
def run_query(self, query, user):
path = ""
ua = ""
args = {}
try:
args = yaml.safe_load(query)
path = args['url']
args.pop('url', None)
ua = args['user-agent']
args.pop('user-agent', None)
except:
pass
try:
response = requests_or_advocate.get(url=path, headers={"User-agent": ua})
workbook = pd.read_csv(io.BytesIO(response.content),sep=",", **args)
df = workbook.copy()
data = {'columns': [], 'rows': []}
conversions = [
{'pandas_type': np.integer, 'redash_type': 'integer',},
{'pandas_type': np.inexact, 'redash_type': 'float',},
{'pandas_type': np.datetime64, 'redash_type': 'datetime', 'to_redash': lambda x: x.strftime('%Y-%m-%d %H:%M:%S')},
{'pandas_type': np.bool_, 'redash_type': 'boolean'},
{'pandas_type': np.object, 'redash_type': 'string'}
]
labels = []
for dtype, label in zip(df.dtypes, df.columns):
for conversion in conversions:
if issubclass(dtype.type, conversion['pandas_type']):
data['columns'].append({'name': label, 'friendly_name': label, 'type': conversion['redash_type']})
labels.append(label)
func = conversion.get('to_redash')
if func:
df[label] = df[label].apply(func)
break
data['rows'] = df[labels].replace({np.nan: None}).to_dict(orient='records')
json_data = json_dumps(data)
error = None
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except UnacceptableAddressException:
error = "Can't query private addresses."
json_data = None
except Exception as e:
error = "Error reading {0}. {1}".format(path, str(e))
json_data = None
return json_data, error
def get_schema(self):
raise NotSupported()
register(CSV)
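# Illustrative query for this runner -- the URL is hypothetical, not part of the
# original source. The query body is YAML; 'url' and 'user-agent' are consumed by
# run_query() and every remaining key is forwarded to pandas.read_csv:
#
#     url: https://example.com/data.csv
#     user-agent: redash
#     skiprows: 1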
| {
"content_hash": "d7f62f3018452687c6f65246af3f9c73",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 130,
"avg_line_length": 30.53,
"alnum_prop": 0.5280052407468064,
"repo_name": "getredash/redash",
"id": "22aa148d5792080b902b359a0fc840255ebab034",
"size": "3053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "redash/query_runner/csv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "Dockerfile",
"bytes": "3500"
},
{
"name": "HTML",
"bytes": "32865"
},
{
"name": "JavaScript",
"bytes": "990852"
},
{
"name": "Less",
"bytes": "196598"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1238254"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "TypeScript",
"bytes": "521588"
}
],
"symlink_target": ""
} |
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
from argparse import ArgumentParser
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
    SystemExit on several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
        migrations on disk doesn't match the migrations in the database.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
        is the list of application configurations provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
        translations (as happens, for example, with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_migrations_checks = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options['no_color']:
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
            # (The final say on whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if self.requires_system_checks and not options.get('skip_checks'):
self.check()
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
output = '%s\n%s\n%s' % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
finally:
if saved_locale is not None:
translation.activate(saved_locale)
return output
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR):
"""
        Uses the system check framework to validate the entire Django project.
        Raises CommandError for any serious message (an error or a critical error).
If there are only light messages (like warnings), they are printed to
stderr and no exception is raised.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
        Print a warning if the set of migrations on disk doesn't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(set(migration.app_label for migration, backwards in plan))
            self.stdout.write(
                self.style.NOTICE(
                    "\nYou have %(unapplied_migration_count)s unapplied migration(s). "
                    "Your project may not work properly until you apply the "
                    "migrations for app(s): %(apps_waiting_migration)s." % {
                        "unapplied_migration_count": len(plan),
                        "apps_waiting_migration": ", ".join(apps_waiting_migration),
                    }
                )
            )
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
help='One or more application label.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
        raise NotImplementedError(
            "Subclasses of AppCommand must provide "
            "a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
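# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the subclassing flow described in the BaseCommand
# docstring: declare options in add_arguments() and put the command logic in
# handle(), whose return value is written to stdout by execute().  The class
# name, the "names" argument and the "--uppercase" flag are hypothetical; a
# real command would be a class named Command inside an app's
# management/commands/<name>.py module.
class GreetCommand(BaseCommand):
    help = 'Print a greeting for each of the given names (example only).'

    def add_arguments(self, parser):
        parser.add_argument('names', nargs='+', help='Names to greet.')
        parser.add_argument('--uppercase', action='store_true',
                            help='Print the greetings in upper case.')

    def handle(self, *args, **options):
        greetings = []
        for name in options['names']:
            greeting = 'Hello, %s!' % name
            if options['uppercase']:
                greeting = greeting.upper()
            greetings.append(greeting)
        # Returned output is written to stdout by execute().
        return '\n'.join(greetings)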
| {
"content_hash": "8c09bc962475a16aebc517c1b7d543b4",
"timestamp": "",
"source": "github",
"line_count": 537,
"max_line_length": 114,
"avg_line_length": 41.245810055865924,
"alnum_prop": 0.5973633121134136,
"repo_name": "yephper/django",
"id": "ad175d4def18d1e21cefe54511a52d2101fae776",
"size": "22174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/core/management/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
""" core implementation of testing process: init, session, runtest loop. """
import os
import sys
import _pytest
import _pytest._code
import py
import pytest
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
def pytest_addoption(parser):
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
type="args", default=[])
#parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
#)
group = parser.getgroup("general", "running and selection options")
group._addoption('-x', '--exitfirst', action="store_true", default=False,
dest="exitfirst",
help="exit instantly on first error or failed test."),
group._addoption('--maxfail', metavar="num",
action="store", type=int, dest="maxfail", default=0,
help="exit after first num failures or errors.")
group._addoption('--strict', action="store_true",
help="run pytest in strict mode, warnings become errors.")
group._addoption("-c", metavar="file", type=str, dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
group = parser.getgroup("collect", "collection")
group.addoption('--collectonly', '--collect-only', action="store_true",
help="only collect tests, don't execute them."),
group.addoption('--pyargs', action="store_true",
help="try to interpret all arguments as python packages.")
group.addoption("--ignore", action="append", metavar="path",
help="ignore path during collection (multi-allowed).")
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption('--confcutdir', dest="confcutdir", default=None,
metavar="dir",
help="only load conftest.py's relative to specified dir.")
group.addoption('--noconftest', action="store_true",
dest="noconftest", default=False,
help="Don't load any conftest.py files.")
group = parser.getgroup("debugconfig",
"test session debugging and configuration")
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
help="base temporary directory for this test run.")
def pytest_namespace():
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
return dict(collect=collect)
def pytest_configure(config):
    pytest.config = config # compatibility
if config.option.exitfirst:
config.option.maxfail = 1
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config._do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
session.exitstatus = doit(config, session) or 0
except pytest.UsageError:
raise
except KeyboardInterrupt:
excinfo = _pytest._code.ExceptionInfo()
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = EXIT_INTERRUPTED
except:
excinfo = _pytest._code.ExceptionInfo()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session,
exitstatus=session.exitstatus)
config._ensure_unconfigure()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
if session.testsfailed:
return EXIT_TESTSFAILED
elif session.testscollected == 0:
return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.config.option.collectonly:
return True
for i, item in enumerate(session.items):
nextitem = session.items[i+1] if i+1 < len(session.items) else None
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def pytest_ignore_collect(path, config):
p = path.dirpath()
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
return path in ignore_paths
class FSHookProxy:
def __init__(self, fspath, pm, remove_mods):
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
def compatproperty(name):
def fget(self):
# deprecated - use pytest.name
return getattr(pytest, name)
return property(fget)
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return iter(seen)
def __len__(self):
return len(self.__iter__())
def keys(self):
return list(self)
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
    """ base class for Collector and Item, the components of the test collection tree.
    Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__,
getattr(self, 'name', None))
def warn(self, code, message):
""" generate a warning with the given code and message for this
item. """
assert isinstance(code, str)
fslocation = getattr(self, "location", None)
if fslocation is None:
fslocation = getattr(self, "fspath", None)
else:
fslocation = "%s:%s" % fslocation[:2]
self.ihook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
nodeid=self.nodeid, fslocation=fslocation))
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
"""
from _pytest.mark import MarkDecorator
if isinstance(marker, py.builtin._basestring):
marker = MarkDecorator(marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name. """
val = self.keywords.get(name, None)
if val is not None:
from _pytest.mark import MarkInfo, MarkDecorator
if isinstance(val, (MarkDecorator, MarkInfo)):
return val
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
item = self
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style="long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(funcargs=True, abspath=abspath,
showlocals=self.config.option.showlocals,
style=style, tbfilter=tbfilter)
repr_failure = _repr_failure_py
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _memocollect(self):
""" internal helper method to cache results of calling collect(). """
return self._memoizedcall('_collected', lambda: list(self.collect()))
def _prunetraceback(self, excinfo):
if hasattr(self, 'fspath'):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, "/")
super(FSCollector, self).__init__(name, parent, config, session)
self.fspath = fspath
def _makeid(self):
relpath = self.fspath.relto(self.config.rootdir)
if os.sep != "/":
relpath = relpath.replace(os.sep, "/")
return relpath
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None):
super(Item, self).__init__(name, parent, config, session)
self._report_sections = []
def add_report_section(self, when, key, content):
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
class NoMatch(Exception):
    """ raised if matching cannot locate matching names. """
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = 'builtins' # for py3
class Session(FSCollector):
Interrupted = Interrupted
def __init__(self, config):
FSCollector.__init__(self, config.rootdir, parent=None,
config=config, session=self)
self._fs2hookproxy = {}
self.testsfailed = 0
self.testscollected = 0
self.shouldstop = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self.config.pluginmanager.register(self, name="session")
def _makeid(self):
return ""
@pytest.hookimpl(tryfirst=True)
def pytest_collectstart(self):
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, 'wasxfail'):
self.testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self.testsfailed >= maxfail:
self.shouldstop = "stopping after %d failures" % (
self.testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
try:
return self._fs2hookproxy[fspath]
except KeyError:
# check if we have the common case of running
            # hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
                # all plugins are active for this fspath
proxy = self.config.hook
self._fs2hookproxy[fspath] = proxy
return proxy
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
hook.pytest_collection_modifyitems(session=self,
config=self.config, items=items)
finally:
hook.pytest_collection_finish(session=self)
self.testscollected = len(items)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
self._initialpaths = set()
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
self._initialpaths.add(parts[0])
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
#XXX: test this
raise pytest.UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
names = self._parsearg(arg)
path = names.pop(0)
if path.check(dir=1):
assert not names, "invalid arg %r" %(arg,)
for path in path.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for x in self._collectfile(path):
yield x
else:
assert path.check(file=1)
for x in self.matchnodes(self._collectfile(path), names):
yield x
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
"""Convert a dotted module name to path.
"""
import pkgutil
try:
loader = pkgutil.find_loader(x)
except ImportError:
return x
if loader is None:
return x
# This method is sometimes invoked when AssertionRewritingHook, which
# does not define a get_filename method, is already in place:
try:
path = loader.get_filename()
except AttributeError:
# Retrieve path from AssertionRewritingHook:
path = loader.modules[x][0].co_filename
if loader.is_package(x):
path = os.path.dirname(path)
return path
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
parts = str(arg).split("::")
if self.config.option.pyargs:
parts[0] = self._tryconvertpyarg(parts[0])
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
msg = "file or package not found: "
else:
msg = "file not found: "
raise pytest.UsageError(msg + arg)
parts[0] = path
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, pytest.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
has_matched = False
for x in rep.result:
# TODO: remove parametrized workaround once collection structure contains parametrization
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, pytest.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
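# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how the session exit codes defined near the top of
# this module can be consumed when pytest is invoked programmatically;
# pytest.main() takes the same arguments as the command line.  The "tests/"
# path is a hypothetical example.
if __name__ == "__main__":
    exit_code = pytest.main(["tests/"])
    if exit_code == EXIT_NOTESTSCOLLECTED:
        sys.stderr.write("no tests were collected\n")
    elif exit_code == EXIT_TESTSFAILED:
        sys.stderr.write("some collected tests failed\n")
    sys.exit(exit_code)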
| {
"content_hash": "0bc4054e897c0af33111d175fe0d945c",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 125,
"avg_line_length": 35.53532608695652,
"alnum_prop": 0.5923376921312228,
"repo_name": "userzimmermann/pytest",
"id": "df99687ade59aba6b726f4847de7f7916a5f8e7c",
"size": "26154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_pytest/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1223900"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
} |
from test.test_support import verbose
import select
import os
# test some known error conditions
try:
rfd, wfd, xfd = select.select(1, 2, 3)
except TypeError:
pass
else:
print 'expected TypeError exception not raised'
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
try:
rfd, wfd, xfd = select.select([Nope()], [], [])
except TypeError:
pass
else:
print 'expected TypeError exception not raised'
try:
rfd, wfd, xfd = select.select([Almost()], [], [])
except TypeError:
pass
else:
print 'expected TypeError exception not raised'
try:
rfd, wfd, xfd = select.select([], [], [], 'not a number')
except TypeError:
pass
else:
print 'expected TypeError exception not raised'
def test():
import sys
if sys.platform[:3] in ('win', 'mac', 'os2', 'riscos'):
if verbose:
print "Can't test select easily on", sys.platform
return
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
if verbose:
print 'timeout =', tout
rfd, wfd, xfd = select.select([p], [], [], tout)
if (rfd, wfd, xfd) == ([], [], []):
continue
if (rfd, wfd, xfd) == ([p], [], []):
line = p.readline()
if verbose:
print repr(line)
if not line:
if verbose:
print 'EOF'
break
continue
print 'Unexpected return values from select():', rfd, wfd, xfd
p.close()
test()
| {
"content_hash": "4386a60a6aa511c596c4dc30d7723c75",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 75,
"avg_line_length": 23.926470588235293,
"alnum_prop": 0.5457897971727105,
"repo_name": "MalloyPower/parsing-python",
"id": "eaec52be194b7c20b7ce7aaaa35afc63273b6fb3",
"size": "1651",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4/Lib/test/test_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
def main():
largestProduct = 0
for i in range(999,100,-1):
for j in range(i,100,-1):
product = i*j
if str(product) == str(product)[::-1] and product > largestProduct:
largestProduct = product
print(largestProduct)
if __name__ == "__main__":
main()
| {
"content_hash": "feac2e978a248e3e7054d1da5379dee6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 28.454545454545453,
"alnum_prop": 0.5303514376996805,
"repo_name": "clairejaja/project-euler",
"id": "c7742eccab86c5c046c69a6c6dfdd9b665e535ad",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/problem4/largest_palindrome_product.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "53772"
},
{
"name": "Python",
"bytes": "4524"
},
{
"name": "Scala",
"bytes": "5151"
}
],
"symlink_target": ""
} |
"""Starter script for Nova Conductor."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova import service
from nova import utils
CONF = cfg.CONF
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
def main():
objects.register_all()
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
server = service.Service.create(binary='nova-conductor',
topic=CONF.conductor.topic,
manager=CONF.conductor.manager)
service.serve(server)
service.wait()
| {
"content_hash": "d5bcf079139be84acc89cbbfb999cd87",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.6602373887240356,
"repo_name": "shootstar/novatest",
"id": "3fc25eb8c6986a632c1f92efbc02ab84ad8a11ba",
"size": "1324",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/cmd/conductor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11238174"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from links.models import Link
# Settings for link
class LinkAdmin(admin.ModelAdmin):
# Fields to list
list_display = ('user', 'kintera_id', 't4k_url', 'url', 'clicks', 'last_click')
# Filter by
list_filter = ('user', )
# Fields to order by
ordering = ('user',)
# Add to admin
admin.site.register(Link, LinkAdmin)
| {
"content_hash": "78e5e8d81fbbe3513def9125d0e8c43f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 24.133333333333333,
"alnum_prop": 0.6795580110497238,
"repo_name": "ethanperez/t4k-rms",
"id": "9ee1cac8365cf58dae8d68309417fbca352ece89",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "links/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28133"
},
{
"name": "Python",
"bytes": "39561"
}
],
"symlink_target": ""
} |
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from clusterCompare import cluster_compare
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['penguin_2014-10-12']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
steps = [20]
penguins_at = {k:[] for k in steps}
alreadyThere = False
subject_index = 0
import cPickle as pickle
to_sample = pickle.load(open(base_directory+"/Databases/sample.pickle","rb"))
import random
#for subject in collection2.find({"classification_count": 20}):
noise_list = {k:[] for k in steps}
marking_count = {}
for zooniverse_id in random.sample(to_sample,len(to_sample)):
#zooniverse_id = "APZ0001mt9"
subject = collection2.find_one({"zooniverse_id": zooniverse_id})
subject_index += 1
#if subject_index == 2:
# break
#zooniverse_id = subject["zooniverse_id"]
print "=== " + str(subject_index)
print zooniverse_id
alreadyThere = True
user_markings = {k:[] for k in steps}
user_ips = {k:[] for k in steps}
user_index = 0
total_marks = []
for classification in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
user_index += 1
if user_index == 21:
break
per_user = []
ip = classification["user_ip"]
try:
markings_list = classification["annotations"][1]["value"]
if isinstance(markings_list,dict):
mm = 0
for marking in markings_list.values():
if marking["value"] in ["adult","chick"]:
mm += 1
x,y = (float(marking["x"]),float(marking["y"]))
if not((x,y) in per_user):
per_user.append((x,y))
for s in steps:
if user_index < s:
user_markings[s].append((x,y))
user_ips[s].append(ip)
total_marks.append(mm)
except (KeyError, ValueError):
#classification["annotations"]
user_index += -1
penguins = []
penguins_center = {}
noise_points = {}
try:
for s in steps:
if s == 25:
user_identified_penguins,penguin_clusters,noise__ = DivisiveDBSCAN(3).fit(user_markings[s],user_ips[s],debug=True,jpeg_file=base_directory + "/Databases/penguins/images/"+object_id+".JPG")
else:
user_identified_penguins,penguin_clusters,noise__ = DivisiveDBSCAN(3).fit(user_markings[s],user_ips[s],debug=True)
penguins_at[s].append(len(user_identified_penguins))
penguins_center[s] = user_identified_penguins
#noise_list[s].append(noise)
#penguins.append(penguin_clusters)
#print penguin_clusters
#print noise__
noise_points[s] = [x for x,u in noise__]
print str(s) + " - " + str(len(user_identified_penguins))
if not(len(user_identified_penguins) in marking_count):
marking_count[len(user_identified_penguins)] = total_marks
else:
marking_count[len(user_identified_penguins)].extend(total_marks)
#if len(user_identified_penguins) > 20:
# break
except AssertionError:
continue
if len(user_identified_penguins) == 0:
continue
# if len(user_identified_penguins) <= 20:
# #print noise__
# not_found = cluster_compare(penguins[0],penguins[-1])
# if not_found == []:
# continue
#
#
#
# image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
# image = plt.imread(image_file)
# fig, ax = plt.subplots()
# im = ax.imshow(image)
#
# try:
# X,Y = zip(*penguins_center[5])
# plt.plot(X,Y,'.',color="red")
# except ValueError:
# pass
#
# X,Y = zip(*noise_points[5])
# plt.plot(X,Y,'.',color="green")
# print [(x,y) for i,(x,y) in enumerate(user_identified_penguins) if i in not_found]
# X,Y = zip(*[(x,y) for i,(x,y) in enumerate(user_identified_penguins) if i in not_found])
# #X,Y = zip(*noise)
#
# plt.plot(X,Y,'.',color="blue")
# plt.show()
if (subject_index % 5) == 0:
print "WRITING"
pickle.dump(marking_count,open(base_directory+"/Databases/penguins_at_3__.pickle","wb"))
# max5_10 = {}
# for x,y in zip(penguins_at[5],penguins_at[10]):
# if not(x in max5_10):
# max5_10[x] = y
# else:
# max5_10[x] = max(max5_10[x],y)
#
# print max5_10
#
# max10_15 = {}
# for x,y in zip(penguins_at[10],penguins_at[15]):
# if not(x in max5_10):
# max5_10[x] = y
# else:
# max5_10[x] = max(max5_10[x],y)
#fig, (ax0, ax1) = plt.subplots(nrows=2)
#plt.plot(penguins_at[5],penguins_at[10],'.')
#plt.plot(penguins_at[10],penguins_at[15],'.',color="green")
#plt.plot((0,100),(0,100))
#plt.show()
| {
"content_hash": "b398ab9e1aa56fd85a399604040820d7",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 204,
"avg_line_length": 32.30113636363637,
"alnum_prop": 0.570976253298153,
"repo_name": "zooniverse/aggregation",
"id": "ad16f449b4418e86ce090be6678e196865609c11",
"size": "5707",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experimental/penguins/clusterAnalysis/ratio.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "723"
},
{
"name": "Python",
"bytes": "2184451"
},
{
"name": "Scala",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
from cudatypes import dim3, Pointer
from cudaarray import BaseCudaArray, CudaArray
class CudaPyError (Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Handle grid and block dims, coerce CudaArrays into bare pointers
def wrapper(fun, sig, funName):
threadSize = 512
sig = sig[1:]
def kernel(callDim, y = 1, z = 1):
if not isinstance(callDim, dim3):
callDim = dim3(callDim, y, z)
blockDim = allocateThreads(threadSize, callDim)
gridDim = getGridDim(callDim, blockDim)
def coerceArgs(*args):
args = list(args)
if len(args) != len(sig):
raise CudaPyError(funName + " takes " + str(len(sig)) + " arguments.")
temps = [] # Prevent premature garbage collection
for i in xrange(len(sig)):
if isinstance(sig[i], Pointer):
if isinstance(args[i], list):
temps.append(CudaArray(args[i]))
args[i] = temps[-1]
assert isinstance(args[i], BaseCudaArray), "expected CudaArray found " + type(args[i]).__name__
assert args[i].elemType() == sig[i].elemType(), "argument types do not match"
args[i] = args[i].pointer()
args = [gridDim, blockDim] + args
fun(*args)
return coerceArgs
    return kernel
# Allocate available threads to three dimensions
def allocateThreads(threads, dim):
def power_two(n):
return 1 << (n.bit_length() - 1)
tx = min(threads, power_two(dim.x))
threads //= tx
ty = min(threads, power_two(dim.y))
threads //= ty
tz = min(threads, power_two(dim.z))
threads //= tz
return dim3(tx, ty, tz)
# Compute grid dimensions from data and block dimensions
def getGridDim(callDim, blockDim):
def divideUp(n, d):
return (n + d - 1) // d
x = divideUp(callDim.x, blockDim.x)
y = divideUp(callDim.y, blockDim.y)
z = divideUp(callDim.z, blockDim.z)
return dim3(x, y, z)
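# --- Illustrative sketch (not part of the original module) ---
# A small demonstration of the two helpers above: allocateThreads() hands out
# a power-of-two number of threads per axis from the available budget, and
# getGridDim() rounds the call dimensions up to whole blocks.  The 1920x1080
# problem size and the 512-thread budget mirror the values used by wrapper()
# but are otherwise arbitrary examples.
if __name__ == "__main__":
    call_dim = dim3(1920, 1080, 1)
    block_dim = allocateThreads(512, call_dim)
    grid_dim = getGridDim(call_dim, block_dim)
    # Expected result: blocks of 512x1x1 threads arranged in a 4x1080x1 grid.
    print("block: %d x %d x %d" % (block_dim.x, block_dim.y, block_dim.z))
    print("grid:  %d x %d x %d" % (grid_dim.x, grid_dim.y, grid_dim.z))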
| {
"content_hash": "a6a5195dae22d6fa532286e0e6156360",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 105,
"avg_line_length": 27.82608695652174,
"alnum_prop": 0.6338541666666667,
"repo_name": "tammyyang/CudaPy",
"id": "09ca4503a68188ed598b2fdd4ee82ea15351e008",
"size": "1920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cudapy/wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "376"
},
{
"name": "Cuda",
"bytes": "939"
},
{
"name": "Haskell",
"bytes": "32862"
},
{
"name": "Makefile",
"bytes": "1190"
},
{
"name": "Python",
"bytes": "17391"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Quotes',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('text', models.CharField(max_length=512)),
],
),
]
| {
"content_hash": "aa98bba3a7de7f18346989f8b4f506d5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 114,
"avg_line_length": 24.105263157894736,
"alnum_prop": 0.5698689956331878,
"repo_name": "rjelte/web-dev-demos",
"id": "2ab1116801250e6dd89f2baa3d0dc2f8bc317b8b",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rjelte/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52873"
},
{
"name": "Groff",
"bytes": "80"
},
{
"name": "HTML",
"bytes": "38251"
},
{
"name": "JavaScript",
"bytes": "262145"
},
{
"name": "Python",
"bytes": "12046"
},
{
"name": "Shell",
"bytes": "3781"
}
],
"symlink_target": ""
} |
from .base import BaseCaptcha
from .enums import ReCaptchaEnm
class BaseReCaptcha(BaseCaptcha):
def __init__(self, pageurl: str, googlekey: str, method: str = ReCaptchaEnm.USER_RECAPTCHA.value, *args, **kwargs):
super().__init__(method=method, *args, **kwargs)
self.post_payload.update({"googlekey": googlekey, "pageurl": pageurl})
# check user params
if method not in ReCaptchaEnm.list_values():
raise ValueError(f"Invalid method parameter set, available - {ReCaptchaEnm.list_values()}")
class ReCaptcha(BaseReCaptcha):
"""
The class is used to work with ReCaptcha
Solve description:
https://rucaptcha.com/api-rucaptcha#solving_recaptchav2_new
https://rucaptcha.com/api-rucaptcha#invisible
https://rucaptcha.com/api-rucaptcha#solving_recaptchav3
https://rucaptcha.com/api-rucaptcha#solving_recaptcha_enterprise
"""
def captcha_handler(self, **kwargs):
"""
The method is responsible for sending data to the server to solve the captcha
:param kwargs: Parameters for the `requests` library
:return: Response to captcha as JSON string with fields:
captchaSolve - captcha solution,
taskId - finds the ID of the task to solve the captcha,
error - False - if everything is fine, True - if there is an error,
errorBody - error name
"""
return self._processing_response(**kwargs)
class aioReCaptcha(BaseReCaptcha):
"""
    The class is used for async work with ReCaptcha
Solve description:
https://rucaptcha.com/api-rucaptcha#solving_recaptchav2_new
https://rucaptcha.com/api-rucaptcha#invisible
https://rucaptcha.com/api-rucaptcha#solving_recaptchav3
https://rucaptcha.com/api-rucaptcha#solving_recaptcha_enterprise
"""
async def captcha_handler(self):
"""
The method is responsible for sending data to the server to solve the captcha
:return: Response to captcha as JSON string with fields:
captchaSolve - captcha solution,
taskId - finds the ID of the task to solve the captcha,
error - False - if everything is fine, True - if there is an error,
errorBody - error name
"""
return await self._aio_processing_response()
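# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the synchronous flow described in the ReCaptcha
# docstring above.  The service key, page URL and site key below are
# placeholders, and passing the key as ``rucaptcha_key`` assumes that
# BaseCaptcha accepts it through **kwargs; check BaseCaptcha's signature
# before relying on this sketch.
if __name__ == "__main__":
    solver = ReCaptcha(
        rucaptcha_key="YOUR_SERVICE_KEY",  # placeholder, assumed BaseCaptcha kwarg
        pageurl="https://example.com/page-with-captcha",  # placeholder
        googlekey="SITE_KEY_FROM_THE_PAGE",  # placeholder
        method=ReCaptchaEnm.USER_RECAPTCHA.value,
    )
    result = solver.captcha_handler()
    # ``result`` carries the fields described in the docstring:
    # captchaSolve, taskId, error and errorBody.
    print(result)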
| {
"content_hash": "2c309fae190506c234bdc4278c87c507",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 40.610169491525426,
"alnum_prop": 0.6519198664440734,
"repo_name": "AndreiDrang/python-rucaptcha",
"id": "071a46829c33e84e7b66f94faf8f3ffe76537891",
"size": "2396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python_rucaptcha/ReCaptcha.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "569"
},
{
"name": "Python",
"bytes": "130612"
}
],
"symlink_target": ""
} |
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '6.5+'
from com.vmware.vcenter.vm.hardware.boot_client import Device as BootDevice
from com.vmware.vcenter.vm.hardware_client import (
Disk, Ethernet)
from com.vmware.vcenter.vm.hardware_client import ScsiAddressSpec
from com.vmware.vcenter.vm_client import (Power)
from com.vmware.vcenter_client import VM
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common.ssl_helper import get_unverified_session
from samples.vsphere.common import sample_cli
from samples.vsphere.common import sample_util
from samples.vsphere.common.sample_util import pp
from samples.vsphere.vcenter.helper import network_helper
from samples.vsphere.vcenter.helper import vm_placement_helper
from samples.vsphere.vcenter.helper.vm_helper import get_vm
from samples.vsphere.vcenter.setup import testbed
class CreateBasicVM(object):
"""
    Demonstrates how to create a basic VM with the following configuration:
2 disks, 1 nic
Sample Prerequisites:
- datacenter
- vm folder
- datastore
- standard switch network
"""
def __init__(self, client=None, placement_spec=None):
self.client = client
self.placement_spec = placement_spec
self.vm_name = testbed.config['VM_NAME_BASIC']
self.cleardata = None
# Execute the sample in standalone mode.
if not self.client:
parser = sample_cli.build_arg_parser()
parser.add_argument('-n', '--vm_name',
action='store',
help='Name of the testing vm')
args = sample_util.process_cli_args(parser.parse_args())
if args.vm_name:
self.vm_name = args.vm_name
self.cleardata = args.cleardata
session = get_unverified_session() if args.skipverification else None
self.client = create_vsphere_client(server=args.server,
username=args.username,
password=args.password,
session=session)
def run(self):
# Get a placement spec
datacenter_name = testbed.config['VM_DATACENTER_NAME']
vm_folder_name = testbed.config['VM_FOLDER2_NAME']
datastore_name = testbed.config['VM_DATASTORE_NAME']
std_portgroup_name = testbed.config['STDPORTGROUP_NAME']
if not self.placement_spec:
self.placement_spec = vm_placement_helper.get_placement_spec_for_resource_pool(
self.client,
datacenter_name,
vm_folder_name,
datastore_name)
# Get a standard network backing
standard_network = network_helper.get_standard_network_backing(
self.client,
std_portgroup_name,
datacenter_name)
"""
Create a basic VM.
Using the provided PlacementSpec, create a VM with a selected Guest OS
and provided name.
Create a VM with the following configuration:
* Create 2 disks and specify one of them on scsi0:0 since it's the boot disk
* Specify 1 ethernet adapter using a Standard Portgroup backing
* Setup for PXE install by selecting network as first boot device
Use guest and system provided defaults for most configuration settings.
"""
guest_os = testbed.config['VM_GUESTOS']
boot_disk = Disk.CreateSpec(type=Disk.HostBusAdapterType.SCSI,
scsi=ScsiAddressSpec(bus=0, unit=0),
new_vmdk=Disk.VmdkCreateSpec())
data_disk = Disk.CreateSpec(new_vmdk=Disk.VmdkCreateSpec())
nic = Ethernet.CreateSpec(
start_connected=True,
backing=Ethernet.BackingSpec(
type=Ethernet.BackingType.STANDARD_PORTGROUP,
network=standard_network))
boot_device_order = [
BootDevice.EntryCreateSpec(BootDevice.Type.ETHERNET),
BootDevice.EntryCreateSpec(BootDevice.Type.DISK)]
vm_create_spec = VM.CreateSpec(name=self.vm_name,
guest_os=guest_os,
placement=self.placement_spec,
disks=[boot_disk, data_disk],
nics=[nic],
boot_devices=boot_device_order)
print('\n# Example: create_basic_vm: Creating a VM using spec\n-----')
print(pp(vm_create_spec))
print('-----')
vm = self.client.vcenter.VM.create(vm_create_spec)
print("create_basic_vm: Created VM '{}' ({})".format(self.vm_name, vm))
vm_info = self.client.vcenter.VM.get(vm)
print('vm.get({}) -> {}'.format(vm, pp(vm_info)))
return vm
def cleanup(self):
vm = get_vm(self.client, self.vm_name)
if vm:
state = self.client.vcenter.vm.Power.get(vm)
if state == Power.Info(state=Power.State.POWERED_ON):
self.client.vcenter.vm.Power.stop(vm)
elif state == Power.Info(state=Power.State.SUSPENDED):
self.client.vcenter.vm.Power.start(vm)
self.client.vcenter.vm.Power.stop(vm)
print("Deleting VM '{}' ({})".format(self.vm_name, vm))
self.client.vcenter.VM.delete(vm)
def main():
create_basic_vm = CreateBasicVM()
create_basic_vm.cleanup()
create_basic_vm.run()
if create_basic_vm.cleardata:
create_basic_vm.cleanup()
if __name__ == '__main__':
main()
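# --- Editorial usage sketch (appended; not part of the upstream sample) ------
# Driving this sample from another script instead of the standalone CLI path
# in __init__; the datacenter/folder/datastore names below are placeholders.
#
#   client = create_vsphere_client(server=..., username=..., password=...)
#   placement = vm_placement_helper.get_placement_spec_for_resource_pool(
#       client, 'my-datacenter', 'my-vm-folder', 'my-datastore')
#   sample = CreateBasicVM(client=client, placement_spec=placement)
#   sample.cleanup()      # drop any leftover VM with the same name first
#   vm_id = sample.run()  # returns the identifier of the new VM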
| {
"content_hash": "938d10e445971ff65831f9f95f9ed6f6",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 91,
"avg_line_length": 38.869565217391305,
"alnum_prop": 0.5942793224672419,
"repo_name": "pgbidkar/vsphere-automation-sdk-python",
"id": "18c31cde5ea12204b7b48a6c1dbf319a8286211b",
"size": "6281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/vsphere/vcenter/vm/create/create_basic_vm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1232"
},
{
"name": "Python",
"bytes": "2656"
}
],
"symlink_target": ""
} |
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.asset_v1p1beta1.types import asset_service
from google.cloud.asset_v1p1beta1.types import assets
class SearchAllResourcesPager:
"""A pager for iterating through ``search_all_resources`` requests.
This class thinly wraps an initial
:class:`google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse` object, and
provides an ``__iter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__iter__`` method will make additional
``SearchAllResources`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., asset_service.SearchAllResourcesResponse],
request: asset_service.SearchAllResourcesRequest,
response: asset_service.SearchAllResourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.asset_v1p1beta1.types.SearchAllResourcesRequest):
The initial request object.
response (google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = asset_service.SearchAllResourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[asset_service.SearchAllResourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[assets.StandardResourceMetadata]:
for page in self.pages:
yield from page.results
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class SearchAllResourcesAsyncPager:
"""A pager for iterating through ``search_all_resources`` requests.
This class thinly wraps an initial
:class:`google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``SearchAllResources`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[asset_service.SearchAllResourcesResponse]],
request: asset_service.SearchAllResourcesRequest,
response: asset_service.SearchAllResourcesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.asset_v1p1beta1.types.SearchAllResourcesRequest):
The initial request object.
response (google.cloud.asset_v1p1beta1.types.SearchAllResourcesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = asset_service.SearchAllResourcesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[asset_service.SearchAllResourcesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[assets.StandardResourceMetadata]:
async def async_generator():
async for page in self.pages:
for response in page.results:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class SearchAllIamPoliciesPager:
"""A pager for iterating through ``search_all_iam_policies`` requests.
This class thinly wraps an initial
:class:`google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse` object, and
provides an ``__iter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__iter__`` method will make additional
``SearchAllIamPolicies`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., asset_service.SearchAllIamPoliciesResponse],
request: asset_service.SearchAllIamPoliciesRequest,
response: asset_service.SearchAllIamPoliciesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesRequest):
The initial request object.
response (google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = asset_service.SearchAllIamPoliciesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[asset_service.SearchAllIamPoliciesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[assets.IamPolicySearchResult]:
for page in self.pages:
yield from page.results
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class SearchAllIamPoliciesAsyncPager:
"""A pager for iterating through ``search_all_iam_policies`` requests.
This class thinly wraps an initial
:class:`google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__aiter__`` method will make additional
``SearchAllIamPolicies`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[asset_service.SearchAllIamPoliciesResponse]],
request: asset_service.SearchAllIamPoliciesRequest,
response: asset_service.SearchAllIamPoliciesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesRequest):
The initial request object.
response (google.cloud.asset_v1p1beta1.types.SearchAllIamPoliciesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = asset_service.SearchAllIamPoliciesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[asset_service.SearchAllIamPoliciesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[assets.IamPolicySearchResult]:
async def async_generator():
async for page in self.pages:
for response in page.results:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| {
"content_hash": "f4428064ba78e2931d3a626e21d3d657",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 90,
"avg_line_length": 39.438661710037174,
"alnum_prop": 0.6547271184843058,
"repo_name": "googleapis/python-asset",
"id": "a2aea41b482369d808176168b8068dae4e18b4fe",
"size": "11209",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/asset_v1p1beta1/services/asset_service/pagers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1590384"
},
{
"name": "Shell",
"bytes": "30657"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
DEFAULT_SUBMENU_PLUGIN_TEMPLATE_CHOICES = (
('publication_backbone/plugins/sub_menu/default.html', _("Default sub menu")),
('publication_backbone/plugins/sub_menu/horizontal.html', _("As string submenu")),
)
SUBMENU_PLUGIN_TEMPLATE_CHOICES = getattr(settings, 'SUBMENU_PLUGIN_TEMPLATE_CHOICES', DEFAULT_SUBMENU_PLUGIN_TEMPLATE_CHOICES)
| {
"content_hash": "503b07a299aa68d592a35cbf2b275681",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 127,
"avg_line_length": 49.55555555555556,
"alnum_prop": 0.7511210762331838,
"repo_name": "Excentrics/publication-backbone",
"id": "b35866e95be781fede1bcc49dd98942e3b54fc44",
"size": "470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "publication_backbone/plugins/sub_menu/appsettings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "447762"
},
{
"name": "HTML",
"bytes": "217091"
},
{
"name": "JavaScript",
"bytes": "904819"
},
{
"name": "Python",
"bytes": "470545"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response
from django.template import RequestContext
from markitup import settings
from markitup.markup import filter_func
def apply_filter(request):
markup = filter_func(request.POST.get('data', ''))
    return render_to_response('markitup/preview.html',
                              {'preview': markup},
                              context_instance=RequestContext(request))
| {
"content_hash": "f10a559cc66c2494c200841f63303897",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 38.54545454545455,
"alnum_prop": 0.6698113207547169,
"repo_name": "chrisdev/django-markitup",
"id": "df89c50bee8fdd4bac925c1ed4e9a463367b3928",
"size": "424",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "markitup/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
The plugin for capturing and storing the page source on errors and failures.
"""
import os
import codecs
from nose.plugins import Plugin
from seleniumbase.config import settings
from seleniumbase.core import log_helper
class PageSource(Plugin):
"""
This plugin will capture the page source when a test fails
or raises an error. It will store the page source in the
logs file specified, along with default test information.
"""
name = "page_source" # Usage: --with-page_source
logfile_name = settings.PAGE_SOURCE_NAME
def options(self, parser, env):
super(PageSource, self).options(parser, env=env)
def configure(self, options, conf):
super(PageSource, self).configure(options, conf)
if not self.enabled:
return
self.options = options
def addError(self, test, err, capt=None):
try:
page_source = test.driver.page_source
except Exception:
# Since we can't get the page source from here, skip saving it
return
test_logpath = self.options.log_path + "/" + test.id()
if not os.path.exists(test_logpath):
os.makedirs(test_logpath)
html_file_name = os.path.join(test_logpath, self.logfile_name)
html_file = codecs.open(html_file_name, "w+", "utf-8")
rendered_source = log_helper.get_html_source_with_base_href(
test.driver, page_source
)
html_file.write(rendered_source)
html_file.close()
def addFailure(self, test, err, capt=None, tbinfo=None):
try:
page_source = test.driver.page_source
except Exception:
# Since we can't get the page source from here, skip saving it
return
test_logpath = self.options.log_path + "/" + test.id()
if not os.path.exists(test_logpath):
os.makedirs(test_logpath)
html_file_name = os.path.join(test_logpath, self.logfile_name)
html_file = codecs.open(html_file_name, "w+", "utf-8")
rendered_source = log_helper.get_html_source_with_base_href(
test.driver, page_source
)
html_file.write(rendered_source)
html_file.close()
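# --- Editorial usage note (appended; not part of the upstream plugin) --------
# Per the ``Usage`` hint on the ``name`` attribute above, the plugin is turned
# on through nose; on an error or failure the page source is written into the
# per-test log directory as ``settings.PAGE_SOURCE_NAME``:
#
#   nosetests my_test_module.py --with-page_source
#
# (``my_test_module.py`` is a placeholder; the log directory comes from the
# ``log_path`` option registered by SeleniumBase's core plugin, not here.)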
| {
"content_hash": "e57296c62a11a5f387cf5c06947bd59d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 35.333333333333336,
"alnum_prop": 0.62848158131177,
"repo_name": "seleniumbase/SeleniumBase",
"id": "af9e217e5a7e3e4d03a7c5410171a81751e22c2f",
"size": "2226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seleniumbase/plugins/page_source.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1503"
},
{
"name": "Dockerfile",
"bytes": "3823"
},
{
"name": "Gherkin",
"bytes": "5076"
},
{
"name": "HTML",
"bytes": "10180"
},
{
"name": "JavaScript",
"bytes": "1338"
},
{
"name": "Python",
"bytes": "2298163"
},
{
"name": "Shell",
"bytes": "13488"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.utils.importlib import import_module
from django.core.management.base import BaseCommand
from mapentity import registry
from mapentity.registry import create_mapentity_model_permissions
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Create MapEntity models permissions"
def execute(self, *args, **options):
logger.info("Synchronize permissions of MapEntity models")
# Make sure apps are registered at this point
import_module(settings.ROOT_URLCONF)
# For all models registered, add missing bits
for model in registry.registry.keys():
create_mapentity_model_permissions(model)
logger.info("Done.")
| {
"content_hash": "6dbcee6e3430dda002c1ebdb8e050d62",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 28.185185185185187,
"alnum_prop": 0.7293035479632063,
"repo_name": "Anaethelion/django-mapentity",
"id": "f6f076bab0832d978c8dd2e6bca3e87459d575c0",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapentity/management/commands/update_permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33501"
},
{
"name": "HTML",
"bytes": "27408"
},
{
"name": "JavaScript",
"bytes": "484635"
},
{
"name": "Python",
"bytes": "204082"
}
],
"symlink_target": ""
} |
"""Class representing an X.509 certificate."""
from utils.ASN1Parser import ASN1Parser
from utils.cryptomath import *
from utils.keyfactory import _createPublicRSAKey
class X509:
"""This class represents an X.509 certificate.
@type bytes: L{array.array} of unsigned bytes
@ivar bytes: The DER-encoded ASN.1 certificate
@type publicKey: L{tlslite.utils.RSAKey.RSAKey}
@ivar publicKey: The subject public key from the certificate.
@type subject: L{array.array} of unsigned bytes
@ivar subject: The DER-encoded ASN.1 subject distinguished name.
"""
def __init__(self):
self.bytes = createByteArraySequence([])
self.publicKey = None
self.subject = None
def parse(self, s):
"""Parse a PEM-encoded X.509 certificate.
@type s: str
@param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded
certificate wrapped with "-----BEGIN CERTIFICATE-----" and
"-----END CERTIFICATE-----" tags).
"""
start = s.find("-----BEGIN CERTIFICATE-----")
end = s.find("-----END CERTIFICATE-----")
if start == -1:
raise SyntaxError("Missing PEM prefix")
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN CERTIFICATE-----") : end]
bytes = base64ToBytes(s)
self.parseBinary(bytes)
return self
def parseBinary(self, bytes):
"""Parse a DER-encoded X.509 certificate.
@type bytes: str or L{array.array} of unsigned bytes
@param bytes: A DER-encoded X.509 certificate.
"""
if isinstance(bytes, type("")):
bytes = stringToBytes(bytes)
self.bytes = bytes
p = ASN1Parser(bytes)
#Get the tbsCertificate
tbsCertificateP = p.getChild(0)
#Is the optional version field present?
#This determines which index the key is at.
if tbsCertificateP.value[0]==0xA0:
subjectPublicKeyInfoIndex = 6
else:
subjectPublicKeyInfoIndex = 5
#Get the subject
self.subject = tbsCertificateP.getChildBytes(\
subjectPublicKeyInfoIndex - 1)
#Get the subjectPublicKeyInfo
subjectPublicKeyInfoP = tbsCertificateP.getChild(\
subjectPublicKeyInfoIndex)
#Get the algorithm
algorithmP = subjectPublicKeyInfoP.getChild(0)
rsaOID = algorithmP.value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the subjectPublicKey
subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)
#Adjust for BIT STRING encapsulation
if (subjectPublicKeyP.value[0] !=0):
raise SyntaxError()
subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])
#Get the modulus and exponent
modulusP = subjectPublicKeyP.getChild(0)
publicExponentP = subjectPublicKeyP.getChild(1)
#Decode them into numbers
n = bytesToNumber(modulusP.value)
e = bytesToNumber(publicExponentP.value)
#Create a public key instance
self.publicKey = _createPublicRSAKey(n, e)
def getFingerprint(self):
"""Get the hex-encoded fingerprint of this certificate.
@rtype: str
@return: A hex-encoded fingerprint.
"""
return sha.sha(self.bytes).hexdigest()
def getCommonName(self):
"""Get the Subject's Common Name from the certificate.
The cryptlib_py module must be installed in order to use this
function.
@rtype: str or None
@return: The CN component of the certificate's subject DN, if
present.
"""
import cryptlib_py
import array
c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED)
name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME
try:
try:
length = cryptlib_py.cryptGetAttributeString(c, name, None)
returnVal = array.array('B', [0] * length)
cryptlib_py.cryptGetAttributeString(c, name, returnVal)
returnVal = returnVal.tostring()
except cryptlib_py.CryptException, e:
if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
returnVal = None
return returnVal
finally:
cryptlib_py.cryptDestroyCert(c)
def writeBytes(self):
return self.bytes
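# --- Editorial usage sketch (appended; not part of the upstream module) ------
def _example_fingerprint_pem(pem_text):
    """Illustrative helper (not upstream code): parse a PEM cert and hash it.

    ``pem_text`` is a string wrapped in the BEGIN/END CERTIFICATE markers
    described in parse() above; parse() returns the X509 instance itself.
    """
    cert = X509().parse(pem_text)
    return cert.getFingerprint()   # hex digest of the DER-encoded bytes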
| {
"content_hash": "36f8d619e92f61792f03fa39e52f3e02",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 32.45390070921986,
"alnum_prop": 0.607298951048951,
"repo_name": "Crystalnix/house-of-life-chromium",
"id": "d8b8bccb1414c62093fbfb3e20259ea711bfe2a3",
"size": "4576",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/tlslite/tlslite/X509.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "3418"
},
{
"name": "C",
"bytes": "88445923"
},
{
"name": "C#",
"bytes": "73756"
},
{
"name": "C++",
"bytes": "77228136"
},
{
"name": "Emacs Lisp",
"bytes": "6648"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "11354"
},
{
"name": "JavaScript",
"bytes": "6191433"
},
{
"name": "Objective-C",
"bytes": "4023654"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "92217"
},
{
"name": "Python",
"bytes": "5604932"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "1234672"
},
{
"name": "Tcl",
"bytes": "200213"
}
],
"symlink_target": ""
} |
"""
A Marketplace only reindexing that indexes only apps.
This avoids a lot of complexity for now. We might want an all encompassing
reindex command that has args for AMO and MKT.
"""
import datetime
import logging
import os
import sys
import time
from optparse import make_option
import pyelasticsearch
from celery import task
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from amo.utils import chunked, timestamp_index
from addons.models import Webapp # To avoid circular import.
from lib.es.models import Reindexing
from lib.es.utils import database_flagged
from mkt.webapps.models import WebappIndexer
logger = logging.getLogger('z.elasticsearch')
# Enable these to get full debugging information.
# logging.getLogger('pyelasticsearch').setLevel(logging.DEBUG)
# logging.getLogger('requests').setLevel(logging.DEBUG)
# The subset of settings.ES_INDEXES we are concerned with.
ALIAS = settings.ES_INDEXES['webapp']
if hasattr(settings, 'ES_URLS'):
ES_URL = settings.ES_URLS[0]
else:
ES_URL = 'http://127.0.0.1:9200'
ES = pyelasticsearch.ElasticSearch(ES_URL)
job = 'lib.es.management.commands.reindex_mkt.run_indexing'
time_limits = settings.CELERY_TIME_LIMITS[job]
@task
def delete_index(old_index):
"""Removes the index."""
sys.stdout.write('Removing index %r' % old_index)
ES.delete_index(old_index)
@task
def create_index(new_index, alias, settings):
"""Creates a mapping for the new index.
- new_index: new index name
- alias: alias name
- settings: a dictionary of settings
"""
sys.stdout.write(
'Create the mapping for index %r, alias: %r' % (new_index, alias))
# Update settings with mapping.
settings = {
'settings': settings,
'mappings': WebappIndexer.get_mapping(),
}
# Create index and mapping.
try:
ES.create_index(new_index, settings)
except pyelasticsearch.exceptions.IndexAlreadyExistsError:
raise CommandError('New index [%s] already exists' % new_index)
# Don't return until the health is green. By default waits for 30s.
ES.health(new_index, wait_for_status='green', wait_for_relocating_shards=0)
def index_webapp(ids, **kw):
index = kw.pop('index', None) or ALIAS
sys.stdout.write('Indexing %s apps' % len(ids))
qs = Webapp.indexing_transformer(Webapp.with_deleted.no_cache()
.filter(id__in=ids))
docs = []
for obj in qs:
try:
docs.append(WebappIndexer.extract_document(obj.id, obj=obj))
except:
sys.stdout.write('Failed to index obj: {0}'.format(obj.id))
WebappIndexer.bulk_index(docs, es=ES, index=index)
@task(time_limit=time_limits['hard'], soft_time_limit=time_limits['soft'])
def run_indexing(index):
"""Index the objects.
- index: name of the index
    Note: Our ES docs are about 5k each. Chunking by 100 sends ~500kb
of data to ES at a time.
TODO: Use celery chords here to parallelize these indexing chunks. This
requires celery 3 (bug 825938).
"""
sys.stdout.write('Indexing apps into index: %s' % index)
qs = WebappIndexer.get_indexable()
for chunk in chunked(list(qs), 100):
index_webapp(chunk, index=index)
@task
def flag_database(new_index, old_index, alias):
"""Flags the database to indicate that the reindexing has started."""
sys.stdout.write('Flagging the database to start the reindexation')
Reindexing.objects.create(new_index=new_index, old_index=old_index,
alias=alias, start_date=datetime.datetime.now())
time.sleep(5) # Give celeryd some time to flag the DB.
@task
def unflag_database():
"""Unflag the database to indicate that the reindexing is over."""
sys.stdout.write('Unflagging the database')
Reindexing.objects.all().delete()
@task
def update_alias(new_index, old_index, alias, settings):
"""
Update the alias now that indexing is over.
We do 3 things:
1. Optimize (which also does a refresh and a flush by default).
2. Update settings to reset number of replicas.
3. Point the alias to this new index.
"""
sys.stdout.write('Optimizing, updating settings and aliases.')
# Optimize.
ES.optimize(new_index)
# Update the replicas.
ES.update_settings(new_index, settings)
# Add and remove aliases.
actions = [
{'add': {'index': new_index, 'alias': alias}}
]
if old_index:
actions.append(
{'remove': {'index': old_index, 'alias': alias}}
)
ES.update_aliases(dict(actions=actions))
@task
def output_summary():
aliases = ES.aliases(ALIAS)
sys.stdout.write(
'Reindexation done. Current Aliases configuration: %s\n' % aliases)
class Command(BaseCommand):
help = 'Reindex all ES indexes'
option_list = BaseCommand.option_list + (
make_option('--prefix', action='store',
help='Indexes prefixes, like test_',
default=''),
make_option('--force', action='store_true',
help=('Bypass the database flag that says '
'another indexation is ongoing'),
default=False),
)
def handle(self, *args, **kwargs):
"""Set up reindexing tasks.
Creates a Tasktree that creates a new indexes and indexes all objects,
then points the alias to this new index when finished.
"""
if not settings.MARKETPLACE:
raise CommandError('This command affects only marketplace and '
'should be run under Marketplace settings.')
force = kwargs.get('force', False)
prefix = kwargs.get('prefix', '')
if database_flagged() and not force:
            raise CommandError('Indexation already occurring - use --force to '
'bypass')
elif force:
unflag_database()
# The list of indexes that is currently aliased by `ALIAS`.
try:
aliases = ES.aliases(ALIAS).keys()
except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
aliases = []
old_index = aliases[0] if aliases else None
# Create a new index, using the index name with a timestamp.
new_index = timestamp_index(prefix + ALIAS)
# See how the index is currently configured.
if old_index:
try:
s = (ES.get_settings(old_index).get(old_index, {})
.get('settings', {}))
except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
s = {}
else:
s = {}
num_replicas = s.get('number_of_replicas',
settings.ES_DEFAULT_NUM_REPLICAS)
num_shards = s.get('number_of_shards', settings.ES_DEFAULT_NUM_SHARDS)
# Flag the database.
chain = flag_database.si(new_index, old_index, ALIAS)
# Create the index and mapping.
#
# Note: We set num_replicas=0 here to decrease load while re-indexing.
# In a later step we increase it which results in a more efficient bulk
# copy in Elasticsearch.
# For ES < 0.90 we manually enable compression.
chain |= create_index.si(new_index, ALIAS, {
'analysis': WebappIndexer.get_analysis(),
'number_of_replicas': 0, 'number_of_shards': num_shards,
'store.compress.tv': True, 'store.compress.stored': True,
'refresh_interval': '-1'})
# Index all the things!
chain |= run_indexing.si(new_index)
# After indexing we optimize the index, adjust settings, and point the
# alias to the new index.
chain |= update_alias.si(new_index, old_index, ALIAS, {
'number_of_replicas': num_replicas, 'refresh_interval': '5s'})
# Unflag the database.
chain |= unflag_database.si()
# Delete the old index, if any.
if old_index:
chain |= delete_index.si(old_index)
chain |= output_summary.si()
self.stdout.write('\nNew index and indexing tasks all queued up.\n')
os.environ['FORCE_INDEXING'] = '1'
try:
chain.apply_async()
finally:
del os.environ['FORCE_INDEXING']
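# --- Editorial usage note (appended; not part of the upstream command) -------
# Run under Marketplace settings like any other management command, e.g.:
#
#   ./manage.py reindex_mkt --prefix=test_ --force
#
# --prefix is prepended to the timestamped index name and --force clears a
# leftover "reindexing in progress" flag before queueing the task chain.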
| {
"content_hash": "5ef712ea608d4f3c58e609c9ad768d3e",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 31.129151291512915,
"alnum_prop": 0.6256519677572309,
"repo_name": "Joergen/zamboni",
"id": "380075ab5795f20fd444979584b96e513382d9eb",
"size": "8436",
"binary": false,
"copies": "1",
"ref": "refs/heads/uge43",
"path": "lib/es/management/commands/reindex_mkt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "608838"
},
{
"name": "JavaScript",
"bytes": "1750529"
},
{
"name": "Perl",
"bytes": "565"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6063534"
},
{
"name": "Ruby",
"bytes": "1865"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
} |
"""
@package mi.core.instrument.playback
@file mi/core/instrument/playback.py
@author Ronald Ronquillo
@brief Playback process using ZMQ messaging.
Usage:
playback datalog <module> <refdes> <event_url> <particle_url> [--allowed=<particles>] [--max_events=<events>] <files>...
playback ascii <module> <refdes> <event_url> <particle_url> [--allowed=<particles>] [--max_events=<events>] <files>...
playback chunky <module> <refdes> <event_url> <particle_url> [--allowed=<particles>] [--max_events=<events>] <files>...
playback zplsc <module> <refdes> <event_url> <particle_url> [--allowed=<particles>] [--max_events=<events>] <files>...
Options:
-h, --help Show this screen
--allowed=<particles> Comma-separated list of publishable particles
To run without installing:
python -m mi.core.instrument.playback ...
"""
import glob
import importlib
import sys
import time
from datetime import datetime
import os
import re
from docopt import docopt
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_protocol import \
MenuInstrumentProtocol,\
CommandResponseInstrumentProtocol, \
InstrumentProtocol
from mi.core.instrument.publisher import Publisher
from mi.logging import log
from ooi_port_agent.common import PacketType
from ooi_port_agent.packet import Packet, PacketHeader
from wrapper import EventKeys, encode_exception, DriverWrapper
__author__ = 'Ronald Ronquillo'
__license__ = 'Apache 2.0'
NTP_DIFF = (datetime(1970, 1, 1) - datetime(1900, 1, 1)).total_seconds()
Y2K = (datetime(2000, 1, 1) - datetime(1900, 1, 1)).total_seconds()
DATE_PATTERN = r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z?$'
DATE_MATCHER = re.compile(DATE_PATTERN)
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
def string_to_ntp_date_time(datestr):
"""
Extract an ntp date from a ISO8601 formatted date string.
@param datestr an ISO8601 formatted string containing date information
@retval an ntp date number (seconds since jan 1 1900)
@throws InstrumentParameterException if datestr cannot be formatted to
a date.
"""
if not isinstance(datestr, basestring):
raise IOError('Value %s is not a string.' % str(datestr))
if not DATE_MATCHER.match(datestr):
raise ValueError("date string not in ISO8601 format YYYY-MM-DDTHH:MM:SS.SSSSZ")
try:
        # This assumes input date strings are in UTC (=GMT)
# if there is no decimal place, add one to match the date format
if datestr.find('.') == -1:
if datestr[-1] != 'Z':
datestr += '.0Z'
else:
datestr = datestr[:-1] + '.0Z'
# if there is no trailing 'Z' on the input string add one
if datestr[-1:] != 'Z':
datestr += 'Z'
dt = datetime.strptime(datestr, DATE_FORMAT)
timestamp = (dt - datetime(1900, 1, 1)).total_seconds()
except ValueError as e:
raise ValueError('Value %s could not be formatted to a date. %s' % (str(datestr), e))
return timestamp
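# --- Editorial worked example (appended; not part of the upstream module) ----
# string_to_ntp_date_time() returns seconds since 1900-01-01, so the Y2K
# constant defined above falls straight out of it:
#
#   >>> string_to_ntp_date_time('2000-01-01T00:00:00.000Z')
#   3155673600.0                                   # == Y2K
#   >>> string_to_ntp_date_time('2000-01-01T00:00:00.000Z') - NTP_DIFF
#   946684800.0                                    # the same instant as Unix time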
class PlaybackPacket(Packet):
def get_data_length(self):
return len(self.payload)
def get_data(self):
return self.payload
def get_timestamp(self):
return self.header.time
def __repr__(self):
return repr(self.payload)
@staticmethod
def packet_from_fh(file_handle):
data_buffer = bytearray()
while True:
byte = file_handle.read(1)
if byte == '':
return None
data_buffer.append(byte)
sync_index = data_buffer.find(PacketHeader.sync)
if sync_index != -1:
# found the sync bytes, read the rest of the header
data_buffer.extend(file_handle.read(PacketHeader.header_size - len(PacketHeader.sync)))
if len(data_buffer) < PacketHeader.header_size:
return None
header = PacketHeader.from_buffer(data_buffer, sync_index)
# read the payload
payload = file_handle.read(header.payload_size)
if len(payload) == header.payload_size:
packet = PlaybackPacket(payload=payload, header=header)
return packet
class PlaybackWrapper(object):
def __init__(self, module, refdes, event_url, particle_url, reader_klass, allowed, files, max_events, handler=None):
version = DriverWrapper.get_version(module)
headers = {'sensor': refdes, 'deliveryType': 'streamed', 'version': version, 'module': module}
self.max_events = max_events
self.event_publisher = Publisher.from_url(event_url, handler, headers)
self.particle_publisher = Publisher.from_url(particle_url, handler, headers, allowed, max_events)
self.protocol = self.construct_protocol(module)
self.reader = reader_klass(files, self.got_data)
def set_header_filename(self, filename):
self.event_publisher.set_source(filename)
self.particle_publisher.set_source(filename)
def playback(self):
for index, filename in enumerate(self.reader.read()):
if filename is not None:
self.set_header_filename(filename)
if hasattr(self.protocol, 'got_filename'):
self.protocol.got_filename(filename)
if index % 1000 == 0:
self.publish()
self.publish()
if hasattr(self.particle_publisher, 'write'):
self.particle_publisher.write()
def zplsc_playback(self):
for index, filename in enumerate(self.reader.read()):
if filename:
self.set_header_filename(filename)
log.info("filename is: %s", filename)
if hasattr(self.protocol, 'got_filename'):
self.protocol.got_filename(filename)
pub_index = 0
while True:
self.publish()
pub_index = pub_index + 1
log.info("publish index is: %d", pub_index)
def got_data(self, packet):
try:
self.protocol.got_data(packet)
except KeyboardInterrupt:
raise
except Exception as e:
log.exception(e)
@staticmethod
def find_base_class(base):
targets = (MenuInstrumentProtocol, CommandResponseInstrumentProtocol, InstrumentProtocol, object)
while True:
if base in targets:
return base
base = base.__base__
def construct_protocol(self, proto_module):
module = importlib.import_module(proto_module)
if hasattr(module, 'create_playback_protocol'):
return module.create_playback_protocol(self.handle_event)
log.error('Unable to import and create playback protocol from module: %r', module)
sys.exit(1)
def publish(self):
for publisher in [self.event_publisher, self.particle_publisher]:
remaining = publisher.publish()
while remaining >= publisher._max_events:
remaining = publisher.publish()
def handle_event(self, event_type, val=None):
"""
Construct and send an asynchronous driver event.
@param event_type a DriverAsyncEvent type specifier.
@param val event value for sample and test result events.
"""
event = {
'type': event_type,
'value': val,
'time': time.time()
}
if isinstance(event[EventKeys.VALUE], Exception):
event[EventKeys.VALUE] = encode_exception(event[EventKeys.VALUE])
if event[EventKeys.TYPE] == DriverAsyncEvent.ERROR:
log.error(event)
if event[EventKeys.TYPE] == DriverAsyncEvent.SAMPLE:
if event[EventKeys.VALUE].get('stream_name') != 'raw':
# don't publish raw
self.particle_publisher.enqueue(event)
else:
self.event_publisher.enqueue(event)
class DatalogReader(object):
def __init__(self, files, callback):
self.callback = callback
self.files = []
for each in files:
self.files.extend(glob.glob(each))
self.files.sort()
if not all([os.path.isfile(f) for f in self.files]):
raise Exception('Not all files found')
self._filehandle = None
self.target_types = [PacketType.FROM_INSTRUMENT, PacketType.PA_CONFIG]
self.file_name_list = []
def read(self):
while True:
if self._filehandle is None and not self.files:
log.info('Completed reading specified port agent logs, exiting...')
raise StopIteration
if self._filehandle is None:
name = self.files.pop(0)
log.info('Begin reading: %r', name)
# yield the filename so we can pass it through to the driver
yield name
self.file_name_list.append(name)
self._filehandle = open(name, 'r')
if not self._process_packet():
self._filehandle.close()
self._filehandle = None
yield
def _process_packet(self):
packet = PlaybackPacket.packet_from_fh(self._filehandle)
if packet is None:
return False
if packet.header.packet_type in self.target_types:
self.callback(packet)
return True
class DigiDatalogAsciiReader(DatalogReader):
def __init__(self, files, callback):
self.ooi_ts_regex = re.compile(r'<OOI-TS (.+?) [TX][NS]>\r\n(.*?)<\\OOI-TS>', re.DOTALL)
self.buffer = ''
self.MAXBUF = 65535
super(DigiDatalogAsciiReader, self).__init__(files, callback)
if all((self.search_utc(f) for f in self.files)):
self.files.sort(key=self.search_utc)
# special case for RSN archived data
# if all files have date_UTC in filename then sort by that
@staticmethod
def search_utc(f):
match = re.search('(\d+T\d+_UTC)', f)
if match is None:
return None
return match.group(1)
def _process_packet(self):
chunk = self._filehandle.read(1024)
if chunk != '':
self.buffer += chunk
new_index = 0
for match in self.ooi_ts_regex.finditer(self.buffer):
payload = match.group(2)
try:
packet_time = string_to_ntp_date_time(match.group(1))
header = PacketHeader(packet_type=PacketType.FROM_INSTRUMENT,
payload_size=len(payload), packet_time=packet_time)
header.set_checksum(payload)
packet = PlaybackPacket(payload=payload, header=header)
self.callback(packet)
except ValueError:
log.error('Unable to extract timestamp from record: %r' % match.group())
new_index = match.end()
if new_index > 0:
self.buffer = self.buffer[new_index:]
if len(self.buffer) > self.MAXBUF:
self.buffer = self.buffer[-self.MAXBUF:]
return True
return False
class ChunkyDatalogReader(DatalogReader):
def _process_packet(self):
data = self._filehandle.read(1024)
if data != '':
header = PacketHeader(packet_type=PacketType.FROM_INSTRUMENT,
payload_size=len(data), packet_time=0)
header.set_checksum(data)
packet = PlaybackPacket(payload=data, header=header)
self.callback(packet)
return True
return False
class ZplscReader(DatalogReader):
def __init__(self, files, callback):
super(ZplscReader, self).__init__(files, callback)
def _process_packet(self):
for name in self.file_name_list:
data = 'downloaded file:' + name + '\n'
header = PacketHeader(packet_type=PacketType.FROM_INSTRUMENT,
payload_size=len(data), packet_time=0)
header.set_checksum(data)
packet = PlaybackPacket(payload=data, header=header)
self.callback(packet)
return False
def main():
options = docopt(__doc__)
module = options['<module>']
refdes = options['<refdes>']
event_url = options['<event_url>']
particle_url = options['<particle_url>']
files = options.get('<files>')
allowed = options.get('--allowed')
if allowed is not None:
allowed = [_.strip() for _ in allowed.split(',')]
max_events = options.get('--max_events')
if not max_events:
max_events = Publisher.DEFAULT_MAX_EVENTS
else:
max_events = int(max_events)
# when running with the profiler, files will be a string
# coerce to list
if isinstance(files, basestring):
files = [files]
zplsc_reader = False
if options['datalog']:
reader = DatalogReader
elif options['ascii']:
reader = DigiDatalogAsciiReader
elif options['chunky']:
reader = ChunkyDatalogReader
elif options['zplsc']:
reader = ZplscReader
zplsc_reader = True
else:
reader = None
wrapper = PlaybackWrapper(module, refdes, event_url, particle_url, reader, allowed, files, max_events)
if zplsc_reader:
wrapper.zplsc_playback()
else:
wrapper.playback()
if __name__ == '__main__':
main()
| {
"content_hash": "ede243f7b94a3ee04f8b33d25daffaa2",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 125,
"avg_line_length": 34.876288659793815,
"alnum_prop": 0.5979899497487438,
"repo_name": "renegelinas/mi-instrument",
"id": "041d2f912bd4f617f6239adb4e905580af112355",
"size": "13554",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/core/instrument/playback.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10013408"
}
],
"symlink_target": ""
} |
import re
from os.path import join
import setuptools
setuptools.setup(
version=re.search("__version__ = '([^']+)'",
open(join('mock', 'mock.py')).read()).group(1),
long_description=open('README.rst').read(),
)
| {
"content_hash": "abd160b889261ad2ddd66302dd12c48f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 24.2,
"alnum_prop": 0.5826446280991735,
"repo_name": "cloudera/hue",
"id": "d47345f06da697db05c825a97c8b72edb64f73bb",
"size": "242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/mock-3.0.5/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
from dashmat.core_modules.base import Module
class Status(Module):
@classmethod
def npm_deps(self):
return {
"moment": "^2.11.2"
}
| {
"content_hash": "12338c22a89edd740ef693bd5f3fe022",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 19.444444444444443,
"alnum_prop": 0.5485714285714286,
"repo_name": "realestate-com-au/dashmat",
"id": "1f26742206c84960e92abbde34d4ed190a350ed2",
"size": "175",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dashmat/core_modules/status/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1486"
},
{
"name": "HTML",
"bytes": "352"
},
{
"name": "JavaScript",
"bytes": "3053"
},
{
"name": "Python",
"bytes": "402180"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
"""
This example demonstrates the justify argument to print.
"""
from rich.console import Console
console = Console(width=20)
style = "bold white on blue"
console.print("Rich", style=style)
console.print("Rich", style=style, justify="left")
console.print("Rich", style=style, justify="center")
console.print("Rich", style=style, justify="right")
| {
"content_hash": "3c6677fb2e694d611566e1a774374c7e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 26.846153846153847,
"alnum_prop": 0.7363896848137536,
"repo_name": "willmcgugan/rich",
"id": "670988206de985adfc46daef2e924b48073161cd",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/justify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "Makefile",
"bytes": "267"
},
{
"name": "Python",
"bytes": "1130365"
}
],
"symlink_target": ""
} |
"""This file is a casapy script. Do not use it as a module.
It is also not intended to be invoked directly through pkcasascript. See
`pwkit.environments.casa.tasks.plotcal`.
"""
def in_casapy (helper, caltable=None, selectcals={}, plotoptions={},
xaxis=None, yaxis=None, figfile=None):
"""This function is run inside the weirdo casapy IPython environment! A
strange set of modules is available, and the
`pwkit.environments.casa.scripting` system sets up a very particular
environment to allow encapsulated scripting.
"""
if caltable is None:
raise ValueError ('caltable')
show_gui = (figfile is None)
cp = helper.casans.cp
helper.casans.tp.setgui (show_gui)
cp.open (caltable)
cp.selectcal (**selectcals)
cp.plotoptions (**plotoptions)
cp.plot (xaxis, yaxis)
if show_gui:
import pylab as pl
pl.show ()
else:
cp.savefig (figfile)
| {
"content_hash": "d89418f99610b19c5ed21e5864ece877",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 28.606060606060606,
"alnum_prop": 0.6673728813559322,
"repo_name": "pkgw/pwkit",
"id": "115cd592f06bc1151f9a42d9b75982d1880a1d79",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pwkit/environments/casa/cscript_plotcal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "198303"
},
{
"name": "IDL",
"bytes": "142529"
},
{
"name": "Python",
"bytes": "1498923"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import MixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_validate_request(azure_region: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/Subscriptions/{subscriptionId}/providers/Microsoft.RecoveryServices/locations/{azureRegion}/backupValidateFeatures",
) # pylint: disable=line-too-long
path_format_arguments = {
"azureRegion": _SERIALIZER.url("azure_region", azure_region, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class FeatureSupportOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
:attr:`feature_support` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@overload
def validate(
self,
azure_region: str,
parameters: _models.FeatureSupportRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.AzureVMResourceFeatureSupportResponse:
"""It will validate if given feature with resource properties is supported in service.
It will validate if given feature with resource properties is supported in service.
:param azure_region: Azure region to hit Api. Required.
:type azure_region: str
:param parameters: Feature support request object. Required.
:type parameters: ~azure.mgmt.recoveryservicesbackup.activestamp.models.FeatureSupportRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureVMResourceFeatureSupportResponse or the result of cls(response)
:rtype:
~azure.mgmt.recoveryservicesbackup.activestamp.models.AzureVMResourceFeatureSupportResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def validate(
self, azure_region: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
) -> _models.AzureVMResourceFeatureSupportResponse:
"""It will validate if given feature with resource properties is supported in service.
It will validate if given feature with resource properties is supported in service.
:param azure_region: Azure region to hit Api. Required.
:type azure_region: str
:param parameters: Feature support request object. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureVMResourceFeatureSupportResponse or the result of cls(response)
:rtype:
~azure.mgmt.recoveryservicesbackup.activestamp.models.AzureVMResourceFeatureSupportResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def validate(
self, azure_region: str, parameters: Union[_models.FeatureSupportRequest, IO], **kwargs: Any
) -> _models.AzureVMResourceFeatureSupportResponse:
"""It will validate if given feature with resource properties is supported in service.
It will validate if given feature with resource properties is supported in service.
:param azure_region: Azure region to hit Api. Required.
:type azure_region: str
        :param parameters: Feature support request object. Is either a model type or an IO type.
Required.
:type parameters: ~azure.mgmt.recoveryservicesbackup.activestamp.models.FeatureSupportRequest
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AzureVMResourceFeatureSupportResponse or the result of cls(response)
:rtype:
~azure.mgmt.recoveryservicesbackup.activestamp.models.AzureVMResourceFeatureSupportResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.AzureVMResourceFeatureSupportResponse]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "FeatureSupportRequest")
request = build_validate_request(
azure_region=azure_region,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("AzureVMResourceFeatureSupportResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {"url": "/Subscriptions/{subscriptionId}/providers/Microsoft.RecoveryServices/locations/{azureRegion}/backupValidateFeatures"} # type: ignore
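    # --- Editorial usage sketch (appended; not generated code) ---------------
    # Typical call through the client attribute named in the class docstring;
    # the credential/client construction below is an assumption made for
    # illustration only.
    #
    #   client = RecoveryServicesBackupClient(credential, subscription_id)
    #   feature_request = ...  # a models.FeatureSupportRequest (sub)class instance
    #   response = client.feature_support.validate(
    #       azure_region="westus", parameters=feature_request)
    #   # response is an AzureVMResourceFeatureSupportResponse, per the docstring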
| {
"content_hash": "22990018329063de86141aeaea6c2ccf",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 166,
"avg_line_length": 44.34634146341463,
"alnum_prop": 0.6819931800681993,
"repo_name": "Azure/azure-sdk-for-python",
"id": "79f530ef9c237e14cf8eaecd7d732122a25a586c",
"size": "9591",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_feature_support_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |