text
stringlengths
4
1.02M
meta
dict
import json from questionnaire.models import SimpleQuestion, MCQ, Quiz JSON_FILEPATH = 'questionnaire/questions/' JSON_FILENAME = 'questions.json' def addquestions(filename=JSON_FILEPATH + JSON_FILENAME): with open(filename) as jsonfile: quesjson = json.load(jsonfile) questions = quesjson['QUESTIONS'] mcqs = quesjson['MCQS'] for question in questions: ques = SimpleQuestion(question=question['question'], slug=question['slug'], hints=question['hints'], answer=question['answer'], score=question['score'], quiz=Quiz.objects.get(slug=question['quiz-slug'])) ques.save() for mcq in mcqs: ques = MCQ.create(question=mcq['question'], slug=mcq['slug'], hints=mcq['hints'], choices=mcq['choices'], correct=int(mcq['correct']), score=mcq['score'], quiz=Quiz.objects.get(slug=mcq['quiz-slug'])) ques.save() return filename """ example: add such object to questions.json { "QUESTIONS": [ { "question": "What is the full form of IP?", "slug": "IP-sq", "hints": "Seriously?!", "answer": "Internet Protocol", "score": 10, "quiz-slug": "test_quiz" } ], "MCQS": [ { "question": "What is the full form of IP?", "slug": "IP-mcq", "hints": "Seriously?!", "choices": [ "Internet Programs", "Internet Protocol", "Internal Protocol", "International Protocol" ], "correct": 2, "score": 10, "quiz-slug": "test_quiz" } ] } """
{ "content_hash": "f318dad7b6648621eba561e2682e67ad", "timestamp": "", "source": "github", "line_count": 65, "max_line_length": 84, "avg_line_length": 29.276923076923076, "alnum_prop": 0.4813452443510247, "repo_name": "super1337/Super1337-CTF", "id": "fe01f764bd5010e9e4013a31338b3d7949b61507", "size": "1903", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "questionnaire/questions/questions.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "151967" }, { "name": "HTML", "bytes": "77118" }, { "name": "JavaScript", "bytes": "151922" }, { "name": "Python", "bytes": "41684" } ], "symlink_target": "" }
""" Calculate the average protein lengths from the blast output files. """ import os import sys import argparse from roblib import bcolors, median def av_protein_lengths(sample, blastfile, fractionout, summaryout, searchtype): """ Calculate the average length of the best hit of all the proteins """ q = {} av = [] sys.stderr.write(f"{bcolors.GREEN}Average protein lengths for {sample} and {searchtype}{bcolors.ENDC}\n") with open(blastfile, 'r') as f: with open(fractionout, 'w') as out: for l in f: p = l.strip().split("\t") if p[0] in q: continue q[p[0]] = int(p[12])/int(p[13]) av.append(q[p[0]]) out.write(f"{p[0]}\t{q[p[0]]}\n") with open(summaryout, 'w') as out: out.write(f"{sample}\tAverage {searchtype} protein lengths\t") out.write("[num orfs, median proportional length, average proportional length]\t") out.write(f"{len(av)}\t{median(av)}\t{sum(av)/len(av)}\n") if __name__ == '__main__': parser = argparse.ArgumentParser(description=" ") parser.add_argument('-s', help='sample name used in output', required=True) parser.add_argument('-b', help='blast m8 file', required=True) parser.add_argument('-f', help='fractions output file', required=True) parser.add_argument('-o', help='summary output file', required=True) parser.add_argument('-t', help='search type (e.g. phage, bacteria) (used in output)', required=True) args = parser.parse_args() av_protein_lengths(args.s, args.b, args.f, args.o, args.t)
{ "content_hash": "3e5194dd3af76cba0e66b52fad57429f", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 109, "avg_line_length": 37.97674418604651, "alnum_prop": 0.6117575015309247, "repo_name": "linsalrob/EdwardsLab", "id": "03e2f1634290ca0e1ec0fec8f6abec265bfd8dec", "size": "1633", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "phage/phage_quality_assessment_scripts/av_protein_lengths.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "227276" }, { "name": "C++", "bytes": "21508" }, { "name": "Jupyter Notebook", "bytes": "490830" }, { "name": "Makefile", "bytes": "936" }, { "name": "Perl", "bytes": "280086" }, { "name": "Python", "bytes": "1102051" }, { "name": "Shell", "bytes": "13759" } ], "symlink_target": "" }
from pifx.client import LIFXWebAPIClient class PIFX: """Main PIFX class""" def __init__(self, api_key, http_endpoint=None): self.client = LIFXWebAPIClient(api_key, http_endpoint) def list_lights(self, selector='all'): """Given a selector (defaults to all), return a list of lights. Without a selector provided, return list of all lights. """ return self.client.perform_request( method='get', endpoint='lights/{}', endpoint_args=[selector], parse_data=False) def set_state(self, selector='all', power=None, color=None, brightness=None, duration=None): """Given a selector (defaults to all), set the state of a light. Selector can be based on id, scene_id, group_id, label, etc. Returns list of lightbulb statuses if successful. See http://api.developer.lifx.com/v1/docs/selectors selector: required String The selector to limit which lights will run the effect. power: String e.g "on" or "off" color: String e.g #ff0000 or "red" Color to set selected bulbs. Hex color code, color name, saturation percentage, hue, RGB, etc. See http://api.developer.lifx.com/v1/docs/colors brightness: Double e.g 0.5 Set brightness level from 0 to 1 duration: Double e.g 10 Setting transition time, in seconds, from 0.0 to 3155760000.0 (100 years). """ argument_tuples = [ ('power', power), ('color', color), ('brightness', brightness), ('duration', duration) ] return self.client.perform_request( method='put', endpoint='lights/{}/state', endpoint_args=[selector], argument_tuples=argument_tuples) def state_delta(self, selector='all', power=None, duration=1.0, infrared=None, hue=None, saturation=None, brightness=None, kelvin=None): """Given a state delta, apply the modifications to lights' state over a given period of time. selector: required String The selector to limit which lights are controlled. power: String The power state you want to set on the selector. on or off duration: Double How long in seconds you want the power action to take. Range: 0.0 – 3155760000.0 (100 years) infrared: Double The maximum brightness of the infrared channel. hue: Double Rotate the hue by this angle in degrees. saturation: Double Change the saturation by this additive amount; the resulting saturation is clipped to [0, 1]. brightness: Double Change the brightness by this additive amount; the resulting brightness is clipped to [0, 1]. kelvin: Double Change the kelvin by this additive amount; the resulting kelvin is clipped to [2500, 9000]. """ argument_tuples = [ ("power", power), ("duration", duration), ("infrared", infrared), ("hue", hue), ("saturation", saturation), ("brightness", brightness), ("kelvin", kelvin) ] return self.client.perform_request( method='post', endpoint='lights/{}/state/delta', endpoint_args=[selector], argument_tuples=argument_tuples) def toggle_power(self, selector='all', duration=1.0): """Given a selector and transition duration, toggle lights (on/off)""" argument_tuples = [ ("duration", duration) ] return self.client.perform_request( method='post', endpoint='lights/{}/toggle', endpoint_args=[selector], argument_tuples=argument_tuples) def breathe_lights(self, color, selector='all', from_color=None, period=1.0, cycles=1.0, persist=False, power_on=True, peak=0.5): """Perform breathe effect on lights. selector: String The selector to limit which lights will run the effect. default: all color: required String Color attributes to use during effect. See set_state for more. from_color: String The color to start the effect from. See set_state for more. default: current bulb color period: Double The time in seconds for one cyles of the effect. default: 1.0 cycles: Double The number of times to repeat the effect. default: 1.0 persist: Boolean If false set the light back to its previous value when effect ends, if true leave the last effect color. default: false power_on: Boolean If true, turn the bulb on if it is not already on. default: true peak: String Defines where in a period the target color is at its maximum. Minimum 0.0, maximum 1.0. default: 0.5 """ argument_tuples = [ ("color", color), ("from_color", from_color), ("period", period), ("cycles", cycles), ("persist", persist), ("power_on", power_on), ("peak", peak), ] return self.client.perform_request( method='post', endpoint='lights/{}/effects/breathe', endpoint_args=[selector], argument_tuples=argument_tuples) def pulse_lights(self, color, selector='all', from_color=None, period=1.0, cycles=1.0, persist=False, power_on=True): """Perform pulse effect on lights. selector: String The selector to limit which lights will run the effect. default: all color: required String Color attributes to use during effect. See set_state for more. from_color: String The color to start the effect from. See set_state for more. default: current bulb color period: Double The time in seconds for one cyles of the effect. default: 1.0 cycles: Double The number of times to repeat the effect. default: 1.0 persist: Boolean If false set the light back to its previous value when effect ends, if true leave the last effect color. default: false power_on: Boolean If true, turn the bulb on if it is not already on. default: true """ argument_tuples = [ ("color", color), ("from_color", from_color), ("period", period), ("cycles", cycles), ("persist", persist), ("power_on", power_on), ] return self.client.perform_request( method='post', endpoint='lights/{}/effects/pulse', endpoint_args=[selector], argument_tuples=argument_tuples) def cycle_lights(self, states, defaults, direction='forward', selector='all'): """Cycle through list of effects. Provide array states as a list of dictionaries with set_state arguments. See http://api.developer.lifx.com/docs/cycle selector: String The selector to limit which lights will run the effect. default: all states: required List of Dicts List of arguments, named as per set_state. Must have 2 to 5 entries. defaults: Object Default values to use when not specified in each states[] object. Argument names as per set_state. direction: String Direction in which to cycle through the list. Can be forward or backward default: forward """ argument_tuples = [ ("states", states), ("defaults", defaults), ("direction", direction) ] return self.client.perform_request( method='post', endpoint='lights/{}/cycle', endpoint_args=[selector], argument_tuples=argument_tuples, json_body=True) def list_scenes(self): """Return a list of scenes. See http://api.developer.lifx.com/docs/list-scenes """ return self.client.perform_request( method='get', endpoint='scenes', parse_data=False) def activate_scene(self, scene_uuid, duration=1.0): """Activate a scene. See http://api.developer.lifx.com/docs/activate-scene scene_uuid: required String The UUID for the scene you wish to activate duration: Double The time in seconds to spend performing the scene transition. default: 1.0 """ argument_tuples = [ ("duration", duration), ] return self.client.perform_request( method='put', endpoint='scenes/scene_id:{}/activate', endpoint_args=[scene_uuid], argument_tuples=argument_tuples)
{ "content_hash": "96e9420bdcb344c146f6043c82443102", "timestamp": "", "source": "github", "line_count": 278, "max_line_length": 84, "avg_line_length": 32.902877697841724, "alnum_prop": 0.5746146277468023, "repo_name": "cydrobolt/pifx", "id": "fb36ef0650d27f91e47b601c7cb7a4b5ac41ea2c", "size": "9779", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pifx/core.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "19870" } ], "symlink_target": "" }
from openstack_dashboard.test.integration_tests.pages import basepage class SharesPage(basepage.BaseNavigationPage): def __init__(self, driver, conf): super(SharesPage, self).__init__(driver, conf) self._page_title = "Shares"
{ "content_hash": "73f7bd16a08338eaf8e2bbdf8b84a235", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 69, "avg_line_length": 35.42857142857143, "alnum_prop": 0.7056451612903226, "repo_name": "openstack/manila-ui", "id": "fd7cb2625d4f5d7305c8e9f9ea5d9d7634567cbd", "size": "824", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "manila_ui/tests/integration/pages/project/share/sharespage.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "72666" }, { "name": "Python", "bytes": "756045" }, { "name": "Shell", "bytes": "20977" } ], "symlink_target": "" }
import time from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer, api_version_constraint, JMESPathCheck, JMESPathCheckExists, NoneCheck) from azure.cli.core.profiles import ResourceType @api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01') class StorageQueueScenarioTests(ScenarioTest): @ResourceGroupPreparer() @StorageAccountPreparer(sku='Standard_RAGRS') def test_storage_queue_general_scenario(self, resource_group, storage_account): account_key = self.get_account_key(resource_group, storage_account) self.set_env('AZURE_STORAGE_ACCOUNT', storage_account) self.set_env('AZURE_STORAGE_KEY', account_key) queue = self.create_random_name('queue', 24) self.cmd('storage queue create -n {} --fail-on-exist --metadata a=b c=d'.format(queue), checks=JMESPathCheck('created', True)) self.cmd('storage queue exists -n {}'.format(queue), checks=JMESPathCheck('exists', True)) res = self.cmd('storage queue list').get_output_in_json() self.assertIn(queue, [x['name'] for x in res], 'The newly created queue is not listed.') from datetime import datetime, timedelta expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ') sas = self.cmd('storage queue generate-sas -n {} --permissions r --expiry {}'.format(queue, expiry)).output self.assertIn('sig', sas, 'The sig segment is not in the sas {}'.format(sas)) self.cmd('storage queue metadata show -n {}'.format(queue), checks=[ JMESPathCheck('a', 'b'), JMESPathCheck('c', 'd') ]) self.cmd('storage queue metadata update -n {} --metadata e=f g=h'.format(queue)) self.cmd('storage queue metadata show -n {}'.format(queue), checks=[ JMESPathCheck('e', 'f'), JMESPathCheck('g', 'h') ]) # Queue ACL policy self.cmd('storage queue policy list -q {}'.format(queue), checks=NoneCheck()) start_time = '2016-01-01T00:00Z' expiry = '2016-05-01T00:00Z' policy = self.create_random_name('policy', 16) self.cmd('storage queue policy create -q {} -n {} --permission raup --start {} --expiry {}' .format(queue, policy, start_time, expiry)) acl = self.cmd('storage queue policy list -q {}'.format(queue)).get_output_in_json() self.assertIn(policy, acl) self.assertEqual(1, len(acl)) returned_permissions = self.cmd('storage queue policy show -q {} -n {}'.format(queue, policy), checks=[ JMESPathCheck('start', '2016-01-01T00:00:00+00:00'), JMESPathCheck('expiry', '2016-05-01T00:00:00+00:00'), JMESPathCheckExists('permission') ]).get_output_in_json()['permission'] self.assertIn('r', returned_permissions) self.assertIn('p', returned_permissions) self.assertIn('a', returned_permissions) self.assertIn('u', returned_permissions) self.cmd('storage queue policy update -q {} -n {} --permission ra'.format(queue, policy)) self.cmd('storage queue policy show -q {} -n {}'.format(queue, policy), checks=JMESPathCheck('permission', 'ra')) self.cmd('storage queue policy delete -q {} -n {}'.format(queue, policy)) self.cmd('storage queue policy list -q {}'.format(queue), checks=NoneCheck()) # Queue message operation self.cmd('storage message put -q {} --content "test message"'.format(queue)) self.cmd('storage message peek -q {}'.format(queue), checks=JMESPathCheck('[0].content', 'test message')) first_message = self.cmd('storage message get -q {}'.format(queue), checks=JMESPathCheck('length(@)', 1)).get_output_in_json()[0] self.cmd('storage message update -q {} --id {} --pop-receipt {} --visibility-timeout 1 ' '--content "new message!"'.format(queue, first_message['id'], first_message['popReceipt'])) time.sleep(2) # ensures message should be back in queue self.cmd('storage message peek -q {}'.format(queue), checks=JMESPathCheck('[0].content', 'new message!')) self.cmd('storage message put -q {} --content "second message"'.format(queue)) self.cmd('storage message put -q {} --content "third message"'.format(queue)) self.cmd('storage message peek -q {} --num-messages 32'.format(queue), checks=JMESPathCheck('length(@)', 3)) third_message = self.cmd('storage message get -q {}'.format(queue)).get_output_in_json()[0] self.cmd('storage message delete -q {} --id {} --pop-receipt {}' .format(queue, third_message['id'], third_message['popReceipt'])) self.cmd('storage message peek -q {} --num-messages 32'.format(queue), checks=JMESPathCheck('length(@)', 2)) self.cmd('storage message clear -q {}'.format(queue)) self.cmd('storage message peek -q {} --num-messages 32'.format(queue), checks=NoneCheck()) # verify delete operation self.cmd('storage queue delete -n {} --fail-not-exist'.format(queue), checks=JMESPathCheck('deleted', True)) self.cmd('storage queue exists -n {}'.format(queue), checks=JMESPathCheck('exists', False)) # check status of the queue queue_status = self.cmd('storage queue stats').get_output_in_json() self.assertIn(queue_status['geoReplication']['status'], ('live', 'unavailable')) def get_account_key(self, group, name): return self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv' .format(name, group)).output if __name__ == '__main__': import unittest unittest.main()
{ "content_hash": "a12a9d31676299f74e303810dc9a15bb", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 115, "avg_line_length": 48.577235772357724, "alnum_prop": 0.6046861924686192, "repo_name": "yugangw-msft/azure-cli", "id": "7abeaf28c355ef7446cbf4d570d50989139a14b8", "size": "6321", "binary": false, "copies": "5", "ref": "refs/heads/dev", "path": "src/azure-cli/azure/cli/command_modules/storage/tests/hybrid_2019_03_01/test_storage_queue_scenarios.py", "mode": "33188", "license": "mit", "language": [ { "name": "ANTLR", "bytes": "5355" }, { "name": "Batchfile", "bytes": "14110" }, { "name": "Bicep", "bytes": "1679" }, { "name": "C#", "bytes": "1971" }, { "name": "C++", "bytes": "275" }, { "name": "Dockerfile", "bytes": "8427" }, { "name": "HTML", "bytes": "794" }, { "name": "JavaScript", "bytes": "1404" }, { "name": "Jupyter Notebook", "bytes": "389" }, { "name": "PowerShell", "bytes": "1781" }, { "name": "Python", "bytes": "24270340" }, { "name": "Rich Text Format", "bytes": "12032" }, { "name": "Roff", "bytes": "1036959" }, { "name": "Shell", "bytes": "56023" }, { "name": "TSQL", "bytes": "1145" } ], "symlink_target": "" }
import os import sys from mock import patch import pytest from decouple import Config, RepositoryEnv, UndefinedValueError # Useful for very coarse version differentiation. PY3 = sys.version_info[0] == 3 if PY3: from io import StringIO else: from StringIO import StringIO ENVFILE = ''' KeyTrue=True KeyOne=1 KeyYes=yes KeyOn=on KeyFalse=False KeyZero=0 KeyNo=no KeyOff=off #CommentedKey=None PercentNotEscaped=%% NoInterpolation=%(KeyOff)s ''' @pytest.fixture(scope='module') def config(): with patch('decouple.open', return_value=StringIO(ENVFILE), create=True): return Config(RepositoryEnv('.env')) def test_env_comment(config): with pytest.raises(UndefinedValueError): config('CommentedKey') def test_env_percent_not_escaped(config): assert '%%' == config('PercentNotEscaped') def test_env_no_interpolation(config): assert '%(KeyOff)s' == config('NoInterpolation') def test_env_bool_true(config): assert True == config('KeyTrue', cast=bool) assert True == config('KeyOne', cast=bool) assert True == config('KeyYes', cast=bool) assert True == config('KeyOn', cast=bool) def test_env_bool_false(config): assert False == config('KeyFalse', cast=bool) assert False == config('KeyZero', cast=bool) assert False == config('KeyNo', cast=bool) assert False == config('KeyOff', cast=bool) def test_env_os_environ(config): os.environ['KeyFallback'] = 'On' assert True == config('KeyTrue', cast=bool) assert True == config('KeyFallback', cast=bool) del os.environ['KeyFallback'] def test_env_undefined(config): with pytest.raises(UndefinedValueError): config('UndefinedKey') def test_env_default_none(config): assert None is config('UndefinedKey', default=None)
{ "content_hash": "bf6e2c3c29641f8d647e6a0ad7194981", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 77, "avg_line_length": 24.63888888888889, "alnum_prop": 0.7046223224351748, "repo_name": "henriquebastos/django-decouple", "id": "1d35d301c65c90df82ef14f98140db466e5e9fcb", "size": "1790", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_env.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "12416" } ], "symlink_target": "" }
from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import addresses from . import neighbors from . import unnumbered from . import config from . import state from . import autoconf class ipv6(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv6. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Parameters for the IPv6 address family. """ __slots__ = ( "_path_helper", "_extmethods", "__addresses", "__neighbors", "__unnumbered", "__config", "__state", "__autoconf", ) _yang_name = "ipv6" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__addresses = YANGDynClass( base=addresses.addresses, is_container="container", yang_name="addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) self.__neighbors = YANGDynClass( base=neighbors.neighbors, is_container="container", yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) self.__unnumbered = YANGDynClass( base=unnumbered.unnumbered, is_container="container", yang_name="unnumbered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) self.__autoconf = YANGDynClass( base=autoconf.autoconf, is_container="container", yang_name="autoconf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip-ext", defining_module="openconfig-if-ip-ext", yang_type="container", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return ["interfaces", "interface", "subinterfaces", "subinterface", "ipv6"] def _get_addresses(self): """ Getter method for addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses (container) YANG Description: Enclosing container for address list """ return self.__addresses def _set_addresses(self, v, load=False): """ Setter method for addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/addresses (container) If this variable is read-only (config: false) in the source YANG file, then _set_addresses is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_addresses() directly. YANG Description: Enclosing container for address list """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=addresses.addresses, is_container="container", yang_name="addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """addresses must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=addresses.addresses, is_container='container', yang_name="addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""", } ) self.__addresses = t if hasattr(self, "_set"): self._set() def _unset_addresses(self): self.__addresses = YANGDynClass( base=addresses.addresses, is_container="container", yang_name="addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) def _get_neighbors(self): """ Getter method for neighbors, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors (container) YANG Description: Enclosing container for list of IPv6 neighbors """ return self.__neighbors def _set_neighbors(self, v, load=False): """ Setter method for neighbors, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors (container) If this variable is read-only (config: false) in the source YANG file, then _set_neighbors is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_neighbors() directly. YANG Description: Enclosing container for list of IPv6 neighbors """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=neighbors.neighbors, is_container="container", yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """neighbors must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=neighbors.neighbors, is_container='container', yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""", } ) self.__neighbors = t if hasattr(self, "_set"): self._set() def _unset_neighbors(self): self.__neighbors = YANGDynClass( base=neighbors.neighbors, is_container="container", yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) def _get_unnumbered(self): """ Getter method for unnumbered, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/unnumbered (container) YANG Description: Top-level container for setting unnumbered interfaces. Includes reference the interface that provides the address information """ return self.__unnumbered def _set_unnumbered(self, v, load=False): """ Setter method for unnumbered, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/unnumbered (container) If this variable is read-only (config: false) in the source YANG file, then _set_unnumbered is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_unnumbered() directly. YANG Description: Top-level container for setting unnumbered interfaces. Includes reference the interface that provides the address information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=unnumbered.unnumbered, is_container="container", yang_name="unnumbered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """unnumbered must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=unnumbered.unnumbered, is_container='container', yang_name="unnumbered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""", } ) self.__unnumbered = t if hasattr(self, "_set"): self._set() def _unset_unnumbered(self): self.__unnumbered = YANGDynClass( base=unnumbered.unnumbered, is_container="container", yang_name="unnumbered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) def _get_config(self): """ Getter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/config (container) YANG Description: Top-level config data for the IPv6 interface """ return self.__config def _set_config(self, v, load=False): """ Setter method for config, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/config (container) If this variable is read-only (config: false) in the source YANG file, then _set_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_config() directly. YANG Description: Top-level config data for the IPv6 interface """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """config must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""", } ) self.__config = t if hasattr(self, "_set"): self._set() def _unset_config(self): self.__config = YANGDynClass( base=config.config, is_container="container", yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) def _get_state(self): """ Getter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/state (container) YANG Description: Top-level operational state data for the IPv6 interface """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: Top-level operational state data for the IPv6 interface """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip", defining_module="openconfig-if-ip", yang_type="container", is_config=True, ) def _get_autoconf(self): """ Getter method for autoconf, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf (container) YANG Description: Top-level container for IPv6 autoconf """ return self.__autoconf def _set_autoconf(self, v, load=False): """ Setter method for autoconf, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf (container) If this variable is read-only (config: false) in the source YANG file, then _set_autoconf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autoconf() directly. YANG Description: Top-level container for IPv6 autoconf """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=autoconf.autoconf, is_container="container", yang_name="autoconf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip-ext", defining_module="openconfig-if-ip-ext", yang_type="container", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """autoconf must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=autoconf.autoconf, is_container='container', yang_name="autoconf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip-ext', defining_module='openconfig-if-ip-ext', yang_type='container', is_config=True)""", } ) self.__autoconf = t if hasattr(self, "_set"): self._set() def _unset_autoconf(self): self.__autoconf = YANGDynClass( base=autoconf.autoconf, is_container="container", yang_name="autoconf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/interfaces/ip-ext", defining_module="openconfig-if-ip-ext", yang_type="container", is_config=True, ) addresses = __builtin__.property(_get_addresses, _set_addresses) neighbors = __builtin__.property(_get_neighbors, _set_neighbors) unnumbered = __builtin__.property(_get_unnumbered, _set_unnumbered) config = __builtin__.property(_get_config, _set_config) state = __builtin__.property(_get_state, _set_state) autoconf = __builtin__.property(_get_autoconf, _set_autoconf) _pyangbind_elements = OrderedDict( [ ("addresses", addresses), ("neighbors", neighbors), ("unnumbered", unnumbered), ("config", config), ("state", state), ("autoconf", autoconf), ] )
{ "content_hash": "d9cb2801bc3d66167d55dd3eae13e5e3", "timestamp": "", "source": "github", "line_count": 582, "max_line_length": 377, "avg_line_length": 39.02061855670103, "alnum_prop": 0.5803170409511229, "repo_name": "napalm-automation/napalm-yang", "id": "8e5d75627cbbb0ab285cf23000bd38f5c3b268aa", "size": "22734", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv6/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "370237" }, { "name": "Jupyter Notebook", "bytes": "152135" }, { "name": "Makefile", "bytes": "1965" }, { "name": "Python", "bytes": "105688785" }, { "name": "Roff", "bytes": "1632" } ], "symlink_target": "" }
import sys import unittest import re import os.path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..')) from Exscript.emulators import CommandSet class CommandSetTest(unittest.TestCase): CORRELATE = CommandSet def testConstructor(self): CommandSet() CommandSet(strict=True) CommandSet(strict=False) def testAdd(self): cs = CommandSet() self.assertRaises(Exception, cs.eval, 'foo') cs = CommandSet(strict=False) self.assertEqual(cs.eval('foo'), None) cs = CommandSet(strict=True) self.assertRaises(Exception, cs.eval, 'foo') cs.add('foo', 'bar') self.assertEqual(cs.eval('foo'), 'bar') def sayhello(cmd): return 'hello' cs.add('hi', sayhello) self.assertEqual(cs.eval('hi'), 'hello') def testAddFromFile(self): pass # FIXME def testEval(self): pass # See testAdd() def suite(): return unittest.TestLoader().loadTestsFromTestCase(CommandSetTest) if __name__ == '__main__': unittest.TextTestRunner(verbosity=2).run(suite())
{ "content_hash": "00b338f6da9ee855fc6a4a94aecf2fa3", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 77, "avg_line_length": 25.08888888888889, "alnum_prop": 0.6182462356067316, "repo_name": "knipknap/exscript", "id": "68464eef018ba358c76a107ae1501a8d2785cbf2", "size": "1129", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/Exscript/emulators/CommandSetTest.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1844" }, { "name": "Python", "bytes": "848571" }, { "name": "Roff", "bytes": "10849" }, { "name": "Shell", "bytes": "1418" } ], "symlink_target": "" }
"""Creates a survey-distribution class for interfacing with Qualtrics API v3 This module creates a class for encapsulating a Qualtrics Survey Distribution based upon a Qualtrics Mailing List object, Qualtrics Message id, Qualtrics Survey id, and email settings for send date, from name, reply-to email address, subject, and from email address, which defaults to "[email protected]" """ import requests from .qualtrics_mailing_list import QualtricsMailingList class QualtricsDistribution(object): """Survey-Distribution Class for interfacing with Qualtrics API v3""" def __init__( self, mailing_list: QualtricsMailingList, message_id: str, survey_id: str, send_date: str, from_name: str, reply_email: str, subject: str, *, from_email: str = "[email protected]", ): """Initializes a Qualtrics survey-distribution object Args: mailing_list: a QualtricsMailingList object with initialized Qualtrics Account sub-object message_id: a Qualtrics message id; see https://api.qualtrics.com/ docs/finding-qualtrics-ids survey_id: a Qualtrics survey id; see https://api.qualtrics.com/ docs/finding-qualtrics-ids send_date: the send datetime for the survey distribution being created in ISO 8601 format; see https://api.qualtrics.com/ docs/dates-and-times from_name: the from name for the survey distribution being created reply_email: the reply-to email address for the survey distribution being created subject: the email subject for the survey distribution being created from_email: the from name for the survey distribution being created, defaulting to the Qualtrics-supplied [email protected]; see https://www.qualtrics.com/support/survey-platform/ distributions-module/email-distribution/emails/ using-a-custom-from-address/ """ self.mailing_list = mailing_list self.message_id = message_id self.survey_id = survey_id self.send_date = send_date self.from_name = from_name self.reply_email = reply_email self.subject = subject self.from_email = from_email # make Qualtrics API v3 call to create survey distribution request_response = requests.request( "POST", f"https://{self.mailing_list.account.data_center}.qualtrics.com" f"/API/v3/distributions/", headers={ "x-api-token": self.mailing_list.account.api_token, "Content-Type": "application/json", }, json={ "surveyLink": { "surveyId": self.survey_id, }, "header": { "fromEmail": self.from_email, "fromName": self.from_name, "replyToEmail": self.reply_email, "subject": self.subject, }, "message": { "libraryId": self.mailing_list.library_id, "messageId": self.message_id, }, "recipients": { "mailingListId": self.mailing_list.id, }, "sendDate": self.send_date, } ) # extract distribution id from HTTP response self.id = request_response.json()["result"]["id"] @property def details(self) -> dict: """Returns survey-distribution details without caching""" request_response = requests.request( "GET", f"https://{self.mailing_list.account.data_center}.qualtrics.com" f"/API/v3/distributions/{self.id}?surveyId={self.survey_id}", headers={ "x-api-token": self.mailing_list.account.api_token }, ) return request_response.json()['result']
{ "content_hash": "e24beaa94a29ec6297a6ff585c6c8742", "timestamp": "", "source": "github", "line_count": 106, "max_line_length": 80, "avg_line_length": 38.89622641509434, "alnum_prop": 0.579674993936454, "repo_name": "KaiAnalytics/qualtrics-mailer", "id": "14b471cf7e05dbb876edfa07d72a9b7d4be104c1", "size": "4123", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "qualtrics_mailer/qualtrics_distribution.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "29055" } ], "symlink_target": "" }
""" Resystem Scheduling Service. Released under New BSD License. Copyright © 2015, Vadim Markovtsev :: Angry Developers LLC All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Angry Developers LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VADIM MARKOVTSEV BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from setuptools import setup import os def parse_requirements(): path = os.path.join(os.path.dirname(__file__), "requirements.txt") reqs = [] with open(path, "r") as fin: for r in fin.read().split("\n"): r = r.strip() if r.startswith("#") or not r: continue if r.startswith("git+"): print("Warning: git dependencies cannot be used in setuptools " "(%s)" % r) continue if not r.startswith("-r"): reqs.append(r) return reqs setup( name="res-scheduling", description="Resystem Scheduling Service", version="1.0.2", license="New BSD", author="Vadim Markovtsev", author_email="[email protected]", url="https://github.com/AngryDevelopersLLC/res-scheduler", download_url='https://github.com/AngryDevelopersLLC/res-scheduler', packages=["res.scheduling"], install_requires=parse_requirements(), package_data={"": [ 'res/scheduling/requirements/base.txt', 'res/scheduling/res_scheduling.service', 'res/scheduling/run.sh']}, classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: POSIX", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", ] )
{ "content_hash": "faf673a45e42750e4bca2cc09b89113d", "timestamp": "", "source": "github", "line_count": 74, "max_line_length": 79, "avg_line_length": 41.71621621621622, "alnum_prop": 0.68837058632977, "repo_name": "AngryDevelopersLLC/res-scheduler", "id": "99a7ca8628fdec72873ef9263ef8cee5b1b43b10", "size": "3088", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "45208" }, { "name": "Shell", "bytes": "224" } ], "symlink_target": "" }
from __future__ import print_function from __future__ import absolute_import import os import tct import sys # import PIL params = tct.readjson(sys.argv[1]) binabspath = sys.argv[2] facts = tct.readjson(params['factsfile']) milestones = tct.readjson(params['milestonesfile']) reason = '' resultfile = params['resultfile'] result = tct.readjson(resultfile) loglist = result['loglist'] = result.get('loglist', []) toolname = params['toolname'] toolname_pure = params['toolname_pure'] workdir = params['workdir'] exitcode = CONTINUE = 0 # ================================================== # Make a copy of milestones for later inspection? # -------------------------------------------------- if 0 or milestones.get('debug_always_make_milestones_snapshot'): tct.make_snapshot_of_milestones(params['milestonesfile'], sys.argv[1]) # ================================================== # Helper functions # -------------------------------------------------- def lookup(D, *keys, **kwdargs): result = tct.deepget(D, *keys, **kwdargs) loglist.append((keys, result)) return result # ================================================== # define # -------------------------------------------------- masterdoc_manual_html_gifs_fixed = None xeq_name_cnt = 0 # ================================================== # Check params # -------------------------------------------------- if exitcode == CONTINUE: loglist.append('CHECK PARAMS') masterdoc_manual_000_html = lookup(milestones, 'masterdoc_manual_000_html') TheProjectBuildOpenOffice2Rest = lookup(milestones, 'TheProjectBuildOpenOffice2Rest') if not (masterdoc_manual_000_html and TheProjectBuildOpenOffice2Rest): CONTINUE = -2 reason = 'Bad PARAMS or nothing to do' if exitcode == CONTINUE: loglist.append('PARAMS are ok') else: loglist.append('Bad PARAMS or nothing to do') # ================================================== # work # -------------------------------------------------- if exitcode == CONTINUE: masterdoc_manual_html_gifs_fixed = os.path.join(TheProjectBuildOpenOffice2Rest, 'manual-001-gifs-fixed.html') L = [] for fname in os.listdir(TheProjectBuildOpenOffice2Rest): if fname.lower().startswith( 'manual_html_') and fname.lower().endswith('.gif'): L.append(fname) if L: for fname in L: gifFile = os.path.join(TheProjectBuildOpenOffice2Rest, fname) im = PIL.Image.open(gifFile) pngFile = gifFile + '.png' im.save(pngFile) with open(masterdoc_manual_000_html, 'rb') as f1: data = f1.read() for fname in L: data = data.replace(fname, fname + '.png') with open(masterdoc_manual_html_gifs_fixed, 'wb') as f2: f2.write(data) # ================================================== # Set MILESTONES # -------------------------------------------------- if masterdoc_manual_html_gifs_fixed: result['MILESTONES'].append({'masterdoc_manual_html_gifs_fixed': masterdoc_manual_html_gifs_fixed}) # ================================================== # save result # -------------------------------------------------- tct.save_the_result(result, resultfile, params, facts, milestones, exitcode, CONTINUE, reason) # ================================================== # Return with proper exitcode # -------------------------------------------------- sys.exit(exitcode)
{ "content_hash": "8215f7e382f1e4674f8df73934ba0940", "timestamp": "", "source": "github", "line_count": 116, "max_line_length": 113, "avg_line_length": 29.689655172413794, "alnum_prop": 0.5177119628339141, "repo_name": "marble/Toolchain_RenderDocumentation", "id": "fa836b40ece8cffe73fd9ee36e9e71747fe3e5e0", "size": "3467", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "14-Provide-the-project/32-Convert-oo2rst/run_3205-001-Gif-to-Png.py", "mode": "33261", "license": "mit", "language": [ { "name": "HTML", "bytes": "98648" }, { "name": "Python", "bytes": "745804" }, { "name": "Shell", "bytes": "660" } ], "symlink_target": "" }
"""Tools to summarise the output of (multiple calls to) evaluation, confidence, etc.""" # TODO: PlotSystems: legend in plot # TODO: examples for documentation (including different output formats) # TODO: scores as percentage? from __future__ import print_function, absolute_import, division import os import itertools import operator import json from collections import namedtuple, defaultdict import re import warnings import sys try: import numpy as np except ImportError: np = None try: from matplotlib import pyplot as plt except ImportError: plt = None try: from scipy import stats except ImportError: stats = None from .configs import DEFAULT_MEASURE_SET, MEASURE_HELP, parse_measures from .document import Reader from .evaluate import Evaluate from .significance import Confidence from .interact import embed_shell DEFAULT_OUT_FMT = '.%s{}.pdf' % os.path.sep MAX_LEGEND_PER_COL = 20 CMAP = 'jet' def _pairs(items): return itertools.combinations(items, 2) def make_small_font(): from matplotlib.font_manager import FontProperties font = FontProperties() font.set_size('small') return font def _parse_limits(limits): if limits == 'tight': return if limits.count(',') != 1: raise ValueError('Expected a single comma in figure size, got {!r}'.format(limits)) width, _, height = limits.partition(',') return float(width), float(height) def _parse_figsize(figsize): if figsize.count(',') != 1: raise ValueError('Expected a single comma in figure size, got {!r}'.format(figsize)) width, _, height = figsize.partition(',') return int(width), int(height) def _parse_label_map(arg): if arg is None: return {} elif hasattr(arg, 'read'): return json.load(arg) elif hasattr(arg, 'keys'): return arg elif os.path.isfile(arg): return json.load(open(arg)) elif arg.startswith('{'): return json.loads(arg) def _get_system_names(systems): path_prefix = os.path.commonprefix(systems) if os.path.sep in path_prefix: path_prefix = os.path.dirname(path_prefix) + os.path.sep path_suffix = os.path.commonprefix([system[::-1] for system in systems]) return [system[len(path_prefix):-len(path_suffix)] for system in systems] class _Result(namedtuple('Result', 'system measure data group')): def __new__(cls, system, measure, data, group=None): if group is None: group = system return super(_Result, cls).__new__(cls, system, measure, data, group) def _group(group_re, system): if group_re is not None: return group_re.search(system).group() XTICK_ROTATION = {'rotation': 40, 'ha': 'right'} #XTICK_ROTATION = {'rotation': 'vertical', 'ha': 'center'} class PlotSystems(object): """Summarise system results as scatter plots""" def __init__(self, systems, input_type='evaluate', measures=DEFAULT_MEASURE_SET, figures_by='measure', secondary='columns', metrics=('fscore',), lines=False, confidence=None, group_re=None, best_in_group=False, sort_by=None, at_most=None, limits=(0, 1), out_fmt=DEFAULT_OUT_FMT, figsize=(8, 6), label_map=None, style_map=None, cmap=CMAP, interactive=False): if plt is None: raise ImportError('PlotSystems requires matplotlib to be installed') if figures_by == 'single': if secondary == 'markers': raise ValueError('Require rows or columns for single plot') self.systems = systems self.measures = parse_measures(measures or DEFAULT_MEASURE_SET, allow_unknown=True) self.input_type = input_type self.figures_by = figures_by or 'measure' self.confidence = confidence if confidence is not None and input_type != 'confidence': raise ValueError('--input-type=confidence required') self.secondary = secondary or 'markers' self.metrics = metrics self.lines = lines self.interactive = interactive self.out_fmt = out_fmt self.figsize = figsize self.label_map = _parse_label_map(label_map) self.style_map = _parse_label_map(style_map) self.cmap = cmap self.limits = limits self.group_re = group_re self.best_in_group = best_in_group if self.best_in_group == True and \ self.figures_by == 'measure' and \ self.secondary == 'markers' and \ len(self.measures) > 1: raise ValueError('best-in-group not supported with shared legend') self.sort_by = sort_by or 'none' self.at_most = at_most multiple_measures_per_figure = (secondary == 'heatmap') or (self.figures_by == 'single') or (self.figures_by == 'system' and len(self.measures) > 1) if self.best_in_group == True and multiple_measures_per_figure: raise ValueError('best-in-group cannot be evaluated with multiple measures per figure') if self.sort_by == 'score' and multiple_measures_per_figure: raise ValueError('Cannot sort by score with multiple measures per figure. You could instead specify a measure name.') if self.figures_by == 'single' and self.group_re and self.best_in_group in (False, True): raise ValueError('Single plot does not support grouping without --best-in-group') def _plot(self, ax, x, y, *args, **kwargs): # uses errorbars where appropriate x = np.atleast_1d(x) y = np.atleast_1d(y) fn = ax.plot if x.dtype.names and 'lo' in x.dtype.names: kwargs['xerr'] = [x['score'] - x['lo'], x['hi'] - x['score']] fn = ax.errorbar if x.dtype.names and 'score' in x.dtype.names: x = x['score'] if y.dtype.names and 'lo' in y.dtype.names: kwargs['yerr'] = [y['score'] - y['lo'], y['hi'] - y['score']] fn = ax.errorbar if y.dtype.names and 'score' in y.dtype.names: y = y['score'] if fn == ax.plot: kwargs['ls'] = '-' if self.lines else 'None' else: kwargs['fmt'] = '-o' if self.lines else 'o' return fn(x, y, *args, **kwargs) METRIC_DATA = {'precision': (0, 'b', '^'), 'recall': (1, 'r', 'v'), 'fscore': (2, 'k', '.')} def _metric_data(self): for metric in self.metrics: ind, color, marker = self.METRIC_DATA[metric] marker = self._marker(metric) or marker color = self._color(metric) or color yield ind, {'marker': marker, 'color': color, 'markeredgecolor': color, 'label': self._t(metric), # HACK: make more flexible later; shows only F1 errorbars 'score_only': metric in ('precision', 'recall')} def _t(self, s): # Translate label return self.label_map.get(s, s) def _style(self, s): if not self.style_map: return try: return self.style_map[s] except KeyError: warnings.warn('Found no style for {!r}'.format(s)) def _color(self, s): return (self._style(s) or '').partition('/')[0] def _marker(self, s): return (self._style(s) or '').partition('/')[2] def _plot1d(self, ax, data, group_sizes, tick_labels, score_label): small_font = make_small_font() ordinate = np.repeat(np.arange(len(group_sizes)), group_sizes) for scores, kwargs in data: if kwargs.pop('score_only', False): try: scores = scores['score'] except Exception: pass if self.secondary == 'rows': self._plot(ax, scores, ordinate[::-1], **kwargs) #, marker=marker, color=color, label=self._t(label), markeredgecolor=color) else: self._plot(ax, ordinate, scores, **kwargs) ticks = np.arange(len(tick_labels)) tick_labels = [self._t(label) for label in tick_labels] score_label = self._t(score_label) if self.secondary == 'rows': plt.yticks(ticks[::-1], tick_labels, fontproperties=small_font) self._set_lim(plt.xlim) plt.ylim(-.5, len(tick_labels) - .5) plt.xlabel(score_label) elif self.secondary == 'columns': plt.xticks(ticks, tick_labels, fontproperties=small_font, **XTICK_ROTATION) plt.xlim(-.5, len(tick_labels) - .5) self._set_lim(plt.ylim) plt.ylabel(score_label) else: raise ValueError('Unexpected secondary: {!r}'.format(self.secondary)) plt.tight_layout() if len(data) > 1: plt.legend(loc='best', prop=small_font) def _regroup(self, iterable, key, best_system=False, sort_by='name'): iterable = list(iterable) out = [(k, list(it)) for k, it in itertools.groupby(sorted(iterable, key=key), key=key)] if best_system == True: out = [(best.system, [best]) for best in (max(results, key=lambda result: result.data[2]['score']) for group, results in out)] elif best_system: # Already selected in _select_best_in_group out = [(results[0].system, results) for group, results in out] if sort_by == 'name': # done above return out elif callable(sort_by): pass elif sort_by == 'measure': sort_by = lambda results: self.measures.index(results[0].measure) elif sort_by == 'score': sort_by = lambda results: -max(result.data[2]['score'] for result in results) else: raise ValueError('Unknown sort: {!r}'.format(sort_by)) return sorted(out, key=lambda entry: sort_by(entry[1]))[:self.at_most] def _load_data(self, more_measures): # XXX: this needs a refactor/cleanup!!! Maybe just use more struct arrays rather than namedtuple measures = self.measures + more_measures if self.input_type == 'confidence': """ {'intervals': {'fscore': {90: (0.504, 0.602), 95: (0.494, 0.611), 99: (0.474, 0.626)}, 'precision': {90: (0.436, 0.56), 95: (0.426, 0.569), 99: (0.402, 0.591)}, 'recall': {90: (0.573, 0.672), 95: (0.562, 0.681), 99: (0.543, 0.697)}}, 'measure': 'strong_nil_match', 'overall': {'fscore': '0.555', 'precision': '0.498', 'recall': '0.626'}} """ all_results = np.empty((len(self.systems), len(measures), 3), dtype=[('score', float), ('lo', float), ('hi', float)]) for system, sys_results in zip(self.systems, all_results): result_dict = {entry['measure']: entry for entry in Confidence.read_tab_format(open(system))} # XXX: this is an ugly use of list comprehensions mat = [[(result_dict[measure]['overall'][metric], 0 if self.confidence is None else result_dict[measure]['intervals'][metric][self.confidence][0], 0 if self.confidence is None else result_dict[measure]['intervals'][metric][self.confidence][1]) for metric in ('precision', 'recall', 'fscore')] for measure in measures] sys_results[...] = mat if self.confidence is None: # hide other fields all_results = all_results[['score']] else: all_results = np.empty((len(self.systems), len(measures), 3), dtype=[('score', float)]) for system, sys_results in zip(self.systems, all_results): result_dict = Evaluate.read_tab_format(open(system)) sys_results[...] = [[(result_dict[measure][metric],) for metric in ('precision', 'recall', 'fscore')] for measure in measures] # TODO: avoid legacy array intermediary all_results_tmp = [] for path, system_name, sys_results in zip(self.systems, _get_system_names(self.systems), all_results): all_results_tmp.extend(_Result(system=system_name, measure=measure, group=_group(self.group_re, path), data=measure_results) for measure, measure_results in zip(measures, sys_results)) return all_results_tmp @staticmethod def _select_best_in_group(results, measure): found = False best = {} for res in results: if res.measure == measure: found = True best_sys, best_score = best.get(res.group, (None, -float('inf'))) cur_score = res.data['score'][2] if cur_score > best_score: best[res.group] = (res.system, cur_score) if not found: raise KeyError('Could not find results for measure {!r}'.format(measure)) return [res for res in results if best[res.group][0] == res.system] def __call__(self): more_measures = [] if self.sort_by and self.sort_by not in ('score', 'name', 'none', 'measure') and self.sort_by not in self.measures + more_measures: more_measures.append(self.sort_by) if self.best_in_group not in (False, True) and self.best_in_group not in self.measures + more_measures: more_measures.append(self.best_in_group) all_results = self._load_data(more_measures) if self.sort_by in self.measures + more_measures: by_measure = sorted((result for result in all_results if result.measure == self.sort_by), key=lambda result: -result.data[2]['score']) groups_by_measure = [result.group for result in by_measure] sort_by = lambda results: groups_by_measure.index(results[0].group) else: sort_by = self.sort_by if self.best_in_group != True and self.best_in_group: # cut back all_results to only the system per group that is best by measure best_in_group all_results = self._select_best_in_group(all_results, self.best_in_group) # HACK: now remove measures only needed for selecting all_results = [res for res in all_results if res.measure not in more_measures] if self.figures_by in ('measure', 'single'): if sort_by == 'none': groups = [result.group for result in all_results] sort_by = lambda results: groups.index(results[0].group) primary_regroup = {'key': operator.attrgetter('measure'), 'sort_by': 'measure',} secondary_regroup = {'key': operator.attrgetter('group'), 'best_system': self.best_in_group, 'sort_by': sort_by,} elif self.figures_by == 'system': if sort_by == 'none': sort_by = lambda results: self.measures.index(results[0].measure) primary_regroup = {'key': operator.attrgetter('group'), 'best_system': self.best_in_group} secondary_regroup = {'key': operator.attrgetter('measure'), 'sort_by': sort_by,} else: raise ValueError('Unexpected figures_by: {!r}'.format(self.figures_by)) if self.interactive: figures = {} else: figure_names = [] for name, figure, save_kwargs in self._generate_figures(all_results, primary_regroup, secondary_regroup): if self.interactive: figures[name] = figure else: figure_names.append(name) figure.savefig(self.out_fmt.format(name), **save_kwargs) plt.close(figure) if self.interactive: print('Opening interactive shell with variables `figures` and `results`') embed_shell({'figures': figures, 'results': all_results}, shell=None if self.interactive is True else self.interactive) else: return 'Saved to %s' % self.out_fmt.format('{%s}' % ','.join(figure_names)) def _generate_figures(self, *args): if self.secondary == 'heatmap': yield self._heatmap(*args) elif self.figures_by == 'single': yield self._single_plot(*args) else: for plot in self._generate_plots(*args): yield plot def _fscore_matrix(self, all_results, primary_regroup, secondary_regroup, get_field=lambda x: x): matrix = [] primary_names = [] for primary_name, row in self._regroup(all_results, **primary_regroup): secondary_names, row = zip(*self._regroup(row, **secondary_regroup)) matrix.append([get_field(cell.data[2]) for (cell,) in row]) primary_names.append(primary_name) matrix = np.array(matrix) return matrix, primary_names, secondary_names def _heatmap(self, all_results, primary_regroup, secondary_regroup): # FIXME: sort_by only currently applied to columns! figure = plt.figure('heatmap', figsize=self.figsize) ax = figure.add_subplot(1, 1, 1) matrix, row_names, column_names = self._fscore_matrix(all_results, primary_regroup, secondary_regroup, operator.itemgetter('score')) if self.limits and self.limits != 'tight': kwargs = {'vmin': self.limits[0], 'vmax': self.limits[1]} im = ax.imshow(matrix, interpolation='nearest', cmap=self.cmap, **kwargs) small_font = make_small_font() plt.yticks(np.arange(len(row_names)), [self._t(name) for name in row_names], fontproperties=small_font) plt.xticks(np.arange(len(column_names)), [self._t(name) for name in column_names], fontproperties=small_font, **XTICK_ROTATION) ax.set_xlim(-.5, len(column_names) - .5) figure.colorbar(im) figure.tight_layout() return 'heatmap', figure, {} def _marker_cycle(self): return itertools.cycle(('+', '.', 'o', 's', '*', '^', 'v', 'p')) def _single_plot(self, all_results, primary_regroup, secondary_regroup): figure_name = 'altogether' matrix, measure_names, sys_names = self._fscore_matrix(all_results, primary_regroup, secondary_regroup) colors = plt.get_cmap(self.cmap)(np.linspace(0, 1.0, len(measure_names))) fig = plt.figure(figure_name, figsize=self.figsize) ax = fig.add_subplot(1, 1, 1) data = [(col, {'label': self._t(measure), 'marker': self._marker(measure) or marker, 'color': self._color(measure) or color, 'markeredgecolor': self._color(measure) or color}) for col, measure, color, marker in zip(matrix, measure_names, colors, self._marker_cycle())] self._plot1d(ax, data, np.ones(len(sys_names), dtype=int), sys_names, 'fscore') plt.grid(axis='x' if self.secondary == 'rows' else 'y') return figure_name, fig, {} def _generate_plots(self, all_results, primary_regroup, secondary_regroup): for figure_name, figure_data in self._regroup(all_results, **primary_regroup): figure_data = self._regroup(figure_data, **secondary_regroup) n_secondary = len(figure_data) colors = plt.get_cmap(self.cmap)(np.linspace(0, 1.0, n_secondary)) fig = plt.figure(figure_name, figsize=self.figsize) ax = fig.add_subplot(1, 1, 1) if self.secondary == 'markers': markers = self._marker_cycle() patches = [] for (secondary_name, results), color, marker in zip(figure_data, colors, markers): # recall-precision data = np.array([result.data for result in results]) patches.append(self._plot(ax, data[..., 1], data[..., 0], marker=self._marker(secondary_name) or marker, color=self._color(secondary_name) or color, label=self._t(secondary_name))) plt.xlabel(self._t('recall')) plt.ylabel(self._t('precision')) self._set_lim(plt.ylim) self._set_lim(plt.xlim) fig.tight_layout() else: secondary_names, figure_data = zip(*figure_data) scores = np.array([result.data for results in figure_data for result in results]) if tuple(self.metrics) == ('fscore',): axis_label = 'fscore' else: axis_label = 'score' axis_label = '{} {}'.format(self._t(figure_name), self._t(axis_label)) self._plot1d(ax, [(scores[..., c], kwargs) for c, kwargs in self._metric_data()], [len(group) for group in figure_data], secondary_names, axis_label) plt.grid(axis='x' if self.secondary == 'rows' else 'y') yield figure_name, fig, {} if self.secondary == 'markers' and n_secondary > 1: # XXX: this uses `ax` defined above fig = plt.figure() legend = plt.figlegend(*ax.get_axes().get_legend_handles_labels(), loc='center', ncol=int(np.ceil(n_secondary / MAX_LEGEND_PER_COL)), prop=make_small_font()) fig.canvas.draw() # FIXME: need some padding bbox = legend.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) yield '_legend_', fig, {'bbox_inches': bbox} def _set_lim(self, fn): if self.limits == 'tight': return fn(self.limits) @classmethod def add_arguments(cls, p): p.add_argument('systems', nargs='+', metavar='FILE') meg = p.add_mutually_exclusive_group() meg.add_argument('--by-system', dest='figures_by', action='store_const', const='system', help='Each system in its own figure, or row with --heatmap') meg.add_argument('--by-measure', dest='figures_by', action='store_const', const='measure', default='measure', help='Each measure in its own figure, or row with --heatmap (default)') meg.add_argument('--single-plot', dest='figures_by', action='store_const', const='single', help='Single figure showing fscore for all given measures') meg = p.add_mutually_exclusive_group() meg.add_argument('--scatter', dest='secondary', action='store_const', const='markers', default='columns', help='Plot precision and recall as separate axes with different markers as needed') meg.add_argument('--rows', dest='secondary', action='store_const', const='rows', help='Show rows of P/R/F plots') meg.add_argument('--columns', dest='secondary', action='store_const', const='columns', help='Show columns of P/R/F plots (default)') meg.add_argument('--heatmap', dest='secondary', action='store_const', const='heatmap', help='Show a heatmap comparing all systems and measures') meg = p.add_mutually_exclusive_group() meg.add_argument('--pr', dest='metrics', action='store_const', const=('precision', 'recall'), default=('fscore',), help='In rows or columns mode, plot both precision and recall, rather than F1') meg.add_argument('--prf', dest='metrics', action='store_const', const=('precision', 'recall', 'fscore'), help='In rows or columns mode, plot precision and recall as well as F1') p.add_argument('--lines', action='store_true', default=False, help='Draw lines between points in rows/cols mode') p.add_argument('--cmap', default=CMAP) p.add_argument('--limits', type=_parse_limits, default=(0, 1), help='Limits the shown score range to the specified min,max; or "tight"') p.add_argument('-i', '--input-type', choices=['evaluate', 'confidence'], default='evaluate', help='Whether input was produced by the evaluate (default) or confidence command') meg = p.add_mutually_exclusive_group() meg.add_argument('-o', '--out-fmt', default=DEFAULT_OUT_FMT, help='Path template for saving plots with --fmt=plot (default: %(default)s))') meg.add_argument('--interactive', nargs='?', default=False, const=True, metavar='SHELL', help='Open an interactive shell with `figures` available instead of saving images to file') p.add_argument('--figsize', default=(8, 6), type=_parse_figsize, help='The width,height of a figure in inches (default 8,6)') p.add_argument('-m', '--measure', dest='measures', action='append', metavar='NAME', help=MEASURE_HELP) p.add_argument('--ci', dest='confidence', type=int, help='The percentile confidence interval to display as error bars ' '(requires --input-type=confidence') p.add_argument('--group-re', type=re.compile, help='Display systems grouped, where a system\'s group label is extracted from its path by this PCRE') p.add_argument('--best-in-group', nargs='?', const=True, default=False, help='Only show best system per group, optionally according to a given measure') p.add_argument('-s', '--sort-by', help='Sort each plot, options include "none", "name", "score", or the name of a measure.') p.add_argument('--at-most', type=int, help='Show the first AT_MOST sorted entries') p.add_argument('--label-map', help='JSON (or file) mapping internal labels to display labels') p.add_argument('--style-map', help='JSON (or file) mapping labels to <color>/<marker> settings') p.set_defaults(cls=cls) return p class CompareMeasures(object): """Calculate statistics of measure distribution over systems """ def __init__(self, systems, gold=None, evaluation_files=False, measures=DEFAULT_MEASURE_SET, fmt='none', out_fmt=DEFAULT_OUT_FMT, figsize=(8, 6), cmap=CMAP, sort_by='none', label_map=None): if stats is None: raise ImportError('CompareMeasures requires scipy to be installed') self.systems = systems if gold: assert not evaluation_files self.gold = list(Reader(open(gold))) else: assert evaluation_files self.gold = None self.measures = parse_measures(measures or DEFAULT_MEASURE_SET, allow_unknown=evaluation_files) self.format = self.FMTS[fmt] if fmt is not callable else fmt self.out_fmt = out_fmt self.figsize = figsize self.sort_by = sort_by self.label_map = _parse_label_map(label_map) self.cmap = cmap def __call__(self): all_results = np.empty((len(self.systems), len(self.measures))) # TODO: parallelise? for system, sys_results in zip(self.systems, all_results): if self.gold is None: result_dict = Evaluate.read_tab_format(open(system)) else: result_dict = Evaluate(system, self.gold, measures=self.measures, fmt='none')() sys_results[...] = [result_dict[measure]['fscore'] for measure in self.measures] self.all_results = all_results correlations = {} scores_by_measure = zip(self.measures, all_results.T) for (measure_i, scores_i), (measure_j, scores_j) in _pairs(scores_by_measure): correlations[measure_i, measure_j] = {'pearson': stats.pearsonr(scores_i, scores_j), 'spearman': stats.spearmanr(scores_i, scores_j), 'kendall': stats.kendalltau(scores_i, scores_j)} quartiles = {} for measure_i, scores_i in scores_by_measure: quartiles[measure_i] = np.percentile(scores_i, [0, 25, 50, 75, 100]) return self.format(self, {'quartiles': quartiles, 'correlations': correlations}) def tab_format(self, results): correlations = results['correlations'] quartiles = results['quartiles'] rows = [['measure1', 'measure2', 'pearson-r', 'spearman-r', 'kendall-tau', 'median-diff', 'iqr-ratio']] for measure1, measure2 in _pairs(self.measures): pair_corr = correlations[measure1, measure2] quart1 = quartiles[measure1] quart2 = quartiles[measure2] data = [pair_corr['pearson'][0], pair_corr['spearman'][0], pair_corr['kendall'][0], quart1[2] - quart2[2], (quart1[3] - quart1[1]) / (quart2[3] - quart2[1])] data = ['%0.3f' % v for v in data] rows.append([measure1, measure2] + data) col_widths = [max(len(row[col]) for row in rows) for col in range(len(rows[0]))] fmt = '\t'.join('{{:{:d}s}}'.format(width) for width in col_widths) return "\n".join(fmt.format(*row) for row in rows) def json_format(self, results): return json.dumps(results, sort_keys=True, indent=4) def no_format(self, results): return results def plot_format(self, results): import matplotlib.pyplot as plt small_font = make_small_font() correlations = results['correlations'] measures = self.measures all_results = self.all_results # Order measures cleverly if self.sort_by == 'name': order = np.argsort(measures) elif self.sort_by == 'eigen': from matplotlib.mlab import PCA try: order = np.argsort(PCA(all_results).s) except np.linalg.LinAlgError: warnings.warn('PCA failed; not sorting measures') order = None elif self.sort_by == 'mds': from sklearn.manifold import MDS mds = MDS(n_components=1, n_init=20, random_state=0) order = np.argsort(mds.fit_transform(all_results.T), axis=None) else: order = None if order is not None: measures = np.take(measures, order) all_results = np.take(all_results, order, axis=1) disp_measures = [self.label_map.get(measure, measure) for measure in measures] n_measures = len(measures) ticks = (np.arange(len(measures)), disp_measures) cmap = plt.get_cmap(self.cmap) cmap.set_bad('white') for metric in ['pearson', 'spearman', 'kendall']: data = np.ma.masked_all((n_measures, n_measures), dtype=float) for (i, measure_i), (j, measure_j) in _pairs(enumerate(measures)): try: pair_corr = correlations[measure_i, measure_j] except KeyError: pair_corr = correlations[measure_j, measure_i] data[i, j] = data[j, i] = pair_corr[metric][0] for i in range(n_measures): data[i, i] = 1 fig, ax = plt.subplots(figsize=self.figsize) im = ax.imshow(data, interpolation='nearest', cmap=cmap) plt.colorbar(im) plt.xticks(*ticks, fontproperties=small_font, **XTICK_ROTATION) plt.yticks(*ticks, fontproperties=small_font) plt.tight_layout() plt.savefig(self.out_fmt.format(metric)) plt.close(fig) fig, ax = plt.subplots(figsize=self.figsize) ax.boxplot(all_results[:, ::-1], 0, 'rs', 0, labels=disp_measures[::-1]) plt.yticks(fontproperties=small_font) plt.tight_layout() plt.savefig(self.out_fmt.format('spread')) return 'Saved to %s' % self.out_fmt.format('{pearson,spearman,kendall,spread}') FMTS = { 'none': no_format, 'tab': tab_format, 'json': json_format, 'plot': plot_format, } @classmethod def add_arguments(cls, p): p.add_argument('systems', nargs='+', metavar='FILE') meg = p.add_mutually_exclusive_group(required=True) meg.add_argument('-g', '--gold') meg.add_argument('-e', '--evaluation-files', action='store_true', default=False, help='System paths are the tab-formatted outputs ' 'of the evaluate command, rather than ' 'system outputs') p.add_argument('-f', '--fmt', default='tab', choices=cls.FMTS.keys()) p.add_argument('-o', '--out-fmt', default=DEFAULT_OUT_FMT, help='Path template for saving plots with --fmt=plot (default: %(default)s))') p.add_argument('--figsize', default=(8, 6), type=_parse_figsize, help='The width,height of a figure in inches (default 8,6)') p.add_argument('-m', '--measure', dest='measures', action='append', metavar='NAME', help=MEASURE_HELP) p.add_argument('-s', '--sort-by', choices=['none', 'name', 'eigen', 'mds'], help='For plot, sort by name, eigenvalue, or ' 'multidimensional scaling (requires scikit-learn)') p.add_argument('--cmap', default=CMAP) p.add_argument('--label-map', help='JSON (or file) mapping internal labels to display labels') p.set_defaults(cls=cls) return p class ComposeMeasures(object): """Adds composite measures rows to evaluation output""" def __init__(self, systems, out_fmt=None, ratios=[]): if len(systems) == 0 or (len(systems) == 1 and systems[0] == '-'): if out_fmt is not None: raise ValueError('Cannot use --out-fmt with standard IO mode') self.stdio_mode = True else: self.stdio_mode = False self.systems = systems self.ratios = ratios or [] self.out_fmt = out_fmt def __call__(self): if self.stdio_mode: return self._process_system(sys.stdin) for path in self.systems: if self.out_fmt is None: out_path = path else: dirname = os.path.dirname(path) basename = os.path.basename(path) if '.' in basename: basename, ext = os.path.splitext(basename) out_path = self.out_fmt.format(dir=dirname, base=basename, ext=ext) with open(path) as in_file: result = self._process_system(in_file) with open(out_path, 'w') as out_file: print(result, file=out_file) def _process_system(self, in_file): # TODO: don't be so implicit about header out = [] lookup = {} for l in in_file: l = l.rstrip().split('\t') out.append(l) lookup[l[-1]] = l[:-1] for m1, m2 in self.ratios: row = [] for v1, v2 in zip(lookup[m1], lookup[m2]): v1 = float(v1) v2 = float(v2) if abs(v2) < 1e-10: row.append('nan') continue row.append('{:0.3f}'.format(v1 / v2)) row.append('{}/{}'.format(m1, m2)) out.append(row) return '\n'.join('\t'.join(row) for row in out) @classmethod def add_arguments(cls, p): p.add_argument('systems', nargs='*', metavar='FILE') p.add_argument('-o', '--out-fmt', help='Output path format (default overwrites input path), e.g. {dir}/{base}.evaluation_with_ratios') p.add_argument('-r', '--ratio', dest='ratios', nargs=2, action='append', help='Create a ratio of two other measures named <measure1>/<measure2>') p.set_defaults(cls=cls) return p class RankSystems(object): """Get filenames corresponding to best-ranked systems Given evaluation outputs, ranks the system by some measure(s), or best per name group. This is a useful command-line helper before plotting to ensure all have same systems. """ # TODO: support JSON format output def __init__(self, systems, measures, metrics=['fscore'], group_re=None, group_limit=None, group_max=None, limit=None, max=None, short_names=False): self.systems = systems self.measures = parse_measures(measures or DEFAULT_MEASURE_SET, allow_unknown=True) self.metrics = metrics or ['fscore'] self.group_re = group_re self.group_limit = group_limit self.group_max = group_max self.limit = limit self.max = max self.short_names = short_names def __call__(self): # This could be done by awk and sort and awk if we rathered.... Tup = namedtuple('Tup', 'system group measure metric score') tuples = [] measures = set(self.measures) short_names = _get_system_names(self.systems) for path, short in zip(self.systems, short_names): results = Evaluate.read_tab_format(open(path)) system = short if self.short_names else path tuples.extend(Tup(system, _group(self.group_re, path), measure, metric, score) for measure, measure_results in results.items() if measure in measures for metric, score in measure_results.items() if metric in self.metrics) tuples.sort(key=lambda tup: (tup.measure, tup.metric, -tup.score)) result = [] for _, rank_tuples in itertools.groupby(tuples, key=lambda tup: (tup.measure, tup.metric)): result.extend(self._rank(rank_tuples)) if self.group_re: header = 'measure\tmetric\trank\tgroup rank\tscore\tgroup\tsystem' fmt = '{0.measure}\t{0.metric}\t{1[0]}\t{1[1]}\t{0.score}\t{0.group}\t{0.system}' else: header = 'measure\tmetric\trank\tscore\tsystem' fmt = '{0.measure}\t{0.metric}\t{1[0]}\t{0.score}\t{0.system}' rows = [header] rows.extend(fmt.format(tup, ranks) for tup, ranks in result) return '\n'.join(rows) def _rank(self, tuples): key_fns = [(lambda k: None, self.limit, self.max)] if self.group_re is not None: # no_yield cases must be handled first for group, then overall :s key_fns.insert(0, (operator.attrgetter('group'), self.group_limit, self.group_max)) INF = float('inf') idx = defaultdict(int) prev = defaultdict(lambda: (INF, INF)) for tup in tuples: no_yield = False ranks = [] for fn, limit, max_rank in key_fns: key = fn(tup) idx[key] += 1 if limit is not None and idx[key] > limit: no_yield = True break score, rank = prev[key] if tup.score != score: rank = idx[key] prev[key] = (tup.score, rank) ranks.append(rank) if max_rank is not None and rank > max_rank: no_yield = True break if not no_yield: yield tup, tuple(ranks) @classmethod def add_arguments(cls, p): p.add_argument('systems', nargs='+', metavar='FILE') p.add_argument('-m', '--measure', dest='measures', action='append', metavar='NAME', help=MEASURE_HELP) p.add_argument('--metric', dest='metrics', action='append', choices=['precision', 'recall', 'fscore'], metavar='NAME') p.add_argument('--group-re', type=re.compile, help='Rank systems within groups, where a system\'s ' 'group label is extracted from its path by this ' 'PCRE') p.add_argument('--short-names', action='store_true', help='Strip common prefix/suffix off system names') meg = p.add_mutually_exclusive_group() meg.add_argument('--group-limit', type=int, help='Max number of entries per group (breaking ties arbitrarily)') meg.add_argument('--group-max', type=int, help='Max rank per group') meg = p.add_mutually_exclusive_group() meg.add_argument('--limit', type=int, help='Max number of entries (breaking ties arbitrarily)') meg.add_argument('--max', type=int, help='Max rank') p.set_defaults(cls=cls) return p
{ "content_hash": "052b2cb1835142bd84f5979bb3d28109", "timestamp": "", "source": "github", "line_count": 931, "max_line_length": 259, "avg_line_length": 45.0687432867884, "alnum_prop": 0.5549703281775066, "repo_name": "jamesxia4/neleval", "id": "9f4e657b6e61147ed1ca5bf397e3010f5e30a4eb", "size": "41959", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "neleval/summary.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "201073" }, { "name": "Shell", "bytes": "27014" } ], "symlink_target": "" }
import yaml from toskeriser.exceptions import TkStackException from .test_upper import TestUpper class TestGroupForceUpdate(TestUpper): @classmethod def setUpClass(self): self._file_path = 'data/examples/example_group_force_update.yaml' self._new_path = 'data/examples/example_group_force_update.completed.yaml' self._mock_responces = { 'node=6&ruby=2&distro=alpine': { 'count': 1, 'images': [ { 'name': 'jekyll/jekyll:builder', 'softwares': [ {'software': 'node', 'ver': '6.9.5'}, {'software': 'ruby', 'ver': '2.3.3'}, {'software': 'wget', 'ver': '1.25.1'} ], 'distro': 'Alpine Linux v3.5', 'size': 20000000, 'pulls': 200, 'stars': 23 } ] } } self._node_templates = yaml.load(''' app3A: type: tosker.nodes.Software requirements: - host: my_group_container interfaces: Standard: start: implementation: get_version.sh app3B: type: tosker.nodes.Software requirements: - host: node_filter: properties: - supported_sw: - node: 6.x - ruby: 2.x - os_distribution: alpine node: my_group_container interfaces: Standard: start: implementation: get_version.sh my_group_container: type: tosker.nodes.Container properties: supported_sw: ruby: 2.3.3 node: 6.9.5 wget: 1.25.1 os_distribution: Alpine Linux v3.5 artifacts: my_image: file: jekyll/jekyll:builder type: tosker.artifacts.Image repository: docker_hub ''') def test_default(self): with self.assertRaises(TkStackException): self._default_test() self._default_test(force=True) def test_policy(self): with self.assertRaises(TkStackException): self._policy_test() self._policy_test(force=True) def test_constraints(self): with self.assertRaises(TkStackException): self._constraints_test() self._constraints_test(force=True)
{ "content_hash": "ced5f99c09604654892e4794eff867cf", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 82, "avg_line_length": 26.96551724137931, "alnum_prop": 0.525149190110827, "repo_name": "di-unipi-socc/TosKeriser", "id": "e20279658ac881cd6bb79145b3962f3750fde727", "size": "2346", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_group_force_update.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2311" }, { "name": "Perl", "bytes": "159" }, { "name": "Python", "bytes": "104325" }, { "name": "Shell", "bytes": "18223" } ], "symlink_target": "" }
from persimmon.view.pins import OutputPin from persimmon.view.blocks.block import Block # MYPY HACK from kivy.properties import ObjectProperty from kivy.lang import Builder from sklearn.neighbors import KNeighborsClassifier Builder.load_file('persimmon/view/blocks/knnblock.kv') class KNNBlock(Block): est_out = ObjectProperty() def function(self): self.est_out.val = KNeighborsClassifier()
{ "content_hash": "b87e706490f5ee318021eb75a5003a8d", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 58, "avg_line_length": 25.875, "alnum_prop": 0.7777777777777778, "repo_name": "AlvarBer/Persimmon", "id": "038a26b286b7b6008a07c3589d2109e1883a3c66", "size": "414", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "persimmon/view/blocks/knnblock.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "59242" } ], "symlink_target": "" }
""" This package provides a collection of simulation area classes. Area expresses a simulation surface by its shape and extent in the two-dimensional space with the origin in (0, 0). """ __docformat__ = 'reStructuredText' __all__ = ['rectangle', 'square']
{ "content_hash": "d0e38ab4d9394a3dd2909a04a498d923", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 66, "avg_line_length": 23.636363636363637, "alnum_prop": 0.7192307692307692, "repo_name": "mkalewski/sim2net", "id": "f58630ce95cd483680163525f238bf22bc433390", "size": "827", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sim2net/area/__init__.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "222225" } ], "symlink_target": "" }
"""Test suite for the runtime profiler.""" from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from pyperf.profilers import runtime def test_runtime_gives_reasonable_results(): """Ensure runtime is measured within some degree of reason. The expectation is that obviously longer running code is measured as longer running by the profiler. """ profiler = runtime.RuntimeProfiler() small, _ = profiler(setup='pass', code='for x in range(100): pass') large, _ = profiler(setup='pass', code='for x in range(10000): pass') assert small < large
{ "content_hash": "8b5e3a0869fdbf055b83b1ddc18eb1bd", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 79, "avg_line_length": 33.6, "alnum_prop": 0.7127976190476191, "repo_name": "kevinconway/PyPerf", "id": "4451192eb3adc0cd4f9af5b4622b4fc4eba616a3", "size": "672", "binary": false, "copies": "1", "ref": "refs/heads/alpha", "path": "tests/profilers/test_runtime.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "33946" }, { "name": "Ruby", "bytes": "424" }, { "name": "Shell", "bytes": "2175" } ], "symlink_target": "" }
from past.builtins import basestring from airflow.sensors.base_sensor_operator import BaseSensorOperator from airflow.utils.decorators import apply_defaults class NamedHivePartitionSensor(BaseSensorOperator): """ Waits for a set of partitions to show up in Hive. :param partition_names: List of fully qualified names of the partitions to wait for. A fully qualified name is of the form ``schema.table/pk1=pv1/pk2=pv2``, for example, default.users/ds=2016-01-01. This is passed as is to the metastore Thrift client ``get_partitions_by_name`` method. Note that you cannot use logical or comparison operators as in HivePartitionSensor. :type partition_names: list[str] :param metastore_conn_id: reference to the metastore thrift service connection id :type metastore_conn_id: str """ template_fields = ('partition_names',) ui_color = '#8d99ae' @apply_defaults def __init__(self, partition_names, metastore_conn_id='metastore_default', poke_interval=60 * 3, hook=None, *args, **kwargs): super(NamedHivePartitionSensor, self).__init__( poke_interval=poke_interval, *args, **kwargs) if isinstance(partition_names, basestring): raise TypeError('partition_names must be an array of strings') self.metastore_conn_id = metastore_conn_id self.partition_names = partition_names self.hook = hook if self.hook and metastore_conn_id != 'metastore_default': self.log.warning( 'A hook was passed but a non defaul metastore_conn_id=%s was used', metastore_conn_id ) @staticmethod def parse_partition_name(partition): first_split = partition.split('.', 1) if len(first_split) == 1: schema = 'default' table_partition = max(first_split) # poor man first else: schema, table_partition = first_split second_split = table_partition.split('/', 1) if len(second_split) == 1: raise ValueError('Could not parse ' + partition + 'into table, partition') else: table, partition = second_split return schema, table, partition def poke_partition(self, partition): if not self.hook: from airflow.hooks.hive_hooks import HiveMetastoreHook self.hook = HiveMetastoreHook( metastore_conn_id=self.metastore_conn_id) schema, table, partition = self.parse_partition_name(partition) self.log.info('Poking for %s.%s/%s', schema, table, partition) return self.hook.check_for_named_partition( schema, table, partition) def poke(self, context): self.partition_names = [ partition_name for partition_name in self.partition_names if not self.poke_partition(partition_name) ] return not self.partition_names
{ "content_hash": "de85c6106d3ed63a2687f19121f7f033", "timestamp": "", "source": "github", "line_count": 83, "max_line_length": 101, "avg_line_length": 37.13253012048193, "alnum_prop": 0.6171317326411421, "repo_name": "owlabs/incubator-airflow", "id": "6f254589658540187dcb604bc34040e1bbfae787", "size": "3894", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "airflow/sensors/named_hive_partition_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "57045" }, { "name": "HTML", "bytes": "147187" }, { "name": "JavaScript", "bytes": "1370838" }, { "name": "Mako", "bytes": "1037" }, { "name": "Python", "bytes": "1647566" }, { "name": "Shell", "bytes": "18823" } ], "symlink_target": "" }
from sqlalchemy.orm import validates from sqlalchemy import UniqueConstraint from eachday import app, db, bcrypt from datetime import datetime, date, timedelta import jwt import marshmallow from marshmallow import Schema, fields, validate, ValidationError class User(db.Model): __tablename__ = 'user' TOKEN_EXPIRATION_DAYS = 1 id = db.Column(db.Integer, primary_key=True, autoincrement=True) email = db.Column(db.String, unique=True, nullable=False) password = db.Column(db.String, nullable=False) name = db.Column(db.String, nullable=False) joined_on = db.Column(db.Date, nullable=False) def set_password(self, password): self.password = bcrypt.generate_password_hash( password, app.config.get('BCRYPT_LOG_ROUNDS') ).decode() def __init__(self, email, password, name, joined_on=None): self.email = email self.set_password(password) self.name = name self.joined_on = joined_on or date.today() def encode_auth_token(self, user_id): """ Generates an Auth Token :return: string """ td = timedelta(days=User.TOKEN_EXPIRATION_DAYS) payload = { 'exp': datetime.utcnow() + td, 'iat': datetime.utcnow(), 'sub': self.id, } payload.update(UserSchema().dump(self).data) return jwt.encode( payload, app.config.get('SECRET_KEY'), algorithm='HS256' ) @staticmethod def decode_auth_token(auth_token): """ Decodes the auth token :param auth_token: :return: integer|string """ try: payload = jwt.decode(auth_token, app.config.get('SECRET_KEY')) return payload['sub'] except jwt.ExpiredSignatureError: raise Exception('Signature expired. Please log in again.') except jwt.InvalidTokenError: raise Exception('Invalid token. Please log in again.') class Entry(db.Model): __tablename__ = 'entry' id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False) date = db.Column(db.Date, nullable=False) notes = db.Column(db.Text) rating = db.Column(db.Integer) __table_args__ = (UniqueConstraint('user_id', 'date'),) class BlacklistToken(db.Model): __tablename__ = 'blacklist_token' id = db.Column(db.Integer, primary_key=True) token = db.Column(db.String, unique=True, nullable=False) blacklisted_on = db.Column(db.DateTime, nullable=False) def __init__(self, token): self.token = token self.blacklisted_on = datetime.utcnow() class UserSchema(Schema): id = fields.Int() email = fields.Str(required=True, validate=validate.Email(error='Invalid email address')) password = fields.Str(required=True, load_only=True) name = fields.Str(required=True) joined_on = fields.Date(required=False) class EntrySchema(Schema): id = fields.Int() user_id = fields.Int() date = fields.Date(required=True) notes = fields.Str(allow_none=True) rating = fields.Int(allow_none=True) @marshmallow.validates('rating') def validate_rating(self, data): if data is not None and not 1 <= data <= 10: raise ValidationError('Rating must be between 1 and 10') return data
{ "content_hash": "c81fe22fcc8fffe18a97720a27ecb8bf", "timestamp": "", "source": "github", "line_count": 106, "max_line_length": 78, "avg_line_length": 32.339622641509436, "alnum_prop": 0.6277712952158693, "repo_name": "bcongdon/EachDay", "id": "953cb120eba3a233658ce86af467546aadda29b4", "size": "3428", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "eachday/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "517" }, { "name": "HTML", "bytes": "489" }, { "name": "JavaScript", "bytes": "87297" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "51507" }, { "name": "Shell", "bytes": "92" } ], "symlink_target": "" }
""" (c) April 2017 by Daniel Seita Code for plotting behavioral cloning. No need to use command line arguments, just run `python plot_bc.py`. Easy! Right now it generates two figures per environment, one with validation set losses and the other with returns. The latter is probably more interesting. """ import argparse import gym import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np import os import pickle import sys np.set_printoptions(edgeitems=100, linewidth=100, suppress=True) # Some matplotlib settings. plt.style.use('seaborn-darkgrid') error_region_alpha = 0.25 LOGDIR = 'logs/' FIGDIR = 'figures/' title_size = 22 tick_size = 17 legend_size = 17 ysize = 18 xsize = 18 lw = 3 ms = 8 colors = ['red', 'blue', 'yellow', 'black'] def plot_bc_modern(edir): """ Plot the results for this particular environment. """ subdirs = os.listdir(LOGDIR+edir) print("plotting subdirs {}".format(subdirs)) # Make it easy to count how many of each numrollouts we have. R_TO_COUNT = {'4':0, '11':0, '18':0, '25':0} R_TO_IJ = {'4':(0,2), '11':(1,0), '18':(1,1), '25':(1,2)} fig,axarr = plt.subplots(2, 3, figsize=(24,15)) axarr[0,2].set_title(edir+", Returns, 4 Rollouts", fontsize=title_size) axarr[1,0].set_title(edir+", Returns, 11 Rollouts", fontsize=title_size) axarr[1,1].set_title(edir+", Returns, 18 Rollouts", fontsize=title_size) axarr[1,2].set_title(edir+", Returns, 25 Rollouts", fontsize=title_size) # Don't forget to plot the expert performance! exp04 = np.mean(np.load("expert_data/"+edir+"_004.npy")[()]['returns']) exp11 = np.mean(np.load("expert_data/"+edir+"_011.npy")[()]['returns']) exp18 = np.mean(np.load("expert_data/"+edir+"_018.npy")[()]['returns']) axarr[0,2].axhline(y=exp04, color='brown', lw=lw, linestyle='--', label='expert') axarr[1,0].axhline(y=exp11, color='brown', lw=lw, linestyle='--', label='expert') axarr[1,1].axhline(y=exp18, color='brown', lw=lw, linestyle='--', label='expert') if 'Reacher' not in edir: exp25 = np.mean(np.load("expert_data/"+edir+"_025.npy")[()]['returns']) axarr[1,2].axhline(y=exp25, color='brown', lw=lw, linestyle='--', label='expert') for dd in subdirs: ddsplit = dd.split("_") # `dd` is of the form `numroll_X_seed_Y` numroll, seed = ddsplit[1], ddsplit[3] xcoord = np.load(LOGDIR+edir+"/"+dd+"/iters.npy") tr_loss = np.load(LOGDIR+edir+"/"+dd+"/tr_loss.npy") val_loss = np.load(LOGDIR+edir+"/"+dd+"/val_loss.npy") returns = np.load(LOGDIR+edir+"/"+dd+"/returns.npy") mean_ret = np.mean(returns, axis=1) std_ret = np.std(returns, axis=1) # Playing with dictionaries ijcoord = R_TO_IJ[numroll] cc = colors[ R_TO_COUNT[numroll] ] R_TO_COUNT[numroll] += 1 axarr[ijcoord].plot(xcoord, mean_ret, lw=lw, color=cc, label=dd) axarr[ijcoord].fill_between(xcoord, mean_ret-std_ret, mean_ret+std_ret, alpha=error_region_alpha, facecolor=cc) # Cram the training and validation losses on these subplots. axarr[0,0].plot(xcoord, tr_loss, lw=lw, label=dd) axarr[0,1].plot(xcoord, val_loss, lw=lw, label=dd) boring_stuff(axarr, edir) plt.tight_layout() plt.savefig(FIGDIR+edir+".png") def plot_bc_humanoid(edir): """ Plots humanoid. The argument here is kind of redundant... also, I guess we'll have to ignore one of the plots here since Humanoid will have 5 subplots. Yeah, it's a bit awkward. """ assert edir == "Humanoid-v1" subdirs = os.listdir(LOGDIR+edir) print("plotting subdirs {}".format(subdirs)) # Make it easy to count how many of each numrollouts we have. R_TO_COUNT = {'80':0, '160':0, '240':0} R_TO_IJ = {'80':(1,0), '160':(1,1), '240':(1,2)} fig,axarr = plt.subplots(2, 3, figsize=(24,15)) axarr[0,2].set_title("Empty Plot", fontsize=title_size) axarr[1,0].set_title(edir+", Returns, 80 Rollouts", fontsize=title_size) axarr[1,1].set_title(edir+", Returns, 160 Rollouts", fontsize=title_size) axarr[1,2].set_title(edir+", Returns, 240 Rollouts", fontsize=title_size) # Plot expert performance (um, this takes a while...). exp080 = np.mean(np.load("expert_data/"+edir+"_080.npy")[()]['returns']) exp160 = np.mean(np.load("expert_data/"+edir+"_160.npy")[()]['returns']) exp240 = np.mean(np.load("expert_data/"+edir+"_240.npy")[()]['returns']) axarr[1,0].axhline(y=exp080, color='brown', lw=lw, linestyle='--', label='expert') axarr[1,1].axhline(y=exp160, color='brown', lw=lw, linestyle='--', label='expert') axarr[1,2].axhline(y=exp240, color='brown', lw=lw, linestyle='--', label='expert') for dd in subdirs: ddsplit = dd.split("_") # `dd` is of the form `numroll_X_seed_Y` numroll, seed = ddsplit[1], ddsplit[3] xcoord = np.load(LOGDIR+edir+"/"+dd+"/iters.npy") tr_loss = np.load(LOGDIR+edir+"/"+dd+"/tr_loss.npy") val_loss = np.load(LOGDIR+edir+"/"+dd+"/val_loss.npy") returns = np.load(LOGDIR+edir+"/"+dd+"/returns.npy") mean_ret = np.mean(returns, axis=1) std_ret = np.std(returns, axis=1) # Playing with dictionaries ijcoord = R_TO_IJ[numroll] cc = colors[ R_TO_COUNT[numroll] ] R_TO_COUNT[numroll] += 1 axarr[ijcoord].plot(xcoord, mean_ret, lw=lw, color=cc, label=dd) axarr[ijcoord].fill_between(xcoord, mean_ret-std_ret, mean_ret+std_ret, alpha=error_region_alpha, facecolor=cc) # Cram the training and validation losses on these subplots. axarr[0,0].plot(xcoord, tr_loss, lw=lw, label=dd) axarr[0,1].plot(xcoord, val_loss, lw=lw, label=dd) boring_stuff(axarr, edir) plt.tight_layout() plt.savefig(FIGDIR+edir+".png") def boring_stuff(axarr, edir): """ Axes, titles, legends, etc. Yeah yeah ... """ for i in range(2): for j in range(3): if i == 0 and j == 0: axarr[i,j].set_ylabel("Loss Training MBs", fontsize=ysize) if i == 0 and j == 1: axarr[i,j].set_ylabel("Loss Validation Set", fontsize=ysize) else: axarr[i,j].set_ylabel("Average Return", fontsize=ysize) axarr[i,j].set_xlabel("Training Minibatches", fontsize=xsize) axarr[i,j].tick_params(axis='x', labelsize=tick_size) axarr[i,j].tick_params(axis='y', labelsize=tick_size) axarr[i,j].legend(loc="best", prop={'size':legend_size}) axarr[i,j].legend(loc="best", prop={'size':legend_size}) axarr[0,0].set_title(edir+", Training Losses", fontsize=title_size) axarr[0,1].set_title(edir+", Validation Losses", fontsize=title_size) axarr[0,0].set_yscale('log') axarr[0,1].set_yscale('log') def plot_bc(e): """ Split into cases. It makes things easier for me. """ env_to_method = {'Ant-v1': plot_bc_modern, 'HalfCheetah-v1': plot_bc_modern, 'Hopper-v1': plot_bc_modern, 'Walker2d-v1': plot_bc_modern, 'Reacher-v1': plot_bc_modern, 'Humanoid-v1': plot_bc_humanoid} env_to_method[e](e) if __name__ == "__main__": env_dirs = [e for e in os.listdir(LOGDIR) if "text" not in e] print("Plotting with one figure per env_dirs = {}".format(env_dirs)) for e in env_dirs: plot_bc(e)
{ "content_hash": "ecda409cf4c017a1ef1c04938e48bc36", "timestamp": "", "source": "github", "line_count": 187, "max_line_length": 89, "avg_line_length": 40.62566844919786, "alnum_prop": 0.601553244701856, "repo_name": "DanielTakeshi/rl_algorithms", "id": "0dae5e859296fd9ede68d149de4bb44b1c19f02b", "size": "7597", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bc/plot_bc.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "262454" }, { "name": "Shell", "bytes": "5544" } ], "symlink_target": "" }
from django.conf import settings from django.contrib.sites.models import Site from django.core.validators import MinValueValidator, MaxValueValidator from django.db import models from django.utils.translation import ugettext_lazy as _ class Sitepath(models.Model): TYPE = { 'page': _('page'), 'image': _('image'), } FREQUENCY = { 'always': _('always'), 'hourly': _('hourly'), 'daily': _('daily'), 'weekly': _('weekly'), 'monthly': _('monthly'), 'yearly': _('yearly'), 'never': _('never'), } location = models.CharField(max_length=200) type = models.CharField(max_length=7, default=TYPE['page'], choices=[(k,v) for k,v in TYPE.iteritems()]) site = models.ForeignKey(Site, default=settings.SITE_ID) referrer = models.CharField(max_length=200, blank=True, default='') frequency = models.CharField(max_length=7, default=FREQUENCY['never'], choices=[(k,v) for k,v in FREQUENCY.iteritems()]) priority = models.FloatField(default=0.1, validators=[MinValueValidator(0.0), MaxValueValidator(1.0)]) modified = models.DateField(blank=True, null=True) publish = models.BooleanField() sitemap = models.BooleanField() def __unicode__(self): return u"%s" % self.location
{ "content_hash": "2934fd0cb066e318511eb2b21aec4a60", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 106, "avg_line_length": 34.578947368421055, "alnum_prop": 0.6354642313546424, "repo_name": "StuartMacKay/django-sitepaths", "id": "8904d71b211a5d136fed22d2a114a0e8fef15806", "size": "1361", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sitepaths/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "5392" } ], "symlink_target": "" }
import pymongo import sys # Connect to the database. connection = pymongo.MongoClient("mongodb://localhost") db = connection.students grades = db.grades # Find IDs and scores for homework documents. hw_query = {"type": "homework"} selector = {"student_id":1, "score": 1} homework = grades.find(hw_query, selector) # Sort by student_id and then by homework. homework = homework.sort( [ ("student_id", pymongo.ASCENDING), ("score", pymongo.DESCENDING) ] ) # Remove minima using the hint. last_doc = homework[0] last_student_id = last_doc["student_id"] for doc in homework: student_id = doc["student_id"] # When the student_id changes between entries, we know the previous item was a minimum. if last_student_id != student_id: try: print "Removing ", last_doc grades.remove(last_doc) except: print "Exception while removing ", last_doc last_doc = doc last_student_id = student_id # The last element in the sorted list is a minimum that is undetected by the hint's strategy. try: print "Removing ", last_doc grades.remove(last_doc) except: print "Exception while removing ", last_doc
{ "content_hash": "3c5281e1dd60dff362fb23afb8f8e74c", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 96, "avg_line_length": 27.585365853658537, "alnum_prop": 0.7064544650751547, "repo_name": "ronaldsmartin/m101p", "id": "309e8262e080939210181cb3131f3f3138a16a07", "size": "1131", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chapter_2_crud.3da6a46d8250/homework_2_2/hw02.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "259" }, { "name": "Python", "bytes": "102643" } ], "symlink_target": "" }
from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('contenttypes', '0002_remove_content_type_name'), ] operations = [ migrations.CreateModel( name='Action', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('verb', models.CharField(max_length=255)), ('target_id', models.PositiveIntegerField(blank=True, db_index=True, null=True)), ('created', models.DateTimeField(auto_now_add=True, db_index=True)), ('target_ct', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='target_obj', to='contenttypes.ContentType')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actions', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ('-created',), }, ), ]
{ "content_hash": "3ab851080ad10d7c2a192005fad109c0", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 175, "avg_line_length": 40.36666666666667, "alnum_prop": 0.611065235342692, "repo_name": "delitamakanda/BukkakeGramNew", "id": "c042ce5a2e7e3050f55da2953befe9990a839444", "size": "1261", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "actions/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "848" }, { "name": "HTML", "bytes": "15873" }, { "name": "JavaScript", "bytes": "1053" }, { "name": "Python", "bytes": "25706" } ], "symlink_target": "" }
import json import unittest from cryptography.hazmat.primitives.asymmetric.rsa import ( RSAPrivateKey, RSAPublicKey, ) from jwt.exceptions import MalformedJWKError, UnsupportedKeyTypeError from jwt.jwk import ( OctetJWK, RSAJWK, jwk_from_dict, jwk_from_pem, jwk_from_der, jwk_from_bytes_argument_conversion, ) from pytest import raises, warns from .helper import load_testdata def test_jwk_from_pem(): jwk_priv = jwk_from_pem(load_testdata('rsa_privkey.pem')) assert isinstance(jwk_priv, RSAJWK) assert isinstance(jwk_priv.keyobj, RSAPrivateKey) def test_jwk_from_dict(): jwk_priv = jwk_from_dict( json.loads(load_testdata('rsa_privkey.json', 'r'))) assert isinstance(jwk_priv, RSAJWK) assert isinstance(jwk_priv.keyobj, RSAPrivateKey) def test_jwk_from_dict_malformed_kty(): json_priv = json.loads(load_testdata('rsa_privkey.json', 'r')) del json_priv['kty'] with raises(MalformedJWKError): jwk_from_dict(json_priv) def test_jwk_from_dict_unsupported_kty(): json_priv = json.loads(load_testdata('rsa_privkey.json', 'r')) json_priv['kty'] = 'unknown' with raises(UnsupportedKeyTypeError): jwk_from_dict(json_priv) def test_jwk_from_bytes_argument_conversion_confusing_name(): with raises(Exception) as ex: @jwk_from_bytes_argument_conversion def confusing(): # pylint: disable=unused-variable # pragma: no cover pass assert ("the wrapped function must have either public" " or private in it's name" in str(ex)) def test_jwk_from_unsupported_pem(): with raises(UnsupportedKeyTypeError): jwk_from_pem(load_testdata('dsa_privkey.pem')) def test_jwk_from_pem_not_deserializable(): with raises(UnsupportedKeyTypeError): jwk_from_pem(b'') def test_jwk_from_der(): jwk_priv = jwk_from_der(load_testdata('rsa_privkey.der')) assert isinstance(jwk_priv, RSAJWK) assert isinstance(jwk_priv.keyobj, RSAPrivateKey) class OctetJWKTest(unittest.TestCase): def setUp(self): self.key_json = json.loads(load_testdata('oct.json', 'r')) self.inst = OctetJWK.from_dict(self.key_json) def test_get_kty(self): self.assertEqual(self.inst.get_kty(), 'oct') def test_get_kid(self): self.assertEqual( self.inst.get_kid(), 'HMAC key used in JWS A.1 example') def test_is_sign_key(self): self.assertTrue(self.inst.is_sign_key()) def test_to_dict(self): self.assertEqual(self.inst.to_dict(public_only=False), self.key_json) def test_from_dict_missing_k(self): key_json = self.key_json.copy() del key_json['k'] with raises(MalformedJWKError): OctetJWK.from_dict(key_json) class RSAJWKTest(unittest.TestCase): def setUp(self): self.privkey_pem = load_testdata('rsa_privkey.pem') self.inst_priv = jwk_from_pem(self.privkey_pem) self.pubkey_pem = load_testdata('rsa_pubkey.pem') self.inst_pub = jwk_from_pem(self.pubkey_pem) self.privkey_json = json.loads( load_testdata('rsa_privkey.json', 'r')) self.privkey_full_json = json.loads( load_testdata('rsa_privkey_full.json', 'r')) self.pubkey_json = json.loads( load_testdata('rsa_pubkey.json', 'r')) def test_is_sign_key(self): self.assertTrue(self.inst_priv.is_sign_key()) self.assertFalse(self.inst_pub.is_sign_key()) def test_get_kty(self): self.assertEqual(self.inst_priv.get_kty(), 'RSA') self.assertEqual(self.inst_pub.get_kty(), 'RSA') def test_to_dict_pub(self): self.assertEqual( self.inst_pub.to_dict(public_only=False), self.pubkey_json) def test_to_dict_priv(self): self.assertEqual( self.inst_priv.to_dict(public_only=False), self.privkey_full_json) def test_to_dict_pubonly(self): self.assertEqual( self.inst_priv.to_dict(public_only=True), self.inst_pub.to_dict()) def test_from_dict_pub(self): inst = RSAJWK.from_dict(self.pubkey_json) self.assertIsInstance(inst, RSAJWK) self.assertIsInstance(inst.keyobj, RSAPublicKey) self.assertEqual(inst.to_dict(public_only=False), self.pubkey_json) def test_from_dict_priv_full(self): inst = RSAJWK.from_dict(self.privkey_full_json) self.assertIsInstance(inst, RSAJWK) self.assertIsInstance(inst.keyobj, RSAPrivateKey) self.assertEqual( inst.to_dict(public_only=False), self.privkey_full_json) def test_from_dict_priv_oth_unsupported(self): with raises(UnsupportedKeyTypeError): _json = self.privkey_full_json.copy() _json['oth'] = 'unsupported' RSAJWK.from_dict(_json) def test_from_dict_priv_malformed_e(self): with raises(MalformedJWKError): _json = self.privkey_full_json.copy() del _json['e'] RSAJWK.from_dict(_json) def test_from_dict_priv_malformed_q(self): with raises(MalformedJWKError): _json = self.privkey_full_json.copy() del _json['q'] RSAJWK.from_dict(_json) def test_verify_invalid(self): from cryptography.hazmat.primitives.hashes import SHA256 inst = RSAJWK.from_dict(self.privkey_full_json) with warns(UserWarning): assert not inst.verify(b'hello everyone', b'', hash_fun=SHA256)
{ "content_hash": "6bc2aeb58f23329642e5eeb52b5d63c6", "timestamp": "", "source": "github", "line_count": 182, "max_line_length": 79, "avg_line_length": 30.45054945054945, "alnum_prop": 0.6429086972212198, "repo_name": "GehirnInc/python-jwt", "id": "a6e2876b971a1625861a575c3279f18f7c626d8b", "size": "6144", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jwt/tests/test_jwk.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "55534" } ], "symlink_target": "" }
"""Auto-generated file, do not edit by hand. BS metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_BS = PhoneMetadata(id='BS', country_code=None, international_prefix=None, general_desc=PhoneNumberDesc(national_number_pattern='9\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)), toll_free=PhoneNumberDesc(), premium_rate=PhoneNumberDesc(), emergency=PhoneNumberDesc(national_number_pattern='91[19]', possible_number_pattern='\\d{3}', example_number='911', possible_length=(3,)), short_code=PhoneNumberDesc(national_number_pattern='91[19]', possible_number_pattern='\\d{3}', example_number='911', possible_length=(3,)), standard_rate=PhoneNumberDesc(), carrier_specific=PhoneNumberDesc(), short_data=True)
{ "content_hash": "8daa5f245c4858212e5a401028efae70", "timestamp": "", "source": "github", "line_count": 12, "max_line_length": 143, "avg_line_length": 66.83333333333333, "alnum_prop": 0.7331670822942643, "repo_name": "vicky2135/lucious", "id": "3f7f893c2e2dbd9310e45352f625433127ecf325", "size": "802", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_BS.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "896683" }, { "name": "C++", "bytes": "52230" }, { "name": "CSS", "bytes": "1169533" }, { "name": "HTML", "bytes": "1104983" }, { "name": "JavaScript", "bytes": "1055140" }, { "name": "Makefile", "bytes": "145238" }, { "name": "Python", "bytes": "55993261" }, { "name": "Shell", "bytes": "40487" } ], "symlink_target": "" }
import os import sys import codecs import optparse from Danpass import DanPASS SCRIPT = '[danpass_parser.py]: ' parser = optparse.OptionParser() parser.add_option('-o', '--output', dest="fout", default="danpass_out", ) parser.add_option('-c', '--corpus', dest="fin", default='../dp_mono/processed/test', ) options, remainder = parser.parse_args() try: assert os.path.isdir(options.fin) == True except AssertionError: print("Unable to access directory.") sys.exit('Terminate.') path = os.path.abspath(options.fin) monologues = DanPASS(path, os.path.join(path, options.fout)) monologues.printKaldiData(os.path.join(path, 'danpass_kaldi')) # TODO: #
{ "content_hash": "c3d078c1c3c29cd5d426d2ba463c1c5a", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 62, "avg_line_length": 24.15625, "alnum_prop": 0.610608020698577, "repo_name": "dresen/praat", "id": "0ff62a8b1374f6616c51acea76d80d5b94074841", "size": "773", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/kaldi_parser.py", "mode": "33188", "license": "mit", "language": [ { "name": "Papyrus", "bytes": "4130" }, { "name": "Python", "bytes": "66335" } ], "symlink_target": "" }
from django.conf import settings from django.core.files.storage import default_storage as storage from django.db.models import Q from django_statsd.clients import statsd from elasticsearch_dsl import filter as es_filter from elasticsearch_dsl import function as es_function from elasticsearch_dsl import query, Search from PIL import Image from rest_framework import generics, response, status, viewsets from rest_framework.exceptions import ParseError from rest_framework.filters import BaseFilterBackend, OrderingFilter from rest_framework.views import APIView import mkt import mkt.feed.constants as feed from mkt.api.authentication import (RestAnonymousAuthentication, RestOAuthAuthentication, RestSharedSecretAuthentication) from mkt.api.authorization import AllowReadOnly, AnyOf, GroupPermission from mkt.api.base import CORSMixin, MarketplaceView, SlugOrIdMixin from mkt.api.paginator import ESPaginator from mkt.collections.views import CollectionImageViewSet from mkt.constants.applications import DEVICE_LOOKUP from mkt.developers.tasks import pngcrush_image from mkt.feed.indexers import FeedItemIndexer from mkt.operators.authorization import OperatorShelfAuthorization from mkt.webapps.indexers import WebappIndexer from mkt.webapps.models import Webapp from .authorization import FeedAuthorization from .fields import ImageURLField from .models import FeedApp, FeedBrand, FeedCollection, FeedItem, FeedShelf from .serializers import (FeedAppESSerializer, FeedAppSerializer, FeedBrandESSerializer, FeedBrandSerializer, FeedCollectionESSerializer, FeedCollectionSerializer, FeedItemESSerializer, FeedItemSerializer, FeedShelfESSerializer, FeedShelfSerializer) class ImageURLUploadMixin(viewsets.ModelViewSet): """ Attaches pre/post save methods for image handling. The pre_save downloads an image from a URL and validates. The post_save saves the image in feed element's directory. We look at the class' `image_fields` property for the list of tuples to check. The tuples are the names of the the image form name, the hash field, and a suffix to append to the image file name:: image_fields = ('background_image_upload_url', 'image_hash', '') """ def pre_save(self, obj): """Download and validate image URL.""" for image_field, hash_field, suffix in self.image_fields: if self.request.DATA.get(image_field): img, hash_ = ImageURLField().from_native( self.request.DATA[image_field]) # Store img for `post_save` where we have access to the pk so # we can save img in appropriate directory. setattr(obj, '_%s' % image_field, img) setattr(obj, hash_field, hash_) elif hasattr(obj, 'type') and obj.type == feed.COLLECTION_PROMO: # Remove background images for promo collections. setattr(obj, hash_field, None) return super(ImageURLUploadMixin, self).pre_save(obj) def post_save(self, obj, created=True): """Store image that we attached to the obj in pre_save.""" for image_field, hash_field, suffix in self.image_fields: image = getattr(obj, '_%s' % image_field, None) if image: i = Image.open(image) path = obj.image_path(suffix) with storage.open(path, 'wb') as f: i.save(f, 'png') pngcrush_image.delay(path, set_modified_on=[obj]) return super(ImageURLUploadMixin, self).post_save(obj, created) class BaseFeedCollectionViewSet(CORSMixin, SlugOrIdMixin, MarketplaceView, ImageURLUploadMixin): """ Base viewset for subclasses of BaseFeedCollection. """ serializer_class = None queryset = None cors_allowed_methods = ('get', 'post', 'delete', 'patch', 'put') permission_classes = [FeedAuthorization] authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication, RestAnonymousAuthentication] exceptions = { 'doesnt_exist': 'One or more of the specified `apps` do not exist.' } image_fields = (('background_image_upload_url', 'image_hash', ''),) def list(self, request, *args, **kwargs): page = self.paginate_queryset( self.filter_queryset(self.get_queryset())) serializer = self.get_pagination_serializer(page) return response.Response(serializer.data) def set_apps(self, obj, apps): if apps: try: obj.set_apps(apps) except Webapp.DoesNotExist: raise ParseError(detail=self.exceptions['doesnt_exist']) def create(self, request, *args, **kwargs): apps = request.DATA.pop('apps', []) serializer = self.get_serializer(data=request.DATA, files=request.FILES) if serializer.is_valid(): self.pre_save(serializer.object) self.object = serializer.save(force_insert=True) self.set_apps(self.object, apps) self.post_save(self.object, created=True) headers = self.get_success_headers(serializer.data) return response.Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return response.Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def update(self, request, *args, **kwargs): apps = request.DATA.pop('apps', []) self.set_apps(self.get_object(), apps) ret = super(BaseFeedCollectionViewSet, self).update( request, *args, **kwargs) return ret class RegionCarrierFilter(BaseFilterBackend): def filter_queryset(self, request, qs, view): q = request.QUERY_PARAMS # Filter for only the region if specified. if q.get('region') and q.get('region') in mkt.regions.REGIONS_DICT: region_id = mkt.regions.REGIONS_DICT[q['region']].id qs = qs.filter(region=region_id) # Exclude feed items that specify carrier but do not match carrier. if q.get('carrier') and q.get('carrier') in mkt.carriers.CARRIER_MAP: carrier = mkt.carriers.CARRIER_MAP[q['carrier']].id qs = qs.exclude(~Q(carrier=carrier), carrier__isnull=False) return qs class FeedItemViewSet(CORSMixin, viewsets.ModelViewSet): """ A viewset for the FeedItem class, which wraps all items that live on the feed. """ authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication, RestAnonymousAuthentication] permission_classes = [AnyOf(AllowReadOnly, GroupPermission('Feed', 'Curate'))] filter_backends = (OrderingFilter, RegionCarrierFilter) queryset = FeedItem.objects.no_cache().all() cors_allowed_methods = ('get', 'delete', 'post', 'put', 'patch') serializer_class = FeedItemSerializer class FeedBuilderView(CORSMixin, APIView): authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication] permission_classes = [GroupPermission('Feed', 'Curate')] cors_allowed_methods = ('put',) def put(self, request, *args, **kwargs): """ For each region in the object: Deletes all of the (carrier-less) FeedItems in the region. Batch create all of the FeedItems in order for each region. -- feed - object of regions that point to a list of feed element IDs (as well as their type) . { 'us': [ ['app', 36L], ['app', 42L], ['collection', 12L], ['brand', 12L] ] } """ regions = [mkt.regions.REGIONS_DICT[region].id for region in request.DATA.keys()] FeedItem.objects.filter( carrier=None, region__in=regions).delete() feed_items = [] for region, feed_elements in request.DATA.items(): for order, feed_element in enumerate(feed_elements): try: item_type, item_id = feed_element except ValueError: return response.Response( 'Expected two-element arrays.', status=status.HTTP_400_BAD_REQUEST) feed_item = { 'region': mkt.regions.REGIONS_DICT[region].id, 'order': order, 'item_type': item_type, } feed_item[item_type + '_id'] = item_id feed_items.append(FeedItem(**feed_item)) FeedItem.objects.bulk_create(feed_items) # Index the feed items created. bulk_create doesn't call save or # post_save so get the IDs manually. feed_item_ids = list(FeedItem.objects.filter(region__in=regions) .values_list('id', flat=True)) FeedItem.get_indexer().index_ids(feed_item_ids, no_delay=True) return response.Response(status=status.HTTP_201_CREATED) class FeedAppViewSet(CORSMixin, MarketplaceView, SlugOrIdMixin, ImageURLUploadMixin): """ A viewset for the FeedApp class, which highlights a single app and some additional metadata (e.g. a review or a screenshot). """ authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication, RestAnonymousAuthentication] permission_classes = [AnyOf(AllowReadOnly, GroupPermission('Feed', 'Curate'))] filter_backends = (OrderingFilter,) queryset = FeedApp.objects.all() cors_allowed_methods = ('get', 'delete', 'post', 'put', 'patch') serializer_class = FeedAppSerializer image_fields = (('background_image_upload_url', 'image_hash', ''),) def list(self, request, *args, **kwargs): page = self.paginate_queryset( self.filter_queryset(self.get_queryset())) serializer = self.get_pagination_serializer(page) return response.Response(serializer.data) class FeedBrandViewSet(BaseFeedCollectionViewSet): """ A viewset for the FeedBrand class, a type of collection that allows editors to quickly create content without involving localizers. """ queryset = FeedBrand.objects.all() serializer_class = FeedBrandSerializer class FeedCollectionViewSet(BaseFeedCollectionViewSet): """ A viewset for the FeedCollection class. """ queryset = FeedCollection.objects.all() serializer_class = FeedCollectionSerializer def set_apps_grouped(self, obj, apps): if apps: try: obj.set_apps_grouped(apps) except Webapp.DoesNotExist: raise ParseError(detail=self.exceptions['doesnt_exist']) def set_apps(self, obj, apps): """ Attempt to set the apps via the superclass, catching and handling the TypeError raised if the apps are passed in a grouped manner. """ try: super(FeedCollectionViewSet, self).set_apps(obj, apps) except TypeError: self.set_apps_grouped(obj, apps) class FeedShelfViewSet(BaseFeedCollectionViewSet): """ A viewset for the FeedShelf class. """ queryset = FeedShelf.objects.all() serializer_class = FeedShelfSerializer permission_classes = [AnyOf(OperatorShelfAuthorization, *BaseFeedCollectionViewSet.permission_classes)] image_fields = ( ('background_image_upload_url', 'image_hash', ''), ('background_image_landing_upload_url', 'image_landing_hash', '_landing'), ) class FeedShelfPublishView(CORSMixin, APIView): """ put -- creates a FeedItem for a FeedShelf with respective carrier/region pair. Deletes any currently existing FeedItems with the carrier/region pair to effectively "unpublish" it since only one shelf can be toggled at a time for a carrier/region. delete -- deletes the FeedItem for a FeedShelf with respective carrier/region. """ authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication] permission_classes = [GroupPermission('Feed', 'Curate')] cors_allowed_methods = ('delete', 'put',) def get_object(self, pk): if pk.isdigit(): return FeedShelf.objects.get(pk=pk) else: return FeedShelf.objects.get(slug=pk) def put(self, request, *args, **kwargs): try: shelf = self.get_object(self.kwargs['pk']) except FeedShelf.DoesNotExist: return response.Response(status=status.HTTP_404_NOT_FOUND) feed_item_kwargs = { 'item_type': feed.FEED_TYPE_SHELF, 'carrier': shelf.carrier, 'region': shelf.region } FeedItem.objects.filter(**feed_item_kwargs).delete() feed_item = FeedItem.objects.create(shelf_id=shelf.id, **feed_item_kwargs) # Return. return response.Response(FeedItemSerializer(feed_item).data, status=status.HTTP_201_CREATED) def delete(self, request, *args, **kwargs): try: shelf = self.get_object(self.kwargs['pk']) except FeedShelf.DoesNotExist: return response.Response(status=status.HTTP_404_NOT_FOUND) feed_item_kwargs = { 'item_type': feed.FEED_TYPE_SHELF, 'carrier': shelf.carrier, 'region': shelf.region } FeedItem.objects.filter(**feed_item_kwargs).delete() # Return. return response.Response(status=status.HTTP_204_NO_CONTENT) class FeedAppImageViewSet(CollectionImageViewSet): queryset = FeedApp.objects.all() class FeedCollectionImageViewSet(CollectionImageViewSet): queryset = FeedCollection.objects.all() class FeedShelfImageViewSet(CollectionImageViewSet): queryset = FeedShelf.objects.all() class FeedShelfLandingImageViewSet(CollectionImageViewSet): queryset = FeedShelf.objects.all() hash_field = 'image_landing_hash' image_suffix = '_landing' class BaseFeedESView(CORSMixin, APIView): def __init__(self, *args, **kw): self.ITEM_TYPES = { 'apps': feed.FEED_TYPE_APP, 'brands': feed.FEED_TYPE_BRAND, 'collections': feed.FEED_TYPE_COLL, 'shelves': feed.FEED_TYPE_SHELF, } self.PLURAL_TYPES = dict((v, k) for k, v in self.ITEM_TYPES.items()) self.SERIALIZERS = { feed.FEED_TYPE_APP: FeedAppESSerializer, feed.FEED_TYPE_BRAND: FeedBrandESSerializer, feed.FEED_TYPE_COLL: FeedCollectionESSerializer, feed.FEED_TYPE_SHELF: FeedShelfESSerializer, } self.INDICES = { feed.FEED_TYPE_APP: settings.ES_INDEXES['mkt_feed_app'], feed.FEED_TYPE_BRAND: settings.ES_INDEXES['mkt_feed_brand'], feed.FEED_TYPE_COLL: settings.ES_INDEXES['mkt_feed_collection'], feed.FEED_TYPE_SHELF: settings.ES_INDEXES['mkt_feed_shelf'], } super(BaseFeedESView, self).__init__(*args, **kw) def get_feed_element_index(self): """Return a list of index to query all at once.""" return [ settings.ES_INDEXES['mkt_feed_app'], settings.ES_INDEXES['mkt_feed_brand'], settings.ES_INDEXES['mkt_feed_collection'], settings.ES_INDEXES['mkt_feed_shelf'] ] def get_app_ids(self, feed_element): """Get a single feed element's app IDs.""" if hasattr(feed_element, 'app'): return [feed_element.app] return feed_element.apps def get_app_ids_all(self, feed_elements): """From a list of feed_elements, return a list of app IDs.""" app_ids = [] for elm in feed_elements: app_ids += self.get_app_ids(elm) return app_ids def get_apps(self, request, app_ids): """ Takes a list of app_ids. Gets the apps, including filters. Returns an app_map for serializer context. """ if request.QUERY_PARAMS.get('filtering', '1') == '0': # Without filtering. sq = WebappIndexer.search().filter(es_filter.Bool( should=[es_filter.Terms(id=app_ids)] ))[0:len(app_ids)] else: # With filtering. sq = WebappIndexer.get_app_filter(request, { 'device': self._get_device(request) }, app_ids=app_ids) # Store the apps to attach to feed elements later. apps = sq.execute().hits return dict((app.id, app) for app in apps) def filter_feed_items(self, request, feed_items): """ Removes feed items from the feed if they do not meet some requirements like app count. """ for feed_item in feed_items: item_type = feed_item['item_type'] feed_item[item_type] = self.filter_feed_element( request, feed_item[item_type], item_type) # Filter out feed elements that did not pass the filters. return filter(lambda item: item[item['item_type']], feed_items) def filter_feed_element(self, request, feed_element, item_type): """ If a feed element does not have enough apps, return None. Else return the feed element. """ if request.QUERY_PARAMS.get('filtering', '1') == '0': # Without filtering return feed_element # No empty collections. if 'app_count' in feed_element and feed_element['app_count'] == 0: return None # If the app of a featured app was filtered out. if item_type == feed.FEED_TYPE_APP and not feed_element['app']: return None # Enforce minimum apps on collections. if (item_type == feed.FEED_TYPE_COLL and feed_element['app_count'] < feed.MIN_APPS_COLLECTION): return None return feed_element def _get_device(self, request): """ Return device ID for ES to filter by (or None). Fireplace sends `dev` and `device`. See the API docs for more info. When `dev` is 'android' we also need to check `device` to pick a device object. """ dev = request.QUERY_PARAMS.get('dev') device = request.QUERY_PARAMS.get('device') if dev == 'android' and device: dev = '%s-%s' % (dev, device) return getattr(DEVICE_LOOKUP.get(dev), 'id', None) class FeedElementSearchView(BaseFeedESView): """ Search view for the Curation Tools. Returns an object keyed by feed element type ('apps', 'brands', 'collections'). """ authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication] permission_classes = [GroupPermission('Feed', 'Curate')] cors_allowed_methods = ('get',) def _phrase(self, q): return { 'query': q, 'type': 'phrase', 'slop': 2, } def get(self, request, *args, **kwargs): q = request.GET.get('q') # Make search. queries = [ query.Q('match', slug=self._phrase(q)), # Slug. query.Q('match', type=self._phrase(q)), # Type. query.Q('match', search_names=self._phrase(q)), # Name. query.Q('prefix', carrier=q), # Shelf carrier. query.Q('term', region=q) # Shelf region. ] sq = query.Bool(should=queries) # Search. res = {'apps': [], 'brands': [], 'collections': [], 'shelves': []} es = Search(using=FeedItemIndexer.get_es(), index=self.get_feed_element_index()) feed_elements = es.query(sq).execute().hits if not feed_elements: return response.Response(res, status=status.HTTP_404_NOT_FOUND) # Deserialize. ctx = {'app_map': self.get_apps(request, self.get_app_ids_all(feed_elements)), 'request': request} for feed_element in feed_elements: item_type = feed_element.item_type serializer = self.SERIALIZERS[item_type] data = serializer(feed_element, context=ctx).data res[self.PLURAL_TYPES[item_type]].append(data) # Return. return response.Response(res, status=status.HTTP_200_OK) class FeedView(MarketplaceView, BaseFeedESView, generics.GenericAPIView): """ THE feed view. It hits ES with: - a weighted function score query to get feed items - a filter to deserialize feed elements - a filter to deserialize apps """ authentication_classes = [] cors_allowed_methods = ('get',) paginator_class = ESPaginator permission_classes = [] def get_es_feed_query(self, sq, region=mkt.regions.RESTOFWORLD.id, carrier=None, original_region=None): """ Build ES query for feed. Must match region. Orders by FeedItem.order. Boosted operator shelf matching region + carrier. Boosted operator shelf matching original_region + carrier. region -- region ID (integer) carrier -- carrier ID (integer) original_region -- region from before we were falling back, to keep the original shelf atop the RoW feed. """ region_filter = es_filter.Term(region=region) shelf_filter = es_filter.Term(item_type=feed.FEED_TYPE_SHELF) ordering_fn = es_function.FieldValueFactor( field='order', modifier='reciprocal', filter=es_filter.Bool(must=[region_filter], must_not=[shelf_filter])) boost_fn = es_function.BoostFactor(value=10000.0, filter=shelf_filter) if carrier is None: return sq.query('function_score', functions=[ordering_fn], filter=region_filter) # Must match region. # But also include the original region if we falling back to RoW. # The only original region feed item that will be included is a shelf # else we wouldn't be falling back in the first place. region_filters = [region_filter] if original_region: region_filters.append(es_filter.Term(region=original_region)) return sq.query( 'function_score', functions=[boost_fn, ordering_fn], filter=es_filter.Bool( should=region_filters, must_not=[es_filter.Bool( must=[shelf_filter], must_not=[es_filter.Term(carrier=carrier)])]) ) def get_es_feed_element_query(self, sq, feed_items): """ From a list of FeedItems with normalized feed element IDs, return an ES query that fetches the feed elements for each feed item. """ filters = [] for feed_item in feed_items: item_type = feed_item['item_type'] filters.append(es_filter.Bool( must=[es_filter.Term(id=feed_item[item_type]), es_filter.Term(item_type=item_type)])) return sq.filter(es_filter.Bool(should=filters))[0:len(feed_items)] def _check_empty_feed(self, items, rest_of_world): """ Return -1 if feed is empty and we are already falling back to RoW. Return 0 if feed is empty and we are not falling back to RoW yet. Return 1 if at least one feed item and the only feed item is not shelf. """ if not items or (len(items) == 1 and items[0].get('shelf')): # Empty feed. if rest_of_world: return -1 return 0 return 1 def _handle_empty_feed(self, empty_feed_code, region, request, args, kwargs): """ If feed is empty, this method handles appropriately what to return. If empty_feed_code == 0: try to fallback to RoW. If empty_feed_code == -1: 404. """ if empty_feed_code == 0: return self._get(request, rest_of_world=True, original_region=region, *args, **kwargs) return response.Response(status=status.HTTP_404_NOT_FOUND) def _get(self, request, rest_of_world=False, original_region=None, *args, **kwargs): es = FeedItemIndexer.get_es() # Parse region. if rest_of_world: region = mkt.regions.RESTOFWORLD.id else: region = request.REGION.id # Parse carrier. carrier = None q = request.QUERY_PARAMS if q.get('carrier') and q['carrier'] in mkt.carriers.CARRIER_MAP: carrier = mkt.carriers.CARRIER_MAP[q['carrier']].id # Fetch FeedItems. sq = self.get_es_feed_query(FeedItemIndexer.search(using=es), region=region, carrier=carrier, original_region=original_region) feed_items = self.paginate_queryset(sq) feed_ok = self._check_empty_feed(feed_items, rest_of_world) if feed_ok != 1: return self._handle_empty_feed(feed_ok, region, request, args, kwargs) # Build the meta object. meta = mkt.api.paginator.CustomPaginationSerializer( feed_items, context={'request': request}).data['meta'] # Set up serializer context. feed_element_map = { feed.FEED_TYPE_APP: {}, feed.FEED_TYPE_BRAND: {}, feed.FEED_TYPE_COLL: {}, feed.FEED_TYPE_SHELF: {}, } # Fetch feed elements to attach to FeedItems later. apps = [] sq = self.get_es_feed_element_query( Search(using=es, index=self.get_feed_element_index()), feed_items) for feed_elm in sq.execute().hits: # Store the feed elements to attach to FeedItems later. feed_element_map[feed_elm['item_type']][feed_elm['id']] = feed_elm # Store the apps to retrieve later. apps += self.get_app_ids(feed_elm) # Fetch apps to attach to feed elements later. app_map = self.get_apps(request, apps) # Super serialize. feed_items = FeedItemESSerializer(feed_items, many=True, context={ 'app_map': app_map, 'feed_element_map': feed_element_map, 'request': request }).data # Filter excluded apps. If there are feed items that have all their # apps excluded, they will be removed from the feed. feed_items = self.filter_feed_items(request, feed_items) feed_ok = self._check_empty_feed(feed_items, rest_of_world) if feed_ok != 1: return self._handle_empty_feed(feed_ok, region, request, args, kwargs) return response.Response({'meta': meta, 'objects': feed_items}, status=status.HTTP_200_OK) def get(self, request, *args, **kwargs): with statsd.timer('mkt.feed.view'): return self._get(request, *args, **kwargs) class FeedElementGetView(BaseFeedESView): """ Fetches individual feed elements from ES. Detail views. """ authentication_classes = [] permission_classes = [] cors_allowed_methods = ('get',) def get_feed_element_filter(self, sq, item_type, slug): """Matches a single feed element.""" bool_filter = es_filter.Bool(must=[ es_filter.Term(item_type=item_type), es_filter.Term(**{'slug.raw': slug}) ]) return sq.filter(bool_filter) def get(self, request, item_type, slug, **kwargs): item_type = self.ITEM_TYPES[item_type] # Hit ES. sq = self.get_feed_element_filter( Search(using=FeedItemIndexer.get_es(), index=self.INDICES[item_type]), item_type, slug) try: feed_element = sq.execute().hits[0] except IndexError: return response.Response(status=status.HTTP_404_NOT_FOUND) # Deserialize. data = self.SERIALIZERS[item_type](feed_element, context={ 'app_map': self.get_apps(request, self.get_app_ids(feed_element)), 'request': request }).data # Limit if necessary. limit = request.GET.get('limit') if limit and limit.isdigit() and 'apps' in data: data['apps'] = data['apps'][:int(limit)] return response.Response(data, status=status.HTTP_200_OK) class FeedElementListView(BaseFeedESView, MarketplaceView, generics.GenericAPIView): """ Fetches the five most recent of a feed element type for Curation Tools. With pagination. """ authentication_classes = [RestOAuthAuthentication, RestSharedSecretAuthentication] permission_classes = [GroupPermission('Feed', 'Curate')] cors_allowed_methods = ('get',) paginator_class = ESPaginator def get_recent_feed_elements(self, sq): """Matches all sorted by recent.""" return sq.sort('-created').query(query.MatchAll()) def get(self, request, item_type, **kwargs): item_type = self.ITEM_TYPES[item_type] # Hit ES. sq = self.get_recent_feed_elements( Search(using=FeedItemIndexer.get_es(), index=self.INDICES[item_type])) feed_elements = self.paginate_queryset(sq) if not feed_elements: return response.Response({'objects': []}, status=status.HTTP_404_NOT_FOUND) # Deserialize. Manually use pagination serializer because this view # uses multiple serializers. meta = mkt.api.paginator.CustomPaginationSerializer( feed_elements, context={'request': request}).data['meta'] objects = self.SERIALIZERS[item_type](feed_elements, context={ 'app_map': self.get_apps(request, self.get_app_ids_all(feed_elements)), 'request': request }, many=True).data return response.Response({'meta': meta, 'objects': objects}, status=status.HTTP_200_OK)
{ "content_hash": "4c05b2f44365e55892e9b5d5282e3c2f", "timestamp": "", "source": "github", "line_count": 803, "max_line_length": 79, "avg_line_length": 38.742216687422165, "alnum_prop": 0.594439087110254, "repo_name": "ngokevin/zamboni", "id": "eefac669c0ce73e4d7b0f0aeee3a2a6054870ba7", "size": "31110", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mkt/feed/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "356777" }, { "name": "JavaScript", "bytes": "536388" }, { "name": "Python", "bytes": "3883015" }, { "name": "Shell", "bytes": "13597" } ], "symlink_target": "" }
import abc import torch import numpy from .. import mean_functions from .. import parameter class GPModel(torch.nn.Module): """ A base class for Gaussian process models, that is, those of the form .. math:: :nowrap: \\begin{align} \\theta & \sim p(\\theta) \\\\ f & \sim \\mathcal{GP}(m(x), k(x, x'; \\theta)) \\\\ f_i & = f(x_i) \\\\ y_i\,|\,f_i & \sim p(y_i|f_i) \\end{align} This class mostly adds functionality to compile predictions. To use it, inheriting classes must define a build_predict function, which computes the means and variances of the latent function. This gets compiled similarly to build_likelihood in the Model class. These predictions are then pushed through the likelihood to obtain means and variances of held out data, self.predict_y. The predictions can also be used to compute the (log) density of held-out data via self.predict_density. For handling another data (Xnew, Ynew), set the new value to self.X and self.Y >>> m.X = Xnew >>> m.Y = Ynew """ def __init__(self, X, Y, kern, likelihood, mean_function, name=None, jitter_level=1e-6): super(GPModel, self).__init__() self.name = name self.mean_function = mean_function or mean_functions.Zero() self.kern = kern self.likelihood = likelihood self.jitter_level = jitter_level if isinstance(X, numpy.ndarray): # X is a data matrix; each row represents one instance X = torch.from_numpy(X) if isinstance(Y, numpy.ndarray): # Y is a data matrix, rows correspond to the rows in X, # columns are treated independently Y = torch.from_numpy(Y) self.X, self.Y = X, Y @abc.abstractmethod def compute_log_prior(self): """Compute the log prior of the model.""" pass @abc.abstractmethod def compute_log_likelihood(self, X=None, Y=None): """Compute the log likelihood of the model.""" pass def objective(self, X=None, Y=None): pos_objective = self.compute_log_likelihood(X, Y) for param in self.parameters(): if isinstance(param, parameter.ParamWithPrior): pos_objective = pos_objective + param.get_prior() return -pos_objective def forward(self, X=None, Y=None): return self.objective(X, Y) @abc.abstractmethod def predict_f(self, Xnew, full_cov=False): """ Compute the mean and variance of the latent function(s) at the points Xnew. """ pass #return self._build_predict(Xnew) # @autoflow((settings.tf_float, [None, None])) def predict_f_full_cov(self, Xnew): """ Compute the mean and covariance matrix of the latent function(s) at the points Xnew. """ return self.predict_f(Xnew, full_cov=True) def predict_f_samples(self, Xnew, num_samples): """ Produce samples from the posterior latent function(s) at the points Xnew. """ mu, var = self.predict_f(Xnew, full_cov=True) jitter = torch.eye(mu.size(0), dtype=mu.dtype, device=mu.device) * self.jitter_level samples = [] for i in range(self.num_latent): # TV-Todo: batch?? L = torch.cholesky(var[:, :, i] + jitter, upper=False) V = torch.randn(L.size(0), num_samples, dtype=L.dtype, device=L.device) samples.append(mu[:, i:i + 1] + torch.matmul(L, V)) return torch.stack(samples, dim=0) # TV-Todo: transpose? def predict_y(self, Xnew): """ Compute the mean and variance of held-out data at the points Xnew """ pred_f_mean, pred_f_var = self.predict_f(Xnew) return self.likelihood.predict_mean_and_var(pred_f_mean, pred_f_var) def predict_density(self, Xnew, Ynew): """ Compute the (log) density of the data Ynew at the points Xnew Note that this computes the log density of the data individually, ignoring correlations between them. The result is a matrix the same shape as Ynew containing the log densities. """ pred_f_mean, pred_f_var = self.predict_f(Xnew) return self.likelihood.predict_density(pred_f_mean, pred_f_var, Ynew) def _repr_html_(self): s = 'Model {}<ul>'.format(type(self).__name__) for n,c in self.named_children(): s += '<li>{}: {}</li>'.format(n, type(c).__name__) s += '</ul><table><tr><th>Parameter</th><th>Value</th><th>Prior</th><th>ParamType</th></tr><tr><td>' s += '</td></tr><tr><td>'.join(['</td><td>'.join((n,str(p.get().data.cpu().numpy()),str(p.prior),type(p).__name__)) for n,p in self.named_parameters()]) s += '</td></tr></table>' return s
{ "content_hash": "3059c6fc6640865aae99ce35966a04c4", "timestamp": "", "source": "github", "line_count": 131, "max_line_length": 160, "avg_line_length": 37.29007633587786, "alnum_prop": 0.5946775844421699, "repo_name": "t-vi/candlegp", "id": "f29635c58b447a89960b25d379f710e5c4504c48", "size": "5558", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "candlegp/models/model.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "1552737" }, { "name": "Python", "bytes": "102602" } ], "symlink_target": "" }
from .calls import Call, FuncArg from .error import ShortException from .symbolic import strip_symbolic class FunctionLookupError(ShortException): pass class FunctionLookupBound: def __init__(self, msg): self.msg = msg def __call__(self, *args, **kwargs): raise NotImplementedError(self.msg) class CallVisitor: """ A node visitor base class that walks the call tree and calls a visitor function for every node found. This function may return a value which is forwarded by the `visit` method. Note: essentially a copy of ast.NodeVisitor """ def visit(self, node): """Visit a node.""" method = 'visit_' + node.func visitor = getattr(self, method, self.generic_visit) return visitor(node) def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" node.map_subcalls(self.visit) @classmethod def quick_visitor(cls, visit_dict, node): """Class method to quickly define and run a custom visitor.""" qv = type('QuickVisitor', cls, visit_dict) qv.visit(node) class CallListener: """Generic listener. Each exit is called on a node's copy.""" def enter(self, node): method = 'enter_' + node.func f_enter = getattr(self, method, self.generic_enter) return f_enter(node) def exit(self, node): method = 'exit_' + node.func f_exit = getattr(self, method, self.generic_exit) return f_exit(node) def generic_enter(self, node): args, kwargs = node.map_subcalls(self.enter) return self.exit(node.__class__(node.func, *args, **kwargs)) def generic_exit(self, node): return node def enter_if_call(self, x): if isinstance(x, Call): return self.enter(x) return x def visit(self, node): return self.enter(node) def get_attr_chain(node, max_n): # TODO: need to make custom calls their own Call class, then will not have to # do these kinds of checks, since a __call__ will always be on a Call obj if not isinstance(node, Call): return [], node out = [] ttl_n = 0 crnt_node = node while ttl_n < max_n: if crnt_node.func != "__getattr__": break obj, attr = crnt_node.args out.append(attr) ttl_n += 1 crnt_node = obj return list(reversed(out)), crnt_node from inspect import isclass, isfunction from typing import get_type_hints from .utils import is_dispatch_func_subtype class CallTreeLocal(CallListener): def __init__( self, local, call_sub_attr = None, chain_sub_attr = False, dispatch_cls = None, result_cls = None, call_props = None ): """ Parameters ---------- local : mapping A dictionary mapping func_name: func, used to replace call expressions. call_sub_attr : set, optional A set of attributes signaling any subattributes are property methods. Eg. {'dt'} to signify in _.dt.year, year is a property call. chain_sub_attr : bool Whether to included the attributes in the above argument, when looking up up a replacement for the property call. E.g. does local have a 'dt.year' entry. dispatch_cls : class If custom calls are dispatchers, dispatch on this class. If none, use their name to try and get their corresponding local function. result_cls : class If custom calls are dispatchers, require their result annotation to be a subclass of this class. call_props : set, sequence Property methods to potentially convert to local calls. """ self.local = local self.call_sub_attr = set(call_sub_attr or []) self.chain_sub_attr = chain_sub_attr self.dispatch_cls = dispatch_cls self.result_cls = result_cls self.call_props = set(call_props or []) def translate(self, expr): """Return the translation of an expression. This method is meant to be a high-level entrypoint. """ # note that by contract, don't need to strip symbolic return self.enter(strip_symbolic(expr)) def dispatch_local(self, name): f_local = self.local[name] if isinstance(f_local, FunctionLookupBound): raise FunctionLookupError(f_local.msg) return f_local def create_local_call(self, name, prev_obj, cls, func_args = None, func_kwargs = None): # need call attr name (arg[0].args[1]) # need call arg and kwargs func_args = tuple() if func_args is None else func_args func_kwargs = {} if func_kwargs is None else func_kwargs try: local_func = self.dispatch_local(name) except KeyError as err: raise FunctionLookupError("Missing translation for function call: %s"% name) return cls( "__call__", FuncArg(local_func), prev_obj, *func_args, **func_kwargs ) def enter(self, node): # if no enter metthod for operators, like __invert__, try to get from local # TODO: want to only do this if func is the name of an infix op's method method = 'enter_' + node.func if not hasattr(self, method) and node.func in self.local: args, kwargs = node.map_subcalls(self.enter) return self.create_local_call(node.func, args[0], Call, args[1:], kwargs) return super().enter(node) def enter___getattr__(self, node): obj, attr = node.args # Note that the conversion of attribute chains to calls is similar # to that in the __call__ visit. But in the code below we're handling # optional cases where attribute chains might have property methods, # which may (optionally) be converted to calls. attr_chain, target = get_attr_chain(node, 2) if len(attr_chain) == 2 and attr_chain[0] in self.call_sub_attr: # convert subattributes to calls, e.g. dt.days -> dt.days() # TODO: should always call exit? if self.chain_sub_attr: # use chained attribute to look up local function instead # e.g. `dt.round`(), rather than `round`() attr = ".".join(attr_chain) return self.create_local_call(attr, target, Call) elif attr in self.call_props: return self.create_local_call(attr, obj, Call) return self.generic_enter(node) def enter___call__(self, node): """ Overview: variables _.x.method(1) row_number(_.x, 1) --------- ------------- -------------------- █─'__call__' █─'__call__' obj ├─█─. ├─<function row_number │ ├─█─. ├─█─. │ │ ├─_ │ ├─_ │ │ └─'x' │ └─'x' │ └─'method' │ └─1 └─1 """ obj, *rest = node.args args = tuple(self.enter_if_call(child) for child in rest) kwargs = {k: self.enter_if_call(child) for k, child in node.kwargs.items()} attr_chain, target = get_attr_chain(obj, max_n = 2) if attr_chain: # want _.x.method() -> method(_.x), need to transform if len(attr_chain) == 2 and attr_chain[0] in self.call_sub_attr: # e.g. _.dt.round() call_name = ".".join(attr_chain) if self.chain_sub_attr else attr_chain[-1] entered_target = self.enter_if_call(target) else: call_name = attr_chain[-1] entered_target = self.enter_if_call(obj.args[0]) else: # default to generic enter return self.generic_enter(node) return self.create_local_call( call_name, entered_target, node.__class__, args, kwargs ) class ExecutionValidatorVisitor(CallListener): # MC-NOTE TOTEST: # * dispatch input parent validates # * dispatch output child validates def __init__( self, dispatch_cls = None, result_cls = None, ): """ Parameters ---------- dispatch_cls : class If custom calls are dispatchers, dispatch on this class. If none, use their name to try and get their corresponding local function. result_cls : class If custom calls are dispatchers, require their result annotation to be a subclass of this class. """ self.dispatch_cls = dispatch_cls self.result_cls = result_cls def validate_dispatcher(self, dispatcher, strict=True): f_concrete = dispatcher.dispatch(self.dispatch_cls) if isinstance(f_concrete, FunctionLookupBound) and strict: raise FunctionLookupError(f_concrete.msg) if isclass(f_concrete) and issubclass(f_concrete, Exception): raise f_concrete return f_concrete @staticmethod def is_dispatcher(f): # TODO: this is essentially a protocol return hasattr(f, 'registry') and hasattr(f, 'dispatch') def enter___custom_func__(self, node): func = node(None) if self.is_dispatcher(func) and self.dispatch_cls is not None: # allow custom functions that dispatch on dispatch_cls f = self.validate_dispatcher(func) if (self.result_cls is None or is_dispatch_func_subtype(f, self.dispatch_cls, self.result_cls) ): # TODO: MC-NOTE: recreates old behavior, as a temporary step toward codata return self.exit(FuncArg(func.dispatch(self.dispatch_cls))) raise FunctionLookupError( "External function {name} can dispatch on the class {dispatch_cls}, but " "must also have result annotation of (sub)type {result_cls}" .format( name = func.__name__, dispatch_cls = self.dispatch_cls, result_cls = self.result_cls ) ) return self.generic_enter(node) class CodataVisitor(ExecutionValidatorVisitor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.codata_instance = self.dispatch_cls() def exit___call__(self, node): if isinstance(node.args[0], FuncArg): return Call( "__call__", node.args[0], self.codata_instance, *node.args[1:], **node.kwargs ) return node
{ "content_hash": "3ba6d3c2d9990de7b023f2e5915d851f", "timestamp": "", "source": "github", "line_count": 324, "max_line_length": 97, "avg_line_length": 35.4320987654321, "alnum_prop": 0.5401567944250871, "repo_name": "machow/siuba", "id": "5e6da15032a270022602505de074be569dfe031b", "size": "11642", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "siuba/siu/visitors.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1007" }, { "name": "Python", "bytes": "573788" } ], "symlink_target": "" }
from remoteobjects import fields, dataobject class Referencive(dataobject.DataObject): related = fields.Object('Related') other = fields.Object('OtherRelated') class Related(dataobject.DataObject): pass class OtherRelated(dataobject.DataObject): pass
{ "content_hash": "01886ee91926ad71260c24c7dce5b3c8", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 44, "avg_line_length": 24.727272727272727, "alnum_prop": 0.7610294117647058, "repo_name": "alex/remoteobjects", "id": "33bfbce646674bb7101eccc888faf3eeefc8d607", "size": "1809", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/extra_dataobject.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "156682" } ], "symlink_target": "" }
""" DocuSign REST API The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501 OpenAPI spec version: v2.1 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from docusign_esign.client.configuration import Configuration class EnvelopeAttachmentsResult(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'attachments': 'list[EnvelopeAttachment]' } attribute_map = { 'attachments': 'attachments' } def __init__(self, _configuration=None, **kwargs): # noqa: E501 """EnvelopeAttachmentsResult - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._attachments = None self.discriminator = None setattr(self, "_{}".format('attachments'), kwargs.get('attachments', None)) @property def attachments(self): """Gets the attachments of this EnvelopeAttachmentsResult. # noqa: E501 # noqa: E501 :return: The attachments of this EnvelopeAttachmentsResult. # noqa: E501 :rtype: list[EnvelopeAttachment] """ return self._attachments @attachments.setter def attachments(self, attachments): """Sets the attachments of this EnvelopeAttachmentsResult. # noqa: E501 :param attachments: The attachments of this EnvelopeAttachmentsResult. # noqa: E501 :type: list[EnvelopeAttachment] """ self._attachments = attachments def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(EnvelopeAttachmentsResult, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, EnvelopeAttachmentsResult): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, EnvelopeAttachmentsResult): return True return self.to_dict() != other.to_dict()
{ "content_hash": "c2b87517b024eac48167ebfcff1d7b36", "timestamp": "", "source": "github", "line_count": 122, "max_line_length": 140, "avg_line_length": 30.524590163934427, "alnum_prop": 0.5829752953813104, "repo_name": "docusign/docusign-python-client", "id": "71dfef27321288b59853385626c81e69e4817d9d", "size": "3741", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docusign_esign/models/envelope_attachments_result.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "9687716" } ], "symlink_target": "" }
"""Representing missing value for a field.""" from typing import Any from pyglove.core.object_utils import common_traits class MissingValue(common_traits.Formattable): """Value placeholder for an unassigned attribute.""" def format(self, *args, **kwargs): # pytype: disable=signature-mismatch return 'MISSING_VALUE' def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __eq__(self, other: Any) -> bool: return isinstance(other, MissingValue) def __hash__(self) -> int: return hash(MissingValue.__module__ + MissingValue.__name__) # A shortcut global object (constant) for referencing MissingValue. MISSING_VALUE = MissingValue()
{ "content_hash": "339019a4b08ab10b1369972575b6d8d8", "timestamp": "", "source": "github", "line_count": 24, "max_line_length": 74, "avg_line_length": 28.5, "alnum_prop": 0.7002923976608187, "repo_name": "google/pyglove", "id": "b9cbc2dd53e25f0c1a23fefddfc87945fb0cd8b7", "size": "1268", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "pyglove/core/object_utils/missing.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1796188" } ], "symlink_target": "" }
''' This file contains classes and functions that implement the PyPXE HTTP service ''' import socket import struct import os import threading import logging from pypxe import helpers class HTTPD: ''' This class implements a HTTP Server, limited to GET and HEAD, from RFC2616, RFC7230. ''' def __init__(self, **server_settings): self.ip = server_settings.get('ip', '0.0.0.0') self.port = int(server_settings.get('port', 80)) self.netboot_directory = server_settings.get('netboot_directory', '.') self.mode_verbose = server_settings.get('mode_verbose', False) # verbose mode self.mode_debug = server_settings.get('mode_debug', False) # debug mode self.logger = server_settings.get('logger', None) # setup logger if self.logger == None: self.logger = logging.getLogger('HTTP') handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s') handler.setFormatter(formatter) self.logger.addHandler(handler) if self.mode_debug: self.logger.setLevel(logging.DEBUG) elif self.mode_verbose: self.logger.setLevel(logging.INFO) else: self.logger.setLevel(logging.WARN) self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.sock.bind((self.ip, self.port)) self.sock.listen(1) self.logger.debug('NOTICE: HTTP server started in debug mode. HTTP server is using the following:') self.logger.info('Server IP: {0}'.format(self.ip)) self.logger.info('Server Port: {0}'.format(self.port)) self.logger.info('Network Boot Directory: {0}'.format(self.netboot_directory)) def handle_request(self, connection, addr): '''This method handles HTTP request.''' request = connection.recv(1024) self.logger.debug('Received message from {addr}'.format(addr = repr(addr))) self.logger.debug('<--BEGIN MESSAGE-->') self.logger.debug('{0}'.format(repr(request))) self.logger.debug('<--END MESSAGE-->') method, target, version = request.decode('ascii').split('\r\n')[0].split(' ') target = target.lstrip('/') try: self.logger.debug("Netboot: {0}, Target: {1}".format(self.netboot_directory, target)) target = helpers.normalize_path(self.netboot_directory, target) if not os.path.lexists(target) or not os.path.isfile(target): status = '404 Not Found' elif method not in ('GET', 'HEAD'): status = '501 Not Implemented' else: status = '200 OK' except helpers.PathTraversalException: status = '403 Forbidden' response = 'HTTP/1.1 {0}\r\n'.format(status) if status[:3] != '200': # fail out connection.send(response.encode('ascii')) connection.close() self.logger.warn('Sending {status} to {addr[0]}:{addr[1]} for {target}'.format(status = status, target = target, addr = addr)) self.logger.debug('Sending message to {0}'.format(repr(addr))) self.logger.debug('<--BEING MESSAGE-->') self.logger.debug('{0}'.format(repr(response))) self.logger.debug('<--END MESSAGE-->') return response += 'Content-Length: {0}\r\n'.format(os.path.getsize(target)) response += '\r\n' if method == 'HEAD': connection.send(response) connection.close() self.logger.debug('Sending message to {0}'.format(repr(addr))) self.logger.debug('<--BEING MESSAGE-->') self.logger.debug('{0}'.format(repr(response))) self.logger.debug('<--END MESSAGE-->') return connection.send(response.encode('ascii')) with open(target, 'rb') as handle: while True: data = handle.read(8192) if not data: break connection.send(data) connection.close() self.logger.info('File Sent - {target} -> {addr[0]}:{addr[1]}'.format(target = target, addr = addr)) def listen(self): '''This method is the main loop that listens for requests.''' while True: conn, addr = self.sock.accept() client = threading.Thread(target = self.handle_request, args = (conn, addr)) client.daemon = True; client.start()
{ "content_hash": "466d30bbdbf5cff21b4872e39a7ed70b", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 138, "avg_line_length": 42.629629629629626, "alnum_prop": 0.5892701998262381, "repo_name": "psychomario/PyPXE", "id": "1dfa483cb1849d09de5806eeb7afe6eda19d051c", "size": "4604", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pypxe/http.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "66815" } ], "symlink_target": "" }
"""Placeholder objects replaced with real GIMP objects when calling GIMP PDB procedures during export. The following placeholder objects are defined: * `PLACEHOLDERS['current_image']` - Represents the image currently being processed. * `PLACEHOLDERS['current_layer']` - Represents the layer currently being processed in the current image. This placeholder is currently also used for PDB procedures containing `gimp.Drawable` or `gimp.Item` parameters. """ from __future__ import absolute_import, division, print_function, unicode_literals from future.builtins import * import gimpenums from export_layers import pygimplib as pg from export_layers.gui import placeholders as gui_placeholders class _GimpObjectPlaceholder(object): def __init__(self, display_name, replacement_func): self._display_name = display_name self._replacement_func = replacement_func @property def display_name(self): return self._display_name def replace_args(self, *args): return self._replacement_func(*args) def _get_current_image(image, layer, layer_exporter): return image def _get_current_layer(image, layer, layer_exporter): return layer _PLACEHOLDERS = { 'current_image': _GimpObjectPlaceholder(_('Current Image'), _get_current_image), 'current_layer': _GimpObjectPlaceholder(_('Current Layer'), _get_current_layer), } def get_replaced_arg(arg, image, layer, layer_exporter): """ If `arg` is a placeholder object, return a real object replacing the placeholder. Otherwise, return `arg`. Arguments after `args` are required arguments for actions and are used to determine the real object that replaces the placeholder. """ try: placeholder = _PLACEHOLDERS[arg] except (KeyError, TypeError): return arg else: return placeholder.replace_args(image, layer, layer_exporter) def get_replaced_args_and_kwargs(func_args, func_kwargs, image, layer, layer_exporter): """ Return arguments and keyword arguments for a function whose placeholder objects are replaced with real objects. Arguments after `func_kwargs` are required arguments for actions and are used to determine the real object that replaces the placeholder. """ new_func_args = [ get_replaced_arg(arg, image, layer, layer_exporter) for arg in func_args] new_func_kwargs = { name: get_replaced_arg(value, image, layer, layer_exporter) for name, value in func_kwargs.items()} return new_func_args, new_func_kwargs #=============================================================================== class PlaceholderSetting(pg.setting.Setting): _ALLOWED_GUI_TYPES = [gui_placeholders.GimpObjectPlaceholdersComboBoxPresenter] _ALLOWED_PLACEHOLDERS = [] @classmethod def get_allowed_placeholder_names(cls): """ Return a list of allowed names of placeholders for this setting class. """ return list(cls._ALLOWED_PLACEHOLDERS) @classmethod def get_allowed_placeholders(cls): """ Return a list of allowed placeholder objects for this setting class. """ return [ placeholder for placeholder_name, placeholder in _PLACEHOLDERS.items() if placeholder_name in cls._ALLOWED_PLACEHOLDERS] def _init_error_messages(self): self.error_messages['invalid_value'] = _('Invalid placeholder.') def _validate(self, value): if value not in self._ALLOWED_PLACEHOLDERS: raise pg.setting.SettingValueError( pg.setting.value_to_str_prefix(value) + self.error_messages['invalid_value']) class PlaceholderImageSetting(PlaceholderSetting): _DEFAULT_DEFAULT_VALUE = 'current_image' _ALLOWED_PLACEHOLDERS = ['current_image'] class PlaceholderDrawableSetting(PlaceholderSetting): _DEFAULT_DEFAULT_VALUE = 'current_layer' _ALLOWED_PLACEHOLDERS = ['current_layer'] class PlaceholderLayerSetting(PlaceholderSetting): _DEFAULT_DEFAULT_VALUE = 'current_layer' _ALLOWED_PLACEHOLDERS = ['current_layer'] class PlaceholderItemSetting(PlaceholderSetting): _DEFAULT_DEFAULT_VALUE = 'current_layer' _ALLOWED_PLACEHOLDERS = ['current_layer'] PDB_TYPES_TO_PLACEHOLDER_SETTING_TYPES_MAP = { gimpenums.PDB_IMAGE: PlaceholderImageSetting, gimpenums.PDB_ITEM: PlaceholderItemSetting, gimpenums.PDB_DRAWABLE: PlaceholderDrawableSetting, gimpenums.PDB_LAYER: PlaceholderLayerSetting, }
{ "content_hash": "5fe8d5d939487e6411670c98f2c3abb0", "timestamp": "", "source": "github", "line_count": 148, "max_line_length": 87, "avg_line_length": 29.39864864864865, "alnum_prop": 0.7228223396920248, "repo_name": "khalim19/gimp-plugin-export-layers", "id": "260fc81b77c9502f24e2477b60a8a321fc39c459", "size": "4376", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "export_layers/placeholders.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "2873599" }, { "name": "Shell", "bytes": "8320" } ], "symlink_target": "" }
from rest_framework.renderers import JSONRenderer from rest_framework_csv.renderers import CSVRenderer class KLPJSONRenderer(JSONRenderer): ''' Sub-classes JSONRenderer to render GeoJSON where appropriate. If the request contains a geometry=yes parameter, it converts features to GeoJSON ''' media_type = 'application/json' format = 'json' def render(self, data, media_type=None, renderer_context=None): #figure out whether we need to render geometry based on GET param render_geometry = renderer_context['request'].GET.get('geometry', 'no') status = renderer_context['response'].status_code #only try and fetch geometries if response status is 200 if render_geometry == 'yes' and status == 200: self.render_geometry = True else: self.render_geometry = False # if data is a list, that means pagination was turned off # with per_page=0 # then we first need to convert the list to a dict so that # we have same data structure: if isinstance(data, list): data = { 'features': data } #If the view is an "omni" view, we need to handle it differently is_omni = False if isinstance(data, dict): view = renderer_context['view'] if hasattr(view, 'is_omni') and view.is_omni: is_omni = True #if geometry=yes and results are a list, convert to geojson if self.render_geometry and 'features' in data and \ isinstance(data['features'], list): data['type'] = 'FeatureCollection' features = data.pop('features') data['features'] = [self.get_feature(elem) for elem in features] #if geometry=yes and is a single feature, convert data to geojson elif self.render_geometry and not is_omni: data = self.get_feature(data) elif self.render_geometry and is_omni: for key in data: arr = data[key] data[key] = {} data[key]['type'] = 'FeatureCollection' data[key]['features'] = [self.get_feature(elem) for elem in arr] #if geometry=no, just convert data as is to JSON else: pass return super(KLPJSONRenderer, self).render(data, media_type, renderer_context) def get_feature(self, elem): ''' Passed an element with properties, including a 'geometry' property, will convert it to GeoJSON format ''' #this should never be called if geometry=no if 'geometry' not in elem: geometry = {} else: geometry = elem.pop('geometry') feature = { 'type': 'Feature', 'geometry': geometry, 'properties': elem } return feature class KLPCSVRenderer(CSVRenderer): media_type = 'application/csv'
{ "content_hash": "ebfcf0b261478a0eb2deba36de098d8c", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 80, "avg_line_length": 35.03448275862069, "alnum_prop": 0.5787401574803149, "repo_name": "klpdotorg/dubdubdub", "id": "ad29f250aae48fe1c4f71fa1d2becb95d6adbfc1", "size": "3048", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "apps/common/renderers.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "478" }, { "name": "CSS", "bytes": "335110" }, { "name": "HTML", "bytes": "655218" }, { "name": "JavaScript", "bytes": "1941014" }, { "name": "PLpgSQL", "bytes": "156345" }, { "name": "Python", "bytes": "920256" }, { "name": "Shell", "bytes": "10544" } ], "symlink_target": "" }
import glob import os import subprocess import tempfile import mock import unittest2 as unittest # Django from django.conf import settings from django.core.urlresolvers import reverse # Django-CRUM from crum import impersonate # AWX from awx.main.utils import * # noqa from awx.main.models import * # noqa from awx.main.tests.base import BaseJobExecutionTest from awx.main.tests.data.ssh import ( TEST_SSH_KEY_DATA, TEST_SSH_KEY_DATA_LOCKED, TEST_SSH_KEY_DATA_UNLOCK, ) __all__ = ['RunAdHocCommandTest', 'AdHocCommandApiTest'] class BaseAdHocCommandTest(BaseJobExecutionTest): ''' Common initialization for testing ad hoc commands. ''' def setUp(self): with ignore_inventory_computed_fields(): super(BaseAdHocCommandTest, self).setUp() self.setup_instances() self.setup_users() self.organization = self.make_organizations(self.super_django_user, 1)[0] self.organization.admin_role.members.add(self.normal_django_user) self.inventory = self.organization.inventories.create(name='test-inventory', description='description for test-inventory') self.host = self.inventory.hosts.create(name='host.example.com') self.host2 = self.inventory.hosts.create(name='host2.example.com') self.group = self.inventory.groups.create(name='test-group') self.group2 = self.inventory.groups.create(name='test-group2') self.group.hosts.add(self.host) self.group2.hosts.add(self.host, self.host2) self.inventory2 = self.organization.inventories.create(name='test-inventory2') self.host3 = self.inventory2.hosts.create(name='host3.example.com') self.credential = None settings.INTERNAL_API_URL = self.live_server_url settings.CALLBACK_CONSUMER_PORT = '' def create_test_credential(self, **kwargs): self.credential = self.make_credential(**kwargs) return self.credential @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class RunAdHocCommandTest(BaseAdHocCommandTest): ''' Test cases for RunAdHocCommand celery task. ''' def create_test_ad_hoc_command(self, **kwargs): with impersonate(self.super_django_user): opts = { 'inventory': self.inventory, 'credential': self.credential, 'job_type': 'run', 'module_name': 'command', 'module_args': 'uptime', } opts.update(kwargs) self.ad_hoc_command = AdHocCommand.objects.create(**opts) return self.ad_hoc_command def check_ad_hoc_command_events(self, ad_hoc_command, runner_status='ok', hosts=None): ad_hoc_command_events = ad_hoc_command.ad_hoc_command_events.all() for ad_hoc_command_event in ad_hoc_command_events: unicode(ad_hoc_command_event) # For test coverage. should_be_failed = bool(runner_status not in ('ok', 'skipped')) should_be_changed = bool(runner_status in ('ok', 'failed') and ad_hoc_command.job_type == 'run') if hosts is not None: host_pks = set([x.pk for x in hosts]) else: host_pks = set(ad_hoc_command.inventory.hosts.values_list('pk', flat=True)) qs = ad_hoc_command_events.filter(event=('runner_on_%s' % runner_status)) self.assertEqual(qs.count(), len(host_pks)) for evt in qs: self.assertTrue(evt.host_id in host_pks) self.assertTrue(evt.host_name) self.assertEqual(evt.failed, should_be_failed) self.assertEqual(evt.changed, should_be_changed) def test_run_ad_hoc_command(self): ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.check_ad_hoc_command_events(ad_hoc_command, 'ok') def test_check_mode_ad_hoc_command(self): ad_hoc_command = self.create_test_ad_hoc_command(module_name='ping', module_args='', job_type='check') self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.check_ad_hoc_command_events(ad_hoc_command, 'ok') def test_run_ad_hoc_command_that_fails(self): ad_hoc_command = self.create_test_ad_hoc_command(module_args='false') self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'failed') self.check_ad_hoc_command_events(ad_hoc_command, 'failed') def test_check_mode_where_command_would_fail(self): ad_hoc_command = self.create_test_ad_hoc_command(job_type='check', module_args='false') self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.check_ad_hoc_command_events(ad_hoc_command, 'skipped') @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('canceled', 0)) def test_cancel_ad_hoc_command(self, ignore): ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.cancel_flag) self.assertFalse(ad_hoc_command.passwords_needed_to_start) ad_hoc_command.cancel_flag = True ad_hoc_command.save(update_fields=['cancel_flag']) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'canceled') self.assertTrue(ad_hoc_command.cancel_flag) # Calling cancel afterwards just returns the cancel flag. self.assertTrue(ad_hoc_command.cancel()) # Read attribute for test coverage. ad_hoc_command.celery_task ad_hoc_command.celery_task_id = '' ad_hoc_command.save(update_fields=['celery_task_id']) self.assertEqual(ad_hoc_command.celery_task, None) # Unable to start ad hoc command again. self.assertFalse(ad_hoc_command.signal_start()) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_ad_hoc_command_options(self, ignore): ad_hoc_command = self.create_test_ad_hoc_command(forks=2, verbosity=2) self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertTrue('"--forks=2"' in ad_hoc_command.job_args) self.assertTrue('"-vv"' in ad_hoc_command.job_args) # Test with basic become privilege escalation ad_hoc_command2 = self.create_test_ad_hoc_command(become_enabled=True) self.assertEqual(ad_hoc_command2.status, 'new') self.assertFalse(ad_hoc_command2.passwords_needed_to_start) self.assertTrue(ad_hoc_command2.signal_start()) ad_hoc_command2 = AdHocCommand.objects.get(pk=ad_hoc_command2.pk) self.check_job_result(ad_hoc_command2, ('successful', 'failed')) self.assertTrue('"--become"' in ad_hoc_command2.job_args) def test_limit_option(self): # Test limit by hostname. ad_hoc_command = self.create_test_ad_hoc_command(limit='host.example.com') self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.check_ad_hoc_command_events(ad_hoc_command, 'ok', hosts=[self.host]) self.assertTrue('"host.example.com"' in ad_hoc_command.job_args) # Test limit by group name. ad_hoc_command2 = self.create_test_ad_hoc_command(limit='test-group') self.assertEqual(ad_hoc_command2.status, 'new') self.assertFalse(ad_hoc_command2.passwords_needed_to_start) self.assertTrue(ad_hoc_command2.signal_start()) ad_hoc_command2 = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command2, 'successful') self.check_ad_hoc_command_events(ad_hoc_command2, 'ok', hosts=[self.host]) # Test limit by host not in inventory. ad_hoc_command3 = self.create_test_ad_hoc_command(limit='bad-host') self.assertEqual(ad_hoc_command3.status, 'new') self.assertFalse(ad_hoc_command3.passwords_needed_to_start) self.assertTrue(ad_hoc_command3.signal_start()) ad_hoc_command3 = AdHocCommand.objects.get(pk=ad_hoc_command3.pk) self.check_job_result(ad_hoc_command3, 'successful') self.check_ad_hoc_command_events(ad_hoc_command3, 'ok', hosts=[]) self.assertEqual(ad_hoc_command3.ad_hoc_command_events.count(), 0) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_ssh_username_and_password(self, ignore): self.create_test_credential(username='sshuser', password='sshpass') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertIn('"-u"', ad_hoc_command.job_args) self.assertIn('"--ask-pass"', ad_hoc_command.job_args) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_ssh_ask_password(self, ignore): self.create_test_credential(password='ASK') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertTrue(ad_hoc_command.passwords_needed_to_start) self.assertTrue('ssh_password' in ad_hoc_command.passwords_needed_to_start) self.assertFalse(ad_hoc_command.signal_start()) self.assertTrue(ad_hoc_command.signal_start(ssh_password='sshpass')) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertIn('"--ask-pass"', ad_hoc_command.job_args) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_sudo_username_and_password(self, ignore): self.create_test_credential(become_method="sudo", become_username='sudouser', become_password='sudopass') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, ('successful', 'failed')) self.assertIn('"--become-method"', ad_hoc_command.job_args) self.assertIn('"--become-user"', ad_hoc_command.job_args) self.assertIn('"--ask-become-pass"', ad_hoc_command.job_args) self.assertNotIn('"--become"', ad_hoc_command.job_args) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_sudo_ask_password(self, ignore): self.create_test_credential(become_password='ASK') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertTrue(ad_hoc_command.passwords_needed_to_start) self.assertTrue('become_password' in ad_hoc_command.passwords_needed_to_start) self.assertFalse(ad_hoc_command.signal_start()) self.assertTrue(ad_hoc_command.signal_start(become_password='sudopass')) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, ('successful', 'failed')) self.assertIn('"--ask-become-pass"', ad_hoc_command.job_args) self.assertNotIn('"--become-user"', ad_hoc_command.job_args) self.assertNotIn('"--become"', ad_hoc_command.job_args) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('successful', 0)) def test_unlocked_ssh_key(self, ignore): self.create_test_credential(ssh_key_data=TEST_SSH_KEY_DATA) ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertNotIn('"--private-key=', ad_hoc_command.job_args) self.assertIn('ssh-agent', ad_hoc_command.job_args) def test_locked_ssh_key_with_password(self): self.create_test_credential(ssh_key_data=TEST_SSH_KEY_DATA_LOCKED, ssh_key_unlock=TEST_SSH_KEY_DATA_UNLOCK) ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertIn('ssh-agent', ad_hoc_command.job_args) self.assertNotIn('Bad passphrase', ad_hoc_command.result_stdout) def test_locked_ssh_key_with_bad_password(self): self.create_test_credential(ssh_key_data=TEST_SSH_KEY_DATA_LOCKED, ssh_key_unlock='not the passphrase') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'failed') self.assertIn('ssh-agent', ad_hoc_command.job_args) self.assertIn('Bad passphrase', ad_hoc_command.result_stdout) def test_locked_ssh_key_ask_password(self): self.create_test_credential(ssh_key_data=TEST_SSH_KEY_DATA_LOCKED, ssh_key_unlock='ASK') ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertTrue(ad_hoc_command.passwords_needed_to_start) self.assertTrue('ssh_key_unlock' in ad_hoc_command.passwords_needed_to_start) self.assertFalse(ad_hoc_command.signal_start()) self.assertTrue(ad_hoc_command.signal_start(ssh_key_unlock='not it')) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'failed') self.assertTrue('ssh-agent' in ad_hoc_command.job_args) self.assertTrue('Bad passphrase' in ad_hoc_command.result_stdout) # Try again and pass correct password. ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertTrue(ad_hoc_command.passwords_needed_to_start) self.assertTrue('ssh_key_unlock' in ad_hoc_command.passwords_needed_to_start) self.assertFalse(ad_hoc_command.signal_start()) self.assertTrue(ad_hoc_command.signal_start(ssh_key_unlock=TEST_SSH_KEY_DATA_UNLOCK)) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.assertIn('ssh-agent', ad_hoc_command.job_args) self.assertNotIn('Bad passphrase', ad_hoc_command.result_stdout) def test_run_with_bubblewrap(self): # Only run test if bubblewrap is installed cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version'] try: proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.communicate() has_bubblewrap = bool(proc.returncode == 0) except (OSError, ValueError): has_bubblewrap = False if not has_bubblewrap: self.skipTest('bubblewrap is not installed') # Enable bubblewrap for this test. settings.AWX_PROOT_ENABLED = True # Hide local settings path. settings.AWX_PROOT_HIDE_PATHS = [os.path.join(settings.BASE_DIR, 'settings')] # Create list of paths that should not be visible to the command. hidden_paths = [ os.path.join(settings.PROJECTS_ROOT, '*'), os.path.join(settings.JOBOUTPUT_ROOT, '*'), ] # Create a temp directory that should not be visible to the command. temp_path = tempfile.mkdtemp() self._temp_paths.append(temp_path) hidden_paths.append(temp_path) # Find a file in supervisor logs that should not be visible. try: supervisor_log_path = glob.glob('/var/log/supervisor/*')[0] except IndexError: supervisor_log_path = None if supervisor_log_path: hidden_paths.append(supervisor_log_path) # Create and run ad hoc command. module_args = ' && '.join(['echo %s && test ! -e %s' % (x, x) for x in hidden_paths]) ad_hoc_command = self.create_test_ad_hoc_command(module_name='shell', module_args=module_args, verbosity=2) self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'successful') self.check_ad_hoc_command_events(ad_hoc_command, 'ok') @mock.patch('awx.main.tasks.BaseTask.run_pexpect', return_value=('failed', 0)) def test_run_with_bubblewrap_not_installed(self, ignore): # Enable bubblewrap for this test, specify invalid bubblewrap cmd. settings.AWX_PROOT_ENABLED = True settings.AWX_PROOT_CMD = 'PR00T' ad_hoc_command = self.create_test_ad_hoc_command() self.assertEqual(ad_hoc_command.status, 'new') self.assertFalse(ad_hoc_command.passwords_needed_to_start) self.assertTrue(ad_hoc_command.signal_start()) ad_hoc_command = AdHocCommand.objects.get(pk=ad_hoc_command.pk) self.check_job_result(ad_hoc_command, 'error', expect_traceback=True) def run_pexpect_mock(self, *args, **kwargs): return 'successful', 0 @unittest.skipIf(os.environ.get('SKIP_SLOW_TESTS', False), 'Skipping slow test') class AdHocCommandApiTest(BaseAdHocCommandTest): ''' Test API list/detail views for ad hoc commands. ''' def setUp(self): super(AdHocCommandApiTest, self).setUp() self.create_test_credential(user=self.normal_django_user) def run_test_ad_hoc_command(self, **kwargs): # Post to list to start a new ad hoc command. expect = kwargs.pop('expect', 201) url = kwargs.pop('url', reverse('api:ad_hoc_command_list')) data = { 'inventory': self.inventory.pk, 'credential': self.credential.pk, 'module_name': 'command', 'module_args': 'uptime', } data.update(kwargs) for k,v in data.items(): if v is None: del data[k] return self.post(url, data, expect=expect) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', side_effect=run_pexpect_mock) def test_ad_hoc_command_detail(self, ignore): with self.current_user('admin'): response1 = self.run_test_ad_hoc_command() response2 = self.run_test_ad_hoc_command() response3 = self.run_test_ad_hoc_command() # Retrieve detail for ad hoc command. Only GET is supported. with self.current_user('admin'): url = reverse('api:ad_hoc_command_detail', args=(response1['id'],)) self.assertEqual(url, response1['url']) response = self.get(url, expect=200) self.assertEqual(response['credential'], self.credential.pk) self.assertEqual(response['related']['credential'], reverse('api:credential_detail', args=(self.credential.pk,))) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['related']['inventory'], reverse('api:inventory_detail', args=(self.inventory.pk,))) self.assertTrue(response['related']['stdout']) self.assertTrue(response['related']['cancel']) self.assertTrue(response['related']['relaunch']) self.assertTrue(response['related']['events']) self.assertTrue(response['related']['activity_stream']) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=204) self.delete(url, expect=404) with self.current_user('normal'): url = reverse('api:ad_hoc_command_detail', args=(response2['id'],)) self.assertEqual(url, response2['url']) response = self.get(url, expect=200) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=204) self.delete(url, expect=404) url = reverse('api:ad_hoc_command_detail', args=(response3['id'],)) self.assertEqual(url, response3['url']) with self.current_user('other'): response = self.get(url, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=403) with self.current_user('nobody'): response = self.get(url, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=403) with self.current_user(None): response = self.get(url, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Verify that the credential and inventory are null when they have # been deleted, can delete an ad hoc command without inventory or # credential. self.credential.delete() self.inventory.delete() with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['credential'], None) self.assertEqual(response['inventory'], None) self.delete(url, expect=204) self.delete(url, expect=404) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', side_effect=run_pexpect_mock) def test_ad_hoc_command_cancel(self, ignore): # Override setting so that ad hoc command isn't actually started. with self.settings(CELERY_UNIT_TEST=False): with self.current_user('admin'): response = self.run_test_ad_hoc_command() # Retrieve the cancel URL, should indicate it can be canceled. url = reverse('api:ad_hoc_command_cancel', args=(response['id'],)) self.assertEqual(url, response['related']['cancel']) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['can_cancel'], True) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['can_cancel'], True) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Cancel ad hoc command (before it starts) and verify the can_cancel # flag is False and attempts to cancel again fail. with self.current_user('normal'): self.post(url, {}, expect=202) response = self.get(url, expect=200) self.assertEqual(response['can_cancel'], False) self.post(url, {}, expect=403) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['can_cancel'], False) self.post(url, {}, expect=405) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', side_effect=run_pexpect_mock) def test_ad_hoc_command_relaunch(self, ignore): with self.current_user('admin'): response = self.run_test_ad_hoc_command() # Retrieve the relaunch URL, should indicate no passwords are needed # and it can be relaunched. Relaunch and fetch the new command. url = reverse('api:ad_hoc_command_relaunch', args=(response['id'],)) self.assertEqual(url, response['related']['relaunch']) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=201) self.assertTrue(response['ad_hoc_command']) self.get(response['url'], expect=200) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=201) self.assertTrue(response['ad_hoc_command']) self.get(response['url'], expect=200) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Try to relaunch ad hoc command when module has been removed from # allowed list of modules. try: ad_hoc_commands = settings.AD_HOC_COMMANDS settings.AD_HOC_COMMANDS = [] with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=400) finally: settings.AD_HOC_COMMANDS = ad_hoc_commands # Try to relaunch after the inventory has been marked inactive. self.inventory.delete() with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['passwords_needed_to_start'], []) response = self.post(url, {}, expect=400) # Try to relaunch with expired license. with self.current_user('admin'): response = self.run_test_ad_hoc_command(inventory=self.inventory2.pk) self.create_expired_license_file() with self.current_user('admin'): self.post(response['related']['relaunch'], {}, expect=403) def test_ad_hoc_command_events_list(self): # TODO: Create test events instead of relying on playbooks execution with self.current_user('admin'): response = self.run_test_ad_hoc_command() response = self.run_test_ad_hoc_command() # Check list of ad hoc command events for a specific ad hoc command. ad_hoc_command_id = response['id'] url = reverse('api:ad_hoc_command_ad_hoc_command_events_list', args=(ad_hoc_command_id,)) self.assertEqual(url, response['related']['events']) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], self.inventory.hosts.count()) for result in response['results']: self.assertEqual(result['ad_hoc_command'], ad_hoc_command_id) self.assertTrue(result['id']) self.assertTrue(result['url']) self.assertEqual(result['event'], 'runner_on_ok') self.assertFalse(result['failed']) self.assertTrue(result['changed']) self.assertTrue(result['host'] in set(self.inventory.hosts.values_list('pk', flat=True))) self.assertTrue(result['host_name'] in set(self.inventory.hosts.values_list('name', flat=True))) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], self.inventory.hosts.count()) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Test top level ad hoc command events list. url = reverse('api:ad_hoc_command_event_list') with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], 2 * self.inventory.hosts.count()) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], 2 * self.inventory.hosts.count()) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): response = self.get(url, expect=200) self.assertEqual(response['count'], 0) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): response = self.get(url, expect=200) self.assertEqual(response['count'], 0) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) def test_ad_hoc_command_event_detail(self): # TODO: Mock pexpect. Create test events instead of relying on playbooks execution with self.current_user('admin'): response = self.run_test_ad_hoc_command() # Check ad hoc command event detail view. ad_hoc_command_event_ids = AdHocCommandEvent.objects.values_list('pk', flat=True) with self.current_user('admin'): for ahce_id in ad_hoc_command_event_ids: url = reverse('api:ad_hoc_command_event_detail', args=(ahce_id,)) response = self.get(url, expect=200) self.assertTrue(response['ad_hoc_command']) self.assertEqual(response['id'], ahce_id) self.assertEqual(response['url'], url) self.assertEqual(response['event'], 'runner_on_ok') self.assertFalse(response['failed']) self.assertTrue(response['changed']) self.assertTrue(response['host'] in set(self.inventory.hosts.values_list('pk', flat=True))) self.assertTrue(response['host_name'] in set(self.inventory.hosts.values_list('name', flat=True))) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): for ahce_id in ad_hoc_command_event_ids: url = reverse('api:ad_hoc_command_event_detail', args=(ahce_id,)) self.get(url, expect=200) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): for ahce_id in ad_hoc_command_event_ids: url = reverse('api:ad_hoc_command_event_detail', args=(ahce_id,)) self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): for ahce_id in ad_hoc_command_event_ids: url = reverse('api:ad_hoc_command_event_detail', args=(ahce_id,)) self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): for ahce_id in ad_hoc_command_event_ids: url = reverse('api:ad_hoc_command_event_detail', args=(ahce_id,)) self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) @mock.patch('awx.main.tasks.BaseTask.run_pexpect', side_effect=run_pexpect_mock) def test_ad_hoc_command_activity_stream(self, ignore): # TODO: Test non-enterprise license self.create_test_license_file() with self.current_user('admin'): response = self.run_test_ad_hoc_command() # Check activity stream for ad hoc command. There should only be one # entry when it was created; other changes made while running should # not show up. url = reverse('api:ad_hoc_command_activity_stream_list', args=(response['id'],)) self.assertEqual(url, response['related']['activity_stream']) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], 1) result = response['results'][0] self.assertTrue(result['id']) self.assertTrue(result['url']) self.assertEqual(result['operation'], 'create') self.assertTrue(result['changes']) self.assertTrue(result['timestamp']) self.assertEqual(result['object1'], 'ad_hoc_command') self.assertEqual(result['object2'], '') self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], 1) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) def test_host_ad_hoc_commands_list(self): # TODO: Figure out why this test needs pexpect with self.current_user('admin'): response = self.run_test_ad_hoc_command() response = self.run_test_ad_hoc_command(limit=self.host2.name) # Test the ad hoc commands list for a host. Should only return the ad # hoc command(s) run against that host. Posting should start a new ad # hoc command and always set the inventory and limit based on URL. url = reverse('api:host_ad_hoc_commands_list', args=(self.host.pk,)) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], 1) response = self.run_test_ad_hoc_command(url=url, inventory=None, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.host.name) response = self.run_test_ad_hoc_command(url=url, inventory=self.inventory2.pk, limit=self.host2.name, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.host.name) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], 3) response = self.run_test_ad_hoc_command(url=url, inventory=None, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.host.name) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Try to run with expired license. self.create_expired_license_file() with self.current_user('admin'): self.run_test_ad_hoc_command(url=url, expect=403) with self.current_user('normal'): self.run_test_ad_hoc_command(url=url, expect=403) def test_group_ad_hoc_commands_list(self): # TODO: Figure out why this test needs pexpect with self.current_user('admin'): response = self.run_test_ad_hoc_command() # self.host + self.host2 response = self.run_test_ad_hoc_command(limit=self.group.name) # self.host response = self.run_test_ad_hoc_command(limit=self.host2.name) # self.host2 # Test the ad hoc commands list for a group. Should return the ad # hoc command(s) run against any hosts in that group. Posting should # start a new ad hoc command and always set the inventory and limit # based on URL. url = reverse('api:group_ad_hoc_commands_list', args=(self.group.pk,)) # only self.host url2 = reverse('api:group_ad_hoc_commands_list', args=(self.group2.pk,)) # self.host + self.host2 with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], 2) response = self.get(url2, expect=200) self.assertEqual(response['count'], 3) response = self.run_test_ad_hoc_command(url=url, inventory=None, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.group.name) response = self.run_test_ad_hoc_command(url=url, inventory=self.inventory2.pk, limit=self.group2.name, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.group.name) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], 4) response = self.run_test_ad_hoc_command(url=url, inventory=None, expect=201) self.assertEqual(response['inventory'], self.inventory.pk) self.assertEqual(response['limit'], self.group.name) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=403) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401) # Try to run with expired license. self.create_expired_license_file() with self.current_user('admin'): self.run_test_ad_hoc_command(url=url, expect=403) with self.current_user('normal'): self.run_test_ad_hoc_command(url=url, expect=403) def test_host_ad_hoc_command_events_list(self): # TODO: Mock run_pexpect. Create test events instead of relying on playbooks execution with self.current_user('admin'): response = self.run_test_ad_hoc_command() # Test the ad hoc command events list for a host. Should return the # events only for that particular host. url = reverse('api:host_ad_hoc_command_events_list', args=(self.host.pk,)) with self.current_user('admin'): response = self.get(url, expect=200) self.assertEqual(response['count'], 1) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('normal'): response = self.get(url, expect=200) self.assertEqual(response['count'], 1) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('other'): self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user('nobody'): self.get(url, expect=403) self.post(url, {}, expect=405) self.put(url, {}, expect=405) self.patch(url, {}, expect=405) self.delete(url, expect=405) with self.current_user(None): self.get(url, expect=401) self.post(url, {}, expect=401) self.put(url, {}, expect=401) self.patch(url, {}, expect=401) self.delete(url, expect=401)
{ "content_hash": "f440958b90c997c536b66cdb6f2cfb29", "timestamp": "", "source": "github", "line_count": 957, "max_line_length": 134, "avg_line_length": 49.44827586206897, "alnum_prop": 0.6050674105067411, "repo_name": "snahelou/awx", "id": "8da0e33d244598df780e16f383d223ea107f108c", "size": "47390", "binary": false, "copies": "1", "ref": "refs/heads/devel", "path": "awx/main/tests/old/ad_hoc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "277672" }, { "name": "HTML", "bytes": "424349" }, { "name": "JavaScript", "bytes": "2903576" }, { "name": "Makefile", "bytes": "20443" }, { "name": "Nginx", "bytes": "2520" }, { "name": "PowerShell", "bytes": "6936" }, { "name": "Python", "bytes": "7328472" }, { "name": "Shell", "bytes": "1068" } ], "symlink_target": "" }
__all__ = ['restart', 'reload', 'wallpaper', 'theme'] from plugins.plasma import *
{ "content_hash": "2b409ab06a2fec98dd12499016ea5085", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 53, "avg_line_length": 41.5, "alnum_prop": 0.6385542168674698, "repo_name": "nielsvm/kde4-profiles", "id": "cafa572b0b0ec18af98fb40ee7a4f4f0a9475dd6", "size": "83", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plugins/plasma/__init__.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "15999" } ], "symlink_target": "" }
""" Define a family of algorithms, encapsulate each one, and make them interchangeable. Strategy lets the algorithm vary independently from clients that use it. """ import abc class Context: """ Define the interface of interest to clients. Maintain a reference to a Strategy object. """ def __init__(self, strategy): self._strategy = strategy def context_interface(self): self._strategy.algorithm_interface() class Strategy(metaclass=abc.ABCMeta): """ Declare an interface common to all supported algorithms. Context uses this interface to call the algorithm defined by a ConcreteStrategy. """ @abc.abstractmethod def algorithm_interface(self): pass class ConcreteStrategyA(Strategy): """ Implement the algorithm using the Strategy interface. """ def algorithm_interface(self): pass class ConcreteStrategyB(Strategy): """ Implement the algorithm using the Strategy interface. """ def algorithm_interface(self): pass def main(): concrete_strategy_a = ConcreteStrategyA() context = Context(concrete_strategy_a) context.context_interface() if __name__ == "__main__": main()
{ "content_hash": "829215e091e640ad91709f7075ddd33f", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 68, "avg_line_length": 20.466666666666665, "alnum_prop": 0.6742671009771987, "repo_name": "tcp813/mouTools", "id": "953e85fd2c74ba3d5279648a22e762cd4e533605", "size": "1228", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "design_patterns/strategy.py", "mode": "33188", "license": "mit", "language": [ { "name": "ASP", "bytes": "374" }, { "name": "Batchfile", "bytes": "553" }, { "name": "C", "bytes": "18596" }, { "name": "C#", "bytes": "300" }, { "name": "C++", "bytes": "13278" }, { "name": "CMake", "bytes": "530" }, { "name": "HTML", "bytes": "15189" }, { "name": "JavaScript", "bytes": "285" }, { "name": "Makefile", "bytes": "450" }, { "name": "Python", "bytes": "183617" }, { "name": "QMake", "bytes": "689" }, { "name": "Shell", "bytes": "159" } ], "symlink_target": "" }
from django.conf.urls import patterns, include, url from django.contrib import admin urlpatterns = patterns('', # Examples: # url(r'^$', 'newsnet.views.home', name='home'), # url(r'^blog/', include('blog.urls')), url(r'^admin/', include(admin.site.urls)), )
{ "content_hash": "010902a8ff169be0c2f787bfb412ce8a", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 52, "avg_line_length": 27.6, "alnum_prop": 0.6413043478260869, "repo_name": "mike10010100/newsnet", "id": "94b13d9254b83f30e36eac239c85ebc19e3151f6", "size": "276", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "newsnet/newsnet/urls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6317" } ], "symlink_target": "" }
from setuptools import setup, find_packages setup(name="django-smart-selects", version="1.2.3-dev", description="Django application to handle chained model fields.", author="Patrick Lauber", packages=find_packages(), include_package_data=True, )
{ "content_hash": "624556ee303259ae4f225e62988e1afc", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 71, "avg_line_length": 31.444444444444443, "alnum_prop": 0.6784452296819788, "repo_name": "johtso/django-smart-selects", "id": "3094eb322c97f387a63259ddffb389fd3bcfb2e5", "size": "283", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "7696" }, { "name": "Python", "bytes": "53529" } ], "symlink_target": "" }
""" parquet compat """ from __future__ import annotations import io import os from typing import ( Any, Literal, ) from warnings import catch_warnings from pandas._typing import ( FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas import ( DataFrame, MultiIndex, arrays, get_option, ) from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, is_fsspec_url, is_url, stringify_path, ) def get_engine(engine: str) -> BaseImpl: """return our implementation""" if engine == "auto": engine = get_option("io.parquet.engine") if engine == "auto": # try engines in this order engine_classes = [PyArrowImpl, FastParquetImpl] error_msgs = "" for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "A suitable version of " "pyarrow or fastparquet is required for parquet " "support.\n" "Trying to import the above resulted in these errors:" f"{error_msgs}" ) if engine == "pyarrow": return PyArrowImpl() elif engine == "fastparquet": return FastParquetImpl() raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") def _get_path_or_handle( path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], fs: Any, storage_options: StorageOptions = None, mode: str = "rb", is_dir: bool = False, ) -> tuple[ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any ]: """File handling for PyArrow.""" path_or_handle = stringify_path(path) if is_fsspec_url(path_or_handle) and fs is None: fsspec = import_optional_dependency("fsspec") fs, path_or_handle = fsspec.core.url_to_fs( path_or_handle, **(storage_options or {}) ) elif storage_options and (not is_url(path_or_handle) or mode != "rb"): # can't write to a remote url # without making use of fsspec at the moment raise ValueError("storage_options passed with buffer, or non-supported URL") handles = None if ( not fs and not is_dir and isinstance(path_or_handle, str) and not os.path.isdir(path_or_handle) ): # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories # this branch is used for example when reading from non-fsspec URLs handles = get_handle( path_or_handle, mode, is_text=False, storage_options=storage_options ) fs = None path_or_handle = handles.handle return path_or_handle, handles, fs class BaseImpl: @staticmethod def validate_dataframe(df: DataFrame) -> None: if not isinstance(df, DataFrame): raise ValueError("to_parquet only supports IO with DataFrames") # must have value column names for all index levels (strings only) if isinstance(df.columns, MultiIndex): if not all( x.inferred_type in {"string", "empty"} for x in df.columns.levels ): raise ValueError( """ parquet must have string column names for all values in each level of the MultiIndex """ ) else: if df.columns.inferred_type not in {"string", "empty"}: raise ValueError("parquet must have string column names") # index level names must be strings valid_names = all( isinstance(name, str) for name in df.index.names if name is not None ) if not valid_names: raise ValueError("Index level names must be strings") def write(self, df: DataFrame, path, compression, **kwargs): raise AbstractMethodError(self) def read(self, path, columns=None, **kwargs) -> DataFrame: raise AbstractMethodError(self) class PyArrowImpl(BaseImpl): def __init__(self) -> None: import_optional_dependency( "pyarrow", extra="pyarrow is required for parquet support." ) import pyarrow.parquet # import utils to register the pyarrow extension types import pandas.core.arrays.arrow.extension_types # pyright: ignore # noqa:F401 self.api = pyarrow def write( self, df: DataFrame, path: FilePath | WriteBuffer[bytes], compression: str | None = "snappy", index: bool | None = None, storage_options: StorageOptions = None, partition_cols: list[str] | None = None, **kwargs, ) -> None: self.validate_dataframe(df) from_pandas_kwargs: dict[str, Any] = {"schema": kwargs.pop("schema", None)} if index is not None: from_pandas_kwargs["preserve_index"] = index table = self.api.Table.from_pandas(df, **from_pandas_kwargs) path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle( path, kwargs.pop("filesystem", None), storage_options=storage_options, mode="wb", is_dir=partition_cols is not None, ) if ( isinstance(path_or_handle, io.BufferedWriter) and hasattr(path_or_handle, "name") and isinstance(path_or_handle.name, (str, bytes)) ): path_or_handle = path_or_handle.name if isinstance(path_or_handle, bytes): path_or_handle = path_or_handle.decode() try: if partition_cols is not None: # writes to multiple files under the given path self.api.parquet.write_to_dataset( table, path_or_handle, compression=compression, partition_cols=partition_cols, **kwargs, ) else: # write to single output file self.api.parquet.write_table( table, path_or_handle, compression=compression, **kwargs ) finally: if handles is not None: handles.close() def read( self, path, columns=None, use_nullable_dtypes: bool = False, storage_options: StorageOptions = None, **kwargs, ) -> DataFrame: kwargs["use_pandas_metadata"] = True nullable_backend = get_option("io.nullable_backend") to_pandas_kwargs = {} if use_nullable_dtypes: import pandas as pd if nullable_backend == "pandas": mapping = { self.api.int8(): pd.Int8Dtype(), self.api.int16(): pd.Int16Dtype(), self.api.int32(): pd.Int32Dtype(), self.api.int64(): pd.Int64Dtype(), self.api.uint8(): pd.UInt8Dtype(), self.api.uint16(): pd.UInt16Dtype(), self.api.uint32(): pd.UInt32Dtype(), self.api.uint64(): pd.UInt64Dtype(), self.api.bool_(): pd.BooleanDtype(), self.api.string(): pd.StringDtype(), self.api.float32(): pd.Float32Dtype(), self.api.float64(): pd.Float64Dtype(), } to_pandas_kwargs["types_mapper"] = mapping.get manager = get_option("mode.data_manager") if manager == "array": to_pandas_kwargs["split_blocks"] = True # type: ignore[assignment] path_or_handle, handles, kwargs["filesystem"] = _get_path_or_handle( path, kwargs.pop("filesystem", None), storage_options=storage_options, mode="rb", ) try: pa_table = self.api.parquet.read_table( path_or_handle, columns=columns, **kwargs ) if nullable_backend == "pandas": result = pa_table.to_pandas(**to_pandas_kwargs) elif nullable_backend == "pyarrow": result = DataFrame( { col_name: arrays.ArrowExtensionArray(pa_col) for col_name, pa_col in zip( pa_table.column_names, pa_table.itercolumns() ) } ) if manager == "array": result = result._as_manager("array", copy=False) return result finally: if handles is not None: handles.close() class FastParquetImpl(BaseImpl): def __init__(self) -> None: # since pandas is a dependency of fastparquet # we need to import on first use fastparquet = import_optional_dependency( "fastparquet", extra="fastparquet is required for parquet support." ) self.api = fastparquet def write( self, df: DataFrame, path, compression: Literal["snappy", "gzip", "brotli"] | None = "snappy", index=None, partition_cols=None, storage_options: StorageOptions = None, **kwargs, ) -> None: self.validate_dataframe(df) # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. if "partition_on" in kwargs and partition_cols is not None: raise ValueError( "Cannot use both partition_on and " "partition_cols. Use partition_cols for partitioning data" ) if "partition_on" in kwargs: partition_cols = kwargs.pop("partition_on") if partition_cols is not None: kwargs["file_scheme"] = "hive" # cannot use get_handle as write() does not accept file buffers path = stringify_path(path) if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") # if filesystem is provided by fsspec, file must be opened in 'wb' mode. kwargs["open_with"] = lambda path, _: fsspec.open( path, "wb", **(storage_options or {}) ).open() elif storage_options: raise ValueError( "storage_options passed with file object or non-fsspec file path" ) with catch_warnings(record=True): self.api.write( path, df, compression=compression, write_index=index, partition_on=partition_cols, **kwargs, ) def read( self, path, columns=None, storage_options: StorageOptions = None, **kwargs ) -> DataFrame: parquet_kwargs: dict[str, Any] = {} use_nullable_dtypes = kwargs.pop("use_nullable_dtypes", False) if Version(self.api.__version__) >= Version("0.7.1"): # We are disabling nullable dtypes for fastparquet pending discussion parquet_kwargs["pandas_nulls"] = False if use_nullable_dtypes: raise ValueError( "The 'use_nullable_dtypes' argument is not supported for the " "fastparquet engine" ) path = stringify_path(path) handles = None if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") if Version(self.api.__version__) > Version("0.6.1"): parquet_kwargs["fs"] = fsspec.open( path, "rb", **(storage_options or {}) ).fs else: parquet_kwargs["open_with"] = lambda path, _: fsspec.open( path, "rb", **(storage_options or {}) ).open() elif isinstance(path, str) and not os.path.isdir(path): # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories # this branch is used for example when reading from non-fsspec URLs handles = get_handle( path, "rb", is_text=False, storage_options=storage_options ) path = handles.handle try: parquet_file = self.api.ParquetFile(path, **parquet_kwargs) return parquet_file.to_pandas(columns=columns, **kwargs) finally: if handles is not None: handles.close() @doc(storage_options=_shared_docs["storage_options"]) def to_parquet( df: DataFrame, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, storage_options: StorageOptions = None, partition_cols: list[str] | None = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the parquet format. Parameters ---------- df : DataFrame path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string, it will be used as Root Directory path when writing a partitioned dataset. The engine fastparquet does not accept file-like objects. .. versionchanged:: 1.2.0 engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, default 'snappy'. Name of the compression to use. Use ``None`` for no compression. The supported compression methods actually depend on which engine is used. For 'pyarrow', 'snappy', 'gzip', 'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet', only 'gzip' and 'snappy' are supported. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : str or list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 kwargs Additional keyword arguments passed to the engine Returns ------- bytes if no path argument is provided else None """ if isinstance(partition_cols, str): partition_cols = [partition_cols] impl = get_engine(engine) path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path impl.write( df, path_or_buf, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) if path is None: assert isinstance(path_or_buf, io.BytesIO) return path_or_buf.getvalue() else: return None @doc(storage_options=_shared_docs["storage_options"]) def read_parquet( path: FilePath | ReadBuffer[bytes], engine: str = "auto", columns: list[str] | None = None, storage_options: StorageOptions = None, use_nullable_dtypes: bool = False, **kwargs, ) -> DataFrame: """ Load a parquet object from the file path, returning a DataFrame. Parameters ---------- path : str, path object or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. {storage_options} .. versionadded:: 1.3.0 use_nullable_dtypes : bool, default False If True, use dtypes that use ``pd.NA`` as missing value indicator for the resulting DataFrame. (only applicable for the ``pyarrow`` engine) As new dtypes are added that support ``pd.NA`` in the future, the output with this option will change to use those dtypes. Note: this is an experimental option, and behaviour (e.g. additional support dtypes) may change without notice. .. versionadded:: 1.2.0 The nullable dtype implementation can be configured by setting the global ``io.nullable_backend`` configuration option to ``"pandas"`` to use numpy-backed nullable dtypes or ``"pyarrow"`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). .. versionadded:: 2.0.0 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) return impl.read( path, columns=columns, storage_options=storage_options, use_nullable_dtypes=use_nullable_dtypes, **kwargs, )
{ "content_hash": "4992d374a3d1ad22e82ed69bfc7ec56b", "timestamp": "", "source": "github", "line_count": 533, "max_line_length": 87, "avg_line_length": 35.35834896810506, "alnum_prop": 0.5778414517669532, "repo_name": "pandas-dev/pandas", "id": "1c14722227124279dfb9f56cce298736455ae02c", "size": "18846", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "pandas/io/parquet.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "512" }, { "name": "C", "bytes": "366145" }, { "name": "CSS", "bytes": "1800" }, { "name": "Cython", "bytes": "1186787" }, { "name": "Dockerfile", "bytes": "1411" }, { "name": "HTML", "bytes": "456531" }, { "name": "Python", "bytes": "18778786" }, { "name": "Shell", "bytes": "10369" }, { "name": "Smarty", "bytes": "8486" }, { "name": "XSLT", "bytes": "1196" } ], "symlink_target": "" }
import logging, os, random from zc.buildout import UserError, easy_install from zc.recipe.egg import Egg SETTINGS_TEMPLATE = ''' from %(settings_module)s import * SECRET_KEY = "%(secret)s" %(settings_override)s ''' SCRIPT_TEMPLATES = { 'wsgi': easy_install.script_header + ''' %(relative_paths_setup)s import sys sys.path[0:0] = [ %(path)s, ] %(initialization)s import os try: from django.core.wsgi import get_wsgi_application IS_14_PLUS = True except ImportError: from django.core.handlers.wsgi import WSGIHandler IS_14_PLUS = False os.environ['DJANGO_SETTINGS_MODULE'] = "%(module_name)s%(attrs)s" def app_factory(global_config, **local_config): """This function wraps our simple WSGI app so it can be used with paste.deploy""" if IS_14_PLUS: return get_wsgi_application() else: return WSGIHandler() application = app_factory(%(arguments)s) ''', 'manage': easy_install.script_header + ''' %(relative_paths_setup)s import sys sys.path[0:0] = [ %(path)s, ] %(initialization)s import os try: from django.core.management import execute_from_command_line IS_14_PLUS = True except ImportError: from django.core.management import ManagementUtility IS_14_PLUS = False os.environ['DJANGO_SETTINGS_MODULE'] = "%(module_name)s%(attrs)s" if IS_14_PLUS: execute_from_command_line(%(arguments)s) else: utility = ManagementUtility(%(arguments)s) utility.execute() ''' } class Recipe(object): wsgi_file = 'wsgi.py' settings_file = 'settings.py' sites_default = 'sites' site_settings_template = '%(name)s_site_config' secret_cfg = '.secret.cfg' def __init__(self, buildout, name, options): self.buildout, self.name, self.options = buildout, name, options self.logger = logging.getLogger(name) self.options['location'] = os.path.join( self.buildout['buildout']['parts-directory'], self.name ) self.options.setdefault('extra-paths', '') self.options.setdefault('environment-vars', '') self.options.setdefault('sites-directory', self.sites_default) self.options.setdefault('settings-override', '') self.options.setdefault('settings-file', self.settings_file) self.options.setdefault('wsgi-file', self.wsgi_file) self.options.setdefault('manage-py-file', 'django') self.eggs = [ ] if 'eggs' in self.buildout['buildout']: self.eggs.extend(self.buildout['buildout']['eggs'].split()) if 'eggs' in self.options: self.eggs.extend(self.options['eggs'].split()) self.working_set = None self.extra_paths = [ self.options['location'] ] sites_path = os.path.join( self.buildout['buildout']['directory'], self.options['sites-directory'] ) if os.path.isdir(sites_path): self.extra_paths.append(sites_path) if os.path.isdir(sites_path) and 'settings-module' not in self.options: # Check if the user has created a module %(name)s_config settings_module = self.site_settings_template % { 'name': self.name } settings_module_path = os.path.join(sites_path, settings_module) initpy = os.path.join(settings_module_path, '__init__.py') settingspy = os.path.join(settings_module_path, 'settings.py') if os.path.isdir(settings_module_path) and \ os.path.isfile(initpy) and os.path.isfile(settingspy): self.options.setdefault('settings-module', '%s.settings' % settings_module) self.extra_paths.extend(self.options['extra-paths'].split()) self.secret_key = None def setup_working_set(self): egg = Egg( self.buildout, 'Django', self.options ) self.working_set = egg.working_set(self.eggs) def setup_secret(self): secret_file = os.path.join( self.buildout['buildout']['directory'], self.secret_cfg ) if os.path.isfile(secret_file): stream = open(secret_file, 'rb') data = stream.read().decode('utf-8').strip() stream.close() self.logger.debug("Read secret: %s" % data) else: stream = open(secret_file, 'wb') chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)' data = u''.join([random.choice(chars) for __ in range(50)]) stream.write(data.encode('utf-8')+u"\n") stream.close() self.logger.debug( "Generated secret: %s (and written to %s)" % (data, secret_file) ) self.secret_key = data return secret_file def setup_module_file(self, module, name, data): with open(os.path.join(module, name), 'wb') as stream: stream.write(data) def get_settings(self, static_directory=None, media_directory=None): if 'settings-module' not in self.options: raise UserError( ("You should specify 'settings-module' in %(name)s " "or create a module named '"+self.site_settings_template+"' " "in '%(sites)s' with a 'settings.py' file in it") % { 'name': self.name, 'sites': self.options['sites-directory'] } ) settings_override = self.options['settings-override'] if static_directory is not None: settings_override += '\nSTATIC_ROOT = "%s"\n' % ( static_directory, ) if media_directory is not None: settings_override += '\nMEDIA_ROOT = "%s"\n' % ( media_directory, ) return SETTINGS_TEMPLATE % { 'settings_module': self.options['settings-module'], 'secret': self.secret_key, 'settings_override': settings_override } def setup_directories(self): result = [] for directory in [ 'static-directory', 'media-directory' ]: result.append(None) if directory in self.options: path = os.path.join( self.buildout['buildout']['directory'], self.options[directory] ) if not os.path.isdir(path): os.makedirs(path) result[-1] = path return result def get_initialization(self): # The initialization code is expressed as a list of lines initialization = [] # Gets the initialization code: the tricky part here is to preserve # indentation. # Since buildout does totally waste whitespace, if one wants to # preserve indentation must prefix its lines with '>>> ' or '... ' raw_value = self.options.get('initialization', '') is_indented = False indentations = ('>>> ', '... ') for line in raw_value.splitlines(): if line != "": if len(initialization) == 0: if line.startswith(indentations[0]): is_indented = True else: if is_indented and not line.startswith(indentations[1]): raise UserError( ("Line '%s' should be indented " "properly but is not") % line ) if is_indented: line = line[4:] initialization.append(line) # Gets the environment-vars option and generates code to set the # enviroment variables via os.environ environment_vars = [] for line in self.options.get('environment-vars', '').splitlines(): line = line.strip() if len(line) > 0: try: var_name, raw_value = line.split(' ', 1) except ValueError: raise RuntimeError( "Bad djc.recipe2 environment-vars contents: %s" % line ) environment_vars.append( 'os.environ["%s"] = r"%s"' % ( var_name, raw_value.strip() ) ) if len(environment_vars) > 0: initialization.append("import os") initialization.extend(environment_vars) if len(initialization) > 0: return "\n"+"\n".join(initialization)+"\n" return "" def create_script(self, name, path, settings, template, arguments): """Create arbitrary script. This script will also include the eventual code found in ``initialization`` and will also set (via ``os.environ``) the environment variables found in ``environment-vars`` """ self.logger.info( "Creating script at %s" % (os.path.join(path, name),) ) settings = settings.rsplit(".", 1) module = settings[0] attrs = "" if len(settings) > 1: attrs = "." + settings[1] old_script_template = easy_install.script_template easy_install.script_template = template script = easy_install.scripts( reqs=[(name, module, attrs)], working_set=self.working_set[1], executable=self.options['executable'], dest=path, extra_paths=self.extra_paths, initialization=self.get_initialization(), arguments=str(arguments) ) easy_install.script_template = old_script_template return script def setup_manage_script(self, settings): arguments = "sys.argv" return self.create_script( self.options['manage-py-file'], self.buildout['buildout']['bin-directory'], settings, SCRIPT_TEMPLATES['manage'], arguments ) def setup_wsgi_script(self, module_path, settings): arguments = "global_config={}" return self.create_script( self.options['wsgi-file'], module_path, settings, SCRIPT_TEMPLATES['wsgi'], arguments ) def setup(self, static_directory=None, media_directory=None): part_module = '%s_part_site' % self.name part_module_path = os.path.join(self.options['location'], part_module) settings_module = "%s.%s" % ( part_module, os.path.splitext(self.options['settings-file'])[0] ) if not os.path.exists(part_module_path): os.makedirs(part_module_path) self.setup_module_file(part_module_path, '__init__.py', "#\n") self.setup_module_file( part_module_path, self.options['settings-file'], self.get_settings(static_directory, media_directory) ) self.setup_wsgi_script(part_module_path, settings_module) files = [ self.options['location'] ] files.extend(self.setup_manage_script(settings_module)) return files def install(self): files = [] self.setup_working_set() # The .secret.cfg file is not reported so it doesn't get deleted self.setup_secret() static_directory, media_directory = self.setup_directories() # static and media are not added to files so that updates # won't delete them, nor reinstallations of parts files.extend(self.setup(static_directory, media_directory)) return tuple(files) update = install
{ "content_hash": "509d69808f67f3a783b8c968e627334e", "timestamp": "", "source": "github", "line_count": 323, "max_line_length": 80, "avg_line_length": 36.17956656346749, "alnum_prop": 0.5618689029608078, "repo_name": "abstract-open-solutions/djc.recipe2", "id": "24f523f5f4a4e76e0f15d104b4d7f25fad450035", "size": "11686", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "djc/recipe2/recipe.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "17822" }, { "name": "Shell", "bytes": "406" } ], "symlink_target": "" }
import unittest import paddle.v2.framework.core as core from paddle.v2.framework.op import Operator import numpy class TestGaussianRandomOp(unittest.TestCase): def test_cpu(self): self.gaussian_random_test(place=core.CPUPlace()) def test_gpu(self): if core.is_compile_gpu(): self.gaussian_random_test(place=core.GPUPlace(0)) def gaussian_random_test(self, place): scope = core.Scope() scope.var('Out').get_tensor() op = Operator( "gaussian_random", Out='Out', shape=[1000, 784], mean=.0, std=1., seed=10) context = core.DeviceContext.create(place) op.run(scope, context) tensor = numpy.array(scope.find_var('Out').get_tensor()) self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1) if __name__ == "__main__": unittest.main()
{ "content_hash": "cc07d6d37d2c365a9f0bc77b22c09cb3", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 65, "avg_line_length": 27.82857142857143, "alnum_prop": 0.5985626283367557, "repo_name": "pengli09/Paddle", "id": "0dc7e091a5c8dd046f36cab7f79a15b2281cdd90", "size": "974", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "python/paddle/v2/framework/tests/test_gaussian_random_op.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "261148" }, { "name": "C++", "bytes": "5387850" }, { "name": "CMake", "bytes": "215783" }, { "name": "CSS", "bytes": "21730" }, { "name": "Cuda", "bytes": "884320" }, { "name": "Go", "bytes": "109479" }, { "name": "HTML", "bytes": "8941" }, { "name": "JavaScript", "bytes": "1025" }, { "name": "Perl", "bytes": "11452" }, { "name": "Python", "bytes": "1796075" }, { "name": "Shell", "bytes": "137943" } ], "symlink_target": "" }
import airtablewrapper import json participantList = airtablewrapper.get_participants() participantJSONArray = [] for participant in participantList: newParticipant = { 'id': participant['fields']['Barcode']['text'], 'name': participant['fields']['First Name'] + " " + participant['fields']['Last Name'], 'role': participant['fields']['Role'].lower() } participantJSONArray.append(newParticipant) participantJSONStr = json.dumps(participantJSONArray) myFile = open('participants.json', 'w') myFile.write(participantJSONStr)
{ "content_hash": "108497cb561cc64fbe952259ff044f55", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 95, "avg_line_length": 29.736842105263158, "alnum_prop": 0.7061946902654868, "repo_name": "zbreit18/AttendanceUC", "id": "7f8c34696d1d5cf0a1a9bab707ed624300cbacc2", "size": "565", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "barcodeGenerator/idJSONGenerator.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "4676" }, { "name": "HTML", "bytes": "1333" }, { "name": "JavaScript", "bytes": "19986" }, { "name": "Python", "bytes": "7122" } ], "symlink_target": "" }
"""Client and server classes corresponding to protobuf-defined services.""" import grpc import psi_service_pb2 as psi__service__pb2 class PSIServiceStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.getSalt = channel.unary_unary( '/PSIService/getSalt', request_serializer=psi__service__pb2.SaltRequest.SerializeToString, response_deserializer=psi__service__pb2.SaltReply.FromString, ) self.uploadSet = channel.unary_unary( '/PSIService/uploadSet', request_serializer=psi__service__pb2.UploadSetRequest.SerializeToString, response_deserializer=psi__service__pb2.UploadSetResponse.FromString, ) self.downloadIntersection = channel.unary_unary( '/PSIService/downloadIntersection', request_serializer=psi__service__pb2.DownloadIntersectionRequest.SerializeToString, response_deserializer=psi__service__pb2.DownloadIntersectionResponse.FromString, ) class PSIServiceServicer(object): """Missing associated documentation comment in .proto file.""" def getSalt(self, request, context): """Gives SHA256 Hash salt """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def uploadSet(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def downloadIntersection(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_PSIServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'getSalt': grpc.unary_unary_rpc_method_handler( servicer.getSalt, request_deserializer=psi__service__pb2.SaltRequest.FromString, response_serializer=psi__service__pb2.SaltReply.SerializeToString, ), 'uploadSet': grpc.unary_unary_rpc_method_handler( servicer.uploadSet, request_deserializer=psi__service__pb2.UploadSetRequest.FromString, response_serializer=psi__service__pb2.UploadSetResponse.SerializeToString, ), 'downloadIntersection': grpc.unary_unary_rpc_method_handler( servicer.downloadIntersection, request_deserializer=psi__service__pb2.DownloadIntersectionRequest.FromString, response_serializer=psi__service__pb2.DownloadIntersectionResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'PSIService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class PSIService(object): """Missing associated documentation comment in .proto file.""" @staticmethod def getSalt(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/PSIService/getSalt', psi__service__pb2.SaltRequest.SerializeToString, psi__service__pb2.SaltReply.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def uploadSet(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/PSIService/uploadSet', psi__service__pb2.UploadSetRequest.SerializeToString, psi__service__pb2.UploadSetResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def downloadIntersection(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/PSIService/downloadIntersection', psi__service__pb2.DownloadIntersectionRequest.SerializeToString, psi__service__pb2.DownloadIntersectionResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
{ "content_hash": "1448611ed060f83bb5f4d124d1d38575", "timestamp": "", "source": "github", "line_count": 132, "max_line_length": 105, "avg_line_length": 41.45454545454545, "alnum_prop": 0.6330409356725146, "repo_name": "intel-analytics/BigDL", "id": "9916c0356fbddefaab64a7852aedd0130f1f5a06", "size": "5542", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "python/ppml/src/bigdl/ppml/fl/nn/generated/psi_service_pb2_grpc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "5342" }, { "name": "Dockerfile", "bytes": "139304" }, { "name": "Java", "bytes": "1321348" }, { "name": "Jupyter Notebook", "bytes": "54112822" }, { "name": "Lua", "bytes": "1904" }, { "name": "Makefile", "bytes": "19253" }, { "name": "PowerShell", "bytes": "1137" }, { "name": "PureBasic", "bytes": "593" }, { "name": "Python", "bytes": "8825782" }, { "name": "RobotFramework", "bytes": "16117" }, { "name": "Scala", "bytes": "13216148" }, { "name": "Shell", "bytes": "848241" } ], "symlink_target": "" }
import re def ensure_trailing_slash(url): if not url.endswith('/'): url += '/' return url class Mountpoint: def __init__(self, path): self.path = ensure_trailing_slash(path) def join(self, other): if not isinstance(other, Mountpoint): other = Mountpoint(other) other_path = other.path if other_path.startswith('/'): other_path = other_path[1:] return Mountpoint(self.path + other_path) def without_trailing_slash(self): return self.path[:-1] def __str__(self): return self.path def __repr__(self): return '<{}>'.format(self) def __hash__(self): return hash(self.path) def __eq__(self, other): if not isinstance(other, Mountpoint): other = Mountpoint(other) return self.path == other.path def sort_paths_specific_to_generic(paths, key=None): return sorted(paths, key=_get_path_normalizer(key=key), reverse=True) def _get_path_normalizer(key=None): if key is None: return _normalize_path def normalizer(a): return _normalize_path(key(a)) return normalizer def _normalize_path(path): path = re.sub(r'/+', '/', path) if path.startswith('/'): path = path[1:] if path.endswith('/'): path = path[:-1] return tuple(path.split('/'))
{ "content_hash": "8a3cc6b744e93fcad97cfca30dda1d0f", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 73, "avg_line_length": 22.540983606557376, "alnum_prop": 0.5781818181818181, "repo_name": "getweber/weber-cli", "id": "f6936cc86956ab905386ea937e0f197d13813723", "size": "1375", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "cob/utils/url.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "327" }, { "name": "Python", "bytes": "10717" } ], "symlink_target": "" }
from google.cloud.automl_v1.services.auto_ml.client import AutoMlClient from google.cloud.automl_v1.services.auto_ml.async_client import AutoMlAsyncClient from google.cloud.automl_v1.services.prediction_service.client import ( PredictionServiceClient, ) from google.cloud.automl_v1.services.prediction_service.async_client import ( PredictionServiceAsyncClient, ) from google.cloud.automl_v1.types.annotation_payload import AnnotationPayload from google.cloud.automl_v1.types.annotation_spec import AnnotationSpec from google.cloud.automl_v1.types.classification import ClassificationAnnotation from google.cloud.automl_v1.types.classification import ClassificationEvaluationMetrics from google.cloud.automl_v1.types.classification import ClassificationType from google.cloud.automl_v1.types.data_items import Document from google.cloud.automl_v1.types.data_items import DocumentDimensions from google.cloud.automl_v1.types.data_items import ExamplePayload from google.cloud.automl_v1.types.data_items import Image from google.cloud.automl_v1.types.data_items import TextSnippet from google.cloud.automl_v1.types.dataset import Dataset from google.cloud.automl_v1.types.detection import BoundingBoxMetricsEntry from google.cloud.automl_v1.types.detection import ImageObjectDetectionAnnotation from google.cloud.automl_v1.types.detection import ImageObjectDetectionEvaluationMetrics from google.cloud.automl_v1.types.geometry import BoundingPoly from google.cloud.automl_v1.types.geometry import NormalizedVertex from google.cloud.automl_v1.types.image import ImageClassificationDatasetMetadata from google.cloud.automl_v1.types.image import ( ImageClassificationModelDeploymentMetadata, ) from google.cloud.automl_v1.types.image import ImageClassificationModelMetadata from google.cloud.automl_v1.types.image import ImageObjectDetectionDatasetMetadata from google.cloud.automl_v1.types.image import ( ImageObjectDetectionModelDeploymentMetadata, ) from google.cloud.automl_v1.types.image import ImageObjectDetectionModelMetadata from google.cloud.automl_v1.types.io import BatchPredictInputConfig from google.cloud.automl_v1.types.io import BatchPredictOutputConfig from google.cloud.automl_v1.types.io import DocumentInputConfig from google.cloud.automl_v1.types.io import GcsDestination from google.cloud.automl_v1.types.io import GcsSource from google.cloud.automl_v1.types.io import InputConfig from google.cloud.automl_v1.types.io import ModelExportOutputConfig from google.cloud.automl_v1.types.io import OutputConfig from google.cloud.automl_v1.types.model import Model from google.cloud.automl_v1.types.model_evaluation import ModelEvaluation from google.cloud.automl_v1.types.operations import BatchPredictOperationMetadata from google.cloud.automl_v1.types.operations import CreateDatasetOperationMetadata from google.cloud.automl_v1.types.operations import CreateModelOperationMetadata from google.cloud.automl_v1.types.operations import DeleteOperationMetadata from google.cloud.automl_v1.types.operations import DeployModelOperationMetadata from google.cloud.automl_v1.types.operations import ExportDataOperationMetadata from google.cloud.automl_v1.types.operations import ExportModelOperationMetadata from google.cloud.automl_v1.types.operations import ImportDataOperationMetadata from google.cloud.automl_v1.types.operations import OperationMetadata from google.cloud.automl_v1.types.operations import UndeployModelOperationMetadata from google.cloud.automl_v1.types.prediction_service import BatchPredictRequest from google.cloud.automl_v1.types.prediction_service import BatchPredictResult from google.cloud.automl_v1.types.prediction_service import PredictRequest from google.cloud.automl_v1.types.prediction_service import PredictResponse from google.cloud.automl_v1.types.service import CreateDatasetRequest from google.cloud.automl_v1.types.service import CreateModelRequest from google.cloud.automl_v1.types.service import DeleteDatasetRequest from google.cloud.automl_v1.types.service import DeleteModelRequest from google.cloud.automl_v1.types.service import DeployModelRequest from google.cloud.automl_v1.types.service import ExportDataRequest from google.cloud.automl_v1.types.service import ExportModelRequest from google.cloud.automl_v1.types.service import GetAnnotationSpecRequest from google.cloud.automl_v1.types.service import GetDatasetRequest from google.cloud.automl_v1.types.service import GetModelEvaluationRequest from google.cloud.automl_v1.types.service import GetModelRequest from google.cloud.automl_v1.types.service import ImportDataRequest from google.cloud.automl_v1.types.service import ListDatasetsRequest from google.cloud.automl_v1.types.service import ListDatasetsResponse from google.cloud.automl_v1.types.service import ListModelEvaluationsRequest from google.cloud.automl_v1.types.service import ListModelEvaluationsResponse from google.cloud.automl_v1.types.service import ListModelsRequest from google.cloud.automl_v1.types.service import ListModelsResponse from google.cloud.automl_v1.types.service import UndeployModelRequest from google.cloud.automl_v1.types.service import UpdateDatasetRequest from google.cloud.automl_v1.types.service import UpdateModelRequest from google.cloud.automl_v1.types.text import TextClassificationDatasetMetadata from google.cloud.automl_v1.types.text import TextClassificationModelMetadata from google.cloud.automl_v1.types.text import TextExtractionDatasetMetadata from google.cloud.automl_v1.types.text import TextExtractionModelMetadata from google.cloud.automl_v1.types.text import TextSentimentDatasetMetadata from google.cloud.automl_v1.types.text import TextSentimentModelMetadata from google.cloud.automl_v1.types.text_extraction import TextExtractionAnnotation from google.cloud.automl_v1.types.text_extraction import TextExtractionEvaluationMetrics from google.cloud.automl_v1.types.text_segment import TextSegment from google.cloud.automl_v1.types.text_sentiment import TextSentimentAnnotation from google.cloud.automl_v1.types.text_sentiment import TextSentimentEvaluationMetrics from google.cloud.automl_v1.types.translation import TranslationAnnotation from google.cloud.automl_v1.types.translation import TranslationDatasetMetadata from google.cloud.automl_v1.types.translation import TranslationEvaluationMetrics from google.cloud.automl_v1.types.translation import TranslationModelMetadata __all__ = ( "AutoMlClient", "AutoMlAsyncClient", "PredictionServiceClient", "PredictionServiceAsyncClient", "AnnotationPayload", "AnnotationSpec", "ClassificationAnnotation", "ClassificationEvaluationMetrics", "ClassificationType", "Document", "DocumentDimensions", "ExamplePayload", "Image", "TextSnippet", "Dataset", "BoundingBoxMetricsEntry", "ImageObjectDetectionAnnotation", "ImageObjectDetectionEvaluationMetrics", "BoundingPoly", "NormalizedVertex", "ImageClassificationDatasetMetadata", "ImageClassificationModelDeploymentMetadata", "ImageClassificationModelMetadata", "ImageObjectDetectionDatasetMetadata", "ImageObjectDetectionModelDeploymentMetadata", "ImageObjectDetectionModelMetadata", "BatchPredictInputConfig", "BatchPredictOutputConfig", "DocumentInputConfig", "GcsDestination", "GcsSource", "InputConfig", "ModelExportOutputConfig", "OutputConfig", "Model", "ModelEvaluation", "BatchPredictOperationMetadata", "CreateDatasetOperationMetadata", "CreateModelOperationMetadata", "DeleteOperationMetadata", "DeployModelOperationMetadata", "ExportDataOperationMetadata", "ExportModelOperationMetadata", "ImportDataOperationMetadata", "OperationMetadata", "UndeployModelOperationMetadata", "BatchPredictRequest", "BatchPredictResult", "PredictRequest", "PredictResponse", "CreateDatasetRequest", "CreateModelRequest", "DeleteDatasetRequest", "DeleteModelRequest", "DeployModelRequest", "ExportDataRequest", "ExportModelRequest", "GetAnnotationSpecRequest", "GetDatasetRequest", "GetModelEvaluationRequest", "GetModelRequest", "ImportDataRequest", "ListDatasetsRequest", "ListDatasetsResponse", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", "ListModelsRequest", "ListModelsResponse", "UndeployModelRequest", "UpdateDatasetRequest", "UpdateModelRequest", "TextClassificationDatasetMetadata", "TextClassificationModelMetadata", "TextExtractionDatasetMetadata", "TextExtractionModelMetadata", "TextSentimentDatasetMetadata", "TextSentimentModelMetadata", "TextExtractionAnnotation", "TextExtractionEvaluationMetrics", "TextSegment", "TextSentimentAnnotation", "TextSentimentEvaluationMetrics", "TranslationAnnotation", "TranslationDatasetMetadata", "TranslationEvaluationMetrics", "TranslationModelMetadata", )
{ "content_hash": "01d5e0af9991b53058f964032a7a42b6", "timestamp": "", "source": "github", "line_count": 184, "max_line_length": 88, "avg_line_length": 48.88586956521739, "alnum_prop": 0.8290161200667038, "repo_name": "googleapis/python-automl", "id": "9f115aac3696680469b499065ebdf37f4d579c61", "size": "9596", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "google/cloud/automl/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "2347989" }, { "name": "Shell", "bytes": "30660" } ], "symlink_target": "" }
import datetime from sqlalchemy import Column, Integer, String, Text, DateTime, ForeignKey from sqlalchemy.orm import sessionmaker, relationship from app.database import Base # 이 경우 경로를 위해 app.을 붙여줘야합니다 # users 테이블에 맵핑 class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String(50), unique=False) password = Column(String(50), unique=False, nullable=False) email = Column(String(120), unique=True, nullable=False) image = Column(String(200), unique=False) company = Column(String(100), unique=False) gender = Column(String(50), unique=False) location = Column(String(100), unique=False) tel = Column(String(100), unique=False) description = Column(String(500), unique=False) posts = relationship('Post', backref='user', lazy='dynamic') def __init__(self, name=None, password=None, email=None, image=None, company=None, gender=None, location=None, tel=None, description=None): self.name = name self.password = password self.email = email self.image = image self.company = company self.gender = gender self.location = location self.tel = tel self.description = description # Representation def __repr__(self): return '<User %r>' % (self.name) # posts 테이블에 맵핑 class Post(Base): __tablename__ = 'posts' id = Column(Integer, primary_key=True) contents = Column(Text, unique=False) userid = Column(Integer, nullable=False) writer = Column(Integer, ForeignKey('users.id'), nullable=False) date = Column(DateTime, default=datetime.datetime.utcnow, unique=False) # 자동으로 들어간다. users = relationship('User', backref='post', lazy='joined') def __init__(self, contents=None, userid=None, writer=None): self.contents = contents self.userid = userid self.writer = writer def __repr__(self): return '<Contents %r %r>' % (self.contents, self.userid)
{ "content_hash": "465412634767eb2f0d6c5a530757783e", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 89, "avg_line_length": 35.24561403508772, "alnum_prop": 0.6550522648083623, "repo_name": "zzsza/TIL", "id": "3de93b2f5c28eeb4eeb5ac3639412e527fb4298e", "size": "2105", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/flask/ch1/app/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "431717" }, { "name": "Java", "bytes": "19334" }, { "name": "JavaScript", "bytes": "484" }, { "name": "Julia", "bytes": "314" }, { "name": "Jupyter Notebook", "bytes": "15381217" }, { "name": "Python", "bytes": "124497" }, { "name": "Shell", "bytes": "1958" } ], "symlink_target": "" }
from __future__ import unicode_literals import pytest def assert_no_error(result): assert 'error' not in result, result['error_description'] def test_simple_search(ws): params = {'latitude': 125994.28, 'longitude': 488781.51, 'hits': 3} assert_no_error(ws.travel.hotel.simple_search(**params)) def test_detail_search(ws): params = {'hotel_no': 136197} assert_no_error(ws.travel.hotel.detail_search(**params)) @pytest.mark.skip(reason="Changes in the API") def test_search_vacant(ws): params = {'latitude': '125994.28', 'longitude': '488781.51', 'checkinDate': '2017-02-25', 'checkoutDate': '2017-03-10'} assert_no_error(ws.travel.hotel.search_vacant(**params)) def test_ranking(ws): params = {'hits': 3} assert_no_error(ws.travel.hotel.ranking(**params)) @pytest.mark.skip(reason="Changes in the API") def test_get_chain_list(ws): assert_no_error(ws.travel.hotel.get_chain_list()) def test_keyword_search(ws): params = {'keyword': "Tokyo"} assert_no_error(ws.travel.hotel.keyword_search(**params)) @pytest.mark.skip(reason="Changes in the API") def test_get_area_class(ws): assert_no_error(ws.travel.area.get_class())
{ "content_hash": "d7850c58226961c8683062e6bc98a199", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 72, "avg_line_length": 27.906976744186046, "alnum_prop": 0.6783333333333333, "repo_name": "alexandriagroup/rakuten-ws", "id": "fcb60203581d00cae990051686b31b800bc14195", "size": "1216", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/webservice/test_travel_api.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "3127" }, { "name": "Python", "bytes": "104602" } ], "symlink_target": "" }
"""The app module, containing the app factory function.""" from flask import Flask, render_template from flask_security import SQLAlchemyUserDatastore from annotator import annotations, commands, public, user from annotator.extensions import (babel, cache, compress, csrf_protect, db, debug_toolbar, gitversion, mail, migrate, security, webpack) from annotator.settings import ProdConfig from annotator.user.forms import ExtendedConfirmRegisterForm, ExtendedRegisterForm from annotator.user.models import Role, User def create_app(config_object=ProdConfig): """An application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/. :param config_object: The configuration object to use. """ app = Flask(__name__.split('.')[0]) app.config.from_object(config_object) register_extensions(app) register_blueprints(app) register_errorhandlers(app) register_shellcontext(app) register_commands(app) return app def register_extensions(app): """Register Flask extensions.""" babel.init_app(app) cache.init_app(app) compress.init_app(app) db.init_app(app) csrf_protect.init_app(app) user_datastore = SQLAlchemyUserDatastore(db, User, Role) security.init_app(app, user_datastore, confirm_register_form=ExtendedConfirmRegisterForm, register_form=ExtendedRegisterForm) debug_toolbar.init_app(app) gitversion.init_app(app) mail.init_app(app) migrate.init_app(app, db) webpack.init_app(app) return None def register_blueprints(app): """Register Flask blueprints.""" app.register_blueprint(public.views.blueprint) app.register_blueprint(user.views.blueprint) app.register_blueprint(annotations.rest.blueprint) app.register_blueprint(annotations.views.blueprint) return None def register_errorhandlers(app): """Register error handlers.""" def render_error(error): """Render error template.""" # If a HTTPException, pull the `code` attribute; default to 500 error_code = getattr(error, 'code', 500) return render_template('{0}.html'.format(error_code)), error_code for errcode in [401, 404, 500]: app.errorhandler(errcode)(render_error) return None def register_shellcontext(app): """Register shell context objects.""" def shell_context(): """Shell context objects.""" return { 'db': db, 'User': user.models.User} app.shell_context_processor(shell_context) def register_commands(app): """Register Click commands.""" app.cli.add_command(commands.test) app.cli.add_command(commands.lint) app.cli.add_command(commands.clean) app.cli.add_command(commands.urls) app.cli.add_command(commands.drop_db) app.cli.add_command(commands.create_corpus) app.cli.add_command(commands.upgrade_corpus_v2) app.cli.add_command(commands.export)
{ "content_hash": "aa412f8179c0ef539bde0eb53bd9b671", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 117, "avg_line_length": 34, "alnum_prop": 0.6931818181818182, "repo_name": "wroberts/annotator", "id": "4f921e33696d7a7e29413dbb440067cd2843c0d4", "size": "3016", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "annotator/app.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1808" }, { "name": "HTML", "bytes": "29073" }, { "name": "JavaScript", "bytes": "21930" }, { "name": "PLpgSQL", "bytes": "1679" }, { "name": "Python", "bytes": "65859" } ], "symlink_target": "" }
""" Network-related utilities and helper functions. """ import socket from six.moves.urllib import parse from glance.openstack.common.gettextutils import _LW from glance.openstack.common import log as logging LOG = logging.getLogger(__name__) def parse_host_port(address, default_port=None): """Interpret a string as a host:port pair. An IPv6 address MUST be escaped if accompanied by a port, because otherwise ambiguity ensues: 2001:db8:85a3::8a2e:370:7334 means both [2001:db8:85a3::8a2e:370:7334] and [2001:db8:85a3::8a2e:370]:7334. >>> parse_host_port('server01:80') ('server01', 80) >>> parse_host_port('server01') ('server01', None) >>> parse_host_port('server01', default_port=1234) ('server01', 1234) >>> parse_host_port('[::1]:80') ('::1', 80) >>> parse_host_port('[::1]') ('::1', None) >>> parse_host_port('[::1]', default_port=1234) ('::1', 1234) >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) ('2001:db8:85a3::8a2e:370:7334', 1234) """ if address[0] == '[': # Escaped ipv6 _host, _port = address[1:].split(']') host = _host if ':' in _port: port = _port.split(':')[1] else: port = default_port else: if address.count(':') == 1: host, port = address.split(':') else: # 0 means ipv4, >1 means ipv6. # We prohibit unescaped ipv6 addresses with port. host = address port = default_port return (host, None if port is None else int(port)) class ModifiedSplitResult(parse.SplitResult): """Split results class for urlsplit.""" # NOTE(dims): The functions below are needed for Python 2.6.x. # We can remove these when we drop support for 2.6.x. @property def hostname(self): netloc = self.netloc.split('@', 1)[-1] host, port = parse_host_port(netloc) return host @property def port(self): netloc = self.netloc.split('@', 1)[-1] host, port = parse_host_port(netloc) return port def urlsplit(url, scheme='', allow_fragments=True): """Parse a URL using urlparse.urlsplit(), splitting query and fragments. This function papers over Python issue9374 when needed. The parameters are the same as urlparse.urlsplit. """ scheme, netloc, path, query, fragment = parse.urlsplit( url, scheme, allow_fragments) if allow_fragments and '#' in path: path, fragment = path.split('#', 1) if '?' in path: path, query = path.split('?', 1) return ModifiedSplitResult(scheme, netloc, path, query, fragment) def set_tcp_keepalive(sock, tcp_keepalive=True, tcp_keepidle=None, tcp_keepalive_interval=None, tcp_keepalive_count=None): """Set values for tcp keepalive parameters This function configures tcp keepalive parameters if users wish to do so. :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are not sure, this should be True, and default values will be used. :param tcp_keepidle: time to wait before starting to send keepalive probes :param tcp_keepalive_interval: time between successive probes, once the initial wait time is over :param tcp_keepalive_count: number of probes to send before the connection is killed """ # NOTE(praneshp): Despite keepalive being a tcp concept, the level is # still SOL_SOCKET. This is a quirk. if isinstance(tcp_keepalive, bool): sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive) else: raise TypeError("tcp_keepalive must be a boolean") if not tcp_keepalive: return # These options aren't available in the OS X version of eventlet, # Idle + Count * Interval effectively gives you the total timeout. if tcp_keepidle is not None: if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, tcp_keepidle) else: LOG.warning(_LW('tcp_keepidle not available on your system')) if tcp_keepalive_interval is not None: if hasattr(socket, 'TCP_KEEPINTVL'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, tcp_keepalive_interval) else: LOG.warning(_LW('tcp_keepintvl not available on your system')) if tcp_keepalive_count is not None: if hasattr(socket, 'TCP_KEEPCNT'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, tcp_keepalive_count) else: LOG.warning(_LW('tcp_keepknt not available on your system'))
{ "content_hash": "707c6a13bf4c71f61aeb7d2bf18b838d", "timestamp": "", "source": "github", "line_count": 145, "max_line_length": 78, "avg_line_length": 33.93103448275862, "alnum_prop": 0.6038617886178862, "repo_name": "redhat-openstack/glance", "id": "02f8f71d767e32835f6325d5c331e24f341dffa8", "size": "5557", "binary": false, "copies": "2", "ref": "refs/heads/f22-patches", "path": "glance/openstack/common/network_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "PLpgSQL", "bytes": "12183" }, { "name": "Python", "bytes": "3304893" }, { "name": "Shell", "bytes": "7168" } ], "symlink_target": "" }
from _infos import __version__ # Include the lib folder in the python import path (so that packaged modules can be easily called, such as gooey which always call its submodules via gooey parent module) import sys, os thispathname = os.path.dirname(__file__) sys.path.append(os.path.join(thispathname, 'lib')) # Import necessary libraries from lib.aux_funcs import fullpath import lib.argparse as argparse import datetime, time import lib.tqdm as tqdm import itertools import math #import operator # to get the max out of a dict import shlex # for string parsing as argv argument to main(), unnecessary otherwise from lib.tee import Tee # Redirect print output to the terminal as well as in a log file import struct # to support indexes backup file import shutil from lib.distance.distance import hamming #import pprint # Unnecessary, used only for debugging purposes # ECC and hashing facade libraries from lib.eccman import ECCMan, compute_ecc_params from lib.hasher import Hasher from lib.reedsolomon.reedsolo import ReedSolomonError #*********************************** # GUI AUX FUNCTIONS #*********************************** # Try to import Gooey for GUI display, but manage exception so that we replace the Gooey decorator by a dummy function that will just return the main function as-is, thus keeping the compatibility with command-line usage try: # pragma: no cover import lib.gooey as gooey except ImportError as exc: # Define a dummy replacement function for Gooey to stay compatible with command-line usage class gooey(object): # pragma: no cover def Gooey(func): return func # If --gui was specified, then there's a problem if len(sys.argv) > 1 and sys.argv[1] == '--gui': # pragma: no cover print('ERROR: --gui specified but an error happened with lib/gooey, cannot load the GUI (however you can still use this script in commandline). Check that lib/gooey exists and that you have wxpython installed. Here is the error: ') raise(exc) def conditional_decorator(flag, dec): # pragma: no cover def decorate(fn): if flag: return dec(fn) else: return fn return decorate def check_gui_arg(): # pragma: no cover '''Check that the --gui argument was passed, and if true, we remove the --gui option and replace by --gui_launched so that Gooey does not loop infinitely''' if len(sys.argv) > 1 and sys.argv[1] == '--gui': # DEPRECATED since Gooey automatically supply a --ignore-gooey argument when calling back the script for processing #sys.argv[1] = '--gui_launched' # CRITICAL: need to remove/replace the --gui argument, else it will stay in memory and when Gooey will call the script again, it will be stuck in an infinite loop calling back and forth between this script and Gooey. Thus, we need to remove this argument, but we also need to be aware that Gooey was called so that we can call gooey.GooeyParser() instead of argparse.ArgumentParser() (for better fields management like checkboxes for boolean arguments). To solve both issues, we replace the argument --gui by another internal argument --gui_launched. return True else: return False def AutoGooey(fn): # pragma: no cover '''Automatically show a Gooey GUI if --gui is passed as the first argument, else it will just run the function as normal''' if check_gui_arg(): return gooey.Gooey(fn) else: return fn #*********************************** # MAIN #*********************************** @AutoGooey def main(argv=None): if argv is None: # if argv is empty, fetch from the commandline argv = sys.argv[1:] elif isinstance(argv, basestring): # else if argv is supplied but it's a simple string, we need to parse it to a list of arguments before handing to argparse or any other argument parser argv = shlex.split(argv) # Parse string just like argv using shlex #==== COMMANDLINE PARSER ==== #== Commandline description desc = '''ECC file repairer Description: Repair the structure of an ecc file, mainly the ecc markers, so that at least the ecc correction can align correctly the ecc entries and fields. Note: An ecc structure repair does NOT allow to recover from more errors on your files, it only allows to repair an ecc file so that its structure is valid and can be read correctly. ''' ep = ''' ''' #== Commandline arguments #-- Constructing the parser # Use GooeyParser if we want the GUI because it will provide better widgets if len(argv) > 0 and (argv[0] == '--gui' and not '--ignore-gooey' in argv): # pragma: no cover # Initialize the Gooey parser main_parser = gooey.GooeyParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter) # Define Gooey widget types explicitly (because type auto-detection doesn't work quite well) widget_dir = {"widget": "DirChooser"} widget_filesave = {"widget": "FileSaver"} widget_file = {"widget": "FileChooser"} widget_text = {"widget": "TextField"} else: # Else in command-line usage, use the standard argparse # Delete the special argument to avoid unrecognized argument error in argparse if '--ignore-gooey' in argv[0]: argv.remove('--ignore-gooey') # this argument is automatically fed by Gooey when the user clicks on Start # Initialize the normal argparse parser main_parser = argparse.ArgumentParser(add_help=True, description=desc, epilog=ep, formatter_class=argparse.RawTextHelpFormatter) # Define dummy dict to keep compatibile with command-line usage widget_dir = {} widget_filesave = {} widget_file = {} widget_text = {} # Required arguments main_parser.add_argument('-i', '--input', metavar='eccfile.txt', type=str, required=True, help='Path to the ecc file to repair.', **widget_file) main_parser.add_argument('-o', '--output', metavar='eccfile_repaired.txt', type=str, required=True, #type=argparse.FileType('rt') help='Output path where to save the repaired ecc file.', **widget_filesave) main_parser.add_argument('-t', '--threshold', type=float, default=0.3, required=False, help='Distance threshold for the heuristic hamming distance repair. This must be a float, eg, 0.2 means that if there are 20% characters different between an ecc marker and a substring in the ecc file, it will be detected as a marker and corrected.', **widget_text) # Optional general arguments main_parser.add_argument('--index', metavar='eccfile.txt.idx', type=str, required=False, help='Path to the index backup file corresponding to the ecc file (optional but helps a lot).', **widget_file) main_parser.add_argument('--ecc_algo', type=int, default=1, required=False, help='What algorithm use to generate and verify the ECC? Values possible: 1-4. 1 is the formal, fully verified Reed-Solomon in base 3 ; 2 is a faster implementation but still based on the formal base 3 ; 3 is an even faster implementation but based on another library which may not be correct ; 4 is the fastest implementation supporting US FAA ADSB UAT RS FEC standard but is totally incompatible with the other three (a text encoded with any of 1-3 modes will be decodable with any one of them).', **widget_text) main_parser.add_argument('-l', '--log', metavar='/some/folder/filename.log', type=str, required=False, help='Path to the log file. (Output will be piped to both the stdout and the log file)', **widget_filesave) main_parser.add_argument('-v', '--verbose', action='store_true', required=False, default=False, help='Verbose mode (show more output).') main_parser.add_argument('--silent', action='store_true', required=False, default=False, help='No console output (but if --log specified, the log will still be saved in the specified file).') main_parser.add_argument('-f', '--force', action='store_true', required=False, default=False, help='Force overwriting the ecc file even if it already exists (if --generate).') #== Parsing the arguments args = main_parser.parse_args(argv) # Storing all arguments to args #-- Set hard-coded variables entrymarker = "\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF\xFE\xFF" # marker that will signal the beginning of an ecc entry - use an alternating pattern of several characters, this avoids confusion (eg: if you use "AAA" as a pattern, if the ecc block of the previous file ends with "EGA" for example, then the full string for example will be "EGAAAAC:\yourfolder\filea.jpg" and then the entry reader will detect the first "AAA" occurrence as the entry start - this should not make the next entry bug because there is an automatic trim - but the previous ecc block will miss one character that could be used to repair the block because it will be "EG" instead of "EGA"!) field_delim = "\xFA\xFF\xFA\xFF\xFA" # delimiter between fields (filepath, filesize, hash+ecc blocks) inside an ecc entry markers = [entrymarker, field_delim] # put them in a list for easy reference max_block_size = 27 resilience_rate = 1 #-- Set variables from arguments inputpath = fullpath(args.input) outputpath = fullpath(args.output) distance_threshold = args.threshold indexpath = None if args.index: indexpath = fullpath(args.index) force = args.force ecc_algo = args.ecc_algo verbose = args.verbose silent = args.silent # -- Checking arguments if not os.path.isfile(inputpath): raise NameError('Specified database ecc file %s does not exist!' % inputpath) if os.path.isfile(outputpath) and not force: raise NameError('Specified output path for the repaired ecc file %s already exists! Use --force if you want to overwrite.' % outputpath) if indexpath and not os.path.isfile(indexpath): raise NameError('Specified index backup file %s does not exist!' % indexpath) if max_block_size < 2 or max_block_size > 255: raise ValueError('RS max block size must be between 2 and 255.') # -- Configure the log file if enabled (ptee.write() will write to both stdout/console and to the log file) if args.log: ptee = Tee(args.log, 'a', nostdout=silent) #sys.stdout = Tee(args.log, 'a') sys.stderr = Tee(args.log, 'a', nostdout=silent) else: ptee = Tee(nostdout=silent) # == PROCESSING BRANCHING == # # Precompute some parameters and load up ecc manager objects (big optimization as g_exp and g_log tables calculation is done only once) hasher_none = Hasher('none') # for index ecc we don't use any hash ecc_params_idx = compute_ecc_params(max_block_size, resilience_rate, hasher_none) ecc_manager_idx = ECCMan(max_block_size, ecc_params_idx["message_size"], algo=ecc_algo) # == Main loop ptee.write("====================================") ptee.write("ECC repair, started on %s" % datetime.datetime.now().isoformat()) ptee.write("====================================") ptee.write("Please note that this tool may not know if it found all the markers, so it may miss too much corrupted markers but it will repair the ones it finds (except if you have a fully valid index file, then you are guaranteed to always find all markers).") ecc_size = os.stat(inputpath).st_size if indexpath: idx_size = os.stat(indexpath).st_size shutil.copy2(inputpath, outputpath) blocksize = 65535 with open(outputpath, 'r+b') as db: # == Index backup repair # This repair needs an index backup file which is normally generated at the same time as the ecc file. The index backup file is a file that stores the position of all ecc markers in the corresponding ecc file, and protects those positions using ecc. if indexpath: ptee.write("Using the index backup file %s to repair ecc markers, please wait..." % args.index) db.seek(0) # seek to the beginning of the database file idx_corrupted = 0 idx_corrected = 0 idx_total = 0 markers_repaired = [0] * len(markers) bardisp = tqdm.tqdm(total=idx_size, file=ptee, leave=True, desc='IDXREAD', unit='B', unit_scale=True) # display progress bar based on reading the database file (since we don't know how many files we will process beforehand nor how many total entries we have) with open(indexpath, 'rb') as dbidx: buf = 1 while buf: # The format of the index backup file is pretty simple: for each entrymarker or field_delim, a block is appended. Each such block is made of: the type on one byte (1 for entrymarker, 2 for field_delim), then the marker's position in the ecc file encoded in an unsigned long long (thus it's on a fixed 8 bytes), and finally an ecc for both the type and marker's position, and which is of fixed size (since we know that the marker's type + position = 9 bytes). Each such block is appended right after the precedent, so we know easily read them and such structure cannot be tampered by a soft error (there's no way a hard drive failure can chance the structure of the data, but a malicious user can! But it's then easy to fix that for a human user, you can clearly see the patterns, where the marker's positions begins and ends). # Note that this constant sized structure of blocks is made on purpose, so that the structure of the index backup file is implicit, while the structure of the ecc file is explicit (it needs uncorrupted markers, which is a weak point that we try to address with the index backup file). # eg of two blocks: 10000008Aecceccecc2000000F2ecceccecc # # Read one index block curpos = dbidx.tell() # backup current position for error messages buf = dbidx.read(max_block_size) # Update progress bar bardisp.update(dbidx.tell()-bardisp.n) # If we have reached EOF, then we stop here if not buf: break # Else it's ok we have an index block, we process it idx_total += 1 # Extract the marker's infos and the ecc marker_str = buf[:ecc_params_idx["message_size"]] ecc = buf[ecc_params_idx["message_size"]:] # Check if the marker's infos are corrupted, if yes, then we will try to fix that using the ecc if not ecc_manager_idx.check(marker_str, ecc): # Trying to fix the marker's infos using the ecc idx_corrupted += 1 marker_repaired, repaired_ecc = ecc_manager_idx.decode(marker_str, ecc) # Repaired the marker's infos, all is good! if ecc_manager_idx.check(marker_repaired, repaired_ecc): marker_str = marker_repaired idx_corrected += 1 # Else it's corrupted beyond repair, just skip else: ptee.write("\n- Index backup file: error on block starting at %i, corrupted and could not fix it. Skipping." % curpos) marker_str = None continue if not marker_str: continue # Repair ecc file's marker using our correct (or repaired) marker's infos marker_type = int(marker_str[0]) # marker's type is always stored on the first byte/character marker_pos = struct.unpack('>Q', marker_str[1:]) # marker's position is encoded as a big-endian unsigned long long, in a 8 bytes/chars string db.seek(marker_pos[0]) # move the ecc reading cursor to the beginning of the marker current_marker = db.read(len(markers[marker_type-1])) # read the current marker (potentially corrupted) db.seek(marker_pos[0]) if verbose: print "- Found marker by index file: type=%i content=" % (marker_type) print db.read(len(markers[marker_type-1])+4) db.seek(marker_pos[0]) # replace the reading cursor back in place before the marker if current_marker != markers[marker_type-1]: # check if we really need to repair this marker # Rewrite the marker over the ecc file db.write(markers[marker_type-1]) markers_repaired[marker_type-1] += 1 else: print "skipped, no need to repair" # Done the index backup repair if bardisp.n > bardisp.total: bardisp.n = bardisp.total # just a workaround in case there's one byte more than the predicted total bardisp.close() ptee.write("Done. Total: %i/%i markers repaired (%i entrymarkers and %i field_delim), %i indexes corrupted and %i indexes repaired (%i indexes lost).\n" % (markers_repaired[0]+markers_repaired[1], idx_total, markers_repaired[0], markers_repaired[1], idx_corrupted, idx_corrected, idx_corrupted-idx_corrected) ) # == Heuristical Greedy Hamming distance repair # This is a heuristical (doesn't need any other file than the ecc file) 2-pass algorithm: the first pass tries to find the markers positions, and then the second pass simply reads the original ecc file and copies it while repairing the found markers. # The first pass is obviously the most interesting, here's a description: we use a kind of greedy algorithm but with backtracking, meaning that we simply read through all the strings sequentially and just compare with the markers and compute the Hamming distance: if the Hamming distance gets below the threshold, we trigger the found marker flag. Then if the Hamming distance decreases, we save this marker position and disable the found marker flag. However, there can be false positives like this (eg, the marker is corrupted in the middle), so we have a backtracking mechanism: if a later string is found to have a Hamming distance that is below the threshold, then we check if the just previously found marker is in the range (ie, the new marker's position is smaller than the previous marker's length) and if the Hamming distance is smaller, then we replace the previous marker with the new marker's position, because the previous one was most likely a false positive. # This method doesn't require any other file than the ecc file, but it may not work on ecc markers that are too much tampered, and if the detection threshold is too low or the markers are too small, there may be lots of false positives. # So try to use long markers (consisting of many character, preferably an alternating pattern different than the null byte \x00) and a high enough detection threshold. ptee.write("Using heuristics (Hamming distance) to fix markers with a threshold of %i%%, please wait..." % (round(distance_threshold*100, 0)) ) # Main loop for heuristical repair, try to find the substrings that minimize the hamming distance to one of the ecc markers markers_repaired = [0] * len(markers) # stat counter already_valid = 0 # stat counter db.seek(0) # seek to the beginning of the database file buf = 1 # init the buffer to 1 to initiate the while loop markers_pos = [[] for i in xrange(len(markers))] # will contain the list of positions where a corrupted marker has been detected (not valid markers, they will be skipped) distance_thresholds = [round(len(x)*distance_threshold, 0) for x in markers] # calculate the number of characters maximum for distance skip_until = -1 # when a valid marker (non corrupted) is found, we use this variable to skip to after the marker length (to avoid detecting partial parts of this marker, which will have a hamming distance even if the marker is completely valid because the reading window will be after the beginning of the marker) bardisp = tqdm.tqdm(total=ecc_size, file=ptee, leave=True, desc='DBREAD', unit='B', unit_scale=True) # display progress bar based on reading the database file (since we don't know how many files we will process beforehand nor how many total entries we have) while buf: # until we have walked through the whole ecc file # Read a part of the ecc file into a buffer, this allows to process more quickly than just loading the size of a marker curpos = db.tell() # keep the current reading position buf = db.read(blocksize) # Update progress bar bardisp.update(db.tell()-bardisp.n) if not buf: break # reached EOF? quitting here # Scan the buffer, by splitting the buffer into substrings the length of the ecc markers for i in xrange(len(buf)-max(len(entrymarker),len(field_delim))): # If we just came accross a non corrupted ecc marker, we skip until we are after this ecc marker (to avoid misdetections) if i < skip_until: continue # Compare each ecc marker type to this substring and compute the Hamming distance for m in xrange(len(markers)): d = hamming(buf[i:i+len(markers[m])], markers[m]) # Compute the Hamming distance (simply the number of different characters) mcurpos = curpos+i # current absolute position of this ecc marker # If there's no difference, then it's a valid, non-corrupted ecc marker if d == 0: already_valid += 1 # stats... # If we previously wrongly detected a corrupted ecc marker near here, then it's probably a misdetection (because we just had a partial view on this marker until now), thus we just remove it from our list of markers to repair if len(markers_pos[m]) > 0 and (mcurpos - markers_pos[m][-1][0]) <= len(markers[m]): # to detect that, we just check if the latest marker to repair is near the current marker (if its position is at maximum the length of the marker). This works because in the other condition below, we update the latest marker to repair if we find another one with a lower hamming distance very near. del markers_pos[m][-1] # Skip scanning until we are after the current marker to avoid misdetections su = i+len(markers[m]) if su > skip_until: skip_until = su # update with the biggest marker (because both markers can be detected here if the pattern is similar) break # Else there's a difference/distance but it's below the threshold: we have a corrupted marker! elif d > 0 and d <= distance_thresholds[m]: # Updating case: If the latest marker to repair is quite close to the current one, but the current detection has a lower distance, we probably are detecting the same marker but we are better positionned now, so we update the previous marker's position with this one now. if len(markers_pos[m]) > 0 and (mcurpos - markers_pos[m][-1][0]) <= len(markers[m]): if d < markers_pos[m][-1][1]: # Update only if the distance is less markers_pos[m][-1] = [mcurpos, d] else: # Else, we probably are detecting the same marker as the last detected one, but since our scanning window has moved forward, we have increased the distance. Just skip it, we should not repair at this position (else we will probably be overwriting over real content). continue # Adding case: Else we just add this marker as a new one to repair by appending to the list else: markers_pos[m].append([mcurpos, d]) # Else the distance is too great for the threshold, it's not a marker at all, we go on to the next substring if db.tell() < ecc_size: db.seek(db.tell()-max(len(entrymarker),len(field_delim))) if bardisp.n > bardisp.total: bardisp.n = bardisp.total # just a workaround in case there's one byte more than the predicted total bardisp.close() # Committing the repair into the ecc file for m in xrange(len(markers)): # for each type of markers marker = markers[m] if len(markers_pos[m]) > 0: # If there is any detected marker to repair for this type for pos in markers_pos[m]: # for each detected marker to repair, we rewrite it over into the file at the detected position if verbose: ptee.write("- Detected marker type %i at position %i with distance %i (%i%%): repairing." % (m+1, pos[0], pos[1], (float(pos[1])/len(markers[m]))*100) ) db.seek(pos[0]) db.write(marker) #print(markers_pos) ptee.write("Done. Hamming heuristic with threshold %i%% repaired %i entrymarkers and %i field_delim (%i total) and %i were already valid.\n" % (round(distance_threshold*100, 0), len(markers_pos[0]), len(markers_pos[1]), len(markers_pos[0])+len(markers_pos[1]), already_valid) ) del ptee return 0 # Calling main function if the script is directly called (not imported as a library in another program) if __name__ == "__main__": # pragma: no cover sys.exit(main())
{ "content_hash": "437488c5a7c7bdf30ac1359f6fb64199", "timestamp": "", "source": "github", "line_count": 340, "max_line_length": 982, "avg_line_length": 76.49411764705883, "alnum_prop": 0.657912949861581, "repo_name": "lrq3000/pyFileFixity", "id": "38537e1fbdf6fa36d9f8beb93a880cf335e4b8ec", "size": "27188", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyFileFixity/repair_ecc.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "48259" }, { "name": "CSS", "bytes": "1629" }, { "name": "JavaScript", "bytes": "339" }, { "name": "Makefile", "bytes": "1301" }, { "name": "Python", "bytes": "2235740" }, { "name": "Shell", "bytes": "964" }, { "name": "TeX", "bytes": "19102" } ], "symlink_target": "" }
from pyramid.response import Response from pyramid.view import view_config from pyramid.compat import bytes_ from snovault import Item from collections import OrderedDict from copy import deepcopy import json import os from urllib.parse import ( parse_qs, urlencode, ) from snovault.elasticsearch.interfaces import ELASTIC_SEARCH import time from pkg_resources import resource_filename import logging log = logging.getLogger(__name__) #log.setLevel(logging.DEBUG) log.setLevel(logging.INFO) IHEC_DEEP_DIG = True # ihec requires pipeline and aligner information which is not easy to get IHEC_LIB_STRATEGY = { 'ATAC-seq': 'ATAC-seq', 'ChIP-seq': 'ChIP-Seq', 'DNase-seq': 'DNase-Hypersensitivity', 'MeDIP-seq': 'MeDIP-Seq', 'microRNA-seq': 'miRNA-Seq', 'microRNA counts': 'miRNA-Seq', 'small RNA-seq': 'RNA-Seq', 'MRE-seq': 'MRE-Seq', 'polyA plus RNA-seq': 'RNA-Seq', 'RNA-seq': 'RNA-Seq', 'RRBS': 'Bisulfite-Seq', 'whole-genome shotgun bisulfite sequencing': 'Bisulfite-Seq' } ASSEMBLY_DETAILS = { 'GRCh38': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh38', 'common_name': 'human', 'ucsc_assembly': 'hg38', 'ensembl_host': 'www.ensembl.org', 'quickview': True, 'hic': True, 'comment': 'Ensembl works' }, 'GRCh38-minimal': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh38', 'common_name': 'human', 'ucsc_assembly': 'hg38', 'ensembl_host': 'www.ensembl.org', }, 'hg19': { 'species': 'Homo sapiens', 'assembly_reference': 'GRCh37', 'common_name': 'human', 'ucsc_assembly': 'hg19', 'NA_ensembl_host': 'grch37.ensembl.org', 'quickview': True, 'hic': True, 'comment': 'Ensembl DOES NOT WORK' }, 'GRCm39': { 'species': 'Mus musculus', 'assembly_reference': 'GRCm39', 'common_name': 'mouse', 'ucsc_assembly': 'mm39', 'ensembl_host': 'www.ensembl.org', 'quickview': True, 'comment': 'Ensembl should work' }, 'mm10': { 'species': 'Mus musculus', 'assembly_reference': 'GRCm38', 'common_name': 'mouse', 'ucsc_assembly': 'mm10', 'ensembl_host': 'www.ensembl.org', 'quickview': True, 'comment': 'Ensembl works' }, 'mm10-minimal': { 'species': 'Mus musculus', 'assembly_reference': 'GRCm38', 'common_name': 'mouse', 'ucsc_assembly': 'mm10', 'ensembl_host': 'www.ensembl.org', 'quickview': True, 'comment': 'Should this be removed?' }, 'mm9': { 'species': 'Mus musculus', 'assembly_reference': 'NCBI37', 'common_name': 'mouse', 'ucsc_assembly': 'mm9', 'NA_ensembl_host': 'may2012.archive.ensembl.org', 'quickview': True, 'comment': 'Ensembl DOES NOT WORK' }, 'dm6': { 'species': 'Drosophila melanogaster', 'assembly_reference': 'BDGP6', 'common_name': 'fruit fly', 'ucsc_assembly': 'dm6', 'NA_ensembl_host': 'www.ensembl.org', 'quickview': True, 'comment': 'Ensembl DOES NOT WORK' }, 'dm3': { 'species': 'Drosophila melanogaster', 'assembly_reference': 'BDGP5', 'common_name': 'fruit fly', 'ucsc_assembly': 'dm3', 'NA_ensembl_host': 'dec2014.archive.ensembl.org', 'quickview': True, 'comment': 'Ensembl DOES NOT WORK' }, 'ce11': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WBcel235', 'common_name': 'worm', 'ucsc_assembly': 'ce11', 'NA_ensembl_host': 'www.ensembl.org', 'quickview': True, 'comment': 'Ensembl DOES NOT WORK' }, 'ce10': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WS220', 'common_name': 'worm', 'ucsc_assembly': 'ce10', 'quickview': True, 'comment': 'Never Ensembl' }, 'ce6': { 'species': 'Caenorhabditis elegans', 'assembly_reference': 'WS190', 'common_name': 'worm', 'ucsc_assembly': 'ce6', 'comment': 'Never Ensembl, not found in encoded' }, 'J02459.1': { 'species': 'Escherichia virus Lambda', 'assembly_reference': 'J02459.1', 'common_name': 'lambda phage', 'comment': 'Never visualized' }, } BROWSER_FILE_TYPES = { 'ucsc': {'bigWig', 'bigBed', 'bigInteract'}, 'ensembl': {'bigWig', 'bigBed'}, 'quickview': {'bigWig', 'bigBed'}, 'hic': {'hic'}, } # Distinct from ASSEMBLY_DETAILS['ucsc_assembly'] as that defines allowed mappings ASSEMBLY_TO_UCSC_ID = { 'GRCh38-minimal': 'hg38', 'GRCh38': 'hg38', 'GRCh37': 'hg19', 'GRCm39': 'mm39', 'mm10-minimal': 'mm10', 'GRCm38': 'mm10', 'NCBI37': 'mm9', 'BDGP6': 'dm6', 'BDGP5': 'dm3', 'WBcel235': 'ce11' } QUICKVIEW_STATUSES_BLOCKED = ["deleted", "revoked", "replaced"] VISIBLE_DATASET_STATUSES = ["released"] VISIBLE_FILE_STATUSES = ["released"] BIGWIG_FILE_TYPES = ['bigWig'] BIGBED_FILE_TYPES = ['bigBed'] HIC_FILE_TYPES = ['hic'] BIGINTERACT_FILE_TYPES = ['bigInteract'] VISIBLE_FILE_FORMATS = BIGBED_FILE_TYPES + BIGWIG_FILE_TYPES + HIC_FILE_TYPES + BIGINTERACT_FILE_TYPES VISIBLE_DATASET_TYPES = ["Experiment", "Annotation", "FunctionalCharacterizationExperiment"] VISIBLE_DATASET_TYPES_LC = ["experiment", "annotation", "functional_characterization_experiment"] # Supported tokens are the only tokens the code currently knows how to look up. SUPPORTED_MASK_TOKENS = [ "{replicate}", # replicate that that will be displayed: ("rep1", "combined") "{rep_tech}", # The rep_tech if desired ("rep1_1", "combined") "{replicate_number}", # The replicate number displayed for visualized track: ("1", "0") "{biological_replicate_number}", "{technical_replicate_number}", "{assay_title}", "{assay_term_name}", # dataset.assay_term_name "{annotation_type}", # some datasets have annotation type and not assay "{output_type}", # files.output_type "{accession}", "{experiment.accession}", # "{accession}" is assumed to be experiment.accession "{file.accession}", "{@id}", "{@type}", # dataset only "{target}", "{target.label}", # Either is acceptible "{target.title}", "{target.name}", # Used in metadata URLs "{target.investigated_as}", "{biosample_term_name}", "{biosample_term_name|multiple}", # "|multiple": none means multiple "{output_type_short_label}", # hard-coded translation from output_type to very # short version "{replicates.library.biosample.summary}", # Idan, Forrest and Cricket are conspiring to move # to dataset.biosample_summary & make it shorter "{replicates.library.biosample.summary|multiple}", # "|multiple": none means multiple "{assembly}", # you don't need this in titles, but it is crucial # variable and seems to not be being applied # # correctly in the html generation "{lab.title}", # In metadata "{award.rfa}", # To distinguish vis_defs based upon award # TODO "{software? or pipeline?}", # Cricket: "I am stumbling over the fact that we # # can't distinguish tophat and star produced files" # TODO "{phase}", # Cricket: "If we get to the point of being fancy # # in the replication timing, then we need this, # # otherwise it bundles up in the biosample summary now" ] # Simple tokens are a straight lookup, no questions asked SIMPLE_DATASET_TOKENS = ["{accession}", "{assay_title}", "{assay_term_name}", "{annotation_type}", "{@id}", "{@type}"] # static group defs are keyed by group title (or special token) and consist of # tag: (optional) unique terse key for referencing group # groups: (optional) { subgroups keyed by subgroup title } # group_order: (optional) [ ordered list of subgroup titles ] # other definitions # live group defs are keyed by tag and are the transformed in memory version of static defs # title: (required) same as the static group's key # groups: (if appropriate) { subgroups keyed by subgroup tag } # group_order: (if appropriate) [ ordered list of subgroup tags ] VIS_DEFS_FOLDER = "static/vis_defs/" VIS_DEFS_BY_TYPE = {} VIS_DEFS_DEFAULT = {} # vis_defs may not have the default experiment group defined EXP_GROUP = "Experiment" DEFAULT_EXPERIMENT_GROUP = {"tag": "EXP", "groups": {"one": {"title_mask": "{accession}", "url_mask": "experiments/{accession}"}}} # Pennants are flags that display at UCSC next to composite definitions PENNANTS = { "NHGRI": ("https://www.encodeproject.org/static/img/pennant-nhgri.png " "https://www.encodeproject.org/ " "\"This trackhub was automatically generated from the files and metadata found " "at the ENCODE portal\""), "ENCODE": ("https://www.encodeproject.org/static/img/pennant-encode.png " "https://www.encodeproject.org/ " "\"This trackhub was automatically generated from the ENCODE files and metadata " "found at the ENCODE portal\""), "modENCODE": ("https://www.encodeproject.org/static/img/pennant-encode.png " "https://www.encodeproject.org/ " "\"This trackhub was automatically generated from the modENCODE files and " "metadata found at the ENCODE portal\""), "GGR": ("https://www.encodeproject.org/static/img/pennant-ggr.png " "https://www.encodeproject.org/ " "\"This trackhub was automatically generated from the Genomics of " "Gene Regulation files files and metadata found at the " "ENCODE portal\""), "REMC": ("https://www.encodeproject.org/static/img/pennant-remc.png " "https://www.encodeproject.org/ " "\"This trackhub was automatically generated from the Roadmap Epigentics files " "and metadata found at the ENCODE portal\"") # "Roadmap": "encodeThumbnail.jpg " # "https://www.encodeproject.org/ " # "\"This trackhub was automatically generated from the Roadmap files and " # "metadata found at https://www.encodeproject.org/\"", # "modERN": "encodeThumbnail.jpg " # "https://www.encodeproject.org/ " # "\"This trackhub was automatically generated from the modERN files and " # "metadata found at https://www.encodeproject.org/\"", } # supported groups for arranging/sorting files in a visualization SUPPORTED_SUBGROUPS = ["Biosample", "Targets", "Assay", "Replicates", "Views", EXP_GROUP] # UCSC trackDb settings that are supported SUPPORTED_TRACK_SETTINGS = [ "type", "visibility", "longLabel", "shortLabel", "color", "altColor", "allButtonPair", "html", "scoreFilter", "spectrum", "minGrayLevel", "itemRgb", "viewLimits", "autoScale", "negateValues", "maxHeightPixels", "windowingFunction", "transformFunc", "signalFilter", "signalFilterLimits", "pValueFilter", "pValueFilterLimits", "qValueFilter", "qValueFilterLimits", "interactUp" ] VIEW_SETTINGS = SUPPORTED_TRACK_SETTINGS # UCSC trackDb settings that are supported COMPOSITE_SETTINGS = ["longLabel", "shortLabel", "visibility", "pennantIcon", "allButtonPair", "html"] # UCSC settings for individual files (tracks) TRACK_SETTINGS = ["bigDataUrl", "longLabel", "shortLabel", "type", "color", "altColor"] # This dataset terms (among others) are needed in vis_dataset formatting ENCODED_DATASET_TERMS = ['biosample_ontology.term_name', 'biosample_ontology.term_id', 'biosample_summary', 'biosample_ontology.classification', 'assay_term_id', 'assay_term_name'] # This dataset terms (among others) are needed in vis_dataset formatting ENCODED_DATASET_EMBEDDED_TERMS = { 'biosample_accession': 'replicates.library.biosample.accession', 'sex': 'replicates.library.biosample.sex', 'taxon_id': 'replicates.library.biosample.organism.taxon_id' } # Abbeviations for output_type to fit in UCSC shortLabel OUTPUT_TYPE_8CHARS = { # "idat green channel": "idat gr", # raw data # "idat red channel": "idat rd", # raw data # "reads":"reads", # raw data # "intensity values": "intnsty", # raw data # "reporter code counts": "rcc", # raw data # "alignments":"aln", # our plan is not to visualize alignments for now # "unfiltered alignments":"unflt aln", # our plan is not to visualize alignments for now # "transcriptome alignments":"tr aln", # our plan is not to visualize alignments for now "minus strand signal of all reads": "all -", "plus strand signal of all reads": "all +", "signal of all reads": "all sig", "normalized signal of all reads": "normsig", # "raw minus strand signal":"raw -", # these are all now minus signal of all reads # "raw plus strand signal":"raw +", # these are all now plus signal of all reads "raw signal": "raw sig", "raw normalized signal": "nraw", "read-depth normalized signal": "rdnorm", "control normalized signal": "ctlnorm", "minus strand signal of unique reads": "unq -", "plus strand signal of unique reads": "unq +", "signal of unique reads": "unq sig", "signal p-value": "pval sig", "fold change over control": "foldchg", "exon quantifications": "exon qt", "gene quantifications": "gene qt", "microRNA quantifications": "miRNA qt", "transcript quantifications": "trsct qt", "library fraction": "lib frac", "methylation state at CpG": "mth CpG", "methylation state at CHG": "mth CHG", "methylation state at CHH": "mth CHH", "enrichment": "enrich", "replication timing profile": "repli tm", "variant calls": "vars", "filtered SNPs": "f SNPs", "filtered indels": "f indel", "hotspots": "hotspt", "loops": "lrci", "contact matrix": "ch int", "contact domains": "tads", "genome compartments": "compart", "open chromatin regions": "open ch", "filtered peaks": "filt pk", "filtered regions": "filt reg", "DHS peaks": "DHS pk", "peaks": "peaks", "replicated peaks": "rep pk", "RNA-binding protein associated mRNAs": "RBP RNA", "splice junctions": "splice", "transcription start sites": "tss", "predicted enhancers": "pr enh", "candidate enhancers": "can enh", "candidate promoters": "can pro", "predicted forebrain enhancers": "fb enh", # plan to fix these "predicted heart enhancers": "hrt enh", # plan to fix these "predicted whole brain enhancers": "wb enh", # plan to fix these "candidate Cis-Regulatory Elements": "cCRE", # "genome reference":"ref", # references not to be viewed # "transcriptome reference":"tr ref", # references not to be viewed # "transcriptome index":"tr rix", # references not to be viewed # "tRNA reference":"tRNA", # references not to be viewed # "miRNA reference":"miRNA", # references not to be viewed # "snRNA reference":"snRNA", # references not to be viewed # "rRNA reference":"rRNA", # references not to be viewed # "TSS reference":"TSS", # references not to be viewed # "reference variants":"var", # references not to be viewed # "genome index":"ref ix", # references not to be viewed # "female genome reference":"XX ref", # references not to be viewed # "female genome index":"XX rix", # references not to be viewed # "male genome reference":"XY ref", # references not to be viewed # "male genome index":"XY rix", # references not to be viewed # "spike-in sequence":"spike", # references not to be viewed "IDR thresholded peaks": "IDRt pk", "optimal IDR thresholded peaks": "oIDR pk", "conservative IDR thresholded peaks": "cIDR pk", "enhancer validation": "enh val", "semi-automated genome annotation": "saga" } # Track coloring is defined by biosample BIOSAMPLE_COLOR = { "GM12878": {"color": "153,38,0", "altColor": "115,31,0"}, # Dark Orange-Red "H1-hESC": {"color": "0,107,27", "altColor": "0,77,20"}, # Dark Green "K562": {"color": "46,0,184", "altColor": "38,0,141"}, # Dark Blue "keratinocyte": {"color": "179,0,134", "altColor": "154,0,113"}, # Darker Pink-Purple "HepG2": {"color": "189,0,157", "altColor": "189,76,172"}, # Pink-Purple "HeLa-S3": {"color": "0,119,158", "altColor": "0,94,128"}, # Greenish-Blue "HeLa": {"color": "0,119,158", "altColor": "0,94,128"}, # Greenish-Blue "A549": {"color": "204,163,0", "altColor": "218,205,22"}, # Dark Yellow "endothelial cell of umbilical vein": {"color": "224,75,0", "altColor": "179,60,0"}, # Pink "MCF-7": {"color": "22,219,206", "altColor": "18,179,168"}, # Cyan "SK-N-SH": {"color": "255,115,7", "altColor": "218,98,7"}, # Orange "IMR-90": {"color": "6,62,218", "altColor": "5,52,179"}, # Blue "CH12.LX": {"color": "86,180,233", "altColor": "76,157,205"}, # Dark Orange-Red "MEL cell line": {"color": "46,0,184", "altColor": "38,0,141"}, # Dark Blue "brain": {"color": "105,105,105", "altColor": "77,77,77"}, # Grey "eye": {"color": "105,105,105", "altColor": "77,77,77"}, # Grey "spinal cord": {"color": "105,105,105", "altColor": "77,77,77"}, # Grey "olfactory organ": {"color": "105,105,105", "altColor": "77,77,77"}, # Grey "esophagus": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "stomach": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "liver": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "pancreas": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "large intestine": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "small intestine": {"color": "230,159,0", "altColor": "179,125,0"}, # Mustard "gonad": {"color": "0.0,158,115", "altColor": "0.0,125,92"}, # Darker Aquamarine "mammary gland": {"color": "0.0,158,115", "altColor": "0.0,125,92"}, # Darker Aquamarine "prostate gland": {"color": "0.0,158,115", "altColor": "0.0,125,92"}, # Darker Aquamarine "ureter": {"color": "204,121,167", "altColor": "166,98,132"}, # Grey-Pink "urinary bladder": {"color": "204,121,167", "altColor": "166,98,132"}, # Grey-Pink "kidney": {"color": "204,121,167", "altColor": "166,98,132"}, # Grey-Pink "muscle organ": {"color": "102,50,200 ", "altColor": "81,38,154"}, # Violet "tongue": {"color": "102,50,200", "altColor": "81,38,154"}, # Violet "adrenal gland": {"color": "189,0,157", "altColor": "154,0,128"}, # Pink-Purple "thyroid gland": {"color": "189,0,157", "altColor": "154,0,128"}, # Pink-Purple "lung": {"color": "145,235,43", "altColor": "119,192,35"}, # Mossy green "bronchus": {"color": "145,235,43", "altColor": "119,192,35"}, # Mossy green "trachea": {"color": "145,235,43", "altColor": "119,192,35"}, # Mossy green "nose": {"color": "145,235,43", "altColor": "119,192,35"}, # Mossy green "placenta": {"color": "153,38,0", "altColor": "102,27,0"}, # Orange-Brown "extraembryonic structure": {"color": "153,38,0", "altColor": "102,27,0"}, # Orange-Brown "thymus": {"color": "86,180,233", "altColor": "71,148,192"}, # Baby Blue "spleen": {"color": "86,180,233", "altColor": "71,148,192"}, # Baby Blue "bone element": {"color": "86,180,233", "altColor": "71,148,192"}, # Baby Blue "blood": {"color": "86,180,233", "altColor": "71,148,192"}, # Baby Blue (red?) "blood vessel": {"color": "214,0,0", "altColor": "214,79,79"}, # Red "heart": {"color": "214,0,0", "altColor": "214,79,79"}, # Red "lymphatic vessel": {"color": "214,0,0", "altColor": "214,79,79"}, # Red "skin of body": {"color": "74,74,21", "altColor": "102,102,44"}, # Brown } VIS_CACHE_INDEX = "vis_cache" class Sanitize(object): # Tools for sanitizing labels def escape_char(self, c, exceptions=['_'], htmlize=False, numeralize=False): '''Pass through for 0-9,A-Z.a-z,_, but then either html encodes, numeralizes or removes special characters.''' n = ord(c) if n >= 47 and n <= 57: # 0-9 return c if n >= 65 and n <= 90: # A-Z return c if n >= 97 and n <= 122: # a-z return c if c in exceptions: return c if n == 32: # space return '_' if htmlize: return "&#%d;" % n if numeralize: return "%d" % n return "" def label(self, s): '''Encodes the string to swap special characters and leaves spaces alone.''' new_s = "" # longLabel and shorLabel can have spaces and some special characters for c in s: new_s += self.escape_char(c, [' ', '_', '.', '-', '(', ')', '+'], htmlize=False) return new_s def title(self, s): '''Encodes the string to swap special characters and replace spaces with '_'.''' new_s = "" # Titles appear in tag=title pairs and cannot have spaces for c in s: new_s += self.escape_char(c, ['_', '.', '-', '(', ')', '+'], htmlize=True) return new_s def tag(self, s): '''Encodes the string to swap special characters and remove spaces.''' new_s = "" first = True for c in s: new_s += self.escape_char(c, numeralize=True) if first: if new_s.isdigit(): # tags cannot start with digit. new_s = 'z' + new_s first = False return new_s def name(self, s): '''Encodes the string to remove special characters swap spaces for underscores.''' new_s = "" for c in s: new_s += self.escape_char(c) return new_s sanitize = Sanitize() class VisDefines(object): # Loads vis_def static files and other defines for vis formatting # This class is also a swiss army knife of vis formatting conversions def __init__(self, request, dataset=None): # Make these global so that the same files are not continually reloaded self._request = request global VIS_DEFS_BY_TYPE global VIS_DEFS_DEFAULT self.vis_defs = VIS_DEFS_BY_TYPE self.vis_def_default = VIS_DEFS_DEFAULT self.vis_type = "opaque" self.dataset = dataset if not self.vis_defs: self.load_vis_defs() def load_vis_defs(self): '''Loads 'vis_defs' (visualization definitions by assay type) from a static files.''' #global VIS_DEFS_FOLDER global VIS_DEFS_BY_TYPE global VIS_DEFS_DEFAULT folder = resource_filename(__name__, VIS_DEFS_FOLDER) files = os.listdir(folder) for filename in files: if filename.endswith('.json'): with open(folder + filename) as fh: log.debug('Preparing to load %s' % (filename)) vis_def = json.load(fh) # Could alter vis_defs here if desired. if vis_def: VIS_DEFS_BY_TYPE.update(vis_def) self.vis_defs = VIS_DEFS_BY_TYPE VIS_DEFS_DEFAULT = self.vis_defs.get("opaque",{}) self.vis_def_default = VIS_DEFS_DEFAULT def get_vis_type(self): '''returns the best visualization definition type, based upon dataset.''' assert(self.dataset is not None) assay = self.dataset.get("assay_term_name", 'none') if isinstance(assay, list): if len(assay) == 1: assay = assay[0] else: log.debug("assay_term_name for %s is unexpectedly a list %s" % (self.dataset['accession'], str(assay))) return "opaque" # simple rule defined in most vis_defs for vis_type in sorted(self.vis_defs.keys(), reverse=True): # Reverse pushes anno to bottom if "rule" in self.vis_defs[vis_type]: rule = self.vis_defs[vis_type]["rule"].replace('{assay_term_name}', assay) if rule.find('{') != -1: rule = self.convert_mask(rule) if eval(rule): self.vis_type = vis_type return self.vis_type # Ugly rules: vis_type = None if assay in ["RNA-seq", "PAS-seq", "microRNA-seq", \ "shRNA knockdown followed by RNA-seq", \ "CRISPR genome editing followed by RNA-seq", \ "CRISPRi followed by RNA-seq", \ "single-cell RNA sequencing assay", \ "siRNA knockdown followed by RNA-seq", "small RNA-seq"]: reps = self.dataset.get("replicates", []) # NOTE: overly cautious if len(reps) < 1: log.debug("Could not distinguish between long and short RNA for %s because there are " "no replicates. Defaulting to short." % (self.dataset.get("accession"))) vis_type = "SRNA" # this will be more noticed if there is a mistake else: average_fragment_size = reps[0].get("library", {}).get("average_fragment_size",) if average_fragment_size is not None: if average_fragment_size <= 200: vis_type = "SRNA" elif average_fragment_size > 200: vis_type = "LRNA" else: size_range = reps[0].get("library", {}).get("size_range", "") if size_range.startswith('>'): try: min_size = int(size_range[1:]) max_size = min_size except: log.debug("Could not distinguish between long and short RNA for %s. " "Defaulting to short." % (self.dataset.get("accession"))) vis_type = "SRNA" # this will be more noticed if there is a mistake elif size_range.startswith('<'): try: max_size = int(size_range[1:]) - 1 min_size = 0 except: log.debug("Could not distinguish between long and short RNA for %s. " "Defaulting to short." % (self.dataset.get("accession"))) self.vis_type = "SRNA" # this will be more noticed if there is a mistake return self.vis_type else: try: sizes = size_range.split('-') min_size = int(sizes[0]) max_size = int(sizes[1]) except: log.debug("Could not distinguish between long and short RNA for %s. " "Defaulting to short." % (self.dataset.get("accession"))) vis_type = "SRNA" # this will be more noticed if there is a mistake if vis_type is None: if min_size == 120 and max_size == 200: # Another ugly exception! vis_type = "LRNA" elif max_size <= 200 and max_size != min_size: vis_type = "SRNA" elif min_size >= 150: vis_type = "LRNA" elif (min_size + max_size)/2 >= 235: # This is some wicked voodoo (SRNA:108-347=227; LRNA:155-315=235) vis_type = "SRNA" if vis_type is None: log.debug("%s (assay:'%s') has undefined vis_type" % (self.dataset['accession'], assay)) vis_type = "opaque" # This becomes a dict key later so None is not okay self.vis_type = vis_type return self.vis_type def get_vis_def(self, vis_type=None): '''returns the visualization definition set, based upon dataset.''' if vis_type is None: vis_type = self.vis_type vis_def = self.vis_defs.get(vis_type, self.vis_def_default) if "other_groups" in vis_def and EXP_GROUP not in vis_def["other_groups"]["groups"]: vis_def["other_groups"]["groups"][EXP_GROUP] = DEFAULT_EXPERIMENT_GROUP if "sortOrder" in vis_def and EXP_GROUP not in vis_def["sortOrder"]: vis_def["sortOrder"].append(EXP_GROUP) return vis_def def visible_file_statuses(self): return VISIBLE_FILE_STATUSES def supported_subgroups(self): return SUPPORTED_SUBGROUPS def encoded_dataset_terms(self): return list(ENCODED_DATASET_EMBEDDED_TERMS.keys()) + ENCODED_DATASET_TERMS def pennants(self, project): return PENNANTS.get(project, PENNANTS['NHGRI']) def find_pennent(self): '''Returns an appropriate pennantIcon given dataset's award''' assert(self.dataset is not None) project = self.dataset.get("award", {}).get("project", "NHGRI") return self.pennants(project) def lookup_colors(self): '''Using the mask, determine which color table to use.''' assert(self.dataset is not None) color = None altColor = None coloring = {} ontology = self.dataset.get('biosample_ontology') term = "unknown" # really only seen in test data! if ontology is not None: if not isinstance(ontology, list): ontology = [ontology] if len(ontology) == 1: if isinstance(ontology[0], dict): term = ontology[0]['term_name'] else: log.debug("%s has biosample_ontology %s that is unexpectedly a list", self.dataset['accession'], str([bo['@id'] for bo in ontology])) coloring = BIOSAMPLE_COLOR.get(term, {}) if not coloring: for organ_slim in (os for bo in ontology for os in bo['organ_slims']): coloring = BIOSAMPLE_COLOR.get(organ_slim, {}) if coloring: break if coloring: assert("color" in coloring) if "altColor" not in coloring: color = coloring["color"] shades = color.split(',') red = int(shades[0]) / 2 green = int(shades[1]) / 2 blue = int(shades[2]) / 2 altColor = "%d,%d,%d" % (red, green, blue) coloring["altColor"] = altColor return coloring def add_living_color(self, live_format): '''Adds color and altColor. Note that altColor is only added if color is found.''' colors = self.lookup_colors() if colors and "color" in colors: live_format["color"] = colors["color"] if "altColor" in colors: live_format["altColor"] = colors["altColor"] def rep_for_file(self, a_file): '''Determines best rep_tech or rep for a file.''' # Starting with a little cheat for rare cases where techreps are compared instead of bioreps if a_file.get("file_format_type", "none") in ["idr_peak"]: return "combined" if a_file['output_type'].endswith("IDR thresholded peaks"): return "combined" bio_rep = 0 tech_rep = 0 if "replicate" in a_file: bio_rep = a_file["replicate"]["biological_replicate_number"] tech_rep = a_file["replicate"]["technical_replicate_number"] elif "tech_replicates" in a_file: # Do we want to make rep1_1.2.3 ? Not doing it now tech_reps = a_file["tech_replicates"] if len(tech_reps) == 1: bio_rep = int(tech_reps[0].split('_')[0]) tech_reps = tech_reps[0][2:] if len(tech_reps) == 1: tech_rep = int(tech_reps) elif len(tech_reps) > 1: bio = 0 for tech in tech_reps: if bio == 0: bio = int(tech.split('_')[0]) elif bio != int(tech.split('_')[0]): bio = 0 break if bio > 0: bio_rep = bio elif "biological_replicates" in a_file: bio_reps = a_file["biological_replicates"] if len(bio_reps) == 1: bio_rep = bio_reps[0] if bio_rep == 0: return "combined" rep = "rep%d" % bio_rep if tech_rep > 0: rep += "_%d" % tech_rep return rep def lookup_embedded_token(self, name, obj): '''Encodes the string to swap special characters and remove spaces.''' token = ENCODED_DATASET_EMBEDDED_TERMS.get(name, name) if token[0] == '{' and token[-1] == '}': token = token[1:-1] terms = token.split('.') cur_obj = obj while len(terms) > 0: term = terms.pop(0) cur_obj = cur_obj.get(term) if len(terms) == 0 or cur_obj is None: return cur_obj if isinstance(cur_obj,list): if len(cur_obj) == 0: return None cur_obj = cur_obj[0] # Can't presume to use any but first return None def lookup_token(self, token, dataset, a_file=None): '''Encodes the string to swap special characters and remove spaces.''' # dataset might not be self.dataset if token not in SUPPORTED_MASK_TOKENS: log.warn("Attempting to look up unexpected token: '%s'" % token) return "unknown token" if token in SIMPLE_DATASET_TOKENS: term = dataset.get(token[1:-1]) if term is None: return "Unknown " + token[1:-1].split('_')[0].capitalize() elif isinstance(term,list) and len(term) > 3: return "Collection of %d %ss" % (len(term),token[1:-1].split('_')[0].capitalize()) return term elif token == "{experiment.accession}": return dataset['accession'] elif token in ["{target}", "{target.label}", "{target.name}", "{target.title}", "{target.investigated_as}"]: if token == '{target}': token = '{target.label}' term = self.lookup_embedded_token(token, dataset) if term is None and token == '{target.name}': term = self.lookup_embedded_token('{target.label}', dataset) if term is not None: if isinstance(term, list) and len(term) > 0: return term[0] return term return "Unknown Target" elif token in ["{replicates.library.biosample.summary}", "{replicates.library.biosample.summary|multiple}"]: term = self.lookup_embedded_token('{replicates.library.biosample.summary}', dataset) if term is None: term = dataset.get("{biosample_term_name}") if term is not None: return term if token.endswith("|multiple}"): return "multiple biosamples" return "Unknown Biosample" elif token == "{biosample_term_name}": biosample_ontology = dataset.get('biosample_ontology') if biosample_ontology is None: return "Unknown Biosample" if isinstance(biosample_ontology, dict): return biosample_ontology['term_name'] if isinstance(biosample_ontology, list) and len(biosample_ontology) > 3: return "Collection of %d Biosamples" % (len(biosample_ontology)) # The following got complicated because general Dataset objects # cannot have biosample_ontology embedded properly. As a base class, # some of the children, PublicationData, Project and 8 Series # objects, have biosample_ontology embedded as array of objects, # while experiment and annotation have it embedded as one single # object. This becomes a problem when File object linkTo Dataset in # general rather than one specific type. Current embedding system # don't know how to map a property with type = ["array", "string"] # in elasticsearch. Therefore, it is possible the # "biosample_ontology" we got here is @id which should be embedded # with the following code. if not isinstance(biosample_ontology, list): biosample_ontology = [biosample_ontology] term_names = [] for type_obj in biosample_ontology: if isinstance(type_obj, str): term_names.append( self._request.embed(type_obj, '@@object')['term_name'] ) elif 'term_name' in type_obj: term_names.append(type_obj['term_name']) if len(term_names) == 1: return term_names[0] else: return term_names elif token == "{biosample_term_name|multiple}": biosample_ontology = dataset.get('biosample_ontology') if biosample_ontology is None: return "multiple biosamples" return biosample_ontology.get('term_name') # TODO: rna_species # elif token == "{rna_species}": # if replicates.library.nucleic_acid = polyadenylated mRNA # rna_species = "polyA RNA" # elif replicates.library.nucleic_acid == "RNA": # if "polyadenylated mRNA" in replicates.library.depleted_in_term_name # rna_species = "polyA depleted RNA" # else # rna_species = "total RNA" elif a_file is not None: if token == "{file.accession}": return a_file['accession'] #elif token == "{output_type}": # return a_file['output_type'] elif token == "{output_type_short_label}": output_type = a_file['output_type'] return OUTPUT_TYPE_8CHARS.get(output_type, output_type) elif token == "{replicate}": rep_tag = a_file.get("rep_tag") if rep_tag is not None: while len(rep_tag) > 4: if rep_tag[3] != '0': break rep_tag = rep_tag[0:3] + rep_tag[4:] return rep_tag rep_tech = a_file.get("rep_tech") if rep_tech is not None: return rep_tech.split('_')[0] # Should truncate tech_rep rep_tech = self.rep_for_file(a_file) return rep_tech.split('_')[0] # Should truncate tech_rep elif token == "{replicate_number}": rep_tag = a_file.get("rep_tag", a_file.get("rep_tech", self.rep_for_file(a_file))) if not rep_tag.startswith("rep"): return "0" return rep_tag[3:].split('_')[0] elif token == "{biological_replicate_number}": rep_tech = a_file.get("rep_tech", self.rep_for_file(a_file)) if not rep_tech.startswith("rep"): return "0" return rep_tech[3:].split('_')[0] elif token == "{technical_replicate_number}": rep_tech = a_file.get("rep_tech", self.rep_for_file(a_file)) if not rep_tech.startswith("rep"): return "0" return rep_tech.split('_')[1] elif token == "{rep_tech}": return a_file.get("rep_tech", self.rep_for_file(a_file)) else: val = self.lookup_embedded_token(token, a_file) if val is not None and isinstance(val, str): return val return "" else: val = self.lookup_embedded_token(token, dataset) if val is not None and isinstance(val, str): return val log.debug('Untranslated token: "%s"' % token) return "unknown" def convert_mask(self, mask, dataset=None, a_file=None): '''Given a mask with one or more known {term_name}s, replaces with values.''' working_on = mask # dataset might not be self.dataset if dataset is None: dataset = self.dataset chars = len(working_on) while chars > 0: beg_ix = working_on.find('{') if beg_ix == -1: break end_ix = working_on.find('}') if end_ix == -1: break term = self.lookup_token(working_on[beg_ix:end_ix+1], dataset, a_file=a_file) new_mask = [] if beg_ix > 0: new_mask = working_on[0:beg_ix] new_mask += "%s%s" % (term, working_on[end_ix+1:]) chars = len(working_on[end_ix+1:]) working_on = ''.join(new_mask) return working_on def ucsc_single_composite_trackDb(self, vis_format, title): '''Given a single vis_format (vis_dataset or vis_by_type dict, returns single UCSC trackDb composite text''' if vis_format is None or len(vis_format) == 0: return "# Empty composite for %s. It cannot be visualized at this time.\n" % title blob = "" # First the composite structure blob += "track %s\n" % vis_format["name"] blob += "compositeTrack on\n" blob += "type bed 3\n" for var in COMPOSITE_SETTINGS: val = vis_format.get(var) if val: blob += "%s %s\n" % (var, val) views = vis_format.get("view", []) if len(views) > 0: blob += "subGroup1 view %s" % views["title"] for view_tag in views["group_order"]: view_title = views["groups"][view_tag]["title"] blob += " %s=%s" % (view_tag, sanitize.title(view_title)) blob += '\n' dimA_checked = vis_format.get("dimensionAchecked", "all") dimA_tag = "" if dimA_checked == "first": # All will leave dimA_tag & dimA_checked empty, default to all on dimA_tag = vis_format.get("dimensions", {}).get("dimA", "") dimA_checked = None subgroup_ix = 2 for group_tag in vis_format["group_order"]: group = vis_format["groups"][group_tag] blob += "subGroup%d %s %s" % (subgroup_ix, group_tag, sanitize.title(group["title"])) subgroup_ix += 1 subgroup_order = None # group.get("group_order") if subgroup_order is None or not isinstance(subgroup_order, list): subgroup_order = sorted(group["groups"].keys()) for subgroup_tag in subgroup_order: subgroup_title = group["groups"][subgroup_tag]["title"] blob += " %s=%s" % (subgroup_tag, sanitize.title(subgroup_title)) if group_tag == dimA_tag and dimA_checked is None: dimA_checked = subgroup_tag blob += '\n' # sortOrder sort_order = vis_format.get("sortOrder") if sort_order: blob += "sortOrder" for sort_tag in sort_order: if title.startswith("ENCSR") and sort_tag == "EXP": continue # Single exp composites do not need to sort on EMP blob += " %s=+" % sort_tag blob += '\n' # dimensions actual_group_tags = ["view"] # Not all groups will be used in composite, depending upon content dimensions = vis_format.get("dimensions", {}) if dimensions: pairs = "" XY_skipped = [] XY_added = [] for dim_tag in sorted(dimensions.keys()): group = vis_format["groups"].get(dimensions[dim_tag]) if group is None: # e.g. "Targets" may not exist continue if dimensions[dim_tag] != "REP": if len(group.get("groups", {})) <= 1: if dim_tag[-1] in ['X', 'Y']: XY_skipped.append(dim_tag) continue elif dim_tag[-1] in ['X', 'Y']: XY_added.append(dim_tag) pairs += " %s=%s" % (dim_tag, dimensions[dim_tag]) actual_group_tags.append(dimensions[dim_tag]) # Getting too fancy for our own good: # If one XY dimension has more than one member then we must add both X and Y if len(XY_skipped) > 0 and len(XY_added) > 0: for dim_tag in XY_skipped: pairs += " %s=%s" % (dim_tag, dimensions[dim_tag]) actual_group_tags.append(dimensions[dim_tag]) if len(pairs) > 0: blob += "dimensions%s\n" % pairs # filterComposite filter_composite = vis_format.get("filterComposite") if filter_composite: filterfish = "" for filter_tag in sorted(filter_composite.keys()): group = vis_format["groups"].get(filter_composite[filter_tag]) if group is None or len(group.get("groups", {})) <= 1: # e.g. "Targets" may not exist continue filterfish += " %s" % filter_tag if filter_composite[filter_tag] == "one": filterfish += "=one" if len(filterfish) > 0: blob += 'filterComposite%s\n' % filterfish elif dimA_checked is not None: blob += 'dimensionAchecked %s\n' % dimA_checked blob += '\n' # Now cycle through views for view_tag in views["group_order"]: view = views["groups"][view_tag] tracks = view.get("tracks", []) if len(tracks) == 0: continue blob += " track %s_%s_view\n" % (vis_format["name"], view["tag"]) blob += " parent %s on\n" % vis_format["name"] blob += " view %s\n" % view["tag"] for var in VIEW_SETTINGS: val = view.get(var) if val: blob += " %s %s\n" % (var, val) blob += '\n' # Now cycle through tracks in view for track in tracks: blob += " track %s\n" % (track["name"]) blob += " parent %s_%s_view" % (vis_format["name"], view["tag"]) dimA_subgroup = track.get("membership", {}).get(dimA_tag) if dimA_subgroup is not None and dimA_subgroup != dimA_checked: blob += " off\n" else: # Can set individual tracks off. Used when remodelling blob += " %s\n" % track.get("checked", "on") if "type" not in track: blob += " type %s\n" % (view["type"]) for var in TRACK_SETTINGS: val = track.get(var) if val: blob += " %s %s\n" % (var, val) # Now membership membership = track.get("membership") if membership: blob += " subGroups" for member_tag in sorted(membership): blob += " %s=%s" % (member_tag, membership[member_tag]) blob += '\n' # metadata line? metadata_pairs = track.get("metadata_pairs") if metadata_pairs is not None: metadata_line = "" for meta_tag in sorted(metadata_pairs.keys()): metadata_line += ' %s=%s' % (meta_tag.lower(), metadata_pairs[meta_tag]) if len(metadata_line) > 0: blob += " metadata%s\n" % metadata_line blob += '\n' blob += '\n' return blob class IhecDefines(object): # Defines and formatting code for IHEC JSON def __init__(self, request): self._request = request self.samples = {} self.vis_defines = None def molecule(self, dataset): # ["total RNA", "polyA RNA", "cytoplasmic RNA", "nuclear RNA", "genomic DNA", "protein", "other"] replicates = dataset.get("replicates", []) if len(replicates) == 0: return None molecule = replicates[0].get("library", {}).get("nucleic_acid_term_name") if not molecule: return None if molecule == "DNA": return "genomic DNA" if molecule == "RNA": # TODO: Can/should we distinguish "cytoplasmic RNA" and "nuclear RNA" #descr = dataset.get('assay_term_name', '').lower() #if 'nuclear' in descr: # return "nuclear RNA" #if 'cyto' in descr: # return "cytoplasmic RNA" return "total RNA" if molecule == "polyadenylated mRNA": return "polyA RNA" if molecule == "miRNA": return "other" # TODO: should this be something else if molecule == "protein": return "protein" return "genomic DNA" # default def lineage(self, biosample, default=None): # TODO: faking lineage dev_slims = biosample.get("developmental_slims",[]) if len(dev_slims) > 0: return ','.join(dev_slims) return default def differentiation(self, biosample, default=None): # TODO: faking differentiation diff_slims = biosample.get("organ_slims",[]) if len(diff_slims) > 0: return '.'.join(diff_slims) return default def exp_type(self, vis_type, dataset): # For IHEC, a simple experiment type is needed: # TODO: EpiRR experiment type: ChIP-Seq Input, Histone H3K27ac, mRNA-Seq, total-RNA-Seq, Stranded Total RNA-Seq # /Ihec_metadata_specification.md: Chromatin Accessibility, Bisulfite-Seq, MeDIP-Seq, MRE-Seq, ChIP-Seq, mRNA-Seq, smRNA-Seq # DNA Methylation --> DNA Methylation # DNA accessibility --> Chromatin Accessibility # if assay_slims=Transcription, get assay_title # polyA RNA-seq --> mRNA-Seq # total RNA-seq --> total-RNA-Seq # small RNA-seq --> smRNA-Seq # microRNA-seq/transcription profiling by array assay/microRNA counts/ - I have to ask them # if assay_slims=DNA Binding, then get the target.label # control --> ChIP-Seq Input # if not control, then look at target.investigated_as to contain 'histone' or 'transcription factor' # Then either 'Histone <target.label>' or 'Transcription Factor <target.label>' (example: 'Histone H3K4me3') if vis_type in ["ChIP", "GGRChIP", "HIST"]: # Controls have no visualizable files so we shouldn't see them, however... # Chip-seq Controls would have target.investigated_as=Control if dataset.get('control_type'): return "ChIP-Seq Input" target = dataset.get('target',{}).get('label','unknown') if vis_type == "HIST": return "Histone " + target if target == "unknown": return "ChIP-seq" return "ChIP-Seq Input: Transcription factor " + target if vis_type == "DNASE": return "Chromatin Accessibility" if vis_type == "ATAC": return "Chromatin Accessibility" # TODO Confirm if vis_type == "WGBS": return "DNA Methylation" # IHEC only allow smRNA for microRNA-seq which is different from our # get_vis_type logic if 'assay_term_name' not in dataset: return None assay = dataset['assay_term_name'] if assay == 'microRNA-seq': return 'smRNA-Seq' if assay == 'small RNA-seq': return 'RNA-Seq' if assay == 'polyA plus RNA-seq': return 'mRNA-Seq' if assay == 'RNA-seq': assay_title = dataset.get('assay_title') if assay_title == 'total RNA-seq': return 'total-RNA-Seq' return 'RNA-Seq' #if vis_type == "ChIA": # return "ChIA-pet" #if vis_type == "HiC": # return "Hi-C" #if vis_type == "TSS": # return "Rampage" #if vis_type == "eCLIP": # return "e-CLIP" #if vis_type == "ANNO": # return "Annotation" return None # vis_dataset.get('assay_term_name','Unknown') def experiment_attributes(self, vis_dataset): assay_id = vis_dataset.get('assay_term_id') if not assay_id: return {} attributes = {} experiment_type = vis_dataset.get('ihec_exp_type') if experiment_type is None: return {} attributes["experiment_type"] = [experiment_type] attributes["experiment_ontology_uri"] = ['http://purl.obolibrary.org/obo/' + assay_id.replace(':','_')] assay_name = vis_dataset.get('assay_term_name') if assay_name: attributes["assay_type"] = assay_name attributes['library_strategy'] = [IHEC_LIB_STRATEGY[assay_name]] query = ( '/search/?type=ReferenceEpigenome&related_datasets.accession={}' '&status=released&field=dbxrefs&limit=all' ).format(vis_dataset['name']) for ref_epi in self._request.embed(query)['@graph']: for dbxref in ref_epi.get('dbxrefs', []): if dbxref.startswith('IHEC:IHECRE'): attributes['reference_registry_id'] = [dbxref[5:].split('.')[0]] break return attributes def analysis_attributes(self, vis_dataset): # find/create analysis_attributes: # WARNING: This could go crazy! # NOTE: single pipeline version AND single aligner only allowed for the whole exp! # NOTE: Ugly defaults attributes = { "analysis_software": 'ENCODE', "analysis_group": 'ENCODE DCC', "analysis_software_version": '1', "alignment_software": 'unknown', "alignment_software_version": '1' } if IHEC_DEEP_DIG: pipeline = vis_dataset.get('pipeline') if pipeline and 'title' in pipeline: attributes["analysis_software"] = pipeline['title'] attributes["analysis_group"] = pipeline.get('lab') attributes["analysis_software_version"] = pipeline.get('version') aligner = vis_dataset.get('aligner') if aligner: attributes["alignment_software"] = aligner.get('name') attributes["alignment_software_version"] = aligner.get('version') return attributes def biomaterial_type(self, biosample_type): # For IHEC, biomaterial type: "Cell Line", "Primary Cell" "Primary Cell Culture" "Primary Tissue" if biosample_type: biosample_type = biosample_type.lower() if biosample_type in ["tissue", "whole organism"]: # "whole organism" (but really they should add another enum) - hitz return "Primary Tissue" if biosample_type in ["primary cell"]: return "Primary Cell Culture" return "Cell Line" def sample(self, dataset, vis_defines=None): # returns an ihec sample appropriate for the dataset if vis_defines is None: if self.vis_defines is None: self.vis_defines = VisDefines(self._request) vis_defines = self.vis_defines sample = {} biosample = vis_defines.lookup_embedded_token('replicates.library.biosample', dataset) if biosample is None: return {} sample_id = biosample['accession'] if sample_id in self.samples: return self.samples[sample_id] term_id = biosample.get('biosample_ontology', {}).get('term_id') if term_id: sample["sample_ontology_uri"] = [term_id] sample["biomaterial_type"] = [self.biomaterial_type(biosample.get('biosample_ontology', {}).get('classification'))] # ["Cell Line","Primary Cell", ... source = biosample.get('source') sample["biomaterial_provider"] = [source['title']] sample["line"] = [biosample.get('biosample_ontology', {}).get('term_name', 'none')] sample["disease"] = [biosample.get('health_status',"Healthy").capitalize()] # assume all samples are healthy - hitz if "Healthy" in sample["disease"]: sample["disease_ontology_uri"] = ["http://ncit.nci.nih.gov/ncitbrowser/ConceptReport.jsp?dictionary=NCI_Thesaurus&code=C115935&ns=NCI_Thesaurus"] else: # Note only term for disease ontology is healthy=C115935. No search url syntax known sample["disease_ontology_uri"] = ["https://ncit.nci.nih.gov/ncitbrowser/pages/multiple_search.jsf?nav_type=terminologies"] sample["sex"] = [biosample.get('sex','unknown').capitalize()] if "Cell Line" in sample["biomaterial_type"]: sample["differentiation_method"] = ["NA"] sample["batch"] = ["NA"] sample["medium"] = ["unknown"] # We don't have this information sample['lineage'] = [self.lineage(biosample, 'unknown')] sample['differentiation_stage'] = [self.differentiation(biosample, 'unknown')] sample['passage'] = [str(biosample.get('passage_number', 'NA'))] if "Primary Tissue" in sample["biomaterial_type"] or "Primary Cell Culture" in sample["biomaterial_type"]: sample["donor_sex"] = sample["sex"] donor = biosample.get('donor') if donor is not None: sample["donor_id"] = [donor['accession']] if donor.get('age', 'NA').isdigit(): sample["donor_age"] = [int(donor['age'])] elif donor.get('age', 'NA') == 'unknown': sample["donor_age"] = ['NA'] else: sample["donor_age"] = [donor.get('age', 'NA')] sample["donor_age_unit"] = [donor.get('age_units','year')] # unknown is not supported sample["donor_life_stage"] = [donor.get('life_stage','unknown')] sample["donor_health_status"] = sample["disease"] sample["donor_health_status_ontology_uri"] = sample["disease_ontology_uri"] if donor.get('organism',{}).get('name','unknown') == 'human': ethnicity = donor.get('ethnicity') if ethnicity is not None: sample["donor_ethnicity"] = ethnicity else: sample["donor_ethnicity"] = ['unknown'] else: sample["donor_ethnicity"] = ['NA'] if "Primary Tissue" in sample["biomaterial_type"]: sample["tissue_type"] = sample["line"] sample["tissue_depot"] = [biosample.get('source',{}).get('description','unknown')] sample["collection_method"] = ["unknown"] # we don't have this information elif "Primary Cell Culture" in sample["biomaterial_type"]: sample["cell_type"] = sample["line"] sample["culture_conditions"] = ["unknown"] # applied_modifications=[], treatments=[], genetic_modifications=[], characterizations=[] sample["markers"] = ["unknown"] # not collected by us sample["passage_if_expanded"] = [str(biosample.get('passage_number', 'NA'))] sample["origin_sample"] = ["unknown"] sample["origin_sample_ontology_uri"] = sample["sample_ontology_uri"] self.samples[sample_id] = sample return sample def view_type(self, view, track): # For IHEC, dataset track view type needed: signal_unstranded, peak_calls, methylation_profile, signal_forward, signal_reverse, # rpkm_forward, rpkm_reverse, rpkm_unstranded, reads_per_million_ miRNA_mapped, copy_number_variation # https://github.com/IHEC/ihec-ecosystems/blob/master/docs/minimum_required_track_types.md track_type = track.get('type','').split()[0] if track_type in ['bigBed', 'bigNarrowPeak']: return 'peak_calls' view_title = view.get('title').lower() if 'minus signal' in view_title: return 'signal_reverse' if 'plus signal' in view_title: return 'signal_forward' return 'signal_unstranded' def remodel_to_json(self, host_url, vis_datasets): '''Formats this collection of vis_datasets into IHEC hub json structure.''' if not vis_datasets: return {} # ihec_json = { # 'hub_description': { ... }, similar to hub.txt/genome.txt # 'datasets': { ... }, one per experiment for ChIP # and one per replicate for non-ChIP # 'samples': { ... } one per biosample # } self.samples = {} # { # 'sample_id_1': { # one per biosample # 'sample_ontology_uri': '...', # UBERON or CL # 'molecule': '...', # # [ # # 'total RNA', # # 'polyA RNA', # # 'cytoplasmic RNA', # # 'nuclear RNA', # # 'genomic DNA', # # 'protein', # # 'other' # # ] # 'disease': '...', # TODO # 'disease_ontology_uri': '...', # TODO # 'biomaterial_type': '...', # # [ # # 'Cell Line', # # 'Primary Cell', # # 'Primary Cell Culture', # # 'Primary Tissue' # # ] # 'line': '...', # biosample_term_name # 'lineage': '?', # 'differentiation_stage': '?', # 'medium': '?', # 'sex': '...', # experiment.replicate.library.biosample.sex # }, # 'sample_id_2': {...} # } datasets = {} # { # 'experiment_1': { # # one per experiment for ChIP and one per replicate for non-ChIP # 'sample_id': '...', # biosample accession # 'experiment_attributes': { # 'experiment_type': '...', # 'assay_type': '...', # assay_term_name (e.g. 'DNA Methylation') # 'experiment_ontology_uri': '...', # assay_term_id (e.g. OBI:0000716) # 'reference_registry_id': '...', # EpiRR ID # }, # 'analysis_attributes': { # 'analysis_software': '...', # pipeline # 'analysis_software_version': '...', # pipeline version # 'analysis_group': '...', # pipeline laboratory # 'alignment_software': '...', # software ugly lookup; one per experiment # 'alignment_software_version': '...', # software_version # }, # 'browser': { # 'signal_forward': [ # view # { # 'big_data_url': '...', # 'description_url': '...', # 'md5sum': '...', # 'subtype': '...', # 'sample_source': '...', # 'primary': '?', # }, # {...} # ], # 'signal_reverse': [{...}], # } # }, # 'experiment_2': {...}, # } # TODO: If properties aren't found then warn and skip dataset! assembly = '' taxon_id = 0 included_accessions = [] for accession in vis_datasets.keys(): vis_dataset = vis_datasets[accession] if vis_dataset is None or len(vis_dataset) == 0: continue # From any vis_dataset, update these: assembly = vis_dataset.get('ucsc_assembly') or assembly taxon_id = vis_dataset.get('taxon_id') or taxon_id dataset = {} analysis_attributes = self.analysis_attributes(vis_dataset) if analysis_attributes: dataset['analysis_attributes'] = analysis_attributes else: log.warn('Could not determine IHEC analysis attributes for %s', accession) # Check if experiment is IHEC-able first experiment_attributes = self.experiment_attributes(vis_dataset) if experiment_attributes: dataset['experiment_attributes'] = experiment_attributes else: log.warn('Could not determine IHEC experiment attributes for %s', accession) # Find/create sample: biosample_accession = vis_dataset.get('biosample_accession') if biosample_accession is None: log.warn('vis_dataset %s is missing biosample', accession) else: dataset['sample_id'] = biosample_accession if biosample_accession not in self.samples: sample = vis_dataset.get('ihec_sample', {}) if not sample: log.warn('vis_dataset %s is missing sample', accession) else: self.samples[biosample_accession] = sample # create browser, which hold tracks: browser = {} views = vis_dataset.get('view', []) for view_tag in views['group_order']: view = views['groups'][view_tag] # Add tracks to views tracks = view.get('tracks', []) if len(tracks) == 0: continue for track in tracks: ihec_track = { 'big_data_url': track['bigDataUrl'], 'description_url': '{}/experiments/{}/'.format( host_url, accession ), # "primary" is required; # default to False first and worry about it later 'primary': False, 'subtype': track['longLabel'], } md5sum = track.get('md5sum') if md5sum: ihec_track['md5sum'] = md5sum # TODO: clean up the following logic # rep_membership = track.get('membership', {}).get('REP') # rep_group = vis_dataset.get('groups', {}).get('REP') # if rep_membership and rep_group: # if rep_membership in rep_group: # ihec_track['sample_source'] = rep_group[rep_membership]['title'] # subgroup_order = sorted(rep_group['groups'].keys()) # ihec_track['primary'] = (rep_membership == subgroup_order[0]) # extra fields for term in ['type', 'color', 'altColor']: if term in track: ihec_track[term] = track[term] ihec_track['view'] = self.view_type(view, track) metadata_pairs = track.get('metadata_pairs', {}) for meta_key in metadata_pairs: ihec_track[meta_key.replace('&#32;', ' ')] = metadata_pairs[meta_key][1:-1] # Add IHEC tracks: # For ChIP-seq experiments, label the first track as # primary for each track type. # For non-ChIP-seq experiments, split experiments as one # dataset per replicate. if vis_dataset.get('assay_term_name', '') == 'ChIP-seq': if ihec_track['view'] not in browser.keys(): browser[ihec_track['view']] = [] browser[ihec_track['view']].append(ihec_track) else: rep = ( ihec_track.get('replicate (bio_tech)') or ihec_track.get('replicate (bio_tech)', '') ) experiment_key = '{}_rep{}'.format(accession, rep) if experiment_key not in datasets: datasets[experiment_key] = deepcopy(dataset) datasets[experiment_key]['browser'] = {} if ihec_track['view'] not in datasets[experiment_key][ 'browser' ]: # Tracks are sorted based on "group_order" in # vis_defs. So the first track for a certain track # type should be primary ihec_track['primary'] = True datasets[experiment_key]['browser'][ ihec_track['view'] ] = [ihec_track] else: datasets[experiment_key]['browser'][ ihec_track['view'] ].append(ihec_track) # Add ChIP-seq tracks and assign one primary track if vis_dataset.get('assay_term_name', '') == 'ChIP-seq': # For experiment like ENCSR000ALI, there are no peak_calls but # only singals according to vis_defs. In another word, there # is no peak to guide selecting primary track. Thus simply # select the first track. primary_rep_val = '' if 'peak_calls' in browser: browser['peak_calls'][0]['primary'] = True primary_rep_val = ( browser['peak_calls'][0].get('replicate (bio_tech)') or browser['peak_calls'][0].get('replicates (bio_tech)') or '' ) for track_type in browser: for track in browser[track_type]: track_rep_val = ( track.get('replicate (bio_tech)') or track.get('replicates (bio_tech)', '') ) if ( not primary_rep_val or track_rep_val == primary_rep_val ): track['primary'] = True break dataset['browser'] = browser datasets[accession] = dataset included_accessions.append(accession) hub_description = { # similar to hub.txt/genome.txt 'publishing_group': 'ENCODE', 'name': 'ENCODE reference epigenomes', 'description': 'ENCODE reference epigenomes', 'description_url': '{}/search/?type=ReferenceEpigenome'.format( host_url ), 'email': '[email protected]', 'date': time.strftime('%Y-%m-%d', time.gmtime()), # 'taxon_id': ..., # Species taxonomy id. (human: 9606, mouse: 10090) # 'assembly': '...', # UCSC: hg19, hg38 } if assembly: hub_description['assembly'] = assembly if taxon_id: hub_description['taxon_id'] = int(taxon_id) # Find corresponding reference epigenome query = ( '/search/?type=ReferenceEpigenome' '&{}&status=released&field=accession&limit=all' ).format( '&'.join( 'related_datasets.accession={}'.format(acc) for acc in included_accessions ) ) ref_epi_accs = [ ref_epi['accession'] for ref_epi in self._request.embed(query)['@graph'] ] return { 'hub_description': hub_description, 'datasets': datasets, 'samples': self.samples, } # TODO: move to separate vis_cache module? class VisCache(object): # Stores and recalls vis_dataset formatted json to/from es vis_cache def __init__(self, request): self.request = request self.es = self.request.registry.get(ELASTIC_SEARCH, None) self.index = VIS_CACHE_INDEX def create_cache(self): if not self.es: return None if not self.es.indices.exists(self.index): one_shard = {'index': {'number_of_shards': 1, 'max_result_window': 99999 }} mapping = {'default': {"enabled": False}} self.es.indices.create(index=self.index, body=one_shard, wait_for_active_shards=1) self.es.indices.put_mapping(index=self.index, doc_type='default', body=mapping) log.debug("created %s index" % self.index) def add(self, vis_id, vis_dataset): '''Adds a vis_dataset (aka vis_blob) json object to elastic-search''' if not self.es: return if not self.es.indices.exists(self.index): self.create_cache() # Only bother creating on add self.es.index(index=self.index, doc_type='default', body=vis_dataset, id=vis_id) def get(self, vis_id=None, accession=None, assembly=None): '''Returns the vis_dataset json object from elastic-search, or None if not found.''' if vis_id is None and accession is not None and assembly is not None: vis_id = accession + '_' + ASSEMBLY_TO_UCSC_ID.get(assembly, assembly) if self.es: try: result = self.es.get(index=self.index, doc_type='default', id=vis_id) return result['_source'] except: pass # Missing index will return None return None def search(self, accessions, assembly): '''Returns a list of composites from elastic-search, or None if not found.''' if self.es: ucsc_assembly = ASSEMBLY_TO_UCSC_ID.get(assembly, assembly) # Normalized accession vis_ids = [accession + "_" + ucsc_assembly for accession in accessions] try: query = {"query": {"ids": {"values": vis_ids}}} res = self.es.search(body=query, index=self.index, doc_type='default', size=99999) # size=200? hits = res.get("hits", {}).get("hits", []) results = {} for hit in hits: results[hit["_id"]] = hit["_source"] # make this a generator? No... len(results) log.debug("ids found: %d" % (len(results))) return results except: pass return {} # Not referenced in any other module def visualizable_assemblies( assemblies, files, visible_statuses=VISIBLE_FILE_STATUSES ): '''Returns just the assemblies with visualizable files.''' file_assemblies = set() # sets for comparing assemblies_set = set(assemblies) for afile in files: afile_assembly = afile.get('assembly') if afile_assembly is None or afile_assembly in file_assemblies: continue # more efficient than simply relying on set() if (afile['status'] in visible_statuses and afile.get('file_format', '') in VISIBLE_FILE_FORMATS): file_assemblies.add(afile_assembly) if file_assemblies == assemblies_set: break # Try not to go through the whole file list! return list(file_assemblies) def is_file_visualizable(file): '''Determines whether a file can be visualized in a genome browser. Needs to be kept in sync with isFileVisualizable in objectutils.js. Keyword arguments: file -- file object including props to test for visualizability ''' conditions = [ file.get('file_format') in [ 'bigWig', 'bigBed', ], file.get('file_format_type') not in [ 'bedMethyl', 'bedLogR', 'idr_peak', 'tss_peak', 'pepMap', 'modPepMap', ], file.get('status') in [ 'released', 'in progress', 'archived', ], ] return all(conditions) def _file_to_format(process_file): '''Used with map to convert list of files to their types''' return process_file['file_format'] # Currently called in types/shared_calculated_properties.py def browsers_available( status, assemblies, types, item_type=None, files=None, accession=None, request=None ): '''Returns list of browsers based upon vis_blobs or else files list.''' # NOTES:When called by visualize calculated property, # vis_blob should be in vis_cache, but if not files are used. # When called by visindexer, neither vis_cache nor files are # used (could be called 'browsers_might_work'). if "Dataset" not in types: return [] if item_type is None: visualizable_types = set(VISIBLE_DATASET_TYPES) if visualizable_types.isdisjoint(types): return [] elif item_type not in VISIBLE_DATASET_TYPES_LC: return [] browsers = set() full_set = {'ucsc', 'ensembl', 'hic'} file_assemblies = None file_types = None if request is not None: vis_cache = VisCache(request) if files is not None: # Make a set of all file types in all dataset files file_types = set(map(_file_to_format, files)) for assembly in assemblies: if file_types is None: continue mapped_assembly = ASSEMBLY_DETAILS.get(assembly) if not mapped_assembly: continue vis_blob = None if (request is not None and accession is not None and status in VISIBLE_FILE_STATUSES): # use of find_or_make_acc_composite() will recurse! vis_blob = vis_cache.get(accession=accession, assembly=assembly) file_assemblies = visualizable_assemblies(assemblies, files) if ('ucsc' not in browsers and 'ucsc_assembly' in mapped_assembly.keys() and not BROWSER_FILE_TYPES['ucsc'].isdisjoint(file_types)): if vis_blob or files is None or assembly in file_assemblies: browsers.add('UCSC') if ('ensembl' not in browsers and 'ensembl_host' in mapped_assembly.keys() and not BROWSER_FILE_TYPES['ensembl'].isdisjoint(file_types)): if vis_blob or files is None or assembly in file_assemblies: browsers.add('Ensembl') if ('hic' not in browsers and 'hic' in mapped_assembly.keys() and not BROWSER_FILE_TYPES['hic'].isdisjoint(file_types)): if file_assemblies is not None and assembly in file_assemblies: browsers.add('hic') if browsers == full_set: # No use continuing break return list(browsers) # Currently called in visualization.py and in search.py def object_is_visualizable( obj, assembly=None, check_files=False, exclude_quickview=False ): '''Returns true if it is likely that this object can be visualized.''' if 'accession' not in obj: return False if assembly is not None: assemblies = [ assembly ] else: assemblies = obj.get('assembly',[]) files = None if check_files: # Returning [] instead of None is important files = obj.get('files', []) browsers = browsers_available(obj.get('status', 'none'), assemblies, obj.get('@type', []), files=files) if exclude_quickview and 'quickview' in browsers: return len(browsers) > 1 else: return len(browsers) > 0 # Currently called in search.py def vis_format_url(browser, path, assembly, position=None): '''Given a url to hub.txt, returns the url to an external browser or None.''' mapped_assembly = ASSEMBLY_DETAILS[assembly] if not mapped_assembly: return None if browser == "ucsc": ucsc_assembly = mapped_assembly.get('ucsc_assembly') if ucsc_assembly is not None: external_url = 'http://genome.ucsc.edu/cgi-bin/hgTracks?hubClear=' external_url += path + '&db=' + ucsc_assembly if position is not None: external_url += '&position={}'.format(position) return external_url elif browser == "ensembl": ensembl_host = mapped_assembly.get('ensembl_host') if ensembl_host is not None: external_url = 'http://' + ensembl_host + '/Trackhub?url=' external_url += path + ';species=' + mapped_assembly.get('species').replace(' ','_') ### TODO: remove redirect=no when Ensembl fixes their mirrors #external_url += ';redirect=no' ### TODO: remove redirect=no when Ensembl fixes their mirrors if position is not None: if position.startswith('chr'): position = position[3:] # ensembl position r=19:7069444-7087968 external_url += '&r={}'.format(position) # GRCh38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt # GRCh38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt;species=Homo_sapiens # hg19/GRCh37: http://grch37.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR596NOF/@@hub/hub.txt;species=Homo_sapiens # mm10/GRCm38: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR475TDY@@hub/hub.txt;species=Mus_musculus # mm9/NCBIM37: http://may2012.archive.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR000CNV@@hub/hub.txt;species=Mus_musculus # BDGP6: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR040UNE@@hub/hub.txt;species=Drosophila_melanogaster # BDGP5: http://dec2014.archive.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR040UNE@@hub/hub.txt;species=Drosophila_melanogaster # ce11/WBcel235: http://www.ensembl.org/Trackhub?url=https://www.encodeproject.org/experiments/ENCSR475TDY@@hub/hub.txt;species=Caenorhabditis_elegans return external_url elif browser == "quickview": file_formats = '&file_format=bigBed&file_format=bigWig' file_inclusions = '&status=released&status=in+progress' return ('/search/?type=File&assembly=%s&dataset=%s%s%s#browser' % (assembly,path,file_formats,file_inclusions)) #else: # ERROR: not supported at this time return None
{ "content_hash": "77c8dad2f680d6cf91a29f4093f3502b", "timestamp": "", "source": "github", "line_count": 1834, "max_line_length": 170, "avg_line_length": 47.62213740458015, "alnum_prop": 0.521450898224161, "repo_name": "ENCODE-DCC/encoded", "id": "de92712a31038512f2f26d86b195e5f15e951c06", "size": "87339", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "src/encoded/vis_defines.py", "mode": "33188", "license": "mit", "language": [ { "name": "AngelScript", "bytes": "741" }, { "name": "Dockerfile", "bytes": "1988" }, { "name": "Gherkin", "bytes": "48806" }, { "name": "HTML", "bytes": "371973" }, { "name": "JavaScript", "bytes": "3493156" }, { "name": "Jsonnet", "bytes": "15159" }, { "name": "Makefile", "bytes": "875" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "2845978" }, { "name": "SCSS", "bytes": "403800" }, { "name": "Shell", "bytes": "30525" } ], "symlink_target": "" }
from decimal import Decimal from trading import file_logger try: import ujson as json except ImportError: import json from pprint import pformat import time import requests from trading.exchange import exchange_api_url, exchange_auth def market_maker_strategy(open_orders, order_book, spreads): time.sleep(10) open_orders.get_open_orders() open_orders.cancel_all() while True: time.sleep(0.005) if order_book.asks.price_tree.min_key() - order_book.bids.price_tree.max_key() < 0: file_logger.warn('Negative spread: {0}'.format( order_book.asks.price_tree.min_key() - order_book.bids.price_tree.max_key())) continue if not open_orders.open_bid_order_id: open_bid_price = order_book.asks.price_tree.min_key() - spreads.bid_spread - open_orders.open_bid_rejections if 0.01 * float(open_bid_price) < float(open_orders.accounts['USD']['available']): order = {'size': '0.01', 'price': str(open_bid_price), 'side': 'buy', 'product_id': 'BTC-USD', 'post_only': True} response = requests.post(exchange_api_url + 'orders', json=order, auth=exchange_auth) if 'status' in response.json() and response.json()['status'] == 'pending': open_orders.open_bid_order_id = response.json()['id'] open_orders.open_bid_price = open_bid_price open_orders.open_bid_rejections = Decimal('0.0') file_logger.info('new bid @ {0}'.format(open_bid_price)) elif 'status' in response.json() and response.json()['status'] == 'rejected': open_orders.open_bid_order_id = None open_orders.open_bid_price = None open_orders.open_bid_rejections += Decimal('0.04') file_logger.warn('rejected: new bid @ {0}'.format(open_bid_price)) elif 'message' in response.json() and response.json()['message'] == 'Insufficient funds': open_orders.open_bid_order_id = None open_orders.open_bid_price = None file_logger.warn('Insufficient USD') else: file_logger.error('Unhandled response: {0}'.format(pformat(response.json()))) continue if not open_orders.open_ask_order_id: open_ask_price = order_book.bids.price_tree.max_key() + spreads.ask_spread + open_orders.open_ask_rejections if 0.01 < float(open_orders.accounts['BTC']['available']): order = {'size': '0.01', 'price': str(open_ask_price), 'side': 'sell', 'product_id': 'BTC-USD', 'post_only': True} response = requests.post(exchange_api_url + 'orders', json=order, auth=exchange_auth) if 'status' in response.json() and response.json()['status'] == 'pending': open_orders.open_ask_order_id = response.json()['id'] open_orders.open_ask_price = open_ask_price file_logger.info('new ask @ {0}'.format(open_ask_price)) open_orders.open_ask_rejections = Decimal('0.0') elif 'status' in response.json() and response.json()['status'] == 'rejected': open_orders.open_ask_order_id = None open_orders.open_ask_price = None open_orders.open_ask_rejections += Decimal('0.04') file_logger.warn('rejected: new ask @ {0}'.format(open_ask_price)) elif 'message' in response.json() and response.json()['message'] == 'Insufficient funds': open_orders.open_ask_order_id = None open_orders.open_ask_price = None file_logger.warn('Insufficient BTC') else: file_logger.error('Unhandled response: {0}'.format(pformat(response.json()))) continue if open_orders.open_bid_order_id and not open_orders.open_bid_cancelled: bid_too_far_out = open_orders.open_bid_price < (order_book.asks.price_tree.min_key() - spreads.bid_too_far_adjustment_spread) bid_too_close = open_orders.open_bid_price > (order_book.bids.price_tree.max_key() - spreads.bid_too_close_adjustment_spread) cancel_bid = bid_too_far_out or bid_too_close if cancel_bid: if bid_too_far_out: file_logger.info('CANCEL: open bid {0} too far from best ask: {1} spread: {2}'.format( open_orders.open_bid_price, order_book.asks.price_tree.min_key(), open_orders.open_bid_price - order_book.asks.price_tree.min_key())) if bid_too_close: file_logger.info('CANCEL: open bid {0} too close to best bid: {1} spread: {2}'.format( open_orders.open_bid_price, order_book.bids.price_tree.max_key(), open_orders.open_bid_price - order_book.bids.price_tree.max_key())) open_orders.cancel('bid') continue if open_orders.open_ask_order_id and not open_orders.open_ask_cancelled: ask_too_far_out = open_orders.open_ask_price > (order_book.bids.price_tree.max_key() + spreads.ask_too_far_adjustment_spread) ask_too_close = open_orders.open_ask_price < (order_book.asks.price_tree.min_key() - spreads.ask_too_close_adjustment_spread) cancel_ask = ask_too_far_out or ask_too_close if cancel_ask: if ask_too_far_out: file_logger.info('CANCEL: open ask {0} too far from best bid: {1} spread: {2}'.format( open_orders.open_ask_price, order_book.bids.price_tree.max_key(), open_orders.open_ask_price - order_book.bids.price_tree.max_key())) if ask_too_close: file_logger.info('CANCEL: open ask {0} too close to best ask: {1} spread: {2}'.format( open_orders.open_ask_price, order_book.asks.price_tree.min_key(), open_orders.open_ask_price - order_book.asks.price_tree.min_key())) open_orders.cancel('ask') continue def buyer_strategy(order_book, open_orders, spreads): time.sleep(10) while True: time.sleep(0.001) if not open_orders.open_bid_order_id: open_bid_price = order_book.bids.price_tree.max_key() - spreads.bid_spread if 0.01 * float(open_bid_price) < float(open_orders.accounts['USD']['available']): order = {'size': '0.01', 'price': str(open_bid_price), 'side': 'buy', 'product_id': 'BTC-USD', 'post_only': True} response = requests.post(exchange_api_url + 'orders', json=order, auth=exchange_auth) try: response = response.json() except ValueError: file_logger.error('Unhandled response: {0}'.format(pformat(response))) if 'status' in response and response['status'] == 'pending': open_orders.open_bid_order_id = response['id'] open_orders.open_bid_price = open_bid_price open_orders.open_bid_rejections = Decimal('0.0') file_logger.info('new bid @ {0}'.format(open_bid_price)) elif 'status' in response and response['status'] == 'rejected': open_orders.open_bid_order_id = None open_orders.open_bid_price = None open_orders.open_bid_rejections += Decimal('0.04') file_logger.warn('rejected: new bid @ {0}'.format(open_bid_price)) elif 'message' in response and response['message'] == 'Insufficient funds': open_orders.open_bid_order_id = None open_orders.open_bid_price = None file_logger.warn('Insufficient USD') elif 'message' in response and response['message'] == 'request timestamp expired': open_orders.open_bid_order_id = None open_orders.open_bid_price = None file_logger.warn('Request timestamp expired') else: file_logger.error('Unhandled response: {0}'.format(pformat(response))) continue if open_orders.open_bid_order_id and not open_orders.open_bid_cancelled: bid_too_far_out = open_orders.open_bid_price < (order_book.bids.price_tree.max_key() - spreads.bid_too_far_adjustment_spread) bid_too_close = open_orders.open_bid_price > (order_book.bids.price_tree.max_key() - spreads.bid_too_close_adjustment_spread) cancel_bid = bid_too_far_out or bid_too_close if cancel_bid: if bid_too_far_out: file_logger.info('CANCEL: open bid {0} too far from best bid: {1} spread: {2}'.format( open_orders.open_bid_price, order_book.bids.price_tree.max_key(), order_book.bids.price_tree.max_key() - open_orders.open_bid_price)) if bid_too_close: file_logger.info('CANCEL: open bid {0} too close to best bid: {1} spread: {2}'.format( open_orders.open_bid_price, order_book.bids.price_tree.max_key(), order_book.bids.price_tree.max_key() - open_orders.open_bid_price)) open_orders.cancel('bid') continue
{ "content_hash": "794bd2cdd63e840ea6b527d10f9e3397", "timestamp": "", "source": "github", "line_count": 183, "max_line_length": 120, "avg_line_length": 56.85245901639344, "alnum_prop": 0.5219146482122261, "repo_name": "PierreRochard/coinbase-exchange-order-book", "id": "8d5296a7b7c863ac3c85f8ae18c39290f7c3ba09", "size": "10404", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "trading/strategies.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "40976" } ], "symlink_target": "" }
import os import sys try: from setuptools import setup except ImportError: from distutils.core import setup import perfdemo version = perfdemo.__version__ setup( name='perfdemo', version=version, author='', author_email='[email protected]', packages=[ 'perfdemo', ], include_package_data=True, install_requires=[ 'Django>=1.6.5', ], zip_safe=False, scripts=['perfdemo/manage.py'], )
{ "content_hash": "9c1d2726f664a81a22216996ba7d9e78", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 41, "avg_line_length": 17, "alnum_prop": 0.6318082788671024, "repo_name": "martinblech/pyconar2014_perfdemo", "id": "23e454e9c0219cee41350b4c94e6730be0c823a6", "size": "506", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1212" }, { "name": "JavaScript", "bytes": "2387" }, { "name": "Python", "bytes": "36866" }, { "name": "Shell", "bytes": "5100" } ], "symlink_target": "" }
from typing import List, Tuple, Iterable import unittest class Solution: def islandPerimeter(self, grid: List[List[int]]) -> int: # Find the first non-0 cell for i, row in enumerate(grid): for j, cell in enumerate(row): if cell: # Return perimeter starting from that cell. return determine_perimeter_bfs((i, j), grid) def determine_perimeter_bfs(coordinate, map_: List[List[int]]) -> list: visited_coordinates = set() seeds_current_round = [coordinate] perimeter = 0 while any(seeds_current_round): seeds_next_round = [] for seed in seeds_current_round: if seed in visited_coordinates: continue visited_coordinates.add(seed) for neighbor in generate_bordering_coordinates(seed): is_land = ( neighbor[0] >= 0 and neighbor[0] < len(map_) and neighbor[1] >= 0 and neighbor[1] < len(map_[0]) and map_[neighbor[0]][neighbor[1]] ) if not is_land: perimeter += 1 else: seeds_next_round.append(neighbor) seeds_current_round = seeds_next_round return perimeter def generate_bordering_coordinates(coordinate: Tuple[int, int]) -> Iterable[Tuple[int, int]]: """ Generates the coordinates that border the specified coordinate. """ return ( (coordinate[0] + 1, coordinate[1]), (coordinate[0] - 1, coordinate[1]), (coordinate[0], coordinate[1] + 1), (coordinate[0], coordinate[1] - 1), ) TEST_MAP = [ [0, 1, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [1, 1, 0, 0], ] class Tests(unittest.TestCase): def test_1(self): s = Solution() self.assertEqual(s.islandPerimeter(TEST_MAP), 16)
{ "content_hash": "0cc9afd929ce79b2d44ad69b185bbae2", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 93, "avg_line_length": 28.26086956521739, "alnum_prop": 0.5333333333333333, "repo_name": "AustinTSchaffer/DailyProgrammer", "id": "daaa6a5e26b001c0b1e75ed756532615315a931b", "size": "1950", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "LeetCode/IslandPerimeter/app.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "9482" }, { "name": "C#", "bytes": "11127" }, { "name": "Dockerfile", "bytes": "308" }, { "name": "F#", "bytes": "26762" }, { "name": "HCL", "bytes": "2461" }, { "name": "HTML", "bytes": "824" }, { "name": "Java", "bytes": "22830" }, { "name": "Julia", "bytes": "3416" }, { "name": "Lua", "bytes": "6296" }, { "name": "Python", "bytes": "284314" }, { "name": "Rust", "bytes": "1517" }, { "name": "Shell", "bytes": "871" } ], "symlink_target": "" }
from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('ebooks', '0121_auto_20201005_2211'), ] operations = [ migrations.RemoveField( model_name='book', name='lead', ), migrations.RemoveField( model_name='book', name='lead_en', ), migrations.RemoveField( model_name='book', name='lead_it', ), ]
{ "content_hash": "e9620b314e47f887683c6b96cbb814b3", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 46, "avg_line_length": 20.782608695652176, "alnum_prop": 0.5, "repo_name": "flavoi/diventi", "id": "4b7cb02cc6e5649ded973e7e048a82389ff4f80f", "size": "528", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "diventi/ebooks/migrations/0122_auto_20201005_2212.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "385265" }, { "name": "Procfile", "bytes": "46" }, { "name": "Python", "bytes": "826530" } ], "symlink_target": "" }
import serial import sys from time import localtime,strftime ser = serial.Serial(sys.argv[1],57600) fout = open(sys.argv[2],'a') while (1): line = ser.readline()[4:] fout.write(strftime("%H:%M:%S ",localtime()) + line) fout.flush()
{ "content_hash": "93876574bc2a5aa9da612cdd05b37cb1", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 57, "avg_line_length": 27.22222222222222, "alnum_prop": 0.6571428571428571, "repo_name": "turon/mantis", "id": "0cb2e1d3a2bbfee1150875bef72c4b834520de96", "size": "245", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/apps/deprecated/wildfire/readserial.py", "mode": "33261", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
from msrest.serialization import Model class NodeTransitionProgress(Model): """Information about an NodeTransition operation. This class contains an OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState is Completed or Faulted. . :param state: Possible values include: 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', 'ForceCancelled' :type state: str or :class:`enum <azure.servicefabric.models.enum>` :param node_transition_result: :type node_transition_result: :class:`NodeTransitionResult <azure.servicefabric.models.NodeTransitionResult>` """ _attribute_map = { 'state': {'key': 'State', 'type': 'str'}, 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } def __init__(self, state=None, node_transition_result=None): self.state = state self.node_transition_result = node_transition_result
{ "content_hash": "5e9d466d87e3d57cd90c54f15974e2c4", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 98, "avg_line_length": 38.80769230769231, "alnum_prop": 0.6947472745292369, "repo_name": "AutorestCI/azure-sdk-for-python", "id": "1b31a4519621e7173c6af2b500928f0ac5ba7444", "size": "1483", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-servicefabric/azure/servicefabric/models/node_transition_progress.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "34619070" } ], "symlink_target": "" }
import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v12.enums", marshal="google.ads.googleads.v12", manifest={"CallPlaceholderFieldEnum",}, ) class CallPlaceholderFieldEnum(proto.Message): r"""Values for Call placeholder fields. """ class CallPlaceholderField(proto.Enum): r"""Possible values for Call placeholder fields.""" UNSPECIFIED = 0 UNKNOWN = 1 PHONE_NUMBER = 2 COUNTRY_CODE = 3 TRACKED = 4 CONVERSION_TYPE_ID = 5 CONVERSION_REPORTING_STATE = 6 __all__ = tuple(sorted(__protobuf__.manifest))
{ "content_hash": "a94d34bf77bc02d5920100c521292c5a", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 59, "avg_line_length": 24.26923076923077, "alnum_prop": 0.6370839936608558, "repo_name": "googleads/google-ads-python", "id": "f0f3d90b80ca12acf3c2103081eed5dc5e633386", "size": "1231", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "google/ads/googleads/v12/enums/types/call_placeholder_field.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "23399881" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('phrasebook', '0010_auto_20170415_1417'), ] operations = [ migrations.AlterField( model_name='word', name='created_on', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created'), ), ]
{ "content_hash": "4e72d1fcfc32daa5b4debbd16c418f1b", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 103, "avg_line_length": 24.473684210526315, "alnum_prop": 0.6365591397849463, "repo_name": "DanCatchpole/phrasebook-django", "id": "fbba235039d26102819b138e88dc99efc01a4be2", "size": "538", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "phrasebook/migrations/0011_auto_20170415_1518.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "14192" }, { "name": "Python", "bytes": "46266" } ], "symlink_target": "" }
"""Viterbi contour decoder. """ from motif.core import ContourDecoder class ViterbiDecoder(ContourDecoder): ''' Viterbi contour decoder. ''' def decode(self, ctr, Y): raise NotImplementedError @classmethod def get_id(cls): return 'viterbi'
{ "content_hash": "e8e85f2a4ef25145b34fe15e2ec681dc", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 37, "avg_line_length": 19.928571428571427, "alnum_prop": 0.6594982078853047, "repo_name": "rabitt/motif", "id": "e46e8bb4b1b0ae90d4ada4859248454e12bfacea", "size": "279", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "motif/contour_decoders/viterbi.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "7794" }, { "name": "Python", "bytes": "205056" }, { "name": "Shell", "bytes": "1022" } ], "symlink_target": "" }
import sys import pyvips image = pyvips.Image.new_from_file(sys.argv[1], access="sequential") watermark = pyvips.Image.new_from_file(sys.argv[3], access="sequential") # downsize the image by 50% image = image.resize(0.5) # set the watermark alpha to 20% (multiply A of RGBA by 0.2). watermark *= [1, 1, 1, 0.2] # overlay the watermark at the bottom left, with a 100 pixel margin image = image.composite(watermark, "over", x=100, y=image.height - watermark.height - 100) image.write_to_file(sys.argv[2])
{ "content_hash": "394c47d47fd9ccaac17f2b06f1c1aab3", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 72, "avg_line_length": 31.352941176470587, "alnum_prop": 0.6848030018761726, "repo_name": "jcupitt/pyvips", "id": "4d235071bf70cd954451f764211902211230d06e", "size": "553", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/watermark_image.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "115655" }, { "name": "Shell", "bytes": "811" } ], "symlink_target": "" }
from __future__ import unicode_literals from dbdiff.fixture import Fixture from .base import TestImportBase, FixtureDir class TestImport(TestImportBase): """Load test.""" def test_single_city(self): """Load single city.""" fixture_dir = FixtureDir('import') self.import_data( fixture_dir, 'angouleme_country', 'angouleme_region', 'angouleme_city', 'angouleme_translations' ) Fixture(fixture_dir.get_file_path('angouleme.json')).assertNoDiff()
{ "content_hash": "a84c547d86e832d151b97366551549d3", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 75, "avg_line_length": 27.75, "alnum_prop": 0.6108108108108108, "repo_name": "greenday2/django-cities-light", "id": "b363cbf2e3248805dd82acbd17f58a6f84a989c1", "size": "555", "binary": false, "copies": "2", "ref": "refs/heads/spatial", "path": "cities_light/tests/test_import.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "108541" } ], "symlink_target": "" }
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def ComputeResourceHostSPBMLicenseInfo(vim, *args, **kwargs): '''The ComputeResourceHostSPBMLicenseInfo data object encapsulates the SPBM(Storage Policy Based Management) license information for a host.''' obj = vim.client.factory.create('ns0:ComputeResourceHostSPBMLicenseInfo') # do some validation checking... if (len(args) + len(kwargs)) < 2: raise IndexError('Expected at least 3 arguments got: %d' % len(args)) required = [ 'host', 'licenseState' ] optional = [ 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
{ "content_hash": "b12075683efcfa1adf311afcb0a69737", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 124, "avg_line_length": 34.333333333333336, "alnum_prop": 0.6266548984995587, "repo_name": "xuru/pyvisdk", "id": "346adf153d56756eab2e513b104bbcedf9ff0c42", "size": "1134", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyvisdk/do/compute_resource_host_spbm_license_info.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "369" }, { "name": "Python", "bytes": "3037849" }, { "name": "Shell", "bytes": "4517" } ], "symlink_target": "" }
from troveclient import base class Flavor(base.Resource): """ A Flavor is an Instance type, specifying among other things, RAM size. """ def __repr__(self): return "<Flavor: %s>" % self.name class Flavors(base.ManagerWithFind): """ Manage :class:`Flavor` resources. """ resource_class = Flavor def list(self): """ Get a list of all flavors. :rtype: list of :class:`Flavor`. """ return self._list("/flavors", "flavors") def get(self, flavor): """ Get a specific flavor. :rtype: :class:`Flavor` """ return self._get("/flavors/%s" % base.getid(flavor), "flavor")
{ "content_hash": "b20de9baea64c81255a633d782e3d3ce", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 74, "avg_line_length": 21.235294117647058, "alnum_prop": 0.5332409972299169, "repo_name": "cp16net/python-troveclient", "id": "1bb49d9c6e645d6e621b33b43521eabd530aec97", "size": "1363", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "troveclient/v1/flavors.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "387875" }, { "name": "Shell", "bytes": "1432" } ], "symlink_target": "" }
from ray.rllib.algorithms.pg.pg import PG, PGConfig, DEFAULT_CONFIG from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy, PGTF2Policy from ray.rllib.algorithms.pg.pg_torch_policy import PGTorchPolicy from ray.rllib.algorithms.pg.utils import post_process_advantages __all__ = [ "DEFAULT_CONFIG", "post_process_advantages", "PG", "PGConfig", "PGTF1Policy", "PGTF2Policy", "PGTorchPolicy", "post_process_advantages", "DEFAULT_CONFIG", ]
{ "content_hash": "805501fcc958a78047fab3fb0d314f8b", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 73, "avg_line_length": 28.294117647058822, "alnum_prop": 0.7172557172557172, "repo_name": "ray-project/ray", "id": "945c492c8168d88eb584b64ee6e531c361c69174", "size": "481", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rllib/algorithms/pg/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "37490" }, { "name": "C++", "bytes": "5972422" }, { "name": "CSS", "bytes": "10912" }, { "name": "Cython", "bytes": "227477" }, { "name": "Dockerfile", "bytes": "20210" }, { "name": "HTML", "bytes": "30382" }, { "name": "Java", "bytes": "1160849" }, { "name": "JavaScript", "bytes": "1128" }, { "name": "Jinja", "bytes": "6371" }, { "name": "Jupyter Notebook", "bytes": "1615" }, { "name": "Makefile", "bytes": "234" }, { "name": "PowerShell", "bytes": "1114" }, { "name": "Python", "bytes": "19539109" }, { "name": "Shell", "bytes": "134583" }, { "name": "Starlark", "bytes": "334862" }, { "name": "TypeScript", "bytes": "190599" } ], "symlink_target": "" }
from functools import total_ordering from dark.score import HigherIsBetterScore, LowerIsBetterScore @total_ordering class _Base(object): """ Holds information about a matching region from a read alignment. You should not use this class directly. Use one of its subclasses, either HSP or LSP, depending on whether you want numerically higher scores to be considered better (HSP) or worse (LSP). Below is an example alignment to show the locations of the six start/end offsets. The upper four are offsets into the subject. The lower two are offsets into the read. Note that the read has two gaps ('-' characters). All offsets are zero-based and follow the Python convention that the 'end' positions are not included in the string. readStartInSubject readEndInSubject | | | | | subjectStart subjectEnd | | | | | | | | | Subject: .................ACGTAAAGGCTTAGGT................. Read: ....ACGTA-AGGCTT-GGT............ | | | | readStart readEnd Note that the above is just one alignment, and that others are possible (e.g., with the read extending beyond the end(s) of the subject, or the subject also with gaps in it). The point of the example diagram is to show what the six variable names will always refer to, not to enumerate all possible alignments (the tests in test/blast/test_hsp.py go through many different cases). The classes in this file are just designed to hold the variables associated with an HSP and to make it easy to compare them. @param readStart: The offset in the read where the match begins. @param readEnd: The offset in the read where the match ends. @param readStartInSubject: The offset in the subject where the match of the read starts. @param readEndInSubject: The offset in the subject where the match of the read ends. @param readFrame: The reading frame for the read, a value from {-3, -2, -1, 1, 2, 3} where the sign indicates negative or positive sense. @param subjectStart: The offset in the subject where the match begins. @param subjectEnd: The offset in the subject where the match ends. @param subjectFrame: The reading frame for the subject, a value from {-3, -2, -1, 1, 2, 3} where the sign indicates negative or positive sense. @param readMatchedSequence: The matched part of the read. Note that this may contain gaps (marked with '-'). @param subjectMatchedSequence: The matched part of the subject. Note that this may contain gaps (marked with '-'). @param identicalCount: The C{int} number of positions at which the subject and query were identical. @param positiveCount: The C{int} number of positions at which the subject and query had a positive score in the scoring matrix used during matching (this is probably only different from the C{identicalCount} when matching amino acids (i.e., not nucleotides). """ def __init__(self, readStart=None, readEnd=None, readStartInSubject=None, readEndInSubject=None, readFrame=None, subjectStart=None, subjectEnd=None, subjectFrame=None, readMatchedSequence=None, subjectMatchedSequence=None, identicalCount=None, positiveCount=None): self.readStart = readStart self.readEnd = readEnd self.readStartInSubject = readStartInSubject self.readEndInSubject = readEndInSubject self.readFrame = readFrame self.subjectStart = subjectStart self.subjectEnd = subjectEnd self.subjectFrame = subjectFrame self.readMatchedSequence = readMatchedSequence self.subjectMatchedSequence = subjectMatchedSequence self.identicalCount = identicalCount self.positiveCount = positiveCount def __lt__(self, other): return self.score < other.score def __eq__(self, other): return self.score == other.score def betterThan(self, score): """ Compare this instance's score with another score. @param score: A C{float} score. @return: A C{bool}, C{True} if this score is the better. """ return self.score.betterThan(score) class HSP(_Base): """ Holds information about a high-scoring pair from a read alignment. Comparisons are done as for BLAST or DIAMOND bit scores (higher is better). @param score: The numeric score of this HSP. """ def __init__(self, score, **kwargs): _Base.__init__(self, **kwargs) self.score = HigherIsBetterScore(score) class LSP(_Base): """ Holds information about a low-scoring pair from a read alignment. Comparisons are done as for BLAST or DIAMOND e-values (smaller is better). @param score: The numeric score of this LSP. """ def __init__(self, score, **kwargs): _Base.__init__(self, **kwargs) self.score = LowerIsBetterScore(score)
{ "content_hash": "9ead23cb21027955e937d6f1178c3f94", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 79, "avg_line_length": 44.113821138211385, "alnum_prop": 0.6249539255436786, "repo_name": "bamueh/dark-matter", "id": "61dccfdb2eeb985a99b4254e023e74728b6dced1", "size": "5426", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dark/hsp.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1249" }, { "name": "Python", "bytes": "1451852" }, { "name": "Shell", "bytes": "1125" } ], "symlink_target": "" }
import re from distutils.command.build import build from setuptools import setup VERSION = re.search( r"^__version__ = ['\"]([^'\"]*)['\"]", open('aws_ir/_version.py', 'r').read(), re.MULTILINE ).group(1) setup(name="aws_ir", version=VERSION, author="Andrew Krug, Alex McCormack, Joel Ferrier, Jeff Parr", author_email="[email protected],[email protected],[email protected]", packages=["aws_ir", "aws_ir/libs", "aws_ir/plans"], license="MIT", description="AWS Incident Response ToolKit", scripts=['bin/aws_ir'], url='https://github.com/ThreatResponse/aws_ir', download_url="https://github.com/ThreatResponse/aws_ir/archive/v0.3.0.tar.gz", use_2to3=True, zip_safe=True, install_requires=['boto3>=1.3.0', 'progressbar_latest', 'logutils==0.3.3', 'requests', 'structlog', 'pytz', 'jinja2', 'pluginbase', 'margaritashotgun>=0.4.1', 'aws-ir-plugins>=0.0.3' ], setup_requires=['pytest-runner'], tests_require=['pytest', 'pytest-cov', 'pytest-watch', 'moto', 'mock'], )
{ "content_hash": "0f78d8d0d252acc00467186939e8edec", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 84, "avg_line_length": 34.975, "alnum_prop": 0.4839170836311651, "repo_name": "ThreatResponse/aws_ir", "id": "8d790f858eee4ab83a2e880d03d90a627acc40b7", "size": "1399", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "49247" }, { "name": "Vim script", "bytes": "84" } ], "symlink_target": "" }
from cuttlebug.util import bidict import collections class ParseError(Exception): pass UNSPECIFIED = -1 VOID = 0 CHAR = 1 INT = 3 FLOAT = 4 DOUBLE = 5 STRUCT = 6 UNION = 7 TYPEDEF = 8 # Weird case SHORT = 9 LONG = 10 SIGNED,UNSIGNED = 1,0 SIZE_SHORT,SIZE_LONG = 0, 1 TYPES = bidict({"void": VOID, "char":CHAR,"short": SHORT, "float":FLOAT, "double": DOUBLE, "int":INT, "long":LONG, "struct":STRUCT, "union":UNION}) SIZES = bidict({"short":SHORT, "long":LONG}) class Type(object): @staticmethod def parse(type_string): try: return Type.parse_proper(type_string) except ParseError: return Type(UNSPECIFIED, UNSPECIFIED, UNSPECIFIED, UNSPECIFIED, UNSPECIFIED, TYPEDEF) @property def icon_name(self): if self.type == TYPEDEF: return 'unknown_type.png' retval = '' if self.signed in (SIGNED,UNSIGNED): retval = "signed_" if self.signed else "unsigned_" if self.size in SIZES: retval += SIZES[self.size] + "_" if self.type in TYPES: retval += TYPES[self.type] retval += "_star.png" if self.pointer else ".png" return retval @staticmethod def parse_proper(type_string): parts = type_string.split() parts = [part.strip() for part in parts] volatile = "volatile" in parts static = "static" in parts size = UNSPECIFIED if "short" in parts: size = SIZE_SHORT if "long" in parts: size = SIZE_LONG pointer = False if parts[-1] == '*': parts.pop(-1) pointer = True if 'struct' in parts: type = STRUCT else: try: type = TYPES[parts[-1]] except KeyError: if size != UNSPECIFIED: type = INT else: raise ParseError("Could not parse type '%s'" % type_string) signed = UNSPECIFIED if type in (INT, CHAR): signed = "unsigned" not in parts return Type(static, volatile, signed, size, pointer, type) def __init__(self, static, volatile, signed, size, pointer, type): self.static = static self.volatile = volatile self.signed = signed self.size = size self.pointer = pointer self.type = type def __str__(self): static = "static " if self.static else "" volatile = "volatile " if self.volatile else "" signed = {SIGNED:"signed ", UNSIGNED:"unsigned ", UNSPECIFIED:""}[self.signed] type = {INT:"int ", CHAR:"char ", FLOAT:"float ", DOUBLE:"double ", VOID:"void ", UNSPECIFIED:""}[self.type] size = {SIZE_SHORT:"short ", SIZE_LONG:"long ", UNSPECIFIED:""}[self.size] pointer = "*" if self.pointer else "" return "%s%s%s%s%s%s%s" % (static, volatile, signed, size, type, pointer, self.value) def __repr__(self): return "<Type '%s'>" % str(self) def __cmp__(self, x): return self.static == x.static and \ self.volatile == x.volatile and \ self.signed == x.signed and \ self.size == x.size and \ self.pointer == x.pointer and \ self.type == x.type class Variable(object): def __init__(self, name, expression, type, children=0, data=None, frame=None): self.type = type self.data = data self.name = name self.frame = frame self.expression = expression self.children = int(children) def __str__(self): if self.children: return "<Variable %s expr='%s' with %d children>" % (self.name, self.expression, self.children) else: return "<Variable %s %s=%s>" % (self.name, self.expression, self.data) class GDBStackFrame(object): def __init__(self, level, addr, func, file, line, fullname=None): self.level = level self.addr = addr self.func = func self.file = file self.line = line self.fullname = fullname or '' @property def key(self): return "%s:%s:%s" % (self.func, self.file, self.fullname) # def __cmp__(self, x): # return cmp(self.level,x.level) and cmp(self.addr,x.addr) and cmp(self.file,x.file) and cmp(self.line,x.line) # def __hash__(self): # return hash('%d:%d:%s:%d' % (self.level, self.addr, self.file, self.line)) def __str__(self): return "<GDB Stack Frame %d %s()@0x%08x in file %s:%d>" % (self.level, self.func, self.addr, self.file, self.line) def __repr__(self): return str(self) class GDBRegisterModel(object): def __init__(self, parent): self.parent = parent self.set_names([]) def __getitem__(self, x): return self.__values[x] def set_names(self, names): self.__names = list(names) self.__values = collections.OrderedDict() for name in self.__names: if name: self.__values[name] = None def get_name_from_number(self, number): return self.__names[int(number)] def set_value_from_number(self, number, value): name = self.get_name_from_number(number) if name: self.__values[name] = value def get_value_from_number(self, number): name = self.__names[int(number)] if name: return self.__values[name] def __len__(self): return len(self.__values) def __iter__(self): return iter(self.__values) def iteritems(self): return self.__values.iteritems() class GDBStackModel(object): def __init__(self, parent): self.parent = parent self.clear() def add_frame(self, level, addr, func, file, line): if level != len(self.frames): raise ValueError("Corrupted stack model. Attempt to add frame of level %d when stack is at level %d" % (level, len(self.frames))) self.frames.append(GDBStackFrame(level, addr, func, file, line)) @property def keys(self): return [frame.key for frame in self] def clear(self): self.frames = [] def __iter__(self): return iter(self.frames) def __len__(self): return len(self.frames) def __getitem__(self, item): return self.frames[int(item)] @property def top(self): if self.depth: return self.frames[0] @property def depth(self): return len(self.frames) def pretty(self): retval ='' for frame in reversed(self): retval += (' '*(len(self)-frame.level)) + str(frame) + "\n" return retval class GDBVarModel(object): def __init__(self, parent): self.parent = parent self.vars = collections.OrderedDict() self.exprs = {} def __str__(self): return "<GDBVarModel managing %d variables>" % len(self.vars) @staticmethod def parent_name(name): parts = name.split('.') if len(parts) > 1: return '.'.join(parts[:-1]) else: return None def __iter__(self): return iter(self.vars) def __get_roots(self): return [name for name in self if '.' not in name] roots = property(__get_roots) def get_parent(self, name): parent = GDBVarModel.parent_name(name) if parent and parent not in self: raise Exception("Orphaned variable in GDBVarModel: %s" % name) return parent def add(self, name, variable): parent_name = GDBVarModel.parent_name(name) if parent_name and parent_name not in self: raise Exception("Attempt to add child variable before parent") self.vars[name] = variable if parent_name: parent = self.vars[parent_name] if name not in parent.data: parent.data.append(name) self.exprs[variable.expression] = name def remove(self, name): var = self.vars.pop(name) def __contains__(self, name): return name in self.vars def __getitem__(self, name): return self.vars[name] def __str__(self): retval = 'GDBVarModel\n' for name, var in self.vars.iteritems(): retval += "%8s %s\n" % (name, var) return retval
{ "content_hash": "817492b01299cc33101878ae054c0a4e", "timestamp": "", "source": "github", "line_count": 277, "max_line_length": 147, "avg_line_length": 31.707581227436823, "alnum_prop": 0.533302971649778, "repo_name": "ryansturmer/cuttlebug", "id": "82b0db9a4e460bc61aa235e05556569e761ca9ca", "size": "8783", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cuttlebug/gdb/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "513241" }, { "name": "Shell", "bytes": "434" } ], "symlink_target": "" }
import re import os import os.path import shutil import uuid import tempfile import subprocess from django.conf import settings from edge.blast import BLAST_DB, default_genome_db_name from edge.blast import Blast_Accession from edge.models import Fragment, Genome from edge.utils import make_required_dirs def fragment_fasta_fn(fragment): return "%s/fragment/%s/%s/edge-fragment-%s-nucl.fa" % ( settings.NCBI_DATA_DIR, fragment.id % 1024, (fragment.id >> 10) % 1024, fragment.id, ) def does_blast_db_have_all_fragments(fragments, dbname): f = open(dbname + ".nsd") lines = f.readlines() f.close() print("verifying: expecting %s fragments, got %s lines in nsd" % (len(fragments), len(lines))) return len(fragments) * 2 == len(lines) def build_fragment_fasta(fragment, refresh=False): fn = fragment_fasta_fn(fragment) make_required_dirs(fn) if not os.path.isfile(fn) or refresh: # have not built this fasta or need refresh print("building %s" % fn) # this may take awhile, so do this first, so user interrupt does # not create an empty file sequence = fragment.indexed_fragment().sequence # be really lenient, convert any unknown bp to N sequence = re.sub(r"[^agctnAGCTN]", "n", sequence) if fragment.circular is True: sequence = sequence + sequence # writing first to a temp file, rename to an expected file location, # prevents possible race condition of accessing/writing to the same # file (which may or may not be a problem on a NAS, i don't know). with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmpf: tmpf.write( ">gnl|edge|%s %s\n%s\n" % (Blast_Accession.make(fragment), fragment.name, sequence) ) # i think this will write atomically to dest, and copies over different fs shutil.move(tmpf.name, fn) return fn def build_db(fragments, dbname, refresh=True, attempt=0): if len(fragments) == 0: return None fns = [] for fragment in sorted(fragments, key=lambda f: f.id): fn = build_fragment_fasta(fragment, refresh) fns.append(fn) print("concat fasta files for %s" % dbname) with tempfile.NamedTemporaryFile(mode="w", delete=False) as f: fafile = f.name for fn in fns: with open(fn) as inf: for line in inf: f.write(line) # the following prevents concurrent blastdb builds corrupting each other orig_dbname = dbname unique_suffix = str(uuid.uuid4()) dbname = "%s_%s" % (dbname, unique_suffix) print("building blast db %s" % dbname) make_required_dirs(dbname) cmd = "%s/makeblastdb -in %s -out %s " % (settings.NCBI_BIN_DIR, fafile, dbname) cmd += "-title edge -dbtype nucl -parse_seqids -input_type fasta" r = subprocess.check_output(cmd.split(" ")) if b"Adding sequences from FASTA" not in r: print(r) os.unlink(fafile) if not does_blast_db_have_all_fragments(fragments, dbname) and attempt < 5: print("does not have all fragments, retry (attempts: %s)" % (attempt + 1,)) return build_db(fragments, orig_dbname, refresh=refresh, attempt=(attempt + 1)) return dbname def build_genome_db(genome, refresh=False): if genome.blastdb is None or refresh: fragments = list(genome.fragments.all()) dbname = build_db(fragments, default_genome_db_name(genome), refresh=refresh) genome.blastdb = dbname genome.save() return dbname else: print("already built genome blast db for %s" % genome.id) return genome.blastdb def check_and_build_genome_db(genome, refresh=False): if not genome.blastdb or refresh: build_genome_db(genome, refresh) def build_all_genome_dbs(refresh=False): for genome in Genome.objects.all(): build_genome_db(genome, refresh=refresh) def build_all_db(): build_db(Fragment.objects.all(), BLAST_DB)
{ "content_hash": "fb5175dcdccb849e69b512a4d215cfa8", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 98, "avg_line_length": 32.616, "alnum_prop": 0.6448368898700024, "repo_name": "ginkgobioworks/edge", "id": "d55ad7feb1aa12282b5b0a9427525567c2afa91e", "size": "4077", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/edge/blastdb.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "2553" }, { "name": "Dockerfile", "bytes": "1203" }, { "name": "HTML", "bytes": "32885" }, { "name": "JavaScript", "bytes": "27599" }, { "name": "Makefile", "bytes": "3665" }, { "name": "Python", "bytes": "826040" }, { "name": "Shell", "bytes": "227" } ], "symlink_target": "" }
import numpy as np import h5py def writeH5(pressure,u,v,w,velmag,filename): """ Write the h5 file that will save the information needed in proper structure. pressure = numpy array with pressure values u,v,w = numpy array with velocity data filename = string with desired filename dims = 3-tuple with the number of rank of each dimension """ f = h5py.File(filename,'w') # Store velocity data into the velo_group of h5 file velo_group = f.create_group("velo_group") x_velo = velo_group.create_dataset("x_velo",data=u) y_velo = velo_group.create_dataset("y_velo",data=v) z_velo = velo_group.create_dataset("z_velo",data=w) velmag = velo_group.create_dataset("velmag",data=velmag) # Store velocity data into the velo_group of h5 file pres_group = f.create_group("pres_group") presmag = pres_group.create_dataset("presmag",data=pressure) f.close() def writeXdmf(dims,dx,filename,h5_file): """ Write the xmf file, that describes the hdf5 data, to be read by Paraview. filename = string with the desired filename dims = 3-tuple with the number of rank in each dimension (z,y,x) """ f = open(filename,'w') f.write('<?xml version="1.0" ?>\n') f.write('<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>\n') f.write('<Xdmf xmlns:xi="http://www.w3.org/2003/XInclude" Version="2.1">\n') f.write('<Domain>\n') f.write('<Grid Name="my_Grid" GridType="Uniform">\n') f.write('<Topology TopologyType="3DCoRectMesh" Dimensions="%d %d %d">\n'%(dims[0],dims[1],dims[2])) f.write('</Topology>\n') f.write('<Geometry GeometryType="Origin_DxDyDz">\n') f.write('<DataItem Dimensions="3" NumberType="Integer" Format="XML">\n') f.write('0 0 0\n') f.write('</DataItem>\n') f.write('<DataItem Dimensions="3" NumberType="Integer" Format="XML">\n') f.write('%g %g %g\n'%(dx,dx,dx)) f.write('</DataItem>\n') f.write('</Geometry>\n') f.write('<Attribute Name="velocity" AttributeType="Vector" Center="Node">\n') f.write('<DataItem ItemType="Function" Function="JOIN($0, $1, $2)" Dimensions="%d %d %d 3">\n'%(dims[0],dims[1],dims[2])) f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2])) #f.write('out'+str(i)+'.h5:/velo_group/x_velo\n') f.write('%s:/velo_group/x_velo\n'%h5_file) f.write('</DataItem>\n') f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2])) #f.write('out'+str(i)+'.h5:/velo_group/y_velo\n') f.write('%s:/velo_group/y_velo\n'%h5_file) f.write('</DataItem>\n') f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2])) #f.write('out'+str(i)+'.h5:/velo_group/z_velo\n') f.write('%s:/velo_group/z_velo\n'%h5_file) f.write('</DataItem>\n') f.write('</DataItem>\n') f.write('</Attribute>\n') f.write('<Attribute Name="pressure" AttributeType="Scalar" Center="Node">\n') f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2])) #f.write('out'+str(i)+'.h5:/pres_group/presmag\n') f.write('%s:/pres_group/presmag\n'%h5_file) f.write('</DataItem>\n') f.write('</Attribute>\n') f.write('<Attribute Name="velocityMagnitude" AttributeType="Scalar" Center="Node">\n') f.write('<DataItem Dimensions="%d %d %d" NumberType="Float" Format="HDF">\n'%(dims[0],dims[1],dims[2])) #f.write('out'+str(i)+'.h5:/velo_group/velmag\n') f.write('%s:/velo_group/velmag\n'%h5_file) f.write('</DataItem>\n') f.write('</Attribute>\n') f.write('</Grid>\n') f.write('</Domain>\n') f.write('</Xdmf>\n') f.close()
{ "content_hash": "0e89823cbe9e729326b1419d09bdd93d", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 123, "avg_line_length": 40.449438202247194, "alnum_prop": 0.6475, "repo_name": "stu314159/pyNFC", "id": "4c8545a511fb19177617ff2205b1e338108c6365", "size": "3600", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hdf5Helper.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "51749" }, { "name": "MATLAB", "bytes": "631" }, { "name": "Makefile", "bytes": "3324" }, { "name": "Python", "bytes": "218342" }, { "name": "Shell", "bytes": "23943" } ], "symlink_target": "" }
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'HelloSignRequest' db.create_table(u'hello_sign_hellosignrequest', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('content_object_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])), ('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)), ('signature_request_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('dateof', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)), ('data', self.gf('jsonfield.fields.JSONField')(default={})), )) db.send_create_signal(u'hello_sign', ['HelloSignRequest']) # Adding model 'HelloSignLog' db.create_table(u'hello_sign_hellosignlog', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('request', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hello_sign.HelloSignRequest'])), ('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)), ('dateof', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)), ('data', self.gf('jsonfield.fields.JSONField')(default={})), )) db.send_create_signal(u'hello_sign', ['HelloSignLog']) # Adding model 'HelloSignSigningUrl' db.create_table(u'hello_sign_hellosignsigningurl', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('request', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['hello_sign.HelloSignRequest'])), ('signature_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('has_been_viewed', self.gf('django.db.models.fields.BooleanField')(default=False)), ('expires_at', self.gf('django.db.models.fields.DateTimeField')(null=True, db_index=True)), ('dateof', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)), ('data', self.gf('jsonfield.fields.JSONField')(default={})), )) db.send_create_signal(u'hello_sign', ['HelloSignSigningUrl']) def backwards(self, orm): # Deleting model 'HelloSignRequest' db.delete_table(u'hello_sign_hellosignrequest') # Deleting model 'HelloSignLog' db.delete_table(u'hello_sign_hellosignlog') # Deleting model 'HelloSignSigningUrl' db.delete_table(u'hello_sign_hellosignsigningurl') models = { u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'hello_sign.hellosignlog': { 'Meta': {'ordering': "['-id']", 'object_name': 'HelloSignLog'}, 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'dateof': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hello_sign.HelloSignRequest']"}) }, u'hello_sign.hellosignrequest': { 'Meta': {'ordering': "['-dateof']", 'object_name': 'HelloSignRequest'}, 'content_object_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'dateof': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'signature_request_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) }, u'hello_sign.hellosignsigningurl': { 'Meta': {'object_name': 'HelloSignSigningUrl'}, 'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}), 'dateof': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'expires_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'has_been_viewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'request': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hello_sign.HelloSignRequest']"}), 'signature_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}) } } complete_apps = ['hello_sign']
{ "content_hash": "0b73ccca20237bb8513f5bf7e91f8d59", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 161, "avg_line_length": 62.94565217391305, "alnum_prop": 0.5952339837679157, "repo_name": "rosscdh/django-hello_sign", "id": "c8c370861252d72abab98f1ef1f58c775be5b519", "size": "5815", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hello_sign/migrations/0001_initial.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "90762" } ], "symlink_target": "" }
from __future__ import print_function, division from sympy.core import S, C, sympify from sympy.core.add import Add from sympy.core.basic import Basic from sympy.core.containers import Tuple from sympy.core.numbers import Rational from sympy.core.operations import LatticeOp, ShortCircuit from sympy.core.function import Application, Lambda, ArgumentIndexError from sympy.core.expr import Expr from sympy.core.singleton import Singleton from sympy.core.rules import Transform from sympy.core.compatibility import as_int, with_metaclass, xrange from sympy.core.logic import fuzzy_and class IdentityFunction(with_metaclass(Singleton, Lambda)): """ The identity function Examples ======== >>> from sympy import Id, Symbol >>> x = Symbol('x') >>> Id(x) x """ def __new__(cls): from sympy.sets.sets import FiniteSet x = C.Dummy('x') #construct "by hand" to avoid infinite loop obj = Expr.__new__(cls, Tuple(x), x) obj.nargs = FiniteSet(1) return obj Id = S.IdentityFunction ############################################################################### ############################# ROOT and SQUARE ROOT FUNCTION ################### ############################################################################### def sqrt(arg): """The square root function sqrt(x) -> Returns the principal square root of x. Examples ======== >>> from sympy import sqrt, Symbol >>> x = Symbol('x') >>> sqrt(x) sqrt(x) >>> sqrt(x)**2 x Note that sqrt(x**2) does not simplify to x. >>> sqrt(x**2) sqrt(x**2) This is because the two are not equal to each other in general. For example, consider x == -1: >>> from sympy import Eq >>> Eq(sqrt(x**2), x).subs(x, -1) False This is because sqrt computes the principal square root, so the square may put the argument in a different branch. This identity does hold if x is positive: >>> y = Symbol('y', positive=True) >>> sqrt(y**2) y You can force this simplification by using the powdenest() function with the force option set to True: >>> from sympy import powdenest >>> sqrt(x**2) sqrt(x**2) >>> powdenest(sqrt(x**2), force=True) x To get both branches of the square root you can use the RootOf function: >>> from sympy import RootOf >>> [ RootOf(x**2-3,i) for i in (0,1) ] [-sqrt(3), sqrt(3)] See Also ======== sympy.polys.rootoftools.RootOf, root, real_root References ========== * http://en.wikipedia.org/wiki/Square_root * http://en.wikipedia.org/wiki/Principal_value """ # arg = sympify(arg) is handled by Pow return C.Pow(arg, S.Half) def cbrt(arg): """This function computes the principial cube root of `arg`, so it's just a shortcut for `arg**Rational(1, 3)`. Examples ======== >>> from sympy import cbrt, Symbol >>> x = Symbol('x') >>> cbrt(x) x**(1/3) >>> cbrt(x)**3 x Note that cbrt(x**3) does not simplify to x. >>> cbrt(x**3) (x**3)**(1/3) This is because the two are not equal to each other in general. For example, consider `x == -1`: >>> from sympy import Eq >>> Eq(cbrt(x**3), x).subs(x, -1) False This is because cbrt computes the principal cube root, this identity does hold if `x` is positive: >>> y = Symbol('y', positive=True) >>> cbrt(y**3) y See Also ======== sympy.polys.rootoftools.RootOf, root, real_root References ========== * http://en.wikipedia.org/wiki/Cube_root * http://en.wikipedia.org/wiki/Principal_value """ return C.Pow(arg, C.Rational(1, 3)) def root(arg, n): """The n-th root function (a shortcut for ``arg**(1/n)``) root(x, n) -> Returns the principal n-th root of x. Examples ======== >>> from sympy import root, Rational >>> from sympy.abc import x, n >>> root(x, 2) sqrt(x) >>> root(x, 3) x**(1/3) >>> root(x, n) x**(1/n) >>> root(x, -Rational(2, 3)) x**(-3/2) To get all n n-th roots you can use the RootOf function. The following examples show the roots of unity for n equal 2, 3 and 4: >>> from sympy import RootOf, I >>> [ RootOf(x**2-1,i) for i in (0,1) ] [-1, 1] >>> [ RootOf(x**3-1,i) for i in (0,1,2) ] [1, -1/2 - sqrt(3)*I/2, -1/2 + sqrt(3)*I/2] >>> [ RootOf(x**4-1,i) for i in (0,1,2,3) ] [-1, 1, -I, I] SymPy, like other symbolic algebra systems, returns the complex root of negative numbers. This is the principal root and differs from the text-book result that one might be expecting. For example, the cube root of -8 does not come back as -2: >>> root(-8, 3) 2*(-1)**(1/3) The real_root function can be used to either make such a result real or simply return the real root in the first place: >>> from sympy import real_root >>> real_root(_) -2 >>> real_root(-32, 5) -2 See Also ======== sympy.polys.rootoftools.RootOf sympy.core.power.integer_nthroot sqrt, real_root References ========== * http://en.wikipedia.org/wiki/Square_root * http://en.wikipedia.org/wiki/Real_root * http://en.wikipedia.org/wiki/Root_of_unity * http://en.wikipedia.org/wiki/Principal_value * http://mathworld.wolfram.com/CubeRoot.html """ n = sympify(n) return C.Pow(arg, 1/n) def real_root(arg, n=None): """Return the real nth-root of arg if possible. If n is omitted then all instances of -1**(1/odd) will be changed to -1. Examples ======== >>> from sympy import root, real_root, Rational >>> from sympy.abc import x, n >>> real_root(-8, 3) -2 >>> root(-8, 3) 2*(-1)**(1/3) >>> real_root(_) -2 See Also ======== sympy.polys.rootoftools.RootOf sympy.core.power.integer_nthroot root, sqrt """ if n is not None: n = as_int(n) rv = C.Pow(arg, Rational(1, n)) if n % 2 == 0: return rv else: rv = sympify(arg) n1pow = Transform(lambda x: S.NegativeOne, lambda x: x.is_Pow and x.base is S.NegativeOne and x.exp.is_Rational and x.exp.p == 1 and x.exp.q % 2) return rv.xreplace(n1pow) ############################################################################### ############################# MINIMUM and MAXIMUM ############################# ############################################################################### class MinMaxBase(Expr, LatticeOp): def __new__(cls, *args, **assumptions): if not args: raise ValueError("The Max/Min functions must have arguments.") args = (sympify(arg) for arg in args) # first standard filter, for cls.zero and cls.identity # also reshape Max(a, Max(b, c)) to Max(a, b, c) try: _args = frozenset(cls._new_args_filter(args)) except ShortCircuit: return cls.zero # second filter # variant I: remove ones which can be removed # args = cls._collapse_arguments(set(_args), **assumptions) # variant II: find local zeros args = cls._find_localzeros(set(_args), **assumptions) if not args: return cls.identity elif len(args) == 1: return args.pop() else: # base creation # XXX should _args be made canonical with sorting? _args = frozenset(args) obj = Expr.__new__(cls, _args, **assumptions) obj._argset = _args return obj @classmethod def _new_args_filter(cls, arg_sequence): """ Generator filtering args. first standard filter, for cls.zero and cls.identity. Also reshape Max(a, Max(b, c)) to Max(a, b, c), and check arguments for comparability """ for arg in arg_sequence: # pre-filter, checking comparability of arguments if (arg.is_real is False) or (arg is S.ComplexInfinity): raise ValueError("The argument '%s' is not comparable." % arg) if arg == cls.zero: raise ShortCircuit(arg) elif arg == cls.identity: continue elif arg.func == cls: for x in arg.iter_basic_args(): yield x else: yield arg @classmethod def _find_localzeros(cls, values, **options): """ Sequentially allocate values to localzeros. When a value is identified as being more extreme than another member it replaces that member; if this is never true, then the value is simply appended to the localzeros. """ localzeros = set() for v in values: is_newzero = True localzeros_ = list(localzeros) for z in localzeros_: if id(v) == id(z): is_newzero = False elif cls._is_connected(v, z): is_newzero = False if cls._is_asneeded(v, z): localzeros.remove(z) localzeros.update([v]) if is_newzero: localzeros.update([v]) return localzeros @classmethod def _is_connected(cls, x, y): """ Check if x and y are connected somehow. """ xy = x > y yx = x < y if (x == y) or xy == True or xy == False or yx == True or yx == False: return True if x.is_Number and y.is_Number: return True return False @classmethod def _is_asneeded(cls, x, y): """ Check if x and y satisfy relation condition. The relation condition for Max function is x > y, for Min function is x < y. They are defined in children Max and Min classes through the method _rel(cls, x, y) """ if (x == y): return False if x.is_Number and y.is_Number: if cls._rel(x, y): return True xy = cls._rel(x, y) if xy == True or xy == False: return bool(xy) yx = cls._rel_inversed(x, y) if yx == True or yx == False: return not bool(yx) return False def _eval_derivative(self, s): # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s) i = 0 l = [] for a in self.args: i += 1 da = a.diff(s) if da is S.Zero: continue try: df = self.fdiff(i) except ArgumentIndexError: df = Function.fdiff(self, i) l.append(df * da) return Add(*l) @property def is_real(self): return fuzzy_and(arg.is_real for arg in self.args) class Max(MinMaxBase, Application): """ Return, if possible, the maximum value of the list. When number of arguments is equal one, then return this argument. When number of arguments is equal two, then return, if possible, the value from (a, b) that is >= the other. In common case, when the length of list greater than 2, the task is more complicated. Return only the arguments, which are greater than others, if it is possible to determine directional relation. If is not possible to determine such a relation, return a partially evaluated result. Assumptions are used to make the decision too. Also, only comparable arguments are permitted. Examples ======== >>> from sympy import Max, Symbol, oo >>> from sympy.abc import x, y >>> p = Symbol('p', positive=True) >>> n = Symbol('n', negative=True) >>> Max(x, -2) #doctest: +SKIP Max(x, -2) >>> Max(x, -2).subs(x, 3) 3 >>> Max(p, -2) p >>> Max(x, y) #doctest: +SKIP Max(x, y) >>> Max(x, y) == Max(y, x) True >>> Max(x, Max(y, z)) #doctest: +SKIP Max(x, y, z) >>> Max(n, 8, p, 7, -oo) #doctest: +SKIP Max(8, p) >>> Max (1, x, oo) oo Algorithm The task can be considered as searching of supremums in the directed complete partial orders [1]_. The source values are sequentially allocated by the isolated subsets in which supremums are searched and result as Max arguments. If the resulted supremum is single, then it is returned. The isolated subsets are the sets of values which are only the comparable with each other in the current set. E.g. natural numbers are comparable with each other, but not comparable with the `x` symbol. Another example: the symbol `x` with negative assumption is comparable with a natural number. Also there are "least" elements, which are comparable with all others, and have a zero property (maximum or minimum for all elements). E.g. `oo`. In case of it the allocation operation is terminated and only this value is returned. Assumption: - if A > B > C then A > C - if A==B then B can be removed References ========== .. [1] http://en.wikipedia.org/wiki/Directed_complete_partial_order .. [2] http://en.wikipedia.org/wiki/Lattice_%28order%29 See Also ======== Min : find minimum values """ zero = S.Infinity identity = S.NegativeInfinity @classmethod def _rel(cls, x, y): """ Check if x > y. """ return (x > y) @classmethod def _rel_inversed(cls, x, y): """ Check if x < y. """ return (x < y) def fdiff( self, argindex ): from sympy.functions.special.delta_functions import Heaviside n = len(self.args) if 0 < argindex and argindex <= n: argindex -= 1 if n == 2: return Heaviside( self.args[argindex] - self.args[1-argindex] ) newargs = tuple([self.args[i] for i in xrange(n) if i != argindex]) return Heaviside( self.args[argindex] - Max(*newargs) ) else: raise ArgumentIndexError(self, argindex) class Min(MinMaxBase, Application): """ Return, if possible, the minimum value of the list. Examples ======== >>> from sympy import Min, Symbol, oo >>> from sympy.abc import x, y >>> p = Symbol('p', positive=True) >>> n = Symbol('n', negative=True) >>> Min(x, -2) #doctest: +SKIP Min(x, -2) >>> Min(x, -2).subs(x, 3) -2 >>> Min(p, -3) -3 >>> Min(x, y) #doctest: +SKIP Min(x, y) >>> Min(n, 8, p, -7, p, oo) #doctest: +SKIP Min(n, -7) See Also ======== Max : find maximum values """ zero = S.NegativeInfinity identity = S.Infinity @classmethod def _rel(cls, x, y): """ Check if x < y. """ return (x < y) @classmethod def _rel_inversed(cls, x, y): """ Check if x > y. """ return (x > y) def fdiff( self, argindex ): from sympy.functions.special.delta_functions import Heaviside n = len(self.args) if 0 < argindex and argindex <= n: argindex -= 1 if n == 2: return Heaviside( self.args[1-argindex] - self.args[argindex] ) newargs = tuple([ self.args[i] for i in xrange(n) if i != argindex]) return Heaviside( Min(*newargs) - self.args[argindex] ) else: raise ArgumentIndexError(self, argindex)
{ "content_hash": "71a7dbf862175ec3e4e25d2b4f2fca66", "timestamp": "", "source": "github", "line_count": 604, "max_line_length": 80, "avg_line_length": 26.177152317880793, "alnum_prop": 0.5389285940168237, "repo_name": "wdv4758h/ZipPy", "id": "63410320efe2567a4a772bac93783baa947816f7", "size": "15811", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "edu.uci.python.benchmark/src/benchmarks/sympy/sympy/functions/elementary/miscellaneous.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "9447" }, { "name": "C", "bytes": "106932" }, { "name": "CSS", "bytes": "32004" }, { "name": "Groff", "bytes": "27753" }, { "name": "HTML", "bytes": "721863" }, { "name": "Java", "bytes": "1550721" }, { "name": "JavaScript", "bytes": "10581" }, { "name": "Makefile", "bytes": "16156" }, { "name": "PLSQL", "bytes": "22886" }, { "name": "Python", "bytes": "33672733" }, { "name": "R", "bytes": "1959" }, { "name": "Ruby", "bytes": "304" }, { "name": "Scheme", "bytes": "125" }, { "name": "Shell", "bytes": "3119" }, { "name": "Tcl", "bytes": "1048" }, { "name": "TeX", "bytes": "8790" }, { "name": "Visual Basic", "bytes": "481" }, { "name": "XSLT", "bytes": "366202" } ], "symlink_target": "" }
"""Runs a sweep over evaluation algorithms and prints results. This experiment is far from ideal because it does not support sweeps over different parameters for different algorithms at the moment. """ import functools from typing import Callable from typing import Sequence import numpy as np import time from absl import app from absl import flags from differential_value_iteration.algorithms import algorithm from differential_value_iteration.algorithms import dvi from differential_value_iteration.algorithms import mdvi from differential_value_iteration.algorithms import rvi from differential_value_iteration.environments import micro from differential_value_iteration.environments import structure FLAGS = flags.FLAGS _MAX_ITERS = flags.DEFINE_integer('max_iters', 50000, 'Maximum iterations per algorithm.') _MINIMUM_STEP_SIZE = flags.DEFINE_float('minimum_step_size', .001, 'Minimum step size.') _MAXIMUM_STEP_SIZE = flags.DEFINE_float('maximum_step_size', 1., 'Maximum step size.') _NUM_STEP_SIZES = flags.DEFINE_integer('num_step_sizes', 10, 'Number of step sizes to try.') _SYNCHRONIZED = flags.DEFINE_bool('synchronized', True, 'Run algorithms in synchronized mode.') _64bit = flags.DEFINE_bool('64bit', False, 'Use 64 bit precision (default is 32 bit).') _CONVERGENCE_TOLERANCE = flags.DEFINE_float('convergence_tolerance', 1e-5, 'Tolerance for convergence.') # DVI-specific flags flags.DEFINE_bool('dvi', True, 'Run Differential Value Iteration') flags.DEFINE_float('dvi_minimum_beta', .001, 'Minimum DVI beta.') flags.DEFINE_float('dvi_maximum_beta', 1., 'Maximum DVI beta.') flags.DEFINE_integer('dvi_num_betas', 10, 'Number of DVI beta values to try.') flags.DEFINE_float('dvi_initial_rbar', 0., 'Initial r_bar for DVI.') # MDVI-specific flags flags.DEFINE_bool('mdvi', True, 'Run Multichain Differential Value Iteration') flags.DEFINE_float('mdvi_minimum_beta', .001, 'Minimum MDVI beta.') flags.DEFINE_float('mdvi_maximum_beta', 1., 'Maximum DMVI beta.') flags.DEFINE_integer('mdvi_num_betas', 10, 'Number of MDVI beta values to try.') flags.DEFINE_float('mdvi_initial_rbar', 0., 'Initial r_bar for MDVI.') # RVI-specific flags flags.DEFINE_bool('rvi', True, 'Run Relative Value Iteration') flags.DEFINE_integer('rvi_reference_index', 0, 'Reference index for RVI.') # Environment flags _MRP1 = flags.DEFINE_bool('mrp1', True, 'Include MRP1 in evaluation.') _MRP2 = flags.DEFINE_bool('mrp2', True, 'Include MRP2 in evaluation.') _MRP3 = flags.DEFINE_bool('mrp3', True, 'Include MRP3 in evaluation.') # Debugging flags _SAVE_FINAL_ESTIMATES = flags.DEFINE_bool('save_final_estimates', False, 'Save the final estimates.') def run( environments: Sequence[structure.MarkovRewardProcess], algorithm_constructors: Sequence[Callable[..., algorithm.Evaluation]], step_sizes: Sequence[float], max_iters: int, convergence_tolerance: float, synchronized: bool, save_final_estimates: bool): """Runs a list of algorithms on a list of environments and prints outcomes. Params: environments: Sequence of Markov Reward Processes to run. algorithm_constructors: Sequence of Callable algorithm constructors. If an algorithm has hyperparameters, it should have multiple entries in here with hypers preset using functools.partial. step_sizes: Step sizes to try for each algorithm-environment pair. max_iters: Maximum number of iterations before declaring fail to converge. convergence_tolerance: Criteria for convergence. synchronized: Run algorithms in synchronized or asynchronous mode. save_final_estimates: Save the final (dictionary of) estimates to a file """ for environment in environments: initial_values = np.zeros(environment.num_states) inner_loop_range = 1 if synchronized else environment.num_states for algorithm_constructor in algorithm_constructors: print(f'Running {algorithm_constructor} on {environment.name}') for step_size in step_sizes: converged = False alg = algorithm_constructor(mrp=environment, initial_values=initial_values, step_size=step_size, synchronized=synchronized) for i in range(max_iters): change_summary = 0. for _ in range(inner_loop_range): changes = alg.update() # Mean instead of sum so tolerance scales with num_states. change_summary += np.mean(np.abs(changes)) # Basically divide by num_states if running async. change_summary /= inner_loop_range if alg.diverged(): converged = False break if change_summary <= convergence_tolerance and i > 1: converged = True break print( f'step_size:{step_size:.5f}\tConverged:{converged}\tafter {i} iterations\tFinal Changes:{changes}') if save_final_estimates: estimates = alg.get_estimates() # is there a simple way of making the filename more accessible? # filename = f'results/{environment.__name__}_{alg.__name__}_{step_size}' filename = str(time.time()) full_path = 'results/' + filename # directory can be a command-line argument as well np.save(full_path, estimates) # we can also save a string with the above convergence details print(f'Results saved in: {full_path}') def main(argv): del argv # Stop linter from complaining about unused argv. algorithm_constructors = [] # Create constructors that only depends on params common to all algorithms. if FLAGS.dvi: betas = np.geomspace(start=FLAGS.dvi_minimum_beta, stop=FLAGS.dvi_maximum_beta, num=FLAGS.dvi_num_betas, endpoint=True) for beta in betas: dvi_algorithm = functools.partial(dvi.Evaluation, beta=beta, initial_r_bar=FLAGS.dvi_initial_rbar) algorithm_constructors.append(dvi_algorithm) if FLAGS.mdvi: betas = np.geomspace(start=FLAGS.mdvi_minimum_beta, stop=FLAGS.mdvi_maximum_beta, num=FLAGS.mdvi_num_betas, endpoint=True) for beta in betas: mdvi_algorithm = functools.partial(mdvi.Evaluation, beta=beta, initial_r_bar=FLAGS.mdvi_initial_rbar) algorithm_constructors.append(mdvi_algorithm) if FLAGS.rvi: rvi_algorithm = functools.partial(rvi.Evaluation, reference_index=FLAGS.rvi_reference_index) algorithm_constructors.append(rvi_algorithm) if not algorithm_constructors: raise ValueError('No algorithms scheduled to run.') # Generate stepsizes log-spaced minimum and maximum supplied. step_sizes = np.geomspace( start=_MINIMUM_STEP_SIZE.value, stop=_MAXIMUM_STEP_SIZE.value, num=_NUM_STEP_SIZES.value, endpoint=True) environments = [] problem_dtype = np.float64 if _64bit.value else np.float32 if _MRP1.value: environments.append(micro.create_mrp1(dtype=problem_dtype)) if _MRP2.value: environments.append(micro.create_mrp2(dtype=problem_dtype)) if _MRP3.value: environments.append(micro.create_mrp3(dtype=problem_dtype)) if not environments: raise ValueError('At least one environment required.') run(environments=environments, algorithm_constructors=algorithm_constructors, step_sizes=step_sizes, max_iters=_MAX_ITERS.value, convergence_tolerance=_CONVERGENCE_TOLERANCE.value, synchronized=_SYNCHRONIZED.value, save_final_estimates=_SAVE_FINAL_ESTIMATES.value) if __name__ == '__main__': app.run(main)
{ "content_hash": "ade49e60eac1f6fe462be337c27e8b02", "timestamp": "", "source": "github", "line_count": 181, "max_line_length": 111, "avg_line_length": 43.32596685082873, "alnum_prop": 0.6817138485080336, "repo_name": "abhisheknaik96/differential-value-iteration", "id": "d4741f1bbd15d694c164dacea0ed9a18b65a7c80", "size": "7842", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/differential_value_iteration/experiments/evaluation_convergence.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "59817" }, { "name": "Python", "bytes": "32399" } ], "symlink_target": "" }
from collections import defaultdict from datetime import date, timedelta from decimal import Decimal from django.contrib.auth.models import User from django.contrib.postgres.fields import ArrayField from django.core.exceptions import ValidationError from django.db import models from django.db.models import Q, signals from django.dispatch import receiver from django.urls import reverse from django.utils import timezone from django.utils.translation import gettext, gettext_lazy as _ from towel.managers import SearchManager from towel.resources.urls import model_resource_urls STATE_CHOICES = ( ("AG", _("Aargau")), ("AI", _("Appenzell Innerrhoden")), ("AR", _("Appenzell Ausserrhoden")), ("BS", _("Basel-Stadt")), ("BL", _("Basel-Land")), ("BE", _("Berne")), ("FR", _("Fribourg")), ("GE", _("Geneva")), ("GL", _("Glarus")), ("GR", _("Graubuenden")), ("JU", _("Jura")), ("LU", _("Lucerne")), ("NE", _("Neuchatel")), ("NW", _("Nidwalden")), ("OW", _("Obwalden")), ("SH", _("Schaffhausen")), ("SZ", _("Schwyz")), ("SO", _("Solothurn")), ("SG", _("St. Gallen")), ("TG", _("Thurgau")), ("TI", _("Ticino")), ("UR", _("Uri")), ("VS", _("Valais")), ("VD", _("Vaud")), ("ZG", _("Zug")), ("ZH", _("Zurich")), ) @model_resource_urls() class ScopeStatement(models.Model): is_active = models.BooleanField(_("is active"), default=True) eis_no = models.CharField(_("scope statement No."), unique=True, max_length=10) name = models.CharField(_("name"), max_length=100) branch_no = models.CharField(_("EIS No."), max_length=10) branch = models.CharField(_("branch"), max_length=100) company_name = models.CharField(_("company name"), max_length=100, blank=True) company_address = models.CharField(_("company address"), max_length=100, blank=True) company_zip_code = models.CharField( _("company ZIP code"), max_length=10, blank=True ) company_city = models.CharField(_("company city"), max_length=100, blank=True) company_contact_name = models.CharField( _("company contact name"), max_length=100, blank=True ) company_contact_email = models.EmailField(_("company contact email"), blank=True) company_contact_function = models.CharField( _("company contact function"), max_length=100, blank=True ) company_contact_phone = models.CharField( _("company contact phone"), max_length=100, blank=True ) work_location = models.CharField(_("work location"), max_length=100, blank=True) default_group = models.ForeignKey( "Group", on_delete=models.SET_NULL, verbose_name=_("default group"), blank=True, null=True, related_name="+", ) class Meta: ordering = ["name"] verbose_name = _("scope statement") verbose_name_plural = _("scope statements") def __str__(self): return f"{self.name} ({self.eis_no})" @property def company_contact_location(self): return (f"{self.company_zip_code} {self.company_city}").strip() class DrudgeQuota(models.Model): scope_statement = models.ForeignKey( ScopeStatement, on_delete=models.CASCADE, verbose_name=_("scope statement") ) week = models.DateField(_("week")) quota = models.PositiveIntegerField(_("quota")) class Meta: verbose_name = _("drudge quota") verbose_name_plural = _("drudge quotas") def __str__(self): from zivinetz.views.scheduling import calendar_week year, week = calendar_week(self.week) return "{}: {} Zivis in KW{} {}".format( self.scope_statement.name, self.quota, week, year, ) class Choices: def __init__(self, choices): self.kwargs = {"max_length": 20, "choices": choices, "default": choices[0][0]} for key, value in choices: setattr(self, key, key) @model_resource_urls() class Specification(models.Model): ACCOMODATION = Choices( (("provided", _("provided")), ("compensated", _("compensated"))) ) MEAL = Choices( ( ("no_compensation", _("no compensation")), ("at_accomodation", _("at accomodation")), ("external", _("external")), ) ) CLOTHING = Choices((("provided", _("provided")), ("compensated", _("compensated")))) scope_statement = models.ForeignKey( ScopeStatement, on_delete=models.CASCADE, verbose_name=_("scope statement"), related_name="specifications", ) with_accomodation = models.BooleanField(_("with accomodation"), default=False) code = models.CharField( _("code"), max_length=10, help_text=_("Short, unique code identifying this specification."), ) accomodation_working = models.CharField( _("accomodation on working days"), **ACCOMODATION.kwargs ) breakfast_working = models.CharField(_("breakfast on working days"), **MEAL.kwargs) lunch_working = models.CharField(_("lunch on working days"), **MEAL.kwargs) supper_working = models.CharField(_("supper on working days"), **MEAL.kwargs) accomodation_sick = models.CharField( _("accomodation on sick days"), **ACCOMODATION.kwargs ) breakfast_sick = models.CharField(_("breakfast on sick days"), **MEAL.kwargs) lunch_sick = models.CharField(_("lunch on sick days"), **MEAL.kwargs) supper_sick = models.CharField(_("supper on sick days"), **MEAL.kwargs) accomodation_free = models.CharField( _("accomodation on free days"), **ACCOMODATION.kwargs ) breakfast_free = models.CharField(_("breakfast on free days"), **MEAL.kwargs) lunch_free = models.CharField(_("lunch on free days"), **MEAL.kwargs) supper_free = models.CharField(_("supper on free days"), **MEAL.kwargs) clothing = models.CharField(_("clothing"), **CLOTHING.kwargs) accomodation_throughout = models.BooleanField( _("accomodation throughout"), help_text=_("Accomodation is offered throughout."), default=False, ) food_throughout = models.BooleanField( _("food throughout"), help_text=_("Food is offered throughout."), default=False ) conditions = models.FileField(_("conditions"), upload_to="conditions", blank=True) ordering = models.IntegerField(_("ordering"), default=0) class Meta: ordering = ["ordering", "scope_statement", "with_accomodation"] unique_together = (("scope_statement", "with_accomodation"),) verbose_name = _("specification") verbose_name_plural = _("specifications") def __str__(self): return "{} - {}".format( self.scope_statement, ( self.with_accomodation and gettext("with accomodation") or gettext("without accomodation") ), ) def compensation(self, for_date=date.today): cset = CompensationSet.objects.for_date(for_date) compensation = {"spending_money": cset.spending_money} for day_type in ("working", "sick", "free"): key = "accomodation_%s" % day_type value = getattr(self, key) if value == self.ACCOMODATION.provided: compensation[key] = Decimal("0.00") else: compensation[key] = cset.accomodation_home for meal in ("breakfast", "lunch", "supper"): key = f"{meal}_{day_type}" value = getattr(self, key) if value == self.MEAL.no_compensation: compensation[key] = Decimal("0.00") else: compensation[key] = getattr(cset, f"{meal}_{value}") if self.clothing == self.CLOTHING.provided: compensation.update( { "clothing": Decimal("0.00"), "clothing_limit_per_assignment": Decimal("0.00"), } ) else: compensation.update( { "clothing": cset.clothing, "clothing_limit_per_assignment": cset.clothing_limit_per_assignment, } ) return compensation class CompensationSetManager(models.Manager): def for_date(self, for_date=date.today): if hasattr(for_date, "__call__"): for_date = for_date() try: return self.filter(valid_from__lte=for_date).order_by("-valid_from")[0] except IndexError: raise self.model.DoesNotExist @model_resource_urls() class CompensationSet(models.Model): valid_from = models.DateField(_("valid from"), unique=True) spending_money = models.DecimalField( _("spending money"), max_digits=10, decimal_places=2 ) breakfast_at_accomodation = models.DecimalField( _("breakfast at accomodation"), max_digits=10, decimal_places=2 ) lunch_at_accomodation = models.DecimalField( _("lunch at accomodation"), max_digits=10, decimal_places=2 ) supper_at_accomodation = models.DecimalField( _("supper at accomodation"), max_digits=10, decimal_places=2 ) breakfast_external = models.DecimalField( _("external breakfast"), max_digits=10, decimal_places=2 ) lunch_external = models.DecimalField( _("external lunch"), max_digits=10, decimal_places=2 ) supper_external = models.DecimalField( _("external supper"), max_digits=10, decimal_places=2 ) accomodation_home = models.DecimalField( _("accomodation"), max_digits=10, decimal_places=2, help_text=_("Daily compensation if drudge returns home for the night."), ) private_transport_per_km = models.DecimalField( _("private transport per km"), max_digits=10, decimal_places=2, help_text=_("Only applies if public transport use is not reasonable."), ) clothing = models.DecimalField( _("clothing"), max_digits=10, decimal_places=6, help_text=_( "Daily compensation for clothes if clothing isn't" " offered by the company." ), ) clothing_limit_per_assignment = models.DecimalField( _("clothing limit per assignment"), max_digits=10, decimal_places=2, help_text=_("Maximal compensation for clothing per assignment."), ) class Meta: ordering = ["-valid_from"] verbose_name = _("compensation set") verbose_name_plural = _("compensation sets") objects = CompensationSetManager() def __str__(self): return gettext("compensation set, valid from %s") % self.valid_from @model_resource_urls(default="edit") class RegionalOffice(models.Model): name = models.CharField(_("name"), max_length=100) city = models.CharField(_("city"), max_length=100) address = models.TextField(_("address"), blank=True) code = models.CharField( _("code"), max_length=10, help_text=_("Short, unique identifier.") ) phone = models.CharField(_("phone"), max_length=20, blank=True) fax = models.CharField(_("fax"), max_length=20, blank=True) class Meta: ordering = ["name"] verbose_name = _("regional office") verbose_name_plural = _("regional offices") def __str__(self): return self.name class DrudgeManager(SearchManager): search_fields = ( "user__first_name", "user__last_name", "zdp_no", "address", "zip_code", "city", "place_of_citizenship_city", "place_of_citizenship_state", "phone_home", "phone_office", "mobile", "bank_account", "health_insurance_account", "health_insurance_company", "education_occupation", ) def active_set(self, access, additional_ids=None): # pragma: no cover q = Q(id=0) if additional_ids: q |= Q(id__in=additional_ids) return self.filter(q).select_related("user") @model_resource_urls() class Drudge(models.Model): STATES = [state[0] for state in STATE_CHOICES] STATE_CHOICES = zip(STATES, STATES) MOTOR_SAW_COURSE_CHOICES = ( ("2-day", _("2 day course")), ("5-day", _("5 day course")), ) user = models.OneToOneField(User, on_delete=models.CASCADE) zdp_no = models.CharField(_("ZDP No."), unique=True, max_length=10) address = models.TextField(_("address")) zip_code = models.CharField(_("ZIP code"), max_length=10) city = models.CharField(_("city"), max_length=100) date_of_birth = models.DateField(_("date of birth")) place_of_citizenship_city = models.CharField( _("place of citizenship"), max_length=100 ) place_of_citizenship_state = models.CharField( _("place of citizenship (canton)"), max_length=2, choices=STATE_CHOICES ) phone_home = models.CharField(_("phone (home)"), max_length=20, blank=True) phone_office = models.CharField(_("phone (office)"), max_length=20, blank=True) mobile = models.CharField(_("mobile"), max_length=20, blank=True) bank_account = models.CharField( _("bank account"), max_length=100, help_text=_("Enter your IBAN.") ) health_insurance_company = models.CharField( _("health insurance company"), max_length=100, blank=True ) health_insurance_account = models.CharField( _("health insurance account"), max_length=100, blank=True ) education_occupation = models.TextField(_("education / occupation"), blank=True) driving_license = models.BooleanField(_("driving license"), default=False) general_abonnement = models.BooleanField(_("general abonnement"), default=False) half_fare_card = models.BooleanField(_("half-fare card"), default=False) other_card = models.CharField(_("other card"), max_length=100, blank=True) youth_association = models.CharField( _("youth association"), max_length=100, blank=True, choices=[ ("Pfadi", _("Pfadi")), ("Cevi", _("Cevi")), ("Jubla", _("Jubla")), ("Anderer", _("Anderer")), ("Keiner", _("Keiner")), ], ) environment_course = models.BooleanField( _("environment course"), default=False, help_text=_("I have taken the environment course already."), ) motor_saw_course = models.CharField( _("motor saw course"), max_length=10, choices=MOTOR_SAW_COURSE_CHOICES, blank=True, null=True, help_text=_("I have taken the denoted motor saw course already."), ) regional_office = models.ForeignKey( RegionalOffice, verbose_name=_("regional office"), on_delete=models.CASCADE ) notes = models.TextField( _("notes"), blank=True, help_text=_("Allergies, anything else we should be aware of?"), ) internal_notes = models.TextField( _("internal notes"), blank=True, help_text=_("This field is not visible to drudges."), ) profile_image = models.ImageField( _("profile image"), blank=True, null=True, upload_to="profile_images/" ) class Meta: ordering = ["user__last_name", "user__first_name", "zdp_no"] verbose_name = _("drudge") verbose_name_plural = _("drudges") objects = DrudgeManager() def __str__(self): return f"{self.user.first_name} {self.user.last_name} ({self.zdp_no})" def pretty_motor_saw_course(self): """for the scheduling table""" msw = self.motor_saw_course or "" return msw.replace("-day", "T") class AssignmentManager(SearchManager): search_fields = ["specification__scope_statement__name", "specification__code"] + [ "drudge__%s" % f for f in DrudgeManager.search_fields ] def for_date(self, day=None): day = day if day else date.today() return self.filter( Q(date_from__lte=day) & ( Q(date_until__gte=day) | Q(date_until_extension__isnull=False, date_until_extension__gte=day) ) ) def active_set(self, access, additional_ids=None): # pragma: no cover q = Q(id__in=self.for_date()) if additional_ids: q |= Q(id__in=additional_ids) return self.filter(q).select_related("specification", "drudge__user") @model_resource_urls() class Assignment(models.Model): TENTATIVE = 10 ARRANGED = 20 MOBILIZED = 30 DECLINED = 40 STATUS_CHOICES = ( (TENTATIVE, _("tentative")), (ARRANGED, _("arranged")), (MOBILIZED, _("mobilized")), (DECLINED, _("declined")), ) created = models.DateTimeField(_("created"), default=timezone.now) modified = models.DateTimeField(_("modified"), auto_now=True) specification = models.ForeignKey( Specification, verbose_name=_("specification"), on_delete=models.CASCADE ) drudge = models.ForeignKey( Drudge, verbose_name=_("drudge"), related_name="assignments", on_delete=models.CASCADE, ) regional_office = models.ForeignKey( RegionalOffice, verbose_name=_("regional office"), on_delete=models.CASCADE ) date_from = models.DateField(_("date from")) date_until = models.DateField(_("date until")) date_until_extension = models.DateField( _("date until (extended)"), blank=True, null=True, help_text=_("Only fill out if assignment has been extended."), ) available_holi_days = models.PositiveIntegerField( _("available holiday days"), blank=True, null=True ) part_of_long_assignment = models.BooleanField( _("part of long assignment"), default=False ) status = models.IntegerField(_("status"), choices=STATUS_CHOICES, default=TENTATIVE) arranged_on = models.DateField(_("arranged on"), blank=True, null=True) mobilized_on = models.DateField(_("mobilized on"), blank=True, null=True) environment_course_date = models.DateField( _("environment course starting date"), blank=True, null=True ) motor_saw_course_date = models.DateField( _("motor saw course starting date"), blank=True, null=True ) class Meta: ordering = ["-date_from", "-date_until"] verbose_name = _("assignment") verbose_name_plural = _("assignments") objects = AssignmentManager() def __str__(self): return "{} on {} ({} - {})".format( self.drudge, self.specification.code, self.date_from, self.determine_date_until(), ) def determine_date_until(self): return self.date_until_extension or self.date_until determine_date_until.short_description = _("eff. until date") def assignment_days(self): day = self.date_from until = self.determine_date_until() one_day = timedelta(days=1) public_holidays = PublicHoliday.objects.filter( date__range=(day, until) ).values_list("date", flat=True) company_holidays = self.specification.scope_statement.company_holidays company_holidays = list( company_holidays.filter(date_from__lte=until, date_until__gte=day) ) vacation_days = 0 # +1 because the range is inclusive assignment_days = (self.date_until - self.date_from).days + 1 if assignment_days >= 180: # 30 days isn't exactly one month. But that's good enough for us. # We grant 2 additional vacation days per 30 full days only # (see ZDV Art. 72) vacation_days = 8 + int((assignment_days - 180) / 30) * 2 days = { "assignment_days": assignment_days, "vacation_days": vacation_days, "company_holidays": 0, "public_holidays_during_company_holidays": 0, "public_holidays_outside_company_holidays": 0, "vacation_days_during_company_holidays": 0, "freely_definable_vacation_days": vacation_days, "working_days": 0, "countable_days": 0, # days which aren't countable and are forced upon the drudge: "forced_leave_days": 0, } monthly_expense_days = {} def pop_company_holiday(): try: return company_holidays.pop(0) except IndexError: return None company_holiday = pop_company_holiday() while day <= until: is_weekend = day.weekday() in (5, 6) is_public_holiday = day in public_holidays is_company_holiday = company_holiday and company_holiday.is_contained(day) slot = "free" if is_company_holiday: days["company_holidays"] += 1 if is_public_holiday: # At least we have public holidays too. days["public_holidays_during_company_holidays"] += 1 days["countable_days"] += 1 else: if is_weekend: # We were lucky once again. days["countable_days"] += 1 else: # Oh no, company holiday and neither public holiday nor # weekend. Now the draconian regime of the swiss # administration comes to full power. if days["freely_definable_vacation_days"]: # Vacations need to be taken during public holidays # if possible at all. Unfortunately for drudges. days["freely_definable_vacation_days"] -= 1 days["vacation_days_during_company_holidays"] += 1 slot = "holi" # At least they are countable towards assignment # total. days["countable_days"] += 1 else: # Damn. No vacation days left (if there were any in # the beginning. The drudge has to pause his # assignment for this time. days["forced_leave_days"] += 1 slot = "forced" else: # No company holiday... business as usual, maybe. days["countable_days"] += 1 # Nice! if not (is_public_holiday or is_weekend): # Hard beer-drinking and pickaxing action. days["working_days"] += 1 slot = "working" key = (day.year, day.month, 1) if day.month == self.date_from.month and day.year == self.date_from.year: key = (self.date_from.year, self.date_from.month, self.date_from.day) if day > self.date_until: # Only the case when assignment has been extended # If we are in the same month as the original end of the # assignment, create a new key for the extension part of # the given month only if ( day.month == self.date_until.month and day.year == self.date_until.year ): extended_start = self.date_until + one_day key = ( extended_start.year, extended_start.month, extended_start.day, ) monthly_expense_days.setdefault( key, {"free": 0, "working": 0, "holi": 0, "forced": 0, "start": day} ) monthly_expense_days[key][slot] += 1 monthly_expense_days[key]["end"] = day day += one_day # Fetch new company holiday once the old one starts smelling funny. if company_holiday and company_holiday.date_until < day: company_holiday = pop_company_holiday() return days, sorted(monthly_expense_days.items(), key=lambda item: item[0]) def expenses(self): """ This calculates an estimate """ assignment_days, monthly_expense_days = self.assignment_days() specification = self.specification clothing_total = None expenses = {} for month, days in monthly_expense_days: compensation = specification.compensation( date(month[0], month[1], month[2]) ) free = days["free"] working = days["working"] total = free + working expenses[month] = { "spending_money": total * compensation["spending_money"], "clothing": total * compensation["clothing"], "accomodation": ( free * compensation["accomodation_free"] + working * compensation["accomodation_working"] ), "food": free * ( compensation["breakfast_free"] + compensation["lunch_free"] + compensation["supper_free"] ) + working * ( compensation["breakfast_working"] + compensation["lunch_working"] + compensation["supper_working"] ), } if clothing_total is None: clothing_total = compensation["clothing_limit_per_assignment"] clothing_total -= expenses[month]["clothing"] if clothing_total < 0: expenses[month]["clothing"] += clothing_total clothing_total = 0 return assignment_days, monthly_expense_days, expenses def pdf_url(self): return reverse("zivinetz_assignment_pdf", args=(self.pk,)) def admin_pdf_url(self): return '<a href="%s">PDF</a>' % self.pdf_url() admin_pdf_url.allow_tags = True admin_pdf_url.short_description = "PDF" def generate_expensereports(self): occupied_months = [ (d.year, d.month, d.day) for d in self.reports.values_list("date_from", flat=True) ] days, monthly_expense_days, expenses = self.expenses() created = 0 for month, data in monthly_expense_days: if month in occupied_months: continue try: clothing_expenses = expenses[month]["clothing"] except KeyError: clothing_expenses = 0 report = self.reports.create( date_from=data["start"], date_until=data["end"], working_days=data["working"], free_days=data["free"], sick_days=0, holi_days=data["holi"], forced_leave_days=data["forced"], calculated_total_days=sum( (data["working"], data["free"], data["holi"], data["forced"]), 0 ), clothing_expenses=clothing_expenses, specification=self.specification, ) report.recalculate_total() created += 1 return created @model_resource_urls() class AssignmentChange(models.Model): created = models.DateTimeField(_("created"), default=timezone.now) assignment = models.ForeignKey( Assignment, verbose_name=_("assignment"), blank=True, null=True, on_delete=models.SET_NULL, ) assignment_description = models.CharField( _("assignment description"), max_length=200 ) changed_by = models.CharField(_("changed by"), max_length=100, default="nobody") changes = models.TextField(_("changes"), blank=True) class Meta: ordering = ["created"] verbose_name = _("assignment change") verbose_name_plural = _("assignment changes") def __str__(self): return self.assignment_description def get_request(): """Walk up the stack, return the nearest first argument named "request".""" import inspect frame = None try: for f in inspect.stack()[1:]: frame = f[0] code = frame.f_code if code.co_varnames[:1] == ("request",): return frame.f_locals["request"] elif code.co_varnames[:2] == ("self", "request"): return frame.f_locals["request"] finally: del frame @receiver(signals.pre_save, sender=Assignment) def assignment_pre_save(sender, instance, **kwargs): try: original = Assignment.objects.get(pk=instance.pk) except (AttributeError, Assignment.DoesNotExist): original = None changes = [] if not original: changes.append(gettext("Assignment has been created.")) else: change_tracked_fields = [ "specification", "drudge", "date_from", "date_until", "date_until_extension", "status", "arranged_on", "mobilized_on", "environment_course_date", "motor_saw_course_date", ] def nicify(instance, field): if hasattr(instance, "get_%s_display" % field): return getattr(instance, "get_%s_display" % field)() return getattr(instance, field) or "-" for field in change_tracked_fields: if getattr(original, field) == getattr(instance, field): continue field_instance = Assignment._meta.get_field(field) changes.append( gettext( "The value of `%(field)s` has been changed from" " %(from)s to %(to)s." ) % { "field": field_instance.verbose_name, "from": nicify(original, field), "to": nicify(instance, field), } ) request = get_request() instance._assignment_change = dict( assignment=instance, assignment_description="%s" % instance, changed_by=request.user.get_full_name() if request else "unknown", changes="\n".join(changes), ) @receiver(signals.post_save, sender=Assignment) def assignment_post_save(sender, instance, **kwargs): if getattr(instance, "_assignment_change", None): AssignmentChange.objects.create(**instance._assignment_change) @receiver(signals.post_delete, sender=Assignment) def assignment_post_delete(sender, instance, **kwargs): request = get_request() AssignmentChange.objects.create( assignment=None, assignment_description="%s" % instance, changed_by=request.user.get_full_name() if request else "unknown", changes=gettext("Assignment has been deleted."), ) class ExpenseReportManager(SearchManager): search_fields = [ "report_no", "working_days_notes", "free_days_notes", "sick_days_notes", "holi_days_notes", "forced_leave_days_notes", "clothing_expenses_notes", "transport_expenses_notes", "miscellaneous_notes", ] + ["assignment__%s" % f for f in AssignmentManager.search_fields] @model_resource_urls() class ExpenseReport(models.Model): PENDING = 10 FILLED = 20 PAID = 30 STATUS_CHOICES = ((PENDING, _("pending")), (FILLED, _("filled")), (PAID, _("paid"))) assignment = models.ForeignKey( Assignment, verbose_name=_("assignment"), related_name="reports", on_delete=models.CASCADE, ) date_from = models.DateField(_("date from")) date_until = models.DateField(_("date until")) report_no = models.CharField(_("report no."), max_length=10, blank=True) status = models.IntegerField(_("status"), choices=STATUS_CHOICES, default=PENDING) working_days = models.PositiveIntegerField(_("working days")) working_days_notes = models.CharField(_("notes"), max_length=100, blank=True) free_days = models.PositiveIntegerField(_("free days")) free_days_notes = models.CharField(_("notes"), max_length=100, blank=True) sick_days = models.PositiveIntegerField(_("sick days")) sick_days_notes = models.CharField(_("notes"), max_length=100, blank=True) holi_days = models.PositiveIntegerField( _("holiday days"), help_text=_( "These days are still countable towards the assignment" " total days." ), ) holi_days_notes = models.CharField(_("notes"), max_length=100, blank=True) forced_leave_days = models.PositiveIntegerField(_("forced leave days")) forced_leave_days_notes = models.CharField(_("notes"), max_length=100, blank=True) calculated_total_days = models.PositiveIntegerField( _("calculated total days"), help_text=_( "This field is filled in automatically by the system" " and should not be changed." ), default=0, ) clothing_expenses = models.DecimalField( _("clothing expenses"), max_digits=10, decimal_places=2, default=Decimal("0.00") ) clothing_expenses_notes = models.CharField(_("notes"), max_length=100, blank=True) transport_expenses = models.DecimalField( _("transport expenses"), max_digits=10, decimal_places=2, default=Decimal("0.00"), ) transport_expenses_notes = models.CharField(_("notes"), max_length=100, blank=True) miscellaneous = models.DecimalField( _("miscellaneous"), max_digits=10, decimal_places=2, default=Decimal("0.00") ) miscellaneous_notes = models.CharField(_("notes"), max_length=100, blank=True) total = models.DecimalField(_("total"), max_digits=10, decimal_places=2, default=0) specification = models.ForeignKey( Specification, verbose_name=_("specification"), on_delete=models.CASCADE ) class Meta: ordering = ["assignment__drudge", "date_from"] verbose_name = _("expense report") verbose_name_plural = _("expense reports") objects = ExpenseReportManager() def __str__(self): return f"{self.date_from} - {self.date_until}" @property def total_days(self): return ( self.working_days + self.free_days + self.sick_days + self.holi_days + self.forced_leave_days ) def pdf_url(self): return reverse("zivinetz_expensereport_pdf", args=(self.pk,)) def recalculate_total(self, save=True): _n1, _n2, self.total = self.compensations() if save: self.save() def compensation_data(self, arranged_on=None): arranged_on = arranged_on or self.assignment.arranged_on return self.specification.compensation(arranged_on) if arranged_on else None def compensations(self): if not self.assignment.arranged_on: # Make recalculate_total not fall flat on its face return None, None, 0 compensation = self.compensation_data() # spending_money, accomodation, breakfast, lunch, supper, total def line(title, day_type, days): line = [ compensation["spending_money"], compensation["accomodation_%s" % day_type], compensation["breakfast_%s" % day_type], compensation["lunch_%s" % day_type], compensation["supper_%s" % day_type], ] return [f"{days} {title}"] + line + [sum(line) * days] ret = [ [ "", gettext("spending money"), gettext("accomodation"), gettext("breakfast"), gettext("lunch"), gettext("supper"), gettext("Total"), ] ] ret.append(line(gettext("working days"), "working", self.working_days)) ret.append([self.working_days_notes, "", "", "", "", "", ""]) ret.append(line(gettext("free days"), "free", self.free_days)) ret.append([self.free_days_notes, "", "", "", "", "", ""]) ret.append(line(gettext("sick days"), "sick", self.sick_days)) ret.append([self.sick_days_notes, "", "", "", "", "", ""]) # holiday counts as work ret.append(line(gettext("holiday days"), "free", self.holi_days)) ret.append([self.holi_days_notes, "", "", "", "", "", ""]) # forced leave counts zero ret.append( ["{} {}".format(self.forced_leave_days, gettext("forced leave days"))] + [Decimal("0.00")] * 6 ) ret.append([self.forced_leave_days_notes, "", "", "", "", "", ""]) additional = [ (gettext("transport expenses"), self.transport_expenses), (self.transport_expenses_notes, ""), (gettext("clothing expenses"), self.clothing_expenses), (self.clothing_expenses_notes, ""), (gettext("miscellaneous"), self.miscellaneous), (self.miscellaneous_notes, ""), ] total = sum(r[6] for r in ret[1::2] if r) + sum(r[1] for r in additional[::2]) return ret, additional, total @model_resource_urls() class PublicHoliday(models.Model): name = models.CharField(_("name"), max_length=100) date = models.DateField(_("date"), unique=True) class Meta: ordering = ["date"] verbose_name = _("public holiday") verbose_name_plural = _("public holidays") def __str__(self): return f"{self.name} ({self.date})" @model_resource_urls() class CompanyHoliday(models.Model): date_from = models.DateField(_("date from")) date_until = models.DateField(_("date until")) applies_to = models.ManyToManyField( ScopeStatement, blank=False, verbose_name=_("applies to scope statements"), related_name="company_holidays", ) class Meta: ordering = ["date_from"] verbose_name = _("company holiday") verbose_name_plural = _("company holidays") def __str__(self): return f"{self.date_from} - {self.date_until}" def is_contained(self, day): return self.date_from <= day <= self.date_until @model_resource_urls(default="edit") class Assessment(models.Model): created = models.DateTimeField(_("created"), default=timezone.now) created_by = models.ForeignKey( User, blank=True, null=True, on_delete=models.CASCADE, verbose_name=_("created by"), ) drudge = models.ForeignKey( Drudge, verbose_name=_("drudge"), related_name="assessments", on_delete=models.CASCADE, ) assignment = models.ForeignKey( Assignment, verbose_name=_("assignment"), related_name="assessments", blank=True, null=True, on_delete=models.CASCADE, ) mark = models.IntegerField( _("mark"), choices=zip(range(1, 7), range(1, 7)), blank=True, null=True ) comment = models.TextField(_("comment"), blank=True) class Meta: ordering = ["-created"] verbose_name = _("internal assessment") verbose_name_plural = _("internal assessments") def __str__(self): return gettext("Mark %(mark)s for %(drudge)s") % { "mark": self.mark or "-", "drudge": self.drudge, } class CodewordManager(models.Manager): def word(self, key): try: return self.filter(key=key).latest().codeword except self.model.DoesNotExist: return "" @model_resource_urls() class Codeword(models.Model): created = models.DateTimeField(_("created"), default=timezone.now) key = models.CharField(_("key"), max_length=10, db_index=True) codeword = models.CharField(_("codeword"), max_length=20) class Meta: get_latest_by = "created" ordering = ["-created"] verbose_name = _("codeword") verbose_name_plural = _("codewords") objects = CodewordManager() def __str__(self): return self.codeword @model_resource_urls() class JobReferenceTemplate(models.Model): title = models.CharField(_("title"), max_length=100) text = models.TextField(_("text")) class Meta: ordering = ["title"] verbose_name = _("job reference template") verbose_name_plural = _("job reference templates") def __str__(self): return self.title class JobReferenceManager(SearchManager): search_fields = ["text"] + [ "assignment__%s" % f for f in AssignmentManager.search_fields ] @model_resource_urls() class JobReference(models.Model): assignment = models.ForeignKey( Assignment, verbose_name=_("assignment"), related_name="jobreferences", on_delete=models.CASCADE, ) created = models.DateField(_("created")) text = models.TextField(_("text")) class Meta: ordering = ["-created"] verbose_name = _("job reference") verbose_name_plural = _("job references") objects = JobReferenceManager() def __str__(self): return f"{self._meta.verbose_name}: {self.assignment}" def pdf_url(self): return reverse("zivinetz_reference_pdf", args=(self.pk,)) class GroupQuerySet(models.QuerySet): def active(self): return self.filter(is_active=True) @model_resource_urls() class Group(models.Model): is_active = models.BooleanField(_("is active"), default=True) name = models.CharField(_("name"), max_length=100) ordering = models.IntegerField(_("ordering"), default=0) objects = GroupQuerySet.as_manager() class Meta: ordering = ["ordering"] verbose_name = _("group") verbose_name_plural = _("groups") def __str__(self): return self.name class GroupAssignmentQuerySet(models.QuerySet): def monday(self, day): return day - timedelta(days=day.weekday()) def for_date(self, day): return self.filter(week=self.monday(day)) class GroupAssignment(models.Model): group = models.ForeignKey( Group, on_delete=models.CASCADE, related_name="group_assignments", verbose_name=_("group"), ) assignment = models.ForeignKey( Assignment, on_delete=models.CASCADE, related_name="group_assignments", verbose_name=_("assignment"), ) week = models.DateField(_("week")) objects = GroupAssignmentQuerySet.as_manager() class Meta: unique_together = (("assignment", "week"),) verbose_name = _("group assignment") verbose_name_plural = _("group assignments") def __str__(self): return "{}/{}: {}".format( self.group, self.assignment, " - ".join(d.strftime("%d.%m.%Y") for d in self.date_range), ) def save(self, *args, **kwargs): self.week = self.week - timedelta(days=self.week.weekday()) super().save(*args, **kwargs) save.alters_data = True @property def date_range(self): return (self.week, self.week + timedelta(days=4)) class AbsenceManager(SearchManager): search_fields = ( "assignment__drudge__user__first_name", "assignment__drudge__user__last_name", "internal_notes", ) def for_expense_report(self, report): candidate_days = [ report.date_from + timedelta(days=i) for i in range(0, (report.date_until - report.date_from).days) ] days = defaultdict(int) reasons = defaultdict(list) for absence in self.filter( assignment_id=report.assignment_id, days__overlap=candidate_days ): in_range = [day for day in sorted(absence.days) if day in candidate_days] field = absence.REASON_TO_EXPENSE_REPORT[absence.reason] days[field] += len(in_range) parts = [absence.get_reason_display()] if absence.internal_notes: parts.append(" (%s)" % absence.internal_notes) parts.append(": ") parts.append(", ".join(day.strftime("%a %d.%m.%y") for day in in_range)) reasons["%s_notes" % field].append("".join(parts)) return { **days, **{field: "\n".join(reason) for field, reason in reasons.items()}, } @model_resource_urls() class Absence(models.Model): APPROVED_VACATION = "approved-vacation" APPROVED_HOLIDAY = "approved-holiday" SICK = "sick" MOTOR_SAW_COURSE = "motor-saw-course" ENVIRONMENT_COURSE = "environment-course" ABSENTEEISM = "absenteeism" MISSED_WORKING_HOURS = "missed-working-hours" REASON_CHOICES = ( (APPROVED_VACATION, _("approved vacation")), (APPROVED_HOLIDAY, _("approved holiday")), (SICK, _("sick")), (MOTOR_SAW_COURSE, _("motor saw course")), (ENVIRONMENT_COURSE, _("environment course")), (ABSENTEEISM, _("absenteeism")), (MISSED_WORKING_HOURS, _("missed working hours")), ) REASON_TO_EXPENSE_REPORT = { APPROVED_VACATION: "forced_leave_days", APPROVED_HOLIDAY: "holi_days", SICK: "sick_days", MOTOR_SAW_COURSE: "working_days", ENVIRONMENT_COURSE: "working_days", ABSENTEEISM: "forced_leave_days", MISSED_WORKING_HOURS: "sick_days", } PRETTY_REASON = { APPROVED_VACATION: "Urlaub", APPROVED_HOLIDAY: "Ferien", SICK: "Krank", MOTOR_SAW_COURSE: "MSK", ENVIRONMENT_COURSE: "UNA", ABSENTEEISM: "Unentschuldigt", MISSED_WORKING_HOURS: "Verpasst", } assignment = models.ForeignKey( Assignment, on_delete=models.CASCADE, related_name="absences", verbose_name=_("assignment"), ) created_at = models.DateTimeField(_("created at"), default=timezone.now) created_by = models.ForeignKey( User, on_delete=models.CASCADE, verbose_name=_("created by") ) reason = models.CharField(_("reason"), max_length=20, choices=REASON_CHOICES) internal_notes = models.TextField(_("internal notes"), blank=True) days = ArrayField(models.DateField(), verbose_name=_("days")) objects = AbsenceManager() class Meta: ordering = ["-days"] verbose_name = _("absence") verbose_name_plural = _("absences") def __str__(self): return "Absenz von {} von {} bis {}".format( self.assignment.drudge.user.get_full_name(), min(self.days), max(self.days), ) def pretty_days(self): return ", ".join(day.strftime("%a %d.%m.%y") for day in sorted(self.days)) def pretty_reason(self): try: return self.PRETTY_REASON[self.reason] except KeyError: return self.get_reason_display() def clean(self): if not self.days: return outside = [ day for day in self.days if day < self.assignment.date_from or day > self.assignment.determine_date_until() ] if outside: raise ValidationError( _("Absence days outside duration of assignment: %s") % (", ".join(str(day) for day in sorted(outside))) ) if self.reason == self.APPROVED_HOLIDAY: if self.assignment.available_holi_days is None: raise ValidationError( _("Define available holiday days on assignment first.") ) already = sum( ( len(a.days) for a in self.assignment.absences.filter( Q(reason=self.APPROVED_HOLIDAY), ~Q(pk=self.pk) ) ), 0, ) if already + len(self.days) > self.assignment.available_holi_days: raise ValidationError( _("Not enough holiday days available. Only %s remaining.") % (self.assignment.available_holi_days - already) ) overlapping = self.assignment.absences.filter( Q(days__overlap=self.days), ~Q(pk=self.pk) ).select_related("assignment__drudge__user") if overlapping: raise ValidationError( _( "Overlapping absences are not allowed, days already occupied" " by %s." ) % (", ".join(str(o) for o in overlapping)) )
{ "content_hash": "e3eba1c25e63731b7117c9b46a578de0", "timestamp": "", "source": "github", "line_count": 1493, "max_line_length": 88, "avg_line_length": 32.8801071667783, "alnum_prop": 0.5698716642900794, "repo_name": "matthiask/zivinetz", "id": "7ee740fe1230bf1d21212cfb20de26574a67e919", "size": "49090", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "zivinetz/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "16354" }, { "name": "HTML", "bytes": "63953" }, { "name": "JavaScript", "bytes": "2141" }, { "name": "Python", "bytes": "281404" }, { "name": "Shell", "bytes": "87" } ], "symlink_target": "" }
from . import docdata # noqa. enforce signal registration.
{ "content_hash": "e82572520a66d3ba2766664bca37db91", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 60, "avg_line_length": 61, "alnum_prop": 0.7540983606557377, "repo_name": "edoburu/django-oscar-docdata", "id": "84b61f6dc142deec7711f9fb360e8d97667dc26e", "size": "61", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "sandbox/apps/checkout/models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "24711" }, { "name": "Python", "bytes": "189773" } ], "symlink_target": "" }
""" URLConf for Django-Forum. django-forum assumes that the forum application is living under /forum/. Usage in your base urls.py: (r'^forum/', include('forum.urls')), """ from django.conf.urls.defaults import * from forum.models import Forum from forum.feeds import RssForumFeed, AtomForumFeed from forum.sitemap import ForumSitemap, ThreadSitemap, PostSitemap feed_dict = { 'rss' : RssForumFeed, 'atom': AtomForumFeed } sitemap_dict = { 'forums': ForumSitemap, 'threads': ThreadSitemap, 'posts': PostSitemap, } urlpatterns = patterns('', url(r'^$', 'forum.views.forums_list', name='forum_index'), url(r'^(?P<url>(rss|atom).*)/$', 'django.contrib.syndication.views.Feed', {'feed_dict': feed_dict}), url(r'^thread/(?P<thread>[0-9]+)/$', 'forum.views.thread', name='forum_view_thread'), url(r'^thread/(?P<thread>[0-9]+)/reply/$', 'forum.views.reply', name='forum_reply_thread'), url(r'^subscriptions/$', 'forum.views.updatesubs', name='forum_subscriptions'), url(r'^(?P<slug>[-\w]+)/$', 'forum.views.forum', name='forum_thread_list'), url(r'^(?P<forum>[-\w]+)/new/$', 'forum.views.newthread', name='forum_new_thread'), url(r'^([-\w/]+/)(?P<forum>[-\w]+)/new/$', 'forum.views.newthread'), url(r'^([-\w/]+/)(?P<slug>[-\w]+)/$', 'forum.views.forum', name='forum_subforum_thread_list'), (r'^sitemap.xml$', 'django.contrib.sitemaps.views.index', {'sitemaps': sitemap_dict}), (r'^sitemap-(?P<section>.+)\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemap_dict}), )
{ "content_hash": "22b198c76b4ac34a1131ce02d5fa8c26", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 109, "avg_line_length": 33.95652173913044, "alnum_prop": 0.6376440460947503, "repo_name": "andrewychoi/django-forum", "id": "31ccfd13ff872f2176ac587e7153625ef9596354", "size": "1562", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "forum/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "27857" } ], "symlink_target": "" }
from typing import Dict, Tuple from hgijson.tests._models import SimpleModel, ComplexModel EXAMPLE_VALUE_1 = "example-value-1" EXAMPLE_VALUE_2 = "example-value-2" EXAMPLE_VALUE_3 = "example-value-3" EXAMPLE_VALUE_4 = "example-value-4" EXAMPLE_PROPERTY_1 = "example-property-1" EXAMPLE_PROPERTY_2 = "example-property-2" EXAMPLE_PROPERTY_3 = "example-property-3" EXAMPLE_PROPERTY_4 = "example-property-4" def create_simple_model_with_json_representation(modifier: int=0) -> Tuple[SimpleModel, Dict]: """ Creates an instance of `SimpleModel` alongside its expected JSON representation (given the most obvious property mappings). :param modifier: can be used to make the model distinguishable :return: tuple where the first value is the model and the second is its JSON representation """ simple_model = SimpleModel(50) simple_model.a = modifier simple_model_as_json = { "serialized_a": simple_model.a, "serialized_b": simple_model.b } return simple_model, simple_model_as_json def create_complex_model_with_json_representation(modifier: int=0) -> Tuple[ComplexModel, Dict]: """ Creates an instance of `ComplexModel` alongside its expected JSON representation (given the most obvious property mappings). :param modifier: can be used to make the model distinguishable :return: tuple where the first value is the model and the second is its JSON representation """ complex_model = ComplexModel(5) complex_model.a = modifier complex_model.c = 4 complex_model_as_json = create_simple_model_with_json_representation(modifier)[1] complex_model_as_json.update({ "serialized_a": complex_model.a, "serialized_b": complex_model.b, "serialized_c": complex_model.c, "serialized_d": [{ "serialized_a": i, "serialized_b": complex_model.b + i } for i in range(3)], "serialized_e": complex_model.e, "serialized_f": complex_model.f, "serialized_g": complex_model.g, "serialized_h": complex_model.h, "serialized_i": list(complex_model.i) }) return complex_model, complex_model_as_json
{ "content_hash": "c4a2b2d98c340670416011deb8d73f7a", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 117, "avg_line_length": 35.90163934426229, "alnum_prop": 0.6840182648401827, "repo_name": "wtsi-hgi/python-json", "id": "6871eeec50170cd55038f2d6b8a9cd252326083d", "size": "2190", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "hgijson/tests/json_converters/_helpers.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "104217" } ], "symlink_target": "" }
"""Generates graphs with a given eigenvector structure""" import networkx as nx from networkx.utils import np_random_state __all__ = ['spectral_graph_forge'] def _truncate(x): """ Returns the truncated value of x in the interval [0,1] """ if x < 0: return 0 if x > 1: return 1 return x def _mat_spect_approx(A, level, sorteigs=True, reverse=False, absolute=True): """ Returns the low-rank approximation of the given matrix A Parameters ---------- A : numpy matrix level : integer It represents the fixed rank for the output approximation matrix sorteigs : boolean Whether eigenvectors should be sorted according to their associated eigenvalues before removing the firsts of them reverse : boolean Whether eigenvectors list should be reversed before removing the firsts of them absolute : boolean Whether eigenvectors should be sorted considering the absolute values of the corresponding eigenvalues Returns ------- B : numpy matrix low-rank approximation of A Notes ----- Low-rank matrix approximation is about finding a fixed rank matrix close enough to the input one with respect to a given norm (distance). In the case of real symmetric input matrix and euclidean distance, the best low-rank approximation is given by the sum of first eigenvector matrices. References ---------- .. [1] G. Eckart and G. Young, The approximation of one matrix by another of lower rank .. [2] L. Mirsky, Symmetric gauge functions and unitarily invariant norms """ import numpy as np d, V = np.linalg.eigh(A) d = np.ravel(d) n = len(d) if sorteigs: if absolute: k = np.argsort(np.abs(d)) else: k = np.argsort(d) # ordered from the lowest to the highest else: k = range(n) if not reverse: k = np.flipud(k) z = np.zeros((n, 1)) for i in range(level, n): V[:, k[i]] = z B = V*np.diag(d)*np.transpose(V) return B @np_random_state(3) def spectral_graph_forge(G, alpha, transformation='identity', seed=None): """Returns a random simple graph with spectrum resembling that of `G` This algorithm, called Spectral Graph Forge (SGF), computes the eigenvectors of a given graph adjacency matrix, filters them and builds a random graph with a similar eigenstructure. SGF has been proved to be particularly useful for synthesizing realistic social networks and it can also be used to anonymize graph sensitive data. Parameters ---------- G : Graph alpha : float Ratio representing the percentage of eigenvectors of G to consider, values in [0,1]. transformation : string, optional Represents the intended matrix linear transformation, possible values are 'identity' and 'modularity' seed : integer, random_state, or None (default) Indicator of numpy random number generation state. See :ref:`Randomness<randomness>`. Returns ------- H : Graph A graph with a similar eigenvector structure of the input one. Raises ------ NetworkXError If transformation has a value different from 'identity' or 'modularity' Notes ----- Spectral Graph Forge (SGF) generates a random simple graph resembling the global properties of the given one. It leverages the low-rank approximation of the associated adjacency matrix driven by the *alpha* precision parameter. SGF preserves the number of nodes of the input graph and their ordering. This way, nodes of output graphs resemble the properties of the input one and attributes can be directly mapped. It considers the graph adjacency matrices which can optionally be transformed to other symmetric real matrices (currently transformation options include *identity* and *modularity*). The *modularity* transformation, in the sense of Newman's modularity matrix allows the focusing on community structure related properties of the graph. SGF applies a low-rank approximation whose fixed rank is computed from the ratio *alpha* of the input graph adjacency matrix dimension. This step performs a filtering on the input eigenvectors similar to the low pass filtering common in telecommunications. The filtered values (after truncation) are used as input to a Bernoulli sampling for constructing a random adjacency matrix. References ---------- .. [1] L. Baldesi, C. T. Butts, A. Markopoulou, "Spectral Graph Forge: Graph Generation Targeting Modularity", IEEE Infocom, '18. https://arxiv.org/abs/1801.01715 .. [2] M. Newman, "Networks: an introduction", Oxford university press, 2010 Examples -------- >>> import networkx as nx >>> G = nx.karate_club_graph() >>> H = nx.spectral_graph_forge(G, 0.3) >>> """ import numpy as np import scipy.stats as stats available_transformations = ['identity', 'modularity'] alpha = _truncate(alpha) A = nx.to_numpy_matrix(G) n = A.shape[1] level = int(round(n*alpha)) if transformation not in available_transformations: msg = '\'{0}\' is not a valid transformation. '.format(transformation) msg += 'Transformations: {0}'.format(available_transformations) raise nx.NetworkXError(msg) K = np.ones((1, n)) * A B = A if (transformation == 'modularity'): B -= np.transpose(K) * K / float(sum(np.ravel(K))) B = _mat_spect_approx(B, level, sorteigs=True, absolute=True) if (transformation == 'modularity'): B += np.transpose(K) * K / float(sum(np.ravel(K))) B = np.vectorize(_truncate, otypes=[np.float])(B) np.fill_diagonal(B, np.zeros((1, n))) for i in range(n-1): B[i, i+1:] = stats.bernoulli.rvs(B[i, i+1:], random_state=seed) B[i+1:, i] = np.transpose(B[i, i+1:]) H = nx.from_numpy_matrix(B) return H # fixture for pytest def setup_module(module): import pytest numpy = pytest.importorskip('numpy') scipy = pytest.importorskip('scipy')
{ "content_hash": "61d4d931edbe555a837a39a97f8154d6", "timestamp": "", "source": "github", "line_count": 198, "max_line_length": 79, "avg_line_length": 31.6010101010101, "alnum_prop": 0.6560652069681956, "repo_name": "sserrot/champion_relationships", "id": "af470b4279120507013b06cee3652b4224ec90b8", "size": "6375", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "venv/Lib/site-packages/networkx/generators/spectral_graph_forge.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "128" }, { "name": "HTML", "bytes": "18324224" }, { "name": "Jupyter Notebook", "bytes": "9131072" }, { "name": "Python", "bytes": "10702" } ], "symlink_target": "" }
class Solution(object): def rotate(self, nums, k): """ :type nums: List[int] :type k: int :rtype: void Do not return anything, modify nums in-place instead. """ length = len(nums) - 1 for i in range(k): nums.insert(0, nums.pop(length))
{ "content_hash": "7a30c3d4fd3a163c04c3cc6346d88cbf", "timestamp": "", "source": "github", "line_count": 10, "max_line_length": 74, "avg_line_length": 30.8, "alnum_prop": 0.525974025974026, "repo_name": "andy-sheng/leetcode", "id": "592136362fe3f80f686b15c9412217b70538b8f3", "size": "308", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "189-Rotate-Array.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "22195" }, { "name": "C++", "bytes": "315781" }, { "name": "Java", "bytes": "37348" }, { "name": "Objective-C", "bytes": "2621" }, { "name": "Python", "bytes": "12592" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.apps.registry import Apps from django.db import models from django.db.utils import DatabaseError from django.utils.encoding import python_2_unicode_compatible from django.utils.timezone import now from .exceptions import MigrationSchemaMissing class MigrationRecorder(object): """ Deals with storing migration records in the database. Because this table is actually itself used for dealing with model creation, it's the one thing we can't do normally via migrations. We manually handle table creation/schema updating (using schema backend) and then have a floating model to do queries with. If a migration is unapplied its row is removed from the table. Having a row in the table always means a migration is applied. """ @python_2_unicode_compatible class Migration(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField(default=now) class Meta: apps = Apps() app_label = "migrations" db_table = "django_migrations" def __str__(self): return "Migration %s for %s" % (self.name, self.app) def __init__(self, connection): self.connection = connection @property def migration_qs(self): return self.Migration.objects.using(self.connection.alias) def ensure_schema(self): """ Ensures the table exists and has the correct schema. """ # If the table's there, that's fine - we've never changed its schema # in the codebase. if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()): return # Make the table try: with self.connection.schema_editor() as editor: editor.create_model(self.Migration) except DatabaseError as exc: raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc) def applied_migrations(self): """ Returns a set of (app, name) of applied migrations. """ self.ensure_schema() return set(tuple(x) for x in self.migration_qs.values_list("app", "name")) def record_applied(self, app, name): """ Records that a migration was applied. """ self.ensure_schema() self.migration_qs.create(app=app, name=name) def record_unapplied(self, app, name): """ Records that a migration was unapplied. """ self.ensure_schema() self.migration_qs.filter(app=app, name=name).delete() def flush(self): """ Deletes all migration records. Useful if you're testing migrations. """ self.migration_qs.all().delete()
{ "content_hash": "7e2e26aaec82080098b096dc54527bf1", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 112, "avg_line_length": 34.348837209302324, "alnum_prop": 0.6242383209207854, "repo_name": "yephper/django", "id": "72f9f8cbb05d3afae78ca825cf18c0e95f42fd30", "size": "2954", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "django/db/migrations/recorder.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "1538" }, { "name": "CSS", "bytes": "1697381" }, { "name": "HTML", "bytes": "390772" }, { "name": "Java", "bytes": "588" }, { "name": "JavaScript", "bytes": "3172126" }, { "name": "Makefile", "bytes": "134" }, { "name": "PHP", "bytes": "19336" }, { "name": "Python", "bytes": "13365273" }, { "name": "Shell", "bytes": "837" }, { "name": "Smarty", "bytes": "133" } ], "symlink_target": "" }
import read_amber_prmtop as ra import networkx as nx import itertools class GhostAtom(ra.Atom): pass valences = {"H": 1, "C": 4, "N": 3, "O": 2, "S": 2, "Se": 2, "P": 5} def tetravalent_atoms(atoms): """ Identifies possible candidates (those with 4 bonds) :param atoms: Graph of atoms :return: List of atoms with 4 bonds """ candidates = [atom for atom in atoms.nodes() if len(nx.edges(atoms, atom)) == 4] return candidates def multi_bonds(atoms): multibonded = [atom for atom in atoms.nodes() if len(nx.edges(atoms, atom)) < valences[atom.element]] for i, atom in enumerate(multibonded): paired = False for other in atoms.neighbors(atom): if isinstance(other, GhostAtom): paired = True continue if len(nx.edges(atoms, other)) < valences[other.element]: ghost_atom = GhostAtom(**(atom.__dict__)) ghost_atom.name = atom.name + "*" ghost_other = GhostAtom(**(other.__dict__)) ghost_other.name = other.name + "*" atoms.add_edge(other, ghost_atom) atoms.add_edge(atom, ghost_other) paired = True def remove_ghost_atoms(atoms): """ Removes ghost atoms from the atom graph (not needed after chirality checks). :param atoms: Atom graph """ ghost_atoms = [atom for atom in atoms.nodes() if isinstance(atom, GhostAtom)] atoms.remove_nodes_from(ghost_atoms) def rankable_neighbours(chiral_cands): """ Checks if the chiral atom candidates have rankable substituents on each site (i.e. discounting those whose neighbour list contains the same univalent atoms). :param chiral_cands: Atoms to test. :return: maybe_chiral, not_chiral: lists of possibly chiral and achiral atoms """ maybe_chiral, not_chiral = [], [] for chiral_cand in chiral_cands: atoms = chiral_cand.molecule.atoms neighbours = atoms.neighbors(chiral_cand) # Univalent atoms only have the original chiral_cand atom in their neighbour list. Possibly twice, because of # the multi-bond routine. univalent = [nb for nb in neighbours if all([nb2 == chiral_cand for nb2 in atoms.neighbors(nb)])] if len(univalent) > 1 and any([x.mass == y.mass for x, y in itertools.combinations(univalent, 2)]): not_chiral.append(chiral_cand) else: maybe_chiral.append(chiral_cand) return maybe_chiral, not_chiral def chiral_order(atoms, chiral_atom, depth=6): # print "\n\nResidue:", chiral_atom.residue, "atom:", chiral_atom # print "Neighbours:", atoms.neighbors(chiral_atom) # Create a list of ordered atoms to be passed back ordered = [] # Do a quick check whether there are multiple hydrogens neighbors = atoms.neighbors(chiral_atom) hydrogens = [atom for atom in neighbors if atom.element == "H"] if len(hydrogens) < 2: tree = nx.bfs_tree(atoms, chiral_atom) # Generate the list of shortest paths in the molecule, neglecting the trivial path [chiral_atom] paths = sorted(nx.single_source_shortest_path(tree, chiral_atom, depth).values(), reverse=True, key=lambda x: map(lambda at: at.mass, x))[:-1] while paths: # Pop the first element (highest priority path) from the list of paths and remove any duplicates. path = paths.pop(0) # print "Path considered:", path paths_no_dups = [unpruned for unpruned in paths if unpruned != path] # print "Paths:", paths # print "Paths without dups:", paths_no_dups # If there are any duplicates, the paths list will be smaller and we can't resolve a highest priority if len(paths_no_dups) != len(paths): paths = paths_no_dups # Otherwise, the path is higher priority than all the other paths, so its second atom is the neighbour with # highest priority. else: # print "Best path:", path ranked_atom = path[1] # print "Ranked atom:", ranked_atom ordered.append(ranked_atom) # Drop all the paths containing our ranked atom. paths = [unpruned for unpruned in paths if unpruned[1] is not ranked_atom] else: ordered = [] # ordered = [atom for atom in neighbors if atom.element != "H"] # ordered += [atom for atom in neighbors if atom.element == "H"] return ordered def get_chiral_sets(atoms): """ Driver routine for all the chirality stuff. :param atoms: Atom graph :return: Dictionary of chiral centres and CIP-ordered neighbours """ chiral_cands = tetravalent_atoms(atoms) chiral_cands = rankable_neighbours(chiral_cands)[0] multi_bonds(atoms) chiral_centres = {} for i, chiral_atom in enumerate(chiral_cands): ordered = chiral_order(atoms, chiral_atom) if len(ordered) == 4: chiral_centres[chiral_atom] = ordered remove_ghost_atoms(atoms) return chiral_centres def get_chiral_atoms(atoms): return get_chiral_sets(atoms).keys() def write_chirality_file(input_filename, output_filename): molecule = ra.parse_topology_file(input_filename) atoms = molecule.atoms chiral_centres = get_chiral_sets(atoms) with open(output_filename, "w") as output_file: for atom in sorted(chiral_centres.keys(), cmp=lambda x, y: cmp(x.index, y.index)): # Write out the list of chiral atoms and their CIP-ranked neighbours. output_string = "{0:>8d}{1:>8d}{2:>8d}{3:>8d}{4:>8d}\n".format(atom.index + 1, *[other_atom.index + 1 for other_atom in chiral_centres[atom]]) output_file.write(output_string) def calculate_chirality(coords, chiral_centres): import numpy as np angles = [] # For centre atom C and atoms ordered I, J, K and L # Calculate dihedral of I-C-L-J for atom_list in chiral_centres: b1 = coords[atom_list[0]] - coords[atom_list[1]] b2 = coords[atom_list[4]] - coords[atom_list[0]] b3 = coords[atom_list[2]] - coords[atom_list[4]] b1xb2 = np.cross(b1, b2) b2xb3 = np.cross(b2, b3) b1xb2_x_b2xb3 = np.cross(b1xb2, b2xb3) b2_norm = b2 / np.linalg.norm(b2) angles.append(np.arctan2(np.dot(b1xb2_x_b2xb3, b2_norm), np.dot(b1xb2, b2xb3))) return angles if __name__ == "__main__": import rotamer.io.gmin molecule = ra.parse_topology_file("../library/coords.prmtop") atoms = molecule.atoms chiral_centres = get_chiral_sets(atoms) chiral_centres_list = [[k.index] + [val.index for val in v] for k, v in chiral_centres.items()] # Starting coords coords = rotamer.io.amber.read_amber_restart("../library/coords.inpcrd") print calculate_chirality(coords.reshape((-1, 3)), chiral_centres_list) # Lowest file coords = rotamer.io.gmin.read_lowest("../library/lowest")[0]["coords"] print calculate_chirality(coords.reshape((-1, 3)), chiral_centres_list)
{ "content_hash": "1f692d5cc763566556614e92268c0afd", "timestamp": "", "source": "github", "line_count": 181, "max_line_length": 119, "avg_line_length": 40.70165745856354, "alnum_prop": 0.6075743179041673, "repo_name": "khs26/rotamer_library", "id": "6b94b99412a01b78497cfbfd3e8c5e12e54915ea", "size": "7386", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rotamer/topology/chirality.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "118889" } ], "symlink_target": "" }
import unittest from math import pi, atan, sqrt from RULEngine.Util.geometry import get_angle from RULEngine.Util.game_world import GameWorld from RULEngine.Game.Referee import Referee from RULEngine.Util.team_color_service import TeamColorService from RULEngine.Game.Game import Game from RULEngine.Game.Ball import Ball from RULEngine.Util.Pose import Pose from RULEngine.Util.constant import * from ai.STA.Action.GoBehind import GoBehind from ai.STA.Action.GoBetween import GoBetween from ai.STA.Action.GetBall import GetBall from ai.STA.Action.Idle import Idle from ai.STA.Action.Kick import Kick from ai.STA.Action.MoveToPosition import MoveToPosition from ai.STA.Action.MoveToDribblingBall import MoveToDribblingBall from ai.STA.Action.ProtectGoal import ProtectGoal from ai.states.game_state import GameState from ai.Util.ai_command import AICommand, AICommandType class TestActions(unittest.TestCase): def setUp(self): # ToDo : Use mock instead of actual objects self.game_state = GameState() self.game = Game() self.game.set_referee(Referee()) self.game.ball = Ball() game_world = GameWorld(self.game) game_world.set_team_color_svc(TeamColorService(TeamColor.YELLOW_TEAM)) self.game.set_our_team_color(TeamColor.YELLOW_TEAM) self.game_state.set_reference(game_world) self.player_id = 1 # random integer def test_move_to(self): self.pose = Pose(Position(0, 0, 0), orientation=0.0) self.move = MoveToPosition(self.game_state, self.player_id, self.pose) self.assertEqual(MoveToPosition.exec(self.move), AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": self.pose})) self.pose = Pose(Position(0.5, 0.3, 0.2), orientation=3.2) self.move = MoveToPosition(self.game_state, self.player_id, self.pose) self.assertEqual(MoveToPosition.exec(self.move), AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": self.pose})) def test_idle(self): self.idle = Idle(self.game_state, self.player_id) current_pose = None current_pose_string = AICommand(self.player_id, AICommandType.STOP) self.assertEqual(Idle.exec(self.idle), current_pose_string) @unittest.skip("I don't know what the fuck is happening here.") def test_GrabBall(self): self.grab_ball = GetBall(self.game_state, self.player_id) self.game_state.game.ball.set_position(Position(5, 0), 0) ai_cmd = self.grab_ball.exec() ball_position = self.game_state.get_ball_position() destination_orientation = get_angle(self.game_state.get_player_pose(self.player_id).position, ball_position) destination_pose = {"pose_goal": Pose(ball_position, destination_orientation)} ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": destination_pose}) self.assertEqual(ai_cmd, ai_cmd_expected) self.game_state.game.ball.set_position(Position(-5, 5), 0) ai_cmd = self.grab_ball.exec() ai_cmd_expected = AICommand(Pose(Position(-5, 5), 3*pi/4), 0) self.assertEqual(ai_cmd, ai_cmd_expected) @unittest.skip("LAZY ME OH HELL") def test_MoveWithBall(self): self.move_with_ball = MoveToDribblingBall(self.game_state, self.player_id, Position(100, 0)) self.game_state.game.ball.set_position(Position(5, 0), 0) ai_cmd = self.move_with_ball.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(100, 0), 0)}) self.assertEqual(ai_cmd, ai_cmd_expected) self.game_state.game.ball.set_position(Position(5, 2), 0) ai_cmd = self.move_with_ball.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(100, 0), atan(2/5))}) self.assertEqual(ai_cmd, ai_cmd_expected) def test_GoBetween(self): # test avec une droite verticale self.go_between = GoBetween(self.game_state, self.player_id, Position(100, 100), Position(100, -100), Position(200, 0)) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(100, 0), 0)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test avec une droite horizontale self.go_between = GoBetween(self.game_state, self.player_id, Position(100, 100), Position(-100, 100), Position(0, 200)) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(0, 100), pi/2)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test avec une droite quelconque self.go_between = GoBetween(self.game_state, self.player_id, Position(0, 500), Position(500, 0), Position(-300, -300)) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(250, 250), -3*pi/4)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test destination calculée derrière position1 self.go_between = GoBetween(self.game_state, self.player_id, Position(1000, 75), Position(1500, -250), Position(0, 0), 180) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(1150, -23), 3.1215)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test destination calculée derrière position2 self.go_between = GoBetween(self.game_state, self.player_id, Position(-100, 50), Position(-50, 50), Position(-60.0 + sqrt(3), 51.0), 10) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(-60, 50), 0.5235)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test correction pour respecter la distance minimale self.go_between = GoBetween(self.game_state, self.player_id, Position(-500, 25), Position(1, 25), Position(-179, 0), 180) ai_cmd = self.go_between.exec() ai_cmd_expected = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(-179, 25), -pi/2)}) self.assertEqual(ai_cmd, ai_cmd_expected) # test distance entre les positions insuffisantes self.assertRaises(AssertionError, GoBetween, self.game_state, self.player_id, Position(1, 1), Position(-1, -1), 50) def test_GoBehind(self): # TODO: faire davantage de cas de test distance_behind = 500 # test avec une droite quelconque self.go_behind = GoBehind(self.game_state, self.player_id, Position(1.5, 2.3), Position(18.3, 27.8), distance_behind) aicmd_obtenu = GoBehind.exec(self.go_behind) aicmd_cible = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(-273, -415), 0.9882)}) # AICommand(Pose(Position(-273, -415), 0.9882), 0) self.assertEqual(aicmd_obtenu, aicmd_cible) # test avec une droite verticale self.go_behind = GoBehind(self.game_state, self.player_id, Position(1000, 250.3), Position(1000, 725.8), distance_behind) aicmd_obtenu = GoBehind.exec(self.go_behind) aicmd_cible = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(1000, -249), 1.5707)}) # AICommand(Pose(Position(1000, -249), 1.5707), 0) self.assertEqual(aicmd_obtenu, aicmd_cible) # test avec une droite horizontale self.go_behind = GoBehind(self.game_state, self.player_id, Position(175.8, -200.34), Position(-276.8, -200.34), distance_behind) aicmd_obtenu = GoBehind.exec(self.go_behind) aicmd_cible = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(675, -200), -3.1415)}) self.assertEqual(aicmd_obtenu, aicmd_cible) def test_kick(self): # test avec la valeur 0 (nulle) self.kick = Kick(self.game_state, self.player_id, 0) current_pose = self.game_state.get_player_pose(self.player_id) current_pose_string = AICommand(self.player_id, AICommandType.KICK, **{"pose_goal": current_pose}) self.assertEqual(Kick.exec(self.kick), current_pose_string) # test avec la valeur 1 (force maximale) self.kick = Kick(self.game_state, self.player_id, 1) current_pose = self.game_state.get_player_pose(self.player_id) self.assertEqual(Kick.exec(self.kick), AICommand(self.player_id, AICommandType.KICK, **{"pose_goal": current_pose})) # test avec la valeur 0.3 (force intermediaire) self.kick = Kick(self.game_state, self.player_id, 0.3) current_pose = self.game_state.get_player_pose(self.player_id) self.assertEqual(Kick.exec(self.kick), AICommand(self.player_id, AICommandType.KICK, **{"pose_goal": current_pose})) @unittest.skip("I got lazy, didn't want to review all of the protectgoal.") def test_ProtectGoal(self): # test de base self.game_state.game.friends.players[0].update(Pose(Position(4450, 10))) self.game_state.game.ball.set_position(Position(0, 0), 0) self.protectGoal = ProtectGoal(self.game_state, 0) aicmd_obtenu = self.protectGoal.exec() aicmd_cible = AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": Pose(Position(4000, 0), -pi)}) self.assertEqual(aicmd_obtenu, aicmd_cible) # test distance max < distance min self.assertRaises(AssertionError, ProtectGoal, self.game_state, 0, True, 50, 40) if __name__ == "__main__": unittest.main()
{ "content_hash": "ffa8eac6bc3d885b26f05c75af0f0a82", "timestamp": "", "source": "github", "line_count": 206, "max_line_length": 119, "avg_line_length": 51.53398058252427, "alnum_prop": 0.6109645817633761, "repo_name": "AlexandraMercier/StrategyIA", "id": "876b18ea8626438b6b3adf0de0099c525224074d", "size": "10658", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "tests/test_actions.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "240682" } ], "symlink_target": "" }
import sure # noqa # pylint: disable=unused-import import moto.server as server from moto import mock_kinesisvideoarchivedmedia """ Test the different server responses """ @mock_kinesisvideoarchivedmedia def test_kinesisvideoarchivedmedia_server_is_up(): backend = server.create_backend_app("kinesis-video-archived-media") test_client = backend.test_client() res = test_client.post("/getHLSStreamingSessionURL") # Just checking server is up res.status_code.should.equal(404)
{ "content_hash": "5c8a17a49e02edeadfb311fb9f0d131f", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 71, "avg_line_length": 29.41176470588235, "alnum_prop": 0.76, "repo_name": "spulec/moto", "id": "d05a884b2ebe4f0c3ad28d8b719d41c15747d049", "size": "500", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_kinesisvideoarchivedmedia/test_server.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "255" }, { "name": "HTML", "bytes": "5983" }, { "name": "Java", "bytes": "1688" }, { "name": "JavaScript", "bytes": "1424" }, { "name": "Jinja", "bytes": "2502" }, { "name": "Makefile", "bytes": "2284" }, { "name": "Python", "bytes": "14737868" }, { "name": "Ruby", "bytes": "188" }, { "name": "Scala", "bytes": "782" }, { "name": "Shell", "bytes": "5515" } ], "symlink_target": "" }
from typing import Any, Callable, Dict, Optional, TypeVar from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error, ) from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.core.utils import case_insensitive_dict from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models from .._serialization import Serializer from .._vendor import _convert_request, _format_url_section T = TypeVar("T") ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_get_request(operation_result_id: str, subscription_id: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop( "template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/operationResults/{operationResultId}", ) # pylint: disable=line-too-long path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1), "operationResultId": _SERIALIZER.url( "operation_result_id", operation_result_id, "str", max_length=100, min_length=1, pattern=r"^[-\w\\._\\(\\)]+$", ), } _url = _format_url_section(_url, **path_format_arguments) # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) class OperationResultsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.mgmt.labservices.ManagedLabsClient`'s :attr:`operation_results` attribute. """ models = _models def __init__(self, *args, **kwargs): input_args = list(args) self._client = input_args.pop(0) if input_args else kwargs.pop("client") self._config = input_args.pop(0) if input_args else kwargs.pop("config") self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def get(self, operation_result_id: str, **kwargs: Any) -> Optional[_models.OperationResult]: """Get an azure operation result. Returns an azure operation result. :param operation_result_id: The operation result ID / name. Required. :type operation_result_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: OperationResult or None or the result of cls(response) :rtype: ~azure.mgmt.labservices.models.OperationResult or None :raises ~azure.core.exceptions.HttpResponseError: """ error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop("error_map", {}) or {}) _headers = kwargs.pop("headers", {}) or {} _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str cls = kwargs.pop("cls", None) # type: ClsType[Optional[_models.OperationResult]] request = build_get_request( operation_result_id=operation_result_id, subscription_id=self._config.subscription_id, api_version=api_version, template_url=self.get.metadata["url"], headers=_headers, params=_params, ) request = _convert_request(request) request.url = self._client.format_url(request.url) # type: ignore pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access request, stream=False, **kwargs ) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None if response.status_code == 200: deserialized = self._deserialize("OperationResult", pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/operationResults/{operationResultId}"} # type: ignore
{ "content_hash": "59721b25dccc063b1bcb7f3486b8ad5c", "timestamp": "", "source": "github", "line_count": 135, "max_line_length": 146, "avg_line_length": 40.422222222222224, "alnum_prop": 0.664467656221367, "repo_name": "Azure/azure-sdk-for-python", "id": "1a9d088e0b7873e0c501145a9a4bfa4d65b899a0", "size": "5957", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/operations/_operation_results_operations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
"""Tests for the helper for managing project source code.""" from __future__ import unicode_literals import unittest from l2tdevtools import source_helper from tests import test_lib class SourceHelperTest(test_lib.BaseTestCase): """Tests for the helper to manager project source code.""" def testInitialize(self): """Tests the __init__ function.""" source_helper_object = source_helper.SourceHelper('test', None) self.assertIsNotNone(source_helper_object) # TODO: add tests. if __name__ == '__main__': unittest.main()
{ "content_hash": "78bbfff279cedac522c19e325b20a001", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 67, "avg_line_length": 21.88, "alnum_prop": 0.7129798903107861, "repo_name": "Onager/l2tdevtools", "id": "cfee0777513db999f991b0b04a66d3587d58143c", "size": "593", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/source_helper.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "415" }, { "name": "Makefile", "bytes": "7082" }, { "name": "PowerShell", "bytes": "10256" }, { "name": "Python", "bytes": "596082" }, { "name": "Shell", "bytes": "28813" } ], "symlink_target": "" }
from django.conf.urls import include, url app_name = 'sofort' urlpatterns = [ url('^sofort/', include('django_sofortueberweisung.urls')), ]
{ "content_hash": "10cc98777bd0e7c9fc6c73b437207f45", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 63, "avg_line_length": 24.166666666666668, "alnum_prop": 0.7034482758620689, "repo_name": "ParticulateSolutions/django-sofortueberweisung", "id": "f300412143390733592683f59a134782392b5fbe", "size": "145", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "34849" } ], "symlink_target": "" }
from __future__ import division from __future__ import unicode_literals from __future__ import print_function from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import str from builtins import range from builtins import * from builtins import object from past.utils import old_div import math import re ########################################################### ## Google map python wrapper V0.1 ## Extended to allow for marker titels ## ############################################################ class maps(object): def __init__(self, centerLat, centerLng, zoom ): self.center = (float(centerLat),float(centerLng)) self.zoom = int(zoom) self.grids = None self.paths = [] self.points = [] self.radpoints = [] self.gridsetting = None self.coloricon = 'http://chart.apis.google.com/chart?cht=mm&chs=12x16&chco=FFFFFF,XXXXXX,000000&ext=.png' def setgrids(self,slat,elat,latin,slng,elng,lngin): self.gridsetting = [slat,elat,latin,slng,elng,lngin] def addpoint(self, lat, lng, color = '#FF0000', title=''): self.points.append((lat,lng,color[1:], title)) #def addpointcoord(self, coord): # self.points.append((coord[0],coord[1])) def addradpoint(self, lat,lng,rad,color = '#0000FF'): self.radpoints.append((lat,lng,rad,color, title)) def addpath(self,path,color = '#FF0000', _id = ''): path.append(color) path.append(_id) self.paths.append(path) #create the html file which inlcude one google map and all points and paths def draw(self, htmlfile): f = open(htmlfile,'w') f.write('<html>\n') f.write('<head>\n') f.write('<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />\n') f.write('<meta http-equiv="content-type" content="text/html; charset=UTF-8"/>\n') f.write('<style>\n.map-marker-label{\nposition: absolute;\ncolor: blue;\nfont-size:16px;\nfont-weight:bold;}\n</style>') f.write('<title>Google Maps - pygmaps </title>\n') f.write('<script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>') f.write('<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js"></script>\n') f.write('<script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false"></script>\n') f.write('<script type="text/javascript" src="gmaps_with_marker.js"></script>\n') f.write('<script type="text/javascript">\n') f.write('\tfunction initialize() {\n') self.drawmap(f) self.write_polyline_infowindow_js(f) self.write_marker_infowindow_js(f) self.drawgrids(f) self.drawpoints(f) self.drawradpoints(f) self.drawpaths(f,self.paths) f.write('\t}\n') f.write('</script>\n') f.write('</head>\n') f.write('<body style="margin:0px; padding:0px;" onload="initialize()">\n') f.write('\t<div id="map_canvas" style="width: 100%; height: 100%;"></div>\n') f.write('</body>\n') f.write('</html>\n') f.close() def drawgrids(self, f): if self.gridsetting == None: return slat = self.gridsetting[0] elat = self.gridsetting[1] latin = self.gridsetting[2] slng = self.gridsetting[3] elng = self.gridsetting[4] lngin = self.gridsetting[5] self.grids = [] r = [slat+float(x)*latin for x in range(0, int(old_div((elat-slat),latin)))] for lat in r: self.grids.append([(lat+old_div(latin,2.0),slng+old_div(lngin,2.0)),(lat+old_div(latin,2.0),elng+old_div(lngin,2.0))]) r = [slng+float(x)*lngin for x in range(0, int(old_div((elng-slng),lngin)))] for lng in r: self.grids.append([(slat+old_div(latin,2.0),lng+old_div(lngin,2.0)),(elat+old_div(latin,2.0),lng+old_div(lngin,2.0))]) for line in self.grids: self.drawPolyline(f,line,strokeColor = "#000000") def drawpoints(self,f): for point in self.points: self.drawpoint(f,point[0],point[1],point[2],point[3]) def drawradpoints(self, f): for rpoint in self.radpoints: path = self.getcycle(rpoint[0:3]) self.drawPolygon(f,path,strokeColor = rpoint[3]) def getcycle(self,rpoint): cycle = [] lat = rpoint[0] lng = rpoint[1] rad = rpoint[2] #unit: meter d = old_div((old_div(rad,1000.0)),6378.8); lat1 = (old_div(math.pi,180.0))* lat lng1 = (old_div(math.pi,180.0))* lng r = [x*30 for x in range(12)] for a in r: tc = (old_div(math.pi,180.0))*a; y = math.asin(math.sin(lat1)*math.cos(d)+math.cos(lat1)*math.sin(d)*math.cos(tc)) dlng = math.atan2(math.sin(tc)*math.sin(d)*math.cos(lat1),math.cos(d)-math.sin(lat1)*math.sin(y)) x = ((lng1-dlng+math.pi) % (2.0*math.pi)) - math.pi cycle.append( ( float(y*(old_div(180.0,math.pi))),float(x*(old_div(180.0,math.pi))) ) ) return cycle def drawpaths(self, f, paths): c = 0 for path in paths: #print path self.drawPolyline(f,path[:-2], strokeColor = path[-2], polylineLabel = path[-1]) c += 1 ############################################# # # # # # # Low level Map Drawing # # # # # # ############################################# def write_polyline_infowindow_js(self, f): f.write('function addPolylineInfoWindow(o, polylineLabel){var n=new google.maps.InfoWindow({content:""});eventPolygonMouseover=google.maps.event.addListener(o,"click",function(o){var e=new google.maps.Marker({position:o.latLng});console.log("fired"),n.setPosition(o.latLng),n.setContent(polylineLabel),n.open(map,e)})}') def write_marker_infowindow_js(self, f): f.write('function addMarkerInfoWindow(o, title){var n=new google.maps.InfoWindow({content: title});eventMarkerMouseover=google.maps.event.addListener(o,"click",function(){n.open(map,o)})}') def drawmap(self, f): f.write('\t\tvar centerlatlng = new google.maps.LatLng(%f, %f);\n' % (self.center[0],self.center[1])) f.write('\t\tvar myOptions = {\n') f.write('\t\t\tzoom: %d,\n' % (self.zoom)) f.write('\t\t\tcenter: centerlatlng,\n') f.write('\t\t\tmapTypeId: google.maps.MapTypeId.ROADMAP\n') f.write('\t\t};\n') f.write('\t\tvar map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);\n') f.write('\n') def drawpoint(self,f,lat,lon,color, title=None): f.write('\t\tvar latlng = new google.maps.LatLng(%f, %f);\n'%(lat,lon)) f.write('\t\tvar img = new google.maps.MarkerImage(\'%s\');\n' % (self.coloricon.replace('XXXXXX',color))) f.write('\t\tvar marker = new google.maps.Marker({\n') f.write('\t\tposition: latlng,\n') f.write('\t\tmap: map,\n') f.write('\t\ttitle: "'+re.escape(str(title))+'"\n') f.write('\t\t});\n') f.write('\n') if title is not None: f.write('addMarkerInfoWindow(marker,"' + title + '");\n\n') def drawPolyline(self,f,path,\ clickable = True, \ geodesic = True,\ strokeColor = "#FF0000",\ strokeOpacity = 1.0,\ strokeWeight = 2,\ polylineLabel = None ): f.write('var PolylineCoordinates = [\n') for (i, coordinate) in enumerate(path): f.write('new google.maps.LatLng(%f, %f), // %d \n' % (coordinate[0],coordinate[1], i)) f.write('];\n') f.write('\n') f.write('var Path = new google.maps.Polyline({\n') f.write('clickable: %s,\n' % (str(clickable).lower())) f.write('geodesic: %s,\n' % (str(geodesic).lower())) f.write('path: PolylineCoordinates,\n') f.write('strokeColor: "%s",\n' %(strokeColor)) f.write('strokeOpacity: %f,\n' % (strokeOpacity)) f.write('strokeWeight: %d\n' % (strokeWeight)) f.write('});\n') f.write('\n') f.write('Path.setMap(map);\n') f.write('\n\n') if polylineLabel is not None: f.write('addPolylineInfoWindow(Path,"' + polylineLabel + '");\n\n') def drawPolygon(self,f,path,\ clickable = False, \ geodesic = True,\ fillColor = "#000000",\ fillOpacity = 0.0,\ strokeColor = "#FF0000",\ strokeOpacity = 1.0,\ strokeWeight = 1 ): f.write('var coords = [\n') for coordinate in path: f.write('new google.maps.LatLng(%f, %f),\n' % (coordinate[0],coordinate[1])) f.write('];\n') f.write('\n') f.write('var polygon = new google.maps.Polygon({\n') f.write('clickable: %s,\n' % (str(clickable).lower())) f.write('geodesic: %s,\n' % (str(geodesic).lower())) f.write('fillColor: "%s",\n' %(fillColor)) f.write('fillOpacity: %f,\n' % (fillOpacity)) f.write('paths: coords,\n') f.write('strokeColor: "%s",\n' %(strokeColor)) f.write('strokeOpacity: %f,\n' % (strokeOpacity)) f.write('strokeWeight: %d\n' % (strokeWeight)) f.write('});\n') f.write('\n') f.write('polygon.setMap(map);\n') f.write('\n\n') if __name__ == "__main__": ########## CONSTRUCTOR: pygmaps(latitude, longitude, zoom) ############################## # DESC: initialize a map with latitude and longitude of center point # and map zoom level "15" # PARAMETER1: latitude (float) latittude of map center point # PARAMETER2: longitude (float) latittude of map center point # PARAMETER3: zoom (int) map zoom level 0~20 # RETURN: the instant of pygmaps #======================================================================================== mymap = pygmaps(37.428, -122.145, 16) ########## FUNCTION: setgrids(start-Lat, end-Lat, Lat-interval, start-Lng, end-Lng, Lng-interval) ###### # DESC: set grids on map # PARAMETER1: start-Lat (float), start (minimum) latittude of the grids # PARAMETER2: end-Lat (float), end (maximum) latittude of the grids # PARAMETER3: Lat-interval (float) grid size in latitude # PARAMETER4: start-Lng (float), start (minimum) longitude of the grids # PARAMETER5: end-Lng (float), end (maximum) longitude of the grids # PARAMETER6: Lng-interval (float) grid size in longitude # RETURN: no returns #======================================================================================== mymap.setgrids(37.42, 37.43, 0.001, -122.15, -122.14, 0.001) ########## FUNCTION: addpoint(latitude, longitude, [color])############################# # DESC: add a point into a map and dispaly it, color is optional default is red # PARAMETER1: latitude (float) latitude of the point # PARAMETER2: longitude (float) longitude of the point # PARAMETER3: color (string) color of the point showed in map, using HTML color code # HTML COLOR CODE: http://www.computerhope.com/htmcolor.htm # e.g. red "#FF0000", Blue "#0000FF", Green "#00FF00" # RETURN: no return #======================================================================================== mymap.addpoint(37.427, -122.145, "#0000FF") ########## FUNCTION: addradpoint(latitude, longitude, radius, [color])################## # DESC: add a point with a radius (Meter) - Draw cycle # PARAMETER1: latitude (float) latitude of the point # PARAMETER2: longitude (float) longitude of the point # PARAMETER3: radius (float), radius in meter # PARAMETER4: color (string) color of the point showed in map, using HTML color code # HTML COLOR CODE: http://www.computerhope.com/htmcolor.htm # e.g. red "#FF0000", Blue "#0000FF", Green "#00FF00" # RETURN: no return #======================================================================================== mymap.addradpoint(37.429, -122.145, 95, "#FF0000") ########## FUNCTION: addpath(path,[color])############################################## # DESC: add a path into map, the data struceture of Path is a list of points # PARAMETER1: path (list of coordinates) e.g. [(lat1,lng1),(lat2,lng2),...] # PARAMETER2: color (string) color of the point showed in map, using HTML color code # HTML COLOR CODE: http://www.computerhope.com/htmcolor.htm # e.g. red "#FF0000", Blue "#0000FF", Green "#00FF00" # RETURN: no return #======================================================================================== path = [(37.429, -122.145),(37.428, -122.145),(37.427, -122.145),(37.427, -122.146),(37.427, -122.146)] mymap.addpath(path,"#00FF00") ########## FUNCTION: addpath(file)###################################################### # DESC: create the html map file (.html) # PARAMETER1: file (string) the map path and file # RETURN: no return, generate html file in specified directory #======================================================================================== mymap.draw('./mymap.html')
{ "content_hash": "d0d32d13a84f50a56492ba5170782643", "timestamp": "", "source": "github", "line_count": 297, "max_line_length": 322, "avg_line_length": 40.68350168350168, "alnum_prop": 0.6096995779193909, "repo_name": "sunil07t/e-mission-server", "id": "3db06eb5819372605a06d7ef6af88664324974f8", "size": "12083", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "emission/analysis/plotting/gmaps/pygmaps_modified.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "445" }, { "name": "CSS", "bytes": "711874" }, { "name": "HTML", "bytes": "122542" }, { "name": "JavaScript", "bytes": "6962852" }, { "name": "Jupyter Notebook", "bytes": "99521529" }, { "name": "Python", "bytes": "1800632" }, { "name": "Shell", "bytes": "2299" }, { "name": "Smarty", "bytes": "3456" } ], "symlink_target": "" }
import warnings import django from django.db import transaction from django.db.models import ManyToManyField from django.forms.models import model_to_dict from simple_history.exceptions import AlternativeManagerError, NotHistoricalModelError def update_change_reason(instance, reason): attrs = {} model = type(instance) manager = instance if instance.id is not None else model history = get_history_manager_for_model(manager) history_fields = [field.attname for field in history.model._meta.fields] for field in instance._meta.fields: if field.attname not in history_fields: continue value = getattr(instance, field.attname) if field.primary_key is True: if value is not None: attrs[field.attname] = value else: attrs[field.attname] = value record = history.filter(**attrs).order_by("-history_date").first() record.history_change_reason = reason record.save() def get_history_manager_for_model(model): """Return the history manager for a given app model.""" try: manager_name = model._meta.simple_history_manager_attribute except AttributeError: raise NotHistoricalModelError( "Cannot find a historical model for {model}.".format(model=model) ) return getattr(model, manager_name) def get_history_model_for_model(model): """Return the history model for a given app model.""" return get_history_manager_for_model(model).model def bulk_create_with_history( objs, model, batch_size=None, ignore_conflicts=False, default_user=None, default_change_reason=None, default_date=None, ): """ Bulk create the objects specified by objs while also bulk creating their history (all in one transaction). Because of not providing primary key attribute after bulk_create on any DB except Postgres (https://docs.djangoproject.com/en/2.2/ref/models/querysets/#bulk-create) Divide this process on two transactions for other DB's :param objs: List of objs (not yet saved to the db) of type model :param model: Model class that should be created :param batch_size: Number of objects that should be created in each batch :param default_user: Optional user to specify as the history_user in each historical record :param default_change_reason: Optional change reason to specify as the change_reason in each historical record :param default_date: Optional date to specify as the history_date in each historical record :return: List of objs with IDs """ # Exclude ManyToManyFields because they end up as invalid kwargs to # model.objects.filter(...) below. exclude_fields = [ field.name for field in model._meta.get_fields() if isinstance(field, ManyToManyField) ] history_manager = get_history_manager_for_model(model) model_manager = model._default_manager second_transaction_required = True with transaction.atomic(savepoint=False): objs_with_id = model_manager.bulk_create( objs, batch_size=batch_size, ignore_conflicts=ignore_conflicts ) if objs_with_id and objs_with_id[0].pk and not ignore_conflicts: second_transaction_required = False history_manager.bulk_history_create( objs_with_id, batch_size=batch_size, default_user=default_user, default_change_reason=default_change_reason, default_date=default_date, ) if second_transaction_required: obj_list = [] with transaction.atomic(savepoint=False): for obj in objs_with_id: attributes = dict( filter( lambda x: x[1] is not None, model_to_dict(obj, exclude=exclude_fields).items(), ) ) obj_list += model_manager.filter(**attributes) history_manager.bulk_history_create( obj_list, batch_size=batch_size, default_user=default_user, default_change_reason=default_change_reason, default_date=default_date, ) objs_with_id = obj_list return objs_with_id def bulk_update_with_history( objs, model, fields, batch_size=None, default_user=None, default_change_reason=None, default_date=None, manager=None, ): """ Bulk update the objects specified by objs while also bulk creating their history (all in one transaction). :param objs: List of objs of type model to be updated :param model: Model class that should be updated :param fields: The fields that are updated :param batch_size: Number of objects that should be updated in each batch :param default_user: Optional user to specify as the history_user in each historical record :param default_change_reason: Optional change reason to specify as the change_reason in each historical record :param default_date: Optional date to specify as the history_date in each historical record :param manager: Optional model manager to use for the model instead of the default manager """ history_manager = get_history_manager_for_model(model) model_manager = manager or model._default_manager if model_manager.model is not model: raise AlternativeManagerError("The given manager does not belong to the model.") with transaction.atomic(savepoint=False): model_manager.bulk_update(objs, fields, batch_size=batch_size) history_manager.bulk_history_create( objs, batch_size=batch_size, update=True, default_user=default_user, default_change_reason=default_change_reason, default_date=default_date, ) def get_change_reason_from_object(obj): if hasattr(obj, "_change_reason"): return getattr(obj, "_change_reason") if hasattr(obj, "changeReason"): warning_msg = ( "Using the attr changeReason to populate history_change_reason is" " deprecated in 2.10.0 and will be removed in 3.0.0. Use " "_change_reason instead. " ) warnings.warn(warning_msg, DeprecationWarning) return getattr(obj, "changeReason") return None
{ "content_hash": "d1230cf339110d3002ccb39f6774d80c", "timestamp": "", "source": "github", "line_count": 176, "max_line_length": 88, "avg_line_length": 36.875, "alnum_prop": 0.6510015408320493, "repo_name": "treyhunner/django-simple-history", "id": "f7290b4c53116bdaf469ce7678a80eb24c95ee72", "size": "6490", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "simple_history/utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "3476" }, { "name": "Makefile", "bytes": "860" }, { "name": "Python", "bytes": "218249" } ], "symlink_target": "" }
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2010 (ita) """ Node: filesystem structure, contains lists of nodes #. Each file/folder is represented by exactly one node. #. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc. Unused class members can increase the `.wafpickle` file size sensibly. #. Node objects should never be created directly, use the methods :py:func:`Node.make_node` or :py:func:`Node.find_node` #. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` should be used when a build context is present #. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass. (:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context owning a node is held as self.ctx """ import os, re, sys, shutil from waflib import Utils, Errors exclude_regs = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/BitKeeper **/.git **/.git/** **/.gitignore **/.bzr **/.bzrignore **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/.arch-ids **/{arch} **/_darcs **/_darcs/** **/.intlcache **/.DS_Store''' """ Ant patterns for files and folders to exclude while doing the recursive traversal in :py:meth:`waflib.Node.Node.ant_glob` """ def split_path(path): """ Split a path by os.sep (This is not os.path.split) :param path: path to split :type path: string :rtype: list of string :return: the path, split """ return path.split('/') def split_path_cygwin(path): if path.startswith('//'): ret = path.split('/')[2:] ret[0] = '/' + ret[0] return ret return path.split('/') re_sp = re.compile('[/\\\\]') def split_path_win32(path): if path.startswith('\\\\'): if path.startswith('\\\\?'): path = path[4:] else: ret = re.split(re_sp, path)[2:] ret[0] = '\\\\' + ret[0] return ret return re.split(re_sp, path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif Utils.is_win32: split_path = split_path_win32 class Node(object): """ This class is organized in two parts * The basic methods meant for filesystem access (compute paths, create folders, etc) * The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``) The Node objects are not thread safe in any way. """ dict_class = dict __slots__ = ('name', 'sig', 'children', 'parent', 'cache_abspath', 'cache_isdir', 'cache_sig') def __init__(self, name, parent): self.name = name self.parent = parent if parent: if name in parent.children: raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent)) parent.children[name] = self def __setstate__(self, data): "Deserializes from data" self.name = data[0] self.parent = data[1] if data[2] is not None: # Issue 1480 self.children = self.dict_class(data[2]) if data[3] is not None: self.sig = data[3] def __getstate__(self): "Serialize the node info" return (self.name, self.parent, getattr(self, 'children', None), getattr(self, 'sig', None)) def __str__(self): "String representation (name), for debugging purposes" return self.name def __repr__(self): "String representation (abspath), for debugging purposes" return self.abspath() def __hash__(self): "Node hash, used for storage in dicts. This hash is not persistent." return id(self) def __eq__(self, node): "Node comparison, based on the IDs" return id(self) == id(node) def __copy__(self): "Implemented to prevent nodes from being copied (raises an exception)" raise Errors.WafError('nodes are not supposed to be copied') def read(self, flags='r', encoding='ISO8859-1'): """ Return the contents of the file represented by this node:: def build(bld): bld.path.find_node('wscript').read() :type fname: string :param fname: Path to file :type m: string :param m: Open mode :rtype: string :return: File contents """ return Utils.readf(self.abspath(), flags, encoding) def write(self, data, flags='w', encoding='ISO8859-1'): """ Write some text to the physical file represented by this node:: def build(bld): bld.path.make_node('foo.txt').write('Hello, world!') :type data: string :param data: data to write :type flags: string :param flags: Write mode """ Utils.writef(self.abspath(), data, flags, encoding) def chmod(self, val): """ Change file/dir permissions:: def build(bld): bld.path.chmod(493) # 0755 """ os.chmod(self.abspath(), val) def delete(self): """Delete the file/folder, and remove this node from the tree. Do not use this object after calling this method.""" try: try: if hasattr(self, 'children'): shutil.rmtree(self.abspath()) else: os.remove(self.abspath()) except OSError as e: if os.path.exists(self.abspath()): raise e finally: self.evict() def evict(self): """Internal - called when a node is removed""" del self.parent.children[self.name] def suffix(self): """Return the file extension""" k = max(0, self.name.rfind('.')) return self.name[k:] def height(self): """Depth in the folder hierarchy from the filesystem root or from all the file drives""" d = self val = -1 while d: d = d.parent val += 1 return val def listdir(self): """List the folder contents""" lst = Utils.listdir(self.abspath()) lst.sort() return lst def mkdir(self): """ Create a folder represented by this node, creating intermediate nodes as needed An exception will be raised only when the folder cannot possibly exist there """ if getattr(self, 'cache_isdir', None): return try: self.parent.mkdir() except OSError: pass if self.name: try: os.makedirs(self.abspath()) except OSError: pass if not os.path.isdir(self.abspath()): raise Errors.WafError('Could not create the directory %s' % self.abspath()) try: self.children except AttributeError: self.children = self.dict_class() self.cache_isdir = True def find_node(self, lst): """ Find a node on the file system (files or folders), create intermediate nodes as needed :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur continue try: ch = cur.children except AttributeError: cur.children = self.dict_class() else: try: cur = cur.children[x] continue except KeyError: pass # optimistic: create the node first then look if it was correct to do so cur = self.__class__(x, cur) try: os.stat(cur.abspath()) except OSError: cur.evict() return None ret = cur try: os.stat(ret.abspath()) except OSError: ret.evict() return None try: while not getattr(cur.parent, 'cache_isdir', None): cur = cur.parent cur.cache_isdir = True except AttributeError: pass return ret def make_node(self, lst): """ Find or create a node without looking on the filesystem :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur continue if getattr(cur, 'children', {}): if x in cur.children: cur = cur.children[x] continue else: cur.children = self.dict_class() cur = self.__class__(x, cur) return cur def search_node(self, lst): """ Search for a node without looking on the filesystem :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] cur = self for x in lst: if x == '..': cur = cur.parent or cur else: try: cur = cur.children[x] except (AttributeError, KeyError): return None return cur def path_from(self, node): """ Path of this node seen from the other:: def build(bld): n1 = bld.path.find_node('foo/bar/xyz.txt') n2 = bld.path.find_node('foo/stuff/') n1.path_from(n2) # '../bar/xyz.txt' :param node: path to use as a reference :type node: :py:class:`waflib.Node.Node` """ c1 = self c2 = node c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('..') lst.reverse() return os.sep.join(lst) or '.' def abspath(self): """ Absolute path. A cache is kept in the context as ``cache_node_abspath`` """ try: return self.cache_abspath except AttributeError: pass # think twice before touching this (performance + complexity + correctness) if os.sep == '/': if not self.parent: val = os.sep elif not self.parent.name: val = os.sep + self.name else: val = self.parent.abspath() + os.sep + self.name else: if not self.parent: val = '' elif not self.parent.name: val = self.name + os.sep else: val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name self.cache_abspath = val return val def is_child_of(self, node): """ Does this node belong to the subtree node?:: def build(bld): node = bld.path.find_node('wscript') node.is_child_of(bld.path) # True :param node: path to use as a reference :type node: :py:class:`waflib.Node.Node` """ p = self diff = self.height() - node.height() while diff > 0: diff -= 1 p = p.parent return id(p) == id(node) def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True): """ Semi-private and recursive method used by ant_glob. :param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion :type accept: function :param maxdepth: maximum depth in the filesystem (25) :type maxdepth: int :param pats: list of patterns to accept and list of patterns to exclude :type pats: tuple :param dir: return folders too (False by default) :type dir: bool :param src: return files (True by default) :type src: bool :param remove: remove files/folders that do not exist (True by default) :type remove: bool """ dircont = self.listdir() dircont.sort() try: lst = set(self.children.keys()) except AttributeError: self.children = self.dict_class() else: if remove: for x in lst - set(dircont): self.children[x].evict() for name in dircont: npats = accept(name, pats) if npats and npats[0]: accepted = [] in npats[0] node = self.make_node([name]) isdir = os.path.isdir(node.abspath()) if accepted: if isdir: if dir: yield node else: if src: yield node if getattr(node, 'cache_isdir', None) or isdir: node.cache_isdir = True if maxdepth: for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove): yield k raise StopIteration def ant_glob(self, *k, **kw): """ This method is used for finding files across folders. It behaves like ant patterns: * ``**/*`` find all files recursively * ``**/*.class`` find all files ending by .class * ``..`` find files having two dot characters For example:: def configure(cfg): cfg.path.ant_glob('**/*.cpp') # find all .cpp files cfg.root.ant_glob('etc/*.txt') # using the filesystem root can be slow cfg.path.ant_glob('*.cpp', excl=['*.c'], src=True, dir=False) For more information see http://ant.apache.org/manual/dirtasks.html The nodes that correspond to files and folders that do not exist will be removed. To prevent this behaviour, pass 'remove=False' :param incl: ant patterns or list of patterns to include :type incl: string or list of strings :param excl: ant patterns or list of patterns to exclude :type excl: string or list of strings :param dir: return folders too (False by default) :type dir: bool :param src: return files (True by default) :type src: bool :param remove: remove files/folders that do not exist (True by default) :type remove: bool :param maxdepth: maximum depth of recursion :type maxdepth: int :param ignorecase: ignore case while matching (False by default) :type ignorecase: bool """ src = kw.get('src', True) dir = kw.get('dir', False) excl = kw.get('excl', exclude_regs) incl = k and k[0] or kw.get('incl', '**') reflags = kw.get('ignorecase', 0) and re.I def to_pat(s): lst = Utils.to_list(s) ret = [] for x in lst: x = x.replace('\\', '/').replace('//', '/') if x.endswith('/'): x += '**' lst2 = x.split('/') accu = [] for k in lst2: if k == '**': accu.append(k) else: k = k.replace('.', '[.]').replace('*','.*').replace('?', '.').replace('+', '\\+') k = '^%s$' % k try: #print "pattern", k accu.append(re.compile(k, flags=reflags)) except Exception as e: raise Errors.WafError("Invalid pattern: %s" % k, e) ret.append(accu) return ret def filtre(name, nn): ret = [] for lst in nn: if not lst: pass elif lst[0] == '**': ret.append(lst) if len(lst) > 1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def accept(name, pats): nacc = filtre(name, pats[0]) nrej = filtre(name, pats[1]) if [] in nrej: nacc = [] return [nacc, nrej] ret = [x for x in self.ant_iter(accept=accept, pats=[to_pat(incl), to_pat(excl)], maxdepth=kw.get('maxdepth', 25), dir=dir, src=src, remove=kw.get('remove', True))] if kw.get('flat', False): return ' '.join([x.path_from(self) for x in ret]) return ret # -------------------------------------------------------------------------------- # the following methods require the source/build folders (bld.srcnode/bld.bldnode) # using a subclass is a possibility, but is that really necessary? # -------------------------------------------------------------------------------- def is_src(self): """ True if the node is below the source directory note: !is_src does not imply is_bld() :rtype: bool """ cur = self x = id(self.ctx.srcnode) y = id(self.ctx.bldnode) while cur.parent: if id(cur) == y: return False if id(cur) == x: return True cur = cur.parent return False def is_bld(self): """ True if the node is below the build directory note: !is_bld does not imply is_src :rtype: bool """ cur = self y = id(self.ctx.bldnode) while cur.parent: if id(cur) == y: return True cur = cur.parent return False def get_src(self): """ Return the equivalent src node (or self if not possible) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = id(self.ctx.srcnode) y = id(self.ctx.bldnode) lst = [] while cur.parent: if id(cur) == y: lst.reverse() return self.ctx.srcnode.make_node(lst) if id(cur) == x: return self lst.append(cur.name) cur = cur.parent return self def get_bld(self): """ Return the equivalent bld node (or self if not possible) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = id(self.ctx.srcnode) y = id(self.ctx.bldnode) lst = [] while cur.parent: if id(cur) == y: return self if id(cur) == x: lst.reverse() return self.ctx.bldnode.make_node(lst) lst.append(cur.name) cur = cur.parent # the file is external to the current project, make a fake root in the current build directory lst.reverse() if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'): lst[0] = lst[0][0] return self.ctx.bldnode.make_node(['__root__'] + lst) def find_resource(self, lst): """ Try to find a declared build node or a source file :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if not node: self = self.get_src() node = self.find_node(lst) if node: if os.path.isdir(node.abspath()): return None return node def find_or_declare(self, lst): """ if 'self' is in build directory, try to return an existing node if no node is found, go to the source directory try to find an existing node in the source directory if no node is found, create it in the build directory :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if node: if not os.path.isfile(node.abspath()): node.sig = None node.parent.mkdir() return node self = self.get_src() node = self.find_node(lst) if node: if not os.path.isfile(node.abspath()): node.sig = None node.parent.mkdir() return node node = self.get_bld().make_node(lst) node.parent.mkdir() return node def find_dir(self, lst): """ Search for a folder in the filesystem :param lst: path :type lst: string or list of string """ if isinstance(lst, str): lst = [x for x in split_path(lst) if x and x != '.'] node = self.find_node(lst) try: if not os.path.isdir(node.abspath()): return None except (OSError, AttributeError): # the node might be None, and raise an AttributeError return None return node # helpers for building things def change_ext(self, ext, ext_in=None): """ :return: A build node of the same path, but with a different extension :rtype: :py:class:`waflib.Node.Node` """ name = self.name if ext_in is None: k = name.rfind('.') if k >= 0: name = name[:k] + ext else: name = name + ext else: name = name[:- len(ext_in)] + ext return self.parent.find_or_declare([name]) def bldpath(self): "Path seen from the build directory default/src/foo.cpp" return self.path_from(self.ctx.bldnode) def srcpath(self): "Path seen from the source directory ../src/foo.cpp" return self.path_from(self.ctx.srcnode) def relpath(self): "If a file in the build directory, bldpath, else srcpath" cur = self x = id(self.ctx.bldnode) while cur.parent: if id(cur) == x: return self.bldpath() cur = cur.parent return self.srcpath() def bld_dir(self): "Build path without the file name" return self.parent.bldpath() def get_bld_sig(self): """ Node signature, assuming the file is in the build directory """ try: return self.cache_sig except AttributeError: pass if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode: self.sig = Utils.h_file(self.abspath()) self.cache_sig = ret = self.sig return ret pickle_lock = Utils.threading.Lock() """Lock mandatory for thread-safe node serialization""" class Nod3(Node): """Mandatory subclass for thread-safe node serialization""" pass # do not remove
{ "content_hash": "6b2659a0711f64b7a5332f3ef34ffd84", "timestamp": "", "source": "github", "line_count": 803, "max_line_length": 166, "avg_line_length": 24.074719800747197, "alnum_prop": 0.6293192633974757, "repo_name": "livioferrante/my-final-project", "id": "33a5833f3cd7e8dd9b4b52c6ab1ae9c656f3c395", "size": "19332", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": ".mywaflib/waflib/Node.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "1057393" }, { "name": "Stata", "bytes": "527255" }, { "name": "TeX", "bytes": "22811" } ], "symlink_target": "" }
""" A test of the Average kernel used for the Averager. """ from firedrake import (IntervalMesh, Function, RectangleMesh, SpatialCoordinate, VectorFunctionSpace, FiniteElement) from gusto.recovery.recovery_kernels import AverageKernel import numpy as np import pytest @pytest.fixture def mesh(geometry): L = 3.0 n = 3 if geometry == "1D": m = IntervalMesh(n, L) elif geometry == "2D": m = RectangleMesh(n, n, L, L, quadrilateral=True) return m def setup_values(geometry, DG0_field, weights): x = SpatialCoordinate(weights.function_space().mesh()) coords_CG1 = Function(weights.function_space()).interpolate(x) coords_DG0 = Function(DG0_field.function_space()).interpolate(x) if geometry == "1D": # Let us focus on the point at x = 1.0 # The test is if at CG_field[CG_index] we get the average of the corresponding DG_field values CG_index = set_val_at_point(coords_CG1, 1.0) set_val_at_point(coords_DG0, 0.5, DG0_field, 6.0) set_val_at_point(coords_DG0, 1.5, DG0_field, -10.0) set_val_at_point(coords_CG1, 1.0, weights, 2.0) true_values = 0.5 * (6.0 - 10.0) elif geometry == "2D": # Let us focus on the point at (1,1) # The test is if at CG_field[CG_index] we get the average of the corresponding DG_field values # We do it for both components of the vector field CG_index = set_val_at_point(coords_CG1, [1.0, 1.0]) set_val_at_point(coords_CG1, [1.0, 1.0], weights, [4.0, 4.0]) set_val_at_point(coords_DG0, [0.5, 0.5], DG0_field, [6.0, -3.0]) set_val_at_point(coords_DG0, [1.5, 0.5], DG0_field, [-7.0, -6.0]) set_val_at_point(coords_DG0, [0.5, 1.5], DG0_field, [0.0, 3.0]) set_val_at_point(coords_DG0, [1.5, 1.5], DG0_field, [-9.0, -1.0]) true_values = [0.25 * (6.0 - 7.0 + 0.0 - 9.0), 0.25 * (-3.0 - 6.0 + 3.0 - 1.0)] return DG0_field, weights, true_values, CG_index def set_val_at_point(coord_field, coords, field=None, new_value=None): """ Finds the DoF of a field at a particular coordinate. If new_value is provided then it also assigns the coefficient for the field there to be new_value. Otherwise the DoF index is returned. """ num_points = len(coord_field.dat.data[:]) point_found = False for i in range(num_points): # Do the coordinates at the ith point match our desired coords? if np.allclose(coord_field.dat.data[i], coords, rtol=1e-14): point_found = True point_index = i if field is not None and new_value is not None: field.dat.data[i] = new_value break if not point_found: raise ValueError('Your coordinates do not appear to match the coordinates of a DoF') if field is None or new_value is None: return point_index @pytest.mark.parametrize("geometry", ["1D", "2D"]) def test_average(geometry, mesh): cell = mesh.ufl_cell().cellname() DG1_elt = FiniteElement("DG", cell, 1, variant="equispaced") vec_DG1 = VectorFunctionSpace(mesh, DG1_elt) vec_DG0 = VectorFunctionSpace(mesh, "DG", 0) vec_CG1 = VectorFunctionSpace(mesh, "CG", 1) # We will fill DG1_field with values, and average them to CG_field # First need to put the values into DG0 and then interpolate DG0_field = Function(vec_DG0) DG1_field = Function(vec_DG1) CG_field = Function(vec_CG1) weights = Function(vec_CG1) DG0_field, weights, true_values, CG_index = setup_values(geometry, DG0_field, weights) DG1_field.interpolate(DG0_field) kernel = AverageKernel(vec_CG1) kernel.apply(CG_field, weights, DG1_field) tolerance = 1e-12 if geometry == "1D": assert abs(CG_field.dat.data[CG_index] - true_values) < tolerance elif geometry == "2D": assert abs(CG_field.dat.data[CG_index][0] - true_values[0]) < tolerance assert abs(CG_field.dat.data[CG_index][1] - true_values[1]) < tolerance
{ "content_hash": "4edb601d3c2ca8c3ff539d13f82c0b3e", "timestamp": "", "source": "github", "line_count": 113, "max_line_length": 102, "avg_line_length": 35.94690265486726, "alnum_prop": 0.6260462826193993, "repo_name": "firedrakeproject/gusto", "id": "13bffa657f136b612754be0bbb979e9befe203b3", "size": "4062", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "unit-tests/recovery_tests/test_average_kernel.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "765" }, { "name": "Python", "bytes": "567084" } ], "symlink_target": "" }
""" Render documents as plaintext. Very scruffy and not very powerful. Should probably be modified to generate markdown syntax. """ from __future__ import absolute_import from pyth import document from pyth.format import PythWriter from six import StringIO class PlaintextWriter(PythWriter): @classmethod def write(klass, document, target=None, encoding="utf-8", newline="\n"): if target is None: target = StringIO() writer = PlaintextWriter(document, target, encoding, newline) return writer.go() def __init__(self, doc, target, encoding, newline): self.document = doc self.target = target self.encoding = encoding self.newline = newline self.indent = -1 self.paragraphDispatch = { document.List: self.list, document.Paragraph: self.paragraph } def go(self): np = len(self.document.content) for (i, paragraph) in enumerate(self.document.content): handler = self.paragraphDispatch[paragraph.__class__] handler(paragraph) if i < np - 1: self.target.write(self.newline) self.target.truncate() self.target.seek(0) return self.target def paragraph(self, paragraph, prefix=""): content = [] for text in paragraph.content: content.append("".join(text.content)) content = "".join(content) for line in content.splitlines(): self.target.write(" " * self.indent) self.target.write(prefix) self.target.write(line) self.target.write(self.newline) if prefix: prefix = " " def list(self, list, prefix=None): self.indent += 1 for (i, entry) in enumerate(list.content): for (j, paragraph) in enumerate(entry.content): prefix = "* " if j == 0 else " " handler = self.paragraphDispatch[paragraph.__class__] handler(paragraph, prefix) self.indent -= 1
{ "content_hash": "d97035f12c6dc38d155b2adafa7503cc", "timestamp": "", "source": "github", "line_count": 76, "max_line_length": 76, "avg_line_length": 27.32894736842105, "alnum_prop": 0.5888300433317285, "repo_name": "prechelt/pyth", "id": "2a0c3495856d69a385aaa90d4d839dc091676b84", "size": "2077", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pyth/plugins/plaintext/writer.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "2446" }, { "name": "Python", "bytes": "79854" } ], "symlink_target": "" }
from django.contrib import admin from events import models class EventAdmin(admin.ModelAdmin): list_display = ('name', 'start_time', 'location') admin.site.register(models.Event, EventAdmin)
{ "content_hash": "d709ba4bb4598af68f07251fd74e8636", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 53, "avg_line_length": 22.444444444444443, "alnum_prop": 0.7425742574257426, "repo_name": "madisona/django-events", "id": "1b7cbf22c5e67d583aa8ff6d54a464bb1bf177c5", "size": "203", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "events/admin.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "7284" } ], "symlink_target": "" }
from fdp.fairgraph import FAIRGraph def build_base_uri(host, port): if not host.startswith('http'): host = f'http://{host}' if int(port) == 80: base_uri = host else: base_uri = f'{host}:{port}' return base_uri _fairgraph = None def init_fairgraph(host, port, endpoint): base_uri = build_base_uri(host, port) global _fairgraph _fairgraph = FAIRGraph(base_uri, endpoint) def get_fairgraph(): return _fairgraph
{ "content_hash": "4e256ec03ef615fa42af225d4395196f", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 46, "avg_line_length": 23.4, "alnum_prop": 0.6303418803418803, "repo_name": "NLeSC/ODEX-FAIRDataPoint", "id": "8c68842a3a48725842c751b4b75276583998fe8e", "size": "469", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "fdp/config.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "279" }, { "name": "Java", "bytes": "87648" }, { "name": "Makefile", "bytes": "645" }, { "name": "Python", "bytes": "33000" }, { "name": "R", "bytes": "128" }, { "name": "Shell", "bytes": "113" } ], "symlink_target": "" }
import re from SG_Utilities import * import os import fnmatch ## Returns a list of all reviews each as a separate string def getReviews() : reviews = [] for dirpath, subdirs, files in os.walk('Data') : for filename in fnmatch.filter(files, '*.txt') : filenamePath = os.path.join(dirpath, filename) with open(filenamePath, 'r') as f : fileContent = readFileInString(filenamePath) reviews.append(fileContent) writeListToFile('Output/reviewsList.txt', reviews) return reviews #getReviews() #fileContents = readFileInString('Data/Tweets.txt') # Extract tweets using regex. #tweets = re.findall('\+0000 2014\t(.*)\t.*\t.*\t<a href', fileContents) #writeListToFile('Data/extractedTweets.txt', tweets) # Remove duplicate tweets. #uniqueTweets = list(set(tweets)) #writeListToFile('Data/uniqueTweets.txt', uniqueTweets)
{ "content_hash": "263191f3c956b6c7e2aab1d4d02baea5", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 72, "avg_line_length": 29.964285714285715, "alnum_prop": 0.733015494636472, "repo_name": "shubha1593/MovieReviewAnalysis", "id": "9b62e7885d9fd05594d1b9ebf91ca36795299fc7", "size": "864", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "SG_ExtractReviews.py", "mode": "33188", "license": "mit", "language": [ { "name": "Groff", "bytes": "2511" }, { "name": "Matlab", "bytes": "4999" }, { "name": "Python", "bytes": "6328" } ], "symlink_target": "" }
"""Scraper for Wyoming Supreme Court CourtID: wyo Court Short Name: Wyo. History: - 2014-07-02: mlr: Created new version when court got new website. - 2015-07-06: m4h7: Updated to use JSON! - 2016-06-09: arderyp: Updated because json endpoint moved and was changed """ import re from datetime import date, datetime from juriscraper.OpinionSite import OpinionSite class Site(OpinionSite): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.court_id = self.__module__ self.base_url = "http://www.courts.state.wy.us" self.download_base = "https://documents.courts.state.wy.us/Opinions" self.url = ( "https://opinions.courts.state.wy.us/Home/GetOpinions?StartDate=1%2F1%2F" + str(date.today().year) ) def _get_case_names(self): return [ f"{opinion['Appellant']} v. {opinion['Appellee']}" for opinion in self.html ] def _get_download_urls(self): download_urls = [] for record in self.html: pdf_file_name = record["DocumentName"] if pdf_file_name[:5] == "../..": pdf_file_name = pdf_file_name[5:] url = f"{self.download_base}/{pdf_file_name}".replace(" ", "%20") download_urls.append(url) return download_urls def _get_case_dates(self): case_dates = [] date_re = re.compile(r"^/Date\((\d+)\)/$") for record in self.html: match = date_re.match(record["date_heard"]) if match: timestamp = int(match.group(1)) / 1000 case_dates.append(datetime.fromtimestamp(timestamp).date()) return case_dates def _get_docket_numbers(self): return [opinion["DocketNumber"] for opinion in self.html] def _get_citations(self): return [opinion["OpinionID"] for opinion in self.html] def _get_precedential_statuses(self): return ["Published"] * len(self.case_names)
{ "content_hash": "ac75e001c644ef59b392d8f538624b8b", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 85, "avg_line_length": 33.6, "alnum_prop": 0.5912698412698413, "repo_name": "freelawproject/juriscraper", "id": "726f81002a051bee8631d105b2ba9d14b9c2f30a", "size": "2016", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "juriscraper/opinions/united_states/state/wyo.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "63242956" }, { "name": "Jinja", "bytes": "2201" }, { "name": "Makefile", "bytes": "75" }, { "name": "Python", "bytes": "1059228" } ], "symlink_target": "" }
from distutils.core import setup from distutils.extension import Extension import commands def pkgconfig(*packages, **kw): flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'} for token in commands.getoutput("pkg-config --libs --cflags %s" % ' '.join(packages)).split(): kw.setdefault(flag_map.get(token[:2]), []).append(token[2:]) return kw setup( name = "myPackage", ext_modules=[ Extension("extension", ["extension_main.c"], **pkgconfig('glib-2.0')), ], )
{ "content_hash": "a1f8008b7c9bb1029c9d2afe59dd3358", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 98, "avg_line_length": 32.6875, "alnum_prop": 0.6290630975143403, "repo_name": "ActiveState/code", "id": "dd5cb9069384b462dbbe1faa13d64e29ca5c1ab9", "size": "545", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "recipes/Python/502261_Python_distutils__pkgconfig/recipe-502261.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "35894" }, { "name": "C", "bytes": "56048" }, { "name": "C++", "bytes": "90880" }, { "name": "HTML", "bytes": "11656" }, { "name": "Java", "bytes": "57468" }, { "name": "JavaScript", "bytes": "181218" }, { "name": "PHP", "bytes": "250144" }, { "name": "Perl", "bytes": "37296" }, { "name": "Perl 6", "bytes": "9914" }, { "name": "Python", "bytes": "17387779" }, { "name": "Ruby", "bytes": "40233" }, { "name": "Shell", "bytes": "190732" }, { "name": "Tcl", "bytes": "674650" } ], "symlink_target": "" }
""" Provide math calls that uses intrinsics or libc math functions. """ from __future__ import print_function, absolute_import, division import math import operator import sys import numpy as np import llvmlite.llvmpy.core as lc from llvmlite.llvmpy.core import Type from numba.targets.imputils import Registry, impl_ret_untracked from numba import types, cgutils, utils, config from numba.typing import signature registry = Registry() lower = registry.lower # Helpers, shared with cmathimpl. _NP_FLT_FINFO = np.finfo(np.dtype('float32')) FLT_MAX = _NP_FLT_FINFO.max FLT_MIN = _NP_FLT_FINFO.tiny _NP_DBL_FINFO = np.finfo(np.dtype('float64')) DBL_MAX = _NP_DBL_FINFO.max DBL_MIN = _NP_DBL_FINFO.tiny FLOAT_ABS_MASK = 0x7fffffff FLOAT_SIGN_MASK = 0x80000000 DOUBLE_ABS_MASK = 0x7fffffffffffffff DOUBLE_SIGN_MASK = 0x8000000000000000 def is_nan(builder, val): """ Return a condition testing whether *val* is a NaN. """ return builder.fcmp_unordered('uno', val, val) def is_inf(builder, val): """ Return a condition testing whether *val* is an infinite. """ pos_inf = lc.Constant.real(val.type, float("+inf")) neg_inf = lc.Constant.real(val.type, float("-inf")) isposinf = builder.fcmp(lc.FCMP_OEQ, val, pos_inf) isneginf = builder.fcmp(lc.FCMP_OEQ, val, neg_inf) return builder.or_(isposinf, isneginf) def is_finite(builder, val): """ Return a condition testing whether *val* is a finite. """ # is_finite(x) <=> x - x != NaN val_minus_val = builder.fsub(val, val) return builder.fcmp_ordered('ord', val_minus_val, val_minus_val) def f64_as_int64(builder, val): """ Bitcast a double into a 64-bit integer. """ assert val.type == Type.double() return builder.bitcast(val, Type.int(64)) def int64_as_f64(builder, val): """ Bitcast a 64-bit integer into a double. """ assert val.type == Type.int(64) return builder.bitcast(val, Type.double()) def f32_as_int32(builder, val): """ Bitcast a float into a 32-bit integer. """ assert val.type == Type.float() return builder.bitcast(val, Type.int(32)) def int32_as_f32(builder, val): """ Bitcast a 32-bit integer into a float. """ assert val.type == Type.int(32) return builder.bitcast(val, Type.float()) def negate_real(builder, val): """ Negate real number *val*, with proper handling of zeros. """ # The negative zero forces LLVM to handle signed zeros properly. return builder.fsub(lc.Constant.real(val.type, -0.0), val) def call_fp_intrinsic(builder, name, args): """ Call a LLVM intrinsic floating-point operation. """ mod = builder.module intr = lc.Function.intrinsic(mod, name, [a.type for a in args]) return builder.call(intr, args) def _unary_int_input_wrapper_impl(wrapped_impl): """ Return an implementation factory to convert the single integral input argument to a float64, then defer to the *wrapped_impl*. """ def implementer(context, builder, sig, args): val, = args input_type = sig.args[0] fpval = context.cast(builder, val, input_type, types.float64) inner_sig = signature(types.float64, types.float64) res = wrapped_impl(context, builder, inner_sig, (fpval,)) return context.cast(builder, res, types.float64, sig.return_type) return implementer def unary_math_int_impl(fn, float_impl): impl = _unary_int_input_wrapper_impl(float_impl) lower(fn, types.Integer)(impl) def unary_math_intr(fn, intrcode): """ Implement the math function *fn* using the LLVM intrinsic *intrcode*. """ @lower(fn, types.Float) def float_impl(context, builder, sig, args): res = call_fp_intrinsic(builder, intrcode, args) return impl_ret_untracked(context, builder, sig.return_type, res) unary_math_int_impl(fn, float_impl) return float_impl def unary_math_extern(fn, f32extern, f64extern, int_restype=False): """ Register implementations of Python function *fn* using the external function named *f32extern* and *f64extern* (for float32 and float64 inputs, respectively). If *int_restype* is true, then the function's return value should be integral, otherwise floating-point. """ f_restype = types.int64 if int_restype else None def float_impl(context, builder, sig, args): """ Implement *fn* for a types.Float input. """ [val] = args mod = builder.module input_type = sig.args[0] lty = context.get_value_type(input_type) func_name = { types.float32: f32extern, types.float64: f64extern, }[input_type] fnty = Type.function(lty, [lty]) fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name) res = builder.call(fn, (val,)) res = context.cast(builder, res, input_type, sig.return_type) return impl_ret_untracked(context, builder, sig.return_type, res) lower(fn, types.Float)(float_impl) # Implement wrapper for integer inputs unary_math_int_impl(fn, float_impl) return float_impl unary_math_intr(math.fabs, lc.INTR_FABS) #unary_math_intr(math.sqrt, lc.INTR_SQRT) exp_impl = unary_math_intr(math.exp, lc.INTR_EXP) log_impl = unary_math_intr(math.log, lc.INTR_LOG) log10_impl = unary_math_intr(math.log10, lc.INTR_LOG10) sin_impl = unary_math_intr(math.sin, lc.INTR_SIN) cos_impl = unary_math_intr(math.cos, lc.INTR_COS) #unary_math_intr(math.floor, lc.INTR_FLOOR) #unary_math_intr(math.ceil, lc.INTR_CEIL) #unary_math_intr(math.trunc, lc.INTR_TRUNC) log1p_impl = unary_math_extern(math.log1p, "log1pf", "log1p") expm1_impl = unary_math_extern(math.expm1, "expm1f", "expm1") erf_impl = unary_math_extern(math.erf, "erff", "erf") erfc_impl = unary_math_extern(math.erfc, "erfcf", "erfc") tan_impl = unary_math_extern(math.tan, "tanf", "tan") asin_impl = unary_math_extern(math.asin, "asinf", "asin") acos_impl = unary_math_extern(math.acos, "acosf", "acos") atan_impl = unary_math_extern(math.atan, "atanf", "atan") asinh_impl = unary_math_extern(math.asinh, "asinhf", "asinh") acosh_impl = unary_math_extern(math.acosh, "acoshf", "acosh") atanh_impl = unary_math_extern(math.atanh, "atanhf", "atanh") sinh_impl = unary_math_extern(math.sinh, "sinhf", "sinh") cosh_impl = unary_math_extern(math.cosh, "coshf", "cosh") tanh_impl = unary_math_extern(math.tanh, "tanhf", "tanh") # math.floor and math.ceil return float on 2.x, int on 3.x if utils.PYVERSION > (3, 0): log2_impl = unary_math_extern(math.log2, "log2f", "log2") ceil_impl = unary_math_extern(math.ceil, "ceilf", "ceil", True) floor_impl = unary_math_extern(math.floor, "floorf", "floor", True) else: ceil_impl = unary_math_extern(math.ceil, "ceilf", "ceil") floor_impl = unary_math_extern(math.floor, "floorf", "floor") gamma_impl = unary_math_extern(math.gamma, "numba_gammaf", "numba_gamma") # work-around sqrt_impl = unary_math_extern(math.sqrt, "sqrtf", "sqrt") trunc_impl = unary_math_extern(math.trunc, "truncf", "trunc", True) lgamma_impl = unary_math_extern(math.lgamma, "lgammaf", "lgamma") @lower(math.isnan, types.Float) def isnan_float_impl(context, builder, sig, args): [val] = args res = is_nan(builder, val) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.isnan, types.Integer) def isnan_int_impl(context, builder, sig, args): res = cgutils.false_bit return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.isinf, types.Float) def isinf_float_impl(context, builder, sig, args): [val] = args res = is_inf(builder, val) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.isinf, types.Integer) def isinf_int_impl(context, builder, sig, args): res = cgutils.false_bit return impl_ret_untracked(context, builder, sig.return_type, res) if utils.PYVERSION >= (3, 2): @lower(math.isfinite, types.Float) def isfinite_float_impl(context, builder, sig, args): [val] = args res = is_finite(builder, val) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.isfinite, types.Integer) def isfinite_int_impl(context, builder, sig, args): res = cgutils.true_bit return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.copysign, types.Float, types.Float) def copysign_float_impl(context, builder, sig, args): lty = args[0].type mod = builder.module fn = mod.get_or_insert_function(lc.Type.function(lty, (lty, lty)), 'llvm.copysign.%s' % lty.intrinsic_name) res = builder.call(fn, args) return impl_ret_untracked(context, builder, sig.return_type, res) # ----------------------------------------------------------------------------- @lower(math.frexp, types.Float) def frexp_impl(context, builder, sig, args): val, = args fltty = context.get_data_type(sig.args[0]) intty = context.get_data_type(sig.return_type[1]) expptr = cgutils.alloca_once(builder, intty, name='exp') fnty = Type.function(fltty, (fltty, Type.pointer(intty))) fname = { "float": "numba_frexpf", "double": "numba_frexp", }[str(fltty)] fn = builder.module.get_or_insert_function(fnty, name=fname) res = builder.call(fn, (val, expptr)) res = cgutils.make_anonymous_struct(builder, (res, builder.load(expptr))) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.ldexp, types.Float, types.intc) def ldexp_impl(context, builder, sig, args): val, exp = args fltty, intty = map(context.get_data_type, sig.args) fnty = Type.function(fltty, (fltty, intty)) fname = { "float": "numba_ldexpf", "double": "numba_ldexp", }[str(fltty)] fn = cgutils.insert_pure_function(builder.module, fnty, name=fname) res = builder.call(fn, (val, exp)) return impl_ret_untracked(context, builder, sig.return_type, res) # ----------------------------------------------------------------------------- @lower(math.atan2, types.int64, types.int64) def atan2_s64_impl(context, builder, sig, args): [y, x] = args y = builder.sitofp(y, Type.double()) x = builder.sitofp(x, Type.double()) fsig = signature(types.float64, types.float64, types.float64) return atan2_float_impl(context, builder, fsig, (y, x)) @lower(math.atan2, types.uint64, types.uint64) def atan2_u64_impl(context, builder, sig, args): [y, x] = args y = builder.uitofp(y, Type.double()) x = builder.uitofp(x, Type.double()) fsig = signature(types.float64, types.float64, types.float64) return atan2_float_impl(context, builder, fsig, (y, x)) @lower(math.atan2, types.Float, types.Float) def atan2_float_impl(context, builder, sig, args): assert len(args) == 2 mod = builder.module ty = sig.args[0] lty = context.get_value_type(ty) func_name = { types.float32: "atan2f", # Workaround atan2() issues under Windows types.float64: "atan2_fixed" if sys.platform == "win32" else "atan2" }[ty] fnty = Type.function(lty, (lty, lty)) fn = cgutils.insert_pure_function(builder.module, fnty, name=func_name) res = builder.call(fn, args) return impl_ret_untracked(context, builder, sig.return_type, res) # ----------------------------------------------------------------------------- @lower(math.hypot, types.int64, types.int64) def hypot_s64_impl(context, builder, sig, args): [x, y] = args y = builder.sitofp(y, Type.double()) x = builder.sitofp(x, Type.double()) fsig = signature(types.float64, types.float64, types.float64) res = hypot_float_impl(context, builder, fsig, (x, y)) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.hypot, types.uint64, types.uint64) def hypot_u64_impl(context, builder, sig, args): [x, y] = args y = builder.sitofp(y, Type.double()) x = builder.sitofp(x, Type.double()) fsig = signature(types.float64, types.float64, types.float64) res = hypot_float_impl(context, builder, fsig, (x, y)) return impl_ret_untracked(context, builder, sig.return_type, res) @lower(math.hypot, types.Float, types.Float) def hypot_float_impl(context, builder, sig, args): xty, yty = sig.args assert xty == yty == sig.return_type x, y = args # Windows has alternate names for hypot/hypotf, see # https://msdn.microsoft.com/fr-fr/library/a9yb3dbt%28v=vs.80%29.aspx fname = { types.float32: "_hypotf" if sys.platform == 'win32' else "hypotf", types.float64: "_hypot" if sys.platform == 'win32' else "hypot", }[xty] plat_hypot = types.ExternalFunction(fname, sig) if sys.platform == 'win32' and config.MACHINE_BITS == 32: inf = xty(float('inf')) def hypot_impl(x, y): if math.isinf(x) or math.isinf(y): return inf return plat_hypot(x, y) else: def hypot_impl(x, y): return plat_hypot(x, y) res = context.compile_internal(builder, hypot_impl, sig, args) return impl_ret_untracked(context, builder, sig.return_type, res) # ----------------------------------------------------------------------------- @lower(math.radians, types.Float) def radians_float_impl(context, builder, sig, args): [x] = args coef = context.get_constant(sig.return_type, math.pi / 180) res = builder.fmul(x, coef) return impl_ret_untracked(context, builder, sig.return_type, res) unary_math_int_impl(math.radians, radians_float_impl) # ----------------------------------------------------------------------------- @lower(math.degrees, types.Float) def degrees_float_impl(context, builder, sig, args): [x] = args coef = context.get_constant(sig.return_type, 180 / math.pi) res = builder.fmul(x, coef) return impl_ret_untracked(context, builder, sig.return_type, res) unary_math_int_impl(math.degrees, degrees_float_impl) # ----------------------------------------------------------------------------- @lower(math.pow, types.Float, types.Float) @lower(math.pow, types.Float, types.Integer) def pow_impl(context, builder, sig, args): impl = context.get_function(operator.pow, sig) return impl(builder, args)
{ "content_hash": "06ea404c2f0d80bba8d6fc5f14095338", "timestamp": "", "source": "github", "line_count": 411, "max_line_length": 87, "avg_line_length": 34.968369829683695, "alnum_prop": 0.6433342610631784, "repo_name": "jriehl/numba", "id": "8fba75332b44b56da579c0b0c64269c5cc9cd00c", "size": "14372", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "numba/targets/mathimpl.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "7023" }, { "name": "C", "bytes": "657637" }, { "name": "C++", "bytes": "49158" }, { "name": "Cuda", "bytes": "214" }, { "name": "GDB", "bytes": "101" }, { "name": "HTML", "bytes": "3464" }, { "name": "Jupyter Notebook", "bytes": "110326" }, { "name": "Python", "bytes": "6611899" }, { "name": "Shell", "bytes": "7290" } ], "symlink_target": "" }
import click GOOD_COLOR = 'green' WARN_COLOR = 'yellow' BAD_COLOR = 'red' IGNORED_COLOR = 'blue' def set_output_color(color): # Print nothing except the ANSI escape sequence click.secho('', nl=False, fg=color, reset=False) def clear_output_color(): # Print nothing except the reset escape sequence click.secho('', nl=False, reset=True) def output(message, color=None, items=[]): if color: set_output_color(color) click.echo(message) for item in sorted(items): click.echo(" {}".format(item)) clear_output_color() def info(message, items=[]): output(message, None, items) def success(message, items=[]): output("SUCCESS: {}".format(message), GOOD_COLOR, items) def warning(message, items=[]): output("WARNING: {}".format(message), WARN_COLOR, items) def error(message, items=[]): output("ERROR: {}".format(message), BAD_COLOR, items) def print_filelists(new_files, changed_files, missing_files, ignored_files): if not any([new_files, changed_files, missing_files, ignored_files]): output("Index is up-to-date (no changes)", GOOD_COLOR) return if new_files: output("New files (not in index):", WARN_COLOR, new_files) # Print a blank space between sections if missing_files or changed_files or ignored_files: click.echo() if missing_files: output("Missing files:", WARN_COLOR, missing_files) # Print a blank space between sections if changed_files or ignored_files: click.echo() if changed_files: output("Changed files (hash differs from index):", BAD_COLOR, changed_files) # Print a blank space between sections if ignored_files: click.echo() if ignored_files: output("Ignored files and directories:", IGNORED_COLOR, ignored_files)
{ "content_hash": "e1450d523a31fe4003d3fd818c52030d", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 84, "avg_line_length": 27.07246376811594, "alnum_prop": 0.6445396145610278, "repo_name": "mcgid/morenines", "id": "61cf11aba3784722fa4b2a0266c4d508b5da92e2", "size": "1868", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "morenines/output.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "22607" }, { "name": "Shell", "bytes": "301" } ], "symlink_target": "" }
import django try: from django.urls import path url = None except ImportError: path = None from django.conf.urls import url from django.http import HttpResponse def emptypage(request): # Minimal page needed for some tests. # We include a favicon link to stop browsers making a request to # /favicon.ico which slows things down (potentially a lot) and can cause # other issues. return HttpResponse( """<html> <head> <link href="data:image/gif;base64,R0lGODlhEAAQAIABAACE/////yH5BAEKAAEALA""" """AAAAAQABAAAAIghI9pwe2+nmRxvmobzmFnb4GTKJEXwEFoSq2sqSqyUQAAOw==" rel="shortcut icon"> </head> <body></body> </html>""" ) if path is None: urlpatterns = [ url(r"^__emptypage/$", emptypage, name="django_functest.emptypage"), ] else: urlpatterns = [ path(r"__emptypage/", emptypage, name="django_functest.emptypage"), ] if django.VERSION < (1, 9): from django.conf.urls import patterns urlpatterns = patterns("", *urlpatterns)
{ "content_hash": "db58e10146eefe5c1e96d956036f2ad4", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 95, "avg_line_length": 23.727272727272727, "alnum_prop": 0.6580459770114943, "repo_name": "django-functest/django-functest", "id": "54c554d871721b9390512c382bf3daf425f0cef3", "size": "1044", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/django_functest/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "HTML", "bytes": "7962" }, { "name": "Python", "bytes": "115731" }, { "name": "Shell", "bytes": "585" } ], "symlink_target": "" }