def register(self, model):
    """Register a model in self."""
    self.models[model._meta.table_name] = model
    model._meta.database = self.database
    return model
async def manage(self):
    """Manage a database connection."""
    cm = _ContextManager(self.database)
    if isinstance(self.database.obj, AIODatabase):
        cm.connection = await self.database.async_connect()
    else:
        cm.connection = self.database.connect()
    return cm
def migrate(migrator, database, **kwargs):
    """ Write your migrations here.

    > Model = migrator.orm['name']

    > migrator.sql(sql)
    > migrator.create_table(Model)
    > migrator.drop_table(Model, cascade=True)
    > migrator.add_columns(Model, **fields)
    > migrator.change_columns(Model, **fields)
    > migrator.drop_columns(Model, *field_names, cascade=True)
    > migrator.rename_column(Model, old_field_name, new_field_name)
    > migrator.rename_table(Model, new_table_name)
    > migrator.add_index(Model, *col_names, unique=False)
    > migrator.drop_index(Model, index_name)
    > migrator.add_not_null(Model, field_name)
    > migrator.drop_not_null(Model, field_name)
    > migrator.add_default(Model, field_name, default)
    """
    @migrator.create_table
    class DataItem(pw.Model):
        created = pw.DateTimeField(default=dt.datetime.utcnow)
        content = pw.CharField()
def chain(*args):
    """Runs a series of parsers in sequence, passing the result of each parser to the next.
    The result of the last parser is returned.
    """
    def chain_block(*block_args, **block_kwargs):
        # The inner parameters must not shadow the outer ``args``, or the
        # parser list is lost: call the first parser with the block's
        # arguments, then pipe its result through the remaining parsers.
        v = args[0](*block_args, **block_kwargs)
        for p in args[1:]:
            v = p(v)
        return v
    return chain_block
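# A minimal usage sketch (not from the library): the plain functions below
# stand in for parsers, which in picoparse are callables over shared input
# state. chain() pipes the first callable's result through the rest.
double_then_str = chain(lambda x: x * 2, str)
assert double_then_str(21) == '42'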
def one_of(these):
    """Returns the current token if it is found in the collection provided.

    Fails otherwise.
    """
    ch = peek()
    try:
        if (ch is EndOfFile) or (ch not in these):
            fail(list(these))
    except TypeError:
        # `these` is not a collection; compare against it directly.
        if ch != these:
            fail([these])
    next()
    return ch
def not_one_of(these):
    """Returns the current token if it is not found in the collection provided.

    The negative of one_of.
    """
    ch = peek()
    desc = "not_one_of" + repr(these)
    try:
        if (ch is EndOfFile) or (ch in these):
            fail([desc])
    except TypeError:
        # `these` is not a collection; being the negative of one_of, this
        # must fail when the token *equals* the single item provided.
        if ch == these:
            fail([desc])
    next()
    return ch
def satisfies(guard):
    """Returns the current token if it satisfies the guard function provided.

    Fails otherwise.
    This is a generalisation of one_of.
    """
    i = peek()
    if (i is EndOfFile) or (not guard(i)):
        fail(["<satisfies predicate " + _fun_to_str(guard) + ">"])
    next()
    return i
def not_followed_by(parser):
    """Succeeds if the given parser cannot consume input"""
    @tri
    def not_followed_by_block():
        failed = object()
        result = optional(tri(parser), failed)
        if result != failed:
            fail(["not " + _fun_to_str(parser)])
    choice(not_followed_by_block)
def many(parser):
    """Applies the parser to input zero or more times.

    Returns a list of parser results.
    """
    results = []
    terminate = object()
    while local_ps.value:
        result = optional(parser, terminate)
        if result == terminate:
            break
        results.append(result)
    return results
def many_until(these, term):
    """Consumes as many of these as it can until term is encountered.

    Returns a tuple of the list of these results and the term result.
    """
    results = []
    while True:
        stop, result = choice(_tag(True, term), _tag(False, these))
        if stop:
            return results, result
        else:
            results.append(result)
def many_until1(these, term):
    """Like many_until but must consume at least one of these."""
    first = [these()]
    these_results, term_result = many_until(these, term)
    return (first + these_results, term_result)
def sep1(parser, separator):
    """Like sep but must consume at least one of parser."""
    first = [parser()]

    def inner():
        separator()
        return parser()

    return first + many(tri(inner))
def string(string):
    """Iterates over string, matching input to the items provided.

    The most obvious usage of this is to accept an entire string of
    characters. However, this function is more general than that: it takes
    an iterable and, for each item, tries one_of for that set. For example,
    string(['aA', 'bB', 'cC']) will accept 'abc' in either case.

    Note: if you wish to match caseless strings as in the example, use
    picoparse.text.caseless_string.
    """
    found = []
    for c in string:
        found.append(one_of(c))
    return found
def seq(*sequence):
    """Runs a series of parsers in sequence, optionally storing results in a
    returned dictionary.

    For example:
        seq(whitespace, ('phone', digits), whitespace, ('name', remaining))
    """
    results = {}
    for p in sequence:
        if callable(p):
            p()
            continue
        k, v = p
        results[k] = v()
    return results
def _fill(self, size):
    """Fills the internal buffer from the source iterator."""
    try:
        for i in range(size):
            # next(iterator) works on both Python 2 and 3, unlike the
            # Python-2-only iterator.next() method.
            self.buffer.append(next(self.source))
    except StopIteration:
        self.buffer.append((EndOfFile, EndOfFile))
    self.len = len(self.buffer)
def next(self):
    """Advances to and returns the next token or returns EndOfFile."""
    self.index += 1
    t = self.peek()
    if not self.depth:
        self._cut()
    return t
def current(self):
    """Returns the current (token, position) or (EndOfFile, EndOfFile)."""
    if self.index >= self.len:
        self._fill((self.index - self.len) + 1)
    return (self.buffer[self.index] if self.index < self.len
            else (EndOfFile, EndOfFile))
def main(world_cls, referee_cls, gui_cls, gui_actor_cls, ai_actor_cls,
         theater_cls=PygletTheater, default_host=DEFAULT_HOST,
         default_port=DEFAULT_PORT, argv=None):
    """
    Run a game being developed with the kxg game engine.

    Usage:
        {exe_name} sandbox [<num_ais>] [-v...]
        {exe_name} client [--host HOST] [--port PORT] [-v...]
        {exe_name} server <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...]
        {exe_name} debug <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...]
        {exe_name} --help

    Commands:
        sandbox
            Play a single-player game with the specified number of AIs.  None
            of the multiplayer machinery will be used.

        client
            Launch a client that will try to connect to a server on the given
            host and port.  Once it connects and the game starts, the client
            will allow you to play the game against any other connected
            clients.

        server
            Launch a server that will manage a game between the given number
            of human and AI players.  The human players must connect using
            this command's client mode.

        debug
            Debug a multiplayer game locally.  This command launches a server
            and the given number of clients all in different processes, and
            configures the logging system such that the output from each
            process can be easily distinguished.

    Arguments:
        <num_guis>
            The number of human players that will be playing the game.  Only
            needed by commands that will launch some sort of multiplayer
            server.

        <num_ais>
            The number of AI players that will be playing the game.  Only
            needed by commands that will launch single-player games or
            multiplayer servers.

    Options:
        -x --host HOST          [default: {default_host}]
            The address of the machine running the server.  Must be
            accessible from the machines running the clients.

        -p --port PORT          [default: {default_port}]
            The port that the server should listen on.  Don't specify a value
            less than 1024 unless the server is running with root permissions.

        -v --verbose
            Have the game engine log more information about what it's doing.
            You can specify this option several times to get more and more
            information.

    This command is provided so that you can start writing your game with the
    least possible amount of boilerplate code.  However, the clients and
    servers provided by this command are not capable of running a production
    game.  Once you have written your game and want to give it a polished set
    of menus and options, you'll have to write new Stage subclasses
    encapsulating that logic and you'll have to call those stages yourself by
    interacting more directly with the Theater class.  The online
    documentation has more information on this process.
    """
    import sys, os, docopt, nonstdlib

    exe_name = os.path.basename(sys.argv[0])
    usage = main.__doc__.format(**locals()).strip()
    args = docopt.docopt(usage, argv or sys.argv[1:])
    num_guis = int(args['<num_guis>'] or 1)
    num_ais = int(args['<num_ais>'] or 0)
    host, port = args['--host'], int(args['--port'])

    logging.basicConfig(
        format='%(levelname)s: %(name)s: %(message)s',
        level=nonstdlib.verbosity(args['--verbose']),
    )

    # Use the given game objects and command line arguments to play a game!
    if args['debug']:
        print("""\
****************************** KNOWN BUG WARNING ******************************
In debug mode, every message produced by the logging system gets printed twice.
I know vaguely why this is happening, but as of yet I've not been able to fix
it.  In the mean time, don't let this confuse you!
*******************************************************************************""")
        game = MultiplayerDebugger(
            world_cls, referee_cls, gui_cls, gui_actor_cls, num_guis,
            ai_actor_cls, num_ais, theater_cls, host, port)
    else:
        game = theater_cls()
        ai_actors = [ai_actor_cls() for i in range(num_ais)]

        if args['sandbox']:
            game.gui = gui_cls()
            game.initial_stage = UniplayerGameStage(
                world_cls(), referee_cls(), gui_actor_cls(), ai_actors)
            game.initial_stage.successor = PostgameSplashStage()

        if args['client']:
            game.gui = gui_cls()
            game.initial_stage = ClientConnectionStage(
                world_cls(), gui_actor_cls(), host, port)

        if args['server']:
            game.initial_stage = ServerConnectionStage(
                world_cls(), referee_cls(), num_guis, ai_actors, host, port)

    game.play()
def _run_supervisor(self):
    """
    Poll the queues that the workers can use to communicate with the
    supervisor, until all the workers are done and all the queues are
    empty.  Handle messages as they appear.
    """
    import time

    still_supervising = lambda: (
            multiprocessing.active_children()
            or not self.log_queue.empty()
            or not self.exception_queue.empty())

    try:
        while still_supervising():
            # When a log message is received, make a logger with the same
            # name in this process and use it to re-log the message.  It
            # will get handled in this process.
            try:
                record = self.log_queue.get_nowait()
                logger = logging.getLogger(record.name)
                logger.handle(record)
            except queue.Empty:
                pass

            # When an exception is received, immediately re-raise it.
            try:
                exception = self.exception_queue.get_nowait()
            except queue.Empty:
                pass
            else:
                raise exception

            # Sleep for a little bit, and make sure that the workers haven't
            # outlived their time limit.
            time.sleep(1/self.frame_rate)
            self.elapsed_time += 1/self.frame_rate

            if self.time_limit and self.elapsed_time > self.time_limit:
                raise RuntimeError("timeout")

    # Make sure the workers don't outlive the supervisor, no matter how the
    # polling loop ended (e.g. normal execution or an exception).
    finally:
        for process in multiprocessing.active_children():
            process.terminate()
def field_type(self):
    """Return database field type."""
    if not self.model:
        return 'JSON'
    database = self.model._meta.database
    if isinstance(database, Proxy):
        database = database.obj
    if Json and isinstance(database, PostgresqlDatabase):
        return 'JSON'
    return 'TEXT'
def python_value(self, value):
    """Parse value from database."""
    if self.field_type == 'TEXT' and isinstance(value, str):
        return self.loads(value)
    return value
def get_fsapi_endpoint(self):
    """Parse the fsapi endpoint from the device url."""
    endpoint = yield from self.__session.get(self.fsapi_device_url,
                                             timeout=self.timeout)
    text = yield from endpoint.text(encoding='utf-8')
    doc = objectify.fromstring(text)
    return doc.webfsapi.text
def create_session(self):
    """Create a session on the frontier silicon device."""
    req_url = '%s/%s' % (self.__webfsapi, 'CREATE_SESSION')
    sid = yield from self.__session.get(req_url, params=dict(pin=self.pin),
                                        timeout=self.timeout)
    text = yield from sid.text(encoding='utf-8')
    doc = objectify.fromstring(text)
    return doc.sessionId.text
def call(self, path, extra=None):
    """Execute a frontier silicon API call."""
    try:
        if not self.__webfsapi:
            self.__webfsapi = yield from self.get_fsapi_endpoint()

        if not self.sid:
            self.sid = yield from self.create_session()

        if not isinstance(extra, dict):
            extra = dict()

        params = dict(pin=self.pin, sid=self.sid)
        params.update(**extra)

        req_url = '%s/%s' % (self.__webfsapi, path)
        result = yield from self.__session.get(req_url, params=params,
                                               timeout=self.timeout)
        if result.status == 200:
            text = yield from result.text(encoding='utf-8')
        else:
            # The session may have expired; create a new one and retry once.
            self.sid = yield from self.create_session()
            params = dict(pin=self.pin, sid=self.sid)
            params.update(**extra)
            result = yield from self.__session.get(req_url, params=params,
                                                   timeout=self.timeout)
            text = yield from result.text(encoding='utf-8')

        return objectify.fromstring(text)
    except Exception:
        logging.info('AFSAPI Exception: ' + traceback.format_exc())

    return None
def handle_set(self, item, value):
    """Helper method for setting a value by using the fsapi API."""
    doc = yield from self.call('SET/{}'.format(item), dict(value=value))
    if doc is None:
        return None
    return doc.status == 'FS_OK'
def handle_text(self, item):
    """Helper method for fetching a text value."""
    doc = yield from self.handle_get(item)
    if doc is None:
        return None
    return doc.value.c8_array.text or None
def handle_int(self, item):
    """Helper method for fetching an integer value."""
    doc = yield from self.handle_get(item)
    if doc is None:
        return None
    # Note: the trailing `or None` maps a value of 0 to None.
    return int(doc.value.u8.text) or None
def handle_long(self, item):
    """Helper method for fetching a long value. Result is an integer."""
    doc = yield from self.handle_get(item)
    if doc is None:
        return None
    # Note: the trailing `or None` maps a value of 0 to None.
    return int(doc.value.u32.text) or None
def handle_list(self, item):
    """Helper method for fetching a list(map) value."""
    doc = yield from self.call('LIST_GET_NEXT/' + item + '/-1', dict(
        maxItems=100,
    ))
    if doc is None:
        return []

    if not doc.status == 'FS_OK':
        return []

    ret = list()
    for index, item in enumerate(list(doc.iterchildren('item'))):
        temp = dict(band=index)
        for field in list(item.iterchildren()):
            temp[field.get('name')] = list(field.iterchildren()).pop()
        ret.append(temp)

    return ret
def get_power(self):
    """Check if the device is on."""
    power = (yield from self.handle_int(self.API.get('power')))
    return bool(power)
def set_power(self, value=False):
    """Power on or off the device."""
    power = (yield from self.handle_set(
        self.API.get('power'), int(value)))
    return bool(power)
def get_modes(self):
    """Get the modes supported by this device."""
    if not self.__modes:
        self.__modes = yield from self.handle_list(
            self.API.get('valid_modes'))
    return self.__modes
def get_mode_list(self):
    """Get the label list of the supported modes."""
    self.__modes = yield from self.get_modes()
    return (yield from self.collect_labels(self.__modes))
def get_mode(self):
    """Get the currently active mode on the device (DAB, FM, Spotify)."""
    mode = None
    int_mode = (yield from self.handle_long(self.API.get('mode')))
    modes = yield from self.get_modes()
    for temp_mode in modes:
        if temp_mode['band'] == int_mode:
            mode = temp_mode['label']
    return str(mode)
def set_mode(self, value):
    """Set the currently active mode on the device (DAB, FM, Spotify)."""
    mode = -1
    modes = yield from self.get_modes()
    for temp_mode in modes:
        if temp_mode['label'] == value:
            mode = temp_mode['band']
    return (yield from self.handle_set(self.API.get('mode'), mode))
def get_volume_steps(self):
    """Read the maximum volume level of the device."""
    if not self.__volume_steps:
        self.__volume_steps = yield from self.handle_int(
            self.API.get('volume_steps'))
    return self.__volume_steps
def get_mute(self):
    """Check if the device is muted."""
    mute = (yield from self.handle_int(self.API.get('mute')))
    return bool(mute)
def set_mute(self, value=False):
    """Mute or unmute the device."""
    mute = (yield from self.handle_set(self.API.get('mute'), int(value)))
    return bool(mute)
def get_play_status(self):
    """Get the play status of the device."""
    status = yield from self.handle_int(self.API.get('status'))
    return self.PLAY_STATES.get(status)
def get_equalisers(self):
    """Get the equaliser modes supported by this device."""
    if not self.__equalisers:
        self.__equalisers = yield from self.handle_list(
            self.API.get('equalisers'))
    return self.__equalisers
def get_equaliser_list(self):
    """Get the label list of the supported equalisers."""
    self.__equalisers = yield from self.get_equalisers()
    return (yield from self.collect_labels(self.__equalisers))
def set_sleep(self, value=False):
    """Set device sleep timer."""
    return (yield from self.handle_set(self.API.get('sleep'), int(value)))
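# A minimal usage sketch (not part of the library): the fsapi helpers above
# are old-style `yield from` coroutines, so an asyncio event loop drives
# them. `fs` is assumed to be an already-constructed device object exposing
# these methods (hypothetical here).
import asyncio

loop = asyncio.get_event_loop()
is_on = loop.run_until_complete(fs.get_power())   # -> bool
loop.run_until_complete(fs.set_power(True))       # power the device on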
def _set_range(self, start, stop, value, value_len):
    """
    Assumes that start and stop are already in 'buffer' coordinates.
    value is a byte iterable. value_len is fractional.
    """
    assert stop >= start and value_len >= 0
    range_len = stop - start
    if range_len < value_len:
        self._insert_zeros(stop, stop + value_len - range_len)
        self._copy_to_range(start, value, value_len)
    elif range_len > value_len:
        self._del_range(stop - (range_len - value_len), stop)
        self._copy_to_range(start, value, value_len)
    else:
        self._copy_to_range(start, value, value_len)
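# Illustrative cases for _set_range() (hypothetical arguments, treating
# value_len as whole bytes for simplicity):
#   range shorter than value -> zeros are inserted at `stop`, then overwritten:
#       _set_range(1, 3, b'XYZ', 3)   # a 2-byte range grows to fit 3 bytes
#   range longer than value  -> the tail of the range is deleted first:
#       _set_range(1, 4, b'XY', 2)    # a 3-byte range shrinks to 2 bytes
#   equal lengths            -> a plain in-place overwrite:
#       _set_range(1, 3, b'XY', 2)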
def _parse_genotype(self, vcf_fields):
    """Parse genotype from VCF line data"""
    format_col = vcf_fields[8].split(':')
    genome_data = vcf_fields[9].split(':')
    try:
        gt_idx = format_col.index('GT')
    except ValueError:
        return []
    return [int(x) for x in re.split(r'[\|/]', genome_data[gt_idx])
            if x != '.']
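# Example input for _parse_genotype() (hypothetical VCF columns): 'GT' is
# looked up in the FORMAT column (index 8) and the matching entry of the
# sample column (index 9) is split on '|' or '/'; '.' alleles are dropped.
#   fields = ['1', '12345', '.', 'A', 'G', '50', 'PASS', '.', 'GT:DP', '1|0:30']
#   self._parse_genotype(fields)  # -> [1, 0]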
def setDefaultIREncoding(encoding):
    '''
        setDefaultIREncoding - Sets the default encoding used by IndexedRedis.
          This will be the default encoding used for field data. You can override this on a
          per-field basis by using an IRField (such as IRUnicodeField or IRRawField)

        @param encoding - An encoding (like utf-8)
    '''
    try:
        b''.decode(encoding)
    except Exception:
        raise ValueError('setDefaultIREncoding was provided an invalid codec. Got (encoding="%s")' % (str(encoding), ))

    global defaultIREncoding
    defaultIREncoding = encoding
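# Usage sketch: switch the library-wide default encoding. An unknown codec
# name raises ValueError, as implemented above.
setDefaultIREncoding('utf-16')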
def toIndex(self, value):
    '''
        toIndex - An optional method which will return the value prepped for index.

        By default, "toStorage" will be called. If you provide "hashIndex=True" on the constructor,
        the field will be md5summed for indexing purposes. This is useful for large strings, etc.
    '''
    if self._isIrNull(value):
        ret = IR_NULL_STR
    else:
        ret = self._toIndex(value)

    if self.isIndexHashed is False:
        return ret

    return md5(tobytes(ret)).hexdigest()
def _getReprProperties(self):
    '''
        _getReprProperties - Get the properties of this field to display in repr().

        These should be in the form of $propertyName=$propertyRepr

        The default IRField implementation handles just the "hashIndex" property.

        defaultValue is part of "__repr__" impl. You should just extend this method
        with your object's properties instead of rewriting repr.
    '''
    ret = []
    if getattr(self, 'valueType', None) is not None:
        ret.append('valueType=%s' % (self.valueType.__name__, ))
    if hasattr(self, 'hashIndex'):
        ret.append('hashIndex=%s' % (self.hashIndex, ))
    return ret
def copy(self):
    '''
        copy - Create a copy of this IRField.

        Each subclass should implement this, as you'll need to pass in the args to constructor.

        @return <IRField (or subclass)> - Another IRField that has all the same values as this one.
    '''
    return self.__class__(name=self.name, valueType=self.valueType,
                          defaultValue=self.defaultValue, hashIndex=self.hashIndex)
def getObj(self):
    '''
        getObj - Fetch (if not fetched) and return the obj associated with this data.
    '''
    if self.obj is None:
        if not self.pk:
            return None
        self.obj = self.foreignModel.objects.get(self.pk)
    return self.obj
def getPk(self):
    '''
        getPk - Resolve any absent pk's off the obj's (like if an obj has been saved), and return the pk.
    '''
    if not self.pk and self.obj:
        if self.obj._id:
            self.pk = self.obj._id
    return self.pk
def objHasUnsavedChanges(self):
    '''
        objHasUnsavedChanges - Check if any object has unsaved changes, cascading.
    '''
    if not self.obj:
        return False
    return self.obj.hasUnsavedChanges(cascadeObjects=True)
def getPk(self):
    '''
        getPk - @see ForeignLinkData.getPk
    '''
    if not self.pk or None in self.pk:
        for i in range(len(self.pk)):
            if self.pk[i]:
                continue
            if self.obj[i] and self.obj[i]._id:
                self.pk[i] = self.obj[i]._id
    return self.pk
def getObj(self):
    '''
        getObj - @see ForeignLinkData.getObj

        Except this always returns a list.
    '''
    if self.obj:
        needPks = [(i, self.pk[i]) for i in range(len(self.obj))
                   if self.obj[i] is None]
        if not needPks:
            return self.obj

        fetched = list(self.foreignModel.objects.getMultiple(
            [needPk[1] for needPk in needPks]))

        i = 0
        for objIdx, pk in needPks:
            self.obj[objIdx] = fetched[i]
            i += 1

    return self.obj
def isFetched(self):
    '''
        isFetched - @see ForeignLinkData.isFetched
    '''
    if not self.obj:
        return False
    if not self.pk or None in self.obj:
        return False
    return not bool(self.obj is None)
def objHasUnsavedChanges(self):
    '''
        objHasUnsavedChanges - @see ForeignLinkData.objHasUnsavedChanges

        True if ANY object has unsaved changes.
    '''
    if not self.obj:
        return False

    for thisObj in self.obj:
        if not thisObj:
            continue
        if thisObj.hasUnsavedChanges(cascadeObjects=True):
            return True

    return False
def assert_json_type(value: JsonValue, expected_type: JsonCheckType) -> None:
    """Check that a value has a certain JSON type.

    Raise TypeError if the type does not match.

    Supported types: str, int, float, bool, list, dict, and None.
    float will match any number, int will only match numbers without a
    fractional part.

    The special type JList(x) will match a list value where each
    item is of type x:

    >>> assert_json_type([1, 2, 3], JList(int))
    """

    def type_name(t: Union[JsonCheckType, Type[None]]) -> str:
        if t is None:
            return "None"
        if isinstance(t, JList):
            return "list"
        return t.__name__

    if expected_type is None:
        if value is None:
            return
    elif expected_type == float:
        if isinstance(value, float) or isinstance(value, int):
            return
    elif expected_type in [str, int, bool, list, dict]:
        if isinstance(value, expected_type):  # type: ignore
            return
    elif isinstance(expected_type, JList):
        if isinstance(value, list):
            for v in value:
                assert_json_type(v, expected_type.value_type)
            return
    else:
        raise TypeError("unsupported type")
    raise TypeError("wrong JSON type {} != {}".format(
        type_name(expected_type), type_name(type(value))))
def json_get(json: JsonValue, path: str, expected_type: Any = ANY) -> Any:
    """Get a JSON value by path, optionally checking its type.

    >>> j = {"foo": {"num": 3.4, "s": "Text"}, "arr": [10, 20, 30]}
    >>> json_get(j, "/foo/num")
    3.4
    >>> json_get(j, "/arr[1]")
    20

    Raise ValueError if the path is not found:

    >>> json_get(j, "/foo/unknown")
    Traceback (most recent call last):
        ...
    ValueError: JSON path '/foo/unknown' not found

    Raise TypeError if the path contains a non-object element:

    >>> json_get(j, "/foo/num/bar")
    Traceback (most recent call last):
        ...
    TypeError: JSON path '/foo/num' is not an object

    Or a non-array element:

    >>> json_get(j, "/foo[2]")
    Traceback (most recent call last):
        ...
    TypeError: JSON path '/foo' is not an array

    Raise an IndexError if the array index is out of bounds:

    >>> json_get(j, "/arr[10]")
    Traceback (most recent call last):
        ...
    IndexError: JSON array '/arr' too small (3 <= 10)

    Recognized types are: str, int, float, bool, list, dict, and None.
    TypeError is raised if the type does not match.

    >>> json_get(j, "/foo/num", str)
    Traceback (most recent call last):
        ...
    TypeError: wrong JSON type str != float

    float will match any number, int will only match numbers
    without a fractional part.

    >>> json_get(j, "/foo/num", float)
    3.4
    >>> json_get(j, "/foo/num", int)
    Traceback (most recent call last):
        ...
    TypeError: wrong JSON type int != float
    """
    elements = _parse_json_path(path)
    current = json
    current_path = ""
    for i, element in enumerate(elements):
        if isinstance(element, str):
            if not isinstance(current, dict):
                msg = "JSON path '{}' is not an object".format(current_path)
                raise TypeError(msg) from None
            if element not in current:
                raise ValueError("JSON path '{}' not found".format(path))
            current_path += "/" + element
            current = current[element]
        else:
            if not isinstance(current, list):
                msg = "JSON path '{}' is not an array".format(current_path)
                raise TypeError(msg) from None
            if element >= len(current):
                msg = "JSON array '{}' too small ({} <= {})".format(
                    current_path, len(current), element)
                raise IndexError(msg)
            current_path += "[{}]".format(i)
            current = current[element]
    if expected_type != ANY:
        assert_json_type(current, cast(JsonType, expected_type))
    return current
def json_get_default(json: JsonValue, path: str, default: Any,
                     expected_type: Any = ANY) -> Any:
    """Get a JSON value by path, optionally checking its type.

    This works exactly like json_get(), but instead of raising
    ValueError or IndexError when a path part is not found, return the
    provided default value:

    >>> json_get_default({}, "/foo", "I am a default value")
    'I am a default value'

    TypeErrors will be raised as in json_get() if an expected_type is
    provided:

    >>> json_get_default({"foo": "bar"}, "/foo", 123, int)
    Traceback (most recent call last):
        ...
    TypeError: wrong JSON type int != str
    """
    try:
        return json_get(json, path, expected_type)
    except (ValueError, IndexError):
        return default
def load(cls, fh):
    """
    Load json or yaml data from file handle.

    Args:
        fh (file): File handle to load from.

    Example:
        >>> with open('data.json', 'r') as json:
        >>>     jsdata = composite.load(json)
        >>>
        >>> with open('data.yml', 'r') as yml:
        >>>     ymldata = composite.load(yml)
    """
    dat = fh.read()
    try:
        ret = cls.from_json(dat)
    except Exception:
        ret = cls.from_yaml(dat)
    return ret
def from_json(cls, fh):
    """
    Load json from file handle.

    Args:
        fh (file): File handle to load from.

    Example:
        >>> with open('data.json', 'r') as json:
        >>>     data = composite.load(json)
    """
    if isinstance(fh, str):
        return cls(json.loads(fh))
    else:
        return cls(json.load(fh))
def intersection(self, other, recursive=True):
    """
    Recursively compute intersection of data. For dictionaries, items
    for specific keys will be reduced to unique items. For lists, items
    will be reduced to unique items. This method is meant to be analogous
    to set.intersection for composite objects.

    Args:
        other (composite): Other composite object to intersect with.
        recursive (bool): Whether or not to perform the operation
            recursively, for all nested composite objects.
    """
    if not isinstance(other, composite):
        raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))

    if self.meta_type != other.meta_type:
        return composite({})

    if self.meta_type == 'list':
        keep = []
        for item in self._list:
            if item in other._list:
                if recursive and isinstance(item, composite):
                    keep.extend(item.intersection(other.index(item), recursive=True))
                else:
                    keep.append(item)
        return composite(keep)
    elif self.meta_type == 'dict':
        keep = {}
        for key in self._dict:
            item = self._dict[key]
            if key in other._dict:
                if recursive and \
                   isinstance(item, composite) and \
                   isinstance(other.get(key), composite):
                    keep[key] = item.intersection(other.get(key), recursive=True)
                elif item == other[key]:
                    keep[key] = item
        return composite(keep)
    return
def union(self, other, recursive=True, overwrite=False):
    """
    Recursively compute union of data. For dictionaries, items for
    specific keys will be combined into a list, depending on the
    status of the overwrite= parameter. For lists, items will be appended
    and reduced to unique items. This method is meant to be analogous
    to set.union for composite objects.

    Args:
        other (composite): Other composite object to union with.
        recursive (bool): Whether or not to perform the operation
            recursively, for all nested composite objects.
        overwrite (bool): Whether or not to overwrite entries with
            the same key in a nested dictionary.
    """
    if not isinstance(other, composite):
        raise AssertionError('Cannot union composite and {} types'.format(type(other)))

    if self.meta_type != other.meta_type:
        return composite([self, other])

    if self.meta_type == 'list':
        keep = []
        for item in self._list:
            keep.append(item)
        for item in other._list:
            if item not in self._list:
                keep.append(item)
        return composite(keep)
    elif self.meta_type == 'dict':
        keep = {}
        for key in list(set(list(self._dict.keys()) + list(other._dict.keys()))):
            left = self._dict.get(key)
            right = other._dict.get(key)
            if recursive and \
               isinstance(left, composite) and \
               isinstance(right, composite):
                keep[key] = left.union(right, recursive=recursive, overwrite=overwrite)
            elif left == right:
                keep[key] = left
            elif left is None:
                keep[key] = right
            elif right is None:
                keep[key] = left
            elif overwrite:
                keep[key] = right
            else:
                keep[key] = composite([left, right])
        return composite(keep)
    return
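# Usage sketch for intersection()/union() (assumes the composite
# constructor shown elsewhere in this library; values are illustrative):
a = composite({'x': 1, 'y': 2})
b = composite({'x': 1, 'z': 3})
a.intersection(b)   # keeps only matching entries: {'x': 1}
a.union(b)          # merges both sides: {'x': 1, 'y': 2, 'z': 3}
a.union(composite({'x': 9}), overwrite=True)   # right wins on conflict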
def update(self, other):
    """
    Update internal dictionary object. This is meant to be an
    analog for dict.update().
    """
    if self.meta_type == 'list':
        raise AssertionError('Cannot update object of `list` base type!')
    elif self.meta_type == 'dict':
        self._dict = dict(self + composite(other))
    return
def items(self):
    """
    Return items for object, if they are available.
    """
    if self.meta_type == 'list':
        return self._list
    elif self.meta_type == 'dict':
        return self._dict.items()
def values(self):
    """
    Return values for object, if they are available.
    """
    if self.meta_type == 'list':
        return self._list
    elif self.meta_type == 'dict':
        return self._dict.values()
def append(self, item):
    """
    Append to object, if object is list.
    """
    if self.meta_type == 'dict':
        raise AssertionError('Cannot append to object of `dict` base type!')
    if self.meta_type == 'list':
        self._list.append(item)
    return
def extend(self, item):
    """
    Extend list from object, if object is list.
    """
    if self.meta_type == 'dict':
        raise AssertionError('Cannot extend object of `dict` base type!')
    if self.meta_type == 'list':
        self._list.extend(item)
    return
def json(self):
    """
    Return JSON representation of object.
    """
    if self.meta_type == 'list':
        ret = []
        for dat in self._list:
            if not isinstance(dat, composite):
                ret.append(dat)
            else:
                ret.append(dat.json())
        return ret
    elif self.meta_type == 'dict':
        ret = {}
        for key in self._dict:
            if not isinstance(self._dict[key], composite):
                ret[key] = self._dict[key]
            else:
                ret[key] = self._dict[key].json()
        return ret
def write_json(self, fh, pretty=True):
    """
    Write composite object to file handle in JSON format.

    Args:
        fh (file): File handle to write to.
        pretty (bool): Sort keys and indent in output.
    """
    sjson = json.JSONEncoder().encode(self.json())
    if pretty:
        json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
    else:
        json.dump(json.loads(sjson), fh)
    return
def write(self, fh, pretty=True):
    """
    API niceness defaulting to composite.write_json().
    """
    return self.write_json(fh, pretty=pretty)
def json(self):
    """
    Return JSON representation of object.
    """
    data = {}
    for item in self._data:
        if isinstance(self._data[item], filetree):
            data[item] = self._data[item].json()
        else:
            data[item] = self._data[item]
    return data
def filelist(self):
    """
    Return list of files in filetree.
    """
    if len(self._filelist) == 0:
        for item in self._data:
            if isinstance(self._data[item], filetree):
                self._filelist.extend(self._data[item].filelist())
            else:
                self._filelist.append(self._data[item])
    return self._filelist
def prune(self, regex=r".*"):
    """
    Prune leaves of filetree according to specified regular expression.

    Args:
        regex (str): Regular expression to use in pruning tree.
    """
    return filetree(self.root, ignore=self.ignore, regex=regex)
def deref(self, ctx):
    """
    Returns the value this reference is pointing to. This method uses
    'ctx' to resolve the reference and return the value this reference
    references.

    If the call was already made, it returns a cached result.

    It also makes sure there's no cyclic reference, and if so raises
    CyclicReferenceError.
    """
    if self in ctx.call_nodes:
        raise CyclicReferenceError(ctx, self)
    if self in ctx.cached_results:
        return ctx.cached_results[self]
    try:
        ctx.call_nodes.add(self)
        ctx.call_stack.append(self)
        result = self.evaluate(ctx)
        ctx.cached_results[self] = result
        return result
    except:
        if ctx.exception_call_stack is None:
            ctx.exception_call_stack = list(ctx.call_stack)
        raise
    finally:
        ctx.call_stack.pop()
        ctx.call_nodes.remove(self)
def getModel(self):
    '''
        getModel - get the IndexedRedisModel associated with this list.
          If one was not provided in the constructor, it will be inferred
          from the first item in the list (if present).

        @return <None/IndexedRedisModel> - None if none could be found,
          otherwise the IndexedRedisModel type of the items in this list.

        @raises ValueError if the first item is not the expected type.
    '''
    if not self.mdl and len(self) > 0:
        mdl = self[0].__class__
        self.__validate_model(mdl)
        self.mdl = mdl
    return self.mdl
def delete(self):
    '''
        delete - Delete all objects in this list.

        @return <int> - Number of objects deleted
    '''
    if len(self) == 0:
        return 0
    mdl = self.getModel()
    return mdl.deleter.deleteMultiple(self)
def save(self):
    '''
        save - Save all objects in this list
    '''
    if len(self) == 0:
        return []
    mdl = self.getModel()
    return mdl.saver.save(self)
def reload(self):
    '''
        reload - Reload all objects in this list. Updates in-place.
          To just fetch all these objects again, use "refetch".

        @return - List (same order as current objects) of either exception
          (KeyError) if operation failed, or a dict of fields changed -> (old, new)
    '''
    if len(self) == 0:
        return []

    ret = []
    for obj in self:
        res = None
        try:
            res = obj.reload()
        except Exception as e:
            res = e
        ret.append(res)

    return ret
def refetch(self):
    '''
        refetch - Fetch a fresh copy of all items in this list.
          Returns a new list. To update in-place, use "reload".

        @return IRQueryableList<IndexedRedisModel> - List of fetched items
    '''
    if len(self) == 0:
        return IRQueryableList()

    mdl = self.getModel()
    pks = [item._id for item in self if item._id]

    return mdl.objects.getMultiple(pks)
def hashDictOneLevel(myDict):
    '''
        A function which can generate a hash of a one-level
          dict containing strings (like REDIS_CONNECTION_PARAMS)

        @param myDict <dict> - Dict with string keys and values

        @return <long> - Hash of myDict
    '''
    keys = [str(x) for x in myDict.keys()]
    keys.sort()

    lst = []
    for key in keys:
        lst.append(str(myDict[key]) + '__~~__')

    return '+_[,'.join(lst).__hash__()
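# Usage sketch: key order does not affect the hash because keys are sorted
# before values are concatenated, so equivalent connection-parameter dicts
# hash identically within a process (useful for caching connections).
params_a = {'host': 'localhost', 'port': 6379, 'db': 0}
params_b = {'db': 0, 'port': 6379, 'host': 'localhost'}
assert hashDictOneLevel(params_a) == hashDictOneLevel(params_b)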
def output(self, to=None, *args, **kwargs):
    '''Outputs to a stream (like a file or request)'''
    for blok in self:
        blok.output(to, *args, **kwargs)
    return self
def render(self, *args, **kwargs):
    '''Renders as a str'''
    render_to = StringIO()
    self.output(render_to, *args, **kwargs)
    return render_to.getvalue()
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
    '''Outputs to a stream (like a file or request)'''
    if formatted and self.blox:
        self.blox[0].output(to=to, formatted=True, indent=indent,
                            indentation=indentation, *args, **kwargs)
        for blok in self.blox[1:]:
            to.write('\n')
            to.write(indent * indentation)
            blok.output(to=to, formatted=True, indent=indent,
                        indentation=indentation, *args, **kwargs)
        if not indent:
            to.write('\n')
    else:
        for blok in self.blox:
            blok.output(to=to, *args, **kwargs)
    return self
def start_tag(self):
    '''Returns the element's HTML start tag'''
    direct_attributes = (attribute.render(self) for attribute in self.render_attributes)
    attributes = ()
    if hasattr(self, '_attributes'):
        attributes = ('{0}="{1}"'.format(key, value)
                      for key, value in self.attributes.items() if value)

    rendered_attributes = " ".join(filter(bool, chain(direct_attributes, attributes)))
    return '<{0}{1}{2}{3}>'.format(self.tag, ' ' if rendered_attributes else '',
                                   rendered_attributes,
                                   ' /' if self.tag_self_closes else "")
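# Illustrative output of start_tag (hypothetical element state): for an
# element with tag='img', attributes={'src': 'a.png'}, and
# tag_self_closes=True, the property would render:
#   <img src="a.png" />
# With tag_self_closes=False and no attributes, just: <img>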
def output(self, to=None, *args, **kwargs):
    '''Outputs to a stream (like a file or request)'''
    to.write(self.start_tag)
    if not self.tag_self_closes:
        to.write(self.end_tag)
def output(self, to=None, formatted=False, indent=0, indentation=' ', *args, **kwargs):
    '''Outputs to a stream (like a file or request)'''
    if formatted:
        to.write(self.start_tag)
        to.write('\n')
        if not self.tag_self_closes:
            for blok in self.blox:
                to.write(indentation * (indent + 1))
                blok.output(to=to, indent=indent + 1, formatted=True,
                            indentation=indentation, *args, **kwargs)
                to.write('\n')
            to.write(indentation * indent)
            to.write(self.end_tag)
        if not indentation:
            to.write('\n')
    else:
        to.write(self.start_tag)
        if not self.tag_self_closes:
            for blok in self.blox:
                blok.output(to=to, *args, **kwargs)
            to.write(self.end_tag)
def safe_repr(obj):
    """Returns a repr of an object and falls back to a minimal representation
    of type and ID if the call to repr raised an error.

    :param obj: object to safe repr
    :returns: repr string or '(type<id> repr error)' string
    :rtype: str
    """
    try:
        obj_repr = repr(obj)
    except:
        obj_repr = "({0}<{1}> repr error)".format(type(obj), id(obj))
    return obj_repr
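# Usage sketch: an object whose __repr__ raises still gets a printable
# fallback instead of propagating the error.
class _Broken(object):
    def __repr__(self):
        raise RuntimeError('boom')

safe_repr(_Broken())   # -> "(<class '..._Broken'><140...> repr error)"
safe_repr([1, 2, 3])   # -> '[1, 2, 3]'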
def output(self, to=None, formatted=False, *args, **kwargs):
    '''Outputs the set text'''
    to.write('<!DOCTYPE {0}>'.format(self.type))
def match_to_clinvar(genome_file, clin_file):
    """
    Match a genome VCF to variants in the ClinVar VCF file

    Acts as a generator, yielding tuples of:
    (ClinVarVCFLine, ClinVarAllele, zygosity)

    'zygosity' is a string and corresponds to the genome's zygosity for that
    ClinVarAllele. It can be either: 'Het' (heterozygous), 'Hom' (homozygous),
    or 'Hem' (hemizygous, e.g. X chromosome in XY individuals).
    """
    clin_curr_line = _next_line(clin_file)
    genome_curr_line = _next_line(genome_file)

    # Skip header lines, which start with a hash mark.
    while clin_curr_line.startswith('#'):
        clin_curr_line = _next_line(clin_file)
    while genome_curr_line.startswith('#'):
        genome_curr_line = _next_line(genome_file)

    # Advance through both files simultaneously to find matches
    while clin_curr_line and genome_curr_line:

        # Advance a file when positions aren't equal.
        clin_curr_pos = VCFLine.get_pos(clin_curr_line)
        genome_curr_pos = VCFLine.get_pos(genome_curr_line)
        try:
            if clin_curr_pos['chrom'] > genome_curr_pos['chrom']:
                genome_curr_line = _next_line(genome_file)
                continue
            elif clin_curr_pos['chrom'] < genome_curr_pos['chrom']:
                clin_curr_line = _next_line(clin_file)
                continue
            if clin_curr_pos['pos'] > genome_curr_pos['pos']:
                genome_curr_line = _next_line(genome_file)
                continue
            elif clin_curr_pos['pos'] < genome_curr_pos['pos']:
                clin_curr_line = _next_line(clin_file)
                continue
        except StopIteration:
            break

        # If we get here, start positions match.
        # Look for allele matching.
        genome_vcf_line = GenomeVCFLine(vcf_line=genome_curr_line,
                                        skip_info=True)
        # We can skip if genome has no allele information for this point.
        if not genome_vcf_line.genotype_allele_indexes:
            genome_curr_line = _next_line(genome_file)
            continue

        # Match only if ClinVar and Genome ref_alleles match.
        clinvar_vcf_line = ClinVarVCFLine(vcf_line=clin_curr_line)
        if not genome_vcf_line.ref_allele == clinvar_vcf_line.ref_allele:
            try:
                genome_curr_line = _next_line(genome_file)
                clin_curr_line = _next_line(clin_file)
                continue
            except StopIteration:
                break

        # Determine genome alleles and zygosity. Zygosity is assumed to be
        # one of: heterozygous, homozygous, or hemizygous.
        genotype_allele_indexes = genome_vcf_line.genotype_allele_indexes
        genome_alleles = [genome_vcf_line.alleles[x]
                          for x in genotype_allele_indexes]
        if len(genome_alleles) == 1:
            zygosity = 'Hem'
        elif len(genome_alleles) == 2:
            if genome_alleles[0].sequence == genome_alleles[1].sequence:
                zygosity = 'Hom'
                genome_alleles = [genome_alleles[0]]
            else:
                zygosity = 'Het'
        else:
            raise ValueError('This code only expects to work on genomes ' +
                             'with one or two alleles called at each ' +
                             'location. The following line violates this: ' +
                             str(genome_vcf_line))

        # Look for matches to ClinVar alleles.
        for genome_allele in genome_alleles:
            for allele in clinvar_vcf_line.alleles:
                if genome_allele.sequence == allele.sequence:
                    # The 'records' attribute is specific to ClinVarAlleles.
                    if hasattr(allele, 'records'):
                        yield (genome_vcf_line, allele, zygosity)

        # Done matching, move on.
        try:
            genome_curr_line = _next_line(genome_file)
            clin_curr_line = _next_line(clin_file)
        except StopIteration:
            break
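# Usage sketch (file names are hypothetical): iterate over matches between
# a personal genome VCF and a ClinVar VCF, both sorted by position.
with open('genome.vcf') as genome_file, open('clinvar.vcf') as clin_file:
    for vcf_line, allele, zygosity in match_to_clinvar(genome_file, clin_file):
        print(zygosity, allele.as_dict())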
def as_dict(self):
    """Return Allele data as dict object."""
    self_as_dict = dict()
    self_as_dict['sequence'] = self.sequence
    if hasattr(self, 'frequency'):
        self_as_dict['frequency'] = self.frequency
    return self_as_dict
def _parse_allele_data(self):
    """Create list of Alleles from VCF line data"""
    return [Allele(sequence=x) for x in
            [self.ref_allele] + self.alt_alleles]
def _parse_info(self, info_field):
    """Parse the VCF info field"""
    info = dict()
    for item in info_field.split(';'):
        # Info fields may be "foo=bar" or just "foo".
        # For the first case, store key "foo" with value "bar".
        # For the second case, store key "foo" with value True.
        info_item_data = item.split('=')
        # If length is one, just store as a key with value = True.
        if len(info_item_data) == 1:
            info[info_item_data[0]] = True
        elif len(info_item_data) == 2:
            info[info_item_data[0]] = info_item_data[1]
    return info
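# Example for _parse_info() (hypothetical INFO column): flag entries become
# True, key=value entries keep their string value.
#   self._parse_info('DP=30;AF=0.5;DB')
#   # -> {'DP': '30', 'AF': '0.5', 'DB': True}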
def as_dict(self):
    """Dict representation of parsed VCF data"""
    self_as_dict = {'chrom': self.chrom,
                    'start': self.start,
                    'ref_allele': self.ref_allele,
                    'alt_alleles': self.alt_alleles,
                    'alleles': [x.as_dict() for x in self.alleles]}
    try:
        self_as_dict['info'] = self.info
    except AttributeError:
        pass
    return self_as_dict
def get_pos(vcf_line):
    """
    Very lightweight parsing of a vcf line to get position.

    Returns a dict containing:
    'chrom': index of chromosome (int), indicates sort order
    'pos': position on chromosome (int)
    """
    if not vcf_line:
        return None
    vcf_data = vcf_line.strip().split('\t')
    return_data = dict()
    return_data['chrom'] = CHROM_INDEX[vcf_data[0]]
    return_data['pos'] = int(vcf_data[1])
    return return_data
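# Example for get_pos() (assumes CHROM_INDEX maps chromosome names to an
# integer sort order, e.g. CHROM_INDEX['1'] == 1):
#   get_pos('1\t12345\t.\tA\tG\t50\tPASS\t.')
#   # -> {'chrom': 1, 'pos': 12345}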
def _toStorage(self, value):
    '''
        _toStorage - Convert the value to a string representation for storage.

        @param value - The value of the item to convert

        @return A string value suitable for storing.
    '''
    for chainedField in self.chainedFields:
        value = chainedField.toStorage(value)
    return value
def _fromStorage(self, value):
    '''
        _fromStorage - Convert the value from storage (string) to the value type.

        @return - The converted value, or "irNull" if no value was defined
          (and field type is not default/string)
    '''
    for chainedField in reversed(self.chainedFields):
        value = chainedField._fromStorage(value)
    return value
def _checkCanIndex(self):
    '''
        _checkCanIndex - Check if we CAN index (if all fields are indexable).
          Also checks the right-most field for "hashIndex" - if it needs to
          hash we will hash.
    '''
    # NOTE: We can't just check the right-most field. For types like pickle
    #   that don't support indexing, they don't support it because python2
    #   and python3 have different results for pickle.dumps on the same
    #   object. So if we have a field chain like Pickle, Compressed then we
    #   will have two different results.
    if not self.chainedFields:
        return (False, False)

    for chainedField in self.chainedFields:
        if chainedField.CAN_INDEX is False:
            return (False, False)

    return (True, self.chainedFields[-1].hashIndex)
def byte_length(self):
    """
    :returns: sum of lengths of all ranges or None if one of the ranges is open
    :rtype: int, float or None
    """
    # Use `total` rather than shadowing the built-in sum().
    total = 0
    for r in self:
        if r.is_open():
            return None
        total += r.byte_length()
    return total
def has_overlaps(self):
    """
    :returns: True if one or more range in the list overlaps with another
    :rtype: bool
    """
    sorted_list = sorted(self)
    for i in range(0, len(sorted_list) - 1):
        if sorted_list[i].overlaps(sorted_list[i + 1]):
            return True
    return False
def max_stop(self):
    """
    :returns: maximum stop in list or None if there's at least one open range
    :rtype: int, float or None
    """
    m = 0
    for r in self:
        if r.is_open():
            return None
        m = max(m, r.stop)
    return m