Dataset schema (column name: type, observed value sizes):

repo: string, 7 to 55 chars
path: string, 4 to 127 chars
func_name: string, 1 to 88 chars
original_string: string, 75 to 19.8k chars
language: string, 1 distinct value
code: string, 75 to 19.8k chars
code_tokens: list
docstring: string, 3 to 17.3k chars
docstring_tokens: list
sha: string, 40 chars
url: string, 87 to 242 chars
partition: string, 1 distinct value
NaPs/Kolekto
kolekto/commands/importer.py
copy
def copy(tree, source_filename):
    """ Copy a file into the tree, show a progress bar during the operation,
        and return the SHA-1 sum of the copied file.
    """
    filehash = sha1()
    with printer.progress(os.path.getsize(source_filename)) as update:
        with open(source_filename, 'rb') as fsource:
            with NamedTemporaryFile(dir=os.path.join(tree, '.kolekto', 'movies'), delete=False) as fdestination:
                # Copy the source into the temporary destination:
                while True:
                    buf = fsource.read(10 * 1024)
                    if not buf:
                        break
                    filehash.update(buf)
                    fdestination.write(buf)
                    update(len(buf))
    # Rename the file to its final name or raise an error if
    # the file already exists:
    dest = os.path.join(tree, '.kolekto', 'movies', filehash.hexdigest())
    if os.path.exists(dest):
        raise IOError('This file already exists in tree (%s)' % filehash.hexdigest())
    else:
        os.rename(fdestination.name, dest)
    return filehash.hexdigest()
python
Copy a file into the tree, show a progress bar during the operation, and return the SHA-1 sum of the copied file.
[ "Copy", "file", "in", "tree", "show", "a", "progress", "bar", "during", "operations", "and", "return", "the", "sha1", "sum", "of", "copied", "file", "." ]
29c5469da8782780a06bf9a76c59414bb6fd8fe3
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L36-L60
train
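The function above hashes while it copies, so the source file is read only once. A minimal self-contained sketch of that chunked read-hash pattern using only the standard library (the helper name and chunk size are illustrative, not part of Kolekto):

from hashlib import sha1

def sha1_of_file(path, chunk_size=10 * 1024):
    """Stream a file through SHA-1 without loading it fully into memory."""
    digest = sha1()
    with open(path, 'rb') as f:
        while True:
            buf = f.read(chunk_size)
            if not buf:
                break
            digest.update(buf)
    return digest.hexdigest()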
NaPs/Kolekto
kolekto/commands/importer.py
list_attachments
def list_attachments(fullname):
    """ List attachments for the specified fullname.
    """
    parent, filename = os.path.split(fullname)
    filename_without_ext, ext = os.path.splitext(filename)
    attachments = []
    for found_filename in os.listdir(parent):
        found_filename_without_ext, _ = os.path.splitext(found_filename)
        if filename_without_ext == found_filename_without_ext and found_filename != filename:
            attachments.append(os.path.join(parent, found_filename))
    return attachments
python
List attachments for the specified fullname.
[ "List", "attachment", "for", "the", "specified", "fullname", "." ]
29c5469da8782780a06bf9a76c59414bb6fd8fe3
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/importer.py#L88-L98
train
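A quick illustration of the same-stem rule the function implements, assuming list_attachments from above is in scope and using a throwaway directory:

import os
import tempfile

d = tempfile.mkdtemp()
for name in ('movie.mkv', 'movie.srt', 'movie.nfo', 'other.srt'):
    open(os.path.join(d, name), 'w').close()

# Siblings sharing the stem 'movie' count as attachments; 'other.srt' does not:
print(sorted(list_attachments(os.path.join(d, 'movie.mkv'))))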
IRC-SPHERE/HyperStream
hyperstream/channels/assets_channel.py
AssetsChannel.write_to_stream
def write_to_stream(self, stream_id, data, sandbox=None):
    """
    Write to the stream

    :param stream_id: The stream identifier
    :param data: The stream instances
    :param sandbox: The sandbox for this stream
    :type stream_id: StreamId
    :return: None
    :raises: NotImplementedError
    """
    if sandbox is not None:
        raise NotImplementedError

    if stream_id not in self.streams:
        raise StreamNotFoundError("Stream with id '{}' does not exist".format(stream_id))

    writer = self.get_stream_writer(self.streams[stream_id])

    if isinstance(data, StreamInstance):
        data = [data]

    for instance in data:
        if not isinstance(instance, StreamInstance):
            raise ValueError("Expected StreamInstance, got {}".format(str(type(instance))))
        writer(instance)
python
Write to the stream

:param stream_id: The stream identifier
:param data: The stream instances
:param sandbox: The sandbox for this stream
:type stream_id: StreamId
:return: None
:raises: NotImplementedError
[ "Write", "to", "the", "stream" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/assets_channel.py#L82-L107
train
dsoprea/PySchedules
pyschedules/xml_callbacks.py
XmlCallbacks._startXTVDNode
def _startXTVDNode(self, name, attrs):
    """Process the start of the top-level xtvd node"""
    schemaVersion = attrs.get('schemaVersion')
    validFrom = self._parseDateTime(attrs.get('from'))
    validTo = self._parseDateTime(attrs.get('to'))
    self._progress.printMsg('Parsing version %s data from %s to %s'
                            % (schemaVersion, validFrom.strftime('%Y/%m/%d'),
                               validTo.strftime('%Y/%m/%d')))
python
Process the start of the top-level xtvd node
[ "Process", "the", "start", "of", "the", "top", "-", "level", "xtvd", "node" ]
e5aae988fad90217f72db45f93bf69839f4d75e7
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L68-L77
train
dsoprea/PySchedules
pyschedules/xml_callbacks.py
XmlCallbacks.startElement
def startElement(self, name, attrs):
    """Callback run at the start of each XML element"""
    self._contextStack.append(self._context)
    self._contentList = []

    if name in self._statusDict:
        self._itemTag, itemType = self._statusDict[name]
        self._progress.startItem(itemType)
    elif name == self._itemTag:
        self._error = False
        self._progress.newItem()

    try:
        if self._context == 'root':
            if name == 'xtvd':
                self._context = 'xtvd'
                self._startXTVDNode(name, attrs)
        elif self._context == 'xtvd':
            self._context = name
        elif self._context == 'stations':
            self._startStationsNode(name, attrs)
        elif self._context == 'lineups':
            self._startLineupsNode(name, attrs)
        elif self._context == 'schedules':
            self._startSchedulesNode(name, attrs)
        elif self._context == 'programs':
            self._startProgramsNode(name, attrs)
        elif self._context == 'productionCrew':
            self._startProductionCrewNode(name, attrs)
        elif self._context == 'genres':
            self._startGenresNode(name, attrs)
    except Exception as e:
        self._error = True
        self._progress.printMsg(str(e), error=True)
python
Callback run at the start of each XML element
[ "Callback", "run", "at", "the", "start", "of", "each", "XML", "element" ]
e5aae988fad90217f72db45f93bf69839f4d75e7
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L289-L323
train
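The handler keeps a stack of contexts so that endElement can restore the parent context on the way back out. A stripped-down, runnable sketch of that SAX dispatch pattern (the class name and test document are illustrative):

import io
import xml.sax

class ContextTracker(xml.sax.ContentHandler):
    """Track the current element context with a stack, XmlCallbacks-style."""
    def __init__(self):
        xml.sax.ContentHandler.__init__(self)
        self._context = 'root'
        self._context_stack = []

    def startElement(self, name, attrs):
        self._context_stack.append(self._context)
        self._context = name

    def endElement(self, name):
        self._context = self._context_stack.pop()

xml.sax.parse(io.StringIO('<xtvd><stations/></xtvd>'), ContextTracker())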
dsoprea/PySchedules
pyschedules/xml_callbacks.py
XmlCallbacks.endElement
def endElement(self, name):
    """Callback run at the end of each XML element"""
    content = ''.join(self._contentList)

    if name == 'xtvd':
        self._progress.endItems()
    else:
        try:
            if self._context == 'stations':
                self._endStationsNode(name, content)
            elif self._context == 'lineups':
                self._endLineupsNode(name, content)
            elif self._context == 'schedules':
                self._endSchedulesNode(name, content)
            elif self._context == 'programs':
                self._endProgramsNode(name, content)
            elif self._context == 'productionCrew':
                self._endProductionCrewNode(name, content)
            elif self._context == 'genres':
                self._endGenresNode(name, content)
        except Exception as e:
            self._error = True
            self._progress.printMsg(str(e), error=True)

    self._context = self._contextStack.pop()
python
Callback run at the end of each XML element
[ "Callback", "run", "at", "the", "end", "of", "each", "XML", "element" ]
e5aae988fad90217f72db45f93bf69839f4d75e7
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L330-L355
train
dsoprea/PySchedules
pyschedules/xml_callbacks.py
XmlCallbacks.error
def error(self, msg):
    """Callback run when a recoverable parsing error occurs"""
    self._error = True
    self._progress.printMsg('XML parse error: %s' % msg, error=True)
python
Callback run when a recoverable parsing error occurs
[ "Callback", "run", "when", "a", "recoverable", "parsing", "error", "occurs" ]
e5aae988fad90217f72db45f93bf69839f4d75e7
https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/xml_callbacks.py#L357-L361
train
NaPs/Kolekto
kolekto/commands/link.py
format_all
def format_all(format_string, env):
    """ Format the input string using each possible combination of lists
        in the provided environment. Yields the formatted strings.
    """
    prepared_env = parse_pattern(format_string, env,
                                 lambda x, y: [FormatWrapper(x, z) for z in y])
    # Generate each possible combination, format the string with it and yield
    # the resulting string:
    for field_values in product(*prepared_env.itervalues()):
        format_env = dict(izip(prepared_env.iterkeys(), field_values))
        yield format_string.format(**format_env)
python
Format the input string using each possible combination of lists in the provided environment. Yields the formatted strings.
[ "Format", "the", "input", "string", "using", "each", "possible", "combination", "of", "lists", "in", "the", "provided", "environment", ".", "Returns", "a", "list", "of", "formated", "strings", "." ]
29c5469da8782780a06bf9a76c59414bb6fd8fe3
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/link.py#L26-L36
train
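The core trick is itertools.product over the value lists followed by str.format. A Python 3 sketch of the same idea without Kolekto's FormatWrapper and parse_pattern machinery (the example environment is made up):

from itertools import product

env = {'title': ['Alien'], 'quality': ['720p', '1080p']}
for values in product(*env.values()):
    print('{title} [{quality}]'.format(**dict(zip(env.keys(), values))))
# Alien [720p]
# Alien [1080p]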
tamasgal/km3pipe
km3pipe/utils/rba.py
RBAPrompt.preloop
def preloop(self):
    """Initialization before prompting user for commands.

    Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
    """
    Cmd.preloop(self)    # sets up command completion
    self._hist = []      # No history yet
    self._locals = {}    # Initialize execution namespace for user
    self._globals = {}
python
Initialization before prompting user for commands. Despite the claims in the Cmd documentation, Cmd.preloop() is not a stub.
[ "Initialization", "before", "prompting", "user", "for", "commands", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/rba.py#L46-L54
train
IRC-SPHERE/HyperStream
hyperstream/channels/base_channel.py
BaseChannel.execute_tool
def execute_tool(self, stream, interval):
    """
    Executes the stream's tool over the given time interval

    :param stream: the stream reference
    :param interval: the time interval
    :return: None
    """
    if interval.end > self.up_to_timestamp:
        raise ValueError(
            'The stream is not available after ' + str(self.up_to_timestamp) + ' and cannot be calculated')

    required_intervals = TimeIntervals([interval]) - stream.calculated_intervals

    if not required_intervals.is_empty:
        for interval in required_intervals:
            stream.tool.execute(stream.input_streams, stream, interval)
            stream.calculated_intervals += interval

        if not stream.required_intervals.is_empty:
            raise RuntimeError('Tool execution did not cover the specified time interval.')
python
Executes the stream's tool over the given time interval

:param stream: the stream reference
:param interval: the time interval
:return: None
[ "Executes", "the", "stream", "s", "tool", "over", "the", "given", "time", "interval" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L46-L65
train
IRC-SPHERE/HyperStream
hyperstream/channels/base_channel.py
BaseChannel.get_or_create_stream
def get_or_create_stream(self, stream_id, try_create=True):
    """
    Helper function to get a stream or create one if it's not already defined

    :param stream_id: The stream id
    :param try_create: Whether to try to create the stream if not found
    :return: The stream object
    """
    stream_id = get_stream_id(stream_id)
    if stream_id in self.streams:
        logging.debug("found {}".format(stream_id))
        return self.streams[stream_id]
    elif try_create:
        # Try to create the stream
        logging.debug("creating {}".format(stream_id))
        return self.create_stream(stream_id=stream_id)
python
Helper function to get a stream or create one if it's not already defined

:param stream_id: The stream id
:param try_create: Whether to try to create the stream if not found
:return: The stream object
[ "Helper", "function", "to", "get", "a", "stream", "or", "create", "one", "if", "it", "s", "not", "already", "defined" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L76-L91
train
IRC-SPHERE/HyperStream
hyperstream/channels/base_channel.py
BaseChannel.find_streams
def find_streams(self, **kwargs):
    """
    Finds streams with the given meta data values. Useful for debugging purposes.

    :param kwargs: The meta data as keyword arguments
    :return: The streams found
    """
    found = {}

    if 'name' in kwargs:
        name = kwargs.pop('name')
    else:
        name = None

    for stream_id, stream in self.streams.items():
        if name is not None and stream_id.name != name:
            continue
        d = dict(stream_id.meta_data)
        if all(k in d and d[k] == str(v) for k, v in kwargs.items()):
            found[stream_id] = stream

    return found
python
Finds streams with the given meta data values. Useful for debugging purposes.

:param kwargs: The meta data as keyword arguments
:return: The streams found
[ "Finds", "streams", "with", "the", "given", "meta", "data", "values", ".", "Useful", "for", "debugging", "purposes", "." ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L100-L121
train
IRC-SPHERE/HyperStream
hyperstream/channels/base_channel.py
BaseChannel.find_stream
def find_stream(self, **kwargs):
    """
    Finds a single stream with the given meta data values. Useful for debugging purposes.

    :param kwargs: The meta data as keyword arguments
    :return: The stream found
    """
    found = list(self.find_streams(**kwargs).values())
    if not found:
        raise StreamNotFoundError(kwargs)
    if len(found) > 1:
        raise MultipleStreamsFoundError(kwargs)
    return found[0]
python
Finds a single stream with the given meta data values. Useful for debugging purposes.

:param kwargs: The meta data as keyword arguments
:return: The stream found
[ "Finds", "a", "single", "stream", "with", "the", "given", "meta", "data", "values", ".", "Useful", "for", "debugging", "purposes", "." ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/base_channel.py#L123-L135
train
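find_stream layers an "exactly one result" check on top of find_streams. The same lookup pattern in a generic, dependency-free form (the names are illustrative, not part of HyperStream):

def find_one(items, predicate):
    """Return the single item matching predicate, or raise."""
    found = [x for x in items if predicate(x)]
    if not found:
        raise LookupError('no match')
    if len(found) > 1:
        raise LookupError('multiple matches')
    return found[0]

assert find_one([1, 2, 3], lambda x: x == 2) == 2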
tamasgal/km3pipe
km3pipe/io/daq.py
DAQPump.next_blob
def next_blob(self):
    """Get the next frame from file"""
    blob_file = self.blob_file
    try:
        preamble = DAQPreamble(file_obj=blob_file)
    except struct.error:
        raise StopIteration
    try:
        data_type = DATA_TYPES[preamble.data_type]
    except KeyError:
        log.error("Unknown datatype: {0}".format(preamble.data_type))
        data_type = 'Unknown'
    blob = Blob()
    blob[data_type] = None
    blob['DAQPreamble'] = preamble
    if data_type == 'DAQSummaryslice':
        daq_frame = DAQSummaryslice(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    elif data_type == 'DAQEvent':
        daq_frame = DAQEvent(blob_file)
        blob[data_type] = daq_frame
        blob['DAQHeader'] = daq_frame.header
    else:
        log.warning(
            "Skipping DAQ frame with data type code '{0}'.".format(
                preamble.data_type
            )
        )
        blob_file.seek(preamble.length - DAQPreamble.size, 1)
    return blob
python
Get the next frame from file
[ "Get", "the", "next", "frame", "from", "file" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L145-L179
train
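Each DAQ frame starts with a preamble carrying its total length and data type, which is what allows unknown frames to be skipped with a relative seek. A toy version of that length-prefixed framing (the two-int preamble layout here is an assumption for illustration, not the actual KM3NeT format):

import io
import struct

PREAMBLE = struct.Struct('<ii')    # (length, data_type), little-endian; illustrative layout

frame = PREAMBLE.pack(16, 101) + b'payload!'    # 8-byte preamble + 8-byte body
stream = io.BytesIO(frame)

length, data_type = PREAMBLE.unpack(stream.read(PREAMBLE.size))
if data_type != 101:
    stream.seek(length - PREAMBLE.size, 1)      # skip an unknown frame
else:
    payload = stream.read(length - PREAMBLE.size)
    assert payload == b'payload!'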
tamasgal/km3pipe
km3pipe/io/daq.py
DAQPump.seek_to_frame
def seek_to_frame(self, index):
    """Move file pointer to the frame with given index."""
    pointer_position = self.frame_positions[index]
    self.blob_file.seek(pointer_position, 0)
python
Move file pointer to the frame with given index.
[ "Move", "file", "pointer", "to", "the", "frame", "with", "given", "index", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L181-L184
train
tamasgal/km3pipe
km3pipe/io/daq.py
DAQPreamble._parse_file
def _parse_file(self, file_obj):
    """Directly read from file handler.

    Note that this will move the file pointer.
    """
    byte_data = file_obj.read(self.size)
    self._parse_byte_data(byte_data)
python
Directly read from file handler. Note that this will move the file pointer.
[ "Directly", "read", "from", "file", "handler", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L409-L416
train
tamasgal/km3pipe
km3pipe/io/daq.py
DAQSummaryslice._parse_summary_frames
def _parse_summary_frames(self, file_obj):
    """Iterate through the byte data and fill the summary_frames"""
    for _ in range(self.n_summary_frames):
        dom_id = unpack('<i', file_obj.read(4))[0]
        dq_status = file_obj.read(4)    # probably dom status?  # noqa
        dom_status = unpack('<iiii', file_obj.read(16))
        raw_rates = unpack('b' * 31, file_obj.read(31))
        pmt_rates = [self._get_rate(value) for value in raw_rates]
        self.summary_frames[dom_id] = pmt_rates
        self.dq_status[dom_id] = dq_status
        self.dom_status[dom_id] = dom_status
        self.dom_rates[dom_id] = np.sum(pmt_rates)
python
Iterate through the byte data and fill the summary_frames
[ "Iterate", "through", "the", "byte", "data", "and", "fill", "the", "summary_frames" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L499-L510
train
tamasgal/km3pipe
km3pipe/io/daq.py
DAQSummaryslice._get_rate
def _get_rate(self, value):
    """Return the rate in Hz from the short int value"""
    if value == 0:
        return 0
    else:
        return MINIMAL_RATE_HZ * math.exp(value * self._get_factor())
python
Return the rate in Hz from the short int value
[ "Return", "the", "rate", "in", "Hz", "from", "the", "short", "int", "value" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L512-L517
train
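The compressed byte is mapped back onto an exponential scale between a minimal and a maximal rate. A standalone sketch of that decompression; the constants and the factor formula are illustrative assumptions, not the values km3pipe actually uses:

import math

MINIMAL_RATE_HZ = 2.0
MAXIMAL_RATE_HZ = 2.0e6    # assumed ceiling, for illustration only
FACTOR = math.log(MAXIMAL_RATE_HZ / MINIMAL_RATE_HZ) / 255.0

def rate_hz(value):
    """Map a compressed byte (0-255) back to a rate in Hz."""
    return 0 if value == 0 else MINIMAL_RATE_HZ * math.exp(value * FACTOR)

assert rate_hz(0) == 0
assert abs(rate_hz(255) - MAXIMAL_RATE_HZ) < 1.0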
tamasgal/km3pipe
km3pipe/io/daq.py
DAQEvent._parse_triggered_hits
def _parse_triggered_hits(self, file_obj):
    """Parse and store triggered hits."""
    for _ in range(self.n_triggered_hits):
        dom_id, pmt_id = unpack('<ib', file_obj.read(5))
        tdc_time = unpack('>I', file_obj.read(4))[0]
        tot = unpack('<b', file_obj.read(1))[0]
        trigger_mask = unpack('<Q', file_obj.read(8))
        self.triggered_hits.append(
            (dom_id, pmt_id, tdc_time, tot, trigger_mask)
        )
python
Parse and store triggered hits.
[ "Parse", "and", "store", "triggered", "hits", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L556-L565
train
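Note the mixed byte order above: DOM ID, ToT and trigger mask are little-endian while the TDC time is big-endian. A synthetic round trip of that exact unpack sequence:

from struct import pack, unpack

raw = pack('<ib', 42, 7) + pack('>I', 1000) + pack('<b', 25) + pack('<Q', 3)

dom_id, pmt_id = unpack('<ib', raw[0:5])
tdc_time = unpack('>I', raw[5:9])[0]
tot = unpack('<b', raw[9:10])[0]
trigger_mask = unpack('<Q', raw[10:18])    # kept as a 1-tuple, as the code above stores it
assert (dom_id, pmt_id, tdc_time, tot, trigger_mask) == (42, 7, 1000, 25, (3,))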
tamasgal/km3pipe
km3pipe/io/daq.py
DAQEvent._parse_snapshot_hits
def _parse_snapshot_hits(self, file_obj):
    """Parse and store snapshot hits."""
    for _ in range(self.n_snapshot_hits):
        dom_id, pmt_id = unpack('<ib', file_obj.read(5))
        tdc_time = unpack('>I', file_obj.read(4))[0]
        tot = unpack('<b', file_obj.read(1))[0]
        self.snapshot_hits.append((dom_id, pmt_id, tdc_time, tot))
python
Parse and store snapshot hits.
[ "Parse", "and", "store", "snapshot", "hits", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/daq.py#L567-L573
train
tamasgal/km3pipe
km3pipe/utils/runtable.py
runtable
def runtable(det_id, n=5, run_range=None, compact=False, sep='\t', regex=None):
    """Print the run table of the last `n` runs for the given detector"""
    db = kp.db.DBManager()
    df = db.run_table(det_id)

    if run_range is not None:
        try:
            from_run, to_run = [int(r) for r in run_range.split('-')]
        except ValueError:
            log.critical("Please specify a valid range (e.g. 3100-3200)!")
            raise SystemExit
        else:
            df = df[(df.RUN >= from_run) & (df.RUN <= to_run)]

    if regex is not None:
        try:
            re.compile(regex)
        except re.error:
            log.error("Invalid regex!")
            return
        df = df[df['RUNSETUPNAME'].str.contains(regex) |
                df['RUNSETUPID'].str.contains(regex)]

    if n is not None:
        df = df.tail(n)

    if compact:
        df = df[['RUN', 'DATETIME', 'RUNSETUPNAME']]

    df.to_csv(sys.stdout, sep=sep)
python
Print the run table of the last `n` runs for the given detector
[ "Print", "the", "run", "table", "of", "the", "last", "n", "runs", "for", "given", "detector" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/runtable.py#L35-L65
train
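The filtering steps compose as plain pandas operations. A toy run table showing the range and regex filters (the data is made up):

import pandas as pd

df = pd.DataFrame({
    'RUN': [3100, 3150, 3200, 3250],
    'RUNSETUPNAME': ['calibration', 'physics', 'physics', 'test'],
})
df = df[(df.RUN >= 3100) & (df.RUN <= 3200)]        # run range filter
df = df[df['RUNSETUPNAME'].str.contains('phys')]    # regex filter
print(df.tail(5).to_csv(sep='\t'))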
tamasgal/km3pipe
km3modules/ahrs.py
_extract_calibration
def _extract_calibration(xroot):
    """Extract AHRS calibration information from XML root.

    Parameters
    ----------
    xroot: XML root

    Returns
    -------
    Aoff: numpy.array with shape(3,)
    Arot: numpy.array with shape(3,3)
    Hoff: numpy.array with shape(3,)
    Hrot: numpy.array with shape(3,3)

    """
    names = [c.text for c in xroot.findall(".//Name")]
    val = [[i.text for i in c] for c in xroot.findall(".//Values")]

    # The fields have to be reindexed; these are the index mappings
    col_ic = [int(v) for v in val[names.index("AHRS_Matrix_Column(-)")]]
    try:
        row_ic = [int(v) for v in val[names.index("AHRS_Matrix_Row(-)")]]
    except ValueError:
        row_ic = [2, 2, 2, 1, 1, 1, 0, 0, 0]
    try:
        vec_ic = [int(v) for v in val[names.index("AHRS_Vector_Index(-)")]]
    except ValueError:
        vec_ic = [2, 1, 0]

    Aoff_ix = names.index("AHRS_Acceleration_Offset(g/ms^2-)")
    Arot_ix = names.index("AHRS_Acceleration_Rotation(-)")
    Hrot_ix = names.index("AHRS_Magnetic_Rotation(-)")

    Aoff = np.array(val[Aoff_ix])[vec_ic].astype(float)
    Arot = np.array(val[Arot_ix]).reshape(3, 3)[col_ic, row_ic] \
        .reshape(3, 3).astype(float)
    Hrot = np.array(val[Hrot_ix]).reshape(3, 3)[col_ic, row_ic] \
        .reshape(3, 3).astype(float)

    Hoff = []
    for q in 'XYZ':
        values = []
        for t in ('Min', 'Max'):
            ix = names.index("AHRS_Magnetic_{}{}(G-)".format(q, t))
            values.append(float(val[ix][0]))
        Hoff.append(sum(values) / 2.)
    Hoff = np.array(Hoff)

    return Aoff, Arot, Hoff, Hrot
python
Extract AHRS calibration information from XML root.

Parameters
----------
xroot: XML root

Returns
-------
Aoff: numpy.array with shape(3,)
Arot: numpy.array with shape(3,3)
Hoff: numpy.array with shape(3,)
Hrot: numpy.array with shape(3,3)
[ "Extract", "AHRS", "calibration", "information", "from", "XML", "root", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/ahrs.py#L207-L256
train
tamasgal/km3pipe
km3modules/ahrs.py
AHRSCalibrator.calibrate
def calibrate(self):
    """Calculate yaw, pitch and roll from the median of A and H.

    After successful calibration, `self.A` and `self.H` are reset.
    DOMs with missing AHRS pre-calibration data are skipped.

    Returns
    -------
    dict: key=dom_id, value=tuple: (timestamp, du, floor, yaw, pitch, roll)

    """
    now = time.time()
    dom_ids = self.A.keys()
    print("Calibrating AHRS from median A and H for {} DOMs.".format(len(dom_ids)))
    calibrations = {}
    for dom_id in dom_ids:
        print("Calibrating DOM ID {}".format(dom_id))
        clb_upi = self.db.doms.via_dom_id(dom_id).clb_upi
        ahrs_calib = get_latest_ahrs_calibration(clb_upi)
        if ahrs_calib is None:
            log.warning("AHRS calibration missing for '{}'".format(dom_id))
            continue
        du, floor, _ = self.detector.doms[dom_id]
        A = np.median(self.A[dom_id], axis=0)
        H = np.median(self.H[dom_id], axis=0)
        cyaw, cpitch, croll = fit_ahrs(A, H, *ahrs_calib)
        calibrations[dom_id] = (now, du, floor, cyaw, cpitch, croll)
    self.A = defaultdict(list)
    self.H = defaultdict(list)
    return calibrations
python
Calculate yaw, pitch and roll from the median of A and H.

After successful calibration, `self.A` and `self.H` are reset.
DOMs with missing AHRS pre-calibration data are skipped.

Returns
-------
dict: key=dom_id, value=tuple: (timestamp, du, floor, yaw, pitch, roll)
[ "Calculate", "yaw", "pitch", "and", "roll", "from", "the", "median", "of", "A", "and", "H", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/ahrs.py#L75-L109
train
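Using the per-axis median over many samples makes the fit robust to outlier readings; np.median with axis=0 reduces an (n_samples, 3) array to a single 3-vector:

import numpy as np

# Three fake accelerometer samples (rows), with one outlier in the last axis:
A = np.array([[0.1, 0.0, 9.8],
              [0.2, 0.1, 9.7],
              [0.1, 0.0, 42.0]])
print(np.median(A, axis=0))    # per-axis median: [0.1, 0.0, 9.8]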
NaPs/Kolekto
kolekto/commands/stats.py
humanize_filesize
def humanize_filesize(value):
    """ Return a humanized file size.
    """
    value = float(value)
    if value == 1:
        return '1 Byte'
    elif value < 1024:
        return '%d Bytes' % value
    for i, s in enumerate(SUFFIXES):
        unit = 1024 ** (i + 2)
        if value < unit:
            return '%.1f %s' % ((1024 * value / unit), s)
    return '%.1f %s' % ((1024 * value / unit), s)
python
def humanize_filesize(value): """ Return an humanized file size. """ value = float(value) if value == 1: return '1 Byte' elif value < 1024: return '%d Bytes' % value elif value < 1024: return '%dB' % value for i, s in enumerate(SUFFIXES): unit = 1024 ** (i + 2) if value < unit: return '%.1f %s' % ((1024 * value / unit), s) return '%.1f %s' % ((1024 * value / unit), s)
[ "def", "humanize_filesize", "(", "value", ")", ":", "value", "=", "float", "(", "value", ")", "if", "value", "==", "1", ":", "return", "'1 Byte'", "elif", "value", "<", "1024", ":", "return", "'%d Bytes'", "%", "value", "elif", "value", "<", "1024", ":", "return", "'%dB'", "%", "value", "for", "i", ",", "s", "in", "enumerate", "(", "SUFFIXES", ")", ":", "unit", "=", "1024", "**", "(", "i", "+", "2", ")", "if", "value", "<", "unit", ":", "return", "'%.1f %s'", "%", "(", "(", "1024", "*", "value", "/", "unit", ")", ",", "s", ")", "return", "'%.1f %s'", "%", "(", "(", "1024", "*", "value", "/", "unit", ")", ",", "s", ")" ]
Return a humanized file size.
[ "Return", "an", "humanized", "file", "size", "." ]
29c5469da8782780a06bf9a76c59414bb6fd8fe3
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/stats.py#L14-L31
train
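A usage sketch for the cleaned-up function above. `SUFFIXES` is a module-level constant in kolekto/commands/stats.py whose values are not shown in this entry, so a plausible stand-in is assumed:

SUFFIXES = ['kB', 'MB', 'GB', 'TB']  # assumed stand-in for the module constant

print(humanize_filesize(1))              # 1 Byte
print(humanize_filesize(512))            # 512 Bytes
print(humanize_filesize(2048))           # 2.0 kB
print(humanize_filesize(5 * 1024 ** 2))  # 5.0 MB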
NaPs/Kolekto
kolekto/commands/stats.py
format_top
def format_top(counter, top=3): """ Format the top entries of a counter. """ items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top) return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in items)
python
def format_top(counter, top=3): """ Format the top entries of a counter. """ items = islice(reversed(sorted(counter.iteritems(), key=lambda x: x[1])), 0, top) return u'; '.join(u'{g} ({nb})'.format(g=g, nb=nb) for g, nb in items)
[ "def", "format_top", "(", "counter", ",", "top", "=", "3", ")", ":", "items", "=", "islice", "(", "reversed", "(", "sorted", "(", "counter", ".", "iteritems", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", ")", ",", "0", ",", "top", ")", "return", "u'; '", ".", "join", "(", "u'{g} ({nb})'", ".", "format", "(", "g", "=", "g", ",", "nb", "=", "nb", ")", "for", "g", ",", "nb", "in", "items", ")" ]
Format the top entries of a counter.
[ "Format", "a", "top", "." ]
29c5469da8782780a06bf9a76c59414bb6fd8fe3
https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/commands/stats.py#L34-L38
train
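A usage sketch under Python 2 (the `iteritems` call and `u''` literals pin this repo to Python 2); `islice` is imported from itertools at module level:

from collections import Counter

genres = Counter({'Drama': 12, 'Sci-Fi': 9, 'Comedy': 7, 'Horror': 3})
print(format_top(genres, top=3))  # Drama (12); Sci-Fi (9); Comedy (7)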
IRC-SPHERE/HyperStream
hyperstream/utils/decorators.py
check_input_stream_count
def check_input_stream_count(expected_number_of_streams): """ Decorator for Tool._execute that checks the number of input streams :param expected_number_of_streams: The expected number of streams :return: the decorator """ def stream_count_decorator(func): def func_wrapper(*args, **kwargs): self = args[0] sources = kwargs['sources'] if 'sources' in kwargs else args[1] if expected_number_of_streams == 0: if sources: raise ValueError("No input streams expected") else: given_number_of_streams = len(sources) if sources else 0 if given_number_of_streams != expected_number_of_streams: raise ValueError("{} tool takes {} stream(s) as input ({} given)".format( self.__class__.__name__, expected_number_of_streams, given_number_of_streams)) return func(*args, **kwargs) return func_wrapper return stream_count_decorator
python
def check_input_stream_count(expected_number_of_streams): """ Decorator for Tool._execute that checks the number of input streams :param expected_number_of_streams: The expected number of streams :return: the decorator """ def stream_count_decorator(func): def func_wrapper(*args, **kwargs): self = args[0] sources = kwargs['sources'] if 'sources' in kwargs else args[1] if expected_number_of_streams == 0: if sources: raise ValueError("No input streams expected") else: given_number_of_streams = len(sources) if sources else 0 if given_number_of_streams != expected_number_of_streams: raise ValueError("{} tool takes {} stream(s) as input ({} given)".format( self.__class__.__name__, expected_number_of_streams, given_number_of_streams)) return func(*args, **kwargs) return func_wrapper return stream_count_decorator
[ "def", "check_input_stream_count", "(", "expected_number_of_streams", ")", ":", "def", "stream_count_decorator", "(", "func", ")", ":", "def", "func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "sources", "=", "kwargs", "[", "'sources'", "]", "if", "'sources'", "in", "kwargs", "else", "args", "[", "1", "]", "if", "expected_number_of_streams", "==", "0", ":", "if", "sources", ":", "raise", "ValueError", "(", "\"No input streams expected\"", ")", "else", ":", "given_number_of_streams", "=", "len", "(", "sources", ")", "if", "sources", "else", "0", "if", "given_number_of_streams", "!=", "expected_number_of_streams", ":", "raise", "ValueError", "(", "\"{} tool takes {} stream(s) as input ({} given)\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ",", "expected_number_of_streams", ",", "given_number_of_streams", ")", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func_wrapper", "return", "stream_count_decorator" ]
Decorator for Tool._execute that checks the number of input streams :param expected_number_of_streams: The expected number of streams :return: the decorator
[ "Decorator", "for", "Tool", ".", "_execute", "that", "checks", "the", "number", "of", "input", "streams" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/decorators.py#L69-L93
train
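A usage sketch; the wrapper reads `sources` as the first positional argument after `self`, so a hypothetical tool only needs to follow that convention:

from hyperstream.utils.decorators import check_input_stream_count

class MergeTool(object):  # hypothetical tool for illustration
    @check_input_stream_count(2)
    def _execute(self, sources, interval):
        return list(sources)

tool = MergeTool()
tool._execute(['stream_a', 'stream_b'], None)  # passes the count check
# tool._execute(['stream_a'], None)            # would raise ValueError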
tamasgal/km3pipe
km3pipe/utils/ligiermirror.py
main
def main(): """The main script""" from docopt import docopt args = docopt(__doc__, version=kp.version) kp.logger.set_level("km3pipe", args['-d']) pipe = kp.Pipeline() pipe.attach( kp.io.ch.CHPump, host=args['SOURCE_IP'], port=int(args['-p']), tags=args['-m'], timeout=int(args['-x']), max_queue=int(args['-s']) ) pipe.attach(LigierSender, target_ip=args['-t'], port=int(args['-q'])) pipe.drain()
python
def main(): """The main script""" from docopt import docopt args = docopt(__doc__, version=kp.version) kp.logger.set_level("km3pipe", args['-d']) pipe = kp.Pipeline() pipe.attach( kp.io.ch.CHPump, host=args['SOURCE_IP'], port=int(args['-p']), tags=args['-m'], timeout=int(args['-x']), max_queue=int(args['-s']) ) pipe.attach(LigierSender, target_ip=args['-t'], port=int(args['-q'])) pipe.drain()
[ "def", "main", "(", ")", ":", "from", "docopt", "import", "docopt", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "kp", ".", "version", ")", "kp", ".", "logger", ".", "set_level", "(", "\"km3pipe\"", ",", "args", "[", "'-d'", "]", ")", "pipe", "=", "kp", ".", "Pipeline", "(", ")", "pipe", ".", "attach", "(", "kp", ".", "io", ".", "ch", ".", "CHPump", ",", "host", "=", "args", "[", "'SOURCE_IP'", "]", ",", "port", "=", "int", "(", "args", "[", "'-p'", "]", ")", ",", "tags", "=", "args", "[", "'-m'", "]", ",", "timeout", "=", "int", "(", "args", "[", "'-x'", "]", ")", ",", "max_queue", "=", "int", "(", "args", "[", "'-s'", "]", ")", ")", "pipe", ".", "attach", "(", "LigierSender", ",", "target_ip", "=", "args", "[", "'-t'", "]", ",", "port", "=", "int", "(", "args", "[", "'-q'", "]", ")", ")", "pipe", ".", "drain", "(", ")" ]
The main script
[ "The", "main", "script" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/ligiermirror.py#L44-L61
train
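Stripped of docopt, the wiring reduces to the sketch below; the host, ports, tags and queue size are placeholders, not the script's real defaults:

import km3pipe as kp
from km3pipe.utils.ligiermirror import LigierSender  # defined alongside main()

pipe = kp.Pipeline()
pipe.attach(
    kp.io.ch.CHPump,
    host='192.168.0.110',  # placeholder source Ligier
    port=5553, tags='IO_EVT', timeout=60, max_queue=1000
)
pipe.attach(LigierSender, target_ip='127.0.0.1', port=5553)
pipe.drain()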
nsfmc/swatch
swatch/__init__.py
parse
def parse(filename): """parses a .ase file and returns a list of colors and color groups `swatch.parse` reads in an ase file and converts it to a list of colors and palettes. colors are simple dicts of the form ```json { 'name': u'color name', 'type': u'Process', 'data': { 'mode': u'RGB', 'values': [1.0, 1.0, 1.0] } } ``` the values provided vary between color mode. For all color modes, the value is always a list of floats. RGB: three floats between [0,1] corresponding to RGB. CMYK: four floats between [0,1] inclusive, corresponding to CMYK. Gray: one float between [0,1] with 1 being white, 0 being black. LAB: three floats. The first L, is ranged from 0,1. Both A and B are floats ranging from [-128.0,127.0]. I believe illustrator just crops these to whole values, though. Palettes (née Color Groups in Adobe Parlance) are also dicts, but they have an attribute named `swatches` which contains a list of colors contained within the palette. ```json { 'name': u'accent colors', 'type': u'Color Group', 'swatches': [ {color}, {color}, ..., {color} ] } ``` Because Adobe Illustrator lets swatches exist either inside and outside of palettes, the output of swatch.parse is a list that may contain swatches and palettes, i.e. [ swatch* palette* ] Here's an example with a light grey swatch followed by a color group containing three >>> import swatch >>> swatch.parse("example.ase") [{'data': {'mode': u'Gray', 'values': [0.75]}, 'name': u'Light Grey', 'type': u'Process'}, {'name': u'Accent Colors', 'swatches': [{'data': {'mode': u'CMYK', 'values': [0.5279774069786072, 0.24386966228485107, 1.0, 0.04303044080734253]}, 'name': u'Green', 'type': u'Process'}, {'data': {'mode': u'CMYK', 'values': [0.6261844635009766, 0.5890134572982788, 3.051804378628731e-05, 3.051804378628731e-05]}, 'name': u'Violet Process Global', 'type': u'Global'}, {'data': {'mode': u'LAB', 'values': [0.6000000238418579, -35.0, -5.0]}, 'name': u'Cyan Spot (global)', 'type': u'Spot'}], 'type': u'Color Group'}] """ with open(filename, "rb") as data: header, v_major, v_minor, chunk_count = struct.unpack("!4sHHI", data.read(12)) assert header == b"ASEF" assert (v_major, v_minor) == (1, 0) return [c for c in parser.parse_chunk(data)]
python
def parse(filename): """parses a .ase file and returns a list of colors and color groups `swatch.parse` reads in an ase file and converts it to a list of colors and palettes. colors are simple dicts of the form ```json { 'name': u'color name', 'type': u'Process', 'data': { 'mode': u'RGB', 'values': [1.0, 1.0, 1.0] } } ``` the values provided vary between color mode. For all color modes, the value is always a list of floats. RGB: three floats between [0,1] corresponding to RGB. CMYK: four floats between [0,1] inclusive, corresponding to CMYK. Gray: one float between [0,1] with 1 being white, 0 being black. LAB: three floats. The first L, is ranged from 0,1. Both A and B are floats ranging from [-128.0,127.0]. I believe illustrator just crops these to whole values, though. Palettes (née Color Groups in Adobe Parlance) are also dicts, but they have an attribute named `swatches` which contains a list of colors contained within the palette. ```json { 'name': u'accent colors', 'type': u'Color Group', 'swatches': [ {color}, {color}, ..., {color} ] } ``` Because Adobe Illustrator lets swatches exist either inside and outside of palettes, the output of swatch.parse is a list that may contain swatches and palettes, i.e. [ swatch* palette* ] Here's an example with a light grey swatch followed by a color group containing three >>> import swatch >>> swatch.parse("example.ase") [{'data': {'mode': u'Gray', 'values': [0.75]}, 'name': u'Light Grey', 'type': u'Process'}, {'name': u'Accent Colors', 'swatches': [{'data': {'mode': u'CMYK', 'values': [0.5279774069786072, 0.24386966228485107, 1.0, 0.04303044080734253]}, 'name': u'Green', 'type': u'Process'}, {'data': {'mode': u'CMYK', 'values': [0.6261844635009766, 0.5890134572982788, 3.051804378628731e-05, 3.051804378628731e-05]}, 'name': u'Violet Process Global', 'type': u'Global'}, {'data': {'mode': u'LAB', 'values': [0.6000000238418579, -35.0, -5.0]}, 'name': u'Cyan Spot (global)', 'type': u'Spot'}], 'type': u'Color Group'}] """ with open(filename, "rb") as data: header, v_major, v_minor, chunk_count = struct.unpack("!4sHHI", data.read(12)) assert header == b"ASEF" assert (v_major, v_minor) == (1, 0) return [c for c in parser.parse_chunk(data)]
[ "def", "parse", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "data", ":", "header", ",", "v_major", ",", "v_minor", ",", "chunk_count", "=", "struct", ".", "unpack", "(", "\"!4sHHI\"", ",", "data", ".", "read", "(", "12", ")", ")", "assert", "header", "==", "b\"ASEF\"", "assert", "(", "v_major", ",", "v_minor", ")", "==", "(", "1", ",", "0", ")", "return", "[", "c", "for", "c", "in", "parser", ".", "parse_chunk", "(", "data", ")", "]" ]
parses a .ase file and returns a list of colors and color groups `swatch.parse` reads in an ase file and converts it to a list of colors and palettes. colors are simple dicts of the form ```json { 'name': u'color name', 'type': u'Process', 'data': { 'mode': u'RGB', 'values': [1.0, 1.0, 1.0] } } ``` the values provided vary between color modes. For all color modes, the value is always a list of floats. RGB: three floats between [0,1] corresponding to RGB. CMYK: four floats between [0,1] inclusive, corresponding to CMYK. Gray: one float between [0,1] with 1 being white, 0 being black. LAB: three floats. The first, L, ranges from 0 to 1. Both A and B are floats ranging from [-128.0,127.0]. I believe illustrator just crops these to whole values, though. Palettes (née Color Groups in Adobe Parlance) are also dicts, but they have an attribute named `swatches` which contains a list of colors contained within the palette. ```json { 'name': u'accent colors', 'type': u'Color Group', 'swatches': [ {color}, {color}, ..., {color} ] } ``` Because Adobe Illustrator lets swatches exist either inside or outside of palettes, the output of swatch.parse is a list that may contain swatches and palettes, i.e. [ swatch* palette* ] Here's an example with a light grey swatch followed by a color group containing three colors >>> import swatch >>> swatch.parse("example.ase") [{'data': {'mode': u'Gray', 'values': [0.75]}, 'name': u'Light Grey', 'type': u'Process'}, {'name': u'Accent Colors', 'swatches': [{'data': {'mode': u'CMYK', 'values': [0.5279774069786072, 0.24386966228485107, 1.0, 0.04303044080734253]}, 'name': u'Green', 'type': u'Process'}, {'data': {'mode': u'CMYK', 'values': [0.6261844635009766, 0.5890134572982788, 3.051804378628731e-05, 3.051804378628731e-05]}, 'name': u'Violet Process Global', 'type': u'Global'}, {'data': {'mode': u'LAB', 'values': [0.6000000238418579, -35.0, -5.0]}, 'name': u'Cyan Spot (global)', 'type': u'Spot'}], 'type': u'Color Group'}]
[ "parses", "a", ".", "ase", "file", "and", "returns", "a", "list", "of", "colors", "and", "color", "groups" ]
8654edf4f1aeef37d42211ff3fe6a3e9e4325859
https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/__init__.py#L27-L106
train
nsfmc/swatch
swatch/__init__.py
dumps
def dumps(obj): """converts a swatch to bytes suitable for writing""" header = b'ASEF' v_major, v_minor = 1, 0 chunk_count = writer.chunk_count(obj) head = struct.pack('!4sHHI', header, v_major, v_minor, chunk_count) body = b''.join([writer.chunk_for_object(c) for c in obj]) return head + body
python
def dumps(obj): """converts a swatch to bytes suitable for writing""" header = b'ASEF' v_major, v_minor = 1, 0 chunk_count = writer.chunk_count(obj) head = struct.pack('!4sHHI', header, v_major, v_minor, chunk_count) body = b''.join([writer.chunk_for_object(c) for c in obj]) return head + body
[ "def", "dumps", "(", "obj", ")", ":", "header", "=", "b'ASEF'", "v_major", ",", "v_minor", "=", "1", ",", "0", "chunk_count", "=", "writer", ".", "chunk_count", "(", "obj", ")", "head", "=", "struct", ".", "pack", "(", "'!4sHHI'", ",", "header", ",", "v_major", ",", "v_minor", ",", "chunk_count", ")", "body", "=", "b''", ".", "join", "(", "[", "writer", ".", "chunk_for_object", "(", "c", ")", "for", "c", "in", "obj", "]", ")", "return", "head", "+", "body" ]
converts a swatch to bytes suitable for writing
[ "converts", "a", "swatch", "to", "bytes", "suitable", "for", "writing" ]
8654edf4f1aeef37d42211ff3fe6a3e9e4325859
https://github.com/nsfmc/swatch/blob/8654edf4f1aeef37d42211ff3fe6a3e9e4325859/swatch/__init__.py#L108-L116
train
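A hedged round-trip sketch pairing `dumps` with `parse` from the previous entry; the swatch dict mirrors the structure documented in `parse`'s docstring:

import swatch

grey = {'name': u'Light Grey', 'type': u'Process',
        'data': {'mode': u'Gray', 'values': [0.75]}}
with open('minimal.ase', 'wb') as f:
    f.write(swatch.dumps([grey]))
print(swatch.parse('minimal.ase'))  # should echo the swatch back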
PrefPy/prefpy
prefpy/preference.py
Preference.isFullPreferenceOrder
def isFullPreferenceOrder(self, candList): """ Returns True if the underlying weighted majority graph contains a comparison between every pair of candidates and returns False otherwise. :ivar list<int> candList: Contains integer representations of each candidate. """ # If a candidate is missing from the wmgMap or if there is a pair of candidates for which # there is no value in the wmgMap, then the wmgMap cannot be a full preference order. for cand1 in candList: if cand1 not in self.wmgMap.keys(): return False for cand2 in candList: if cand1 == cand2: continue if cand2 not in self.wmgMap[cand1].keys(): return False return True
python
def isFullPreferenceOrder(self, candList): """ Returns True if the underlying weighted majority graph contains a comparison between every pair of candidates and returns False otherwise. :ivar list<int> candList: Contains integer representations of each candidate. """ # If a candidate is missing from the wmgMap or if there is a pair of candidates for which # there is no value in the wmgMap, then the wmgMap cannot be a full preference order. for cand1 in candList: if cand1 not in self.wmgMap.keys(): return False for cand2 in candList: if cand1 == cand2: continue if cand2 not in self.wmgMap[cand1].keys(): return False return True
[ "def", "isFullPreferenceOrder", "(", "self", ",", "candList", ")", ":", "# If a candidate is missing from the wmgMap or if there is a pair of candidates for which ", "# there is no value in the wmgMap, then the wmgMap cannot be a full preference order.", "for", "cand1", "in", "candList", ":", "if", "cand1", "not", "in", "self", ".", "wmgMap", ".", "keys", "(", ")", ":", "return", "False", "for", "cand2", "in", "candList", ":", "if", "cand1", "==", "cand2", ":", "continue", "if", "cand2", "not", "in", "self", ".", "wmgMap", "[", "cand1", "]", ".", "keys", "(", ")", ":", "return", "False", "return", "True" ]
Returns True if the underlying weighted majority graph contains a comparison between every pair of candidates and returns False otherwise. :ivar list<int> candList: Contains integer representations of each candidate.
[ "Returns", "True", "if", "the", "underlying", "weighted", "majority", "graph", "contains", "a", "comparision", "between", "every", "pair", "of", "candidate", "and", "returns", "False", "otherwise", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L21-L39
train
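The `Preference` constructor is not shown in this entry, so a plain dict stands in for `wmgMap` (candidate -> opponent -> signed edge weight) to illustrate the pairwise-completeness check:

wmg = {1: {2: 2, 3: 2}, 2: {1: -2, 3: 0}, 3: {1: -2, 2: 0}}
cands = [1, 2, 3]

complete = all(
    c1 in wmg and all(c2 in wmg[c1] for c2 in cands if c2 != c1)
    for c1 in cands
)
print(complete)  # True: every ordered pair has an entry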
PrefPy/prefpy
prefpy/preference.py
Preference.containsTie
def containsTie(self): """ Returns True if the underlying weighted majority graph contains a tie between any pair of candidates and returns False otherwise. """ # If a value of 0 is present in the wmgMap, we assume that it represents a tie. for cand in self.wmgMap.keys(): if 0 in self.wmgMap[cand].values(): return True return False
python
def containsTie(self): """ Returns True if the underlying weighted majority graph contains a tie between any pair of candidates and returns False otherwise. """ # If a value of 0 is present in the wmgMap, we assume that it represents a tie. for cand in self.wmgMap.keys(): if 0 in self.wmgMap[cand].values(): return True return False
[ "def", "containsTie", "(", "self", ")", ":", "# If a value of 0 is present in the wmgMap, we assume that it represents a tie.", "for", "cand", "in", "self", ".", "wmgMap", ".", "keys", "(", ")", ":", "if", "0", "in", "self", ".", "wmgMap", "[", "cand", "]", ".", "values", "(", ")", ":", "return", "True", "return", "False" ]
Returns True if the underlying weighted majority graph contains a tie between any pair of candidates and returns False otherwise.
[ "Returns", "True", "if", "the", "underlying", "weighted", "majority", "graph", "contains", "a", "tie", "between", "any", "pair", "of", "candidates", "and", "returns", "False", "otherwise", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L41-L51
train
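On the stand-in map from the previous sketch, candidates 2 and 3 carry a zero-weight edge, so the tie check fires:

wmg = {1: {2: 2, 3: 2}, 2: {1: -2, 3: 0}, 3: {1: -2, 2: 0}}
print(any(0 in edges.values() for edges in wmg.values()))  # True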
PrefPy/prefpy
prefpy/preference.py
Preference.getIncEdgesMap
def getIncEdgesMap(self): """ Returns a dictionary that associates numbers of incoming edges in the weighted majority graph with the candidates that have that number of incoming edges. """ # We calculate the number of incoming edges for each candidate and store it into a dictionary # that associates the number of incoming edges with the candidates with that number. incEdgesMap = dict() for cand1 in self.wmgMap.keys(): incEdgesSum = 0 for cand2 in self.wmgMap[cand1].keys(): if self.wmgMap[cand1][cand2] > 0: incEdgesSum += self.wmgMap[cand1][cand2] # Check if this is the first candidate associated with this number of associated edges. if incEdgesSum in incEdgesMap.keys(): incEdgesMap[incEdgesSum].append(cand1) else: incEdgesMap[incEdgesSum] = [cand1] return incEdgesMap
python
def getIncEdgesMap(self): """ Returns a dictionary that associates numbers of incoming edges in the weighted majority graph with the candidates that have that number of incoming edges. """ # We calculate the number of incoming edges for each candidate and store it into a dictionary # that associates the number of incoming edges with the candidates with that number. incEdgesMap = dict() for cand1 in self.wmgMap.keys(): incEdgesSum = 0 for cand2 in self.wmgMap[cand1].keys(): if self.wmgMap[cand1][cand2] > 0: incEdgesSum += self.wmgMap[cand1][cand2] # Check if this is the first candidate associated with this number of associated edges. if incEdgesSum in incEdgesMap.keys(): incEdgesMap[incEdgesSum].append(cand1) else: incEdgesMap[incEdgesSum] = [cand1] return incEdgesMap
[ "def", "getIncEdgesMap", "(", "self", ")", ":", "# We calculate the number of incoming edges for each candidate and store it into a dictionary ", "# that associates the number of incoming edges with the candidates with that number.", "incEdgesMap", "=", "dict", "(", ")", "for", "cand1", "in", "self", ".", "wmgMap", ".", "keys", "(", ")", ":", "incEdgesSum", "=", "0", "for", "cand2", "in", "self", ".", "wmgMap", "[", "cand1", "]", ".", "keys", "(", ")", ":", "if", "self", ".", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", ">", "0", ":", "incEdgesSum", "+=", "self", ".", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "# Check if this is the first candidate associated with this number of associated edges.", "if", "incEdgesSum", "in", "incEdgesMap", ".", "keys", "(", ")", ":", "incEdgesMap", "[", "incEdgesSum", "]", ".", "append", "(", "cand1", ")", "else", ":", "incEdgesMap", "[", "incEdgesSum", "]", "=", "[", "cand1", "]", "return", "incEdgesMap" ]
Returns a dictionary that associates numbers of incoming edges in the weighted majority graph with the candidates that have that number of incoming edges.
[ "Returns", "a", "dictionary", "that", "associates", "numbers", "of", "incoming", "edges", "in", "the", "weighted", "majority", "graph", "with", "the", "candidates", "that", "have", "that", "number", "of", "incoming", "edges", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L53-L74
train
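Reproducing the tally on the same stand-in map: despite the method's name, the sum taken is over each candidate's own positive (winning) edge weights, mirroring the code above:

from collections import defaultdict

wmg = {1: {2: 2, 3: 2}, 2: {1: -2, 3: 0}, 3: {1: -2, 2: 0}}
inc_edges = defaultdict(list)
for cand, edges in wmg.items():
    total = sum(w for w in edges.values() if w > 0)
    inc_edges[total].append(cand)
print(dict(inc_edges))  # {4: [1], 0: [2, 3]}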
PrefPy/prefpy
prefpy/preference.py
Preference.getRankMap
def getRankMap(self): """ Returns a dictionary that associates the integer representation of each candidate with its position in the ranking, starting from 1. """ # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self.getIncEdgesMap() sortedKeys = sorted(incEdgesMap.keys(), reverse = True) rankMap = dict() pos = 1 for key in sortedKeys: cands = incEdgesMap[key] for cand in cands: rankMap[cand] = pos pos += 1 return rankMap
python
def getRankMap(self): """ Returns a dictionary that associates the integer representation of each candidate with its position in the ranking, starting from 1. """ # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self.getIncEdgesMap() sortedKeys = sorted(incEdgesMap.keys(), reverse = True) rankMap = dict() pos = 1 for key in sortedKeys: cands = incEdgesMap[key] for cand in cands: rankMap[cand] = pos pos += 1 return rankMap
[ "def", "getRankMap", "(", "self", ")", ":", "# We sort the candidates based on the number of incoming edges they have in the graph. If ", "# two candidates have the same number, we assume that they are tied.", "incEdgesMap", "=", "self", ".", "getIncEdgesMap", "(", ")", "sortedKeys", "=", "sorted", "(", "incEdgesMap", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", "rankMap", "=", "dict", "(", ")", "pos", "=", "1", "for", "key", "in", "sortedKeys", ":", "cands", "=", "incEdgesMap", "[", "key", "]", "for", "cand", "in", "cands", ":", "rankMap", "[", "cand", "]", "=", "pos", "pos", "+=", "1", "return", "rankMap" ]
Returns a dictionary that associates the integer representation of each candidate with its position in the ranking, starting from 1.
[ "Returns", "a", "dictionary", "that", "associates", "the", "integer", "representation", "of", "each", "candidate", "with", "its", "position", "in", "the", "ranking", "starting", "from", "1", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L76-L93
train
PrefPy/prefpy
prefpy/preference.py
Preference.getReverseRankMap
def getReverseRankMap(self): """ Returns a dictionary that associates each position in the ranking with a list of integer representations of the candidates ranked at that position. """ # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self.getIncEdgesMap() sortedKeys = sorted(incEdgesMap.keys(), reverse = True) reverseRankMap = dict() pos = 1 for key in sortedKeys: cands = incEdgesMap[key] reverseRankMap[pos] = cands pos += 1 return reverseRankMap
python
def getReverseRankMap(self): """ Returns a dictionary that associates each position in the ranking with a list of integer representations of the candidates ranked at that position. """ # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self.getIncEdgesMap() sortedKeys = sorted(incEdgesMap.keys(), reverse = True) reverseRankMap = dict() pos = 1 for key in sortedKeys: cands = incEdgesMap[key] reverseRankMap[pos] = cands pos += 1 return reverseRankMap
[ "def", "getReverseRankMap", "(", "self", ")", ":", "# We sort the candidates based on the number of incoming edges they have in the graph. If ", "# two candidates have the same number, we assume that they are tied.", "incEdgesMap", "=", "self", ".", "getIncEdgesMap", "(", ")", "sortedKeys", "=", "sorted", "(", "incEdgesMap", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", "reverseRankMap", "=", "dict", "(", ")", "pos", "=", "1", "for", "key", "in", "sortedKeys", ":", "cands", "=", "incEdgesMap", "[", "key", "]", "reverseRankMap", "[", "pos", "]", "=", "cands", "pos", "+=", "1", "return", "reverseRankMap" ]
Returns a dictionary that associates each position in the ranking with a list of integer representations of the candidates ranked at that position.
[ "Returns", "a", "dictionary", "that", "associates", "each", "position", "in", "the", "ranking", "with", "a", "list", "of", "integer", "representations", "of", "the", "candidates", "ranked", "at", "that", "position", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L95-L111
train
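Continuing the sketch, both rank maps follow from sorting the edge totals in descending order. The flattened source above is ambiguous about whether `pos` advances per candidate or per tie group; the variant below advances per group, which keeps `getRankMap` consistent with `getReverseRankMap`:

inc_edges = {4: [1], 0: [2, 3]}
rank_map, reverse_rank_map = {}, {}
pos = 1
for total in sorted(inc_edges, reverse=True):
    reverse_rank_map[pos] = inc_edges[total]
    for cand in inc_edges[total]:
        rank_map[cand] = pos
    pos += 1
print(rank_map)          # {1: 1, 2: 2, 3: 2}
print(reverse_rank_map)  # {1: [1], 2: [2, 3]}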
IRC-SPHERE/HyperStream
hyperstream/utils/statistics/histogram.py
histogram
def histogram(a, bins): """ Compute the histogram of a set of data. :param a: Input data :param bins: A monotonically increasing sequence of bin edges :type a: list | tuple :type bins: list[int] | list[float] :return: The counts per bin and the bin edges """ if any(map(lambda x: x < 0, diff(bins))): raise ValueError( 'bins must increase monotonically.') try: sa = sorted(a) except TypeError: # Perhaps just a single value? Treat as a list and carry on sa = sorted([a]) # Mirror numpy: nl[i] counts elements left of each inner edge, nr counts # elements up to and including the last edge (the last bin is closed). nl = list(accumulate([Counter(map(lambda x: bisect_right(bins[:-1], x), sa))[i] for i in range(len(bins) - 1)])) nr = Counter(map(lambda x: bisect_left([bins[-1]], x), sa))[0] n = list(nl) + [nr] return diff(n), bins
python
def histogram(a, bins): """ Compute the histogram of a set of data. :param a: Input data :param bins: A monotonically increasing sequence of bin edges :type a: list | tuple :type bins: list[int] | list[float] :return: The counts per bin and the bin edges """ if any(map(lambda x: x < 0, diff(bins))): raise ValueError( 'bins must increase monotonically.') try: sa = sorted(a) except TypeError: # Perhaps just a single value? Treat as a list and carry on sa = sorted([a]) # Mirror numpy: nl[i] counts elements left of each inner edge, nr counts # elements up to and including the last edge (the last bin is closed). nl = list(accumulate([Counter(map(lambda x: bisect_right(bins[:-1], x), sa))[i] for i in range(len(bins) - 1)])) nr = Counter(map(lambda x: bisect_left([bins[-1]], x), sa))[0] n = list(nl) + [nr] return diff(n), bins
[ "def", "histogram", "(", "a", ",", "bins", ")", ":", "if", "any", "(", "map", "(", "lambda", "x", ":", "x", "<", "0", ",", "diff", "(", "bins", ")", ")", ")", ":", "raise", "ValueError", "(", "'bins must increase monotonically.'", ")", "try", ":", "sa", "=", "sorted", "(", "a", ")", "except", "TypeError", ":", "# Perhaps just a single value? Treat as a list and carry on", "sa", "=", "sorted", "(", "[", "a", "]", ")", "# import numpy as np", "# nl = np.searchsorted(sa, bins[:-1], 'left')", "# nr = np.searchsorted(sa, bins[-1], 'right')", "# nn = np.r_[nl, nr]", "#", "# # cl = list(accumulate(Counter(map(lambda x: bisect_left(bins[:-1], x), sa)))", "# # print(\"cl\")", "# # print([cl[i] for i in range(len(bins))])", "# print(\"nl\")", "# print(list(nl))", "# # print(Counter(map(lambda x: bisect_right([bins[-1]], x), sa)))", "# print(\"nr\")", "# print([nr])", "# print(\"nn\")", "# print(list(nn))", "# print(\"hist\")", "# print(list(np.diff(nn)))", "# print(list(np.histogram(a, bins)[0]))", "nl", "=", "list", "(", "accumulate", "(", "[", "Counter", "(", "map", "(", "lambda", "x", ":", "bisect_left", "(", "bins", "[", ":", "-", "1", "]", ",", "x", ")", ",", "sa", ")", ")", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "bins", ")", "-", "1", ")", "]", ")", ")", "# print(\"nl\")", "# print(nl)", "nr", "=", "Counter", "(", "map", "(", "lambda", "x", ":", "bisect_right", "(", "[", "bins", "[", "1", "]", "]", ",", "x", ")", ",", "sa", ")", ")", "[", "1", "]", "# print(nl)", "# print(nr)", "n", "=", "list", "(", "nl", ")", "+", "[", "nr", "]", "return", "diff", "(", "n", ")", ",", "bins" ]
Compute the histogram of a set of data. :param a: Input data :param bins: A monotonically increasing sequence of bin edges :type a: list | tuple :type bins: list[int] | list[float] :return: The counts per bin and the bin edges
[ "Compute", "the", "histogram", "of", "a", "set", "of", "data", "." ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/statistics/histogram.py#L68-L115
train
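With the corrected implementation above, a usage sketch; the values are chosen so the counts match `numpy.histogram` bin-for-bin:

from hyperstream.utils.statistics.histogram import histogram

counts, edges = histogram([0.5, 1.5, 1.7, 2.5, 3.2], bins=[0, 1, 2, 3, 4])
print(list(counts))  # [1, 2, 1, 1], same as numpy.histogram on this input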
tamasgal/km3pipe
km3pipe/logger.py
deprecation
def deprecation(self, message, *args, **kws): """Show a deprecation warning.""" self._log(DEPRECATION, message, args, **kws)
python
def deprecation(self, message, *args, **kws): """Show a deprecation warning.""" self._log(DEPRECATION, message, args, **kws)
[ "def", "deprecation", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "self", ".", "_log", "(", "DEPRECATION", ",", "message", ",", "args", ",", "*", "*", "kws", ")" ]
Show a deprecation warning.
[ "Show", "a", "deprecation", "warning", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L33-L35
train
tamasgal/km3pipe
km3pipe/logger.py
once
def once(self, message, *args, **kws): """Show a message only once, determined by position in source or identifier. This will not work in IPython or Jupyter notebooks if no identifier is specified, since then the determined position in source contains the execution number of the input (cell), which changes every time. Set a unique identifier, otherwise the message will be printed every time. """ # TODO: after py2 support drop, put this into # function signature: identifier=None (between *args and **kws) identifier = kws.pop('identifier', None) if identifier is None: caller = getframeinfo(stack()[1][0]) identifier = "%s:%d" % (caller.filename, caller.lineno) if not hasattr(self, 'once_dict'): self.once_dict = {} if identifier in self.once_dict: return self.once_dict[identifier] = True self._log(ONCE, message, args, **kws)
python
def once(self, message, *args, **kws): """Show a message only once, determined by position in source or identifier. This will not work in IPython or Jupyter notebooks if no identifier is specified, since then the determined position in source contains the execution number of the input (cell), which changes every time. Set a unique identifier, otherwise the message will be printed every time. """ # TODO: after py2 support drop, put this into # function signature: identifier=None (between *args and **kws) identifier = kws.pop('identifier', None) if identifier is None: caller = getframeinfo(stack()[1][0]) identifier = "%s:%d" % (caller.filename, caller.lineno) if not hasattr(self, 'once_dict'): self.once_dict = {} if identifier in self.once_dict: return self.once_dict[identifier] = True self._log(ONCE, message, args, **kws)
[ "def", "once", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "# TODO: after py2 support drop, put this into", "# function signature: identifier=None (between *args and **kws)", "identifier", "=", "kws", ".", "pop", "(", "'identifier'", ",", "None", ")", "if", "identifier", "is", "None", ":", "caller", "=", "getframeinfo", "(", "stack", "(", ")", "[", "1", "]", "[", "0", "]", ")", "identifier", "=", "\"%s:%d\"", "%", "(", "caller", ".", "filename", ",", "caller", ".", "lineno", ")", "if", "not", "hasattr", "(", "self", ",", "'once_dict'", ")", ":", "self", ".", "once_dict", "=", "{", "}", "if", "identifier", "in", "self", ".", "once_dict", ":", "return", "self", ".", "once_dict", "[", "identifier", "]", "=", "True", "self", ".", "_log", "(", "ONCE", ",", "message", ",", "args", ",", "*", "*", "kws", ")" ]
Show a message only once, determined by position in source or identifier. This will not work in IPython or Jupyter notebooks if no identifier is specified, since then the determined position in source contains the execution number of the input (cell), which changes every time. Set a unique identifier, otherwise the message will be printed every time.
[ "Show", "a", "message", "only", "once", "determined", "by", "position", "in", "source", "or", "identifer", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L38-L59
train
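A hedged usage sketch, assuming km3pipe binds `once` onto its logger instances (the `self` parameter and `_log` call suggest it is patched onto `logging.Logger`); as the docstring warns, pass an explicit identifier inside notebooks:

log = get_logger('km3pipe.example')  # get_logger is shown in the next entry
for _ in range(3):
    log.once('This appears a single time', identifier='example-banner')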
tamasgal/km3pipe
km3pipe/logger.py
get_logger
def get_logger(name): """Helper function to get a logger""" if name in loggers: return loggers[name] logger = logging.getLogger(name) logger.propagate = False pre1, suf1 = hash_coloured_escapes(name) if supports_color() else ('', '') pre2, suf2 = hash_coloured_escapes(name + 'salt') \ if supports_color() else ('', '') formatter = logging.Formatter( '%(levelname)s {}+{}+{} ' '%(name)s: %(message)s'.format(pre1, pre2, suf1) ) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) loggers[name] = logger logger.once_dict = {} return logger
python
def get_logger(name): """Helper function to get a logger""" if name in loggers: return loggers[name] logger = logging.getLogger(name) logger.propagate = False pre1, suf1 = hash_coloured_escapes(name) if supports_color() else ('', '') pre2, suf2 = hash_coloured_escapes(name + 'salt') \ if supports_color() else ('', '') formatter = logging.Formatter( '%(levelname)s {}+{}+{} ' '%(name)s: %(message)s'.format(pre1, pre2, suf1) ) ch = logging.StreamHandler() ch.setFormatter(formatter) logger.addHandler(ch) loggers[name] = logger logger.once_dict = {} return logger
[ "def", "get_logger", "(", "name", ")", ":", "if", "name", "in", "loggers", ":", "return", "loggers", "[", "name", "]", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "propagate", "=", "False", "pre1", ",", "suf1", "=", "hash_coloured_escapes", "(", "name", ")", "if", "supports_color", "(", ")", "else", "(", "''", ",", "''", ")", "pre2", ",", "suf2", "=", "hash_coloured_escapes", "(", "name", "+", "'salt'", ")", "if", "supports_color", "(", ")", "else", "(", "''", ",", "''", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(levelname)s {}+{}+{} '", "'%(name)s: %(message)s'", ".", "format", "(", "pre1", ",", "pre2", ",", "suf1", ")", ")", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "ch", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "ch", ")", "loggers", "[", "name", "]", "=", "logger", "logger", ".", "once_dict", "=", "{", "}", "return", "logger" ]
Helper function to get a logger
[ "Helper", "function", "to", "get", "a", "logger" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L119-L139
train
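A usage sketch; the module-level `loggers` dict makes repeated lookups return the cached instance:

from km3pipe.logger import get_logger

log = get_logger('my.analysis')
assert log is get_logger('my.analysis')  # cached, not recreated
log.warning('the prefix colour is derived from the logger name hash')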
tamasgal/km3pipe
km3pipe/logger.py
get_printer
def get_printer(name, color=None, ansi_code=None, force_color=False): """Return a function which prints a message with a coloured name prefix""" if force_color or supports_color(): if color is None and ansi_code is None: cpre_1, csuf_1 = hash_coloured_escapes(name) cpre_2, csuf_2 = hash_coloured_escapes(name + 'salt') name = cpre_1 + '+' + cpre_2 + '+' + csuf_1 + ' ' + name else: name = colored(name, color=color, ansi_code=ansi_code) prefix = name + ': ' def printer(text): print(prefix + str(text)) return printer
python
def get_printer(name, color=None, ansi_code=None, force_color=False): """Return a function which prints a message with a coloured name prefix""" if force_color or supports_color(): if color is None and ansi_code is None: cpre_1, csuf_1 = hash_coloured_escapes(name) cpre_2, csuf_2 = hash_coloured_escapes(name + 'salt') name = cpre_1 + '+' + cpre_2 + '+' + csuf_1 + ' ' + name else: name = colored(name, color=color, ansi_code=ansi_code) prefix = name + ': ' def printer(text): print(prefix + str(text)) return printer
[ "def", "get_printer", "(", "name", ",", "color", "=", "None", ",", "ansi_code", "=", "None", ",", "force_color", "=", "False", ")", ":", "if", "force_color", "or", "supports_color", "(", ")", ":", "if", "color", "is", "None", "and", "ansi_code", "is", "None", ":", "cpre_1", ",", "csuf_1", "=", "hash_coloured_escapes", "(", "name", ")", "cpre_2", ",", "csuf_2", "=", "hash_coloured_escapes", "(", "name", "+", "'salt'", ")", "name", "=", "cpre_1", "+", "'+'", "+", "cpre_2", "+", "'+'", "+", "csuf_1", "+", "' '", "+", "name", "else", ":", "name", "=", "colored", "(", "name", ",", "color", "=", "color", ",", "ansi_code", "=", "ansi_code", ")", "prefix", "=", "name", "+", "': '", "def", "printer", "(", "text", ")", ":", "print", "(", "prefix", "+", "str", "(", "text", ")", ")", "return", "printer" ]
Return a function which prints a message with a coloured name prefix
[ "Return", "a", "function", "which", "prints", "a", "message", "with", "a", "coloured", "name", "prefix" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L152-L168
train
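A usage sketch; with no explicit colour the prefix gets the same name-hash colouring as the loggers above:

from km3pipe.logger import get_printer

printer = get_printer('worker-1', force_color=True)
printer('started')  # prints a coloured 'worker-1: started'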
tamasgal/km3pipe
km3pipe/logger.py
hash_coloured
def hash_coloured(text): """Return an ANSI coloured text based on its hash""" ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230 return colored(text, ansi_code=ansi_code)
python
def hash_coloured(text): """Return an ANSI coloured text based on its hash""" ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230 return colored(text, ansi_code=ansi_code)
[ "def", "hash_coloured", "(", "text", ")", ":", "ansi_code", "=", "int", "(", "sha256", "(", "text", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", ",", "16", ")", "%", "230", "return", "colored", "(", "text", ",", "ansi_code", "=", "ansi_code", ")" ]
Return an ANSI coloured text based on its hash
[ "Return", "a", "ANSI", "coloured", "text", "based", "on", "its", "hash" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L171-L174
train
tamasgal/km3pipe
km3pipe/logger.py
hash_coloured_escapes
def hash_coloured_escapes(text): """Return the ANSI hash colour prefix and suffix for a given text""" ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230 prefix, suffix = colored('SPLIT', ansi_code=ansi_code).split('SPLIT') return prefix, suffix
python
def hash_coloured_escapes(text): """Return the ANSI hash colour prefix and suffix for a given text""" ansi_code = int(sha256(text.encode('utf-8')).hexdigest(), 16) % 230 prefix, suffix = colored('SPLIT', ansi_code=ansi_code).split('SPLIT') return prefix, suffix
[ "def", "hash_coloured_escapes", "(", "text", ")", ":", "ansi_code", "=", "int", "(", "sha256", "(", "text", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", ",", "16", ")", "%", "230", "prefix", ",", "suffix", "=", "colored", "(", "'SPLIT'", ",", "ansi_code", "=", "ansi_code", ")", ".", "split", "(", "'SPLIT'", ")", "return", "prefix", ",", "suffix" ]
Return the ANSI hash colour prefix and suffix for a given text
[ "Return", "the", "ANSI", "hash", "colour", "prefix", "and", "suffix", "for", "a", "given", "text" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/logger.py#L177-L181
train
tamasgal/km3pipe
km3pipe/time.py
tai_timestamp
def tai_timestamp(): """Return current TAI timestamp.""" timestamp = time.time() date = datetime.utcfromtimestamp(timestamp) if date.year < 1972: return timestamp offset = 10 + timestamp leap_seconds = [ (1972, 1, 1), (1972, 7, 1), (1973, 1, 1), (1974, 1, 1), (1975, 1, 1), (1976, 1, 1), (1977, 1, 1), (1978, 1, 1), (1979, 1, 1), (1980, 1, 1), (1981, 7, 1), (1982, 7, 1), (1983, 7, 1), (1985, 7, 1), (1988, 1, 1), (1990, 1, 1), (1991, 1, 1), (1992, 7, 1), (1993, 7, 1), (1994, 7, 1), (1996, 1, 1), (1997, 7, 1), (1999, 1, 1), (2006, 1, 1), (2009, 1, 1), (2012, 7, 1), (2015, 7, 1), (2017, 1, 1), ] for idx, leap_date in enumerate(leap_seconds): if leap_date >= (date.year, date.month, date.day): return idx - 1 + offset return len(leap_seconds) - 1 + offset
python
def tai_timestamp(): """Return current TAI timestamp.""" timestamp = time.time() date = datetime.utcfromtimestamp(timestamp) if date.year < 1972: return timestamp offset = 10 + timestamp leap_seconds = [ (1972, 1, 1), (1972, 7, 1), (1973, 1, 1), (1974, 1, 1), (1975, 1, 1), (1976, 1, 1), (1977, 1, 1), (1978, 1, 1), (1979, 1, 1), (1980, 1, 1), (1981, 7, 1), (1982, 7, 1), (1983, 7, 1), (1985, 7, 1), (1988, 1, 1), (1990, 1, 1), (1991, 1, 1), (1992, 7, 1), (1993, 7, 1), (1994, 7, 1), (1996, 1, 1), (1997, 7, 1), (1999, 1, 1), (2006, 1, 1), (2009, 1, 1), (2012, 7, 1), (2015, 7, 1), (2017, 1, 1), ] for idx, leap_date in enumerate(leap_seconds): if leap_date >= (date.year, date.month, date.day): return idx - 1 + offset return len(leap_seconds) - 1 + offset
[ "def", "tai_timestamp", "(", ")", ":", "timestamp", "=", "time", ".", "time", "(", ")", "date", "=", "datetime", ".", "utcfromtimestamp", "(", "timestamp", ")", "if", "date", ".", "year", "<", "1972", ":", "return", "timestamp", "offset", "=", "10", "+", "timestamp", "leap_seconds", "=", "[", "(", "1972", ",", "1", ",", "1", ")", ",", "(", "1972", ",", "7", ",", "1", ")", ",", "(", "1973", ",", "1", ",", "1", ")", ",", "(", "1974", ",", "1", ",", "1", ")", ",", "(", "1975", ",", "1", ",", "1", ")", ",", "(", "1976", ",", "1", ",", "1", ")", ",", "(", "1977", ",", "1", ",", "1", ")", ",", "(", "1978", ",", "1", ",", "1", ")", ",", "(", "1979", ",", "1", ",", "1", ")", ",", "(", "1980", ",", "1", ",", "1", ")", ",", "(", "1981", ",", "7", ",", "1", ")", ",", "(", "1982", ",", "7", ",", "1", ")", ",", "(", "1983", ",", "7", ",", "1", ")", ",", "(", "1985", ",", "7", ",", "1", ")", ",", "(", "1988", ",", "1", ",", "1", ")", ",", "(", "1990", ",", "1", ",", "1", ")", ",", "(", "1991", ",", "1", ",", "1", ")", ",", "(", "1992", ",", "7", ",", "1", ")", ",", "(", "1993", ",", "7", ",", "1", ")", ",", "(", "1994", ",", "7", ",", "1", ")", ",", "(", "1996", ",", "1", ",", "1", ")", ",", "(", "1997", ",", "7", ",", "1", ")", ",", "(", "1999", ",", "1", ",", "1", ")", ",", "(", "2006", ",", "1", ",", "1", ")", ",", "(", "2009", ",", "1", ",", "1", ")", ",", "(", "2012", ",", "7", ",", "1", ")", ",", "(", "2015", ",", "7", ",", "1", ")", ",", "(", "2017", ",", "1", ",", "1", ")", ",", "]", "for", "idx", ",", "leap_date", "in", "enumerate", "(", "leap_seconds", ")", ":", "if", "leap_date", ">=", "(", "date", ".", "year", ",", "date", ".", "month", ",", "date", ".", "day", ")", ":", "return", "idx", "-", "1", "+", "offset", "return", "len", "(", "leap_seconds", ")", "-", "1", "+", "offset" ]
Return current TAI timestamp.
[ "Return", "current", "TAI", "timestamp", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/time.py#L102-L142
train
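A quick sanity check: once the 28-entry table is exhausted, the function returns `time.time()` plus the 10 s initial TAI-UTC offset plus 27 accumulated leap seconds, i.e. 37 s as of the 2017-01-01 leap second:

import time
from km3pipe.time import tai_timestamp

print(round(tai_timestamp() - time.time()))  # 37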
tamasgal/km3pipe
km3pipe/time.py
Cuckoo.msg
def msg(self, *args, **kwargs): "Only execute callback when interval is reached." if self.timestamp is None or self._interval_reached(): self.callback(*args, **kwargs) self.reset()
python
def msg(self, *args, **kwargs): "Only execute callback when interval is reached." if self.timestamp is None or self._interval_reached(): self.callback(*args, **kwargs) self.reset()
[ "def", "msg", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "timestamp", "is", "None", "or", "self", ".", "_interval_reached", "(", ")", ":", "self", ".", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "reset", "(", ")" ]
Only execute callback when interval is reached.
[ "Only", "execute", "callback", "when", "interval", "is", "reached", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/time.py#L83-L87
train
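A hedged sketch; the `Cuckoo` constructor signature is assumed from the attributes `msg` touches (`callback`, `timestamp` and an interval check):

import time
from km3pipe.time import Cuckoo

cuckoo = Cuckoo(interval=5, callback=print)  # hypothetical signature
for _ in range(3):
    cuckoo.msg('tick')  # only the first call fires inside the 5 s window
    time.sleep(1)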
thebigmunch/google-music-utils
src/google_music_utils/compare.py
_gather_field_values
def _gather_field_values( item, *, fields=None, field_map=FIELD_MAP, normalize_values=False, normalize_func=normalize_value): """Create a tuple of normalized metadata field values. Parameters: item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Returns: tuple: Values from the given metadata fields. """ it = get_item_tags(item) if fields is None: fields = list(it.keys()) normalize = normalize_func if normalize_values else lambda x: str(x) field_values = [] for field in fields: field_values.append( normalize( list_to_single_value( get_field(it, field, field_map=field_map) ) ) ) return tuple(field_values)
python
def _gather_field_values( item, *, fields=None, field_map=FIELD_MAP, normalize_values=False, normalize_func=normalize_value): """Create a tuple of normalized metadata field values. Parameters: item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Returns: tuple: Values from the given metadata fields. """ it = get_item_tags(item) if fields is None: fields = list(it.keys()) normalize = normalize_func if normalize_values else lambda x: str(x) field_values = [] for field in fields: field_values.append( normalize( list_to_single_value( get_field(it, field, field_map=field_map) ) ) ) return tuple(field_values)
[ "def", "_gather_field_values", "(", "item", ",", "*", ",", "fields", "=", "None", ",", "field_map", "=", "FIELD_MAP", ",", "normalize_values", "=", "False", ",", "normalize_func", "=", "normalize_value", ")", ":", "it", "=", "get_item_tags", "(", "item", ")", "if", "fields", "is", "None", ":", "fields", "=", "list", "(", "it", ".", "keys", "(", ")", ")", "normalize", "=", "normalize_func", "if", "normalize_values", "else", "lambda", "x", ":", "str", "(", "x", ")", "field_values", "=", "[", "]", "for", "field", "in", "fields", ":", "field_values", ".", "append", "(", "normalize", "(", "list_to_single_value", "(", "get_field", "(", "it", ",", "field", ",", "field_map", "=", "field_map", ")", ")", ")", ")", "return", "tuple", "(", "field_values", ")" ]
Create a tuple of normalized metadata field values. Parameters: item (~collections.abc.Mapping, str, os.PathLike): Item dict or filepath. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Returns: tuple: Values from the given metadata fields.
[ "Create", "a", "tuple", "of", "normalized", "metadata", "field", "values", "." ]
2e8873defe7d5aab7321b9d5ec8a80d72687578e
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/compare.py#L12-L50
train
thebigmunch/google-music-utils
src/google_music_utils/compare.py
find_existing_items
def find_existing_items( src, dst, *, fields=None, field_map=None, normalize_values=False, normalize_func=normalize_value): """Find items from an item collection that are in another item collection. Parameters: src (list): A list of item dicts or filepaths. dst (list): A list of item dicts or filepaths. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Yields: dict: The next item from the ``src`` collection that is also in the ``dst`` collection. """ if field_map is None: field_map = FIELD_MAP dst_keys = { _gather_field_values( dst_item, fields=fields, field_map=field_map, normalize_values=normalize_values, normalize_func=normalize_func ) for dst_item in dst } for src_item in src: if _gather_field_values( src_item, fields=fields, field_map=field_map, normalize_values=normalize_values, normalize_func=normalize_func ) in dst_keys: yield src_item
python
def find_existing_items( src, dst, *, fields=None, field_map=None, normalize_values=False, normalize_func=normalize_value): """Find items from an item collection that are in another item collection. Parameters: src (list): A list of item dicts or filepaths. dst (list): A list of item dicts or filepaths. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Yields: dict: The next item from the ``src`` collection that is also in the ``dst`` collection. """ if field_map is None: field_map = FIELD_MAP dst_keys = { _gather_field_values( dst_item, fields=fields, field_map=field_map, normalize_values=normalize_values, normalize_func=normalize_func ) for dst_item in dst } for src_item in src: if _gather_field_values( src_item, fields=fields, field_map=field_map, normalize_values=normalize_values, normalize_func=normalize_func ) in dst_keys: yield src_item
[ "def", "find_existing_items", "(", "src", ",", "dst", ",", "*", ",", "fields", "=", "None", ",", "field_map", "=", "None", ",", "normalize_values", "=", "False", ",", "normalize_func", "=", "normalize_value", ")", ":", "if", "field_map", "is", "None", ":", "field_map", "=", "FIELD_MAP", "dst_keys", "=", "{", "_gather_field_values", "(", "dst_item", ",", "fields", "=", "fields", ",", "field_map", "=", "field_map", ",", "normalize_values", "=", "normalize_values", ",", "normalize_func", "=", "normalize_func", ")", "for", "dst_item", "in", "dst", "}", "for", "src_item", "in", "src", ":", "if", "_gather_field_values", "(", "src_item", ",", "fields", "=", "fields", ",", "field_map", "=", "field_map", ",", "normalize_values", "=", "normalize_values", ",", "normalize_func", "=", "normalize_func", ")", "in", "dst_keys", ":", "yield", "src_item" ]
Find items from an item collection that are in another item collection. Parameters: src (list): A list of item dicts or filepaths. dst (list): A list of item dicts or filepaths. fields (list): A list of fields used to compare item dicts. field_map (~collections.abc.Mapping): A mapping of field name aliases. Default: :data:`~google_music_utils.constants.FIELD_MAP` normalize_values (bool): Normalize metadata values to remove common differences between sources. Default: ``False`` normalize_func (function): Function to apply to metadata values if ``normalize_values`` is ``True``. Default: :func:`~google_music_utils.utils.normalize_value` Yields: dict: The next item from the ``src`` collection that is also in the ``dst`` collection.
[ "Find", "items", "from", "an", "item", "collection", "that", "are", "in", "another", "item", "collection", "." ]
2e8873defe7d5aab7321b9d5ec8a80d72687578e
https://github.com/thebigmunch/google-music-utils/blob/2e8873defe7d5aab7321b9d5ec8a80d72687578e/src/google_music_utils/compare.py#L53-L89
train
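A hedged usage sketch; the top-level import path and the field names are illustrative, not confirmed against the package's FIELD_MAP:

from google_music_utils import find_existing_items  # import path assumed

library = [{'title': 'Take Five', 'artist': 'Dave Brubeck'}]
uploads = [
    {'title': 'Take Five', 'artist': 'Dave Brubeck'},
    {'title': 'So What', 'artist': 'Miles Davis'},
]
dupes = list(find_existing_items(uploads, library,
                                 fields=['title', 'artist'],
                                 normalize_values=True))
print(dupes)  # [{'title': 'Take Five', 'artist': 'Dave Brubeck'}]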
IRC-SPHERE/HyperStream
hyperstream/utils/hyperstream_logger.py
monitor
def monitor(self, message, *args, **kws): """ Define a monitoring logger that will be added to Logger :param self: The logging object :param message: The logging message :param args: Positional arguments :param kws: Keyword arguments :return: """ if self.isEnabledFor(MON): # Yes, logger takes its '*args' as 'args'. self._log(MON, message, args, **kws)
python
def monitor(self, message, *args, **kws): """ Define a monitoring logger that will be added to Logger :param self: The logging object :param message: The logging message :param args: Positional arguments :param kws: Keyword arguments :return: """ if self.isEnabledFor(MON): # Yes, logger takes its '*args' as 'args'. self._log(MON, message, args, **kws)
[ "def", "monitor", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kws", ")", ":", "if", "self", ".", "isEnabledFor", "(", "MON", ")", ":", "# Yes, logger takes its '*args' as 'args'.", "self", ".", "_log", "(", "MON", ",", "message", ",", "args", ",", "*", "*", "kws", ")" ]
Define a monitoring logger that will be added to Logger :param self: The logging object :param message: The logging message :param args: Positional arguments :param kws: Keyword arguments :return:
[ "Define", "a", "monitoring", "logger", "that", "will", "be", "added", "to", "Logger" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L160-L172
train
IRC-SPHERE/HyperStream
hyperstream/utils/hyperstream_logger.py
monitor
def monitor(msg, *args, **kwargs): """ Log a message with severity 'MON' on the root logger. """ if len(logging.root.handlers) == 0: logging.basicConfig() logging.root.monitor(msg, *args, **kwargs)
python
def monitor(msg, *args, **kwargs): """ Log a message with severity 'MON' on the root logger. """ if len(logging.root.handlers) == 0: logging.basicConfig() logging.root.monitor(msg, *args, **kwargs)
[ "def", "monitor", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "logging", ".", "root", ".", "handlers", ")", "==", "0", ":", "logging", ".", "basicConfig", "(", ")", "logging", ".", "root", ".", "monitor", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Log a message with severity 'MON' on the root logger.
[ "Log", "a", "message", "with", "severity", "MON", "on", "the", "root", "logger", "." ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L178-L184
train
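The two monitor records above follow the standard recipe for a custom logging level. A self-contained sketch of that recipe using only the standard library; the numeric value 25 is an assumption here, HyperStream defines its own MON constant:

import logging

MON = 25  # assumed level value; HyperStream defines its own MON
logging.addLevelName(MON, 'MON')

def monitor(self, message, *args, **kws):
    # Yes, logger takes its '*args' as 'args'.
    if self.isEnabledFor(MON):
        self._log(MON, message, args, **kws)

logging.Logger.monitor = monitor  # attach the new method to Logger

logging.basicConfig(level=MON)
logging.getLogger(__name__).monitor("monitoring message with severity 'MON'")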
IRC-SPHERE/HyperStream
hyperstream/utils/hyperstream_logger.py
SenMLFormatter.format
def format(self, record): """ The formatting function :param record: The record object :return: The string representation of the record """ try: n = record.n except AttributeError: n = 'default' try: message = record.message except AttributeError: message = record.msg senml = OrderedDict( uid="hyperstream", bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z', e=[OrderedDict(n=n, v=message)] ) formatted_json = json.dumps(senml) return formatted_json
python
def format(self, record): """ The formatting function :param record: The record object :return: The string representation of the record """ try: n = record.n except AttributeError: n = 'default' try: message = record.message except AttributeError: message = record.msg senml = OrderedDict( uid="hyperstream", bt=datetime.utcfromtimestamp(record.created).isoformat()[:-3] + 'Z', e=[OrderedDict(n=n, v=message)] ) formatted_json = json.dumps(senml) return formatted_json
[ "def", "format", "(", "self", ",", "record", ")", ":", "try", ":", "n", "=", "record", ".", "n", "except", "AttributeError", ":", "n", "=", "'default'", "try", ":", "message", "=", "record", ".", "message", "except", "AttributeError", ":", "message", "=", "record", ".", "msg", "senml", "=", "OrderedDict", "(", "uid", "=", "\"hyperstream\"", ",", "bt", "=", "datetime", ".", "utcfromtimestamp", "(", "record", ".", "created", ")", ".", "isoformat", "(", ")", "[", ":", "-", "3", "]", "+", "'Z'", ",", "e", "=", "[", "OrderedDict", "(", "n", "=", "n", ",", "v", "=", "message", ")", "]", ")", "formatted_json", "=", "json", ".", "dumps", "(", "senml", ")", "return", "formatted_json" ]
The formatting function :param record: The record object :return: The string representation of the record
[ "The", "formatting", "function" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/hyperstream_logger.py#L197-L222
train
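SenMLFormatter plugs into the standard logging machinery like any other Formatter. A minimal sketch, assuming the import path matches the file path above; the 'n' attribute is supplied through logging's extra mechanism and ends up as the SenML name field:

import logging
from hyperstream.utils.hyperstream_logger import SenMLFormatter  # assumed import path

handler = logging.StreamHandler()
handler.setFormatter(SenMLFormatter())
logger = logging.getLogger('senml-demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# 'extra' attributes land on the LogRecord, so record.n becomes 'sensor-1'.
logger.info('21.5', extra={'n': 'sensor-1'})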
wtsi-hgi/consul-lock
consullock/managers.py
ConsulLockManager.teardown
def teardown(self): """ Tears down the instance, removing any remaining sessions that this instance has created. The instance must not be used after this method has been called. """ with self._teardown_lock: if not self._teardown_called: self._teardown_called = True if len(self._acquiring_session_ids) > 0: logger.info( f"Destroying all sessions that have not acquired keys: {self._acquiring_session_ids}...") for session_id in self._acquiring_session_ids: try: self.consul_client.session.destroy(session_id=session_id) logger.debug(f"Destroyed: {session_id}") except requests.exceptions.ConnectionError as e: logger.debug(f"Exception: {e}") logger.warning(f"Could not connect to Consul to clean up session {session_id}") atexit.unregister(self.teardown)
python
def teardown(self): """ Tears down the instance, removing any remaining sessions that this instance has created. The instance must not be used after this method has been called. """ with self._teardown_lock: if not self._teardown_called: self._teardown_called = True if len(self._acquiring_session_ids) > 0: logger.info( f"Destroying all sessions that have not acquired keys: {self._acquiring_session_ids}...") for session_id in self._acquiring_session_ids: try: self.consul_client.session.destroy(session_id=session_id) logger.debug(f"Destroyed: {session_id}") except requests.exceptions.ConnectionError as e: logger.debug(f"Exception: {e}") logger.warning(f"Could not connect to Consul to clean up session {session_id}") atexit.unregister(self.teardown)
[ "def", "teardown", "(", "self", ")", ":", "with", "self", ".", "_teardown_lock", ":", "if", "not", "self", ".", "_teardown_called", ":", "self", ".", "_teardown_called", "=", "True", "if", "len", "(", "self", ".", "_acquiring_session_ids", ")", ">", "0", ":", "logger", ".", "info", "(", "f\"Destroying all sessions that have not acquired keys: {self._acquiring_session_ids}...\"", ")", "for", "session_id", "in", "self", ".", "_acquiring_session_ids", ":", "try", ":", "self", ".", "consul_client", ".", "session", ".", "destroy", "(", "session_id", "=", "session_id", ")", "logger", ".", "debug", "(", "f\"Destroyed: {session_id}\"", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "logger", ".", "debug", "(", "f\"Exception: {e}\"", ")", "logger", ".", "warning", "(", "f\"Could not connect to Consul to clean up session {session_id}\"", ")", "atexit", ".", "unregister", "(", "self", ".", "teardown", ")" ]
Tears down the instance, removing any remaining sessions that this instance has created. The instance must not be used after this method has been called.
[ "Tears", "down", "the", "instance", "removing", "any", "remaining", "sessions", "that", "this", "instance", "has", "created", "." ]
deb07ab41dabbb49f4d0bbc062bc3b4b6e5d71b2
https://github.com/wtsi-hgi/consul-lock/blob/deb07ab41dabbb49f4d0bbc062bc3b4b6e5d71b2/consullock/managers.py#L356-L375
train
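The teardown above pairs an explicit cleanup call with an atexit hook so the sessions are destroyed at most once. The same register/unregister pattern in isolation, with a made-up Resource class:

import atexit

class Resource:
    def __init__(self):
        self._torn_down = False
        atexit.register(self.teardown)  # fallback cleanup at interpreter exit

    def teardown(self):
        if not self._torn_down:
            self._torn_down = True
            print('cleaning up')
        atexit.unregister(self.teardown)  # avoid a second run at exit

r = Resource()
r.teardown()  # explicit call; the exit hook is now unregistered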
tamasgal/km3pipe
km3pipe/db.py
we_are_in_lyon
def we_are_in_lyon(): """Check if we are on a Lyon machine""" import socket try: hostname = socket.gethostname() ip = socket.gethostbyname(hostname) except socket.gaierror: return False return ip.startswith("134.158.")
python
def we_are_in_lyon(): """Check if we are on a Lyon machine""" import socket try: hostname = socket.gethostname() ip = socket.gethostbyname(hostname) except socket.gaierror: return False return ip.startswith("134.158.")
[ "def", "we_are_in_lyon", "(", ")", ":", "import", "socket", "try", ":", "hostname", "=", "socket", ".", "gethostname", "(", ")", "ip", "=", "socket", ".", "gethostbyname", "(", "hostname", ")", "except", "socket", ".", "gaierror", ":", "return", "False", "return", "ip", ".", "startswith", "(", "\"134.158.\"", ")" ]
Check if we are on a Lyon machine
[ "Check", "if", "we", "are", "on", "a", "Lyon", "machine" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L62-L70
train
tamasgal/km3pipe
km3pipe/db.py
read_csv
def read_csv(text, sep="\t"): """Create a DataFrame from CSV text""" import pandas as pd # no top level load to make a faster import of db return pd.read_csv(StringIO(text), sep=sep)
python
def read_csv(text, sep="\t"): """Create a DataFrame from CSV text""" import pandas as pd # no top level load to make a faster import of db return pd.read_csv(StringIO(text), sep=sep)
[ "def", "read_csv", "(", "text", ",", "sep", "=", "\"\\t\"", ")", ":", "import", "pandas", "as", "pd", "# no top level load to make a faster import of db", "return", "pd", ".", "read_csv", "(", "StringIO", "(", "text", ")", ",", "sep", "=", "\"\\t\"", ")" ]
Create a DataFrame from CSV text
[ "Create", "a", "DataFrame", "from", "CSV", "text" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L73-L76
train
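The helper above is a thin wrapper around pandas; the equivalent direct call, with a small tab-separated sample:

from io import StringIO

import pandas as pd

text = "RUN\tVALUE\n1\t0.5\n2\t0.7\n"
df = pd.read_csv(StringIO(text), sep="\t")
print(df)  # two rows with columns RUN and VALUE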
tamasgal/km3pipe
km3pipe/db.py
add_datetime
def add_datetime(dataframe, timestamp_key='UNIXTIME'): """Add an additional DATETIME column with standard datetime format. This currently manipulates the incoming DataFrame! """ def convert_data(timestamp): return datetime.fromtimestamp(float(timestamp) / 1e3, UTC_TZ) try: log.debug("Adding DATETIME column to the data") converted = dataframe[timestamp_key].apply(convert_data) dataframe['DATETIME'] = converted except KeyError: log.warning("Could not add DATETIME column")
python
def add_datetime(dataframe, timestamp_key='UNIXTIME'): """Add an additional DATETIME column with standard datetime format. This currently manipulates the incoming DataFrame! """ def convert_data(timestamp): return datetime.fromtimestamp(float(timestamp) / 1e3, UTC_TZ) try: log.debug("Adding DATETIME column to the data") converted = dataframe[timestamp_key].apply(convert_data) dataframe['DATETIME'] = converted except KeyError: log.warning("Could not add DATETIME column")
[ "def", "add_datetime", "(", "dataframe", ",", "timestamp_key", "=", "'UNIXTIME'", ")", ":", "def", "convert_data", "(", "timestamp", ")", ":", "return", "datetime", ".", "fromtimestamp", "(", "float", "(", "timestamp", ")", "/", "1e3", ",", "UTC_TZ", ")", "try", ":", "log", ".", "debug", "(", "\"Adding DATETIME column to the data\"", ")", "converted", "=", "dataframe", "[", "timestamp_key", "]", ".", "apply", "(", "convert_data", ")", "dataframe", "[", "'DATETIME'", "]", "=", "converted", "except", "KeyError", ":", "log", ".", "warning", "(", "\"Could not add DATETIME column\"", ")" ]
Add an additional DATETIME column with standard datetime format. This currently manipulates the incoming DataFrame!
[ "Add", "an", "additional", "DATETIME", "column", "with", "standar", "datetime", "format", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L488-L502
train
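add_datetime treats UNIXTIME as milliseconds, hence the division by 1e3. The same conversion with the standard library's timezone.utc standing in for km3pipe's UTC_TZ and made-up sample timestamps:

from datetime import datetime, timezone

import pandas as pd

df = pd.DataFrame({'UNIXTIME': [1546300800000, 1546300860000]})  # milliseconds
df['DATETIME'] = df['UNIXTIME'].apply(
    lambda ts: datetime.fromtimestamp(float(ts) / 1e3, timezone.utc)
)
print(df['DATETIME'].iloc[0])  # 2019-01-01 00:00:00+00:00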
tamasgal/km3pipe
km3pipe/db.py
show_ahrs_calibration
def show_ahrs_calibration(clb_upi, version='3'): """Show AHRS calibration data for given `clb_upi`.""" db = DBManager() ahrs_upi = clbupi2ahrsupi(clb_upi) print("AHRS UPI: {}".format(ahrs_upi)) content = db._get_content("show_product_test.htm?upi={0}&" "testtype=AHRS-CALIBRATION-v{1}&n=1&out=xml" .format(ahrs_upi, version)) \ .replace('\n', '') import xml.etree.ElementTree as ET try: root = ET.parse(io.StringIO(content)).getroot() except ET.ParseError: print("No calibration data found") else: for child in root: print("{}: {}".format(child.tag, child.text)) names = [c.text for c in root.findall(".//Name")] values = [[i.text for i in c] for c in root.findall(".//Values")] for name, value in zip(names, values): print("{}: {}".format(name, value))
python
def show_ahrs_calibration(clb_upi, version='3'): """Show AHRS calibration data for given `clb_upi`.""" db = DBManager() ahrs_upi = clbupi2ahrsupi(clb_upi) print("AHRS UPI: {}".format(ahrs_upi)) content = db._get_content("show_product_test.htm?upi={0}&" "testtype=AHRS-CALIBRATION-v{1}&n=1&out=xml" .format(ahrs_upi, version)) \ .replace('\n', '') import xml.etree.ElementTree as ET try: root = ET.parse(io.StringIO(content)).getroot() except ET.ParseError: print("No calibration data found") else: for child in root: print("{}: {}".format(child.tag, child.text)) names = [c.text for c in root.findall(".//Name")] values = [[i.text for i in c] for c in root.findall(".//Values")] for name, value in zip(names, values): print("{}: {}".format(name, value))
[ "def", "show_ahrs_calibration", "(", "clb_upi", ",", "version", "=", "'3'", ")", ":", "db", "=", "DBManager", "(", ")", "ahrs_upi", "=", "clbupi2ahrsupi", "(", "clb_upi", ")", "print", "(", "\"AHRS UPI: {}\"", ".", "format", "(", "ahrs_upi", ")", ")", "content", "=", "db", ".", "_get_content", "(", "\"show_product_test.htm?upi={0}&\"", "\"testtype=AHRS-CALIBRATION-v{1}&n=1&out=xml\"", ".", "format", "(", "ahrs_upi", ",", "version", ")", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "import", "xml", ".", "etree", ".", "ElementTree", "as", "ET", "try", ":", "root", "=", "ET", ".", "parse", "(", "io", ".", "StringIO", "(", "content", ")", ")", ".", "getroot", "(", ")", "except", "ET", ".", "ParseError", ":", "print", "(", "\"No calibration data found\"", ")", "else", ":", "for", "child", "in", "root", ":", "print", "(", "\"{}: {}\"", ".", "format", "(", "child", ".", "tag", ",", "child", ".", "text", ")", ")", "names", "=", "[", "c", ".", "text", "for", "c", "in", "root", ".", "findall", "(", "\".//Name\"", ")", "]", "values", "=", "[", "[", "i", ".", "text", "for", "i", "in", "c", "]", "for", "c", "in", "root", ".", "findall", "(", "\".//Values\"", ")", "]", "for", "name", ",", "value", "in", "zip", "(", "names", ",", "values", ")", ":", "print", "(", "\"{}: {}\"", ".", "format", "(", "name", ",", "value", ")", ")" ]
Show AHRS calibration data for given `clb_upi`.
[ "Show", "AHRS", "calibration", "data", "for", "given", "clb_upi", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L826-L848
train
tamasgal/km3pipe
km3pipe/db.py
DBManager._datalog
def _datalog(self, parameter, run, maxrun, det_id): "Extract data from database" values = { 'parameter_name': parameter, 'minrun': run, 'maxrun': maxrun, 'detid': det_id, } data = urlencode(values) content = self._get_content('streamds/datalognumbers.txt?' + data) if content.startswith('ERROR'): log.error(content) return None try: dataframe = read_csv(content) except ValueError: log.warning( "Empty dataset" ) # ...probably. Waiting for more info return make_empty_dataset() else: add_datetime(dataframe) try: self._add_converted_units(dataframe, parameter) except KeyError: log.warning( "Could not add converted units for {0}".format(parameter) ) return dataframe
python
def _datalog(self, parameter, run, maxrun, det_id): "Extract data from database" values = { 'parameter_name': parameter, 'minrun': run, 'maxrun': maxrun, 'detid': det_id, } data = urlencode(values) content = self._get_content('streamds/datalognumbers.txt?' + data) if content.startswith('ERROR'): log.error(content) return None try: dataframe = read_csv(content) except ValueError: log.warning( "Empty dataset" ) # ...probably. Waiting for more info return make_empty_dataset() else: add_datetime(dataframe) try: self._add_converted_units(dataframe, parameter) except KeyError: log.warning( "Could not add converted units for {0}".format(parameter) ) return dataframe
[ "def", "_datalog", "(", "self", ",", "parameter", ",", "run", ",", "maxrun", ",", "det_id", ")", ":", "values", "=", "{", "'parameter_name'", ":", "parameter", ",", "'minrun'", ":", "run", ",", "'maxrun'", ":", "maxrun", ",", "'detid'", ":", "det_id", ",", "}", "data", "=", "urlencode", "(", "values", ")", "content", "=", "self", ".", "_get_content", "(", "'streamds/datalognumbers.txt?'", "+", "data", ")", "if", "content", ".", "startswith", "(", "'ERROR'", ")", ":", "log", ".", "error", "(", "content", ")", "return", "None", "try", ":", "dataframe", "=", "read_csv", "(", "content", ")", "except", "ValueError", ":", "log", ".", "warning", "(", "\"Empty dataset\"", ")", "# ...probably. Waiting for more info", "return", "make_empty_dataset", "(", ")", "else", ":", "add_datetime", "(", "dataframe", ")", "try", ":", "self", ".", "_add_converted_units", "(", "dataframe", ",", "parameter", ")", "except", "KeyError", ":", "log", ".", "warning", "(", "\"Could not add converted units for {0}\"", ".", "format", "(", "parameter", ")", ")", "return", "dataframe" ]
Extract data from database
[ "Extract", "data", "from", "database" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L134-L162
train
tamasgal/km3pipe
km3pipe/db.py
DBManager._add_converted_units
def _add_converted_units(self, dataframe, parameter, key='VALUE'): """Add an additional VALUE column with converted DATA_VALUEs""" convert_unit = self.parameters.get_converter(parameter) try: log.debug("Adding unit converted DATA_VALUE to the data") dataframe[key] = dataframe['DATA_VALUE'].apply(convert_unit) except KeyError: log.warning("Missing 'VALUE': no unit conversion.") else: dataframe.unit = self.parameters.unit(parameter)
python
def _add_converted_units(self, dataframe, parameter, key='VALUE'): """Add an additional VALUE column with converted DATA_VALUEs""" convert_unit = self.parameters.get_converter(parameter) try: log.debug("Adding unit converted DATA_VALUE to the data") dataframe[key] = dataframe['DATA_VALUE'].apply(convert_unit) except KeyError: log.warning("Missing 'VALUE': no unit conversion.") else: dataframe.unit = self.parameters.unit(parameter)
[ "def", "_add_converted_units", "(", "self", ",", "dataframe", ",", "parameter", ",", "key", "=", "'VALUE'", ")", ":", "convert_unit", "=", "self", ".", "parameters", ".", "get_converter", "(", "parameter", ")", "try", ":", "log", ".", "debug", "(", "\"Adding unit converted DATA_VALUE to the data\"", ")", "dataframe", "[", "key", "]", "=", "dataframe", "[", "'DATA_VALUE'", "]", ".", "apply", "(", "convert_unit", ")", "except", "KeyError", ":", "log", ".", "warning", "(", "\"Missing 'VALUE': no unit conversion.\"", ")", "else", ":", "dataframe", ".", "unit", "=", "self", ".", "parameters", ".", "unit", "(", "parameter", ")" ]
Add an additional VALUE column with converted DATA_VALUEs
[ "Add", "an", "additional", "DATA_VALUE", "column", "with", "converted", "VALUEs" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L178-L187
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.to_det_id
def to_det_id(self, det_id_or_det_oid): """Convert det ID or OID to det ID""" try: int(det_id_or_det_oid) except ValueError: return self.get_det_id(det_id_or_det_oid) else: return det_id_or_det_oid
python
def to_det_id(self, det_id_or_det_oid): """Convert det ID or OID to det ID""" try: int(det_id_or_det_oid) except ValueError: return self.get_det_id(det_id_or_det_oid) else: return det_id_or_det_oid
[ "def", "to_det_id", "(", "self", ",", "det_id_or_det_oid", ")", ":", "try", ":", "int", "(", "det_id_or_det_oid", ")", "except", "ValueError", ":", "return", "self", ".", "get_det_id", "(", "det_id_or_det_oid", ")", "else", ":", "return", "det_id_or_det_oid" ]
Convert det ID or OID to det ID
[ "Convert", "det", "ID", "or", "OID", "to", "det", "ID" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L223-L230
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.to_det_oid
def to_det_oid(self, det_id_or_det_oid): """Convert det OID or ID to det OID""" try: int(det_id_or_det_oid) except ValueError: return det_id_or_det_oid else: return self.get_det_oid(det_id_or_det_oid)
python
def to_det_oid(self, det_id_or_det_oid): """Convert det OID or ID to det OID""" try: int(det_id_or_det_oid) except ValueError: return det_id_or_det_oid else: return self.get_det_oid(det_id_or_det_oid)
[ "def", "to_det_oid", "(", "self", ",", "det_id_or_det_oid", ")", ":", "try", ":", "int", "(", "det_id_or_det_oid", ")", "except", "ValueError", ":", "return", "det_id_or_det_oid", "else", ":", "return", "self", ".", "get_det_oid", "(", "det_id_or_det_oid", ")" ]
Convert det OID or ID to det OID
[ "Convert", "det", "OID", "or", "ID", "to", "det", "OID" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L232-L239
train
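to_det_id and to_det_oid share one dispatch idiom: try int() and branch on the ValueError. The idiom on its own, as a hypothetical helper with an invented OID-style string:

def looks_like_det_id(value):
    # Numeric strings (and ints) parse; OID-style names raise ValueError.
    try:
        int(value)
    except ValueError:
        return False
    return True

print(looks_like_det_id('42'))       # True
print(looks_like_det_id('D_ORCA6'))  # False (made-up detector OID)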
tamasgal/km3pipe
km3pipe/db.py
DBManager._load_parameters
def _load_parameters(self): "Retrieve a list of available parameters from the database" parameters = self._get_json('allparam/s') data = {} for parameter in parameters: # There is a case-chaos in the DB data[parameter['Name'].lower()] = parameter self._parameters = ParametersContainer(data)
python
def _load_parameters(self): "Retrieve a list of available parameters from the database" parameters = self._get_json('allparam/s') data = {} for parameter in parameters: # There is a case-chaos in the DB data[parameter['Name'].lower()] = parameter self._parameters = ParametersContainer(data)
[ "def", "_load_parameters", "(", "self", ")", ":", "parameters", "=", "self", ".", "_get_json", "(", "'allparam/s'", ")", "data", "=", "{", "}", "for", "parameter", "in", "parameters", ":", "# There is a case-chaos in the DB", "data", "[", "parameter", "[", "'Name'", "]", ".", "lower", "(", ")", "]", "=", "parameter", "self", ".", "_parameters", "=", "ParametersContainer", "(", "data", ")" ]
Retrieve a list of available parameters from the database
[ "Retrieve", "a", "list", "of", "available", "parameters", "from", "the", "database" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L248-L254
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.trigger_setup
def trigger_setup(self, runsetup_oid): "Retrieve the trigger setup for a given runsetup OID" r = self._get_content( "jsonds/rslite/s?rs_oid={}&upifilter=1.1.2.2.3/*". format(runsetup_oid) ) data = json.loads(r)['Data'] if not data: log.error("Empty dataset.") return raw_setup = data[0] det_id = raw_setup['DetID'] name = raw_setup['Name'] description = raw_setup['Desc'] _optical_df = raw_setup['ConfGroups'][0] optical_df = {'Name': _optical_df['Name'], 'Desc': _optical_df['Desc']} for param in _optical_df['Params']: pname = self.parameters.oid2name(param['OID']).replace('DAQ_', '') try: dtype = float if '.' in param['Val'] else int val = dtype(param['Val']) except ValueError: val = param['Val'] optical_df[pname] = val _acoustic_df = raw_setup['ConfGroups'][1] acoustic_df = { 'Name': _acoustic_df['Name'], 'Desc': _acoustic_df['Desc'] } for param in _acoustic_df['Params']: pname = self.parameters.oid2name(param['OID']).replace('DAQ_', '') try: dtype = float if '.' in param['Val'] else int val = dtype(param['Val']) except ValueError: val = param['Val'] acoustic_df[pname] = val return TriggerSetup( runsetup_oid, name, det_id, description, optical_df, acoustic_df )
python
def trigger_setup(self, runsetup_oid): "Retrieve the trigger setup for a given runsetup OID" r = self._get_content( "jsonds/rslite/s?rs_oid={}&upifilter=1.1.2.2.3/*". format(runsetup_oid) ) data = json.loads(r)['Data'] if not data: log.error("Empty dataset.") return raw_setup = data[0] det_id = raw_setup['DetID'] name = raw_setup['Name'] description = raw_setup['Desc'] _optical_df = raw_setup['ConfGroups'][0] optical_df = {'Name': _optical_df['Name'], 'Desc': _optical_df['Desc']} for param in _optical_df['Params']: pname = self.parameters.oid2name(param['OID']).replace('DAQ_', '') try: dtype = float if '.' in param['Val'] else int val = dtype(param['Val']) except ValueError: val = param['Val'] optical_df[pname] = val _acoustic_df = raw_setup['ConfGroups'][1] acoustic_df = { 'Name': _acoustic_df['Name'], 'Desc': _acoustic_df['Desc'] } for param in _acoustic_df['Params']: pname = self.parameters.oid2name(param['OID']).replace('DAQ_', '') try: dtype = float if '.' in param['Val'] else int val = dtype(param['Val']) except ValueError: val = param['Val'] acoustic_df[pname] = val return TriggerSetup( runsetup_oid, name, det_id, description, optical_df, acoustic_df )
[ "def", "trigger_setup", "(", "self", ",", "runsetup_oid", ")", ":", "r", "=", "self", ".", "_get_content", "(", "\"jsonds/rslite/s?rs_oid={}&upifilter=1.1.2.2.3/*\"", ".", "format", "(", "runsetup_oid", ")", ")", "data", "=", "json", ".", "loads", "(", "r", ")", "[", "'Data'", "]", "if", "not", "data", ":", "log", ".", "error", "(", "\"Empty dataset.\"", ")", "return", "raw_setup", "=", "data", "[", "0", "]", "det_id", "=", "raw_setup", "[", "'DetID'", "]", "name", "=", "raw_setup", "[", "'Name'", "]", "description", "=", "raw_setup", "[", "'Desc'", "]", "_optical_df", "=", "raw_setup", "[", "'ConfGroups'", "]", "[", "0", "]", "optical_df", "=", "{", "'Name'", ":", "_optical_df", "[", "'Name'", "]", ",", "'Desc'", ":", "_optical_df", "[", "'Desc'", "]", "}", "for", "param", "in", "_optical_df", "[", "'Params'", "]", ":", "pname", "=", "self", ".", "parameters", ".", "oid2name", "(", "param", "[", "'OID'", "]", ")", ".", "replace", "(", "'DAQ_'", ",", "''", ")", "try", ":", "dtype", "=", "float", "if", "'.'", "in", "param", "[", "'Val'", "]", "else", "int", "val", "=", "dtype", "(", "param", "[", "'Val'", "]", ")", "except", "ValueError", ":", "val", "=", "param", "[", "'Val'", "]", "optical_df", "[", "pname", "]", "=", "val", "_acoustic_df", "=", "raw_setup", "[", "'ConfGroups'", "]", "[", "1", "]", "acoustic_df", "=", "{", "'Name'", ":", "_acoustic_df", "[", "'Name'", "]", ",", "'Desc'", ":", "_acoustic_df", "[", "'Desc'", "]", "}", "for", "param", "in", "_acoustic_df", "[", "'Params'", "]", ":", "pname", "=", "self", ".", "parameters", ".", "oid2name", "(", "param", "[", "'OID'", "]", ")", ".", "replace", "(", "'DAQ_'", ",", "''", ")", "try", ":", "dtype", "=", "float", "if", "'.'", "in", "param", "[", "'Val'", "]", "else", "int", "val", "=", "dtype", "(", "param", "[", "'Val'", "]", ")", "except", "ValueError", ":", "val", "=", "param", "[", "'Val'", "]", "acoustic_df", "[", "pname", "]", "=", "val", "return", "TriggerSetup", "(", "runsetup_oid", ",", "name", ",", "det_id", ",", "description", ",", "optical_df", ",", "acoustic_df", ")" ]
Retrieve the trigger setup for a given runsetup OID
[ "Retrieve", "the", "trigger", "setup", "for", "a", "given", "runsetup", "OID" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L256-L298
train
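trigger_setup parses each parameter value with a small type-inference step: float if the string contains a dot, int otherwise, and the raw string when neither parse succeeds. That step in isolation, as a hypothetical parse_val helper:

def parse_val(raw):
    try:
        dtype = float if '.' in raw else int
        return dtype(raw)
    except ValueError:
        return raw  # non-numeric values pass through unchanged

print(parse_val('10'))   # 10
print(parse_val('0.5'))  # 0.5
print(parse_val('ON'))   # 'ON'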
tamasgal/km3pipe
km3pipe/db.py
DBManager.detx
def detx(self, det_id, t0set=None, calibration=None): """Retrieve the detector file for given detector id If t0set is given, append the calibration data. """ url = 'detx/{0}?'.format(det_id) # '?' since it's ignored if no args if t0set is not None: url += '&t0set=' + t0set if calibration is not None: url += '&calibrid=' + calibration detx = self._get_content(url) return detx
python
def detx(self, det_id, t0set=None, calibration=None): """Retrieve the detector file for given detector id If t0set is given, append the calibration data. """ url = 'detx/{0}?'.format(det_id) # '?' since it's ignored if no args if t0set is not None: url += '&t0set=' + t0set if calibration is not None: url += '&calibrid=' + calibration detx = self._get_content(url) return detx
[ "def", "detx", "(", "self", ",", "det_id", ",", "t0set", "=", "None", ",", "calibration", "=", "None", ")", ":", "url", "=", "'detx/{0}?'", ".", "format", "(", "det_id", ")", "# '?' since it's ignored if no args", "if", "t0set", "is", "not", "None", ":", "url", "+=", "'&t0set='", "+", "t0set", "if", "calibration", "is", "not", "None", ":", "url", "+=", "'&calibrid='", "+", "calibration", "detx", "=", "self", ".", "_get_content", "(", "url", ")", "return", "detx" ]
Retrieve the detector file for given detector id If t0set is given, append the calibration data.
[ "Retrieve", "the", "detector", "file", "for", "given", "detector", "id" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L311-L323
train
tamasgal/km3pipe
km3pipe/db.py
DBManager._get_json
def _get_json(self, url): "Get JSON-type content" content = self._get_content('jsonds/' + url) try: json_content = json.loads(content.decode()) except AttributeError: json_content = json.loads(content) if json_content['Comment']: log.warning(json_content['Comment']) if json_content['Result'] != 'OK': raise ValueError('Error while retrieving the parameter list.') return json_content['Data']
python
def _get_json(self, url): "Get JSON-type content" content = self._get_content('jsonds/' + url) try: json_content = json.loads(content.decode()) except AttributeError: json_content = json.loads(content) if json_content['Comment']: log.warning(json_content['Comment']) if json_content['Result'] != 'OK': raise ValueError('Error while retrieving the parameter list.') return json_content['Data']
[ "def", "_get_json", "(", "self", ",", "url", ")", ":", "content", "=", "self", ".", "_get_content", "(", "'jsonds/'", "+", "url", ")", "try", ":", "json_content", "=", "json", ".", "loads", "(", "content", ".", "decode", "(", ")", ")", "except", "AttributeError", ":", "json_content", "=", "json", ".", "loads", "(", "content", ")", "if", "json_content", "[", "'Comment'", "]", ":", "log", ".", "warning", "(", "json_content", "[", "'Comment'", "]", ")", "if", "json_content", "[", "'Result'", "]", "!=", "'OK'", ":", "raise", "ValueError", "(", "'Error while retrieving the parameter list.'", ")", "return", "json_content", "[", "'Data'", "]" ]
Get JSON-type content
[ "Get", "JSON", "-", "type", "content" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L356-L367
train
tamasgal/km3pipe
km3pipe/db.py
DBManager._get_content
def _get_content(self, url): "Get HTML content" target_url = self._db_url + '/' + unquote(url) # .encode('utf-8')) log.debug("Opening '{0}'".format(target_url)) try: f = self.opener.open(target_url) except HTTPError as e: log.error("HTTP error, your session may be expired.") log.error(e) if input("Request new permanent session and retry? (y/n)") in 'yY': self.request_permanent_session() return self._get_content(url) else: return None log.debug("Accessing '{0}'".format(target_url)) try: content = f.read() except IncompleteRead as icread: log.critical( "Incomplete data received from the DB, " + "the data could be corrupted." ) content = icread.partial log.debug("Got {0} bytes of data.".format(len(content))) return content.decode('utf-8')
python
def _get_content(self, url): "Get HTML content" target_url = self._db_url + '/' + unquote(url) # .encode('utf-8')) log.debug("Opening '{0}'".format(target_url)) try: f = self.opener.open(target_url) except HTTPError as e: log.error("HTTP error, your session may be expired.") log.error(e) if input("Request new permanent session and retry? (y/n)") in 'yY': self.request_permanent_session() return self._get_content(url) else: return None log.debug("Accessing '{0}'".format(target_url)) try: content = f.read() except IncompleteRead as icread: log.critical( "Incomplete data received from the DB, " + "the data could be corrupted." ) content = icread.partial log.debug("Got {0} bytes of data.".format(len(content))) return content.decode('utf-8')
[ "def", "_get_content", "(", "self", ",", "url", ")", ":", "target_url", "=", "self", ".", "_db_url", "+", "'/'", "+", "unquote", "(", "url", ")", "# .encode('utf-8'))", "log", ".", "debug", "(", "\"Opening '{0}'\"", ".", "format", "(", "target_url", ")", ")", "try", ":", "f", "=", "self", ".", "opener", ".", "open", "(", "target_url", ")", "except", "HTTPError", "as", "e", ":", "log", ".", "error", "(", "\"HTTP error, your session may be expired.\"", ")", "log", ".", "error", "(", "e", ")", "if", "input", "(", "\"Request new permanent session and retry? (y/n)\"", ")", "in", "'yY'", ":", "self", ".", "request_permanent_session", "(", ")", "return", "self", ".", "_get_content", "(", "url", ")", "else", ":", "return", "None", "log", ".", "debug", "(", "\"Accessing '{0}'\"", ".", "format", "(", "target_url", ")", ")", "try", ":", "content", "=", "f", ".", "read", "(", ")", "except", "IncompleteRead", "as", "icread", ":", "log", ".", "critical", "(", "\"Incomplete data received from the DB, \"", "+", "\"the data could be corrupted.\"", ")", "content", "=", "icread", ".", "partial", "log", ".", "debug", "(", "\"Got {0} bytes of data.\"", ".", "format", "(", "len", "(", "content", ")", ")", ")", "return", "content", ".", "decode", "(", "'utf-8'", ")" ]
Get HTML content
[ "Get", "HTML", "content" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L369-L393
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.opener
def opener(self): "A reusable connection manager" if self._opener is None: log.debug("Creating connection handler") opener = build_opener() if self._cookies: log.debug("Appending cookies") else: log.debug("No cookies to append") for cookie in self._cookies: cookie_str = cookie.name + '=' + cookie.value opener.addheaders.append(('Cookie', cookie_str)) self._opener = opener else: log.debug("Reusing connection manager") return self._opener
python
def opener(self): "A reusable connection manager" if self._opener is None: log.debug("Creating connection handler") opener = build_opener() if self._cookies: log.debug("Appending cookies") else: log.debug("No cookies to append") for cookie in self._cookies: cookie_str = cookie.name + '=' + cookie.value opener.addheaders.append(('Cookie', cookie_str)) self._opener = opener else: log.debug("Reusing connection manager") return self._opener
[ "def", "opener", "(", "self", ")", ":", "if", "self", ".", "_opener", "is", "None", ":", "log", ".", "debug", "(", "\"Creating connection handler\"", ")", "opener", "=", "build_opener", "(", ")", "if", "self", ".", "_cookies", ":", "log", ".", "debug", "(", "\"Appending cookies\"", ")", "else", ":", "log", ".", "debug", "(", "\"No cookies to append\"", ")", "for", "cookie", "in", "self", ".", "_cookies", ":", "cookie_str", "=", "cookie", ".", "name", "+", "'='", "+", "cookie", ".", "value", "opener", ".", "addheaders", ".", "append", "(", "(", "'Cookie'", ",", "cookie_str", ")", ")", "self", ".", "_opener", "=", "opener", "else", ":", "log", ".", "debug", "(", "\"Reusing connection manager\"", ")", "return", "self", ".", "_opener" ]
A reusable connection manager
[ "A", "reusable", "connection", "manager" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L396-L411
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.request_sid_cookie
def request_sid_cookie(self, username, password): """Request cookie for permanent session token.""" log.debug("Requesting SID cookie") target_url = self._login_url + '?usr={0}&pwd={1}&persist=y'.format( username, password ) cookie = urlopen(target_url).read() return cookie
python
def request_sid_cookie(self, username, password): """Request cookie for permanent session token.""" log.debug("Requesting SID cookie") target_url = self._login_url + '?usr={0}&pwd={1}&persist=y'.format( username, password ) cookie = urlopen(target_url).read() return cookie
[ "def", "request_sid_cookie", "(", "self", ",", "username", ",", "password", ")", ":", "log", ".", "debug", "(", "\"Requesting SID cookie\"", ")", "target_url", "=", "self", ".", "_login_url", "+", "'?usr={0}&pwd={1}&persist=y'", ".", "format", "(", "username", ",", "password", ")", "cookie", "=", "urlopen", "(", "target_url", ")", ".", "read", "(", ")", "return", "cookie" ]
Request cookie for permanent session token.
[ "Request", "cookie", "for", "permanent", "session", "token", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L413-L420
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.restore_session
def restore_session(self, cookie): """Establish database connection using permanent session cookie""" log.debug("Restoring session from cookie: {}".format(cookie)) opener = build_opener() opener.addheaders.append(('Cookie', cookie)) self._opener = opener
python
def restore_session(self, cookie): """Establish database connection using permanent session cookie""" log.debug("Restoring session from cookie: {}".format(cookie)) opener = build_opener() opener.addheaders.append(('Cookie', cookie)) self._opener = opener
[ "def", "restore_session", "(", "self", ",", "cookie", ")", ":", "log", ".", "debug", "(", "\"Restoring session from cookie: {}\"", ".", "format", "(", "cookie", ")", ")", "opener", "=", "build_opener", "(", ")", "opener", ".", "addheaders", ".", "append", "(", "(", "'Cookie'", ",", "cookie", ")", ")", "self", ".", "_opener", "=", "opener" ]
Establish database connection using permanent session cookie
[ "Establish", "databse", "connection", "using", "permanent", "session", "cookie" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L422-L427
train
tamasgal/km3pipe
km3pipe/db.py
DBManager.login
def login(self, username, password): "Login to the database and store cookies for upcoming requests." log.debug("Logging in to the DB") opener = self._build_opener() values = {'usr': username, 'pwd': password} req = self._make_request(self._login_url, values) try: log.debug("Sending login request") f = opener.open(req) except URLError as e: log.error("Failed to connect to the database -> probably down!") log.error("Error from database server:\n {0}".format(e)) return False html = f.read() failed_auth_message = 'Bad username or password' if failed_auth_message in str(html): log.error(failed_auth_message) return False return True
python
def login(self, username, password): "Login to the database and store cookies for upcoming requests." log.debug("Logging in to the DB") opener = self._build_opener() values = {'usr': username, 'pwd': password} req = self._make_request(self._login_url, values) try: log.debug("Sending login request") f = opener.open(req) except URLError as e: log.error("Failed to connect to the database -> probably down!") log.error("Error from database server:\n {0}".format(e)) return False html = f.read() failed_auth_message = 'Bad username or password' if failed_auth_message in str(html): log.error(failed_auth_message) return False return True
[ "def", "login", "(", "self", ",", "username", ",", "password", ")", ":", "log", ".", "debug", "(", "\"Logging in to the DB\"", ")", "opener", "=", "self", ".", "_build_opener", "(", ")", "values", "=", "{", "'usr'", ":", "username", ",", "'pwd'", ":", "password", "}", "req", "=", "self", ".", "_make_request", "(", "self", ".", "_login_url", ",", "values", ")", "try", ":", "log", ".", "debug", "(", "\"Sending login request\"", ")", "f", "=", "opener", ".", "open", "(", "req", ")", "except", "URLError", "as", "e", ":", "log", ".", "error", "(", "\"Failed to connect to the database -> probably down!\"", ")", "log", ".", "error", "(", "\"Error from database server:\\n {0}\"", ".", "format", "(", "e", ")", ")", "return", "False", "html", "=", "f", ".", "read", "(", ")", "failed_auth_message", "=", "'Bad username or password'", "if", "failed_auth_message", "in", "str", "(", "html", ")", ":", "log", ".", "error", "(", "failed_auth_message", ")", "return", "False", "return", "True" ]
Login to the database and store cookies for upcoming requests.
[ "Login", "to", "the", "database", "and", "store", "cookies", "for", "upcoming", "requests", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L449-L467
train
tamasgal/km3pipe
km3pipe/db.py
StreamDS._update_streams
def _update_streams(self): """Update the list of available streams""" content = self._db._get_content("streamds") self._stream_df = read_csv(content).sort_values("STREAM") self._streams = None for stream in self.streams: setattr(self, stream, self.__getattr__(stream))
python
def _update_streams(self): """Update the list of available streams""" content = self._db._get_content("streamds") self._stream_df = read_csv(content).sort_values("STREAM") self._streams = None for stream in self.streams: setattr(self, stream, self.__getattr__(stream))
[ "def", "_update_streams", "(", "self", ")", ":", "content", "=", "self", ".", "_db", ".", "_get_content", "(", "\"streamds\"", ")", "self", ".", "_stream_df", "=", "read_csv", "(", "content", ")", ".", "sort_values", "(", "\"STREAM\"", ")", "self", ".", "_streams", "=", "None", "for", "stream", "in", "self", ".", "streams", ":", "setattr", "(", "self", ",", "stream", ",", "self", ".", "__getattr__", "(", "stream", ")", ")" ]
Update the list of available streams
[ "Update", "the", "list", "of", "available", "straems" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L517-L523
train
tamasgal/km3pipe
km3pipe/db.py
StreamDS.streams
def streams(self): """A list of available streams""" if self._streams is None: self._streams = list(self._stream_df["STREAM"].values) return self._streams
python
def streams(self): """A list of available streams""" if self._streams is None: self._streams = list(self._stream_df["STREAM"].values) return self._streams
[ "def", "streams", "(", "self", ")", ":", "if", "self", ".", "_streams", "is", "None", ":", "self", ".", "_streams", "=", "list", "(", "self", ".", "_stream_df", "[", "\"STREAM\"", "]", ".", "values", ")", "return", "self", ".", "_streams" ]
A list of available streams
[ "A", "list", "of", "available", "streams" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L553-L557
train
tamasgal/km3pipe
km3pipe/db.py
StreamDS.help
def help(self, stream): """Show the help for a given stream.""" if stream not in self.streams: log.error("Stream '{}' not found in the database.".format(stream)) return params = self._stream_df[self._stream_df['STREAM'] == stream].values[0] self._print_stream_parameters(params)
python
def help(self, stream): """Show the help for a given stream.""" if stream not in self.streams: log.error("Stream '{}' not found in the database.".format(stream)) return params = self._stream_df[self._stream_df['STREAM'] == stream].values[0] self._print_stream_parameters(params)
[ "def", "help", "(", "self", ",", "stream", ")", ":", "if", "stream", "not", "in", "self", ".", "streams", ":", "log", ".", "error", "(", "\"Stream '{}' not found in the database.\"", ".", "format", "(", "stream", ")", ")", "params", "=", "self", ".", "_stream_df", "[", "self", ".", "_stream_df", "[", "'STREAM'", "]", "==", "stream", "]", ".", "values", "[", "0", "]", "self", ".", "_print_stream_parameters", "(", "params", ")" ]
Show the help for a given stream.
[ "Show", "the", "help", "for", "a", "given", "stream", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L574-L579
train
tamasgal/km3pipe
km3pipe/db.py
StreamDS._print_stream_parameters
def _print_stream_parameters(self, values): """Print a coloured help for a given tuple of stream parameters.""" cprint("{0}".format(*values), "magenta", attrs=["bold"]) print("{4}".format(*values)) cprint(" available formats: {1}".format(*values), "blue") cprint(" mandatory selectors: {2}".format(*values), "red") cprint(" optional selectors: {3}".format(*values), "green") print()
python
def _print_stream_parameters(self, values): """Print a coloured help for a given tuple of stream parameters.""" cprint("{0}".format(*values), "magenta", attrs=["bold"]) print("{4}".format(*values)) cprint(" available formats: {1}".format(*values), "blue") cprint(" mandatory selectors: {2}".format(*values), "red") cprint(" optional selectors: {3}".format(*values), "green") print()
[ "def", "_print_stream_parameters", "(", "self", ",", "values", ")", ":", "cprint", "(", "\"{0}\"", ".", "format", "(", "*", "values", ")", ",", "\"magenta\"", ",", "attrs", "=", "[", "\"bold\"", "]", ")", "print", "(", "\"{4}\"", ".", "format", "(", "*", "values", ")", ")", "cprint", "(", "\" available formats: {1}\"", ".", "format", "(", "*", "values", ")", ",", "\"blue\"", ")", "cprint", "(", "\" mandatory selectors: {2}\"", ".", "format", "(", "*", "values", ")", ",", "\"red\"", ")", "cprint", "(", "\" optional selectors: {3}\"", ".", "format", "(", "*", "values", ")", ",", "\"green\"", ")", "print", "(", ")" ]
Print a coloured help for a given tuple of stream parameters.
[ "Print", "a", "coloured", "help", "for", "a", "given", "tuple", "of", "stream", "parameters", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L586-L593
train
tamasgal/km3pipe
km3pipe/db.py
StreamDS.get
def get(self, stream, fmt='txt', **kwargs): """Get the data for a given stream manually""" sel = ''.join(["&{0}={1}".format(k, v) for (k, v) in kwargs.items()]) url = "streamds/{0}.{1}?{2}".format(stream, fmt, sel[1:]) data = self._db._get_content(url) if not data: log.error("No data found at URL '%s'." % url) return if (data.startswith("ERROR")): log.error(data) return if fmt == "txt": return read_csv(data) return data
python
def get(self, stream, fmt='txt', **kwargs): """Get the data for a given stream manually""" sel = ''.join(["&{0}={1}".format(k, v) for (k, v) in kwargs.items()]) url = "streamds/{0}.{1}?{2}".format(stream, fmt, sel[1:]) data = self._db._get_content(url) if not data: log.error("No data found at URL '%s'." % url) return if (data.startswith("ERROR")): log.error(data) return if fmt == "txt": return read_csv(data) return data
[ "def", "get", "(", "self", ",", "stream", ",", "fmt", "=", "'txt'", ",", "*", "*", "kwargs", ")", ":", "sel", "=", "''", ".", "join", "(", "[", "\"&{0}={1}\"", ".", "format", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "kwargs", ".", "items", "(", ")", "]", ")", "url", "=", "\"streamds/{0}.{1}?{2}\"", ".", "format", "(", "stream", ",", "fmt", ",", "sel", "[", "1", ":", "]", ")", "data", "=", "self", ".", "_db", ".", "_get_content", "(", "url", ")", "if", "not", "data", ":", "log", ".", "error", "(", "\"No data found at URL '%s'.\"", "%", "url", ")", "return", "if", "(", "data", ".", "startswith", "(", "\"ERROR\"", ")", ")", ":", "log", ".", "error", "(", "data", ")", "return", "if", "fmt", "==", "\"txt\"", ":", "return", "read_csv", "(", "data", ")", "return", "data" ]
Get the data for a given stream manually
[ "Get", "the", "data", "for", "a", "given", "stream", "manually" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L595-L608
train
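StreamDS.get assembles its URL by joining every keyword argument as an &key=value selector and trimming the leading ampersand. The same assembly as a hypothetical stand-alone helper:

def build_url(stream, fmt='txt', **kwargs):
    sel = ''.join('&{0}={1}'.format(k, v) for k, v in kwargs.items())
    return 'streamds/{0}.{1}?{2}'.format(stream, fmt, sel[1:])

print(build_url('runs', detid=29, minrun=100))
# streamds/runs.txt?detid=29&minrun=100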
tamasgal/km3pipe
km3pipe/db.py
ParametersContainer.get_parameter
def get_parameter(self, parameter): "Return a dict for given parameter" parameter = self._get_parameter_name(parameter) return self._parameters[parameter]
python
def get_parameter(self, parameter): "Return a dict for given parameter" parameter = self._get_parameter_name(parameter) return self._parameters[parameter]
[ "def", "get_parameter", "(", "self", ",", "parameter", ")", ":", "parameter", "=", "self", ".", "_get_parameter_name", "(", "parameter", ")", "return", "self", ".", "_parameters", "[", "parameter", "]" ]
Return a dict for given parameter
[ "Return", "a", "dict", "for", "given", "parameter" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L624-L627
train
tamasgal/km3pipe
km3pipe/db.py
ParametersContainer.get_converter
def get_converter(self, parameter): """Generate unit conversion function for given parameter""" if parameter not in self._converters: param = self.get_parameter(parameter) try: scale = float(param['Scale']) except KeyError: scale = 1 def convert(value): # easy_scale = float(param['EasyScale']) # easy_scale_multiplier = float(param['EasyScaleMultiplier']) return value * scale return convert
python
def get_converter(self, parameter): """Generate unit conversion function for given parameter""" if parameter not in self._converters: param = self.get_parameter(parameter) try: scale = float(param['Scale']) except KeyError: scale = 1 def convert(value): # easy_scale = float(param['EasyScale']) # easy_scale_multiplier = float(param['EasyScaleMultiplier']) return value * scale return convert
[ "def", "get_converter", "(", "self", ",", "parameter", ")", ":", "if", "parameter", "not", "in", "self", ".", "_converters", ":", "param", "=", "self", ".", "get_parameter", "(", "parameter", ")", "try", ":", "scale", "=", "float", "(", "param", "[", "'Scale'", "]", ")", "except", "KeyError", ":", "scale", "=", "1", "def", "convert", "(", "value", ")", ":", "# easy_scale = float(param['EasyScale'])", "# easy_scale_multiplier = float(param['EasyScaleMultiplier'])", "return", "value", "*", "scale", "return", "convert" ]
Generate unit conversion function for given parameter
[ "Generate", "unit", "conversion", "function", "for", "given", "parameter" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L629-L643
train
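get_converter returns a closure that captures the scale factor looked up for the parameter. The same construction with a made-up scale:

def make_scaler(scale):
    def convert(value):
        return value * scale  # the captured scale is applied per value
    return convert

to_volts = make_scaler(1e-3)  # e.g. millivolts -> volts
print(to_volts(1500))         # 1.5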
tamasgal/km3pipe
km3pipe/db.py
ParametersContainer.unit
def unit(self, parameter): "Get the unit for given parameter" parameter = self._get_parameter_name(parameter).lower() return self._parameters[parameter]['Unit']
python
def unit(self, parameter): "Get the unit for given parameter" parameter = self._get_parameter_name(parameter).lower() return self._parameters[parameter]['Unit']
[ "def", "unit", "(", "self", ",", "parameter", ")", ":", "parameter", "=", "self", ".", "_get_parameter_name", "(", "parameter", ")", ".", "lower", "(", ")", "return", "self", ".", "_parameters", "[", "parameter", "]", "[", "'Unit'", "]" ]
Get the unit for given parameter
[ "Get", "the", "unit", "for", "given", "parameter" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L645-L648
train
tamasgal/km3pipe
km3pipe/db.py
ParametersContainer.oid2name
def oid2name(self, oid): "Look up the parameter name for a given OID" if not self._oid_lookup: for name, data in self._parameters.items(): self._oid_lookup[data['OID']] = data['Name'] return self._oid_lookup[oid]
python
def oid2name(self, oid): "Look up the parameter name for a given OID" if not self._oid_lookup: for name, data in self._parameters.items(): self._oid_lookup[data['OID']] = data['Name'] return self._oid_lookup[oid]
[ "def", "oid2name", "(", "self", ",", "oid", ")", ":", "if", "not", "self", ".", "_oid_lookup", ":", "for", "name", ",", "data", "in", "self", ".", "_parameters", ".", "items", "(", ")", ":", "self", ".", "_oid_lookup", "[", "data", "[", "'OID'", "]", "]", "=", "data", "[", "'Name'", "]", "return", "self", ".", "_oid_lookup", "[", "oid", "]" ]
Look up the parameter name for a given OID
[ "Look", "up", "the", "parameter", "name", "for", "a", "given", "OID" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L650-L655
train
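oid2name builds its reverse index lazily, once, on first lookup. The core of that index as a dict comprehension over invented parameter data:

parameters = {
    'pmt_rate': {'OID': 101, 'Name': 'PMT_Rate'},  # made-up entries
    'hv': {'OID': 102, 'Name': 'HV'},
}
oid_lookup = {data['OID']: data['Name'] for data in parameters.values()}
print(oid_lookup[101])  # PMT_Rate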
tamasgal/km3pipe
km3pipe/db.py
DOMContainer.via_dom_id
def via_dom_id(self, dom_id, det_id): """Return DOM for given dom_id""" try: return DOM.from_json([ d for d in self._json if d["DOMId"] == dom_id and d["DetOID"] == det_id ][0]) except IndexError: log.critical("No DOM found for DOM ID '{0}'".format(dom_id))
python
def via_dom_id(self, dom_id, det_id): """Return DOM for given dom_id""" try: return DOM.from_json([ d for d in self._json if d["DOMId"] == dom_id and d["DetOID"] == det_id ][0]) except IndexError: log.critical("No DOM found for DOM ID '{0}'".format(dom_id))
[ "def", "via_dom_id", "(", "self", ",", "dom_id", ",", "det_id", ")", ":", "try", ":", "return", "DOM", ".", "from_json", "(", "[", "d", "for", "d", "in", "self", ".", "_json", "if", "d", "[", "\"DOMId\"", "]", "==", "dom_id", "and", "d", "[", "\"DetOID\"", "]", "==", "det_id", "]", "[", "0", "]", ")", "except", "IndexError", ":", "log", ".", "critical", "(", "\"No DOM found for DOM ID '{0}'\"", ".", "format", "(", "dom_id", ")", ")" ]
Return DOM for given dom_id
[ "Return", "DOM", "for", "given", "dom_id" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L717-L725
train
tamasgal/km3pipe
km3pipe/db.py
DOMContainer.via_clb_upi
def via_clb_upi(self, clb_upi, det_id): """Return DOM for given CLB UPI""" try: return DOM.from_json([ d for d in self._json if d["CLBUPI"] == clb_upi and d["DetOID"] == det_id ][0]) except IndexError: log.critical("No DOM found for CLB UPI '{0}'".format(clb_upi))
python
def via_clb_upi(self, clb_upi, det_id): """Return DOM for given CLB UPI""" try: return DOM.from_json([ d for d in self._json if d["CLBUPI"] == clb_upi and d["DetOID"] == det_id ][0]) except IndexError: log.critical("No DOM found for CLB UPI '{0}'".format(clb_upi))
[ "def", "via_clb_upi", "(", "self", ",", "clb_upi", ",", "det_id", ")", ":", "try", ":", "return", "DOM", ".", "from_json", "(", "[", "d", "for", "d", "in", "self", ".", "_json", "if", "d", "[", "\"CLBUPI\"", "]", "==", "clb_upi", "and", "d", "[", "\"DetOID\"", "]", "==", "det_id", "]", "[", "0", "]", ")", "except", "IndexError", ":", "log", ".", "critical", "(", "\"No DOM found for CLB UPI '{0}'\"", ".", "format", "(", "clb_upi", ")", ")" ]
Return DOM for given CLB UPI
[ "return", "DOM", "for", "given", "CLB", "UPI" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L727-L735
train
tamasgal/km3pipe
km3pipe/db.py
CLBMap.upi
def upi(self): """A dict of CLBs with UPI as key""" parameter = 'UPI' if parameter not in self._by: self._populate(by=parameter) return self._by[parameter]
python
def upi(self): """A dict of CLBs with UPI as key""" parameter = 'UPI' if parameter not in self._by: self._populate(by=parameter) return self._by[parameter]
[ "def", "upi", "(", "self", ")", ":", "parameter", "=", "'UPI'", "if", "parameter", "not", "in", "self", ".", "_by", ":", "self", ".", "_populate", "(", "by", "=", "parameter", ")", "return", "self", ".", "_by", "[", "parameter", "]" ]
A dict of CLBs with UPI as key
[ "A", "dict", "of", "CLBs", "with", "UPI", "as", "key" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L879-L884
train
tamasgal/km3pipe
km3pipe/db.py
CLBMap.dom_id
def dom_id(self): """A dict of CLBs with DOM ID as key""" parameter = 'DOMID' if parameter not in self._by: self._populate(by=parameter) return self._by[parameter]
python
def dom_id(self): """A dict of CLBs with DOM ID as key""" parameter = 'DOMID' if parameter not in self._by: self._populate(by=parameter) return self._by[parameter]
[ "def", "dom_id", "(", "self", ")", ":", "parameter", "=", "'DOMID'", "if", "parameter", "not", "in", "self", ".", "_by", ":", "self", ".", "_populate", "(", "by", "=", "parameter", ")", "return", "self", ".", "_by", "[", "parameter", "]" ]
A dict of CLBs with DOM ID as key
[ "A", "dict", "of", "CLBs", "with", "DOM", "ID", "as", "key" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L887-L892
train
tamasgal/km3pipe
km3pipe/db.py
CLBMap.base
def base(self, du): """Return the base CLB for a given DU""" parameter = 'base' if parameter not in self._by: self._by[parameter] = {} for clb in self.upi.values(): if clb.floor == 0: self._by[parameter][clb.du] = clb return self._by[parameter][du]
python
def base(self, du): """Return the base CLB for a given DU""" parameter = 'base' if parameter not in self._by: self._by[parameter] = {} for clb in self.upi.values(): if clb.floor == 0: self._by[parameter][clb.du] = clb return self._by[parameter][du]
[ "def", "base", "(", "self", ",", "du", ")", ":", "parameter", "=", "'base'", "if", "parameter", "not", "in", "self", ".", "_by", ":", "self", ".", "_by", "[", "parameter", "]", "=", "{", "}", "for", "clb", "in", "self", ".", "upi", ".", "values", "(", ")", ":", "if", "clb", ".", "floor", "==", "0", ":", "self", ".", "_by", "[", "parameter", "]", "[", "clb", ".", "du", "]", "=", "clb", "return", "self", ".", "_by", "[", "parameter", "]", "[", "du", "]" ]
Return the base CLB for a given DU
[ "Return", "the", "base", "CLB", "for", "a", "given", "DU" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L894-L902
train
IRC-SPHERE/HyperStream
hyperstream/channels/database_channel.py
DatabaseChannel.get_results
def get_results(self, stream, time_interval): """ Get the results for a given stream :param time_interval: The time interval :param stream: The stream object :return: A generator over stream instances """ query = stream.stream_id.as_raw() query['datetime'] = {'$gt': time_interval.start, '$lte': time_interval.end} with switch_db(StreamInstanceModel, 'hyperstream'): for instance in StreamInstanceModel.objects(__raw__=query): yield StreamInstance(timestamp=instance.datetime, value=instance.value)
python
def get_results(self, stream, time_interval): """ Get the results for a given stream :param time_interval: The time interval :param stream: The stream object :return: A generator over stream instances """ query = stream.stream_id.as_raw() query['datetime'] = {'$gt': time_interval.start, '$lte': time_interval.end} with switch_db(StreamInstanceModel, 'hyperstream'): for instance in StreamInstanceModel.objects(__raw__=query): yield StreamInstance(timestamp=instance.datetime, value=instance.value)
[ "def", "get_results", "(", "self", ",", "stream", ",", "time_interval", ")", ":", "query", "=", "stream", ".", "stream_id", ".", "as_raw", "(", ")", "query", "[", "'datetime'", "]", "=", "{", "'$gt'", ":", "time_interval", ".", "start", ",", "'$lte'", ":", "time_interval", ".", "end", "}", "with", "switch_db", "(", "StreamInstanceModel", ",", "'hyperstream'", ")", ":", "for", "instance", "in", "StreamInstanceModel", ".", "objects", "(", "__raw__", "=", "query", ")", ":", "yield", "StreamInstance", "(", "timestamp", "=", "instance", ".", "datetime", ",", "value", "=", "instance", ".", "value", ")" ]
Get the results for a given stream :param time_interval: The time interval :param stream: The stream object :return: A generator over stream instances
[ "Get", "the", "results", "for", "a", "given", "stream" ]
98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/database_channel.py#L57-L69
train
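The query get_results sends to MongoDB is the raw stream id plus a half-open datetime window: strictly greater than the interval start, less than or equal to its end. A sketch of that filter shape; the exact keys produced by stream_id.as_raw() are not shown in the source, so the 'stream_id' key below is a placeholder:

# Sketch of the raw query shape: stream id fields plus a
# (start, end] datetime window. 'stream_id' is a placeholder key.
from datetime import datetime

start, end = datetime(2024, 1, 1), datetime(2024, 1, 2)
query = {'stream_id': 'example_stream'}
query['datetime'] = {'$gt': start, '$lte': end}
print(query)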
tamasgal/km3pipe
km3pipe/io/clb.py
CLBPump.seek_to_packet
def seek_to_packet(self, index): """Move file pointer to the packet with given index.""" pointer_position = self.packet_positions[index] self.blob_file.seek(pointer_position, 0)
python
def seek_to_packet(self, index): """Move file pointer to the packet with given index.""" pointer_position = self.packet_positions[index] self.blob_file.seek(pointer_position, 0)
[ "def", "seek_to_packet", "(", "self", ",", "index", ")", ":", "pointer_position", "=", "self", ".", "packet_positions", "[", "index", "]", "self", ".", "blob_file", ".", "seek", "(", "pointer_position", ",", "0", ")" ]
Move file pointer to the packet with given index.
[ "Move", "file", "pointer", "to", "the", "packet", "with", "given", "index", "." ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/clb.py#L59-L62
train
tamasgal/km3pipe
km3pipe/io/clb.py
CLBPump.next_blob
def next_blob(self): """Generate next blob in file""" try: length = struct.unpack('<i', self.blob_file.read(4))[0] except struct.error: raise StopIteration header = CLBHeader(file_obj=self.blob_file) blob = {'CLBHeader': header} remaining_length = length - header.size pmt_data = [] pmt_raw_data = self.blob_file.read(remaining_length) pmt_raw_data_io = BytesIO(pmt_raw_data) for _ in range(int(remaining_length / 6)): channel_id, time, tot = struct.unpack( '>cic', pmt_raw_data_io.read(6) ) pmt_data.append(PMTData(ord(channel_id), time, ord(tot))) blob['PMTData'] = pmt_data blob['PMTRawData'] = pmt_raw_data return blob
python
def next_blob(self): """Generate next blob in file""" try: length = struct.unpack('<i', self.blob_file.read(4))[0] except struct.error: raise StopIteration header = CLBHeader(file_obj=self.blob_file) blob = {'CLBHeader': header} remaining_length = length - header.size pmt_data = [] pmt_raw_data = self.blob_file.read(remaining_length) pmt_raw_data_io = BytesIO(pmt_raw_data) for _ in range(int(remaining_length / 6)): channel_id, time, tot = struct.unpack( '>cic', pmt_raw_data_io.read(6) ) pmt_data.append(PMTData(ord(channel_id), time, ord(tot))) blob['PMTData'] = pmt_data blob['PMTRawData'] = pmt_raw_data return blob
[ "def", "next_blob", "(", "self", ")", ":", "try", ":", "length", "=", "struct", ".", "unpack", "(", "'<i'", ",", "self", ".", "blob_file", ".", "read", "(", "4", ")", ")", "[", "0", "]", "except", "struct", ".", "error", ":", "raise", "StopIteration", "header", "=", "CLBHeader", "(", "file_obj", "=", "self", ".", "blob_file", ")", "blob", "=", "{", "'CLBHeader'", ":", "header", "}", "remaining_length", "=", "length", "-", "header", ".", "size", "pmt_data", "=", "[", "]", "pmt_raw_data", "=", "self", ".", "blob_file", ".", "read", "(", "remaining_length", ")", "pmt_raw_data_io", "=", "BytesIO", "(", "pmt_raw_data", ")", "for", "_", "in", "range", "(", "int", "(", "remaining_length", "/", "6", ")", ")", ":", "channel_id", ",", "time", ",", "tot", "=", "struct", ".", "unpack", "(", "'>cic'", ",", "pmt_raw_data_io", ".", "read", "(", "6", ")", ")", "pmt_data", ".", "append", "(", "PMTData", "(", "ord", "(", "channel_id", ")", ",", "time", ",", "ord", "(", "tot", ")", ")", ")", "blob", "[", "'PMTData'", "]", "=", "pmt_data", "blob", "[", "'PMTRawData'", "]", "=", "pmt_raw_data", "return", "blob" ]
Generate next blob in file
[ "Generate", "next", "blob", "in", "file" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/clb.py#L64-L83
train
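Each blob is framed as a little-endian 4-byte length, a CLBHeader, and then a run of 6-byte PMT hits unpacked with '>cic': one channel byte, a big-endian 32-bit time, and one time-over-threshold byte. A sketch of decoding a single hit from fabricated bytes:

# Sketch: decoding one 6-byte PMT hit exactly as next_blob does.
# The byte string is fabricated: channel 1, time 1000, ToT 20.
import struct

raw = bytes([0x01, 0x00, 0x00, 0x03, 0xE8, 0x14])
channel_id, time, tot = struct.unpack('>cic', raw)
print(ord(channel_id), time, ord(tot))  # -> 1 1000 20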
PrefPy/prefpy
prefpy/mechanism.py
getKendallTauScore
def getKendallTauScore(myResponse, otherResponse): """ Returns the Kendall Tau Score """ # variables kt = 0 list1 = myResponse.values() list2 = otherResponse.values() if len(list1) <= 1: return kt # runs through list1 for itr1 in range(0, len(list1) - 1): # runs through list2 for itr2 in range(itr1 + 1, len(list2)): # checks if there is a discrepancy. If so, adds if ((list1[itr1] > list1[itr2] and list2[itr1] < list2[itr2]) or (list1[itr1] < list1[itr2] and list2[itr1] > list2[itr2])): kt += 1 # normalizes between 0 and 1 kt = (kt * 2) / (len(list1) * (len(list1) - 1)) # returns found value return kt
python
def getKendallTauScore(myResponse, otherResponse): """ Returns the Kendall Tau Score """ # variables kt = 0 list1 = myResponse.values() list2 = otherResponse.values() if len(list1) <= 1: return kt # runs through list1 for itr1 in range(0, len(list1) - 1): # runs through list2 for itr2 in range(itr1 + 1, len(list2)): # checks if there is a discrepancy. If so, adds if ((list1[itr1] > list1[itr2] and list2[itr1] < list2[itr2]) or (list1[itr1] < list1[itr2] and list2[itr1] > list2[itr2])): kt += 1 # normalizes between 0 and 1 kt = (kt * 2) / (len(list1) * (len(list1) - 1)) # returns found value return kt
[ "def", "getKendallTauScore", "(", "myResponse", ",", "otherResponse", ")", ":", "# variables", "kt", "=", "0", "list1", "=", "myResponse", ".", "values", "(", ")", "list2", "=", "otherResponse", ".", "values", "(", ")", "if", "len", "(", "list1", ")", "<=", "1", ":", "return", "kt", "# runs through list1", "for", "itr1", "in", "range", "(", "0", ",", "len", "(", "list1", ")", "-", "1", ")", ":", "# runs through list2", "for", "itr2", "in", "range", "(", "itr1", "+", "1", ",", "len", "(", "list2", ")", ")", ":", "# checks if there is a discrepancy. If so, adds", "if", "(", "(", "list1", "[", "itr1", "]", ">", "list1", "[", "itr2", "]", "and", "list2", "[", "itr1", "]", "<", "list2", "[", "itr2", "]", ")", "or", "(", "list1", "[", "itr1", "]", "<", "list1", "[", "itr2", "]", "and", "list2", "[", "itr1", "]", ">", "list2", "[", "itr2", "]", ")", ")", ":", "kt", "+=", "1", "# normalizes between 0 and 1", "kt", "=", "(", "kt", "*", "2", ")", "/", "(", "len", "(", "list1", ")", "*", "(", "len", "(", "list1", ")", "-", "1", ")", ")", "# returns found value", "return", "kt" ]
Returns the Kendall Tau Score
[ "Returns", "the", "Kendall", "Tau", "Score" ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L580-L606
train
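Despite its name, the function above counts discordant pairs and normalizes to [0, 1], i.e. a Kendall tau distance (0 for identical rankings, 1 for fully reversed ones). As written it is Python-2-only: dict views are not indexable on Python 3, and on Python 2 the final division truncates to an integer. A Python-3-safe sketch of the same computation, assuming both responses rank the same alternatives:

# Python-3-safe variant of the normalized Kendall tau distance above.
def kendall_tau_distance(my_response, other_response):
    ranks1 = list(my_response.values())
    ranks2 = list(other_response.values())
    n = len(ranks1)
    if n <= 1:
        return 0.0
    discordant = 0
    for i in range(n - 1):
        for j in range(i + 1, n):
            # A discordant pair is ordered oppositely in the two rankings.
            if (ranks1[i] - ranks1[j]) * (ranks2[i] - ranks2[j]) < 0:
                discordant += 1
    return 2.0 * discordant / (n * (n - 1))

print(kendall_tau_distance({'a': 1, 'b': 2, 'c': 3},
                           {'a': 3, 'b': 2, 'c': 1}))  # -> 1.0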
PrefPy/prefpy
prefpy/mechanism.py
MechanismPosScoring.getCandScoresMap
def getCandScoresMap(self, profile): """ Returns a dictionary that associates the integer representation of each candidate with the score they received in the profile. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() # Initialize our dictionary so that all candidates have a score of zero. candScoresMap = dict() for cand in profile.candMap.keys(): candScoresMap[cand] = 0.0 rankMaps = profile.getRankMaps() rankMapCounts = profile.getPreferenceCounts() scoringVector = self.getScoringVector(profile) # Go through the rankMaps of the profile and increment each candidates score appropriately. for i in range(0, len(rankMaps)): rankMap = rankMaps[i] rankMapCount = rankMapCounts[i] for cand in rankMap.keys(): candScoresMap[cand] += scoringVector[rankMap[cand] - 1] * rankMapCount # print("candScoresMap=", candScoresMap) return candScoresMap
python
def getCandScoresMap(self, profile): """ Returns a dictionary that associates the integer representation of each candidate with the score they received in the profile. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() # Initialize our dictionary so that all candidates have a score of zero. candScoresMap = dict() for cand in profile.candMap.keys(): candScoresMap[cand] = 0.0 rankMaps = profile.getRankMaps() rankMapCounts = profile.getPreferenceCounts() scoringVector = self.getScoringVector(profile) # Go through the rankMaps of the profile and increment each candidates score appropriately. for i in range(0, len(rankMaps)): rankMap = rankMaps[i] rankMapCount = rankMapCounts[i] for cand in rankMap.keys(): candScoresMap[cand] += scoringVector[rankMap[cand] - 1] * rankMapCount # print("candScoresMap=", candScoresMap) return candScoresMap
[ "def", "getCandScoresMap", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "# Initialize our dictionary so that all candidates have a score of zero.", "candScoresMap", "=", "dict", "(", ")", "for", "cand", "in", "profile", ".", "candMap", ".", "keys", "(", ")", ":", "candScoresMap", "[", "cand", "]", "=", "0.0", "rankMaps", "=", "profile", ".", "getRankMaps", "(", ")", "rankMapCounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "scoringVector", "=", "self", ".", "getScoringVector", "(", "profile", ")", "# Go through the rankMaps of the profile and increment each candidates score appropriately.", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rankMaps", ")", ")", ":", "rankMap", "=", "rankMaps", "[", "i", "]", "rankMapCount", "=", "rankMapCounts", "[", "i", "]", "for", "cand", "in", "rankMap", ".", "keys", "(", ")", ":", "candScoresMap", "[", "cand", "]", "+=", "scoringVector", "[", "rankMap", "[", "cand", "]", "-", "1", "]", "*", "rankMapCount", "# print(\"candScoresMap=\", candScoresMap)", "return", "candScoresMap" ]
Returns a dictionary that associates the integer representation of each candidate with the score they received in the profile. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "dictonary", "that", "associates", "the", "integer", "representation", "of", "each", "candidate", "with", "the", "score", "they", "recieved", "in", "the", "profile", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L128-L159
train
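The scoring loop above weights each candidate's position with a scoring vector and multiplies by the multiplicity of that ranking; with the vector (m-1, m-2, ..., 0) this is the Borda count. A minimal sketch of the same bookkeeping on a toy profile:

# Toy profile: two distinct rankings (candidate -> position) with
# multiplicities, scored with the Borda vector (2, 1, 0) for m = 3.
rank_maps = [{1: 1, 2: 2, 3: 3}, {1: 2, 2: 1, 3: 3}]
counts = [3, 2]
scoring_vector = [2, 1, 0]

scores = {cand: 0.0 for cand in rank_maps[0]}
for rank_map, count in zip(rank_maps, counts):
    for cand, pos in rank_map.items():
        scores[cand] += scoring_vector[pos - 1] * count
print(scores)  # -> {1: 8.0, 2: 7.0, 3: 0.0}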
PrefPy/prefpy
prefpy/mechanism.py
MechanismPosScoring.getMov
def getMov(self, profile): """ Returns an integer that is equal to the margin of victory of the election profile. :ivar Profile profile: A Profile object that represents an election profile. """ # from . import mov import mov return mov.MoVScoring(profile, self.getScoringVector(profile))
python
def getMov(self, profile): """ Returns an integer that is equal to the margin of victory of the election profile. :ivar Profile profile: A Profile object that represents an election profile. """ # from . import mov import mov return mov.MoVScoring(profile, self.getScoringVector(profile))
[ "def", "getMov", "(", "self", ",", "profile", ")", ":", "# from . import mov", "import", "mov", "return", "mov", ".", "MoVScoring", "(", "profile", ",", "self", ".", "getScoringVector", "(", "profile", ")", ")" ]
Returns an integer that is equal to the margin of victory of the election profile. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "an", "integer", "that", "is", "equal", "to", "the", "margin", "of", "victory", "of", "the", "election", "profile", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L161-L169
train
PrefPy/prefpy
prefpy/mechanism.py
MechanismSimplifiedBucklin.getCandScoresMap
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with their Bucklin score. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported profile type") exit() bucklinScores = dict() rankMaps = profile.getRankMaps() preferenceCounts = profile.getPreferenceCounts() for cand in profile.candMap.keys(): # We keep track of the number of times a candidate is ranked in the first t positions. numTimesRanked = 0 # We increase t in increments of 1 until we find t such that the candidate is ranked in the # first t positions in at least half the votes. for t in range(1, profile.numCands + 1): for i in range(0, len(rankMaps)): if (rankMaps[i][cand] == t): numTimesRanked += preferenceCounts[i] if numTimesRanked >= math.ceil(float(profile.numVoters) / 2): bucklinScores[cand] = t break return bucklinScores
python
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with their Bucklin score. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported profile type") exit() bucklinScores = dict() rankMaps = profile.getRankMaps() preferenceCounts = profile.getPreferenceCounts() for cand in profile.candMap.keys(): # We keep track of the number of times a candidate is ranked in the first t positions. numTimesRanked = 0 # We increase t in increments of 1 until we find t such that the candidate is ranked in the # first t positions in at least half the votes. for t in range(1, profile.numCands + 1): for i in range(0, len(rankMaps)): if (rankMaps[i][cand] == t): numTimesRanked += preferenceCounts[i] if numTimesRanked >= math.ceil(float(profile.numVoters) / 2): bucklinScores[cand] = t break return bucklinScores
[ "def", "getCandScoresMap", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", ":", "print", "(", "\"ERROR: unsupported profile type\"", ")", "exit", "(", ")", "bucklinScores", "=", "dict", "(", ")", "rankMaps", "=", "profile", ".", "getRankMaps", "(", ")", "preferenceCounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "for", "cand", "in", "profile", ".", "candMap", ".", "keys", "(", ")", ":", "# We keep track of the number of times a candidate is ranked in the first t positions.", "numTimesRanked", "=", "0", "# We increase t in increments of 1 until we find t such that the candidate is ranked in the", "# first t positions in at least half the votes.", "for", "t", "in", "range", "(", "1", ",", "profile", ".", "numCands", "+", "1", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "rankMaps", ")", ")", ":", "if", "(", "rankMaps", "[", "i", "]", "[", "cand", "]", "==", "t", ")", ":", "numTimesRanked", "+=", "preferenceCounts", "[", "i", "]", "if", "numTimesRanked", ">=", "math", ".", "ceil", "(", "float", "(", "profile", ".", "numVoters", ")", "/", "2", ")", ":", "bucklinScores", "[", "cand", "]", "=", "t", "break", "return", "bucklinScores" ]
Returns a dictionary that associates integer representations of each candidate with their Bucklin score. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "dictionary", "that", "associates", "integer", "representations", "of", "each", "candidate", "with", "their", "Bucklin", "score", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L335-L367
train
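A candidate's simplified Bucklin score is the smallest depth t at which at least half the voters rank her in the top t positions, so lower scores are better. A worked sketch on three toy votes; note the count accumulates across t, mirroring numTimesRanked in the loop above:

# Sketch: simplified Bucklin scores for three toy ranked votes.
import math

rank_maps = [{1: 1, 2: 2, 3: 3}, {1: 1, 2: 3, 3: 2}, {1: 3, 2: 1, 3: 2}]
counts = [1, 1, 1]
majority = math.ceil(sum(counts) / 2)

scores = {}
for cand in (1, 2, 3):
    ranked = 0  # accumulates across t, as numTimesRanked does above
    for t in range(1, 4):
        ranked += sum(c for rm, c in zip(rank_maps, counts) if rm[cand] == t)
        if ranked >= majority:
            scores[cand] = t
            break
print(scores)  # -> {1: 1, 2: 2, 3: 2}, so candidate 1 wins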
PrefPy/prefpy
prefpy/mechanism.py
MechanismMaximin.getCandScoresMap
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with their maximin score. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() wmg = profile.getWmg() # Initialize the maximin score for each candidate as infinity. maximinScores = dict() for cand in wmg.keys(): maximinScores[cand] = float("inf") # For each pair of candidates, calculate the number of times each beats the other. for cand1, cand2 in itertools.combinations(wmg.keys(), 2): if cand2 in wmg[cand1].keys(): maximinScores[cand1] = min(maximinScores[cand1], wmg[cand1][cand2]) maximinScores[cand2] = min(maximinScores[cand2], wmg[cand2][cand1]) return maximinScores
python
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with their maximin score. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc": print("ERROR: unsupported election type") exit() wmg = profile.getWmg() # Initialize the maximin score for each candidate as infinity. maximinScores = dict() for cand in wmg.keys(): maximinScores[cand] = float("inf") # For each pair of candidates, calculate the number of times each beats the other. for cand1, cand2 in itertools.combinations(wmg.keys(), 2): if cand2 in wmg[cand1].keys(): maximinScores[cand1] = min(maximinScores[cand1], wmg[cand1][cand2]) maximinScores[cand2] = min(maximinScores[cand2], wmg[cand2][cand1]) return maximinScores
[ "def", "getCandScoresMap", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "wmg", "=", "profile", ".", "getWmg", "(", ")", "# Initialize the maximin score for each candidate as infinity.", "maximinScores", "=", "dict", "(", ")", "for", "cand", "in", "wmg", ".", "keys", "(", ")", ":", "maximinScores", "[", "cand", "]", "=", "float", "(", "\"inf\"", ")", "# For each pair of candidates, calculate the number of times each beats the other.", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "wmg", ".", "keys", "(", ")", ",", "2", ")", ":", "if", "cand2", "in", "wmg", "[", "cand1", "]", ".", "keys", "(", ")", ":", "maximinScores", "[", "cand1", "]", "=", "min", "(", "maximinScores", "[", "cand1", "]", ",", "wmg", "[", "cand1", "]", "[", "cand2", "]", ")", "maximinScores", "[", "cand2", "]", "=", "min", "(", "maximinScores", "[", "cand2", "]", ",", "wmg", "[", "cand2", "]", "[", "cand1", "]", ")", "return", "maximinScores" ]
Returns a dictionary that associates integer representations of each candidate with their maximin score. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "dictionary", "that", "associates", "integer", "representations", "of", "each", "candidate", "with", "their", "maximin", "score", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L436-L464
train
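A candidate's maximin score is her worst pairwise margin in the weighted majority graph, and the candidate with the largest such minimum wins. A sketch on a hand-written wmg, where wmg[a][b] is the margin of a over b (so wmg[a][b] == -wmg[b][a]):

# Sketch: maximin scores from a hand-written weighted majority graph.
import itertools

wmg = {1: {2: 4, 3: -2}, 2: {1: -4, 3: 6}, 3: {1: 2, 2: -6}}

scores = {cand: float('inf') for cand in wmg}
for c1, c2 in itertools.combinations(wmg, 2):
    scores[c1] = min(scores[c1], wmg[c1][c2])
    scores[c2] = min(scores[c2], wmg[c2][c1])

print(scores)  # -> {1: -2, 2: -4, 3: -6}: candidate 1 wins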
PrefPy/prefpy
prefpy/mechanism.py
MechanismSchulze.computeStrongestPaths
def computeStrongestPaths(self, profile, pairwisePreferences): """ Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with the strongest path from cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2. """ cands = profile.candMap.keys() numCands = len(cands) # Initialize the two-dimensional dictionary that will hold our strongest paths. strongestPaths = dict() for cand in cands: strongestPaths[cand] = dict() for i in range(1, numCands + 1): for j in range(1, numCands + 1): if (i == j): continue if pairwisePreferences[i][j] > pairwisePreferences[j][i]: strongestPaths[i][j] = pairwisePreferences[i][j] else: strongestPaths[i][j] = 0 for i in range(1, numCands + 1): for j in range(1, numCands + 1): if (i == j): continue for k in range(1, numCands + 1): if (i == k or j == k): continue strongestPaths[j][k] = max(strongestPaths[j][k], min(strongestPaths[j][i], strongestPaths[i][k])) return strongestPaths
python
def computeStrongestPaths(self, profile, pairwisePreferences): """ Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with the strongest path from cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2. """ cands = profile.candMap.keys() numCands = len(cands) # Initialize the two-dimensional dictionary that will hold our strongest paths. strongestPaths = dict() for cand in cands: strongestPaths[cand] = dict() for i in range(1, numCands + 1): for j in range(1, numCands + 1): if (i == j): continue if pairwisePreferences[i][j] > pairwisePreferences[j][i]: strongestPaths[i][j] = pairwisePreferences[i][j] else: strongestPaths[i][j] = 0 for i in range(1, numCands + 1): for j in range(1, numCands + 1): if (i == j): continue for k in range(1, numCands + 1): if (i == k or j == k): continue strongestPaths[j][k] = max(strongestPaths[j][k], min(strongestPaths[j][i], strongestPaths[i][k])) return strongestPaths
[ "def", "computeStrongestPaths", "(", "self", ",", "profile", ",", "pairwisePreferences", ")", ":", "cands", "=", "profile", ".", "candMap", ".", "keys", "(", ")", "numCands", "=", "len", "(", "cands", ")", "# Initialize the two-dimensional dictionary that will hold our strongest paths.", "strongestPaths", "=", "dict", "(", ")", "for", "cand", "in", "cands", ":", "strongestPaths", "[", "cand", "]", "=", "dict", "(", ")", "for", "i", "in", "range", "(", "1", ",", "numCands", "+", "1", ")", ":", "for", "j", "in", "range", "(", "1", ",", "numCands", "+", "1", ")", ":", "if", "(", "i", "==", "j", ")", ":", "continue", "if", "pairwisePreferences", "[", "i", "]", "[", "j", "]", ">", "pairwisePreferences", "[", "j", "]", "[", "i", "]", ":", "strongestPaths", "[", "i", "]", "[", "j", "]", "=", "pairwisePreferences", "[", "i", "]", "[", "j", "]", "else", ":", "strongestPaths", "[", "i", "]", "[", "j", "]", "=", "0", "for", "i", "in", "range", "(", "1", ",", "numCands", "+", "1", ")", ":", "for", "j", "in", "range", "(", "1", ",", "numCands", "+", "1", ")", ":", "if", "(", "i", "==", "j", ")", ":", "continue", "for", "k", "in", "range", "(", "1", ",", "numCands", "+", "1", ")", ":", "if", "(", "i", "==", "k", "or", "j", "==", "k", ")", ":", "continue", "strongestPaths", "[", "j", "]", "[", "k", "]", "=", "max", "(", "strongestPaths", "[", "j", "]", "[", "k", "]", ",", "min", "(", "strongestPaths", "[", "j", "]", "[", "i", "]", ",", "strongestPaths", "[", "i", "]", "[", "k", "]", ")", ")", "return", "strongestPaths" ]
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with the strongest path from cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. :ivar dict<int,dict<int,int>> pairwisePreferences: A two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2.
[ "Returns", "a", "two", "-", "dimensional", "dictionary", "that", "associates", "every", "pair", "of", "candidates", "cand1", "and", "cand2", "with", "the", "strongest", "path", "from", "cand1", "to", "cand2", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L475-L511
train
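The triple loop above is the widest-path (maximize-the-minimum-edge) variant of the Floyd-Warshall algorithm: a path's strength is its weakest link, and strongestPaths keeps the strongest such path between every ordered pair. A sketch on a three-candidate majority cycle where Schulze still produces a winner:

# Sketch: widest-path relaxation on a tiny pairwise-preference table,
# mirroring computeStrongestPaths (9 voters, cycle 1 > 2 > 3 > 1).
prefs = {1: {2: 7, 3: 3}, 2: {1: 2, 3: 6}, 3: {1: 6, 2: 3}}
cands = [1, 2, 3]

strongest = {i: {j: (prefs[i][j] if prefs[i][j] > prefs[j][i] else 0)
                 for j in cands if j != i} for i in cands}
for i in cands:          # i is the intermediate candidate
    for j in cands:
        if i == j:
            continue
        for k in cands:
            if k in (i, j):
                continue
            strongest[j][k] = max(strongest[j][k],
                                  min(strongest[j][i], strongest[i][k]))

print(strongest)
# -> {1: {2: 7, 3: 6}, 2: {1: 6, 3: 6}, 3: {1: 6, 2: 6}}
# Candidate 1 wins: its path to each rival is at least as strong
# as the rival's path back, despite the pairwise cycle.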
PrefPy/prefpy
prefpy/mechanism.py
MechanismSchulze.computePairwisePreferences
def computePairwisePreferences(self, profile): """ Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. """ cands = profile.candMap.keys() # Initialize the two-dimensional dictionary that will hold our pairwise preferences. pairwisePreferences = dict() for cand in cands: pairwisePreferences[cand] = dict() for cand1 in cands: for cand2 in cands: if cand1 != cand2: pairwisePreferences[cand1][cand2] = 0 for preference in profile.preferences: wmgMap = preference.wmgMap for cand1, cand2 in itertools.combinations(cands, 2): # If either candidate was unranked, we assume that they are lower ranked than all # ranked candidates. if cand1 not in wmgMap.keys(): if cand2 in wmgMap.keys(): pairwisePreferences[cand2][cand1] += 1 * preference.count elif cand2 not in wmgMap.keys(): if cand1 in wmgMap.keys(): pairwisePreferences[cand1][cand2] += 1 * preference.count elif wmgMap[cand1][cand2] == 1: pairwisePreferences[cand1][cand2] += 1 * preference.count elif wmgMap[cand1][cand2] == -1: pairwisePreferences[cand2][cand1] += 1 * preference.count return pairwisePreferences
python
def computePairwisePreferences(self, profile): """ Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile. """ cands = profile.candMap.keys() # Initialize the two-dimensional dictionary that will hold our pairwise preferences. pairwisePreferences = dict() for cand in cands: pairwisePreferences[cand] = dict() for cand1 in cands: for cand2 in cands: if cand1 != cand2: pairwisePreferences[cand1][cand2] = 0 for preference in profile.preferences: wmgMap = preference.wmgMap for cand1, cand2 in itertools.combinations(cands, 2): # If either candidate was unranked, we assume that they are lower ranked than all # ranked candidates. if cand1 not in wmgMap.keys(): if cand2 in wmgMap.keys(): pairwisePreferences[cand2][cand1] += 1 * preference.count elif cand2 not in wmgMap.keys(): if cand1 in wmgMap.keys(): pairwisePreferences[cand1][cand2] += 1 * preference.count elif wmgMap[cand1][cand2] == 1: pairwisePreferences[cand1][cand2] += 1 * preference.count elif wmgMap[cand1][cand2] == -1: pairwisePreferences[cand2][cand1] += 1 * preference.count return pairwisePreferences
[ "def", "computePairwisePreferences", "(", "self", ",", "profile", ")", ":", "cands", "=", "profile", ".", "candMap", ".", "keys", "(", ")", "# Initialize the two-dimensional dictionary that will hold our pairwise preferences.", "pairwisePreferences", "=", "dict", "(", ")", "for", "cand", "in", "cands", ":", "pairwisePreferences", "[", "cand", "]", "=", "dict", "(", ")", "for", "cand1", "in", "cands", ":", "for", "cand2", "in", "cands", ":", "if", "cand1", "!=", "cand2", ":", "pairwisePreferences", "[", "cand1", "]", "[", "cand2", "]", "=", "0", "for", "preference", "in", "profile", ".", "preferences", ":", "wmgMap", "=", "preference", ".", "wmgMap", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "cands", ",", "2", ")", ":", "# If either candidate was unranked, we assume that they are lower ranked than all", "# ranked candidates.", "if", "cand1", "not", "in", "wmgMap", ".", "keys", "(", ")", ":", "if", "cand2", "in", "wmgMap", ".", "keys", "(", ")", ":", "pairwisePreferences", "[", "cand2", "]", "[", "cand1", "]", "+=", "1", "*", "preference", ".", "count", "elif", "cand2", "not", "in", "wmgMap", ".", "keys", "(", ")", ":", "if", "cand1", "in", "wmgMap", ".", "keys", "(", ")", ":", "pairwisePreferences", "[", "cand1", "]", "[", "cand2", "]", "+=", "1", "*", "preference", ".", "count", "elif", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "==", "1", ":", "pairwisePreferences", "[", "cand1", "]", "[", "cand2", "]", "+=", "1", "*", "preference", ".", "count", "elif", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "==", "-", "1", ":", "pairwisePreferences", "[", "cand2", "]", "[", "cand1", "]", "+=", "1", "*", "preference", ".", "count", "return", "pairwisePreferences" ]
Returns a two-dimensional dictionary that associates every pair of candidates, cand1 and cand2, with number of voters who prefer cand1 to cand2. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "two", "-", "dimensional", "dictionary", "that", "associates", "every", "pair", "of", "candidates", "cand1", "and", "cand2", "with", "number", "of", "voters", "who", "prefer", "cand1", "to", "cand2", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L513-L550
train
PrefPy/prefpy
prefpy/mechanism.py
MechanismSchulze.getCandScoresMap
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with the number of other candidates for which her strongest path to the other candidate is greater than the other candidate's strongest path to her. :ivar Profile profile: A Profile object that represents an election profile. """ cands = profile.candMap.keys() pairwisePreferences = self.computePairwisePreferences(profile) strongestPaths = self.computeStrongestPaths(profile, pairwisePreferences) # For each candidate, determine how many times p[E,X] >= p[X,E] using a variant of the # Floyd-Warshall algorithm. betterCount = dict() for cand in cands: betterCount[cand] = 0 for cand1 in cands: for cand2 in cands: if cand1 == cand2: continue if strongestPaths[cand1][cand2] >= strongestPaths[cand2][cand1]: betterCount[cand1] += 1 return betterCount
python
def getCandScoresMap(self, profile): """ Returns a dictionary that associates integer representations of each candidate with the number of other candidates for which her strongest path to the other candidate is greater than the other candidate's strongest path to her. :ivar Profile profile: A Profile object that represents an election profile. """ cands = profile.candMap.keys() pairwisePreferences = self.computePairwisePreferences(profile) strongestPaths = self.computeStrongestPaths(profile, pairwisePreferences) # For each candidate, determine how many times p[E,X] >= p[X,E] using a variant of the # Floyd-Warshall algorithm. betterCount = dict() for cand in cands: betterCount[cand] = 0 for cand1 in cands: for cand2 in cands: if cand1 == cand2: continue if strongestPaths[cand1][cand2] >= strongestPaths[cand2][cand1]: betterCount[cand1] += 1 return betterCount
[ "def", "getCandScoresMap", "(", "self", ",", "profile", ")", ":", "cands", "=", "profile", ".", "candMap", ".", "keys", "(", ")", "pairwisePreferences", "=", "self", ".", "computePairwisePreferences", "(", "profile", ")", "strongestPaths", "=", "self", ".", "computeStrongestPaths", "(", "profile", ",", "pairwisePreferences", ")", "# For each candidate, determine how many times p[E,X] >= p[X,E] using a variant of the", "# Floyd-Warshall algorithm.", "betterCount", "=", "dict", "(", ")", "for", "cand", "in", "cands", ":", "betterCount", "[", "cand", "]", "=", "0", "for", "cand1", "in", "cands", ":", "for", "cand2", "in", "cands", ":", "if", "cand1", "==", "cand2", ":", "continue", "if", "strongestPaths", "[", "cand1", "]", "[", "cand2", "]", ">=", "strongestPaths", "[", "cand2", "]", "[", "cand1", "]", ":", "betterCount", "[", "cand1", "]", "+=", "1", "return", "betterCount" ]
Returns a dictionary that associates integer representations of each candidate with the number of other candidates for which her strongest path to the other candidate is greater than the other candidate's strongest path to her. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "dictionary", "that", "associates", "integer", "representations", "of", "each", "candidate", "with", "the", "number", "of", "other", "candidates", "for", "which", "her", "strongest", "path", "to", "the", "other", "candidate", "is", "greater", "than", "the", "other", "candidate", "s", "stronget", "path", "to", "her", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L552-L577
train
PrefPy/prefpy
prefpy/mechanism.py
MechanismSTV.STVsocwinners
def STVsocwinners(self, profile): """ Returns an integer list that represents all possible winners of a profile under STV rule. :ivar Profile profile: A Profile object that represents an election profile. """ ordering = profile.getOrderVectors() prefcounts = profile.getPreferenceCounts() m = profile.numCands if min(ordering[0]) == 0: startstate = set(range(m)) else: startstate = set(range(1, m + 1)) ordering, startstate = self.preprocessing(ordering, prefcounts, m, startstate) m_star = len(startstate) known_winners = set() # ----------Some statistics-------------- hashtable2 = set() # push the node of start state into the priority queue root = Node(value=startstate) stackNode = [] stackNode.append(root) while stackNode: # ------------pop the current node----------------- node = stackNode.pop() # ------------------------------------------------- state = node.value.copy() # use heuristic to delete all the candidates which satisfy the following condition # goal state 1: if the state set contains only 1 candidate, then stop if len(state) == 1 and list(state)[0] not in known_winners: known_winners.add(list(state)[0]) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners: continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = self.get_plurality_scores3(prefcounts, ordering, state, m_star) minscore = min(plural_score.values()) for to_be_deleted in state: if plural_score[to_be_deleted] == minscore: child_state = state.copy() child_state.remove(to_be_deleted) tpc = tuple(sorted(child_state)) if tpc in hashtable2: continue else: hashtable2.add(tpc) child_node = Node(value=child_state) stackNode.append(child_node) return sorted(known_winners)
python
def STVsocwinners(self, profile): """ Returns an integer list that represents all possible winners of a profile under STV rule. :ivar Profile profile: A Profile object that represents an election profile. """ ordering = profile.getOrderVectors() prefcounts = profile.getPreferenceCounts() m = profile.numCands if min(ordering[0]) == 0: startstate = set(range(m)) else: startstate = set(range(1, m + 1)) ordering, startstate = self.preprocessing(ordering, prefcounts, m, startstate) m_star = len(startstate) known_winners = set() # ----------Some statistics-------------- hashtable2 = set() # push the node of start state into the priority queue root = Node(value=startstate) stackNode = [] stackNode.append(root) while stackNode: # ------------pop the current node----------------- node = stackNode.pop() # ------------------------------------------------- state = node.value.copy() # use heuristic to delete all the candidates which satisfy the following condition # goal state 1: if the state set contains only 1 candidate, then stop if len(state) == 1 and list(state)[0] not in known_winners: known_winners.add(list(state)[0]) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners: continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = self.get_plurality_scores3(prefcounts, ordering, state, m_star) minscore = min(plural_score.values()) for to_be_deleted in state: if plural_score[to_be_deleted] == minscore: child_state = state.copy() child_state.remove(to_be_deleted) tpc = tuple(sorted(child_state)) if tpc in hashtable2: continue else: hashtable2.add(tpc) child_node = Node(value=child_state) stackNode.append(child_node) return sorted(known_winners)
[ "def", "STVsocwinners", "(", "self", ",", "profile", ")", ":", "ordering", "=", "profile", ".", "getOrderVectors", "(", ")", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "m", "=", "profile", ".", "numCands", "if", "min", "(", "ordering", "[", "0", "]", ")", "==", "0", ":", "startstate", "=", "set", "(", "range", "(", "m", ")", ")", "else", ":", "startstate", "=", "set", "(", "range", "(", "1", ",", "m", "+", "1", ")", ")", "ordering", ",", "startstate", "=", "self", ".", "preprocessing", "(", "ordering", ",", "prefcounts", ",", "m", ",", "startstate", ")", "m_star", "=", "len", "(", "startstate", ")", "known_winners", "=", "set", "(", ")", "# ----------Some statistics--------------", "hashtable2", "=", "set", "(", ")", "# push the node of start state into the priority queue", "root", "=", "Node", "(", "value", "=", "startstate", ")", "stackNode", "=", "[", "]", "stackNode", ".", "append", "(", "root", ")", "while", "stackNode", ":", "# ------------pop the current node-----------------", "node", "=", "stackNode", ".", "pop", "(", ")", "# -------------------------------------------------", "state", "=", "node", ".", "value", ".", "copy", "(", ")", "# use heuristic to delete all the candidates which satisfy the following condition", "# goal state 1: if the state set contains only 1 candidate, then stop", "if", "len", "(", "state", ")", "==", "1", "and", "list", "(", "state", ")", "[", "0", "]", "not", "in", "known_winners", ":", "known_winners", ".", "add", "(", "list", "(", "state", ")", "[", "0", "]", ")", "continue", "# goal state 2 (pruning): if the state set is subset of the known_winners set, then stop", "if", "state", "<=", "known_winners", ":", "continue", "# ----------Compute plurality score for the current remaining candidates--------------", "plural_score", "=", "self", ".", "get_plurality_scores3", "(", "prefcounts", ",", "ordering", ",", "state", ",", "m_star", ")", "minscore", "=", "min", "(", "plural_score", ".", "values", "(", ")", ")", "for", "to_be_deleted", "in", "state", ":", "if", "plural_score", "[", "to_be_deleted", "]", "==", "minscore", ":", "child_state", "=", "state", ".", "copy", "(", ")", "child_state", ".", "remove", "(", "to_be_deleted", ")", "tpc", "=", "tuple", "(", "sorted", "(", "child_state", ")", ")", "if", "tpc", "in", "hashtable2", ":", "continue", "else", ":", "hashtable2", ".", "add", "(", "tpc", ")", "child_node", "=", "Node", "(", "value", "=", "child_state", ")", "stackNode", ".", "append", "(", "child_node", ")", "return", "sorted", "(", "known_winners", ")" ]
Returns an integer list that represents all possible winners of a profile under STV rule. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "an", "integer", "list", "that", "represents", "all", "possible", "winners", "of", "a", "profile", "under", "STV", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L624-L680
train
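The STV search treats each set of still-standing candidates as a node and branches on every last-place plurality tie, so it enumerates every winner reachable under some tie-breaking, with hashtable2 deduplicating states and the known-winners subset test pruning. A compact sketch of the same search over plain ballot tuples; all names here are hypothetical and ballots are assumed to rank every candidate:

# Minimal sketch of the tie-exploring STV search over ballot tuples.
def stv_cowinners(ballots):
    winners, seen = set(), set()
    stack = [frozenset(ballots[0])]  # start with all candidates
    while stack:
        state = stack.pop()
        if len(state) == 1:
            winners |= state
            continue
        if state <= winners:
            continue  # pruning: nothing new can be found here
        # Plurality scores restricted to the remaining candidates.
        scores = {c: 0 for c in state}
        for ballot in ballots:
            top = next(c for c in ballot if c in state)
            scores[top] += 1
        low = min(scores.values())
        for cand in state:  # branch on every last-place candidate
            if scores[cand] == low:
                child = state - {cand}
                if child not in seen:
                    seen.add(child)
                    stack.append(child)
    return sorted(winners)

print(stv_cowinners([(1, 2, 3), (2, 3, 1), (3, 1, 2)]))  # -> [1, 2, 3]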
PrefPy/prefpy
prefpy/mechanism.py
MechanismBaldwin.baldwinsoc_winners
def baldwinsoc_winners(self, profile): """ Returns an integer list that represents all possible winners of a profile under baldwin rule. :ivar Profile profile: A Profile object that represents an election profile. """ ordering = profile.getOrderVectors() m = profile.numCands prefcounts = profile.getPreferenceCounts() if min(ordering[0]) == 0: startstate = set(range(m)) else: startstate = set(range(1, m + 1)) wmg = self.getWmg2(prefcounts, ordering, startstate, normalize=False) known_winners = set() # ----------Some statistics-------------- hashtable2 = set() # push the node of start state into the priority queue root = Node(value=startstate) stackNode = [] stackNode.append(root) while stackNode: # ------------pop the current node----------------- node = stackNode.pop() # ------------------------------------------------- state = node.value.copy() # goal state 1: if the state set contains only 1 candidate, then stop if len(state) == 1 and list(state)[0] not in known_winners: known_winners.add(list(state)[0]) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners: continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = dict() for cand in state: plural_score[cand] = 0 for cand1, cand2 in itertools.permutations(state, 2): plural_score[cand1] += wmg[cand1][cand2] # if current state satisfies one of the 3 goal state, continue to the next loop # After using heuristics, generate children and push them into priority queue # frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners)) minscore = min(plural_score.values()) for to_be_deleted in state: if plural_score[to_be_deleted] == minscore: child_state = state.copy() child_state.remove(to_be_deleted) tpc = tuple(sorted(child_state)) if tpc in hashtable2: continue else: hashtable2.add(tpc) child_node = Node(value=child_state) stackNode.append(child_node) return sorted(known_winners)
python
def baldwinsoc_winners(self, profile): """ Returns an integer list that represents all possible winners of a profile under baldwin rule. :ivar Profile profile: A Profile object that represents an election profile. """ ordering = profile.getOrderVectors() m = profile.numCands prefcounts = profile.getPreferenceCounts() if min(ordering[0]) == 0: startstate = set(range(m)) else: startstate = set(range(1, m + 1)) wmg = self.getWmg2(prefcounts, ordering, startstate, normalize=False) known_winners = set() # ----------Some statistics-------------- hashtable2 = set() # push the node of start state into the priority queue root = Node(value=startstate) stackNode = [] stackNode.append(root) while stackNode: # ------------pop the current node----------------- node = stackNode.pop() # ------------------------------------------------- state = node.value.copy() # goal state 1: if the state set contains only 1 candidate, then stop if len(state) == 1 and list(state)[0] not in known_winners: known_winners.add(list(state)[0]) continue # goal state 2 (pruning): if the state set is subset of the known_winners set, then stop if state <= known_winners: continue # ----------Compute plurality score for the current remaining candidates-------------- plural_score = dict() for cand in state: plural_score[cand] = 0 for cand1, cand2 in itertools.permutations(state, 2): plural_score[cand1] += wmg[cand1][cand2] # if current state satisfies one of the 3 goal state, continue to the next loop # After using heuristics, generate children and push them into priority queue # frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners)) minscore = min(plural_score.values()) for to_be_deleted in state: if plural_score[to_be_deleted] == minscore: child_state = state.copy() child_state.remove(to_be_deleted) tpc = tuple(sorted(child_state)) if tpc in hashtable2: continue else: hashtable2.add(tpc) child_node = Node(value=child_state) stackNode.append(child_node) return sorted(known_winners)
[ "def", "baldwinsoc_winners", "(", "self", ",", "profile", ")", ":", "ordering", "=", "profile", ".", "getOrderVectors", "(", ")", "m", "=", "profile", ".", "numCands", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "if", "min", "(", "ordering", "[", "0", "]", ")", "==", "0", ":", "startstate", "=", "set", "(", "range", "(", "m", ")", ")", "else", ":", "startstate", "=", "set", "(", "range", "(", "1", ",", "m", "+", "1", ")", ")", "wmg", "=", "self", ".", "getWmg2", "(", "prefcounts", ",", "ordering", ",", "startstate", ",", "normalize", "=", "False", ")", "known_winners", "=", "set", "(", ")", "# ----------Some statistics--------------", "hashtable2", "=", "set", "(", ")", "# push the node of start state into the priority queue", "root", "=", "Node", "(", "value", "=", "startstate", ")", "stackNode", "=", "[", "]", "stackNode", ".", "append", "(", "root", ")", "while", "stackNode", ":", "# ------------pop the current node-----------------", "node", "=", "stackNode", ".", "pop", "(", ")", "# -------------------------------------------------", "state", "=", "node", ".", "value", ".", "copy", "(", ")", "# goal state 1: if the state set contains only 1 candidate, then stop", "if", "len", "(", "state", ")", "==", "1", "and", "list", "(", "state", ")", "[", "0", "]", "not", "in", "known_winners", ":", "known_winners", ".", "add", "(", "list", "(", "state", ")", "[", "0", "]", ")", "continue", "# goal state 2 (pruning): if the state set is subset of the known_winners set, then stop", "if", "state", "<=", "known_winners", ":", "continue", "# ----------Compute plurality score for the current remaining candidates--------------", "plural_score", "=", "dict", "(", ")", "for", "cand", "in", "state", ":", "plural_score", "[", "cand", "]", "=", "0", "for", "cand1", ",", "cand2", "in", "itertools", ".", "permutations", "(", "state", ",", "2", ")", ":", "plural_score", "[", "cand1", "]", "+=", "wmg", "[", "cand1", "]", "[", "cand2", "]", "# if current state satisfies one of the 3 goal state, continue to the next loop", "# After using heuristics, generate children and push them into priority queue", "# frontier = [val for val in known_winners if val in state] + list(set(state) - set(known_winners))", "minscore", "=", "min", "(", "plural_score", ".", "values", "(", ")", ")", "for", "to_be_deleted", "in", "state", ":", "if", "plural_score", "[", "to_be_deleted", "]", "==", "minscore", ":", "child_state", "=", "state", ".", "copy", "(", ")", "child_state", ".", "remove", "(", "to_be_deleted", ")", "tpc", "=", "tuple", "(", "sorted", "(", "child_state", ")", ")", "if", "tpc", "in", "hashtable2", ":", "continue", "else", ":", "hashtable2", ".", "add", "(", "tpc", ")", "child_node", "=", "Node", "(", "value", "=", "child_state", ")", "stackNode", ".", "append", "(", "child_node", ")", "return", "sorted", "(", "known_winners", ")" ]
Returns an integer list that represents all possible winners of a profile under baldwin rule. :ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "an", "integer", "list", "that", "represents", "all", "possible", "winners", "of", "a", "profile", "under", "baldwin", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L801-L861
train
PrefPy/prefpy
prefpy/mechanism.py
MechanismBaldwin.getWmg2
def getWmg2(self, prefcounts, ordering, state, normalize=False): """ Generate a weighted majority graph that represents the whole profile. The function will return a two-dimensional dictionary that associates integer representations of each pair of candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. :ivar bool normalize: If normalize is True, the function will return a normalized graph where each edge has been divided by the value of the largest edge. """ # Initialize a new dictionary for our final weighted majority graph. wmgMap = dict() for cand in state: wmgMap[cand] = dict() for cand1, cand2 in itertools.combinations(state, 2): wmgMap[cand1][cand2] = 0 wmgMap[cand2][cand1] = 0 # Go through the wmgMaps and increment the value of each edge in our final graph with the # edges in each of the wmgMaps. We take into account the number of times that the vote # occurred. for i in range(0, len(prefcounts)): for cand1, cand2 in itertools.combinations(ordering[i], 2): # -------------------------- wmgMap[cand1][cand2] += prefcounts[i] # By default, we assume that the weighted majority graph should not be normalized. If # desired, we normalize by dividing each edge by the value of the largest edge. if normalize == True: maxEdge = float('-inf') for cand in wmgMap.keys(): maxEdge = max(maxEdge, max(wmgMap[cand].values())) for cand1 in wmgMap.keys(): for cand2 in wmgMap[cand1].keys(): wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2]) / maxEdge return wmgMap
python
def getWmg2(self, prefcounts, ordering, state, normalize=False): """ Generate a weighted majority graph that represents the whole profile. The function will return a two-dimensional dictionary that associates integer representations of each pair of candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. :ivar bool normalize: If normalize is True, the function will return a normalized graph where each edge has been divided by the value of the largest edge. """ # Initialize a new dictionary for our final weighted majority graph. wmgMap = dict() for cand in state: wmgMap[cand] = dict() for cand1, cand2 in itertools.combinations(state, 2): wmgMap[cand1][cand2] = 0 wmgMap[cand2][cand1] = 0 # Go through the wmgMaps and increment the value of each edge in our final graph with the # edges in each of the wmgMaps. We take into account the number of times that the vote # occurred. for i in range(0, len(prefcounts)): for cand1, cand2 in itertools.combinations(ordering[i], 2): # -------------------------- wmgMap[cand1][cand2] += prefcounts[i] # By default, we assume that the weighted majority graph should not be normalized. If # desired, we normalize by dividing each edge by the value of the largest edge. if normalize == True: maxEdge = float('-inf') for cand in wmgMap.keys(): maxEdge = max(maxEdge, max(wmgMap[cand].values())) for cand1 in wmgMap.keys(): for cand2 in wmgMap[cand1].keys(): wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2]) / maxEdge return wmgMap
[ "def", "getWmg2", "(", "self", ",", "prefcounts", ",", "ordering", ",", "state", ",", "normalize", "=", "False", ")", ":", "# Initialize a new dictionary for our final weighted majority graph.", "wmgMap", "=", "dict", "(", ")", "for", "cand", "in", "state", ":", "wmgMap", "[", "cand", "]", "=", "dict", "(", ")", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "state", ",", "2", ")", ":", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "=", "0", "wmgMap", "[", "cand2", "]", "[", "cand1", "]", "=", "0", "# Go through the wmgMaps and increment the value of each edge in our final graph with the", "# edges in each of the wmgMaps. We take into account the number of times that the vote", "# occured.", "for", "i", "in", "range", "(", "0", ",", "len", "(", "prefcounts", ")", ")", ":", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "ordering", "[", "i", "]", ",", "2", ")", ":", "# --------------------------", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "+=", "prefcounts", "[", "i", "]", "# By default, we assume that the weighted majority graph should not be normalized. If", "# desired, we normalize by dividing each edge by the value of the largest edge.", "if", "normalize", "==", "True", ":", "maxEdge", "=", "float", "(", "'-inf'", ")", "for", "cand", "in", "wmgMap", ".", "keys", "(", ")", ":", "maxEdge", "=", "max", "(", "maxEdge", ",", "max", "(", "wmgMap", "[", "cand", "]", ".", "values", "(", ")", ")", ")", "for", "cand1", "in", "wmgMap", ".", "keys", "(", ")", ":", "for", "cand2", "in", "wmgMap", "[", "cand1", "]", ".", "keys", "(", ")", ":", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", "=", "float", "(", "wmgMap", "[", "cand1", "]", "[", "cand2", "]", ")", "/", "maxEdge", "return", "wmgMap" ]
Generate a weighted majority graph that represents the whole profile. The function will return a two-dimensional dictionary that associates integer representations of each pair of candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. :ivar bool normalize: If normalize is True, the function will return a normalized graph where each edge has been divided by the value of the largest edge.
[ "Generate", "a", "weighted", "majority", "graph", "that", "represents", "the", "whole", "profile", ".", "The", "function", "will", "return", "a", "two", "-", "dimensional", "dictionary", "that", "associates", "integer", "representations", "of", "each", "pair", "of", "candidates", "cand1", "and", "cand2", "with", "the", "number", "of", "times", "cand1", "is", "ranked", "above", "cand2", "minus", "the", "number", "of", "times", "cand2", "is", "ranked", "above", "cand1", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L927-L963
train
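As the loop shows, each ordering contributes prefcounts[i] to wmgMap[cand1][cand2] for every pair where cand1 precedes cand2, so with normalize=False the entries are pairwise win counts; summing a candidate's row then yields its Borda score, which is what baldwinsoc_winners eliminates on. A sketch of that accumulation on fabricated orderings:

# Sketch: pairwise win counts from best-first preference orders,
# matching the accumulation in getWmg2.
import itertools

orderings = [(1, 2, 3), (2, 1, 3)]
prefcounts = [3, 2]

wmg = {c: {d: 0 for d in (1, 2, 3) if d != c} for c in (1, 2, 3)}
for order, count in zip(orderings, prefcounts):
    for c1, c2 in itertools.combinations(order, 2):
        wmg[c1][c2] += count  # c1 appears before c2 in this ordering

print(wmg)  # -> {1: {2: 3, 3: 5}, 2: {1: 2, 3: 5}, 3: {1: 0, 2: 0}}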
PrefPy/prefpy
prefpy/mechanism.py
MechanismPluralityRunOff.PluRunOff_single_winner
def PluRunOff_single_winner(self, profile): """ Returns an integer that represents the winner of a profile under Plurality with Runoff rule. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc" and elecType != "csv": print("ERROR: unsupported election type") exit() # Initialization prefcounts = profile.getPreferenceCounts() len_prefcounts = len(prefcounts) rankmaps = profile.getRankMaps() ranking = MechanismPlurality().getRanking(profile) # 1st round: find the top 2 candidates in plurality scores # Compute the 1st-place candidate in plurality scores # print(ranking) max_cand = ranking[0][0][0] # Compute the 2nd-place candidate in plurality scores # Automatically using tie-breaking rule--numerically increasing order if len(ranking[0][0]) > 1: second_max_cand = ranking[0][0][1] else: second_max_cand = ranking[0][1][0] top_2 = [max_cand, second_max_cand] # 2nd round: find the candidate with maximum plurality score dict_top2 = {max_cand: 0, second_max_cand: 0} for i in range(len_prefcounts): vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2} top_position = min(vote_top2.values()) keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position] for key in keys: dict_top2[key] += prefcounts[i] # print(dict_top2) winner = max(dict_top2.items(), key=lambda x: x[1])[0] return winner
python
def PluRunOff_single_winner(self, profile): """ Returns an integer that represents the winner of a profile under Plurality with Runoff rule. :ivar Profile profile: A Profile object that represents an election profile. """ # Currently, we expect the profile to contain complete ordering over candidates. Ties are # allowed however. elecType = profile.getElecType() if elecType != "soc" and elecType != "toc" and elecType != "csv": print("ERROR: unsupported election type") exit() # Initialization prefcounts = profile.getPreferenceCounts() len_prefcounts = len(prefcounts) rankmaps = profile.getRankMaps() ranking = MechanismPlurality().getRanking(profile) # 1st round: find the top 2 candidates in plurality scores # Compute the 1st-place candidate in plurality scores # print(ranking) max_cand = ranking[0][0][0] # Compute the 2nd-place candidate in plurality scores # Automatically using tie-breaking rule--numerically increasing order if len(ranking[0][0]) > 1: second_max_cand = ranking[0][0][1] else: second_max_cand = ranking[0][1][0] top_2 = [max_cand, second_max_cand] # 2nd round: find the candidate with maximum plurality score dict_top2 = {max_cand: 0, second_max_cand: 0} for i in range(len_prefcounts): vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2} top_position = min(vote_top2.values()) keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position] for key in keys: dict_top2[key] += prefcounts[i] # print(dict_top2) winner = max(dict_top2.items(), key=lambda x: x[1])[0] return winner
[ "def", "PluRunOff_single_winner", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", "and", "elecType", "!=", "\"csv\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "# Initialization", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "len_prefcounts", "=", "len", "(", "prefcounts", ")", "rankmaps", "=", "profile", ".", "getRankMaps", "(", ")", "ranking", "=", "MechanismPlurality", "(", ")", ".", "getRanking", "(", "profile", ")", "# 1st round: find the top 2 candidates in plurality scores", "# Compute the 1st-place candidate in plurality scores", "# print(ranking)", "max_cand", "=", "ranking", "[", "0", "]", "[", "0", "]", "[", "0", "]", "# Compute the 2nd-place candidate in plurality scores", "# Automatically using tie-breaking rule--numerically increasing order", "if", "len", "(", "ranking", "[", "0", "]", "[", "0", "]", ")", ">", "1", ":", "second_max_cand", "=", "ranking", "[", "0", "]", "[", "0", "]", "[", "1", "]", "else", ":", "second_max_cand", "=", "ranking", "[", "0", "]", "[", "1", "]", "[", "0", "]", "top_2", "=", "[", "max_cand", ",", "second_max_cand", "]", "# 2nd round: find the candidate with maximum plurality score", "dict_top2", "=", "{", "max_cand", ":", "0", ",", "second_max_cand", ":", "0", "}", "for", "i", "in", "range", "(", "len_prefcounts", ")", ":", "vote_top2", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "rankmaps", "[", "i", "]", ".", "items", "(", ")", "if", "key", "in", "top_2", "}", "top_position", "=", "min", "(", "vote_top2", ".", "values", "(", ")", ")", "keys", "=", "[", "x", "for", "x", "in", "vote_top2", ".", "keys", "(", ")", "if", "vote_top2", "[", "x", "]", "==", "top_position", "]", "for", "key", "in", "keys", ":", "dict_top2", "[", "key", "]", "+=", "prefcounts", "[", "i", "]", "# print(dict_top2)", "winner", "=", "max", "(", "dict_top2", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "[", "0", "]", "return", "winner" ]
Returns a number that associates the winner of a profile under Plurality with Runoff rule.

:ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "number", "that", "associates", "the", "winner", "of", "a", "profile", "under", "Plurality", "with", "Runoff", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1780-L1825
train
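The two-round logic of PluRunOff_single_winner can be restated without the Profile machinery. The sketch below is a hedged, dependency-free version assuming strict rankings; plurality_runoff_winner and the ballot encoding are hypothetical, and ties are broken in numerically increasing order, as the method's comments describe.

# Hypothetical restatement of the two-round rule above, on plain ballots.
from collections import Counter

def plurality_runoff_winner(ballots):
    """ballots: list of strict rankings, most preferred candidate first."""
    first_places = Counter(b[0] for b in ballots)
    # Top two by plurality score; ties broken by candidate number (increasing)
    top2 = sorted(first_places, key=lambda c: (-first_places[c], c))[:2]
    # Each ballot goes to whichever finalist it ranks higher
    runoff = Counter(min(top2, key=b.index) for b in ballots)
    return max(sorted(runoff), key=runoff.get)

print(plurality_runoff_winner([[1, 2, 3], [2, 1, 3], [2, 3, 1], [3, 2, 1]]))
# Candidate 2 leads round one and wins the runoff against candidate 1, 3 to 1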
PrefPy/prefpy
prefpy/mechanism.py
MechanismPluralityRunOff.PluRunOff_cowinners
def PluRunOff_cowinners(self, profile):
    """
    Returns a list that associates all the winners of a profile under Plurality with Runoff rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()

    # Initialization
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    ranking = MechanismPlurality().getRanking(profile)
    known_winners = set()

    # 1st round: find the top 2 candidates in plurality scores
    top_2_combinations = []
    if len(ranking[0][0]) > 1:
        for cand1, cand2 in itertools.combinations(ranking[0][0], 2):
            top_2_combinations.append([cand1, cand2])
    else:
        max_cand = ranking[0][0][0]
        if len(ranking[0][1]) > 1:
            for second_max_cand in ranking[0][1]:
                top_2_combinations.append([max_cand, second_max_cand])
        else:
            second_max_cand = ranking[0][1][0]
            top_2_combinations.append([max_cand, second_max_cand])

    # 2nd round: find the candidate with maximum plurality score
    for top_2 in top_2_combinations:
        dict_top2 = {top_2[0]: 0, top_2[1]: 0}
        for i in range(len_prefcounts):
            vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
            top_position = min(vote_top2.values())
            keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
            for key in keys:
                dict_top2[key] += prefcounts[i]
        max_value = max(dict_top2.values())
        winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
        known_winners = known_winners | set(winners)
    return sorted(known_winners)
python
def PluRunOff_cowinners(self, profile):
    """
    Returns a list that associates all the winners of a profile under Plurality with Runoff rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()

    # Initialization
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    ranking = MechanismPlurality().getRanking(profile)
    known_winners = set()

    # 1st round: find the top 2 candidates in plurality scores
    top_2_combinations = []
    if len(ranking[0][0]) > 1:
        for cand1, cand2 in itertools.combinations(ranking[0][0], 2):
            top_2_combinations.append([cand1, cand2])
    else:
        max_cand = ranking[0][0][0]
        if len(ranking[0][1]) > 1:
            for second_max_cand in ranking[0][1]:
                top_2_combinations.append([max_cand, second_max_cand])
        else:
            second_max_cand = ranking[0][1][0]
            top_2_combinations.append([max_cand, second_max_cand])

    # 2nd round: find the candidate with maximum plurality score
    for top_2 in top_2_combinations:
        dict_top2 = {top_2[0]: 0, top_2[1]: 0}
        for i in range(len_prefcounts):
            vote_top2 = {key: value for key, value in rankmaps[i].items() if key in top_2}
            top_position = min(vote_top2.values())
            keys = [x for x in vote_top2.keys() if vote_top2[x] == top_position]
            for key in keys:
                dict_top2[key] += prefcounts[i]
        max_value = max(dict_top2.values())
        winners = [y for y in dict_top2.keys() if dict_top2[y] == max_value]
        known_winners = known_winners | set(winners)
    return sorted(known_winners)
[ "def", "PluRunOff_cowinners", "(", "self", ",", "profile", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", "and", "elecType", "!=", "\"csv\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "# Initialization", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "len_prefcounts", "=", "len", "(", "prefcounts", ")", "rankmaps", "=", "profile", ".", "getRankMaps", "(", ")", "ranking", "=", "MechanismPlurality", "(", ")", ".", "getRanking", "(", "profile", ")", "known_winners", "=", "set", "(", ")", "# 1st round: find the top 2 candidates in plurality scores", "top_2_combinations", "=", "[", "]", "if", "len", "(", "ranking", "[", "0", "]", "[", "0", "]", ")", ">", "1", ":", "for", "cand1", ",", "cand2", "in", "itertools", ".", "combinations", "(", "ranking", "[", "0", "]", "[", "0", "]", ",", "2", ")", ":", "top_2_combinations", ".", "append", "(", "[", "cand1", ",", "cand2", "]", ")", "else", ":", "max_cand", "=", "ranking", "[", "0", "]", "[", "0", "]", "[", "0", "]", "if", "len", "(", "ranking", "[", "0", "]", "[", "1", "]", ")", ">", "1", ":", "for", "second_max_cand", "in", "ranking", "[", "0", "]", "[", "1", "]", ":", "top_2_combinations", ".", "append", "(", "[", "max_cand", ",", "second_max_cand", "]", ")", "else", ":", "second_max_cand", "=", "ranking", "[", "0", "]", "[", "1", "]", "[", "0", "]", "top_2_combinations", ".", "append", "(", "[", "max_cand", ",", "second_max_cand", "]", ")", "# 2nd round: find the candidate with maximum plurality score", "for", "top_2", "in", "top_2_combinations", ":", "dict_top2", "=", "{", "top_2", "[", "0", "]", ":", "0", ",", "top_2", "[", "1", "]", ":", "0", "}", "for", "i", "in", "range", "(", "len_prefcounts", ")", ":", "vote_top2", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "rankmaps", "[", "i", "]", ".", "items", "(", ")", "if", "key", "in", "top_2", "}", "top_position", "=", "min", "(", "vote_top2", ".", "values", "(", ")", ")", "keys", "=", "[", "x", "for", "x", "in", "vote_top2", ".", "keys", "(", ")", "if", "vote_top2", "[", "x", "]", "==", "top_position", "]", "for", "key", "in", "keys", ":", "dict_top2", "[", "key", "]", "+=", "prefcounts", "[", "i", "]", "max_value", "=", "max", "(", "dict_top2", ".", "values", "(", ")", ")", "winners", "=", "[", "y", "for", "y", "in", "dict_top2", ".", "keys", "(", ")", "if", "dict_top2", "[", "y", "]", "==", "max_value", "]", "known_winners", "=", "known_winners", "|", "set", "(", "winners", ")", "return", "sorted", "(", "known_winners", ")" ]
Returns a list that associates all the winners of a profile under Plurality with Runoff rule.

:ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "list", "that", "associates", "all", "the", "winners", "of", "a", "profile", "under", "Plurality", "with", "Runoff", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1827-L1876
train
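The co-winners variant differs from the single-winner one in its tie handling: when k candidates tie for the top plurality score, every unordered pair among them goes to a runoff, and the winners of all runoffs are unioned. The itertools call it uses is shown directly below, with a made-up set of tied leaders.

# Illustrating the pair enumeration used above for tied first-round leaders.
import itertools

tied_leaders = [1, 3, 4]  # hypothetical candidates sharing the best score
pairings = [list(p) for p in itertools.combinations(tied_leaders, 2)]
print(pairings)  # [[1, 3], [1, 4], [3, 4]] -- each pair fights a second round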
PrefPy/prefpy
prefpy/mechanism.py
MechanismSNTV.SNTV_winners
def SNTV_winners(self, profile, K):
    """
    Returns a list that associates all the winners of a profile under Single non-transferable vote rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()
    m = profile.numCands
    candScoresMap = MechanismPlurality().getCandScoresMap(profile)
    if K >= m:
        return list(candScoresMap.keys())
    # print(candScoresMap)
    sorted_items = sorted(candScoresMap.items(), key=lambda x: x[1], reverse=True)
    sorted_dict = {key: value for key, value in sorted_items}
    winners = list(sorted_dict.keys())[0:K]
    return winners
python
def SNTV_winners(self, profile, K):
    """
    Returns a list that associates all the winners of a profile under Single non-transferable vote rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """

    # Currently, we expect the profile to contain complete ordering over candidates. Ties are
    # allowed however.
    elecType = profile.getElecType()
    if elecType != "soc" and elecType != "toc" and elecType != "csv":
        print("ERROR: unsupported election type")
        exit()
    m = profile.numCands
    candScoresMap = MechanismPlurality().getCandScoresMap(profile)
    if K >= m:
        return list(candScoresMap.keys())
    # print(candScoresMap)
    sorted_items = sorted(candScoresMap.items(), key=lambda x: x[1], reverse=True)
    sorted_dict = {key: value for key, value in sorted_items}
    winners = list(sorted_dict.keys())[0:K]
    return winners
[ "def", "SNTV_winners", "(", "self", ",", "profile", ",", "K", ")", ":", "# Currently, we expect the profile to contain complete ordering over candidates. Ties are", "# allowed however.", "elecType", "=", "profile", ".", "getElecType", "(", ")", "if", "elecType", "!=", "\"soc\"", "and", "elecType", "!=", "\"toc\"", "and", "elecType", "!=", "\"csv\"", ":", "print", "(", "\"ERROR: unsupported election type\"", ")", "exit", "(", ")", "m", "=", "profile", ".", "numCands", "candScoresMap", "=", "MechanismPlurality", "(", ")", ".", "getCandScoresMap", "(", "profile", ")", "if", "K", ">=", "m", ":", "return", "list", "(", "candScoresMap", ".", "keys", "(", ")", ")", "# print(candScoresMap)", "sorted_items", "=", "sorted", "(", "candScoresMap", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "sorted_dict", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "sorted_items", "}", "winners", "=", "list", "(", "sorted_dict", ".", "keys", "(", ")", ")", "[", "0", ":", "K", "]", "return", "winners" ]
Returns a list that associates all the winners of a profile under Single non-transferable vote rule.

:ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "list", "that", "associates", "all", "the", "winners", "of", "a", "profile", "under", "Single", "non", "-", "transferable", "vote", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L1899-L1921
train
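Note that SNTV_winners rebuilds a dict from sorted items and then slices its keys, which relies on dicts preserving insertion order (guaranteed since Python 3.7). A dependency-free restatement of the rule is sketched below; sntv_winners and the ballot encoding are hypothetical, and the sketch uses Counter.most_common, which breaks ties by first-encountered order rather than by the original's deterministic ordering.

# Hedged sketch: the K candidates with the highest first-place counts win.
from collections import Counter

def sntv_winners(ballots, K):
    """ballots: list of strict rankings, most preferred candidate first."""
    scores = Counter(b[0] for b in ballots)
    return [cand for cand, _ in scores.most_common(K)]

print(sntv_winners([[1, 2], [1, 2], [2, 1], [2, 1], [2, 1]], K=1))  # [2]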
PrefPy/prefpy
prefpy/mechanism.py
MechanismBordaMean.Borda_mean_winners
def Borda_mean_winners(self, profile):
    """
    Returns a list that associates all the winners of a profile under The Borda-mean rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """
    n_candidates = profile.numCands
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    values = zeros([len_prefcounts, n_candidates], dtype=int)
    if min(list(rankmaps[0].keys())) == 0:
        delta = 0
    else:
        delta = 1
    for i in range(len_prefcounts):
        for j in range(delta, n_candidates + delta):
            values[i][j - delta] = rankmaps[i][j]
    # print("values=", values)
    mat0 = self._build_mat(values, n_candidates, prefcounts)
    borda = [0 for i in range(n_candidates)]
    for i in range(n_candidates):
        borda[i] = sum([mat0[i, j] for j in range(n_candidates)])
    borda_mean = mean(borda)
    bin_winners_list = [int(borda[i] >= borda_mean) for i in range(n_candidates)]
    return bin_winners_list
python
def Borda_mean_winners(self, profile):
    """
    Returns a list that associates all the winners of a profile under The Borda-mean rule.

    :ivar Profile profile: A Profile object that represents an election profile.
    """
    n_candidates = profile.numCands
    prefcounts = profile.getPreferenceCounts()
    len_prefcounts = len(prefcounts)
    rankmaps = profile.getRankMaps()
    values = zeros([len_prefcounts, n_candidates], dtype=int)
    if min(list(rankmaps[0].keys())) == 0:
        delta = 0
    else:
        delta = 1
    for i in range(len_prefcounts):
        for j in range(delta, n_candidates + delta):
            values[i][j - delta] = rankmaps[i][j]
    # print("values=", values)
    mat0 = self._build_mat(values, n_candidates, prefcounts)
    borda = [0 for i in range(n_candidates)]
    for i in range(n_candidates):
        borda[i] = sum([mat0[i, j] for j in range(n_candidates)])
    borda_mean = mean(borda)
    bin_winners_list = [int(borda[i] >= borda_mean) for i in range(n_candidates)]
    return bin_winners_list
[ "def", "Borda_mean_winners", "(", "self", ",", "profile", ")", ":", "n_candidates", "=", "profile", ".", "numCands", "prefcounts", "=", "profile", ".", "getPreferenceCounts", "(", ")", "len_prefcounts", "=", "len", "(", "prefcounts", ")", "rankmaps", "=", "profile", ".", "getRankMaps", "(", ")", "values", "=", "zeros", "(", "[", "len_prefcounts", ",", "n_candidates", "]", ",", "dtype", "=", "int", ")", "if", "min", "(", "list", "(", "rankmaps", "[", "0", "]", ".", "keys", "(", ")", ")", ")", "==", "0", ":", "delta", "=", "0", "else", ":", "delta", "=", "1", "for", "i", "in", "range", "(", "len_prefcounts", ")", ":", "for", "j", "in", "range", "(", "delta", ",", "n_candidates", "+", "delta", ")", ":", "values", "[", "i", "]", "[", "j", "-", "delta", "]", "=", "rankmaps", "[", "i", "]", "[", "j", "]", "# print(\"values=\", values)", "mat0", "=", "self", ".", "_build_mat", "(", "values", ",", "n_candidates", ",", "prefcounts", ")", "borda", "=", "[", "0", "for", "i", "in", "range", "(", "n_candidates", ")", "]", "for", "i", "in", "range", "(", "n_candidates", ")", ":", "borda", "[", "i", "]", "=", "sum", "(", "[", "mat0", "[", "i", ",", "j", "]", "for", "j", "in", "range", "(", "n_candidates", ")", "]", ")", "borda_mean", "=", "mean", "(", "borda", ")", "bin_winners_list", "=", "[", "int", "(", "borda", "[", "i", "]", ">=", "borda_mean", ")", "for", "i", "in", "range", "(", "n_candidates", ")", "]", "return", "bin_winners_list" ]
Returns a list that associates all the winners of a profile under The Borda-mean rule.

:ivar Profile profile: A Profile object that represents an election profile.
[ "Returns", "a", "list", "that", "associates", "all", "the", "winners", "of", "a", "profile", "under", "The", "Borda", "-", "mean", "rule", "." ]
f395ba3782f05684fa5de0cece387a6da9391d02
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanism.py#L2043-L2068
train
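Borda_mean_winners depends on the internal _build_mat helper, whose exact matrix convention is not shown here. The sketch below is an independent, hedged reading of the Borda-mean idea: score each candidate by its summed pairwise majority margins, then mark every candidate whose score reaches the mean; borda_mean and the ballot encoding are made up for illustration and need not match PrefPy's internals.

# Independent sketch of the Borda-mean rule on strict rankings.
import numpy as np

def borda_mean(ballots, n_candidates):
    """ballots: strict rankings over candidates 0..n_candidates-1."""
    margins = np.zeros((n_candidates, n_candidates), dtype=int)
    for b in ballots:
        pos = {c: r for r, c in enumerate(b)}
        for a in range(n_candidates):
            for c in range(n_candidates):
                # Count each unordered pair once, from the winner's side
                if a != c and pos[a] < pos[c]:
                    margins[a, c] += 1
                    margins[c, a] -= 1
    borda = margins.sum(axis=1)
    # 1 marks a winner: its margin sum is at least the mean margin sum
    return [int(s >= borda.mean()) for s in borda]

print(borda_mean([[0, 1, 2], [0, 2, 1], [1, 0, 2]], 3))  # [1, 1, 0]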
tamasgal/km3pipe
km3pipe/calib.py
Calibration.apply_t0
def apply_t0(self, hits):
    """Apply only t0s"""
    if HAVE_NUMBA:
        apply_t0_nb(
            hits.time, hits.dom_id, hits.channel_id, self._lookup_tables
        )
    else:
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
            cal[i] = calib[6]
        hits.time += cal
    return hits
python
def apply_t0(self, hits):
    """Apply only t0s"""
    if HAVE_NUMBA:
        apply_t0_nb(
            hits.time, hits.dom_id, hits.channel_id, self._lookup_tables
        )
    else:
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            calib = lookup[hits['dom_id'][i]][hits['channel_id'][i]]
            cal[i] = calib[6]
        hits.time += cal
    return hits
[ "def", "apply_t0", "(", "self", ",", "hits", ")", ":", "if", "HAVE_NUMBA", ":", "apply_t0_nb", "(", "hits", ".", "time", ",", "hits", ".", "dom_id", ",", "hits", ".", "channel_id", ",", "self", ".", "_lookup_tables", ")", "else", ":", "n", "=", "len", "(", "hits", ")", "cal", "=", "np", ".", "empty", "(", "n", ")", "lookup", "=", "self", ".", "_calib_by_dom_and_channel", "for", "i", "in", "range", "(", "n", ")", ":", "calib", "=", "lookup", "[", "hits", "[", "'dom_id'", "]", "[", "i", "]", "]", "[", "hits", "[", "'channel_id'", "]", "[", "i", "]", "]", "cal", "[", "i", "]", "=", "calib", "[", "6", "]", "hits", ".", "time", "+=", "cal", "return", "hits" ]
Apply only t0s
[ "Apply", "only", "t0s" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/calib.py#L116-L130
train
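The pure-Python fallback in apply_t0 looks up a per-channel t0 offset and adds it to each hit time. A hedged sketch of that computation on plain arrays is below; the real method operates on km3pipe hit tables, and the table contents, DOM ids, and t0 values here are invented for illustration.

# Hypothetical t0 application on plain numpy arrays, mirroring the fallback path.
import numpy as np

t0_table = {  # made-up mapping: (dom_id, channel_id) -> t0 offset in ns
    (808432838, 0): 10.5,
    (808432838, 1): 12.0,
}
dom_ids = np.array([808432838, 808432838])
channel_ids = np.array([0, 1])
times = np.array([100.0, 200.0])

# Gather one offset per hit, then shift all hit times at once
t0s = np.array([t0_table[(d, c)] for d, c in zip(dom_ids, channel_ids)])
times += t0s
print(times)  # [110.5 212.0] -- calibrated hit times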
tamasgal/km3pipe
km3pipe/io/evt.py
EvtPump._get_file_index_str
def _get_file_index_str(self):
    """Create a string out of the current file_index"""
    file_index = str(self.file_index)
    if self.n_digits is not None:
        file_index = file_index.zfill(self.n_digits)
    return file_index
python
def _get_file_index_str(self):
    """Create a string out of the current file_index"""
    file_index = str(self.file_index)
    if self.n_digits is not None:
        file_index = file_index.zfill(self.n_digits)
    return file_index
[ "def", "_get_file_index_str", "(", "self", ")", ":", "file_index", "=", "str", "(", "self", ".", "file_index", ")", "if", "self", ".", "n_digits", "is", "not", "None", ":", "file_index", "=", "file_index", ".", "zfill", "(", "self", ".", "n_digits", ")", "return", "file_index" ]
Create a string out of the current file_index
[ "Create", "a", "string", "out", "of", "the", "current", "file_index" ]
7a9b59ac899a28775b5bdc5d391d9a5340d08040
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L156-L161
train
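_get_file_index_str leans entirely on str.zfill for the optional zero padding; its behaviour is demonstrated directly below.

# The zero-padding behaviour relied on above, shown standalone:
assert str(7).zfill(3) == "007"        # padded when n_digits is set
assert str(7).zfill(1) == "7"          # already wide enough: unchanged
assert str(12345).zfill(3) == "12345"  # zfill never truncates
print("zfill examples OK")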