Dataset columns:

    column            type
    ----------------  -------------------------
    repo              string (7–55 chars)
    path              string (4–127 chars)
    func_name         string (1–88 chars)
    original_string   string (75–19.8k chars)
    language          string (1 class)
    code              string (75–19.8k chars)
    code_tokens       list
    docstring         string (3–17.3k chars)
    docstring_tokens  list
    sha               string (40 chars)
    url               string (87–242 chars)
    partition         string (1 class)
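These columns follow the usual code/docstring-pair layout. A minimal sketch of loading such a dataset with the Hugging Face `datasets` library (the dataset id below is hypothetical; only the column names come from the table above):

import datasets

# "user/python-code-docstring-pairs" is an illustrative id, not this dataset's real name
ds = datasets.load_dataset("user/python-code-docstring-pairs", split="train")
row = ds[0]
print(row["func_name"], row["url"])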
repo: klahnakoski/mo-logs
path: mo_logs/strings.py
func_name: utf82unicode
language: python
sha: 0971277ac9caf28a755b766b70621916957d4fea
url: https://github.com/klahnakoski/mo-logs/blob/0971277ac9caf28a755b766b70621916957d4fea/mo_logs/strings.py#L858-L890
partition: train
code:

def utf82unicode(value):
    """
    WITH EXPLANATION FOR FAILURE
    """
    try:
        return value.decode("utf8")
    except Exception as e:
        if not _Log:
            _late_import()
        if not is_binary(value):
            _Log.error("Can not convert {{type}} to unicode because it's not bytes", type=type(value).__name__)
        e = _Except.wrap(e)
        # report the exact byte that broke the decode (Python 2: iterating a
        # str yields one-character strings)
        for i, c in enumerate(value):
            try:
                c.decode("utf8")
            except Exception as f:
                _Log.error("Can not convert charcode {{c}} in string index {{i}}", i=i, c=ord(c), cause=[e, _Except.wrap(f)])
        try:
            latin1 = text_type(value.decode("latin1"))
            _Log.error("Can not explain conversion failure, but seems to be latin1", e)
        except Exception:
            pass
        _Log.error("Can not explain conversion failure of " + type(value).__name__ + "!", e)
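A minimal usage sketch for the record above (assumes the mo-logs package is installed; the byte string is illustrative):

from mo_logs.strings import utf82unicode

# valid UTF-8 decodes normally; invalid bytes trigger the diagnostic logging above
print(utf82unicode(b"caf\xc3\xa9"))   # u'café'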
repo: portfors-lab/sparkle
path: sparkle/gui/stim/tuning_curve.py
func_name: TuningCurveEditor.setModel
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L39-L74
partition: train
code:

def setModel(self, model):
    """Sets the QStimulusModel for this editor"""
    self.stimModel = model
    self.parameterModel = model.autoParams()
    tone = self.stimModel.data(self.stimModel.index(0, 0), QtCore.Qt.UserRole + 1)
    info = tone.auto_details()
    # set max/mins
    fmax = info['frequency']['max']
    self.ui.freqStartSpnbx.setMaximum(fmax)
    self.ui.freqStopSpnbx.setMaximum(fmax)
    self.ui.freqStepSpnbx.setMaximum(500000)
    dbmax = info['intensity']['max']
    self.ui.dbStartSpnbx.setMaximum(dbmax)
    self.ui.dbStopSpnbx.setMaximum(dbmax)
    self.ui.dbStepSpnbx.setMaximum(500000)
    self.ui.durSpnbx.setMaximum(info['duration']['max'])
    self.ui.risefallSpnbx.setMaximum(info['risefall']['max'])
    self.fmapper.setModel(self.parameterModel)
    self.dbmapper.setModel(self.parameterModel)
    self.fmapper.addMapping(self.ui.freqStartSpnbx, 1)
    self.fmapper.addMapping(self.ui.freqStopSpnbx, 2)
    self.fmapper.addMapping(self.ui.freqStepSpnbx, 3)
    self.fmapper.addMapping(self.ui.freqNstepsLbl, 4, 'text')
    self.dbmapper.addMapping(self.ui.dbStartSpnbx, 1)
    self.dbmapper.addMapping(self.ui.dbStopSpnbx, 2)
    self.dbmapper.addMapping(self.ui.dbStepSpnbx, 3)
    self.dbmapper.addMapping(self.ui.dbNstepsLbl, 4, 'text')
    self.fmapper.toFirst()
    self.dbmapper.setCurrentIndex(1)
    self.ui.durSpnbx.setValue(tone.duration())
    self.ui.nrepsSpnbx.setValue(self.stimModel.repCount())
    self.ui.risefallSpnbx.setValue(tone.risefall())
    self.tone = tone
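The fmapper/dbmapper calls above follow Qt's QDataWidgetMapper pattern: each widget is bound to a model column and the mapper keeps them in sync. A standalone sketch of that pattern (assumes PyQt5 for illustration; the model and widget here are not from sparkle):

from PyQt5 import QtGui, QtWidgets

app = QtWidgets.QApplication([])
model = QtGui.QStandardItemModel(1, 2)               # one row, two columns
model.setItem(0, 1, QtGui.QStandardItem("1000"))     # column 1 holds a start value
spin = QtWidgets.QSpinBox()
spin.setMaximum(500000)
mapper = QtWidgets.QDataWidgetMapper()
mapper.setModel(model)
mapper.addMapping(spin, 1)   # column 1 -> spin box, like fmapper above
mapper.toFirst()             # load row 0 into the mapped widgets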
repo: portfors-lab/sparkle
path: sparkle/gui/stim/tuning_curve.py
func_name: TuningCurveEditor.setStimDuration
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L83-L87
partition: train
code:

def setStimDuration(self):
    """Sets the duration of the StimulusModel from values pulled from this widget"""
    duration = self.ui.durSpnbx.value()
    self.tone.setDuration(duration)
repo: portfors-lab/sparkle
path: sparkle/gui/stim/tuning_curve.py
func_name: TuningCurveEditor.setStimReps
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L90-L94
partition: train
code:

def setStimReps(self):
    """Sets the reps of the StimulusModel from values pulled from this widget"""
    reps = self.ui.nrepsSpnbx.value()
    self.stimModel.setRepCount(reps)
repo: portfors-lab/sparkle
path: sparkle/gui/stim/tuning_curve.py
func_name: TuningCurveEditor.setStimRisefall
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L96-L100
partition: train
code:

def setStimRisefall(self):
    """Sets the Risefall of the StimulusModel's tone from values pulled from this widget"""
    rf = self.ui.risefallSpnbx.value()
    self.tone.setRisefall(rf)
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.add_arguments
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L133-L143
partition: train
code:

def add_arguments(self, parser, bootstrap=False):
    """Adds all items to the parser passed in.

    Args:
        parser (argparse.ArgumentParser): The parser to add all items to.
        bootstrap (bool): Flag to indicate whether you only want to mark
            bootstrapped items as required on the command-line.
    """
    [item.add_argument(parser, bootstrap)
     for item in self._get_items(bootstrap=False)]
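A minimal usage sketch for add_arguments (assumes the yapconf package; the one-item spec is illustrative):

import argparse

from yapconf import YapconfSpec

spec = YapconfSpec({'db_host': {'type': 'str', 'default': 'localhost'}})
parser = argparse.ArgumentParser()
spec.add_arguments(parser)                # one argparse argument per spec item
cli_args = vars(parser.parse_args([]))    # empty argv, so defaults apply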
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.add_source
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L145-L188
partition: train
code:

def add_source(self, label, source_type, **kwargs):
    """Add a source to the spec.

    Sources should have a unique label. This will help tracing where your
    configurations are coming from if you turn up the log-level.

    The keyword arguments are significant. Different sources require
    different keyword arguments. Required keys for each source_type are
    listed below, for a detailed list of all possible arguments, see the
    individual source's documentation.

    source_type: dict
        required keyword arguments:
            - data - A dictionary

    source_type: environment
        No required keyword arguments.

    source_type: etcd
        required keyword arguments:
            - client - A client from the python-etcd package.

    source_type: json
        required keyword arguments:
            - filename - A JSON file.
            - data - A string representation of JSON

    source_type: kubernetes
        required keyword arguments:
            - client - A client from the kubernetes package
            - name - The name of the ConfigMap to load

    source_type: yaml
        required keyword arguments:
            - filename - A YAML file.

    Args:
        label (str): A label for the source.
        source_type (str): A source type, available source types depend
            on the packages installed. See ``yapconf.ALL_SUPPORTED_SOURCES``
            for a complete list.
    """
    self._sources[label] = get_source(label, source_type, **kwargs)
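A short sketch of registering sources (labels and data are illustrative):

from yapconf import YapconfSpec

spec = YapconfSpec({'db_host': {'type': 'str', 'default': 'localhost'}})
spec.add_source('defaults', 'dict', data={'db_host': 'localhost'})
spec.add_source('env', 'environment')   # no extra kwargs required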
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.find_item
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L190-L209
partition: train
code:

def find_item(self, fq_name):
    """Find an item in the specification by fully qualified name.

    Args:
        fq_name (str): Fully-qualified name of the item.

    Returns:
        The item if it is in the specification. None otherwise
    """
    names = fq_name.split(self._separator)
    current = self._yapconf_items
    for name in names:
        if isinstance(current, (YapconfDictItem, YapconfListItem)):
            current = current.children
        if name not in current:
            return None
        current = current[name]
    return current
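A sketch of the fully-qualified lookup. Two assumptions here are not confirmed by this record: that nested children are declared under an 'items' key and that the default separator is '.':

from yapconf import YapconfSpec

spec = YapconfSpec({
    'db': {'type': 'dict', 'items': {'host': {'type': 'str'}}},
})
item = spec.find_item('db.host')   # walks db -> children -> host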
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.get_item
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L211-L225
partition: train
code:

def get_item(self, name, bootstrap=False):
    """Get a particular item in the specification.

    Args:
        name (str): The name of the item to retrieve.
        bootstrap (bool): Only search bootstrap items

    Returns (YapconfItem):
        A YapconfItem if it is found, None otherwise.
    """
    for item in self._get_items(bootstrap):
        if item.name == name:
            return item
    return None
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.update_defaults
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L227-L244
partition: train
code:

def update_defaults(self, new_defaults, respect_none=False):
    """Update items defaults to the values in the new_defaults dict.

    Args:
        new_defaults (dict): A key-value pair of new defaults to be applied.
        respect_none (bool): Flag to indicate if ``None`` values should
            constitute an update to the default.
    """
    for key, value in six.iteritems(new_defaults):
        item = self.get_item(key)
        if item is None:
            raise YapconfItemNotFound(
                "Cannot update default for {0}, there is no config item "
                "by the name of {1}".format(key, key),
                None,
            )
        item.update_default(value, respect_none)
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.generate_documentation
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L246-L273
partition: train
code:

def generate_documentation(self, app_name, **kwargs):
    """Generate documentation for this specification.

    Documentation is generated in Markdown format. An example of the
    generated documentation can be found at:
    https://github.com/loganasherjones/yapconf/blob/master/example/doc.md

    Args:
        app_name (str): The name of your application.

    Keyword Args:
        output_file_name (str): If provided, will write to this file.
        encoding (str): The encoding to use for the output file.
            Default is utf-8.

    Returns:
        A string representation of the documentation.
    """
    output_file = kwargs.get('output_file_name')
    encoding = kwargs.get('encoding', 'utf-8')
    doc_string = generate_markdown_doc(app_name, self)

    if output_file:
        with open(output_file, 'w', encoding=encoding) as doc_file:
            doc_file.write(doc_string)

    return doc_string
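A brief usage sketch (the application name and output file are illustrative):

from yapconf import YapconfSpec

spec = YapconfSpec({'db_host': {'type': 'str', 'default': 'localhost'}})
markdown = spec.generate_documentation('my-app', output_file_name='config_docs.md')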
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.load_config
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L275-L327
partition: train
code:

def load_config(self, *args, **kwargs):
    """Load a config based on the arguments passed in.

    The order of arguments passed in as \*args is significant. It indicates
    the order of precedence used to load configuration values. Each argument
    can be a string, dictionary or a tuple. There is a special case string
    called 'ENVIRONMENT', otherwise it will attempt to load the filename
    passed in as a string.

    By default, if a string is provided, it will attempt to load the file
    based on the file_type passed in on initialization. If you want to load
    a mixture of json and yaml files, you can specify them as the 3rd part
    of a tuple.

    Examples:
        You can load configurations in any of the following ways:

        >>> my_spec = YapconfSpec({'foo': {'type': 'str'}})
        >>> my_spec.load_config('/path/to/file')
        >>> my_spec.load_config({'foo': 'bar'})
        >>> my_spec.load_config('ENVIRONMENT')
        >>> my_spec.load_config(('label', {'foo': 'bar'}))
        >>> my_spec.load_config(('label', '/path/to/file.yaml', 'yaml'))
        >>> my_spec.load_config(('label', '/path/to/file.json', 'json'))

        You can of course combine each of these and the order will be
        held correctly.

    Args:
        *args:
        **kwargs: The only supported keyword argument is 'bootstrap' which
            will indicate that only bootstrap configurations should be
            loaded.

    Returns:
        box.Box: A Box object which is subclassed from dict. It should
            behave exactly as a dictionary. This object is guaranteed to
            contain at least all of your required configuration items.

    Raises:
        YapconfLoadError: If we attempt to load your args and something
            goes wrong.
        YapconfItemNotFound: If an item is required but could not be found
            in the configuration.
        YapconfItemError: If a possible value was found but the type cannot
            be determined.
        YapconfValueError: If a possible value is found but during
            conversion, an exception was raised.
    """
    bootstrap = kwargs.get('bootstrap', False)
    overrides = self._generate_overrides(*args)
    config = self._generate_config_from_overrides(overrides, bootstrap)
    return Box(config)
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.spawn_watcher
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L329-L357
partition: train
code:

def spawn_watcher(self, label, target=None, eternal=False):
    """Spawns a config watcher in a separate daemon thread.

    If a particular config value changes, and the item has a
    ``watch_target`` defined, then that method will be called.

    If a ``target`` is passed in, then it will call the ``target`` anytime
    the config changes.

    Args:
        label (str): Should match a label added through ``add_source``
        target (func): Should be a function that takes two arguments,
            the old configuration and the new configuration.
        eternal (bool): Determines if watcher threads should be restarted
            if they die.

    Returns:
        The thread that was spawned.
    """
    if label not in self._sources:
        raise YapconfSourceError(
            'Cannot watch %s no source named %s' % (label, label)
        )

    current_config = self._sources[label].get_data()
    handler = ConfigChangeHandler(current_config, self, target)
    return self._sources[label].watch(handler, eternal)
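A sketch of the two-argument target callback described above. The source label and file name are illustrative, and it is an assumption that a json file source supports watching:

from yapconf import YapconfSpec

spec = YapconfSpec({'db_host': {'type': 'str', 'default': 'localhost'}})
spec.add_source('config-file', 'json', filename='app_config.json')

def on_change(old_config, new_config):
    # receives the previous and the new configuration
    print(old_config, '->', new_config)

watcher_thread = spec.spawn_watcher('config-file', target=on_change)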
repo: loganasherjones/yapconf
path: yapconf/spec.py
func_name: YapconfSpec.migrate_config_file
language: python
sha: d2970e6e7e3334615d4d978d8b0ca33006d79d16
url: https://github.com/loganasherjones/yapconf/blob/d2970e6e7e3334615d4d978d8b0ca33006d79d16/yapconf/spec.py#L359-L446
partition: train
code:

def migrate_config_file(
        self,
        config_file_path,
        always_update=False,
        current_file_type=None,
        output_file_name=None,
        output_file_type=None,
        create=True,
        update_defaults=True,
        dump_kwargs=None,
        include_bootstrap=True,
):
    """Migrates a configuration file.

    This is used to help you update your configurations throughout the
    lifetime of your application. It is probably best explained through
    example.

    Examples:
        Assume we have a JSON config file ('/path/to/config.json') like
        the following:
        ``{"db_name": "test_db_name", "db_host": "1.2.3.4"}``

        >>> spec = YapconfSpec({
        ...     'db_name': {
        ...         'type': 'str',
        ...         'default': 'new_default',
        ...         'previous_defaults': ['test_db_name']
        ...     },
        ...     'db_host': {
        ...         'type': 'str',
        ...         'previous_defaults': ['localhost']
        ...     }
        ... })

        We can migrate that file quite easily with the spec object:

        >>> spec.migrate_config_file('/path/to/config.json')

        Will result in /path/to/config.json being overwritten:
        ``{"db_name": "new_default", "db_host": "1.2.3.4"}``

    Args:
        config_file_path (str): The path to your current config
        always_update (bool): Always update values (even to None)
        current_file_type (str): Defaults to self._file_type
        output_file_name (str): Defaults to the current_file_path
        output_file_type (str): Defaults to self._file_type
        create (bool): Create the file if it doesn't exist (otherwise
            error if the file does not exist).
        update_defaults (bool): Update values that have a value set to
            something listed in the previous_defaults
        dump_kwargs (dict): A key-value pair that will be passed to dump
        include_bootstrap (bool): Include bootstrap items in the output

    Returns:
        box.Box: The newly migrated configuration.
    """
    current_file_type = current_file_type or self._file_type
    output_file_type = output_file_type or self._file_type
    output_file_name = output_file_name or config_file_path

    current_config = self._get_config_if_exists(
        config_file_path, create, current_file_type
    )

    migrated_config = {}
    if include_bootstrap:
        items = self._yapconf_items.values()
    else:
        items = [
            item for item in self._yapconf_items.values()
            if not item.bootstrap
        ]

    for item in items:
        item.migrate_config(
            current_config, migrated_config, always_update, update_defaults
        )

    if create:
        yapconf.dump_data(
            migrated_config,
            filename=output_file_name,
            file_type=output_file_type,
            klazz=YapconfLoadError,
            dump_kwargs=dump_kwargs,
        )

    return Box(migrated_config)
repo: portfors-lab/sparkle
path: sparkle/tools/spikestats.py
func_name: refractory
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L4-L20
partition: train
code:

def refractory(times, refract=0.002):
    """Removes spikes in times list that do not satisfy refractory period

    :param times: list(float) of spike times in seconds
    :type times: list(float)
    :param refract: Refractory period in seconds
    :type refract: float
    :returns: list(float) of spike times in seconds

    For every interspike interval < refract, removes the second spike
    time in list and returns the result
    """
    times_refract = []
    times_refract.append(times[0])
    for i in range(1, len(times)):
        # keep a spike only if it follows the last kept spike by >= refract
        if times_refract[-1] + refract <= times[i]:
            times_refract.append(times[i])
    return times_refract
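A worked example with illustrative spike times and the default 2 ms refractory period (assumes the function above is in scope):

# 0.0505 s follows 0.050 s by only 0.5 ms, so it is removed
print(refractory([0.010, 0.050, 0.0505, 0.080]))   # [0.01, 0.05, 0.08]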
repo: portfors-lab/sparkle
path: sparkle/tools/spikestats.py
func_name: spike_times
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L22-L74
partition: train
code:

def spike_times(signal, threshold, fs, absval=True):
    """Detect spikes from a given signal

    :param signal: Spike trace recording (vector)
    :type signal: numpy array
    :param threshold: Threshold value to determine spikes
    :type threshold: float
    :param fs: Sampling frequency of signal in Hz
    :type fs: float
    :param absval: Whether to apply absolute value to signal before
        thresholding
    :type absval: bool
    :returns: list(float) of spike times in seconds

    For every continuous set of points over given threshold, returns
    the time of the maximum
    """
    times = []
    if absval:
        signal = np.abs(signal)

    over, = np.where(signal > threshold)
    segments, = np.where(np.diff(over) > 1)

    if len(over) > 1:
        if len(segments) == 0:
            segments = [0, len(over) - 1]
        else:
            # add end points to sections for looping
            if segments[0] != 0:
                segments = np.insert(segments, [0], [0])
            else:
                # first point is in a singleton
                times.append(float(over[0]) / fs)
                if 1 not in segments:
                    # make sure that first point is in there
                    segments[0] = 1
            if segments[-1] != len(over) - 1:
                segments = np.insert(segments, [len(segments)], [len(over) - 1])
            else:
                times.append(float(over[-1]) / fs)

        for iseg in range(1, len(segments)):
            if segments[iseg] - segments[iseg - 1] == 1:
                # only single point over threshold
                idx = over[segments[iseg]]
            else:
                segments[0] = segments[0] - 1
                # find maximum of continuous set over max
                idx = over[segments[iseg - 1] + 1] + np.argmax(
                    signal[over[segments[iseg - 1] + 1]:over[segments[iseg]]]
                )
            times.append(float(idx) / fs)
    elif len(over) == 1:
        times.append(float(over[0]) / fs)

    if len(times) > 0:
        return refractory(times)
    else:
        return times
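A minimal usage sketch on a synthetic trace (assumes numpy and the spike_times/refractory functions above):

import numpy as np

fs = 10000.0
signal = np.zeros(100)
signal[20:23] = [0.4, 0.9, 0.5]   # one suprathreshold event peaking at sample 21
print(spike_times(signal, threshold=0.3, fs=fs))   # [0.0021], the peak time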
repo: portfors-lab/sparkle
path: sparkle/tools/spikestats.py
func_name: bin_spikes
language: python
sha: 5fad1cf2bec58ec6b15d91da20f6236a74826110
url: https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L76-L89
partition: train
code:

def bin_spikes(spike_times, binsz):
    """Sort spike times into bins

    :param spike_times: times of spike instances
    :type spike_times: list
    :param binsz: length of time bin to use
    :type binsz: float
    :returns: list of bin indices, one for each element in spike_times
    """
    bins = np.empty((len(spike_times),), dtype=int)
    for i, stime in enumerate(spike_times):
        # around to fix rounding errors
        bins[i] = np.floor(np.around(stime / binsz, 5))
    return bins
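A worked example with 5 ms bins (assumes numpy imported as np, as in the module above):

# 0.001/0.005 -> bin 0, 0.012/0.005 -> bin 2, 0.030/0.005 -> bin 6
print(bin_spikes([0.001, 0.012, 0.030], binsz=0.005))   # [0 2 6]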
portfors-lab/sparkle
sparkle/tools/spikestats.py
spike_latency
def spike_latency(signal, threshold, fs): """Find the latency of the first spike over threshold :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :returns: float -- Time of peak of first spike, or None if no values over threshold This is the same as the first value returned from calc_spike_times """ over, = np.where(signal>threshold) segments, = np.where(np.diff(over) > 1) if len(over) > 1: if len(segments) == 0: # only signal peak idx = over[0] + np.argmax(signal[over[0]:over[-1]]) latency = float(idx)/fs elif segments[0] == 0: #first point in singleton latency = float(over[0])/fs else: idx = over[0] + np.argmax(signal[over[0]:over[segments[0]]]) latency = float(idx)/fs elif len(over) > 0: latency = float(over[0])/fs else: latency = np.nan return latency
python
def spike_latency(signal, threshold, fs): """Find the latency of the first spike over threshold :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :returns: float -- Time of peak of first spike, or None if no values over threshold This is the same as the first value returned from calc_spike_times """ over, = np.where(signal>threshold) segments, = np.where(np.diff(over) > 1) if len(over) > 1: if len(segments) == 0: # only signal peak idx = over[0] + np.argmax(signal[over[0]:over[-1]]) latency = float(idx)/fs elif segments[0] == 0: #first point in singleton latency = float(over[0])/fs else: idx = over[0] + np.argmax(signal[over[0]:over[segments[0]]]) latency = float(idx)/fs elif len(over) > 0: latency = float(over[0])/fs else: latency = np.nan return latency
[ "def", "spike_latency", "(", "signal", ",", "threshold", ",", "fs", ")", ":", "over", ",", "=", "np", ".", "where", "(", "signal", ">", "threshold", ")", "segments", ",", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "over", ")", ">", "1", ")", "if", "len", "(", "over", ")", ">", "1", ":", "if", "len", "(", "segments", ")", "==", "0", ":", "# only signal peak", "idx", "=", "over", "[", "0", "]", "+", "np", ".", "argmax", "(", "signal", "[", "over", "[", "0", "]", ":", "over", "[", "-", "1", "]", "]", ")", "latency", "=", "float", "(", "idx", ")", "/", "fs", "elif", "segments", "[", "0", "]", "==", "0", ":", "#first point in singleton", "latency", "=", "float", "(", "over", "[", "0", "]", ")", "/", "fs", "else", ":", "idx", "=", "over", "[", "0", "]", "+", "np", ".", "argmax", "(", "signal", "[", "over", "[", "0", "]", ":", "over", "[", "segments", "[", "0", "]", "]", "]", ")", "latency", "=", "float", "(", "idx", ")", "/", "fs", "elif", "len", "(", "over", ")", ">", "0", ":", "latency", "=", "float", "(", "over", "[", "0", "]", ")", "/", "fs", "else", ":", "latency", "=", "np", ".", "nan", "return", "latency" ]
Find the latency of the first spike over threshold :param signal: Spike trace recording (vector) :type signal: numpy array :param threshold: Threshold value to determine spikes :type threshold: float :param fs: Sampling frequency (Hz) of the recording :type fs: int :returns: float -- Time of peak of first spike, or np.nan if no values over threshold This is the same as the first value returned from calc_spike_times
[ "Find", "the", "latency", "of", "the", "first", "spike", "over", "threshold" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L91-L121
train
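A minimal sketch of calling spike_latency, using a hypothetical 10 kHz trace with a single suprathreshold peak:

import numpy as np

fs = 10000
signal = np.zeros(100)
signal[40:43] = [0.6, 1.0, 0.7]  # one spike rising over a 0.5 threshold
print(spike_latency(signal, 0.5, fs))  # -> 0.0041, the time of the peak sample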
portfors-lab/sparkle
sparkle/tools/spikestats.py
firing_rate
def firing_rate(spike_times, window_size=None): """Calculate the firing rate of spikes :param spike_times: times of spike instances :type spike_times: list :param window_size: length of time to use to determine rate. If None, uses time from first to last spike in spike_times :type window_size: float """ if len(spike_times) == 0: return 0 if window_size is None: if len(spike_times) > 1: window_size = spike_times[-1] - spike_times[0] else: # Only one spike, and no window - assume a unit window window_size = 1 if window_size == 0: return 0 # rate is the number of spikes per unit time rate = len(spike_times)/window_size return rate
python
def firing_rate(spike_times, window_size=None): """Calculate the firing rate of spikes :param spike_times: times of spike instances :type spike_times: list :param window_size: length of time to use to determine rate. If None, uses time from first to last spike in spike_times :type window_size: float """ if len(spike_times) == 0: return 0 if window_size is None: if len(spike_times) > 1: window_size = spike_times[-1] - spike_times[0] else: # Only one spike, and no window - assume a unit window window_size = 1 if window_size == 0: return 0 # rate is the number of spikes per unit time rate = len(spike_times)/window_size return rate
[ "def", "firing_rate", "(", "spike_times", ",", "window_size", "=", "None", ")", ":", "if", "len", "(", "spike_times", ")", "==", "0", ":", "return", "0", "if", "window_size", "is", "None", ":", "if", "len", "(", "spike_times", ")", ">", "1", ":", "window_size", "=", "spike_times", "[", "-", "1", "]", "-", "spike_times", "[", "0", "]", "elif", "len", "(", "spike_times", ")", ">", "0", ":", "# Only one spike, and no window - what to do?", "window_size", "=", "1", "else", ":", "window_size", "=", "0", "rate", "=", "window_size", "/", "len", "(", "spike_times", ")", "return", "rate" ]
Calculate the firing rate of spikes :param spike_times: times of spike instances :type spike_times: list :param window_size: length of time to use to determine rate. If None, uses time from first to last spike in spike_times :type window_size: float
[ "Calculate", "the", "firing", "rate", "of", "spikes" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/tools/spikestats.py#L123-L145
train
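A quick check of firing_rate with hypothetical spike times:

print(firing_rate([0.0, 0.5, 1.0]))       # window inferred as 1.0 s -> 3.0
print(firing_rate([0.0, 0.5, 1.0], 2.0))  # explicit 2.0 s window -> 1.5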
aresio/miniful
miniful/miniful.py
FuzzyReasoner.evaluate_rules
def evaluate_rules(self): """Perform Sugeno inference.""" outputs = defaultdict(list) for rule in self._rules: res = rule.evaluate(self._variables) outputs[res['term']].append([res['weight'], res['output']]) return_values = {} for k,v in outputs.items(): # weighted average of rule outputs (zero-order Sugeno defuzzification) num = sum(x*y for x,y in v) den = sum(x for x,y in v) if den==0: return_values[k] = 0 else: return_values[k] = num/den return return_values
python
def evaluate_rules(self): """Perform Sugeno inference.""" outputs = defaultdict(list) for rule in self._rules: res = rule.evaluate(self._variables) outputs[res['term']].append([res['weight'], res['output']]) return_values = {} for k,v in outputs.items(): # weighted average of rule outputs (zero-order Sugeno defuzzification) num = sum(x*y for x,y in v) den = sum(x for x,y in v) if den==0: return_values[k] = 0 else: return_values[k] = num/den return return_values
[ "def", "evaluate_rules", "(", "self", ")", ":", "outputs", "=", "defaultdict", "(", "list", ")", "total_rules", "=", "len", "(", "self", ".", "_rules", ")", "for", "rule", "in", "self", ".", "_rules", ":", "res", "=", "rule", ".", "evaluate", "(", "self", ".", "_variables", ")", "outputs", "[", "res", "[", "'term'", "]", "]", ".", "append", "(", "[", "res", "[", "'weight'", "]", ",", "res", "[", "'output'", "]", "]", ")", "return_values", "=", "{", "}", "for", "k", ",", "v", "in", "outputs", ".", "items", "(", ")", ":", "num", "=", "sum", "(", "map", "(", "lambda", "(", "x", ",", "y", ")", ":", "x", "*", "y", ",", "v", ")", ")", "den", "=", "sum", "(", "[", "i", "for", "i", ",", "j", "in", "v", "]", ")", "if", "den", "==", "0", ":", "return_values", "[", "k", "]", "=", "0", "else", ":", "return_values", "[", "k", "]", "=", "num", "/", "den", "return", "return_values" ]
Perform Sugeno inference.
[ "Perform", "Sugeno", "inference", "." ]
51eaa89677b22027134c208c012045de883c751e
https://github.com/aresio/miniful/blob/51eaa89677b22027134c208c012045de883c751e/miniful/miniful.py#L129-L145
train
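The per-term aggregation in evaluate_rules is a weighted average; here it is in isolation with made-up (weight, output) pairs:

# zero-order Sugeno defuzzification of two rule results
rule_results = [(0.8, 10.0), (0.2, 40.0)]
num = sum(w * out for w, out in rule_results)
den = sum(w for w, out in rule_results)
print(num / den)  # -> 16.0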
TorkamaniLab/metapipe
metapipe/models/pbs_job.py
PBSJob.is_complete
def is_complete(self): """ Checks the job's output or log file to determine if the completion criteria were met. """ qstat = self._grep_qstat('complete') comp = self._grep_status('complete') if qstat and comp: return True return False
python
def is_complete(self): """ Checks the job's output or log file to determine if the completion criteria were met. """ qstat = self._grep_qstat('complete') comp = self._grep_status('complete') if qstat and comp: return True return False
[ "def", "is_complete", "(", "self", ")", ":", "qstat", "=", "self", ".", "_grep_qstat", "(", "'complete'", ")", "comp", "=", "self", ".", "_grep_status", "(", "'complete'", ")", "if", "qstat", "and", "comp", ":", "return", "True", "return", "False" ]
Checks the job's output or log file to determine if the completion criteria were met.
[ "Checks", "the", "job", "s", "output", "or", "log", "file", "to", "determing", "if", "the", "completion", "criteria", "was", "met", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/pbs_job.py#L39-L47
train
mediawiki-utilities/python-mwoauth
mwoauth/functions.py
initiate
def initiate(mw_uri, consumer_token, callback='oob', user_agent=defaults.USER_AGENT): """ Initiate an oauth handshake with MediaWiki. :Parameters: mw_uri : `str` The base URI of the MediaWiki installation. Note that the URI should end in ``"index.php"``. consumer_token : :class:`~mwoauth.ConsumerToken` A token representing you, the consumer. Provided by MediaWiki via ``Special:OAuthConsumerRegistration``. callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing a request for access """ auth = OAuth1(consumer_token.key, client_secret=consumer_token.secret, callback_uri=callback) r = requests.post(url=mw_uri, params={'title': "Special:OAuth/initiate"}, auth=auth, headers={'User-Agent': user_agent}) credentials = parse_qs(r.content) if credentials is None or credentials == {}: raise OAuthException( "Expected x-www-form-urlencoded response from " + "MediaWiki, but got something else: " + "{0}".format(repr(r.content))) elif b('oauth_token') not in credentials or \ b('oauth_token_secret') not in credentials: raise OAuthException( "MediaWiki response lacks token information: " "{0}".format(repr(credentials))) else: request_token = RequestToken( credentials.get(b('oauth_token'))[0], credentials.get(b('oauth_token_secret'))[0] ) params = {'title': "Special:OAuth/authenticate", 'oauth_token': request_token.key, 'oauth_consumer_key': consumer_token.key} return ( mw_uri + "?" + urlencode(params), request_token )
python
def initiate(mw_uri, consumer_token, callback='oob', user_agent=defaults.USER_AGENT): """ Initiate an oauth handshake with MediaWiki. :Parameters: mw_uri : `str` The base URI of the MediaWiki installation. Note that the URI should end in ``"index.php"``. consumer_token : :class:`~mwoauth.ConsumerToken` A token representing you, the consumer. Provided by MediaWiki via ``Special:OAuthConsumerRegistration``. callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing a request for access """ auth = OAuth1(consumer_token.key, client_secret=consumer_token.secret, callback_uri=callback) r = requests.post(url=mw_uri, params={'title': "Special:OAuth/initiate"}, auth=auth, headers={'User-Agent': user_agent}) credentials = parse_qs(r.content) if credentials is None or credentials == {}: raise OAuthException( "Expected x-www-form-urlencoded response from " + "MediaWiki, but got something else: " + "{0}".format(repr(r.content))) elif b('oauth_token') not in credentials or \ b('oauth_token_secret') not in credentials: raise OAuthException( "MediaWiki response lacks token information: " "{0}".format(repr(credentials))) else: request_token = RequestToken( credentials.get(b('oauth_token'))[0], credentials.get(b('oauth_token_secret'))[0] ) params = {'title': "Special:OAuth/authenticate", 'oauth_token': request_token.key, 'oauth_consumer_key': consumer_token.key} return ( mw_uri + "?" + urlencode(params), request_token )
[ "def", "initiate", "(", "mw_uri", ",", "consumer_token", ",", "callback", "=", "'oob'", ",", "user_agent", "=", "defaults", ".", "USER_AGENT", ")", ":", "auth", "=", "OAuth1", "(", "consumer_token", ".", "key", ",", "client_secret", "=", "consumer_token", ".", "secret", ",", "callback_uri", "=", "callback", ")", "r", "=", "requests", ".", "post", "(", "url", "=", "mw_uri", ",", "params", "=", "{", "'title'", ":", "\"Special:OAuth/initiate\"", "}", ",", "auth", "=", "auth", ",", "headers", "=", "{", "'User-Agent'", ":", "user_agent", "}", ")", "credentials", "=", "parse_qs", "(", "r", ".", "content", ")", "if", "credentials", "is", "None", "or", "credentials", "==", "{", "}", ":", "raise", "OAuthException", "(", "\"Expected x-www-form-urlencoded response from \"", "+", "\"MediaWiki, but got something else: \"", "+", "\"{0}\"", ".", "format", "(", "repr", "(", "r", ".", "content", ")", ")", ")", "elif", "b", "(", "'oauth_token'", ")", "not", "in", "credentials", "or", "b", "(", "'oauth_token_secret'", ")", "not", "in", "credentials", ":", "raise", "OAuthException", "(", "\"MediaWiki response lacks token information: \"", "\"{0}\"", ".", "format", "(", "repr", "(", "credentials", ")", ")", ")", "else", ":", "request_token", "=", "RequestToken", "(", "credentials", ".", "get", "(", "b", "(", "'oauth_token'", ")", ")", "[", "0", "]", ",", "credentials", ".", "get", "(", "b", "(", "'oauth_token_secret'", ")", ")", "[", "0", "]", ")", "params", "=", "{", "'title'", ":", "\"Special:OAuth/authenticate\"", ",", "'oauth_token'", ":", "request_token", ".", "key", ",", "'oauth_consumer_key'", ":", "consumer_token", ".", "key", "}", "return", "(", "mw_uri", "+", "\"?\"", "+", "urlencode", "(", "params", ")", ",", "request_token", ")" ]
Initiate an oauth handshake with MediaWiki. :Parameters: mw_uri : `str` The base URI of the MediaWiki installation. Note that the URI should end in ``"index.php"``. consumer_token : :class:`~mwoauth.ConsumerToken` A token representing you, the consumer. Provided by MediaWiki via ``Special:OAuthConsumerRegistration``. callback : `str` Callback URL. Defaults to 'oob'. :Returns: A `tuple` of two values: * a MediaWiki URL to direct the user to * a :class:`~mwoauth.RequestToken` representing a request for access
[ "Initiate", "an", "oauth", "handshake", "with", "MediaWiki", "." ]
cd6990753ec3d59b7cfd96a76459f71ef4790cd3
https://github.com/mediawiki-utilities/python-mwoauth/blob/cd6990753ec3d59b7cfd96a76459f71ef4790cd3/mwoauth/functions.py#L60-L121
train
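A minimal sketch of starting the handshake, assuming a consumer token already registered via Special:OAuthConsumerRegistration (the key, secret, and wiki URL here are placeholders):

consumer_token = ConsumerToken("my_key", "my_secret")
redirect_url, request_token = initiate(
    "https://en.wikipedia.org/w/index.php", consumer_token)
# send the user to redirect_url, then continue the handshake with request_token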
freshbooks/statsdecor
statsdecor/__init__.py
configure
def configure(*args, **kwargs): """Configure the module level statsd client that will be used in all library operations. Frequently used from application initialization code. >>> import statsdecor >>> statsdecor.configure( host='localhost', port=8125, prefix='myapp', maxudpsize=25) """ global _stats_client log.debug('statsd.configure(%s)' % kwargs) _config.update(kwargs) _stats_client = _create_client(**_config)
python
def configure(*args, **kwargs): """Configure the module level statsd client that will be used in all library operations. Frequently used from application initialization code. >>> import statsdecor >>> statsdecor.configure( host='localhost', port=8125, prefix='myapp', maxudpsize=25) """ global _stats_client log.debug('statsd.configure(%s)' % kwargs) _config.update(kwargs) _stats_client = _create_client(**_config)
[ "def", "configure", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "_stats_client", "log", ".", "debug", "(", "'statsd.configure(%s)'", "%", "kwargs", ")", "_config", ".", "update", "(", "kwargs", ")", "_stats_client", "=", "_create_client", "(", "*", "*", "_config", ")" ]
Configure the module level statsd client that will be used in all library operations. Frequently used from application initialization code. >>> import statsdecor >>> statsdecor.configure( host='localhost', port=8125, prefix='myapp', maxudpsize=25)
[ "Configure", "the", "module", "level", "statsd", "client", "that", "will", "be", "used", "in", "all", "library", "operations", "." ]
1c4a98e120799b430fd40c8fede9020a91162d31
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L31-L49
train
freshbooks/statsdecor
statsdecor/__init__.py
incr
def incr(name, value=1, rate=1, tags=None): """Increment a metric by value. >>> import statsdecor >>> statsdecor.incr('my.metric') """ client().incr(name, value, rate, tags)
python
def incr(name, value=1, rate=1, tags=None): """Increment a metric by value. >>> import statsdecor >>> statsdecor.incr('my.metric') """ client().incr(name, value, rate, tags)
[ "def", "incr", "(", "name", ",", "value", "=", "1", ",", "rate", "=", "1", ",", "tags", "=", "None", ")", ":", "client", "(", ")", ".", "incr", "(", "name", ",", "value", ",", "rate", ",", "tags", ")" ]
Increment a metric by value. >>> import statsdecor >>> statsdecor.incr('my.metric')
[ "Increment", "a", "metric", "by", "value", "." ]
1c4a98e120799b430fd40c8fede9020a91162d31
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L59-L65
train
freshbooks/statsdecor
statsdecor/__init__.py
decr
def decr(name, value=1, rate=1, tags=None): """Decrement a metric by value. >>> import statsdecor >>> statsdecor.decr('my.metric') """ client().decr(name, value, rate, tags)
python
def decr(name, value=1, rate=1, tags=None): """Decrement a metric by value. >>> import statsdecor >>> statsdecor.decr('my.metric') """ client().decr(name, value, rate, tags)
[ "def", "decr", "(", "name", ",", "value", "=", "1", ",", "rate", "=", "1", ",", "tags", "=", "None", ")", ":", "client", "(", ")", ".", "decr", "(", "name", ",", "value", ",", "rate", ",", "tags", ")" ]
Decrement a metric by value. >>> import statsdecor >>> statsdecor.decr('my.metric')
[ "Decrement", "a", "metric", "by", "value", "." ]
1c4a98e120799b430fd40c8fede9020a91162d31
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L68-L74
train
freshbooks/statsdecor
statsdecor/__init__.py
gauge
def gauge(name, value, rate=1, tags=None): """Set the value for a gauge. >>> import statsdecor >>> statsdecor.gauge('my.metric', 10) """ client().gauge(name, value, rate, tags)
python
def gauge(name, value, rate=1, tags=None): """Set the value for a gauge. >>> import statsdecor >>> statsdecor.gauge('my.metric', 10) """ client().gauge(name, value, rate, tags)
[ "def", "gauge", "(", "name", ",", "value", ",", "rate", "=", "1", ",", "tags", "=", "None", ")", ":", "client", "(", ")", ".", "gauge", "(", "name", ",", "value", ",", "rate", ",", "tags", ")" ]
Set the value for a gauge. >>> import statsdecor >>> statsdecor.gauge('my.metric', 10)
[ "Set", "the", "value", "for", "a", "gauge", "." ]
1c4a98e120799b430fd40c8fede9020a91162d31
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L77-L83
train
freshbooks/statsdecor
statsdecor/__init__.py
timing
def timing(name, delta, rate=1, tags=None): """Sends new timing information. `delta` is in milliseconds. >>> import statsdecor >>> statsdecor.timing('my.metric', 314159265359) """ return client().timing(name, delta, rate=rate, tags=tags)
python
def timing(name, delta, rate=1, tags=None): """Sends new timing information. `delta` is in milliseconds. >>> import statsdecor >>> statsdecor.timing('my.metric', 314159265359) """ return client().timing(name, delta, rate=rate, tags=tags)
[ "def", "timing", "(", "name", ",", "delta", ",", "rate", "=", "1", ",", "tags", "=", "None", ")", ":", "return", "client", "(", ")", ".", "timing", "(", "name", ",", "delta", ",", "rate", "=", "rate", ",", "tags", "=", "tags", ")" ]
Sends new timing information. `delta` is in milliseconds. >>> import statsdecor >>> statsdecor.timing('my.metric', 314159265359)
[ "Sends", "new", "timing", "information", ".", "delta", "is", "in", "milliseconds", "." ]
1c4a98e120799b430fd40c8fede9020a91162d31
https://github.com/freshbooks/statsdecor/blob/1c4a98e120799b430fd40c8fede9020a91162d31/statsdecor/__init__.py#L97-L103
train
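Taken together, the statsdecor helpers support a configure-once, use-anywhere flow; a sketch assuming a statsd daemon on localhost:

import statsdecor

statsdecor.configure(host='localhost', port=8125, prefix='myapp')
statsdecor.incr('requests')          # count an event
statsdecor.gauge('queue.depth', 10)  # record an instantaneous value
statsdecor.timing('db.query', 42)    # latency in milliseconds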
ArabellaTech/django-basic-cms
basic_cms/admin/__init__.py
PageAdmin.list_pages
def list_pages(self, request, template_name=None, extra_context=None): """List root pages""" if not self.admin_site.has_permission(request): return self.admin_site.login(request) language = get_language_from_request(request) query = request.POST.get('q', '').strip() if query: page_ids = list(set([c.page.pk for c in Content.objects.filter(body__icontains=query)])) pages = Page.objects.filter(pk__in=page_ids) else: pages = Page.objects.root() if settings.PAGE_HIDE_SITES: pages = pages.filter(sites=settings.SITE_ID) context = { 'can_publish': request.user.has_perm('pages.can_publish'), 'language': language, 'name': _("page"), 'pages': pages, 'opts': self.model._meta, 'q': query } context.update(extra_context or {}) change_list = self.changelist_view(request, context) return change_list
python
def list_pages(self, request, template_name=None, extra_context=None): """List root pages""" if not self.admin_site.has_permission(request): return self.admin_site.login(request) language = get_language_from_request(request) query = request.POST.get('q', '').strip() if query: page_ids = list(set([c.page.pk for c in Content.objects.filter(body__icontains=query)])) pages = Page.objects.filter(pk__in=page_ids) else: pages = Page.objects.root() if settings.PAGE_HIDE_SITES: pages = pages.filter(sites=settings.SITE_ID) context = { 'can_publish': request.user.has_perm('pages.can_publish'), 'language': language, 'name': _("page"), 'pages': pages, 'opts': self.model._meta, 'q': query } context.update(extra_context or {}) change_list = self.changelist_view(request, context) return change_list
[ "def", "list_pages", "(", "self", ",", "request", ",", "template_name", "=", "None", ",", "extra_context", "=", "None", ")", ":", "if", "not", "self", ".", "admin_site", ".", "has_permission", "(", "request", ")", ":", "return", "self", ".", "admin_site", ".", "login", "(", "request", ")", "language", "=", "get_language_from_request", "(", "request", ")", "query", "=", "request", ".", "POST", ".", "get", "(", "'q'", ",", "''", ")", ".", "strip", "(", ")", "if", "query", ":", "page_ids", "=", "list", "(", "set", "(", "[", "c", ".", "page", ".", "pk", "for", "c", "in", "Content", ".", "objects", ".", "filter", "(", "body__icontains", "=", "query", ")", "]", ")", ")", "pages", "=", "Page", ".", "objects", ".", "filter", "(", "pk__in", "=", "page_ids", ")", "else", ":", "pages", "=", "Page", ".", "objects", ".", "root", "(", ")", "if", "settings", ".", "PAGE_HIDE_SITES", ":", "pages", "=", "pages", ".", "filter", "(", "sites", "=", "settings", ".", "SITE_ID", ")", "context", "=", "{", "'can_publish'", ":", "request", ".", "user", ".", "has_perm", "(", "'pages.can_publish'", ")", ",", "'language'", ":", "language", ",", "'name'", ":", "_", "(", "\"page\"", ")", ",", "'pages'", ":", "pages", ",", "'opts'", ":", "self", ".", "model", ".", "_meta", ",", "'q'", ":", "query", "}", "context", ".", "update", "(", "extra_context", "or", "{", "}", ")", "change_list", "=", "self", ".", "changelist_view", "(", "request", ",", "context", ")", "return", "change_list" ]
List root pages
[ "List", "root", "pages" ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/admin/__init__.py#L347-L376
train
lowandrew/OLCTools
coreGenome/annotate.py
Annotate.codingthreads
def codingthreads(self): """ Find CDS features in .gff files to filter out non-coding sequences from the analysis """ printtime('Extracting CDS features', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.codingsequences, args=()) # Set the daemon to True so worker threads exit when the main thread does threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.codingqueue.put(sample) self.codingqueue.join() # Create CDS files and determine gene presence/absence self.corethreads()
python
def codingthreads(self): """ Find CDS features in .gff files to filter out non-coding sequences from the analysis """ printtime('Extracting CDS features', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.codingsequences, args=()) # Set the daemon to True so worker threads exit when the main thread does threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.codingqueue.put(sample) self.codingqueue.join() # Create CDS files and determine gene presence/absence self.corethreads()
[ "def", "codingthreads", "(", "self", ")", ":", "printtime", "(", "'Extracting CDS features'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "codingsequences", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "self", ".", "codingqueue", ".", "put", "(", "sample", ")", "self", ".", "codingqueue", ".", "join", "(", ")", "# Create CDS files and determine gene presence/absence", "self", ".", "corethreads", "(", ")" ]
Find CDS features in .gff files to filter out non-coding sequences from the analysis
[ "Find", "CDS", "features", "in", ".", "gff", "files", "to", "filter", "out", "non", "-", "coding", "sequences", "from", "the", "analysis" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L142-L159
train
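codingthreads and corethreads both use the daemon-worker/queue pattern; here it is in isolation, with hypothetical task data:

from queue import Queue
from threading import Thread

tasks = Queue()

def worker():
    while True:
        item = tasks.get()
        # ... process item ...
        tasks.task_done()

for _ in range(4):
    thread = Thread(target=worker)
    thread.setDaemon(True)  # daemon threads exit with the main program
    thread.start()

for item in ['a', 'b', 'c']:
    tasks.put(item)
tasks.join()  # block until every queued item has been processed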
lowandrew/OLCTools
coreGenome/annotate.py
Annotate.corethreads
def corethreads(self): """ Create a .cds file consisting of fasta records of CDS features for each strain """ printtime('Creating CDS files and finding core genes', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.coregroups, args=()) # Set the daemon to True so worker threads exit when the main thread does threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: # Define the name of the file to store the CDS nucleotide sequences sample.prokka.cds = os.path.join(sample.prokka.outputdir, '{}.cds'.format(sample.name)) self.corequeue.put(sample) self.corequeue.join() # Write the core .fasta files for each gene self.corewriter()
python
def corethreads(self): """ Create a .cds file consisting of fasta records of CDS features for each strain """ printtime('Creating CDS files and finding core genes', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.coregroups, args=()) # Set the daemon to True so worker threads exit when the main thread does threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: # Define the name of the file to store the CDS nucleotide sequences sample.prokka.cds = os.path.join(sample.prokka.outputdir, '{}.cds'.format(sample.name)) self.corequeue.put(sample) self.corequeue.join() # Write the core .fasta files for each gene self.corewriter()
[ "def", "corethreads", "(", "self", ")", ":", "printtime", "(", "'Creating CDS files and finding core genes'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "coregroups", ",", "args", "=", "(", ")", ")", "# Set the daemon to true - something to do with thread management", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "# Define the name of the file to store the CDS nucleotide sequences", "sample", ".", "prokka", ".", "cds", "=", "os", ".", "path", ".", "join", "(", "sample", ".", "prokka", ".", "outputdir", ",", "'{}.cds'", ".", "format", "(", "sample", ".", "name", ")", ")", "self", ".", "corequeue", ".", "put", "(", "sample", ")", "self", ".", "corequeue", ".", "join", "(", ")", "# Write the core .fasta files for each gene", "self", ".", "corewriter", "(", ")" ]
Create a .cds file consisting of fasta records of CDS features for each strain
[ "Create", "a", ".", "cds", "file", "consisting", "of", "fasta", "records", "of", "CDS", "features", "for", "each", "strain" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L193-L212
train
lowandrew/OLCTools
coreGenome/annotate.py
Annotate.corewriter
def corewriter(self): """ Creates .fasta files containing all alleles for each gene """ printtime('Creating core allele files', self.start) for gene in sorted(self.genesequence): self.geneset.add(gene) # Set the name of the allele file genefile = os.path.join(self.coregenelocation, '{}.fasta'.format(gene)) # If the file doesn't exist, create it if not os.path.isfile(genefile): with open(genefile, 'w') as core: for count, sequence in enumerate(self.genesequence[gene]): # The definition line is the gene name, and the allele number (count (+ 1 to compensate for # base zero)) definitionline = '{}-{}'.format(gene, count + 1) # Create a sequence record using BioPython fasta = SeqRecord(Seq(sequence), # Without this, the header will be improperly formatted description='', # Use >:definitionline as the header id=definitionline) # Use the SeqIO module to properly format the new sequence record SeqIO.write(fasta, core, 'fasta') for strain in self.coresequence[sequence]: # Record the strain name, the gene name, and the allele number. # [:-6] removes the contig number: 2014-SEQ-0276_00001 becomes 2014-SEQ-0276 try: self.corealleles[strain[:-6]].update({gene: count + 1}) except KeyError: self.corealleles[strain[:-6]] = {gene: count + 1} else: # If the file exists, don't recreate it; only iterate through the dictionary of gene sequences for count, sequence in enumerate(self.genesequence[gene]): for strain in self.coresequence[sequence]: # Populate the dictionary as above try: self.corealleles[strain[:-6]].update({gene: count + 1}) except KeyError: self.corealleles[strain[:-6]] = {gene: count + 1} # Create a combined file of all the core genes to be used in typing strain(s) of interest if not os.path.isfile(os.path.join(self.coregenelocation, 'core_combined.fasta')): fastafiles = glob(os.path.join(self.coregenelocation, '*.fasta')) # Run the method for each allele self.combinealleles(fastafiles) # Run the profiler self.profiler()
python
def corewriter(self): """ Creates .fasta files containing all alleles for each gene """ printtime('Creating core allele files', self.start) for gene in sorted(self.genesequence): self.geneset.add(gene) # Set the name of the allele file genefile = os.path.join(self.coregenelocation, '{}.fasta'.format(gene)) # If the file doesn't exist, create it if not os.path.isfile(genefile): with open(genefile, 'w') as core: for count, sequence in enumerate(self.genesequence[gene]): # The definition line is the gene name, and the allele number (count (+ 1 to compensate for # base zero)) definitionline = '{}-{}'.format(gene, count + 1) # Create a sequence record using BioPython fasta = SeqRecord(Seq(sequence), # Without this, the header will be improperly formatted description='', # Use >:definitionline as the header id=definitionline) # Use the SeqIO module to properly format the new sequence record SeqIO.write(fasta, core, 'fasta') for strain in self.coresequence[sequence]: # Record the strain name, the gene name, and the allele number. # [:-6] removes the contig number: 2014-SEQ-0276_00001 becomes 2014-SEQ-0276 try: self.corealleles[strain[:-6]].update({gene: count + 1}) except KeyError: self.corealleles[strain[:-6]] = {gene: count + 1} else: # If the file exists, don't recreate it; only iterate through the dictionary of gene sequences for count, sequence in enumerate(self.genesequence[gene]): for strain in self.coresequence[sequence]: # Populate the dictionary as above try: self.corealleles[strain[:-6]].update({gene: count + 1}) except KeyError: self.corealleles[strain[:-6]] = {gene: count + 1} # Create a combined file of all the core genes to be used in typing strain(s) of interest if not os.path.isfile(os.path.join(self.coregenelocation, 'core_combined.fasta')): fastafiles = glob(os.path.join(self.coregenelocation, '*.fasta')) # Run the method for each allele self.combinealleles(fastafiles) # Run the profiler self.profiler()
[ "def", "corewriter", "(", "self", ")", ":", "printtime", "(", "'Creating core allele files'", ",", "self", ".", "start", ")", "for", "gene", "in", "sorted", "(", "self", ".", "genesequence", ")", ":", "self", ".", "geneset", ".", "add", "(", "gene", ")", "# Set the name of the allele file", "genefile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "coregenelocation", ",", "'{}.fasta'", ".", "format", "(", "gene", ")", ")", "# If the file doesn't exist, create it", "if", "not", "os", ".", "path", ".", "isfile", "(", "genefile", ")", ":", "with", "open", "(", "genefile", ",", "'w'", ")", "as", "core", ":", "for", "count", ",", "sequence", "in", "enumerate", "(", "self", ".", "genesequence", "[", "gene", "]", ")", ":", "# The definition line is the gene name, and the allele number (count (+ 1 to compensate for", "# base zero))", "definitionline", "=", "'{}-{}'", ".", "format", "(", "gene", ",", "count", "+", "1", ")", "# Create a sequence record using BioPython", "fasta", "=", "SeqRecord", "(", "Seq", "(", "sequence", ")", ",", "# Without this, the header will be improperly formatted", "description", "=", "''", ",", "# Use >:definitionline as the header", "id", "=", "definitionline", ")", "# Use the SeqIO module to properly format the new sequence record", "SeqIO", ".", "write", "(", "fasta", ",", "core", ",", "'fasta'", ")", "for", "strain", "in", "self", ".", "coresequence", "[", "sequence", "]", ":", "# Record the strain name, the gene name, and the allele number.", "# [:-6] removes the contig number: 2014-SEQ-0276_00001 becomes 2014-SEQ-0276", "try", ":", "self", ".", "corealleles", "[", "strain", "[", ":", "-", "6", "]", "]", ".", "update", "(", "{", "gene", ":", "count", "+", "1", "}", ")", "except", "KeyError", ":", "self", ".", "corealleles", "[", "strain", "[", ":", "-", "6", "]", "]", "=", "{", "gene", ":", "count", "+", "1", "}", "else", ":", "# If the file exists, don't recreate it; only iterate through the dictionary of gene sequences", "for", "count", ",", "sequence", "in", "enumerate", "(", "self", ".", "genesequence", "[", "gene", "]", ")", ":", "for", "strain", "in", "self", ".", "coresequence", "[", "sequence", "]", ":", "# Populate the dictionary as above", "try", ":", "self", ".", "corealleles", "[", "strain", "[", ":", "-", "6", "]", "]", ".", "update", "(", "{", "gene", ":", "count", "+", "1", "}", ")", "except", "KeyError", ":", "self", ".", "corealleles", "[", "strain", "[", ":", "-", "6", "]", "]", "=", "{", "gene", ":", "count", "+", "1", "}", "# Create a combined file of all the core genes to be used in typing strain(s) of interest", "if", "not", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "coregenelocation", ",", "'core_combined.fasta'", ")", ")", ":", "fastafiles", "=", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "coregenelocation", ",", "'*.fasta'", ")", ")", "# Run the method for each allele", "self", ".", "combinealleles", "(", "fastafiles", ")", "# Run the profiler", "self", ".", "profiler", "(", ")" ]
Creates .fasta files containing all alleles for each gene
[ "Creates", ".", "fasta", "files", "containing", "all", "alleles", "for", "each", "gene" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L258-L304
train
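The BioPython record handling in corewriter reduces to the following sketch (the gene name and sequence are made up):

from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

record = SeqRecord(Seq('ATGAAATGA'), id='geneX-1', description='')
with open('geneX.fasta', 'w') as handle:
    SeqIO.write(record, handle, 'fasta')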
lowandrew/OLCTools
coreGenome/annotate.py
Annotate.profiler
def profiler(self): """ Calculates the core profile for each strain """ printtime('Calculating core profiles', self.start) # Only create the profile if it doesn't exist already # if not os.path.isfile('{}/profile.txt'.format(self.profilelocation)): for strain in self.corealleles: # Add the gene name and allele number pair for each core gene in each strain self.coreset.add(tuple(sorted(self.corealleles[strain].items()))) # Set the header to be similar to an MLST profile - ST,gene1,gene2,etc header = 'ST,{}\n'.format(','.join(sorted(self.geneset))) data = '' for count, core in enumerate(sorted(self.coreset)): # Increment count now to account for 0-based numbering count += 1 # Add the sequence type number to the profile data += '{}'.format(count) # Store the sequence type for each strain for strain in self.corealleles: if tuple(sorted(self.corealleles[strain].items())) == core: self.profiles[strain] = count # Add the allele number for each gene for gene in sorted(core): data += ',{}'.format(gene[1]) data += '\n' # Write the profile with open(os.path.join(self.profilelocation, 'profile.txt'), 'w') as profile: profile.write(header) profile.write(data) # Create a list of which strains correspond to the sequence types self.linker()
python
def profiler(self): """ Calculates the core profile for each strain """ printtime('Calculating core profiles', self.start) # Only create the profile if it doesn't exist already # if not os.path.isfile('{}/profile.txt'.format(self.profilelocation)): for strain in self.corealleles: # Add the gene name and allele number pair for each core gene in each strain self.coreset.add(tuple(sorted(self.corealleles[strain].items()))) # Set the header to be similar to an MLST profile - ST,gene1,gene2,etc header = 'ST,{}\n'.format(','.join(sorted(self.geneset))) data = '' for count, core in enumerate(sorted(self.coreset)): # Increment count now to account for 0-based numbering count += 1 # Add the sequence type number to the profile data += '{}'.format(count) # Store the sequence type for each strain for strain in self.corealleles: if tuple(sorted(self.corealleles[strain].items())) == core: self.profiles[strain] = count # Add the allele number for each gene for gene in sorted(core): data += ',{}'.format(gene[1]) data += '\n' # Write the profile with open(os.path.join(self.profilelocation, 'profile.txt'), 'w') as profile: profile.write(header) profile.write(data) # Create a list of which strains correspond to the sequence types self.linker()
[ "def", "profiler", "(", "self", ")", ":", "printtime", "(", "'Calculating core profiles'", ",", "self", ".", "start", ")", "# Only create the profile if it doesn't exist already", "# if not os.path.isfile('{}/profile.txt'.format(self.profilelocation)):", "for", "strain", "in", "self", ".", "corealleles", ":", "# Add the gene name and allele number pair for each core gene in each strain", "self", ".", "coreset", ".", "add", "(", "tuple", "(", "sorted", "(", "self", ".", "corealleles", "[", "strain", "]", ".", "items", "(", ")", ")", ")", ")", "# Set the header to be similar to an MLST profile - ST,gene1,gene2,etc", "header", "=", "'ST,{}\\n'", ".", "format", "(", "','", ".", "join", "(", "sorted", "(", "self", ".", "geneset", ")", ")", ")", "data", "=", "''", "for", "count", ",", "core", "in", "sorted", "(", "enumerate", "(", "self", ".", "coreset", ")", ")", ":", "# Increment count now to account for 0-based numbering", "count", "+=", "1", "# Add the sequence type number to the profile", "data", "+=", "'{}'", ".", "format", "(", "count", ")", "# Store the sequence type for each strain", "for", "strain", "in", "self", ".", "corealleles", ":", "if", "tuple", "(", "sorted", "(", "self", ".", "corealleles", "[", "strain", "]", ".", "items", "(", ")", ")", ")", "==", "core", ":", "self", ".", "profiles", "[", "strain", "]", "=", "count", "# Add the allele number for each gene", "for", "gene", "in", "sorted", "(", "core", ")", ":", "data", "+=", "',{}'", ".", "format", "(", "gene", "[", "1", "]", ")", "data", "+=", "'\\n'", "# Write the profile", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "profilelocation", ",", "'profile.txt'", ")", ",", "'w'", ")", "as", "profile", ":", "profile", ".", "write", "(", "header", ")", "profile", ".", "write", "(", "data", ")", "# Create a list of which strains correspond to the sequence types", "self", ".", "linker", "(", ")" ]
Calculates the core profile for each strain
[ "Calculates", "the", "core", "profile", "for", "each", "strain" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L328-L359
train
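The core of profiler is collapsing identical allele sets into one sequence type by hashing sorted (gene, allele) tuples; a sketch with two hypothetical strains:

corealleles = {'strainA': {'geneX': 1, 'geneY': 2},
               'strainB': {'geneX': 1, 'geneY': 2}}
coreset = {tuple(sorted(alleles.items())) for alleles in corealleles.values()}
print(len(coreset))  # -> 1: identical allele sets share a sequence type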
lowandrew/OLCTools
coreGenome/annotate.py
Annotate.linker
def linker(self): """ Link the sequence types to the strains. Create a .csv file of the linkages """ strainprofile = os.path.join(self.profilelocation, 'strainprofiles.txt') if not os.path.isfile(strainprofile): header = 'Strain,SequenceType\n' data = '' # Sort the profiles based on sequence type sortedprofiles = sorted(self.profiles.items(), key=operator.itemgetter(1)) # Associate the sequence type with each strain for strain, seqtype in sortedprofiles: for sample in self.runmetadata.samples: if sample.name == strain: sample.general.coretype = seqtype data += '{},{}\n'.format(strain, seqtype) # Write the results to file with open(strainprofile, 'w') as profile: profile.write(header) profile.write(data)
python
def linker(self): """ Link the sequence types to the strains. Create a .csv file of the linkages """ strainprofile = os.path.join(self.profilelocation, 'strainprofiles.txt') if not os.path.isfile(strainprofile): header = 'Strain,SequenceType\n' data = '' # Sort the profiles based on sequence type sortedprofiles = sorted(self.profiles.items(), key=operator.itemgetter(1)) # Associate the sequence type with each strain for strain, seqtype in sortedprofiles: for sample in self.runmetadata.samples: if sample.name == strain: sample.general.coretype = seqtype data += '{},{}\n'.format(strain, seqtype) # Write the results to file with open(strainprofile, 'w') as profile: profile.write(header) profile.write(data)
[ "def", "linker", "(", "self", ")", ":", "strainprofile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "profilelocation", ",", "'strainprofiles.txt'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "strainprofile", ")", ":", "header", "=", "'Strain,SequenceType\\n'", "data", "=", "''", "# Sort the profiles based on sequence type", "sortedprofiles", "=", "sorted", "(", "self", ".", "profiles", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "1", ")", ")", "# Associate the sequence type with each strain", "for", "strain", ",", "seqtype", "in", "sortedprofiles", ":", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "if", "sample", ".", "name", "==", "strain", ":", "sample", ".", "general", ".", "coretype", "=", "seqtype", "data", "+=", "'{},{}\\n'", ".", "format", "(", "strain", ",", "seqtype", ")", "# Write the results to file", "with", "open", "(", "strainprofile", ",", "'w'", ")", "as", "profile", ":", "profile", ".", "write", "(", "header", ")", "profile", ".", "write", "(", "data", ")" ]
Link the sequence types to the strains. Create a .csv file of the linkages
[ "Link", "the", "sequence", "types", "to", "the", "strains", ".", "Create", "a", ".", "csv", "file", "of", "the", "linkages" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/coreGenome/annotate.py#L361-L381
train
AllTheWayDown/turgles
turgles/buffer.py
ChunkBuffer.get
def get(self, index): """Get a chunk by index""" assert index <= self.count assert index < self.size offset = index * self.chunk_size return self.data[offset:offset + self.chunk_size]
python
def get(self, index): """Get a chunk by index""" assert index <= self.count assert index < self.size offset = index * self.chunk_size return self.data[offset:offset + self.chunk_size]
[ "def", "get", "(", "self", ",", "index", ")", ":", "assert", "index", "<=", "self", ".", "count", "assert", "index", "<", "self", ".", "size", "offset", "=", "index", "*", "self", ".", "chunk_size", "return", "self", ".", "data", "[", "offset", ":", "offset", "+", "self", ".", "chunk_size", "]" ]
Get a chunk by index
[ "Get", "a", "chunk", "by", "index" ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L59-L64
train
AllTheWayDown/turgles
turgles/buffer.py
ChunkBuffer.new
def new(self, init=None): """Return the last currently unused chunk, resizing if needed. If init is passed, chunk will be initialised to that data""" if self.count >= self.size: self.resize(self.count * 2) chunk = self.get(self.count) if init is not None: assert len(init) == self.chunk_size chunk[0:self.chunk_size] = init self.count += 1 return chunk
python
def new(self, init=None): """Return the last currently unused chunk, resizing if needed. If init is passed, chunk will be initialised to that data""" if self.count >= self.size: self.resize(self.count * 2) chunk = self.get(self.count) if init is not None: assert len(init) == self.chunk_size chunk[0:self.chunk_size] = init self.count += 1 return chunk
[ "def", "new", "(", "self", ",", "init", "=", "None", ")", ":", "if", "self", ".", "count", ">=", "self", ".", "size", ":", "self", ".", "resize", "(", "self", ".", "count", "*", "2", ")", "chunk", "=", "self", ".", "get", "(", "self", ".", "count", ")", "if", "init", "is", "not", "None", ":", "assert", "len", "(", "init", ")", "==", "self", ".", "chunk_size", "chunk", "[", "0", ":", "self", ".", "chunk_size", "]", "=", "init", "self", ".", "count", "+=", "1", "return", "chunk" ]
Return the last currently unused chunk, resizing if needed. If init is passed, chunk will be initialised to that data
[ "Return", "the", "last", "currently", "unused", "chunk", "resizing", "if", "needed", "." ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L66-L77
train
AllTheWayDown/turgles
turgles/buffer.py
ChunkBuffer.resize
def resize(self, new_size): """Create a new larger array, and copy data over""" assert new_size > self.size new_data = self._allocate(new_size) # copy new_data[0:self.size * self.chunk_size] = self.data self.size = new_size self.data = new_data
python
def resize(self, new_size): """Create a new larger array, and copy data over""" assert new_size > self.size new_data = self._allocate(new_size) # copy new_data[0:self.size * self.chunk_size] = self.data self.size = new_size self.data = new_data
[ "def", "resize", "(", "self", ",", "new_size", ")", ":", "assert", "new_size", ">", "self", ".", "size", "new_data", "=", "self", ".", "_allocate", "(", "new_size", ")", "# copy", "new_data", "[", "0", ":", "self", ".", "size", "*", "self", ".", "chunk_size", "]", "=", "self", ".", "data", "self", ".", "size", "=", "new_size", "self", ".", "data", "=", "new_data" ]
Create a new larger array, and copy data over
[ "Create", "a", "new", "larger", "array", "and", "copy", "data", "over" ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L79-L86
train
AllTheWayDown/turgles
turgles/buffer.py
ChunkBuffer.remove
def remove(self, index): """Remove chunk at index. Doesn't actually delete data, copies last chunk's data over data to be removed, and decreases the count""" assert index < self.count last_index = self.count - 1 data = self.get(index) if index == last_index: # easy case - nothing to do except zero last chunk last_data = data moved = None else: last_data = self.get(last_index) # copy the last chunk's data over the data to be deleted data[0:self.chunk_size] = last_data moved = last_index # zero last chunk's data last_data[0:self.chunk_size] = [0] * self.chunk_size self.count -= 1 # provide which index has now moved return moved
python
def remove(self, index): """Remove chunk at index. Doesn't actually delete data, copies last chunk's data over data to be removed, and decreases the count""" assert index < self.count last_index = self.count - 1 data = self.get(index) if index == last_index: # easy case - nothing to do except zero last chunk last_data = data moved = None else: last_data = self.get(last_index) # copy the last chunk's data over the data to be deleted data[0:self.chunk_size] = last_data moved = last_index # zero last chunk's data last_data[0:self.chunk_size] = [0] * self.chunk_size self.count -= 1 # provide which index has now moved return moved
[ "def", "remove", "(", "self", ",", "index", ")", ":", "assert", "index", "<", "self", ".", "count", "last_index", "=", "self", ".", "count", "-", "1", "data", "=", "self", ".", "get", "(", "index", ")", "if", "index", "==", "last_index", ":", "# easy case - nothing to do except zero last chunk", "last_data", "=", "data", "moved", "=", "None", "else", ":", "last_data", "=", "self", ".", "get", "(", "last_index", ")", "# copy the last chunk's data over the data to be deleted", "data", "[", "0", ":", "self", ".", "chunk_size", "]", "=", "last_data", "moved", "=", "last_index", "# zero last chunk's data", "last_data", "[", "0", ":", "self", ".", "chunk_size", "]", "=", "[", "0", "]", "*", "self", ".", "chunk_size", "self", ".", "count", "-=", "1", "# provide which index has now moved", "return", "moved" ]
Remove chunk at index. Doesn't actually delete data, copies last chunk's data over data to be removed, and decreases the count
[ "Remove", "chunk", "at", "index", "." ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L88-L112
train
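A sketch of the swap-with-last removal semantics; the constructor arguments here are assumptions, not necessarily the class's actual signature:

buf = ChunkBuffer(size=4, chunk_size=2)  # hypothetical constructor arguments
buf.new(init=[1, 1])   # index 0
buf.new(init=[2, 2])   # index 1
buf.new(init=[3, 3])   # index 2
moved = buf.remove(0)  # the last chunk's data is copied over index 0
print(moved, list(buf.get(0)))  # -> 2 [3, 3]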
AllTheWayDown/turgles
turgles/buffer.py
BufferManager.create_turtle
def create_turtle(self, id, shape, model_init, color_init): """Create a slice of memory for turtle data storage""" assert id not in self.id_to_shape data = self._create_turtle(id, shape, model_init, color_init) self.id_to_shape[id] = shape return data
python
def create_turtle(self, id, shape, model_init, color_init): """Create a slice of memory for turtle data storage""" assert id not in self.id_to_shape data = self._create_turtle(id, shape, model_init, color_init) self.id_to_shape[id] = shape return data
[ "def", "create_turtle", "(", "self", ",", "id", ",", "shape", ",", "model_init", ",", "color_init", ")", ":", "assert", "id", "not", "in", "self", ".", "id_to_shape", "data", "=", "self", ".", "_create_turtle", "(", "id", ",", "shape", ",", "model_init", ",", "color_init", ")", "self", ".", "id_to_shape", "[", "id", "]", "=", "shape", "return", "data" ]
Create a slice of memory for turtle data storage
[ "Create", "a", "slice", "of", "memory", "for", "turtle", "data", "storage" ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L210-L215
train
AllTheWayDown/turgles
turgles/buffer.py
BufferManager.set_shape
def set_shape(self, id, new_shape): """Copies the turtle data from the old shape buffer to the new""" old_shape = self.id_to_shape[id] old_buffer = self.get_buffer(old_shape) model, color = old_buffer.get(id) new_data = self._create_turtle(id, new_shape, model, color) old_buffer.remove(id) self.id_to_shape[id] = new_shape return new_data
python
def set_shape(self, id, new_shape): """Copies the turtle data from the old shape buffer to the new""" old_shape = self.id_to_shape[id] old_buffer = self.get_buffer(old_shape) model, color = old_buffer.get(id) new_data = self._create_turtle(id, new_shape, model, color) old_buffer.remove(id) self.id_to_shape[id] = new_shape return new_data
[ "def", "set_shape", "(", "self", ",", "id", ",", "new_shape", ")", ":", "old_shape", "=", "self", ".", "id_to_shape", "[", "id", "]", "old_buffer", "=", "self", ".", "get_buffer", "(", "old_shape", ")", "model", ",", "color", "=", "old_buffer", ".", "get", "(", "id", ")", "new_data", "=", "self", ".", "_create_turtle", "(", "id", ",", "new_shape", ",", "model", ",", "color", ")", "old_buffer", ".", "remove", "(", "id", ")", "self", ".", "id_to_shape", "[", "id", "]", "=", "new_shape", "return", "new_data" ]
Copies the turtle data from the old shape buffer to the new
[ "Copies", "the", "turtle", "data", "from", "the", "old", "shape", "buffer", "to", "the", "new" ]
1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852
https://github.com/AllTheWayDown/turgles/blob/1bb17abe9b3aa0953d9a8e9b05a23369c5bf8852/turgles/buffer.py#L222-L230
train
hughsie/python-appstream
appstream/component.py
Release.get_checksum_by_target
def get_checksum_by_target(self, target): """ returns a checksum of a specific kind """ for csum in self.checksums: if csum.target == target: return csum return None
python
def get_checksum_by_target(self, target): """ returns a checksum of a specific kind """ for csum in self.checksums: if csum.target == target: return csum return None
[ "def", "get_checksum_by_target", "(", "self", ",", "target", ")", ":", "for", "csum", "in", "self", ".", "checksums", ":", "if", "csum", ".", "target", "==", "target", ":", "return", "csum", "return", "None" ]
returns a checksum of a specific kind
[ "returns", "a", "checksum", "of", "a", "specific", "kind" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L155-L160
train
hughsie/python-appstream
appstream/component.py
Release.add_checksum
def add_checksum(self, csum): """ Add a checksum to a release object """ for csum_tmp in self.checksums: if csum_tmp.target == csum.target: self.checksums.remove(csum_tmp) break self.checksums.append(csum)
python
def add_checksum(self, csum): """ Add a checksum to a release object """ for csum_tmp in self.checksums: if csum_tmp.target == csum.target: self.checksums.remove(csum_tmp) break self.checksums.append(csum)
[ "def", "add_checksum", "(", "self", ",", "csum", ")", ":", "for", "csum_tmp", "in", "self", ".", "checksums", ":", "if", "csum_tmp", ".", "target", "==", "csum", ".", "target", ":", "self", ".", "checksums", ".", "remove", "(", "csum_tmp", ")", "break", "self", ".", "checksums", ".", "append", "(", "csum", ")" ]
Add a checksum to a release object
[ "Add", "a", "checksum", "to", "a", "release", "object" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L162-L168
train
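add_checksum (and add_image below) share a replace-then-append idiom; here it is in isolation with a stand-in class:

class Item:
    def __init__(self, target):
        self.target = target

items = [Item('content'), Item('container')]
new = Item('content')
for tmp in list(items):
    if tmp.target == new.target:
        items.remove(tmp)  # drop the existing entry with the same target
        break
items.append(new)
print([i.target for i in items])  # -> ['container', 'content']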
hughsie/python-appstream
appstream/component.py
Screenshot.get_image_by_kind
def get_image_by_kind(self, kind): """ returns an image of a specific kind """ for ss in self.images: if ss.kind == kind: return ss return None
python
def get_image_by_kind(self, kind): """ returns an image of a specific kind """ for ss in self.images: if ss.kind == kind: return ss return None
[ "def", "get_image_by_kind", "(", "self", ",", "kind", ")", ":", "for", "ss", "in", "self", ".", "images", ":", "if", "ss", ".", "kind", "==", "kind", ":", "return", "ss", "return", "None" ]
returns an image of a specific kind
[ "returns", "a", "image", "of", "a", "specific", "kind" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L260-L265
train
hughsie/python-appstream
appstream/component.py
Screenshot.add_image
def add_image(self, im): """ Add an image to a screenshot object """ for im_tmp in self.images: if im_tmp.kind == im.kind: self.images.remove(im_tmp) break self.images.append(im)
python
def add_image(self, im): """ Add an image to a screenshot object """ for im_tmp in self.images: if im_tmp.kind == im.kind: self.images.remove(im_tmp) break self.images.append(im)
[ "def", "add_image", "(", "self", ",", "im", ")", ":", "for", "im_tmp", "in", "self", ".", "images", ":", "if", "im_tmp", ".", "kind", "==", "im", ".", "kind", ":", "self", ".", "images", ".", "remove", "(", "im_tmp", ")", "break", "self", ".", "images", ".", "append", "(", "im", ")" ]
Add an image to a screenshot object
[ "Add", "a", "image", "to", "a", "screenshot", "object" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L267-L273
train
hughsie/python-appstream
appstream/component.py
Component.add_screenshot
def add_screenshot(self, screenshot): """ Add a screenshot object if it does not already exist """ if screenshot in self.screenshots: return self.screenshots.append(screenshot)
python
def add_screenshot(self, screenshot): """ Add a screenshot object if it does not already exist """ if screenshot in self.screenshots: return self.screenshots.append(screenshot)
[ "def", "add_screenshot", "(", "self", ",", "screenshot", ")", ":", "if", "screenshot", "in", "self", ".", "screenshots", ":", "return", "self", ".", "screenshots", ".", "append", "(", "screenshot", ")" ]
Add a screenshot object if it does not already exist
[ "Add", "a", "screenshot", "object", "if", "it", "does", "not", "already", "exist" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L446-L450
train
hughsie/python-appstream
appstream/component.py
Component.add_provide
def add_provide(self, provide): """ Add a provide object if it does not already exist """ for p in self.provides: if p.value == provide.value: return self.provides.append(provide)
python
def add_provide(self, provide): """ Add a provide object if it does not already exist """ for p in self.provides: if p.value == provide.value: return self.provides.append(provide)
[ "def", "add_provide", "(", "self", ",", "provide", ")", ":", "for", "p", "in", "self", ".", "provides", ":", "if", "p", ".", "value", "==", "provide", ".", "value", ":", "return", "self", ".", "provides", ".", "append", "(", "provide", ")" ]
Add a provide object if it does not already exist
[ "Add", "a", "provide", "object", "if", "it", "does", "not", "already", "exist" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L452-L457
train
hughsie/python-appstream
appstream/component.py
Component.get_provides_by_kind
def get_provides_by_kind(self, kind): """ Returns an array of provides of a certain kind """ provs = [] for p in self.provides: if p.kind == kind: provs.append(p) return provs
python
def get_provides_by_kind(self, kind): """ Returns an array of provides of a certain kind """ provs = [] for p in self.provides: if p.kind == kind: provs.append(p) return provs
[ "def", "get_provides_by_kind", "(", "self", ",", "kind", ")", ":", "provs", "=", "[", "]", "for", "p", "in", "self", ".", "provides", ":", "if", "p", ".", "kind", "==", "kind", ":", "provs", ".", "append", "(", "p", ")", "return", "provs" ]
Returns an array of provides of a certain kind
[ "Returns", "an", "array", "of", "provides", "of", "a", "certain", "kind" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L459-L465
train
hughsie/python-appstream
appstream/component.py
Component.add_require
def add_require(self, require):
    """ Add a require object if it does not already exist """
    for p in self.requires:
        if p.value == require.value:
            return
    self.requires.append(require)
python
def add_require(self, require):
    """ Add a require object if it does not already exist """
    for p in self.requires:
        if p.value == require.value:
            return
    self.requires.append(require)
[ "def", "add_require", "(", "self", ",", "require", ")", ":", "for", "p", "in", "self", ".", "requires", ":", "if", "p", ".", "value", "==", "require", ".", "value", ":", "return", "self", ".", "requires", ".", "append", "(", "require", ")" ]
Add a require object if it does not already exist
[ "Add", "a", "require", "object", "if", "it", "does", "not", "already", "exist" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L467-L472
train
hughsie/python-appstream
appstream/component.py
Component.get_require_by_kind
def get_require_by_kind(self, kind, value):
    """ Returns a requires object of a specific kind and value """
    for r in self.requires:
        if r.kind == kind and r.value == value:
            return r
    return None
python
def get_require_by_kind(self, kind, value):
    """ Returns a requires object of a specific kind and value """
    for r in self.requires:
        if r.kind == kind and r.value == value:
            return r
    return None
[ "def", "get_require_by_kind", "(", "self", ",", "kind", ",", "value", ")", ":", "for", "r", "in", "self", ".", "requires", ":", "if", "r", ".", "kind", "==", "kind", "and", "r", ".", "value", "==", "value", ":", "return", "r", "return", "None" ]
Returns a requires object of a specific kind and value
[ "Returns", "a", "requires", "object", "of", "a", "specific", "kind", "and", "value" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/component.py#L474-L479
train
hughsie/python-appstream
appstream/store.py
Store.to_file
def to_file(self, filename):
    """ Save the store to disk """

    # save compressed file
    xml = self.to_xml()
    f = gzip.open(filename, 'wb')
    try:
        f.write(xml.encode('utf-8'))
    finally:
        f.close()
python
def to_file(self, filename):
    """ Save the store to disk """

    # save compressed file
    xml = self.to_xml()
    f = gzip.open(filename, 'wb')
    try:
        f.write(xml.encode('utf-8'))
    finally:
        f.close()
[ "def", "to_file", "(", "self", ",", "filename", ")", ":", "# save compressed file", "xml", "=", "self", ".", "to_xml", "(", ")", "f", "=", "gzip", ".", "open", "(", "filename", ",", "'wb'", ")", "try", ":", "f", ".", "write", "(", "xml", ".", "encode", "(", "'utf-8'", ")", ")", "finally", ":", "f", ".", "close", "(", ")" ]
Save the store to disk
[ "Save", "the", "store", "to", "disk" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L52-L61
train
hughsie/python-appstream
appstream/store.py
Store.from_file
def from_file(self, filename):
    """ Open the store from disk """
    with gzip.open(filename, 'rb') as f:
        self.parse(f.read())
python
def from_file(self, filename):
    """ Open the store from disk """
    with gzip.open(filename, 'rb') as f:
        self.parse(f.read())
[ "def", "from_file", "(", "self", ",", "filename", ")", ":", "with", "gzip", ".", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "self", ".", "parse", "(", "f", ".", "read", "(", ")", ")" ]
Open the store from disk
[ "Open", "the", "store", "from", "disk" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L63-L66
train
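Since to_file() gzip-compresses the XML and from_file() feeds the bytes back through parse(), the two form a simple round trip. A minimal sketch, assuming Store and Component are importable from the package top level (an assumption) and that a bare component id is enough to serialize:

from appstream import Store, Component  # assumed import path

store = Store()
component = Component()
component.id = 'org.example.App'        # illustrative component id
store.add(component)

store.to_file('/tmp/appstream.xml.gz')  # gzip-compressed XML on disk

restored = Store()
restored.from_file('/tmp/appstream.xml.gz')
assert restored.get_component('org.example.App') is not None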
hughsie/python-appstream
appstream/store.py
Store.get_components
def get_components(self):
    """ Returns all the applications from the store """
    components = []
    for app_id in self.components:
        components.append(self.components[app_id])
    return components
python
def get_components(self):
    """ Returns all the applications from the store """
    components = []
    for app_id in self.components:
        components.append(self.components[app_id])
    return components
[ "def", "get_components", "(", "self", ")", ":", "components", "=", "[", "]", "for", "app_id", "in", "self", ".", "components", ":", "components", ".", "append", "(", "self", ".", "components", "[", "app_id", "]", ")", "return", "components" ]
Returns all the applications from the store
[ "Returns", "all", "the", "applications", "from", "the", "store" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L74-L79
train
hughsie/python-appstream
appstream/store.py
Store.add
def add(self, component):
    """ Add component to the store """

    # if already exists, just add the release objects
    old = self.get_component(component.id)
    if old:
        old.releases.extend(component.releases)
        return
    self.components[component.id] = component
python
def add(self, component):
    """ Add component to the store """

    # if already exists, just add the release objects
    old = self.get_component(component.id)
    if old:
        old.releases.extend(component.releases)
        return
    self.components[component.id] = component
[ "def", "add", "(", "self", ",", "component", ")", ":", "# if already exists, just add the release objects", "old", "=", "self", ".", "get_component", "(", "component", ".", "id", ")", "if", "old", ":", "old", ".", "releases", ".", "extend", "(", "component", ".", "releases", ")", "return", "self", ".", "components", "[", "component", ".", "id", "]", "=", "component" ]
Add component to the store
[ "Add", "component", "to", "the", "store" ]
f2606380278c5728ee7f8e7d19914c54fca05e76
https://github.com/hughsie/python-appstream/blob/f2606380278c5728ee7f8e7d19914c54fca05e76/appstream/store.py#L81-L89
train
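Note the merge semantics in add(): a component whose id is already present contributes its releases to the existing entry instead of replacing it. A sketch under the same import assumptions as above; the Release class and its version attribute are assumed from the same module:

from appstream import Store, Component, Release  # assumed import path

store = Store()

first = Component()
first.id = 'org.example.App'
release_a = Release()
release_a.version = '1.0'               # illustrative release data
first.releases.append(release_a)
store.add(first)

second = Component()
second.id = 'org.example.App'
release_b = Release()
release_b.version = '1.1'
second.releases.append(release_b)
store.add(second)                       # merges releases into the first entry

assert len(store.get_component('org.example.App').releases) == 2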
yarbshk/sqlalchemy-dst
sqlalchemy_dst/sqlalchemy_dst.py
row2dict
def row2dict(row, depth=None, exclude=None, exclude_pk=None,
             exclude_underscore=None, only=None, fk_suffix=None):
    """
    Recursively walk row attributes to serialize ones into a dict.

    :param row: instance of the declarative base class
    :param depth: number that represents the depth of related relationships
    :param exclude: set of attribute names to exclude
    :param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
    :param exclude_underscore: are private and protected attributes excluded
    :param only: set of attribute names to include
    :param fk_suffix: str that represents a foreign key suffix
    :return: dict with attributes of current depth level
    """
    if depth == 0:
        return None
    d, mapper = {}, get_mapper(row)
    if depth is None:
        depth = getattr(row, ATTR_DEPTH, DEFAULT_DEPTH) - 1
    else:
        depth -= 1
    if exclude is None:
        exclude = getattr(row, ATTR_EXCLUDE, DEFAULT_EXCLUDE)
    if exclude_pk is None:
        exclude_pk = getattr(row, ATTR_EXCLUDE_PK, DEFAULT_EXCLUDE_PK)
    if exclude_underscore is None:
        exclude_underscore = getattr(row, ATTR_EXCLUDE_UNDERSCORE,
                                     DEFAULT_EXCLUDE_UNDERSCORE)
    if only is None:
        only = getattr(row, ATTR_ONLY, DEFAULT_ONLY)
    if fk_suffix is None:
        fk_suffix = getattr(row, ATTR_FK_SUFFIX, DEFAULT_FK_SUFFIX)
    for c in mapper.columns.keys() + mapper.synonyms.keys():
        if c in exclude or \
                check_exclude_pk(c, exclude_pk, fk_suffix=fk_suffix) or \
                check_exclude_underscore(c, exclude_underscore) or \
                check_only(c, only):
            continue
        d[c] = getattr(row, c)
    for r in mapper.relationships.keys():
        if r in exclude or check_only(r, only):
            continue
        attr = getattr(row, r)
        backref = get_backref(mapper.relationships[r])
        if backref:
            exclude.add(backref)
        kwargs = dict(depth=depth, exclude=exclude, exclude_pk=exclude_pk,
                      exclude_underscore=exclude_underscore, only=only,
                      fk_suffix=fk_suffix)
        if isinstance(attr, collections.InstrumentedList):
            d[r] = [row2dict(i, **kwargs) for i in attr if depth]
        else:
            d[r] = row2dict(attr, **kwargs)
    return d
python
def row2dict(row, depth=None, exclude=None, exclude_pk=None,
             exclude_underscore=None, only=None, fk_suffix=None):
    """
    Recursively walk row attributes to serialize ones into a dict.

    :param row: instance of the declarative base class
    :param depth: number that represents the depth of related relationships
    :param exclude: set of attribute names to exclude
    :param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
    :param exclude_underscore: are private and protected attributes excluded
    :param only: set of attribute names to include
    :param fk_suffix: str that represents a foreign key suffix
    :return: dict with attributes of current depth level
    """
    if depth == 0:
        return None
    d, mapper = {}, get_mapper(row)
    if depth is None:
        depth = getattr(row, ATTR_DEPTH, DEFAULT_DEPTH) - 1
    else:
        depth -= 1
    if exclude is None:
        exclude = getattr(row, ATTR_EXCLUDE, DEFAULT_EXCLUDE)
    if exclude_pk is None:
        exclude_pk = getattr(row, ATTR_EXCLUDE_PK, DEFAULT_EXCLUDE_PK)
    if exclude_underscore is None:
        exclude_underscore = getattr(row, ATTR_EXCLUDE_UNDERSCORE,
                                     DEFAULT_EXCLUDE_UNDERSCORE)
    if only is None:
        only = getattr(row, ATTR_ONLY, DEFAULT_ONLY)
    if fk_suffix is None:
        fk_suffix = getattr(row, ATTR_FK_SUFFIX, DEFAULT_FK_SUFFIX)
    for c in mapper.columns.keys() + mapper.synonyms.keys():
        if c in exclude or \
                check_exclude_pk(c, exclude_pk, fk_suffix=fk_suffix) or \
                check_exclude_underscore(c, exclude_underscore) or \
                check_only(c, only):
            continue
        d[c] = getattr(row, c)
    for r in mapper.relationships.keys():
        if r in exclude or check_only(r, only):
            continue
        attr = getattr(row, r)
        backref = get_backref(mapper.relationships[r])
        if backref:
            exclude.add(backref)
        kwargs = dict(depth=depth, exclude=exclude, exclude_pk=exclude_pk,
                      exclude_underscore=exclude_underscore, only=only,
                      fk_suffix=fk_suffix)
        if isinstance(attr, collections.InstrumentedList):
            d[r] = [row2dict(i, **kwargs) for i in attr if depth]
        else:
            d[r] = row2dict(attr, **kwargs)
    return d
[ "def", "row2dict", "(", "row", ",", "depth", "=", "None", ",", "exclude", "=", "None", ",", "exclude_pk", "=", "None", ",", "exclude_underscore", "=", "None", ",", "only", "=", "None", ",", "fk_suffix", "=", "None", ")", ":", "if", "depth", "==", "0", ":", "return", "None", "d", ",", "mapper", "=", "{", "}", ",", "get_mapper", "(", "row", ")", "if", "depth", "is", "None", ":", "depth", "=", "getattr", "(", "row", ",", "ATTR_DEPTH", ",", "DEFAULT_DEPTH", ")", "-", "1", "else", ":", "depth", "-=", "1", "if", "exclude", "is", "None", ":", "exclude", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE", ",", "DEFAULT_EXCLUDE", ")", "if", "exclude_pk", "is", "None", ":", "exclude_pk", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE_PK", ",", "DEFAULT_EXCLUDE_PK", ")", "if", "exclude_underscore", "is", "None", ":", "exclude_underscore", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE_UNDERSCORE", ",", "DEFAULT_EXCLUDE_UNDERSCORE", ")", "if", "only", "is", "None", ":", "only", "=", "getattr", "(", "row", ",", "ATTR_ONLY", ",", "DEFAULT_ONLY", ")", "if", "fk_suffix", "is", "None", ":", "fk_suffix", "=", "getattr", "(", "row", ",", "ATTR_FK_SUFFIX", ",", "DEFAULT_FK_SUFFIX", ")", "for", "c", "in", "mapper", ".", "columns", ".", "keys", "(", ")", "+", "mapper", ".", "synonyms", ".", "keys", "(", ")", ":", "if", "c", "in", "exclude", "or", "check_exclude_pk", "(", "c", ",", "exclude_pk", ",", "fk_suffix", "=", "fk_suffix", ")", "or", "check_exclude_underscore", "(", "c", ",", "exclude_underscore", ")", "or", "check_only", "(", "c", ",", "only", ")", ":", "continue", "d", "[", "c", "]", "=", "getattr", "(", "row", ",", "c", ")", "for", "r", "in", "mapper", ".", "relationships", ".", "keys", "(", ")", ":", "if", "r", "in", "exclude", "or", "check_only", "(", "r", ",", "only", ")", ":", "continue", "attr", "=", "getattr", "(", "row", ",", "r", ")", "backref", "=", "get_backref", "(", "mapper", ".", "relationships", "[", "r", "]", ")", "if", "backref", ":", "exclude", ".", "add", "(", "backref", ")", "kwargs", "=", "dict", "(", "depth", "=", "depth", ",", "exclude", "=", "exclude", ",", "exclude_pk", "=", "exclude_pk", ",", "exclude_underscore", "=", "exclude_underscore", ",", "only", "=", "only", ",", "fk_suffix", "=", "fk_suffix", ")", "if", "isinstance", "(", "attr", ",", "collections", ".", "InstrumentedList", ")", ":", "d", "[", "r", "]", "=", "[", "row2dict", "(", "i", ",", "*", "*", "kwargs", ")", "for", "i", "in", "attr", "if", "depth", "]", "else", ":", "d", "[", "r", "]", "=", "row2dict", "(", "attr", ",", "*", "*", "kwargs", ")", "return", "d" ]
Recursively walk row attributes to serialize ones into a dict.

:param row: instance of the declarative base class
:param depth: number that represents the depth of related relationships
:param exclude: set of attribute names to exclude
:param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
:param exclude_underscore: are private and protected attributes excluded
:param only: set of attribute names to include
:param fk_suffix: str that represents a foreign key suffix
:return: dict with attributes of current depth level
[ "Recursively", "walk", "row", "attributes", "to", "serialize", "ones", "into", "a", "dict", "." ]
8d1ff1a940180c8a7f78a5db1263afd6eb53b7b9
https://github.com/yarbshk/sqlalchemy-dst/blob/8d1ff1a940180c8a7f78a5db1263afd6eb53b7b9/sqlalchemy_dst/sqlalchemy_dst.py#L53-L108
train
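A usage sketch for row2dict(); the User/Role models below are invented for illustration, and the helper's import path is assumed from the module layout in this record:

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_dst import row2dict  # assumed import path

Base = declarative_base()

class Role(Base):
    __tablename__ = 'roles'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    users = relationship('User', back_populates='role')

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    username = Column(String)
    role_id = Column(Integer, ForeignKey('roles.id'))
    role = relationship('Role', back_populates='users')

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(username='alice', role=Role(name='admin')))
session.commit()

user = session.query(User).first()
# depth=2 serializes the user's columns plus one level of relationships;
# the backref ('users' on Role) is excluded automatically to avoid cycles.
data = row2dict(user, depth=2)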
yarbshk/sqlalchemy-dst
sqlalchemy_dst/sqlalchemy_dst.py
dict2row
def dict2row(d, model, rel=None, exclude=None, exclude_pk=None,
             exclude_underscore=None, only=None, fk_suffix=None):
    """
    Recursively walk dict attributes to serialize ones into a row.

    :param d: dict that represents a serialized row
    :param model: class derived from the declarative base class
    :param rel: dict of key (relationship name) -value (class) pairs
    :param exclude: set of attribute names to exclude
    :param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
    :param exclude_underscore: are private and protected attributes excluded
    :param only: set of attribute names to include
    :param fk_suffix: str that represents a foreign key suffix
    :return: instance of the declarative base class
    """
    if not isinstance(d, dict):
        raise TypeError('Source must be instance of dict, got %s instead' %
                        type(d).__name__)
    row = model()
    mapper = get_mapper(row)
    if rel is None:
        rel = getattr(row, ATTR_REL, DEFAULT_REL)
    if exclude is None:
        exclude = getattr(row, ATTR_EXCLUDE, DEFAULT_EXCLUDE)
    if exclude_pk is None:
        exclude_pk = getattr(row, ATTR_EXCLUDE_PK, DEFAULT_EXCLUDE_PK)
    if exclude_underscore is None:
        exclude_underscore = getattr(row, ATTR_EXCLUDE_UNDERSCORE,
                                     DEFAULT_EXCLUDE_UNDERSCORE)
    if only is None:
        only = getattr(row, ATTR_ONLY, DEFAULT_ONLY)
    if fk_suffix is None:
        fk_suffix = getattr(row, ATTR_FK_SUFFIX, DEFAULT_FK_SUFFIX)
    for c in mapper.columns.keys() + mapper.synonyms.keys():
        if c not in d or c in exclude or \
                check_exclude_pk(c, exclude_pk, fk_suffix=fk_suffix) or \
                check_exclude_underscore(c, exclude_underscore) or \
                check_only(c, only):
            continue
        setattr(row, c, d[c])
    for r in mapper.relationships.keys():
        if r not in d or r not in rel or check_only(r, only):
            continue
        kwargs = dict(rel=rel, exclude=exclude, exclude_pk=exclude_pk,
                      exclude_underscore=exclude_underscore, only=only,
                      fk_suffix=fk_suffix)
        if isinstance(d[r], list):
            setattr(row, r, collections.InstrumentedList())
            for i in d[r]:
                getattr(row, r).append(dict2row(i, rel[r], **kwargs))
        else:
            if not exclude_pk:
                rpk = d[r].get('id') if isinstance(d[r], dict) else None
                setattr(row, r + fk_suffix, rpk)
            setattr(row, r, dict2row(d[r], rel[r], **kwargs))
    return row
python
def dict2row(d, model, rel=None, exclude=None, exclude_pk=None,
             exclude_underscore=None, only=None, fk_suffix=None):
    """
    Recursively walk dict attributes to serialize ones into a row.

    :param d: dict that represents a serialized row
    :param model: class derived from the declarative base class
    :param rel: dict of key (relationship name) -value (class) pairs
    :param exclude: set of attribute names to exclude
    :param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
    :param exclude_underscore: are private and protected attributes excluded
    :param only: set of attribute names to include
    :param fk_suffix: str that represents a foreign key suffix
    :return: instance of the declarative base class
    """
    if not isinstance(d, dict):
        raise TypeError('Source must be instance of dict, got %s instead' %
                        type(d).__name__)
    row = model()
    mapper = get_mapper(row)
    if rel is None:
        rel = getattr(row, ATTR_REL, DEFAULT_REL)
    if exclude is None:
        exclude = getattr(row, ATTR_EXCLUDE, DEFAULT_EXCLUDE)
    if exclude_pk is None:
        exclude_pk = getattr(row, ATTR_EXCLUDE_PK, DEFAULT_EXCLUDE_PK)
    if exclude_underscore is None:
        exclude_underscore = getattr(row, ATTR_EXCLUDE_UNDERSCORE,
                                     DEFAULT_EXCLUDE_UNDERSCORE)
    if only is None:
        only = getattr(row, ATTR_ONLY, DEFAULT_ONLY)
    if fk_suffix is None:
        fk_suffix = getattr(row, ATTR_FK_SUFFIX, DEFAULT_FK_SUFFIX)
    for c in mapper.columns.keys() + mapper.synonyms.keys():
        if c not in d or c in exclude or \
                check_exclude_pk(c, exclude_pk, fk_suffix=fk_suffix) or \
                check_exclude_underscore(c, exclude_underscore) or \
                check_only(c, only):
            continue
        setattr(row, c, d[c])
    for r in mapper.relationships.keys():
        if r not in d or r not in rel or check_only(r, only):
            continue
        kwargs = dict(rel=rel, exclude=exclude, exclude_pk=exclude_pk,
                      exclude_underscore=exclude_underscore, only=only,
                      fk_suffix=fk_suffix)
        if isinstance(d[r], list):
            setattr(row, r, collections.InstrumentedList())
            for i in d[r]:
                getattr(row, r).append(dict2row(i, rel[r], **kwargs))
        else:
            if not exclude_pk:
                rpk = d[r].get('id') if isinstance(d[r], dict) else None
                setattr(row, r + fk_suffix, rpk)
            setattr(row, r, dict2row(d[r], rel[r], **kwargs))
    return row
[ "def", "dict2row", "(", "d", ",", "model", ",", "rel", "=", "None", ",", "exclude", "=", "None", ",", "exclude_pk", "=", "None", ",", "exclude_underscore", "=", "None", ",", "only", "=", "None", ",", "fk_suffix", "=", "None", ")", ":", "if", "not", "isinstance", "(", "d", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Source must be instance of dict, got %s instead'", "%", "type", "(", "d", ")", ".", "__name__", ")", "row", "=", "model", "(", ")", "mapper", "=", "get_mapper", "(", "row", ")", "if", "rel", "is", "None", ":", "rel", "=", "getattr", "(", "row", ",", "ATTR_REL", ",", "DEFAULT_REL", ")", "if", "exclude", "is", "None", ":", "exclude", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE", ",", "DEFAULT_EXCLUDE", ")", "if", "exclude_pk", "is", "None", ":", "exclude_pk", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE_PK", ",", "DEFAULT_EXCLUDE_PK", ")", "if", "exclude_underscore", "is", "None", ":", "exclude_underscore", "=", "getattr", "(", "row", ",", "ATTR_EXCLUDE_UNDERSCORE", ",", "DEFAULT_EXCLUDE_UNDERSCORE", ")", "if", "only", "is", "None", ":", "only", "=", "getattr", "(", "row", ",", "ATTR_ONLY", ",", "DEFAULT_ONLY", ")", "if", "fk_suffix", "is", "None", ":", "fk_suffix", "=", "getattr", "(", "row", ",", "ATTR_FK_SUFFIX", ",", "DEFAULT_FK_SUFFIX", ")", "for", "c", "in", "mapper", ".", "columns", ".", "keys", "(", ")", "+", "mapper", ".", "synonyms", ".", "keys", "(", ")", ":", "if", "c", "not", "in", "d", "or", "c", "in", "exclude", "or", "check_exclude_pk", "(", "c", ",", "exclude_pk", ",", "fk_suffix", "=", "fk_suffix", ")", "or", "check_exclude_underscore", "(", "c", ",", "exclude_underscore", ")", "or", "check_only", "(", "c", ",", "only", ")", ":", "continue", "setattr", "(", "row", ",", "c", ",", "d", "[", "c", "]", ")", "for", "r", "in", "mapper", ".", "relationships", ".", "keys", "(", ")", ":", "if", "r", "not", "in", "d", "or", "r", "not", "in", "rel", "or", "check_only", "(", "r", ",", "only", ")", ":", "continue", "kwargs", "=", "dict", "(", "rel", "=", "rel", ",", "exclude", "=", "exclude", ",", "exclude_pk", "=", "exclude_pk", ",", "exclude_underscore", "=", "exclude_underscore", ",", "only", "=", "only", ",", "fk_suffix", "=", "fk_suffix", ")", "if", "isinstance", "(", "d", "[", "r", "]", ",", "list", ")", ":", "setattr", "(", "row", ",", "r", ",", "collections", ".", "InstrumentedList", "(", ")", ")", "for", "i", "in", "d", "[", "r", "]", ":", "getattr", "(", "row", ",", "r", ")", ".", "append", "(", "dict2row", "(", "i", ",", "rel", "[", "r", "]", ",", "*", "*", "kwargs", ")", ")", "else", ":", "if", "not", "exclude_pk", ":", "rpk", "=", "d", "[", "r", "]", ".", "get", "(", "'id'", ")", "if", "isinstance", "(", "d", "[", "r", "]", ",", "dict", ")", "else", "None", "setattr", "(", "row", ",", "r", "+", "fk_suffix", ",", "rpk", ")", "setattr", "(", "row", ",", "r", ",", "dict2row", "(", "d", "[", "r", "]", ",", "rel", "[", "r", "]", ",", "*", "*", "kwargs", ")", ")", "return", "row" ]
Recursively walk dict attributes to serialize ones into a row.

:param d: dict that represents a serialized row
:param model: class derived from the declarative base class
:param rel: dict of key (relationship name) -value (class) pairs
:param exclude: set of attribute names to exclude
:param exclude_pk: are foreign keys (e.g. fk_name_id) excluded
:param exclude_underscore: are private and protected attributes excluded
:param only: set of attribute names to include
:param fk_suffix: str that represents a foreign key suffix
:return: instance of the declarative base class
[ "Recursively", "walk", "dict", "attributes", "to", "serialize", "ones", "into", "a", "row", "." ]
8d1ff1a940180c8a7f78a5db1263afd6eb53b7b9
https://github.com/yarbshk/sqlalchemy-dst/blob/8d1ff1a940180c8a7f78a5db1263afd6eb53b7b9/sqlalchemy_dst/sqlalchemy_dst.py#L110-L167
train
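dict2row() inverts the serialization. With the same hypothetical User/Role models as in the previous sketch, the rel mapping tells it which class to instantiate for each relationship key:

from sqlalchemy_dst import dict2row  # assumed import path

# Reuses the hypothetical User/Role models from the row2dict sketch.
data = {'id': 1, 'username': 'alice',
        'role': {'id': 2, 'name': 'admin'}}

user = dict2row(data, User, rel={'role': Role})
assert user.username == 'alice'
assert user.role.name == 'admin'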
wylee/runcommands
runcommands/commands.py
copy_file
def copy_file(source, destination, follow_symlinks=True,
              template: arg(type=bool_or(str), choices=('format', 'string')) = False,
              context=None):
    """Copy source file to destination.

    The destination may be a file path or a directory. When it's a
    directory, the source file will be copied into the directory using
    the file's base name.

    When the source file is a template, ``context`` will be used as the
    template context. The supported template types are 'format' and
    'string'. The former uses ``str.format_map()`` and the latter uses
    ``string.Template()``.

    .. note:: :func:`shutil.copy()` from the standard library is used to
        do the copy operation.

    """
    if not template:
        # Fast path for non-templates.
        return shutil.copy(source, destination, follow_symlinks=follow_symlinks)

    if os.path.isdir(destination):
        destination = os.path.join(destination, os.path.basename(source))

    with open(source) as source:
        contents = source.read()

    if template is True or template == 'format':
        contents = contents.format_map(context)
    elif template == 'string':
        string_template = string.Template(contents)
        contents = string_template.substitute(context)
    else:
        raise ValueError('Unknown template type: %s' % template)

    with tempfile.NamedTemporaryFile('w', delete=False) as temp_file:
        temp_file.write(contents)

    path = shutil.copy(temp_file.name, destination)
    os.remove(temp_file.name)
    return path
python
def copy_file(source, destination, follow_symlinks=True,
              template: arg(type=bool_or(str), choices=('format', 'string')) = False,
              context=None):
    """Copy source file to destination.

    The destination may be a file path or a directory. When it's a
    directory, the source file will be copied into the directory using
    the file's base name.

    When the source file is a template, ``context`` will be used as the
    template context. The supported template types are 'format' and
    'string'. The former uses ``str.format_map()`` and the latter uses
    ``string.Template()``.

    .. note:: :func:`shutil.copy()` from the standard library is used to
        do the copy operation.

    """
    if not template:
        # Fast path for non-templates.
        return shutil.copy(source, destination, follow_symlinks=follow_symlinks)

    if os.path.isdir(destination):
        destination = os.path.join(destination, os.path.basename(source))

    with open(source) as source:
        contents = source.read()

    if template is True or template == 'format':
        contents = contents.format_map(context)
    elif template == 'string':
        string_template = string.Template(contents)
        contents = string_template.substitute(context)
    else:
        raise ValueError('Unknown template type: %s' % template)

    with tempfile.NamedTemporaryFile('w', delete=False) as temp_file:
        temp_file.write(contents)

    path = shutil.copy(temp_file.name, destination)
    os.remove(temp_file.name)
    return path
[ "def", "copy_file", "(", "source", ",", "destination", ",", "follow_symlinks", "=", "True", ",", "template", ":", "arg", "(", "type", "=", "bool_or", "(", "str", ")", ",", "choices", "=", "(", "'format'", ",", "'string'", ")", ")", "=", "False", ",", "context", "=", "None", ")", ":", "if", "not", "template", ":", "# Fast path for non-templates.", "return", "shutil", ".", "copy", "(", "source", ",", "destination", ",", "follow_symlinks", "=", "follow_symlinks", ")", "if", "os", ".", "path", ".", "isdir", "(", "destination", ")", ":", "destination", "=", "os", ".", "path", ".", "join", "(", "destination", ",", "os", ".", "path", ".", "basename", "(", "source", ")", ")", "with", "open", "(", "source", ")", "as", "source", ":", "contents", "=", "source", ".", "read", "(", ")", "if", "template", "is", "True", "or", "template", "==", "'format'", ":", "contents", "=", "contents", ".", "format_map", "(", "context", ")", "elif", "template", "==", "'string'", ":", "string_template", "=", "string", ".", "Template", "(", "contents", ")", "contents", "=", "string_template", ".", "substitute", "(", "context", ")", "else", ":", "raise", "ValueError", "(", "'Unknown template type: %s'", "%", "template", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "'w'", ",", "delete", "=", "False", ")", "as", "temp_file", ":", "temp_file", ".", "write", "(", "contents", ")", "path", "=", "shutil", ".", "copy", "(", "temp_file", ".", "name", ",", "destination", ")", "os", ".", "remove", "(", "temp_file", ".", "name", ")", "return", "path" ]
Copy source file to destination.

The destination may be a file path or a directory. When it's a
directory, the source file will be copied into the directory using
the file's base name.

When the source file is a template, ``context`` will be used as the
template context. The supported template types are 'format' and
'string'. The former uses ``str.format_map()`` and the latter uses
``string.Template()``.

.. note:: :func:`shutil.copy()` from the standard library is used to
    do the copy operation.
[ "Copy", "source", "file", "to", "destination", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/commands.py#L16-L57
train
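A quick sketch of the two template modes; the file paths and context values are placeholders:

from runcommands.commands import copy_file  # assumed import path

# Plain copy (fast path, no templating).
copy_file('settings.ini', '/tmp/settings.ini')

# 'format' templating: {name} placeholders filled via str.format_map().
# Suppose settings.template.ini contains the line: app_name = {name}
copy_file('settings.template.ini', '/tmp/settings.ini',
          template='format', context={'name': 'demo'})

# 'string' templating: $name placeholders via string.Template.
copy_file('settings.template.ini', '/tmp/settings.ini',
          template='string', context={'name': 'demo'})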
wylee/runcommands
runcommands/commands.py
git_version
def git_version(short: 'Get short hash' = True,
                show: 'Print version to stdout' = False):
    """Get tag associated with HEAD; fall back to SHA1.

    If HEAD is tagged, return the tag name; otherwise fall back to
    HEAD's short SHA1 hash.

    .. note:: Only annotated tags are considered.

    .. note:: The output isn't shown by default. To show it, pass the
        ``--show`` flag.

    """
    result = local(
        ['git', 'rev-parse', '--is-inside-work-tree'],
        stdout='hide', stderr='hide', echo=False, raise_on_error=False)
    if not result:
        # Not a git directory
        return None
    # Return a tag if possible
    result = local(
        ['git', 'describe', '--exact-match'],
        stdout='capture', stderr='hide', echo=False, raise_on_error=False)
    if result:
        return result.stdout
    # Fall back to hash
    result = local(
        ['git', 'rev-parse', '--short' if short else None, 'HEAD'],
        stdout='capture', stderr='hide', echo=False, raise_on_error=False)
    if result:
        version = result.stdout.strip()
        if show:
            print(version)
        return version
    return None
python
def git_version(short: 'Get short hash' = True,
                show: 'Print version to stdout' = False):
    """Get tag associated with HEAD; fall back to SHA1.

    If HEAD is tagged, return the tag name; otherwise fall back to
    HEAD's short SHA1 hash.

    .. note:: Only annotated tags are considered.

    .. note:: The output isn't shown by default. To show it, pass the
        ``--show`` flag.

    """
    result = local(
        ['git', 'rev-parse', '--is-inside-work-tree'],
        stdout='hide', stderr='hide', echo=False, raise_on_error=False)
    if not result:
        # Not a git directory
        return None
    # Return a tag if possible
    result = local(
        ['git', 'describe', '--exact-match'],
        stdout='capture', stderr='hide', echo=False, raise_on_error=False)
    if result:
        return result.stdout
    # Fall back to hash
    result = local(
        ['git', 'rev-parse', '--short' if short else None, 'HEAD'],
        stdout='capture', stderr='hide', echo=False, raise_on_error=False)
    if result:
        version = result.stdout.strip()
        if show:
            print(version)
        return version
    return None
[ "def", "git_version", "(", "short", ":", "'Get short hash'", "=", "True", ",", "show", ":", "'Print version to stdout'", "=", "False", ")", ":", "result", "=", "local", "(", "[", "'git'", ",", "'rev-parse'", ",", "'--is-inside-work-tree'", "]", ",", "stdout", "=", "'hide'", ",", "stderr", "=", "'hide'", ",", "echo", "=", "False", ",", "raise_on_error", "=", "False", ")", "if", "not", "result", ":", "# Not a git directory", "return", "None", "# Return a tag if possible", "result", "=", "local", "(", "[", "'git'", ",", "'describe'", ",", "'--exact-match'", "]", ",", "stdout", "=", "'capture'", ",", "stderr", "=", "'hide'", ",", "echo", "=", "False", ",", "raise_on_error", "=", "False", ")", "if", "result", ":", "return", "result", ".", "stdout", "# Fall back to hash", "result", "=", "local", "(", "[", "'git'", ",", "'rev-parse'", ",", "'--short'", "if", "short", "else", "None", ",", "'HEAD'", "]", ",", "stdout", "=", "'capture'", ",", "stderr", "=", "'hide'", ",", "echo", "=", "False", ",", "raise_on_error", "=", "False", ")", "if", "result", ":", "version", "=", "result", ".", "stdout", ".", "strip", "(", ")", "if", "show", ":", "print", "(", "version", ")", "return", "version", "return", "None" ]
Get tag associated with HEAD; fall back to SHA1.

If HEAD is tagged, return the tag name; otherwise fall back to
HEAD's short SHA1 hash.

.. note:: Only annotated tags are considered.

.. note:: The output isn't shown by default. To show it, pass the
    ``--show`` flag.
[ "Get", "tag", "associated", "with", "HEAD", ";", "fall", "back", "to", "SHA1", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/commands.py#L61-L100
train
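Called directly (it is a plain function as well as a command), git_version() returns a tag or short hash, or None outside a work tree; the values in the comments are illustrative:

from runcommands.commands import git_version  # assumed import path

version = git_version()   # e.g. 'v1.2.3' if HEAD has an annotated tag,
                          # else a short hash like 'a1b2c3d'
if version is None:
    print('not inside a git work tree')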
wylee/runcommands
runcommands/commands.py
remote
def remote(cmd: arg(container=list),
           host,
           user=None,
           port=None,
           sudo=False,
           run_as=None,
           shell='/bin/sh',
           cd=None,
           environ: arg(container=dict) = None,
           paths=(),
           # Args passed through to local command:
           stdout: arg(type=StreamOptions) = None,
           stderr: arg(type=StreamOptions) = None,
           echo=False,
           raise_on_error=True,
           dry_run=False,
           ) -> Result:
    """Run a remote command via SSH.

    Runs a remote shell command using ``ssh`` in a subprocess like so:

        ssh -q [-t] [<user>@]<host> [sudo [-u <run_as>] -H] /bin/sh -c '
            [cd <cd> &&]
            [export XYZ="xyz" &&]
            [export PATH="<path>" &&]
            <cmd>
        '

    Args:
        cmd (list|str): The command to run. If this is a list, it will
            be flattened into a string.
        host (str): Remote host to SSH into.
        user (str): Remote user to log in as (defaults to current local
            user).
        port (int): SSH port on remote host.
        sudo (bool): Run the remote command as root using ``sudo``.
        run_as (str): Run the remote command as a different user using
            ``sudo -u <run_as>``.
        shell (str): The remote user's default shell will be used to run
            the remote command unless this is set to a different shell.
        cd (str): Where to run the command on the remote host.
        environ (dict): Extra environment variables to set on the remote
            host.
        paths (list): Additional paths to prepend to the remote ``$PATH``.
        stdout: See :func:`local`.
        stderr: See :func:`local`.
        echo: See :func:`local`.
        raise_on_error: See :func:`local`.
        dry_run: See :func:`local`.

    """
    if not isinstance(cmd, str):
        cmd = flatten_args(cmd, join=True)

    ssh_options = ['-q']

    if isatty(sys.stdin):
        ssh_options.append('-t')

    if port is not None:
        ssh_options.extend(('-p', port))

    ssh_connection_str = '{user}@{host}'.format_map(locals()) if user else host

    remote_cmd = []

    if sudo:
        remote_cmd.extend(('sudo', '-H'))
    elif run_as:
        remote_cmd.extend(('sudo', '-H', '-u', run_as))

    remote_cmd.extend((shell, '-c'))

    inner_cmd = []

    if cd:
        inner_cmd.append('cd {cd}'.format_map(locals()))

    if environ:
        inner_cmd.extend('export {k}="{v}"'.format_map(locals())
                         for k, v in environ.items())

    if paths:
        inner_cmd.append('export PATH="{path}:$PATH"'.format(path=':'.join(paths)))

    inner_cmd.append(cmd)
    inner_cmd = ' &&\n '.join(inner_cmd)
    inner_cmd = '\n {inner_cmd}\n'.format_map(locals())
    inner_cmd = shlex.quote(inner_cmd)

    remote_cmd.append(inner_cmd)
    remote_cmd = ' '.join(remote_cmd)

    args = ('ssh', ssh_options, ssh_connection_str, remote_cmd)
    return local(
        args, stdout=stdout, stderr=stderr, echo=echo,
        raise_on_error=raise_on_error, dry_run=dry_run)
python
def remote(cmd: arg(container=list),
           host,
           user=None,
           port=None,
           sudo=False,
           run_as=None,
           shell='/bin/sh',
           cd=None,
           environ: arg(container=dict) = None,
           paths=(),
           # Args passed through to local command:
           stdout: arg(type=StreamOptions) = None,
           stderr: arg(type=StreamOptions) = None,
           echo=False,
           raise_on_error=True,
           dry_run=False,
           ) -> Result:
    """Run a remote command via SSH.

    Runs a remote shell command using ``ssh`` in a subprocess like so:

        ssh -q [-t] [<user>@]<host> [sudo [-u <run_as>] -H] /bin/sh -c '
            [cd <cd> &&]
            [export XYZ="xyz" &&]
            [export PATH="<path>" &&]
            <cmd>
        '

    Args:
        cmd (list|str): The command to run. If this is a list, it will
            be flattened into a string.
        host (str): Remote host to SSH into.
        user (str): Remote user to log in as (defaults to current local
            user).
        port (int): SSH port on remote host.
        sudo (bool): Run the remote command as root using ``sudo``.
        run_as (str): Run the remote command as a different user using
            ``sudo -u <run_as>``.
        shell (str): The remote user's default shell will be used to run
            the remote command unless this is set to a different shell.
        cd (str): Where to run the command on the remote host.
        environ (dict): Extra environment variables to set on the remote
            host.
        paths (list): Additional paths to prepend to the remote ``$PATH``.
        stdout: See :func:`local`.
        stderr: See :func:`local`.
        echo: See :func:`local`.
        raise_on_error: See :func:`local`.
        dry_run: See :func:`local`.

    """
    if not isinstance(cmd, str):
        cmd = flatten_args(cmd, join=True)

    ssh_options = ['-q']

    if isatty(sys.stdin):
        ssh_options.append('-t')

    if port is not None:
        ssh_options.extend(('-p', port))

    ssh_connection_str = '{user}@{host}'.format_map(locals()) if user else host

    remote_cmd = []

    if sudo:
        remote_cmd.extend(('sudo', '-H'))
    elif run_as:
        remote_cmd.extend(('sudo', '-H', '-u', run_as))

    remote_cmd.extend((shell, '-c'))

    inner_cmd = []

    if cd:
        inner_cmd.append('cd {cd}'.format_map(locals()))

    if environ:
        inner_cmd.extend('export {k}="{v}"'.format_map(locals())
                         for k, v in environ.items())

    if paths:
        inner_cmd.append('export PATH="{path}:$PATH"'.format(path=':'.join(paths)))

    inner_cmd.append(cmd)
    inner_cmd = ' &&\n '.join(inner_cmd)
    inner_cmd = '\n {inner_cmd}\n'.format_map(locals())
    inner_cmd = shlex.quote(inner_cmd)

    remote_cmd.append(inner_cmd)
    remote_cmd = ' '.join(remote_cmd)

    args = ('ssh', ssh_options, ssh_connection_str, remote_cmd)
    return local(
        args, stdout=stdout, stderr=stderr, echo=echo,
        raise_on_error=raise_on_error, dry_run=dry_run)
[ "def", "remote", "(", "cmd", ":", "arg", "(", "container", "=", "list", ")", ",", "host", ",", "user", "=", "None", ",", "port", "=", "None", ",", "sudo", "=", "False", ",", "run_as", "=", "None", ",", "shell", "=", "'/bin/sh'", ",", "cd", "=", "None", ",", "environ", ":", "arg", "(", "container", "=", "dict", ")", "=", "None", ",", "paths", "=", "(", ")", ",", "# Args passed through to local command:", "stdout", ":", "arg", "(", "type", "=", "StreamOptions", ")", "=", "None", ",", "stderr", ":", "arg", "(", "type", "=", "StreamOptions", ")", "=", "None", ",", "echo", "=", "False", ",", "raise_on_error", "=", "True", ",", "dry_run", "=", "False", ",", ")", "->", "Result", ":", "if", "not", "isinstance", "(", "cmd", ",", "str", ")", ":", "cmd", "=", "flatten_args", "(", "cmd", ",", "join", "=", "True", ")", "ssh_options", "=", "[", "'-q'", "]", "if", "isatty", "(", "sys", ".", "stdin", ")", ":", "ssh_options", ".", "append", "(", "'-t'", ")", "if", "port", "is", "not", "None", ":", "ssh_options", ".", "extend", "(", "(", "'-p'", ",", "port", ")", ")", "ssh_connection_str", "=", "'{user}@{host}'", ".", "format_map", "(", "locals", "(", ")", ")", "if", "user", "else", "host", "remote_cmd", "=", "[", "]", "if", "sudo", ":", "remote_cmd", ".", "extend", "(", "(", "'sudo'", ",", "'-H'", ")", ")", "elif", "run_as", ":", "remote_cmd", ".", "extend", "(", "(", "'sudo'", ",", "'-H'", ",", "'-u'", ",", "run_as", ")", ")", "remote_cmd", ".", "extend", "(", "(", "shell", ",", "'-c'", ")", ")", "inner_cmd", "=", "[", "]", "if", "cd", ":", "inner_cmd", ".", "append", "(", "'cd {cd}'", ".", "format_map", "(", "locals", "(", ")", ")", ")", "if", "environ", ":", "inner_cmd", ".", "extend", "(", "'export {k}=\"{v}\"'", ".", "format_map", "(", "locals", "(", ")", ")", "for", "k", ",", "v", "in", "environ", ".", "items", "(", ")", ")", "if", "paths", ":", "inner_cmd", ".", "append", "(", "'export PATH=\"{path}:$PATH\"'", ".", "format", "(", "path", "=", "':'", ".", "join", "(", "paths", ")", ")", ")", "inner_cmd", ".", "append", "(", "cmd", ")", "inner_cmd", "=", "' &&\\n '", ".", "join", "(", "inner_cmd", ")", "inner_cmd", "=", "'\\n {inner_cmd}\\n'", ".", "format_map", "(", "locals", "(", ")", ")", "inner_cmd", "=", "shlex", ".", "quote", "(", "inner_cmd", ")", "remote_cmd", ".", "append", "(", "inner_cmd", ")", "remote_cmd", "=", "' '", ".", "join", "(", "remote_cmd", ")", "args", "=", "(", "'ssh'", ",", "ssh_options", ",", "ssh_connection_str", ",", "remote_cmd", ")", "return", "local", "(", "args", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "echo", "=", "echo", ",", "raise_on_error", "=", "raise_on_error", ",", "dry_run", "=", "dry_run", ")" ]
Run a remote command via SSH.

Runs a remote shell command using ``ssh`` in a subprocess like so:

    ssh -q [-t] [<user>@]<host> [sudo [-u <run_as>] -H] /bin/sh -c '
        [cd <cd> &&]
        [export XYZ="xyz" &&]
        [export PATH="<path>" &&]
        <cmd>
    '

Args:
    cmd (list|str): The command to run. If this is a list, it will be
        flattened into a string.
    host (str): Remote host to SSH into.
    user (str): Remote user to log in as (defaults to current local user).
    port (int): SSH port on remote host.
    sudo (bool): Run the remote command as root using ``sudo``.
    run_as (str): Run the remote command as a different user using
        ``sudo -u <run_as>``.
    shell (str): The remote user's default shell will be used to run the
        remote command unless this is set to a different shell.
    cd (str): Where to run the command on the remote host.
    environ (dict): Extra environment variables to set on the remote host.
    paths (list): Additional paths to prepend to the remote ``$PATH``.
    stdout: See :func:`local`.
    stderr: See :func:`local`.
    echo: See :func:`local`.
    raise_on_error: See :func:`local`.
    dry_run: See :func:`local`.
[ "Run", "a", "remote", "command", "via", "SSH", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/commands.py#L214-L308
train
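A sketch of how the pieces assemble; the host, user, and paths are placeholders, not real infrastructure:

from runcommands.commands import remote  # assumed import path

# Builds and runs (roughly):
#   ssh -q [-t] deploy@example.com sudo -H -u app /bin/sh -c '
#       cd /srv/app &&
#       export DJANGO_SETTINGS_MODULE="app.settings" &&
#       ./manage.py migrate
#   '
remote(
    ['./manage.py', 'migrate'],
    host='example.com',                 # placeholder host
    user='deploy',                      # placeholder user
    run_as='app',
    cd='/srv/app',
    environ={'DJANGO_SETTINGS_MODULE': 'app.settings'},
)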
wylee/runcommands
runcommands/commands.py
sync
def sync(source, destination, host,
         user=None,
         sudo=False,
         run_as=None,
         options=('-rltvz', '--no-perms', '--no-group'),
         excludes=(),
         exclude_from=None,
         delete=False,
         dry_run=False,
         mode='u=rwX,g=rwX,o=',
         quiet=True,
         pull=False,
         # Args passed through to local command:
         stdout: arg(type=StreamOptions) = None,
         stderr: arg(type=StreamOptions) = None,
         echo=False,
         raise_on_error=True,
         ) -> Result:
    """Sync files using rsync.

    By default, a local ``source`` is pushed to a remote ``destination``.
    To pull from a remote ``source`` to a local ``destination`` instead,
    pass ``pull=True``.

    """
    source = abs_path(source, keep_slash=True)
    destination = abs_path(destination, keep_slash=True)
    connection_str = '{user}@{host}'.format_map(locals()) if user else host
    push = not pull
    if sudo:
        rsync_path = ('--rsync-path', 'sudo rsync')
    elif run_as:
        rsync_path = ('--rsync-path', 'sudo -u {run_as} rsync'.format_map(locals()))
    else:
        rsync_path = None
    if push:
        destination = '{connection_str}:{destination}'.format_map(locals())
    else:
        source = '{connection_str}:{source}'.format_map(locals())
    args = (
        'rsync',
        rsync_path,
        options,
        ('--chmod', mode) if mode else None,
        tuple(('--exclude', exclude) for exclude in excludes),
        ('--exclude-from', exclude_from) if exclude_from else None,
        '--delete' if delete else None,
        '--dry-run' if dry_run else None,
        '--quiet' if quiet else None,
        source,
        destination,
    )
    return local(args, stdout=stdout, stderr=stderr, echo=echo,
                 raise_on_error=raise_on_error)
python
def sync(source, destination, host,
         user=None,
         sudo=False,
         run_as=None,
         options=('-rltvz', '--no-perms', '--no-group'),
         excludes=(),
         exclude_from=None,
         delete=False,
         dry_run=False,
         mode='u=rwX,g=rwX,o=',
         quiet=True,
         pull=False,
         # Args passed through to local command:
         stdout: arg(type=StreamOptions) = None,
         stderr: arg(type=StreamOptions) = None,
         echo=False,
         raise_on_error=True,
         ) -> Result:
    """Sync files using rsync.

    By default, a local ``source`` is pushed to a remote ``destination``.
    To pull from a remote ``source`` to a local ``destination`` instead,
    pass ``pull=True``.

    """
    source = abs_path(source, keep_slash=True)
    destination = abs_path(destination, keep_slash=True)
    connection_str = '{user}@{host}'.format_map(locals()) if user else host
    push = not pull
    if sudo:
        rsync_path = ('--rsync-path', 'sudo rsync')
    elif run_as:
        rsync_path = ('--rsync-path', 'sudo -u {run_as} rsync'.format_map(locals()))
    else:
        rsync_path = None
    if push:
        destination = '{connection_str}:{destination}'.format_map(locals())
    else:
        source = '{connection_str}:{source}'.format_map(locals())
    args = (
        'rsync',
        rsync_path,
        options,
        ('--chmod', mode) if mode else None,
        tuple(('--exclude', exclude) for exclude in excludes),
        ('--exclude-from', exclude_from) if exclude_from else None,
        '--delete' if delete else None,
        '--dry-run' if dry_run else None,
        '--quiet' if quiet else None,
        source,
        destination,
    )
    return local(args, stdout=stdout, stderr=stderr, echo=echo,
                 raise_on_error=raise_on_error)
[ "def", "sync", "(", "source", ",", "destination", ",", "host", ",", "user", "=", "None", ",", "sudo", "=", "False", ",", "run_as", "=", "None", ",", "options", "=", "(", "'-rltvz'", ",", "'--no-perms'", ",", "'--no-group'", ")", ",", "excludes", "=", "(", ")", ",", "exclude_from", "=", "None", ",", "delete", "=", "False", ",", "dry_run", "=", "False", ",", "mode", "=", "'u=rwX,g=rwX,o='", ",", "quiet", "=", "True", ",", "pull", "=", "False", ",", "# Args passed through to local command:", "stdout", ":", "arg", "(", "type", "=", "StreamOptions", ")", "=", "None", ",", "stderr", ":", "arg", "(", "type", "=", "StreamOptions", ")", "=", "None", ",", "echo", "=", "False", ",", "raise_on_error", "=", "True", ",", ")", "->", "Result", ":", "source", "=", "abs_path", "(", "source", ",", "keep_slash", "=", "True", ")", "destination", "=", "abs_path", "(", "destination", ",", "keep_slash", "=", "True", ")", "connection_str", "=", "'{user}@{host}'", ".", "format_map", "(", "locals", "(", ")", ")", "if", "user", "else", "host", "push", "=", "not", "pull", "if", "sudo", ":", "rsync_path", "=", "(", "'--rsync-path'", ",", "'sudo rsync'", ")", "elif", "run_as", ":", "rsync_path", "=", "(", "'--rsync-path'", ",", "'sudo -u {run_as} rsync'", ".", "format_map", "(", "locals", "(", ")", ")", ")", "else", ":", "rsync_path", "=", "None", "if", "push", ":", "destination", "=", "'{connection_str}:{destination}'", ".", "format_map", "(", "locals", "(", ")", ")", "else", ":", "source", "=", "'{connection_str}:{source}'", ".", "format_map", "(", "locals", "(", ")", ")", "args", "=", "(", "'rsync'", ",", "rsync_path", ",", "options", ",", "(", "'--chmod'", ",", "mode", ")", "if", "mode", "else", "None", ",", "tuple", "(", "(", "'--exclude'", ",", "exclude", ")", "for", "exclude", "in", "excludes", ")", ",", "(", "'--exclude-from'", ",", "exclude_from", ")", "if", "exclude_from", "else", "None", ",", "'--delete'", "if", "delete", "else", "None", ",", "'--dry-run'", "if", "dry_run", "else", "None", ",", "'--quiet'", "if", "quiet", "else", "None", ",", "source", ",", "destination", ",", ")", "return", "local", "(", "args", ",", "stdout", "=", "stdout", ",", "stderr", "=", "stderr", ",", "echo", "=", "echo", ",", "raise_on_error", "=", "raise_on_error", ")" ]
Sync files using rsync.

By default, a local ``source`` is pushed to a remote ``destination``.
To pull from a remote ``source`` to a local ``destination`` instead,
pass ``pull=True``.
[ "Sync", "files", "using", "rsync", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/commands.py#L312-L369
train
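Push and pull sketches; all paths and hosts are placeholders:

from runcommands.commands import sync  # assumed import path

# Push local build artifacts to a remote docroot, pruning stale files.
sync('build/', '/var/www/site/', host='example.com', user='deploy',
     delete=True, excludes=('*.map',))

# Pull the remote media directory down to the local checkout instead.
sync('/var/www/site/media/', 'media/', host='example.com', pull=True)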
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
_get_mean_and_median
def _get_mean_and_median(hist: Hist) -> Tuple[float, float]:
    """ Retrieve the mean and median from a ROOT histogram.

    Note:
        These values are not so trivial to calculate without ROOT,
        as they are the bin values weighted by the bin content.

    Args:
        hist: Histogram from which the values will be extracted.
    Returns:
        mean, median of the histogram.
    """
    # Median
    # See: https://root-forum.cern.ch/t/median-of-histogram/7626/5
    x = ctypes.c_double(0)
    q = ctypes.c_double(0.5)
    # Apparently needed to be safe(?)
    hist.ComputeIntegral()
    hist.GetQuantiles(1, x, q)

    mean = hist.GetMean()
    return (mean, x.value)
python
def _get_mean_and_median(hist: Hist) -> Tuple[float, float]:
    """ Retrieve the mean and median from a ROOT histogram.

    Note:
        These values are not so trivial to calculate without ROOT,
        as they are the bin values weighted by the bin content.

    Args:
        hist: Histogram from which the values will be extracted.
    Returns:
        mean, median of the histogram.
    """
    # Median
    # See: https://root-forum.cern.ch/t/median-of-histogram/7626/5
    x = ctypes.c_double(0)
    q = ctypes.c_double(0.5)
    # Apparently needed to be safe(?)
    hist.ComputeIntegral()
    hist.GetQuantiles(1, x, q)

    mean = hist.GetMean()
    return (mean, x.value)
[ "def", "_get_mean_and_median", "(", "hist", ":", "Hist", ")", "->", "Tuple", "[", "float", ",", "float", "]", ":", "# Median", "# See: https://root-forum.cern.ch/t/median-of-histogram/7626/5", "x", "=", "ctypes", ".", "c_double", "(", "0", ")", "q", "=", "ctypes", ".", "c_double", "(", "0.5", ")", "# Apparently needed to be safe(?)", "hist", ".", "ComputeIntegral", "(", ")", "hist", ".", "GetQuantiles", "(", "1", ",", "x", ",", "q", ")", "mean", "=", "hist", ".", "GetMean", "(", ")", "return", "(", "mean", ",", "x", ".", "value", ")" ]
Retrieve the mean and median from a ROOT histogram.

Note:
    These values are not so trivial to calculate without ROOT,
    as they are the bin values weighted by the bin content.

Args:
    hist: Histogram from which the values will be extracted.
Returns:
    mean, median of the histogram.
[ "Retrieve", "the", "mean", "and", "median", "from", "a", "ROOT", "histogram", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L25-L47
train
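As the note says, these are content-weighted statistics. For intuition, here is a rough numpy equivalent computed from bin centers and contents; it is a sketch and only approximates ROOT, whose GetQuantiles interpolates within bins:

import numpy as np

def mean_and_median_from_bins(bin_centers, bin_contents):
    # Content-weighted mean and (approximate) median of a 1D histogram.
    centers = np.asarray(bin_centers, dtype=float)
    contents = np.asarray(bin_contents, dtype=float)
    mean = np.average(centers, weights=contents)
    # Median: first bin where the normalized cumulative content reaches 0.5.
    cdf = np.cumsum(contents) / contents.sum()
    median = centers[np.searchsorted(cdf, 0.5)]
    return mean, median

mean, median = mean_and_median_from_bins([0.5, 1.5, 2.5, 3.5], [1, 4, 3, 2])
# mean == 2.1, median == 1.5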
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
_project_to_part_level
def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist:
    """ Project the input histogram to the particle level axis.

    Args:
        hist: Histogram to check for outliers.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.
    Returns:
        The histogram to check for outliers.
    """
    # Setup the projector
    import ROOT
    if isinstance(hist, (ROOT.TH2, ROOT.TH3)):
        projection_information: Dict[str, Any] = {}
        output_object = _OutputObject(None)
        projector = projectors.HistProjector(
            observable_to_project_from = hist,
            output_observable = output_object,
            output_attribute_name = "output",
            projection_name_format = "outliers_removal_hist",
            projection_information = projection_information,
        )
        # No additional_axis_cuts or projection_dependent_cut_axes
        # Projection axis
        projector.projection_axes.append(
            projectors.HistAxisRange(
                axis_type = outliers_removal_axis,
                axis_range_name = "outliers_removal_axis",
                min_val = projectors.HistAxisRange.apply_func_to_find_bin(None, 1),
                max_val = projectors.HistAxisRange.apply_func_to_find_bin(ROOT.TAxis.GetNbins),
            )
        )
        # Perform the actual projection and return the output.
        projector.project()
        return output_object.output

    # If we already have a 1D hist, just return that existing hist.
    return hist
python
def _project_to_part_level(hist: Hist, outliers_removal_axis: OutliersRemovalAxis) -> Hist:
    """ Project the input histogram to the particle level axis.

    Args:
        hist: Histogram to check for outliers.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.
    Returns:
        The histogram to check for outliers.
    """
    # Setup the projector
    import ROOT
    if isinstance(hist, (ROOT.TH2, ROOT.TH3)):
        projection_information: Dict[str, Any] = {}
        output_object = _OutputObject(None)
        projector = projectors.HistProjector(
            observable_to_project_from = hist,
            output_observable = output_object,
            output_attribute_name = "output",
            projection_name_format = "outliers_removal_hist",
            projection_information = projection_information,
        )
        # No additional_axis_cuts or projection_dependent_cut_axes
        # Projection axis
        projector.projection_axes.append(
            projectors.HistAxisRange(
                axis_type = outliers_removal_axis,
                axis_range_name = "outliers_removal_axis",
                min_val = projectors.HistAxisRange.apply_func_to_find_bin(None, 1),
                max_val = projectors.HistAxisRange.apply_func_to_find_bin(ROOT.TAxis.GetNbins),
            )
        )
        # Perform the actual projection and return the output.
        projector.project()
        return output_object.output

    # If we already have a 1D hist, just return that existing hist.
    return hist
[ "def", "_project_to_part_level", "(", "hist", ":", "Hist", ",", "outliers_removal_axis", ":", "OutliersRemovalAxis", ")", "->", "Hist", ":", "# Setup the projector", "import", "ROOT", "if", "isinstance", "(", "hist", ",", "(", "ROOT", ".", "TH2", ",", "ROOT", ".", "TH3", ")", ")", ":", "projection_information", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "output_object", "=", "_OutputObject", "(", "None", ")", "projector", "=", "projectors", ".", "HistProjector", "(", "observable_to_project_from", "=", "hist", ",", "output_observable", "=", "output_object", ",", "output_attribute_name", "=", "\"output\"", ",", "projection_name_format", "=", "\"outliers_removal_hist\"", ",", "projection_information", "=", "projection_information", ",", ")", "# No additional_axis_cuts or projection_dependent_cut_axes", "# Projection axis", "projector", ".", "projection_axes", ".", "append", "(", "projectors", ".", "HistAxisRange", "(", "axis_type", "=", "outliers_removal_axis", ",", "axis_range_name", "=", "\"outliers_removal_axis\"", ",", "min_val", "=", "projectors", ".", "HistAxisRange", ".", "apply_func_to_find_bin", "(", "None", ",", "1", ")", ",", "max_val", "=", "projectors", ".", "HistAxisRange", ".", "apply_func_to_find_bin", "(", "ROOT", ".", "TAxis", ".", "GetNbins", ")", ",", ")", ")", "# Perform the actual projection and return the output.", "projector", ".", "project", "(", ")", "return", "output_object", ".", "output", "# If we already have a 1D hist, just return that existing hist.", "return", "hist" ]
Project the input histogram to the particle level axis.

Args:
    hist: Histogram to check for outliers.
    outliers_removal_axis: Axis along which outliers removal will be performed.
        Usually the particle level axis.
Returns:
    The histogram to check for outliers.
[ "Project", "the", "input", "histogram", "to", "the", "particle", "level", "axis", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L54-L92
train
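For a plain TH2 the same effect is available through ROOT's own projection call; a minimal sketch, assuming the particle level lies on the x axis (the projector framework above exists precisely so that the axis choice stays configurable):

import ROOT  # requires a PyROOT installation

h2 = ROOT.TH2D('h2', 'response', 10, 0, 10, 10, 0, 10)
h2.Fill(3.0, 4.0)

# Project onto the x axis over all y bins -- analogous to what the
# projector does when outliers_removal_axis is the x axis.
h1 = h2.ProjectionX('outliers_removal_hist')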
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
_determine_outliers_index
def _determine_outliers_index(hist: Hist,
                              moving_average_threshold: float = 1.0,
                              number_of_values_to_search_ahead: int = 5,
                              limit_of_number_of_values_below_threshold: int = None) -> int:
    """ Determine the location of where outliers begin in a 1D histogram.

    When the moving average falls below the limit, we consider the outliers to have begun.

    To determine the location of outliers:

    - Calculate the moving average for number_of_values_to_search_ahead values.
    - First, the moving average must go above the limit at least once to guard against a random
      cut in a low pt bin causing most of the data to be cut out.
    - Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
    - If we meet that condition, we have found the index where the outliers begin. We then return
      the ROOT bin index of the value.
    - If not, we return -1.

    Note:
        The index returned is when the moving average first drops below the threshold for a moving
        average calculated with that bin at the center. This is somewhat different from a standard
        moving average calculation which would only look forward in the array.

    Args:
        hist: Histogram to be checked for outliers.
        moving_average_threshold: Value of moving average under which we consider the moving
            average to be 0. Default: 1.0.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when
            calculating the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold
            to be considered the beginning of outliers. Default: None, which will correspond to
            number_of_values_to_search_ahead - 1.
    Returns:
        ROOT (i.e. 1-indexed) index of the histogram axes where the outliers begin.
    """
    # Validation
    import ROOT
    if isinstance(hist, (ROOT.TH2, ROOT.TH3, ROOT.THnBase)):
        raise ValueError(
            f"Given histogram '{hist.GetName()}' of type {type(hist)}, but can only"
            " determine the outlier location of a 1D histogram. Please project to"
            " the particle level axis first."
        )
    if limit_of_number_of_values_below_threshold is None:
        # In principle, this could be another value. However, this is what was used in the
        # previous outliers removal implementation.
        limit_of_number_of_values_below_threshold = number_of_values_to_search_ahead - 1

    # It is much more convenient to work with a numpy array.
    hist_to_check = histogram.Histogram1D.from_existing_hist(hist)

    # Calculate the moving average for the entire axis, looking ahead including the current
    # bin + 4 = 5 ahead.
    number_of_values_to_search_ahead = 5
    moving_average = utils.moving_average(hist_to_check.y, n = number_of_values_to_search_ahead)
    #logger.debug(f"y: {hist_to_check.y}")
    #logger.debug(f"moving_average: {moving_average}")

    cut_index = _determine_outliers_for_moving_average(
        moving_average = moving_average,
        moving_average_threshold = moving_average_threshold,
        number_of_values_to_search_ahead = number_of_values_to_search_ahead,
        limit_of_number_of_values_below_threshold = limit_of_number_of_values_below_threshold,
    )
    if cut_index != -1:
        # ROOT histograms are 1 indexed, so we add another 1.
        cut_index += 1
    return cut_index
python
def _determine_outliers_index(hist: Hist,
                              moving_average_threshold: float = 1.0,
                              number_of_values_to_search_ahead: int = 5,
                              limit_of_number_of_values_below_threshold: int = None) -> int:
    """ Determine the location of where outliers begin in a 1D histogram.

    When the moving average falls below the limit, we consider the outliers to have begun.

    To determine the location of outliers:

    - Calculate the moving average for number_of_values_to_search_ahead values.
    - First, the moving average must go above the limit at least once to guard against a random
      cut in a low pt bin causing most of the data to be cut out.
    - Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
    - If we meet that condition, we have found the index where the outliers begin. We then return
      the ROOT bin index of the value.
    - If not, we return -1.

    Note:
        The index returned is when the moving average first drops below the threshold for a moving
        average calculated with that bin at the center. This is somewhat different from a standard
        moving average calculation which would only look forward in the array.

    Args:
        hist: Histogram to be checked for outliers.
        moving_average_threshold: Value of moving average under which we consider the moving
            average to be 0. Default: 1.0.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when
            calculating the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold
            to be considered the beginning of outliers. Default: None, which will correspond to
            number_of_values_to_search_ahead - 1.
    Returns:
        ROOT (i.e. 1-indexed) index of the histogram axes where the outliers begin.
    """
    # Validation
    import ROOT
    if isinstance(hist, (ROOT.TH2, ROOT.TH3, ROOT.THnBase)):
        raise ValueError(
            f"Given histogram '{hist.GetName()}' of type {type(hist)}, but can only"
            " determine the outlier location of a 1D histogram. Please project to"
            " the particle level axis first."
        )
    if limit_of_number_of_values_below_threshold is None:
        # In principle, this could be another value. However, this is what was used in the
        # previous outliers removal implementation.
        limit_of_number_of_values_below_threshold = number_of_values_to_search_ahead - 1

    # It is much more convenient to work with a numpy array.
    hist_to_check = histogram.Histogram1D.from_existing_hist(hist)

    # Calculate the moving average for the entire axis, looking ahead including the current
    # bin + 4 = 5 ahead.
    number_of_values_to_search_ahead = 5
    moving_average = utils.moving_average(hist_to_check.y, n = number_of_values_to_search_ahead)
    #logger.debug(f"y: {hist_to_check.y}")
    #logger.debug(f"moving_average: {moving_average}")

    cut_index = _determine_outliers_for_moving_average(
        moving_average = moving_average,
        moving_average_threshold = moving_average_threshold,
        number_of_values_to_search_ahead = number_of_values_to_search_ahead,
        limit_of_number_of_values_below_threshold = limit_of_number_of_values_below_threshold,
    )
    if cut_index != -1:
        # ROOT histograms are 1 indexed, so we add another 1.
        cut_index += 1
    return cut_index
[ "def", "_determine_outliers_index", "(", "hist", ":", "Hist", ",", "moving_average_threshold", ":", "float", "=", "1.0", ",", "number_of_values_to_search_ahead", ":", "int", "=", "5", ",", "limit_of_number_of_values_below_threshold", ":", "int", "=", "None", ")", "->", "int", ":", "# Validation", "import", "ROOT", "if", "isinstance", "(", "hist", ",", "(", "ROOT", ".", "TH2", ",", "ROOT", ".", "TH3", ",", "ROOT", ".", "THnBase", ")", ")", ":", "raise", "ValueError", "(", "f\"Given histogram '{hist.GetName()}' of type {type(hist)}, but can only\"", "\" determine the outlier location of a 1D histogram. Please project to\"", "\" the particle level axis first.\"", ")", "if", "limit_of_number_of_values_below_threshold", "is", "None", ":", "# In principle, this could be another value. However, this is what was used in the previous outliers", "# removal implementation.", "limit_of_number_of_values_below_threshold", "=", "number_of_values_to_search_ahead", "-", "1", "# It is much more convenient to work with a numpy array.", "hist_to_check", "=", "histogram", ".", "Histogram1D", ".", "from_existing_hist", "(", "hist", ")", "# Calculate the moving average for the entire axis, looking ahead including the current bin + 4 = 5 ahead.", "number_of_values_to_search_ahead", "=", "5", "moving_average", "=", "utils", ".", "moving_average", "(", "hist_to_check", ".", "y", ",", "n", "=", "number_of_values_to_search_ahead", ")", "#logger.debug(f\"y: {hist_to_check.y}\")", "#logger.debug(f\"moving_average: {moving_average}\")", "cut_index", "=", "_determine_outliers_for_moving_average", "(", "moving_average", "=", "moving_average", ",", "moving_average_threshold", "=", "moving_average_threshold", ",", "number_of_values_to_search_ahead", "=", "number_of_values_to_search_ahead", ",", "limit_of_number_of_values_below_threshold", "=", "limit_of_number_of_values_below_threshold", ",", ")", "if", "cut_index", "!=", "-", "1", ":", "# ROOT histograms are 1 indexed, so we add another 1.", "cut_index", "+=", "1", "return", "cut_index" ]
Determine the location of where outliers begin in a 1D histogram.

    When the moving average falls below the limit, we consider the outliers to have begun.

    To determine the location of outliers:

    - Calculate the moving average for number_of_values_to_search_ahead values.
    - First, the moving average must go above the limit at least once to guard against a random cut
      in a low pt bin causing most of the data to be cut out.
    - Next, we look for a consecutive number of entries below limit_of_number_of_values_below_threshold.
    - If we meet that condition, we have found the index where the outliers begin. We then return the
      ROOT bin index of the value.
    - If not, we return -1.

    Note:
        The index returned is when the moving average first drops below the threshold for a moving average
        calculated with that bin at the center. This is somewhat different from a standard moving average
        calculation which would only look forward in the array.

    Args:
        hist: Histogram to be checked for outliers.
        moving_average_threshold: Value of moving average under which we consider the moving average
            to be 0. Default: 1.0.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
            the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
            the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
    Returns:
        ROOT (ie 1-indexed) index of the histogram axes where the outliers begin.
[ "Determine", "the", "location", "of", "where", "outliers", "begin", "in", "a", "1D", "histogram", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L94-L163
train
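A minimal, self-contained sketch of the detection idea in the record above, assuming only numpy. Pachyderm's histogram.Histogram1D and utils.moving_average wrappers are not reproduced here, np.convolve stands in for the moving average, and the consecutive-run requirement is simplified to a single drop below threshold; the function and variable names are illustrative only.

import numpy as np

def find_outlier_start(counts, threshold=1.0, window=5):
    # Forward-looking moving average over `window` bins.
    moving_average = np.convolve(counts, np.ones(window) / window, mode="valid")
    seen_above = False
    for i, value in enumerate(moving_average):
        if value >= threshold:
            # Guard: the average must exceed the threshold at least once first.
            seen_above = True
        elif seen_above:
            # First drop below the threshold after having been above it.
            return i
    return -1

counts = np.array([0.0, 5.0, 8.0, 6.0, 4.0, 3.0, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0])
print(find_outlier_start(counts))  # 5: the low-statistics tail begins here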
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
_determine_outliers_for_moving_average
def _determine_outliers_for_moving_average(moving_average: np.ndarray, moving_average_threshold: float, number_of_values_to_search_ahead: int, limit_of_number_of_values_below_threshold: int) -> int:
    """ Determine outliers to remove from a given moving average.

    Note:
        The index returned is when the moving average first drops below the threshold for a moving average
        calculated with that bin at the center. This is somewhat different from a standard moving average
        calculation which would only look forward in the array.

    Args:
        moving_average: Moving average.
        moving_average_threshold: Value of moving average under which we consider the moving average
            to be 0. Default: 2.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
            the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
            the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
    Returns:
        0-indexed index of the histogram axes where the outliers begin.
    """
    below_threshold = moving_average < moving_average_threshold

    # Build up a list of values to check if they are below threshold. This list allows us to easily look
    # forward in the below_threshold array.
    values_to_check = []
    for i in range(limit_of_number_of_values_below_threshold):
        # Basically, this gives us (for limit_of_number_of_values_below_threshold = 4):
        # below_threshold[0:-3], below_threshold[1:-2], below_threshold[2:-1], below_threshold[3:None]
        values_to_check.append(
            below_threshold[i:-(limit_of_number_of_values_below_threshold - 1 - i) or None]
        )
    # Some helpful logging information.
    #logger.debug(f"values_to_check: {values_to_check}")
    #logger.debug(f"moving avg length: {len(moving_average)}, length of values_to_check entries: {[len(v) for v in values_to_check]}")

    # Must have at least one bin above the specified threshold.
    found_at_least_one_bin_above_threshold = False
    # Index we will search for from which outliers will be cut.
    cut_index = -1
    # Determine the index where the limit_of_number_of_values_below_threshold bins are consecutively below the threshold.
    for i, values in enumerate(zip(*values_to_check)):
        # Skip the first bin because some old pt hard bin trains had a large number of erroneous entries
        # in the first bin (regardless of the actual pt hard bin). This should be resolved in the embedding
        # helper now. In any case, it doesn't make sense to encounter outliers in the first bin, so this is a
        # fine bin to skip.
        if i == 0:
            continue

        # True if below threshold, so check if not True.
        above_threshold = [not value for value in values]
        # We require the values to go above the moving average threshold at least once.
        if any(above_threshold):
            #logger.debug(f"Found bin i {i} above threshold with moving average: {moving_average[i]}")
            found_at_least_one_bin_above_threshold = True

        # All values from which we are looking ahead must be below the threshold to consider the index
        # as below threshold.
        if found_at_least_one_bin_above_threshold and all(np.invert(above_threshold)):
            # The previous outlier removal implementation used a moving average centered on a value
            # (ie. it checked ``arr[-2 + current_index:current_index + 3]``). Thus, we need to
            # shift the cut_index that we assign by limit_of_number_of_values_below_threshold // 2 for
            # the index where we have found all values below the threshold.
            logger.debug(f"i at found cut_index: {i} with moving_average: {moving_average[i]}")
            cut_index = i + limit_of_number_of_values_below_threshold // 2
            break

    return cut_index
python
def _determine_outliers_for_moving_average(moving_average: np.ndarray, moving_average_threshold: float, number_of_values_to_search_ahead: int, limit_of_number_of_values_below_threshold: int) -> int:
    """ Determine outliers to remove from a given moving average.

    Note:
        The index returned is when the moving average first drops below the threshold for a moving average
        calculated with that bin at the center. This is somewhat different from a standard moving average
        calculation which would only look forward in the array.

    Args:
        moving_average: Moving average.
        moving_average_threshold: Value of moving average under which we consider the moving average
            to be 0. Default: 2.
        number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating
            the moving average. Default: 5.
        limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered
            the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1.
    Returns:
        0-indexed index of the histogram axes where the outliers begin.
    """
    below_threshold = moving_average < moving_average_threshold

    # Build up a list of values to check if they are below threshold. This list allows us to easily look
    # forward in the below_threshold array.
    values_to_check = []
    for i in range(limit_of_number_of_values_below_threshold):
        # Basically, this gives us (for limit_of_number_of_values_below_threshold = 4):
        # below_threshold[0:-3], below_threshold[1:-2], below_threshold[2:-1], below_threshold[3:None]
        values_to_check.append(
            below_threshold[i:-(limit_of_number_of_values_below_threshold - 1 - i) or None]
        )
    # Some helpful logging information.
    #logger.debug(f"values_to_check: {values_to_check}")
    #logger.debug(f"moving avg length: {len(moving_average)}, length of values_to_check entries: {[len(v) for v in values_to_check]}")

    # Must have at least one bin above the specified threshold.
    found_at_least_one_bin_above_threshold = False
    # Index we will search for from which outliers will be cut.
    cut_index = -1
    # Determine the index where the limit_of_number_of_values_below_threshold bins are consecutively below the threshold.
    for i, values in enumerate(zip(*values_to_check)):
        # Skip the first bin because some old pt hard bin trains had a large number of erroneous entries
        # in the first bin (regardless of the actual pt hard bin). This should be resolved in the embedding
        # helper now. In any case, it doesn't make sense to encounter outliers in the first bin, so this is a
        # fine bin to skip.
        if i == 0:
            continue

        # True if below threshold, so check if not True.
        above_threshold = [not value for value in values]
        # We require the values to go above the moving average threshold at least once.
        if any(above_threshold):
            #logger.debug(f"Found bin i {i} above threshold with moving average: {moving_average[i]}")
            found_at_least_one_bin_above_threshold = True

        # All values from which we are looking ahead must be below the threshold to consider the index
        # as below threshold.
        if found_at_least_one_bin_above_threshold and all(np.invert(above_threshold)):
            # The previous outlier removal implementation used a moving average centered on a value
            # (ie. it checked ``arr[-2 + current_index:current_index + 3]``). Thus, we need to
            # shift the cut_index that we assign by limit_of_number_of_values_below_threshold // 2 for
            # the index where we have found all values below the threshold.
            logger.debug(f"i at found cut_index: {i} with moving_average: {moving_average[i]}")
            cut_index = i + limit_of_number_of_values_below_threshold // 2
            break

    return cut_index
[ "def", "_determine_outliers_for_moving_average", "(", "moving_average", ":", "np", ".", "ndarray", ",", "moving_average_threshold", ":", "float", ",", "number_of_values_to_search_ahead", ":", "int", ",", "limit_of_number_of_values_below_threshold", ":", "int", ")", "->", "int", ":", "below_threshold", "=", "moving_average", "<", "moving_average_threshold", "# Build up a list of values to check if they are below threshold. This list allows us to easily look", "# forward in the below_threshold array.", "values_to_check", "=", "[", "]", "for", "i", "in", "range", "(", "limit_of_number_of_values_below_threshold", ")", ":", "# Basically, this gives us (for limit_of_number_of_values_below_threshold = 4):", "# below_threshold[0:-3], below_threshold[1:-2], below_threshold[2:-1], below_threshold[3:None]", "values_to_check", ".", "append", "(", "below_threshold", "[", "i", ":", "-", "(", "limit_of_number_of_values_below_threshold", "-", "1", "-", "i", ")", "or", "None", "]", ")", "# Some helpful logging information.", "#logger.debug(f\"values_to_check: {values_to_check}\")", "#logger.debug(f\"moving avg length: {len(moving_average)}, length of values_to_check entries: {[len(v) for v in values_to_check]}\")", "# Must have at least one bin above the specified threshold.", "found_at_least_one_bin_above_threshold", "=", "False", "# Index we will search for from which outliers will be cut.", "cut_index", "=", "-", "1", "# Determine the index where the limit_of_number_of_values_below_threshold bins are consequentially below the threshold.", "for", "i", ",", "values", "in", "enumerate", "(", "zip", "(", "*", "values_to_check", ")", ")", ":", "# Skip the first bin because some old pt hard bin trains had a large number of erroneous entries", "# in the first bin (regardless of the actual pt hard bin). This should be resolved in the embedding", "# helper now. In any case, it doesn't make sense to encounter outliers in the first bin, so this is a", "# fine bin to skip.", "if", "i", "==", "0", ":", "continue", "# True if below threshold, so check if not True.", "above_threshold", "=", "[", "not", "value", "for", "value", "in", "values", "]", "# We require the values to go above the moving average threshold at least once.", "if", "any", "(", "above_threshold", ")", ":", "#logger.debug(f\"Found bin i {i} above threshold with moving average: {moving_average[i]}\")", "found_at_least_one_bin_above_threshold", "=", "True", "# All values from which we are looking ahead must be below the threshold to consider the index", "# as below threshold.", "if", "found_at_least_one_bin_above_threshold", "and", "all", "(", "np", ".", "invert", "(", "above_threshold", ")", ")", ":", "# The previous outlier removal implementation used a moving average centered on a value", "# (ie. it checked ``arr[-2 + current_index:current_index + 3]``). Thus, we need to", "# shift the cut_index that we assign by limit_of_number_of_values_below_threshold // 2 for", "# the index where we have found all values below the threshold.", "logger", ".", "debug", "(", "f\"i at found cut_index: {i} with moving_average: {moving_average[i]}\"", ")", "cut_index", "=", "i", "+", "limit_of_number_of_values_below_threshold", "//", "2", "break", "return", "cut_index" ]
Determine outliers to remove from a given moving average. Note: The index returned is when the moving average first drops below the threshold for a moving average calculated with that bin at the center. This is somewhat different from a standard moving average calculation which would only look forward in the array. Args: moving_average: Moving average. moving_average_threshold: Value of moving average under which we consider the moving average to be 0. Default: 2. number_of_values_to_search_ahead: Number of values to search ahead in the array when calculating the moving average. Default: 5. limit_of_number_of_values_below_threshold: Number of consecutive bins below the threshold to be considered the beginning of outliers. Default: None, which will correspond to number_of_values_to_search_ahead - 1. Returns: 0-indexed index of the histogram axes where the outliers begin.
[ "Determine", "outliers", "to", "remove", "from", "a", "given", "moving", "average", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L165-L235
train
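The staggered-slice trick in this record is compact but easy to misread. Here is a toy, runnable illustration of how the slices line up under zip; the variable names are illustrative, not pachyderm's.

import numpy as np

below = np.array([False, False, True, True, True, True, False])
limit = 3
# below[0:-2], below[1:-1], below[2:None] — row i drops the first i entries and
# trims the tail so that zip() walks aligned look-ahead windows of length `limit`.
windows = [below[i:-(limit - 1 - i) or None] for i in range(limit)]
for start, values in enumerate(zip(*windows)):
    if all(values):
        print("first run of", limit, "below-threshold bins starts at", start)  # 2
        break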
raymondEhlers/pachyderm
pachyderm/remove_outliers.py
_remove_outliers_from_hist
def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None:
    """ Remove outliers from a given histogram.

    Args:
        hist: Histogram to check for outliers.
        outliers_start_index: Index in the truth axis where outliers begin.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.
    Returns:
        None. The histogram is modified in place.
    """
    # Use on TH1, TH2, and TH3 since we don't start removing immediately, but instead only after the limit
    if outliers_start_index > 0:
        #logger.debug("Removing outliers")
        # Check for values above which they should be removed by translating the global index
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        z = ctypes.c_int(0)
        # Maps axis to values
        # This is kind of dumb, but it works.
        outliers_removal_axis_values: Dict[OutliersRemovalAxis, ctypes.c_int] = {
            projectors.TH1AxisType.x_axis: x,
            projectors.TH1AxisType.y_axis: y,
            projectors.TH1AxisType.z_axis: z,
        }
        for index in range(0, hist.GetNcells()):
            # Get the bin x, y, z from the global bin
            hist.GetBinXYZ(index, x, y, z)
            # Watch out for any problems
            if hist.GetBinContent(index) < hist.GetBinError(index):
                logger.warning(f"Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})")
            if outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index:
                #logger.debug("Cutting for index {}. x bin {}. Cut index: {}".format(index, x, cutIndex))
                hist.SetBinContent(index, 0)
                hist.SetBinError(index, 0)
    else:
        logger.info(f"Hist {hist.GetName()} did not have any outliers to cut")
python
def _remove_outliers_from_hist(hist: Hist, outliers_start_index: int, outliers_removal_axis: OutliersRemovalAxis) -> None:
    """ Remove outliers from a given histogram.

    Args:
        hist: Histogram to check for outliers.
        outliers_start_index: Index in the truth axis where outliers begin.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.
    Returns:
        None. The histogram is modified in place.
    """
    # Use on TH1, TH2, and TH3 since we don't start removing immediately, but instead only after the limit
    if outliers_start_index > 0:
        #logger.debug("Removing outliers")
        # Check for values above which they should be removed by translating the global index
        x = ctypes.c_int(0)
        y = ctypes.c_int(0)
        z = ctypes.c_int(0)
        # Maps axis to values
        # This is kind of dumb, but it works.
        outliers_removal_axis_values: Dict[OutliersRemovalAxis, ctypes.c_int] = {
            projectors.TH1AxisType.x_axis: x,
            projectors.TH1AxisType.y_axis: y,
            projectors.TH1AxisType.z_axis: z,
        }
        for index in range(0, hist.GetNcells()):
            # Get the bin x, y, z from the global bin
            hist.GetBinXYZ(index, x, y, z)
            # Watch out for any problems
            if hist.GetBinContent(index) < hist.GetBinError(index):
                logger.warning(f"Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})")
            if outliers_removal_axis_values[outliers_removal_axis].value >= outliers_start_index:
                #logger.debug("Cutting for index {}. x bin {}. Cut index: {}".format(index, x, cutIndex))
                hist.SetBinContent(index, 0)
                hist.SetBinError(index, 0)
    else:
        logger.info(f"Hist {hist.GetName()} did not have any outliers to cut")
[ "def", "_remove_outliers_from_hist", "(", "hist", ":", "Hist", ",", "outliers_start_index", ":", "int", ",", "outliers_removal_axis", ":", "OutliersRemovalAxis", ")", "->", "None", ":", "# Use on TH1, TH2, and TH3 since we don't start removing immediately, but instead only after the limit", "if", "outliers_start_index", ">", "0", ":", "#logger.debug(\"Removing outliers\")", "# Check for values above which they should be removed by translating the global index", "x", "=", "ctypes", ".", "c_int", "(", "0", ")", "y", "=", "ctypes", ".", "c_int", "(", "0", ")", "z", "=", "ctypes", ".", "c_int", "(", "0", ")", "# Maps axis to valaues", "# This is kind of dumb, but it works.", "outliers_removal_axis_values", ":", "Dict", "[", "OutliersRemovalAxis", ",", "ctypes", ".", "c_int", "]", "=", "{", "projectors", ".", "TH1AxisType", ".", "x_axis", ":", "x", ",", "projectors", ".", "TH1AxisType", ".", "y_axis", ":", "y", ",", "projectors", ".", "TH1AxisType", ".", "z_axis", ":", "z", ",", "}", "for", "index", "in", "range", "(", "0", ",", "hist", ".", "GetNcells", "(", ")", ")", ":", "# Get the bin x, y, z from the global bin", "hist", ".", "GetBinXYZ", "(", "index", ",", "x", ",", "y", ",", "z", ")", "# Watch out for any problems", "if", "hist", ".", "GetBinContent", "(", "index", ")", "<", "hist", ".", "GetBinError", "(", "index", ")", ":", "logger", ".", "warning", "(", "f\"Bin content < error. Name: {hist.GetName()}, Bin content: {hist.GetBinContent(index)}, Bin error: {hist.GetBinError(index)}, index: {index}, ({x.value}, {y.value})\"", ")", "if", "outliers_removal_axis_values", "[", "outliers_removal_axis", "]", ".", "value", ">=", "outliers_start_index", ":", "#logger.debug(\"Cutting for index {}. x bin {}. Cut index: {}\".format(index, x, cutIndex))", "hist", ".", "SetBinContent", "(", "index", ",", "0", ")", "hist", ".", "SetBinError", "(", "index", ",", "0", ")", "else", ":", "logger", ".", "info", "(", "f\"Hist {hist.GetName()} did not have any outliers to cut\"", ")" ]
Remove outliers from a given histogram.

    Args:
        hist: Histogram to check for outliers.
        outliers_start_index: Index in the truth axis where outliers begin.
        outliers_removal_axis: Axis along which outliers removal will be performed.
            Usually the particle level axis.
    Returns:
        None. The histogram is modified in place.
[ "Remove", "outliers", "from", "a", "given", "histogram", "." ]
aaa1d8374fd871246290ce76f1796f2f7582b01d
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/remove_outliers.py#L237-L273
train
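The same zero-out-past-the-cut operation, shown on a plain numpy 2D array so it runs without ROOT installed; the bin layout and cut index here are made up for the example.

import numpy as np

hist2d = np.arange(20, dtype=float).reshape(4, 5)  # 4 detector-level x 5 particle-level bins
errors = np.sqrt(hist2d)
outliers_start_index = 3  # cut along the particle-level (column) axis

# Zero both contents and errors from the cut index onwards, in place.
hist2d[:, outliers_start_index:] = 0.0
errors[:, outliers_start_index:] = 0.0
print(hist2d)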
yoannMoreau/landsat_theia
python/clipper_helper.py
Clipper.shapefile
def shapefile(self, file):
    """ Reprojects to WGS84 and retrieves the extent """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dataset = driver.Open(file)
    if dataset is not None:
        # from Layer
        layer = dataset.GetLayer()
        spatialRef = layer.GetSpatialRef()

        # from Geometry
        feature = layer.GetNextFeature()
        geom = feature.GetGeometryRef()
        spatialRef = geom.GetSpatialReference()

        #WGS84
        outSpatialRef = osr.SpatialReference()
        outSpatialRef.ImportFromEPSG(4326)

        coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)

        env = geom.GetEnvelope()
        xmin = env[0]
        ymin = env[2]
        xmax = env[1]
        ymax = env[3]

        pointMAX = ogr.Geometry(ogr.wkbPoint)
        pointMAX.AddPoint(env[1], env[3])
        pointMAX.Transform(coordTrans)

        pointMIN = ogr.Geometry(ogr.wkbPoint)
        pointMIN.AddPoint(env[0], env[2])
        pointMIN.Transform(coordTrans)

        self.bbox = str(pointMIN.GetPoint()[0])+','+str(pointMIN.GetPoint()[1])+','+str(pointMAX.GetPoint()[0])+','+str(pointMAX.GetPoint()[1])
        self.query = None
    else:
        exit(" shapefile not found. Please verify your path to the shapefile")
python
def shapefile(self, file):
    """ Reprojects to WGS84 and retrieves the extent """
    driver = ogr.GetDriverByName('ESRI Shapefile')
    dataset = driver.Open(file)
    if dataset is not None:
        # from Layer
        layer = dataset.GetLayer()
        spatialRef = layer.GetSpatialRef()

        # from Geometry
        feature = layer.GetNextFeature()
        geom = feature.GetGeometryRef()
        spatialRef = geom.GetSpatialReference()

        #WGS84
        outSpatialRef = osr.SpatialReference()
        outSpatialRef.ImportFromEPSG(4326)

        coordTrans = osr.CoordinateTransformation(spatialRef, outSpatialRef)

        env = geom.GetEnvelope()
        xmin = env[0]
        ymin = env[2]
        xmax = env[1]
        ymax = env[3]

        pointMAX = ogr.Geometry(ogr.wkbPoint)
        pointMAX.AddPoint(env[1], env[3])
        pointMAX.Transform(coordTrans)

        pointMIN = ogr.Geometry(ogr.wkbPoint)
        pointMIN.AddPoint(env[0], env[2])
        pointMIN.Transform(coordTrans)

        self.bbox = str(pointMIN.GetPoint()[0])+','+str(pointMIN.GetPoint()[1])+','+str(pointMAX.GetPoint()[0])+','+str(pointMAX.GetPoint()[1])
        self.query = None
    else:
        exit(" shapefile not found. Please verify your path to the shapefile")
[ "def", "shapefile", "(", "self", ",", "file", ")", ":", "driver", "=", "ogr", ".", "GetDriverByName", "(", "'ESRI Shapefile'", ")", "dataset", "=", "driver", ".", "Open", "(", "file", ")", "if", "dataset", "is", "not", "None", ":", "# from Layer", "layer", "=", "dataset", ".", "GetLayer", "(", ")", "spatialRef", "=", "layer", ".", "GetSpatialRef", "(", ")", "# from Geometry", "feature", "=", "layer", ".", "GetNextFeature", "(", ")", "geom", "=", "feature", ".", "GetGeometryRef", "(", ")", "spatialRef", "=", "geom", ".", "GetSpatialReference", "(", ")", "#WGS84", "outSpatialRef", "=", "osr", ".", "SpatialReference", "(", ")", "outSpatialRef", ".", "ImportFromEPSG", "(", "4326", ")", "coordTrans", "=", "osr", ".", "CoordinateTransformation", "(", "spatialRef", ",", "outSpatialRef", ")", "env", "=", "geom", ".", "GetEnvelope", "(", ")", "xmin", "=", "env", "[", "0", "]", "ymin", "=", "env", "[", "2", "]", "xmax", "=", "env", "[", "1", "]", "ymax", "=", "env", "[", "3", "]", "pointMAX", "=", "ogr", ".", "Geometry", "(", "ogr", ".", "wkbPoint", ")", "pointMAX", ".", "AddPoint", "(", "env", "[", "1", "]", ",", "env", "[", "3", "]", ")", "pointMAX", ".", "Transform", "(", "coordTrans", ")", "pointMIN", "=", "ogr", ".", "Geometry", "(", "ogr", ".", "wkbPoint", ")", "pointMIN", ".", "AddPoint", "(", "env", "[", "0", "]", ",", "env", "[", "2", "]", ")", "pointMIN", ".", "Transform", "(", "coordTrans", ")", "self", ".", "bbox", "=", "str", "(", "pointMIN", ".", "GetPoint", "(", ")", "[", "0", "]", ")", "+", "','", "+", "str", "(", "pointMIN", ".", "GetPoint", "(", ")", "[", "1", "]", ")", "+", "','", "+", "str", "(", "pointMAX", ".", "GetPoint", "(", ")", "[", "0", "]", ")", "+", "','", "+", "str", "(", "pointMAX", ".", "GetPoint", "(", ")", "[", "1", "]", ")", "self", ".", "query", "=", "None", "else", ":", "exit", "(", "\" shapefile not found. Please verify your path to the shapefile\"", ")" ]
Reprojects to WGS84 and retrieves the extent
[ "reprojette", "en", "WGS84", "et", "recupere", "l", "extend" ]
d23831417dfb6d0da8c9ef5c121f3a731f4eec94
https://github.com/yoannMoreau/landsat_theia/blob/d23831417dfb6d0da8c9ef5c121f3a731f4eec94/python/clipper_helper.py#L28-L68
train
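A hedged sketch of the same extent-to-WGS84 computation using the modern osgeo import path. It assumes GDAL's Python bindings are installed, uses the layer extent rather than a single feature's envelope, and the function name is an assumption of this example; on GDAL 2 the axis-mapping line can simply be dropped.

from osgeo import ogr, osr

def wgs84_bbox(shapefile_path):
    dataset = ogr.Open(shapefile_path)
    if dataset is None:
        raise FileNotFoundError(shapefile_path)
    layer = dataset.GetLayer()
    xmin, xmax, ymin, ymax = layer.GetExtent()  # extent in the layer's native CRS
    target = osr.SpatialReference()
    target.ImportFromEPSG(4326)
    # GDAL >= 3 swaps axis order for EPSG:4326 by default; keep x=lon, y=lat.
    target.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    transform = osr.CoordinateTransformation(layer.GetSpatialRef(), target)
    low = ogr.Geometry(ogr.wkbPoint)
    low.AddPoint(xmin, ymin)
    low.Transform(transform)
    high = ogr.Geometry(ogr.wkbPoint)
    high.AddPoint(xmax, ymax)
    high.Transform(transform)
    return '%s,%s,%s,%s' % (low.GetX(), low.GetY(), high.GetX(), high.GetY())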
portfors-lab/sparkle
sparkle/run/protocol_runner.py
ProtocolRunner.set_comment
def set_comment(self, cellid, comment): """Saves the provided comment to the current dataset. :param cellid: number of the current cell :type cellid: int :param comment: a message to add documentation to data :type comment: str """ info = {'cellid': cellid, 'comment': comment} self.datafile.set_metadata(self.current_dataset_name, info)
python
def set_comment(self, cellid, comment): """Saves the provided comment to the current dataset. :param cellid: number of the current cell :type cellid: int :param comment: a message to add documentation to data :type comment: str """ info = {'cellid': cellid, 'comment': comment} self.datafile.set_metadata(self.current_dataset_name, info)
[ "def", "set_comment", "(", "self", ",", "cellid", ",", "comment", ")", ":", "info", "=", "{", "'cellid'", ":", "cellid", ",", "'comment'", ":", "comment", "}", "self", ".", "datafile", ".", "set_metadata", "(", "self", ".", "current_dataset_name", ",", "info", ")" ]
Saves the provided comment to the current dataset. :param cellid: number of the current cell :type cellid: int :param comment: a message to add documentation to data :type comment: str
[ "Saves", "the", "provided", "comment", "to", "the", "current", "dataset", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/protocol_runner.py#L82-L91
train
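set_comment simply stores a plain metadata dict against the current dataset. A stand-in showing the contract it relies on; FakeDatafile here is hypothetical, not sparkle's real HDF5-backed data file class.

class FakeDatafile:
    # Hypothetical stand-in: keeps per-dataset metadata in a dict.
    def __init__(self):
        self.metadata = {}

    def set_metadata(self, dataset_name, info):
        self.metadata.setdefault(dataset_name, {}).update(info)

datafile = FakeDatafile()
datafile.set_metadata('segment_1', {'cellid': 7, 'comment': 'weak response above 15 kHz'})
print(datafile.metadata)  # {'segment_1': {'cellid': 7, 'comment': 'weak response above 15 kHz'}}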
NoviceLive/pat
pat/main.py
main
def main(argument, sets, big_endian, optimal, output, clipboard, quiet, verbose): """Customizable Lazy Exploit Pattern Utility.""" logger = logging.getLogger() handler = logging.StreamHandler(sys.stderr) handler.setFormatter(LevelFormatter()) logger.addHandler(handler) logger.setLevel(logging.WARNING + (quiet-verbose)*10) if sets and optimal: pat = Pat.from_chars(''.join(sets), optimal) elif optimal: pat = Pat.from_chars(optimal=optimal) elif sets: pat = Pat(sets) else: pat = Pat() if argument.isdigit(): count = int(argument) try: pattern = pat.create(count) except IndexError: logging.exception(_('Failed to create the pattern.')) sys.exit(1) else: if output: output.write(pattern) elif clipboard: copy(pattern) else: print(pattern) else: target = argument try: index = pat.locate(target, big_endian) except KeyError: logging.exception(_('Failed to locate the pattern.')) sys.exit(1) else: print(index) sys.exit(0)
python
def main(argument, sets, big_endian, optimal, output, clipboard, quiet, verbose): """Customizable Lazy Exploit Pattern Utility.""" logger = logging.getLogger() handler = logging.StreamHandler(sys.stderr) handler.setFormatter(LevelFormatter()) logger.addHandler(handler) logger.setLevel(logging.WARNING + (quiet-verbose)*10) if sets and optimal: pat = Pat.from_chars(''.join(sets), optimal) elif optimal: pat = Pat.from_chars(optimal=optimal) elif sets: pat = Pat(sets) else: pat = Pat() if argument.isdigit(): count = int(argument) try: pattern = pat.create(count) except IndexError: logging.exception(_('Failed to create the pattern.')) sys.exit(1) else: if output: output.write(pattern) elif clipboard: copy(pattern) else: print(pattern) else: target = argument try: index = pat.locate(target, big_endian) except KeyError: logging.exception(_('Failed to locate the pattern.')) sys.exit(1) else: print(index) sys.exit(0)
[ "def", "main", "(", "argument", ",", "sets", ",", "big_endian", ",", "optimal", ",", "output", ",", "clipboard", ",", "quiet", ",", "verbose", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", ")", "handler", "=", "logging", ".", "StreamHandler", "(", "sys", ".", "stderr", ")", "handler", ".", "setFormatter", "(", "LevelFormatter", "(", ")", ")", "logger", ".", "addHandler", "(", "handler", ")", "logger", ".", "setLevel", "(", "logging", ".", "WARNING", "+", "(", "quiet", "-", "verbose", ")", "*", "10", ")", "if", "sets", "and", "optimal", ":", "pat", "=", "Pat", ".", "from_chars", "(", "''", ".", "join", "(", "sets", ")", ",", "optimal", ")", "elif", "optimal", ":", "pat", "=", "Pat", ".", "from_chars", "(", "optimal", "=", "optimal", ")", "elif", "sets", ":", "pat", "=", "Pat", "(", "sets", ")", "else", ":", "pat", "=", "Pat", "(", ")", "if", "argument", ".", "isdigit", "(", ")", ":", "count", "=", "int", "(", "argument", ")", "try", ":", "pattern", "=", "pat", ".", "create", "(", "count", ")", "except", "IndexError", ":", "logging", ".", "exception", "(", "_", "(", "'Failed to create the pattern.'", ")", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "if", "output", ":", "output", ".", "write", "(", "pattern", ")", "elif", "clipboard", ":", "copy", "(", "pattern", ")", "else", ":", "print", "(", "pattern", ")", "else", ":", "target", "=", "argument", "try", ":", "index", "=", "pat", ".", "locate", "(", "target", ",", "big_endian", ")", "except", "KeyError", ":", "logging", ".", "exception", "(", "_", "(", "'Failed to locate the pattern.'", ")", ")", "sys", ".", "exit", "(", "1", ")", "else", ":", "print", "(", "index", ")", "sys", ".", "exit", "(", "0", ")" ]
Customizable Lazy Exploit Pattern Utility.
[ "Customizable", "Lazy", "Exploit", "Pattern", "Utility", "." ]
bd223fc5e758213662befbebdf9538f3fbf58ad6
https://github.com/NoviceLive/pat/blob/bd223fc5e758213662befbebdf9538f3fbf58ad6/pat/main.py#L49-L91
train
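For intuition about what the create/locate pair does, here is the classic three-charset cyclic pattern in a few lines. Pat's real implementation is configurable (custom sets, endianness handling) and differs from this, so treat it purely as a sketch.

import string

def pattern_create(length):
    # Triplets of upper/lower/digit give every 4-byte window a unique offset
    # (up to the pattern's period).
    out = []
    for upper in string.ascii_uppercase:
        for lower in string.ascii_lowercase:
            for digit in string.digits:
                out.append(upper + lower + digit)
                if len(out) * 3 >= length:
                    return "".join(out)[:length]
    return "".join(out)[:length]

def pattern_locate(needle, length=8192):
    # Offset of the value observed in a crashed register, e.g. from a debugger.
    return pattern_create(length).find(needle)

print(pattern_create(20))      # Aa0Aa1Aa2Aa3Aa4Aa5Aa
print(pattern_locate("a3Aa"))  # 10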
portfors-lab/sparkle
sparkle/gui/plotmenubar.py
PlotMenuBar.mousePressEvent
def mousePressEvent(self, event):
    """Marshals behaviour depending on location of the mouse click"""
    if event.x() < 50:
        super(PlotMenuBar, self).mousePressEvent(event)
    else:
        # ignore to allow proper functioning of float
        event.ignore()
python
def mousePressEvent(self, event):
    """Marshals behaviour depending on location of the mouse click"""
    if event.x() < 50:
        super(PlotMenuBar, self).mousePressEvent(event)
    else:
        # ignore to allow proper functioning of float
        event.ignore()
[ "def", "mousePressEvent", "(", "self", ",", "event", ")", ":", "if", "event", ".", "x", "(", ")", "<", "50", ":", "super", "(", "PlotMenuBar", ",", "self", ")", ".", "mousePressEvent", "(", "event", ")", "else", ":", "# ignore to allow proper functioning of float", "event", ".", "ignore", "(", ")" ]
Marshals behaviour depending on location of the mouse click
[ "Marshalls", "behaviour", "depending", "on", "location", "of", "the", "mouse", "click" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/plotmenubar.py#L27-L33
train
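The technique is simply gating on the click's x position and calling ignore() so the event propagates to whatever sits behind the menu bar. A Qt-free stub of that control flow; FakeEvent and mouse_press are hypothetical stand-ins, not sparkle's classes.

class FakeEvent:
    # Minimal stand-in for a Qt mouse event.
    def __init__(self, x):
        self._x = x
        self.accepted = None

    def x(self):
        return self._x

    def ignore(self):
        self.accepted = False

def mouse_press(event, menu_width=50):
    if event.x() < menu_width:
        event.accepted = True   # would defer to the normal menubar handler
    else:
        event.ignore()          # let the event fall through to the floating plot

event = FakeEvent(120)
mouse_press(event)
print(event.accepted)  # False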
TorkamaniLab/metapipe
metapipe/runtime.py
Runtime.add
def add(self, command_template, job_class): """ Given a command template, add it as a job to the queue. """ job = JobTemplate(command_template.alias, command_template=command_template, depends_on=command_template.depends_on, queue=self.queue, job_class=job_class) self.queue.push(job)
python
def add(self, command_template, job_class): """ Given a command template, add it as a job to the queue. """ job = JobTemplate(command_template.alias, command_template=command_template, depends_on=command_template.depends_on, queue=self.queue, job_class=job_class) self.queue.push(job)
[ "def", "add", "(", "self", ",", "command_template", ",", "job_class", ")", ":", "job", "=", "JobTemplate", "(", "command_template", ".", "alias", ",", "command_template", "=", "command_template", ",", "depends_on", "=", "command_template", ".", "depends_on", ",", "queue", "=", "self", ".", "queue", ",", "job_class", "=", "job_class", ")", "self", ".", "queue", ".", "push", "(", "job", ")" ]
Given a command template, add it as a job to the queue.
[ "Given", "a", "command", "template", "add", "it", "as", "a", "job", "to", "the", "queue", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/runtime.py#L26-L32
train
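Runtime.add wraps a command template in a JobTemplate carrying its dependency list and pushes it onto the queue. A bare-bones stand-in for that wiring; the classes below are simplified stubs, not metapipe's real ones.

class Queue:
    def __init__(self):
        self.jobs = []

    def push(self, job):
        self.jobs.append(job)

class JobTemplate:
    # Simplified stand-in for metapipe's JobTemplate.
    def __init__(self, alias, command_template, depends_on, queue, job_class):
        self.alias = alias
        self.command_template = command_template
        self.depends_on = depends_on
        self.queue = queue
        self.job_class = job_class

queue = Queue()
queue.push(JobTemplate('cut.1', command_template=None, depends_on=['trim.1'],
                       queue=queue, job_class=object))
print([job.alias for job in queue.jobs])  # ['cut.1']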
TorkamaniLab/metapipe
metapipe/runtime.py
Runtime.run
def run(self): """ Begins the runtime execution. """ iterations = 0 queue = self.queue.tick() while True: try: next(queue) except StopIteration: break iterations += 1 sleep(self.sleep_time) return iterations
python
def run(self): """ Begins the runtime execution. """ iterations = 0 queue = self.queue.tick() while True: try: next(queue) except StopIteration: break iterations += 1 sleep(self.sleep_time) return iterations
[ "def", "run", "(", "self", ")", ":", "iterations", "=", "0", "queue", "=", "self", ".", "queue", ".", "tick", "(", ")", "while", "True", ":", "try", ":", "next", "(", "queue", ")", "except", "StopIteration", ":", "break", "iterations", "+=", "1", "sleep", "(", "self", ".", "sleep_time", ")", "return", "iterations" ]
Begins the runtime execution.
[ "Begins", "the", "runtime", "execution", "." ]
15592e5b0c217afb00ac03503f8d0d7453d4baf4
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/runtime.py#L34-L46
train
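run() is a drain loop over the queue's tick() generator: one scheduling pass per next(), sleep between passes, stop on StopIteration. The same pattern in isolation, with a toy generator and a shortened sleep so it runs instantly.

from time import sleep

def tick():
    # Stand-in for JobQueue.tick(): yields once per scheduling pass, then ends.
    for step in range(3):
        yield step

iterations = 0
queue = tick()
while True:
    try:
        next(queue)
    except StopIteration:
        break
    iterations += 1
    sleep(0.01)  # the real Runtime sleeps self.sleep_time between passes
print(iterations)  # 3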
by46/simplekit
simplekit/objson/dynamic_class.py
make_dynamic_class
def make_dynamic_class(typename, field_names):
    """a factory function to create type dynamically

    The factory function is used by :func:`objson.load` and :func:`objson.loads`, creating the
    objects deserialized from JSON strings.

    The inspiration comes from :func:`collections.namedtuple`. The difference is that I don't use
    a class template to define a dynamic class; instead, I use the :func:`type` factory function.

    Class prototype definition ::

        class JsonObject(object):
            __identifier__ = "dolphin"

            def __init__(self, kv=None):
                if kv is None:
                    kv = dict()
                self.__dict__.update(kv)

            def __getitem__(self, key):
                return self.__dict__.get(key)

            def __setitem__(self, key, value):
                self.__dict__[key] = value

            def __iter__(self):
                return iter(self.__dict__)

            def __repr__(self):
                keys = sorted(self.__dict__.keys())
                text = ', '.join(["%s=%r" % (key, self[key]) for key in keys])
                return '{%s}' % text

            name=_property('name')

    Basic Usage ::

        from objson import make_dynamic_class, dumps

        Entity = make_dynamic_class('Entity', 'name, sex, age')
        entity = Entity()
        entity.name, entity.sex, entity.age = 'benjamin', 'male', 21
        dumps(entity)

    :param typename: dynamic class's name
    :param field_names: a :class:`list` of strings, or a single string of field names separated by
        commas, e.g. ``['name', 'sex']`` or ``"name,sex"``
    :return: a class type
    """
    if isinstance(field_names, basestring):
        field_names = field_names.replace(",", " ").split()

    field_names = map(str, field_names)
    safe_fields_names = map(_encode_property_name, field_names)
    attr = dict((safe_name, _property(name)) for name, safe_name in zip(field_names, safe_fields_names))
    attr['__doc__'] = typename
    attr['__identifier__'] = "dolphin"
    attr['__init__'] = _dynamic__init
    attr['__getitem__'] = lambda self, key: self.__dict__.get(key)
    attr['__setitem__'] = _dynamic__setitem
    attr['__iter__'] = lambda self: iter(self.__dict__)
    attr['__repr__'] = lambda self: "{%s}" % (', '.join([
        "%s=%r" % (key, self[key]) for key in sorted(self.__dict__.keys())
    ]))

    return type(typename, (object,), attr)
python
def make_dynamic_class(typename, field_names):
    """a factory function to create type dynamically

    The factory function is used by :func:`objson.load` and :func:`objson.loads`, creating the
    objects deserialized from JSON strings.

    The inspiration comes from :func:`collections.namedtuple`. The difference is that I don't use
    a class template to define a dynamic class; instead, I use the :func:`type` factory function.

    Class prototype definition ::

        class JsonObject(object):
            __identifier__ = "dolphin"

            def __init__(self, kv=None):
                if kv is None:
                    kv = dict()
                self.__dict__.update(kv)

            def __getitem__(self, key):
                return self.__dict__.get(key)

            def __setitem__(self, key, value):
                self.__dict__[key] = value

            def __iter__(self):
                return iter(self.__dict__)

            def __repr__(self):
                keys = sorted(self.__dict__.keys())
                text = ', '.join(["%s=%r" % (key, self[key]) for key in keys])
                return '{%s}' % text

            name=_property('name')

    Basic Usage ::

        from objson import make_dynamic_class, dumps

        Entity = make_dynamic_class('Entity', 'name, sex, age')
        entity = Entity()
        entity.name, entity.sex, entity.age = 'benjamin', 'male', 21
        dumps(entity)

    :param typename: dynamic class's name
    :param field_names: a :class:`list` of strings, or a single string of field names separated by
        commas, e.g. ``['name', 'sex']`` or ``"name,sex"``
    :return: a class type
    """
    if isinstance(field_names, basestring):
        field_names = field_names.replace(",", " ").split()

    field_names = map(str, field_names)
    safe_fields_names = map(_encode_property_name, field_names)
    attr = dict((safe_name, _property(name)) for name, safe_name in zip(field_names, safe_fields_names))
    attr['__doc__'] = typename
    attr['__identifier__'] = "dolphin"
    attr['__init__'] = _dynamic__init
    attr['__getitem__'] = lambda self, key: self.__dict__.get(key)
    attr['__setitem__'] = _dynamic__setitem
    attr['__iter__'] = lambda self: iter(self.__dict__)
    attr['__repr__'] = lambda self: "{%s}" % (', '.join([
        "%s=%r" % (key, self[key]) for key in sorted(self.__dict__.keys())
    ]))

    return type(typename, (object,), attr)
[ "def", "make_dynamic_class", "(", "typename", ",", "field_names", ")", ":", "if", "isinstance", "(", "field_names", ",", "basestring", ")", ":", "field_names", "=", "field_names", ".", "replace", "(", "\",\"", ",", "\" \"", ")", ".", "split", "(", ")", "field_names", "=", "map", "(", "str", ",", "field_names", ")", "safe_fields_names", "=", "map", "(", "_encode_property_name", ",", "field_names", ")", "attr", "=", "dict", "(", "(", "safe_name", ",", "_property", "(", "name", ")", ")", "for", "name", ",", "safe_name", "in", "zip", "(", "field_names", ",", "safe_fields_names", ")", ")", "attr", "[", "'__doc__'", "]", "=", "typename", "attr", "[", "'__identifier__'", "]", "=", "\"dolphin\"", "attr", "[", "'__init__'", "]", "=", "_dynamic__init", "attr", "[", "'__getitem__'", "]", "=", "lambda", "self", ",", "key", ":", "self", ".", "__dict__", ".", "get", "(", "key", ")", "attr", "[", "'__setitem__'", "]", "=", "_dynamic__setitem", "attr", "[", "'__iter__'", "]", "=", "lambda", "self", ":", "iter", "(", "self", ".", "__dict__", ")", "attr", "[", "'__repr__'", "]", "=", "lambda", "self", ":", "\"{%s}\"", "%", "(", "', '", ".", "join", "(", "[", "\"%s=%r\"", "%", "(", "key", ",", "self", "[", "key", "]", ")", "for", "key", "in", "sorted", "(", "self", ".", "__dict__", ".", "keys", "(", ")", ")", "]", ")", ")", "return", "type", "(", "typename", ",", "(", "object", ",", ")", ",", "attr", ")" ]
a factory function to create type dynamically

    The factory function is used by :func:`objson.load` and :func:`objson.loads`, creating the
    objects deserialized from JSON strings.

    The inspiration comes from :func:`collections.namedtuple`. The difference is that I don't use
    a class template to define a dynamic class; instead, I use the :func:`type` factory function.

    Class prototype definition ::

        class JsonObject(object):
            __identifier__ = "dolphin"

            def __init__(self, kv=None):
                if kv is None:
                    kv = dict()
                self.__dict__.update(kv)

            def __getitem__(self, key):
                return self.__dict__.get(key)

            def __setitem__(self, key, value):
                self.__dict__[key] = value

            def __iter__(self):
                return iter(self.__dict__)

            def __repr__(self):
                keys = sorted(self.__dict__.keys())
                text = ', '.join(["%s=%r" % (key, self[key]) for key in keys])
                return '{%s}' % text

            name=_property('name')

    Basic Usage ::

        from objson import make_dynamic_class, dumps

        Entity = make_dynamic_class('Entity', 'name, sex, age')
        entity = Entity()
        entity.name, entity.sex, entity.age = 'benjamin', 'male', 21
        dumps(entity)

    :param typename: dynamic class's name
    :param field_names: a :class:`list` of strings, or a single string of field names separated by
        commas, e.g. ``['name', 'sex']`` or ``"name,sex"``
    :return: a class type
[ "a", "factory", "function", "to", "create", "type", "dynamically" ]
33f3ce6de33accc185e1057f096af41859db5976
https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/objson/dynamic_class.py#L46-L113
train
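The heart of the record above is the three-argument type(name, bases, attrs) call. Here is a compact Python 3 rendition of the same trick; the property plumbing is simplified and _encode_property_name is omitted, so this is a sketch rather than simplekit's implementation.

def _property(name):
    # Data descriptor reading/writing the instance __dict__ under `name`.
    return property(lambda self: self.__dict__.get(name),
                    lambda self, value: self.__dict__.__setitem__(name, value))

def make_class(typename, field_names):
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    attrs = {name: _property(name) for name in field_names}
    attrs['__init__'] = lambda self, kv=None: self.__dict__.update(kv or {})
    return type(typename, (object,), attrs)

Entity = make_class('Entity', 'name, sex, age')
entity = Entity({'name': 'benjamin'})
entity.age = 21
print(entity.name, entity.age)  # benjamin 21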
sirfoga/pyhal
hal/profile/mem.py
get_memory_usage
def get_memory_usage(): """Gets RAM memory usage :return: MB of memory used by this process """ process = psutil.Process(os.getpid()) mem = process.memory_info().rss return mem / (1024 * 1024)
python
def get_memory_usage(): """Gets RAM memory usage :return: MB of memory used by this process """ process = psutil.Process(os.getpid()) mem = process.memory_info().rss return mem / (1024 * 1024)
[ "def", "get_memory_usage", "(", ")", ":", "process", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", "mem", "=", "process", ".", "memory_info", "(", ")", ".", "rss", "return", "mem", "/", "(", "1024", "*", "1024", ")" ]
Gets RAM memory usage :return: MB of memory used by this process
[ "Gets", "RAM", "memory", "usage" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/profile/mem.py#L11-L18
train
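A quick way to sanity-check the helper (requires psutil; the exact numbers depend on the platform and allocator behaviour, so treat the output as approximate):

import os
import psutil

def get_memory_usage_mb():
    # RSS of the current process, converted from bytes to MB.
    return psutil.Process(os.getpid()).memory_info().rss / (1024 * 1024)

before = get_memory_usage_mb()
blob = bytearray(50 * 1024 * 1024)  # hold roughly 50 MB
after = get_memory_usage_mb()
print('grew by roughly %.1f MB' % (after - before))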
computational-metabolomics/msp2db
msp2db/db.py
create_db
def create_db(file_pth): """ Create an empty SQLite database for library spectra. Example: >>> from msp2db.db import create_db >>> db_pth = 'library.db' >>> create_db(file_pth=db_pth) Args: file_pth (str): File path for SQLite database """ conn = sqlite3.connect(file_pth) c = conn.cursor() c.execute('DROP TABLE IF EXISTS library_spectra_source') c.execute('''CREATE TABLE library_spectra_source ( id integer PRIMARY KEY, name text NOT NULL, created_at date, parsing_software text )''' ) c.execute('DROP TABLE IF EXISTS metab_compound') c.execute('''CREATE TABLE metab_compound ( inchikey_id text PRIMARY KEY, name text, pubchem_id text, chemspider_id text, other_names text, exact_mass real, molecular_formula text, molecular_weight real, compound_class text, smiles text, created_at date, updated_at date )''') c.execute('DROP TABLE IF EXISTS library_spectra_meta') c.execute('''CREATE TABLE library_spectra_meta ( id integer PRIMARY KEY, name text, collision_energy text, ms_level real, accession text NOT NULL, resolution text, polarity integer, fragmentation_type text, precursor_mz real, precursor_type text, instrument_type text, instrument text, copyright text, column text, mass_accuracy real, mass_error real, origin text, splash text, retention_index real, retention_time real, library_spectra_source_id integer NOT NULL, inchikey_id text NOT NULL, FOREIGN KEY(library_spectra_source_id) REFERENCES library_spectra_source(id), FOREIGN KEY(inchikey_id) REFERENCES metab_compound(inchikey_id) )''' ) c.execute('DROP TABLE IF EXISTS library_spectra') c.execute('''CREATE TABLE library_spectra ( id integer PRIMARY KEY, mz real NOT NULL, i real NOT NULL, other text, library_spectra_meta_id integer NOT NULL, FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id) )''' ) c.execute('DROP TABLE IF EXISTS library_spectra_annotation') c.execute('''CREATE TABLE library_spectra_annotation ( id integer PRIMARY KEY, mz real, tentative_formula text, mass_error real, library_spectra_meta_id integer NOT NULL, FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id) )''' )
python
def create_db(file_pth): """ Create an empty SQLite database for library spectra. Example: >>> from msp2db.db import create_db >>> db_pth = 'library.db' >>> create_db(file_pth=db_pth) Args: file_pth (str): File path for SQLite database """ conn = sqlite3.connect(file_pth) c = conn.cursor() c.execute('DROP TABLE IF EXISTS library_spectra_source') c.execute('''CREATE TABLE library_spectra_source ( id integer PRIMARY KEY, name text NOT NULL, created_at date, parsing_software text )''' ) c.execute('DROP TABLE IF EXISTS metab_compound') c.execute('''CREATE TABLE metab_compound ( inchikey_id text PRIMARY KEY, name text, pubchem_id text, chemspider_id text, other_names text, exact_mass real, molecular_formula text, molecular_weight real, compound_class text, smiles text, created_at date, updated_at date )''') c.execute('DROP TABLE IF EXISTS library_spectra_meta') c.execute('''CREATE TABLE library_spectra_meta ( id integer PRIMARY KEY, name text, collision_energy text, ms_level real, accession text NOT NULL, resolution text, polarity integer, fragmentation_type text, precursor_mz real, precursor_type text, instrument_type text, instrument text, copyright text, column text, mass_accuracy real, mass_error real, origin text, splash text, retention_index real, retention_time real, library_spectra_source_id integer NOT NULL, inchikey_id text NOT NULL, FOREIGN KEY(library_spectra_source_id) REFERENCES library_spectra_source(id), FOREIGN KEY(inchikey_id) REFERENCES metab_compound(inchikey_id) )''' ) c.execute('DROP TABLE IF EXISTS library_spectra') c.execute('''CREATE TABLE library_spectra ( id integer PRIMARY KEY, mz real NOT NULL, i real NOT NULL, other text, library_spectra_meta_id integer NOT NULL, FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id) )''' ) c.execute('DROP TABLE IF EXISTS library_spectra_annotation') c.execute('''CREATE TABLE library_spectra_annotation ( id integer PRIMARY KEY, mz real, tentative_formula text, mass_error real, library_spectra_meta_id integer NOT NULL, FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id) )''' )
[ "def", "create_db", "(", "file_pth", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "file_pth", ")", "c", "=", "conn", ".", "cursor", "(", ")", "c", ".", "execute", "(", "'DROP TABLE IF EXISTS library_spectra_source'", ")", "c", ".", "execute", "(", "'''CREATE TABLE library_spectra_source (\n id integer PRIMARY KEY,\n name text NOT NULL,\n created_at date,\n parsing_software text\n )'''", ")", "c", ".", "execute", "(", "'DROP TABLE IF EXISTS metab_compound'", ")", "c", ".", "execute", "(", "'''CREATE TABLE metab_compound (\n inchikey_id text PRIMARY KEY,\n name text,\n pubchem_id text,\n chemspider_id text,\n other_names text,\n exact_mass real,\n molecular_formula text,\n molecular_weight real,\n compound_class text,\n smiles text,\n created_at date,\n updated_at date\n\n )'''", ")", "c", ".", "execute", "(", "'DROP TABLE IF EXISTS library_spectra_meta'", ")", "c", ".", "execute", "(", "'''CREATE TABLE library_spectra_meta (\n id integer PRIMARY KEY,\n name text,\n collision_energy text,\n ms_level real,\n accession text NOT NULL,\n resolution text,\n polarity integer,\n fragmentation_type text,\n precursor_mz real,\n precursor_type text,\n instrument_type text,\n instrument text,\n copyright text,\n column text,\n mass_accuracy real,\n mass_error real,\n origin text,\n splash text,\n retention_index real, \n retention_time real,\n library_spectra_source_id integer NOT NULL,\n inchikey_id text NOT NULL,\n FOREIGN KEY(library_spectra_source_id) REFERENCES library_spectra_source(id),\n FOREIGN KEY(inchikey_id) REFERENCES metab_compound(inchikey_id)\n )'''", ")", "c", ".", "execute", "(", "'DROP TABLE IF EXISTS library_spectra'", ")", "c", ".", "execute", "(", "'''CREATE TABLE library_spectra (\n id integer PRIMARY KEY,\n mz real NOT NULL,\n i real NOT NULL,\n other text,\n library_spectra_meta_id integer NOT NULL,\n FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)\n )'''", ")", "c", ".", "execute", "(", "'DROP TABLE IF EXISTS library_spectra_annotation'", ")", "c", ".", "execute", "(", "'''CREATE TABLE library_spectra_annotation (\n id integer PRIMARY KEY,\n mz real,\n tentative_formula text,\n mass_error real,\n library_spectra_meta_id integer NOT NULL,\n FOREIGN KEY (library_spectra_meta_id) REFERENCES library_spectra_meta(id)\n )'''", ")" ]
Create an empty SQLite database for library spectra. Example: >>> from msp2db.db import create_db >>> db_pth = 'library.db' >>> create_db(file_pth=db_pth) Args: file_pth (str): File path for SQLite database
[ "Create", "an", "empty", "SQLite", "database", "for", "library", "spectra", "." ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/db.py#L6-L96
train
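A smoke test for the schema created above, assuming the record's create_db is importable in the current scope; the file name is an arbitrary scratch path.

import sqlite3
from datetime import date

db_pth = 'library.db'
create_db(file_pth=db_pth)  # the function from this record

conn = sqlite3.connect(db_pth)
conn.execute(
    "INSERT INTO library_spectra_source (id, name, created_at, parsing_software) VALUES (?, ?, ?, ?)",
    (1, 'massbank', date.today().isoformat(), 'msp2db'),
)
conn.commit()
print(conn.execute('SELECT name FROM library_spectra_source').fetchall())  # [('massbank',)]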
computational-metabolomics/msp2db
msp2db/db.py
get_connection
def get_connection(db_type, db_pth, user=None, password=None, name=None): """ Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql" mysql.connector needs to be installed. If using "django_mysql" Django needs to be installed. Args: db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql" Returns: sql connection object """ if db_type == 'sqlite': print(db_pth) conn = sqlite3.connect(db_pth) elif db_type == 'mysql': import mysql.connector conn = mysql.connector.connect(user=user, password=password, database=name) elif db_type == 'django_mysql': from django.db import connection as conn else: print('unsupported database type: {}, choices are "sqlite", "mysql" or "django_mysql"'.format(db_type)) return conn
python
def get_connection(db_type, db_pth, user=None, password=None, name=None): """ Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql" mysql.connector needs to be installed. If using "django_mysql" Django needs to be installed. Args: db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql" Returns: sql connection object """ if db_type == 'sqlite': print(db_pth) conn = sqlite3.connect(db_pth) elif db_type == 'mysql': import mysql.connector conn = mysql.connector.connect(user=user, password=password, database=name) elif db_type == 'django_mysql': from django.db import connection as conn else: print('unsupported database type: {}, choices are "sqlite", "mysql" or "django_mysql"'.format(db_type)) return conn
[ "def", "get_connection", "(", "db_type", ",", "db_pth", ",", "user", "=", "None", ",", "password", "=", "None", ",", "name", "=", "None", ")", ":", "if", "db_type", "==", "'sqlite'", ":", "print", "(", "db_pth", ")", "conn", "=", "sqlite3", ".", "connect", "(", "db_pth", ")", "elif", "db_type", "==", "'mysql'", ":", "import", "mysql", ".", "connector", "conn", "=", "mysql", ".", "connector", ".", "connect", "(", "user", "=", "user", ",", "password", "=", "password", ",", "database", "=", "name", ")", "elif", "db_type", "==", "'django_mysql'", ":", "from", "django", ".", "db", "import", "connection", "as", "conn", "else", ":", "print", "(", "'unsupported database type: {}, choices are \"sqlite\", \"mysql\" or \"django_mysql\"'", ".", "format", "(", "db_type", ")", ")", "return", "conn" ]
Get a connection to a SQL database. Can be used for SQLite, MySQL or Django MySQL database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') If using "mysql" mysql.connector needs to be installed. If using "django_mysql" Django needs to be installed. Args: db_type (str): Type of database can either be "sqlite", "mysql" or "django_mysql" Returns: sql connection object
[ "Get", "a", "connection", "to", "a", "SQL", "database", ".", "Can", "be", "used", "for", "SQLite", "MySQL", "or", "Django", "MySQL", "database" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/db.py#L99-L129
train
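One caveat with the branching above: an unrecognised db_type only prints a message, so the final return conn raises UnboundLocalError. A sqlite-only variant that fails loudly instead, shown as an alternative sketch rather than the library's behaviour:

import sqlite3

def get_connection_strict(db_type, db_pth=None):
    if db_type == 'sqlite':
        return sqlite3.connect(db_pth)
    # Raise instead of printing, so callers cannot proceed with an unbound conn.
    raise ValueError('unsupported database type: {}'.format(db_type))

conn = get_connection_strict('sqlite', ':memory:')
print(conn.execute('SELECT 1').fetchone())  # (1,)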
computational-metabolomics/msp2db
msp2db/db.py
db_dict
def db_dict(c): """ Get a dictionary of the library spectra from a database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') >>> test_db_d = db_dict(conn.cursor()) If using a large database the resulting dictionary will be very large! Args: c (cursor): SQL database connection cursor Returns: A dictionary with the following keys 'library_spectra', 'library_spectra_meta', 'library_spectra_annotations', 'library_spectra_source' and 'metab_compound'. Where corresponding values for each key are list of list containing all the rows in the database. """ db_d = {} c.execute('SELECT * FROM library_spectra') db_d['library_spectra'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_meta') db_d['library_spectra_meta'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_annotation') db_d['library_spectra_annotations'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_source') db_d['library_spectra_source'] = [list(row) for row in c] c.execute('SELECT * FROM metab_compound') db_d['metab_compound'] = [list(row) for row in c] return db_d
python
def db_dict(c): """ Get a dictionary of the library spectra from a database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') >>> test_db_d = db_dict(conn.cursor()) If using a large database the resulting dictionary will be very large! Args: c (cursor): SQL database connection cursor Returns: A dictionary with the following keys 'library_spectra', 'library_spectra_meta', 'library_spectra_annotations', 'library_spectra_source' and 'metab_compound'. Where corresponding values for each key are list of list containing all the rows in the database. """ db_d = {} c.execute('SELECT * FROM library_spectra') db_d['library_spectra'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_meta') db_d['library_spectra_meta'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_annotation') db_d['library_spectra_annotations'] = [list(row) for row in c] c.execute('SELECT * FROM library_spectra_source') db_d['library_spectra_source'] = [list(row) for row in c] c.execute('SELECT * FROM metab_compound') db_d['metab_compound'] = [list(row) for row in c] return db_d
[ "def", "db_dict", "(", "c", ")", ":", "db_d", "=", "{", "}", "c", ".", "execute", "(", "'SELECT * FROM library_spectra'", ")", "db_d", "[", "'library_spectra'", "]", "=", "[", "list", "(", "row", ")", "for", "row", "in", "c", "]", "c", ".", "execute", "(", "'SELECT * FROM library_spectra_meta'", ")", "db_d", "[", "'library_spectra_meta'", "]", "=", "[", "list", "(", "row", ")", "for", "row", "in", "c", "]", "c", ".", "execute", "(", "'SELECT * FROM library_spectra_annotation'", ")", "db_d", "[", "'library_spectra_annotations'", "]", "=", "[", "list", "(", "row", ")", "for", "row", "in", "c", "]", "c", ".", "execute", "(", "'SELECT * FROM library_spectra_source'", ")", "db_d", "[", "'library_spectra_source'", "]", "=", "[", "list", "(", "row", ")", "for", "row", "in", "c", "]", "c", ".", "execute", "(", "'SELECT * FROM metab_compound'", ")", "db_d", "[", "'metab_compound'", "]", "=", "[", "list", "(", "row", ")", "for", "row", "in", "c", "]", "return", "db_d" ]
Get a dictionary of the library spectra from a database Example: >>> from msp2db.db import get_connection >>> conn = get_connection('sqlite', 'library.db') >>> test_db_d = db_dict(conn.cursor()) If using a large database the resulting dictionary will be very large! Args: c (cursor): SQL database connection cursor Returns: A dictionary with the following keys 'library_spectra', 'library_spectra_meta', 'library_spectra_annotations', 'library_spectra_source' and 'metab_compound'. Where corresponding values for each key are list of list containing all the rows in the database.
[ "Get", "a", "dictionary", "of", "the", "library", "spectra", "from", "a", "database" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/db.py#L132-L167
train
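A minimal usage sketch for the row above, using the names from its own docstring example (it assumes a 'library.db' file previously created by msp2db with the five tables present; the printed key is illustrative):

from msp2db.db import get_connection, db_dict

conn = get_connection('sqlite', 'library.db')  # file name is illustrative
db_d = db_dict(conn.cursor())
# each value is a list of row-lists pulled from the corresponding table
print(len(db_d['library_spectra_meta']), 'meta rows')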
computational-metabolomics/msp2db
msp2db/db.py
insert_query_m
def insert_query_m(data, table, conn, columns=None, db_type='mysql'): """ Insert python list of tuples into SQL table Args: data (list): List of tuples table (str): Name of database table conn (connection object): database connection object columns (str): String of column names to use; if not assigned, all columns are presumed to be used [Optional] db_type (str): Either "sqlite" or "mysql" """ # if the length of data is very large we need to break it into chunks; insert_query_m is then used recursively until # all data has been inserted if len(data) > 10000: _chunk_query(data, 10000, columns, conn, table, db_type) else: # sqlite and mysql have different placeholder strings (? or %s) to use if db_type == 'sqlite': type_sign = '?' else: type_sign = '%s' # create a string of types for the insertion string (e.g. ?,?,? if inserting 3 columns of data) type_com = type_sign + ", " type = type_com * (len(data[0]) - 1) type = type + type_sign # if using specific columns to insert data if columns: stmt = "INSERT INTO " + table + "( " + columns + ") VALUES (" + type + ")" else: stmt = "INSERT INTO " + table + " VALUES (" + type + ")" # execute query cursor = conn.cursor() cursor.executemany(stmt, data) conn.commit()
python
def insert_query_m(data, table, conn, columns=None, db_type='mysql'): """ Insert python list of tuples into SQL table Args: data (list): List of tuples table (str): Name of database table conn (connection object): database connection object columns (str): String of column names to use; if not assigned, all columns are presumed to be used [Optional] db_type (str): Either "sqlite" or "mysql" """ # if the length of data is very large we need to break it into chunks; insert_query_m is then used recursively until # all data has been inserted if len(data) > 10000: _chunk_query(data, 10000, columns, conn, table, db_type) else: # sqlite and mysql have different placeholder strings (? or %s) to use if db_type == 'sqlite': type_sign = '?' else: type_sign = '%s' # create a string of types for the insertion string (e.g. ?,?,? if inserting 3 columns of data) type_com = type_sign + ", " type = type_com * (len(data[0]) - 1) type = type + type_sign # if using specific columns to insert data if columns: stmt = "INSERT INTO " + table + "( " + columns + ") VALUES (" + type + ")" else: stmt = "INSERT INTO " + table + " VALUES (" + type + ")" # execute query cursor = conn.cursor() cursor.executemany(stmt, data) conn.commit()
[ "def", "insert_query_m", "(", "data", ",", "table", ",", "conn", ",", "columns", "=", "None", ",", "db_type", "=", "'mysql'", ")", ":", "# if length of data is very large we need to break into chunks the insert_query_m is then used recursively untill", "# all data has been inserted", "if", "len", "(", "data", ")", ">", "10000", ":", "_chunk_query", "(", "data", ",", "10000", ",", "columns", ",", "conn", ",", "table", ",", "db_type", ")", "else", ":", "# sqlite and mysql have type string (? or %s) reference to use", "if", "db_type", "==", "'sqlite'", ":", "type_sign", "=", "'?'", "else", ":", "type_sign", "=", "'%s'", "# create a string of types for the insertion string (e.g. ?,?,? if inserting 3 columns of data)", "type_com", "=", "type_sign", "+", "\", \"", "type", "=", "type_com", "*", "(", "len", "(", "data", "[", "0", "]", ")", "-", "1", ")", "type", "=", "type", "+", "type_sign", "# if using specific columns to insert data", "if", "columns", ":", "stmt", "=", "\"INSERT INTO \"", "+", "table", "+", "\"( \"", "+", "columns", "+", "\") VALUES (\"", "+", "type", "+", "\")\"", "else", ":", "stmt", "=", "\"INSERT INTO \"", "+", "table", "+", "\" VALUES (\"", "+", "type", "+", "\")\"", "# execute query", "cursor", "=", "conn", ".", "cursor", "(", ")", "cursor", ".", "executemany", "(", "stmt", ",", "data", ")", "conn", ".", "commit", "(", ")" ]
Insert python list of tuples into SQL table Args: data (list): List of tuples table (str): Name of database table conn (connection object): database connection object columns (str): String of column names to use; if not assigned, all columns are presumed to be used [Optional] db_type (str): Either "sqlite" or "mysql"
[ "Insert", "python", "list", "of", "tuples", "into", "SQL", "table" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/db.py#L170-L205
train
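A minimal sketch of calling insert_query_m against an in-memory SQLite database (the table name, columns, and rows are illustrative; the import path follows this row's path field, msp2db/db.py):

import sqlite3
from msp2db.db import insert_query_m

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE demo (id INTEGER, name TEXT)')
rows = [(1, 'alpha'), (2, 'beta')]
# db_type='sqlite' selects '?' placeholders; lists longer than 10000 rows
# would be split into chunks via _chunk_query before insertion
insert_query_m(rows, 'demo', conn, columns='id, name', db_type='sqlite')
print(conn.execute('SELECT COUNT(*) FROM demo').fetchone())  # (2,)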
computational-metabolomics/msp2db
msp2db/db.py
_chunk_query
def _chunk_query(l, n, cn, conn, table, db_type): """ Call for inserting an SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): Either "sqlite" or "mysql" """ # For each chunk start i in range(0, len(l), n), insert the slice l[i:i + n] [insert_query_m(l[i:i + n], table, conn, cn, db_type) for i in range(0, len(l), n)]
python
def _chunk_query(l, n, cn, conn, table, db_type): """ Call for inserting an SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): Either "sqlite" or "mysql" """ # For each chunk start i in range(0, len(l), n), insert the slice l[i:i + n] [insert_query_m(l[i:i + n], table, conn, cn, db_type) for i in range(0, len(l), n)]
[ "def", "_chunk_query", "(", "l", ",", "n", ",", "cn", ",", "conn", ",", "table", ",", "db_type", ")", ":", "# For item i in a range that is a length of l,", "[", "insert_query_m", "(", "l", "[", "i", ":", "i", "+", "n", "]", ",", "table", ",", "conn", ",", "cn", ",", "db_type", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", "]" ]
Call for inserting an SQL query in chunks based on n rows Args: l (list): List of tuples n (int): Number of rows cn (str): Column names conn (connection object): Database connection object table (str): Table name db_type (str): Either "sqlite" or "mysql"
[ "Call", "for", "inserting", "SQL", "query", "in", "chunks", "based", "on", "n", "rows" ]
f86f01efca26fd2745547c9993f97337c6bef123
https://github.com/computational-metabolomics/msp2db/blob/f86f01efca26fd2745547c9993f97337c6bef123/msp2db/db.py#L207-L220
train
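The chunking arithmetic in isolation — a standalone sketch of the range(0, len(l), n) slicing pattern the helper above relies on (plain Python, no library imports; the sizes are illustrative):

l = list(range(25))
n = 10
chunks = [l[i:i + n] for i in range(0, len(l), n)]
# three chunks: 10 + 10 + 5 items; the final slice is simply shorter
print([len(c) for c in chunks])  # [10, 10, 5]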
Frzk/Ellis
ellis_actions/mail.py
send
async def send(from_addr, to_addrs, subject="Ellis", msg="", **kwargs): """ Sends an e-mail to the provided address. :param from_addr: E-mail address of the sender. :type from_addr: str :param to_addrs: E-mail address(es) of the receiver(s). :type to_addrs: list or str :param subject: Subject line of the e-mail. :type subject: str :param msg: Message to be sent. :type msg: str :param kwargs: Extra key/value pairs appended to the message body. """ async with SMTP() as client: msg = "Subject: {0}\n\n{1}".format(subject, msg) if kwargs: # To append kwargs to the given message, we first # transform it into a more human-friendly string: values = "\n".join(["{0}: {1}".format(k, v) for k, v in kwargs.items()]) # Actually append caught values to the message: msg = ("{0}\n\nThe following variables have been caught:" "\n{1}".format(msg, values)) try: await client.sendmail(from_addr, to_addrs, msg) except: # FIXME: print a friendly message to stdout. raise
python
async def send(from_addr, to_addrs, subject="Ellis", msg="", **kwargs): """ Sends an e-mail to the provided address. :param from_addr: E-mail address of the sender. :type from_addr: str :param to_addrs: E-mail address(es) of the receiver(s). :type to_addrs: list or str :param subject: Subject line of the e-mail. :type subject: str :param msg: Message to be sent. :type msg: str :param kwargs: Extra key/value pairs appended to the message body. """ async with SMTP() as client: msg = "Subject: {0}\n\n{1}".format(subject, msg) if kwargs: # To append kwargs to the given message, we first # transform it into a more human-friendly string: values = "\n".join(["{0}: {1}".format(k, v) for k, v in kwargs.items()]) # Actually append caught values to the message: msg = ("{0}\n\nThe following variables have been caught:" "\n{1}".format(msg, values)) try: await client.sendmail(from_addr, to_addrs, msg) except: # FIXME: print a friendly message to stdout. raise
[ "async", "def", "send", "(", "from_addr", ",", "to_addrs", ",", "subject", "=", "\"Ellis\"", ",", "msg", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "async", "with", "SMTP", "(", ")", "as", "client", ":", "msg", "=", "\"Subject: {0}\\n\\n{1}\"", ".", "format", "(", "subject", ",", "msg", ")", "if", "kwargs", ":", "# To append kwargs to the given message, we first", "# transform it into a more human friendly string:", "values", "=", "\"\\n\"", ".", "join", "(", "[", "\"{0}: {1}\"", ".", "format", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "]", ")", "# Actually append caught values to the message:", "msg", "=", "(", "\"{0}\\n\\nThe following variables have been caught:\"", "\"\\n{1}\"", ".", "format", "(", "msg", ",", "values", ")", ")", "try", ":", "await", "client", ".", "sendmail", "(", "from_addr", ",", "to_addrs", ",", "msg", ")", "except", ":", "# FIXME: print a friendly message to stdout.", "raise" ]
Sends an e-mail to the provided address. :param from_addr: E-mail address of the sender. :type from_addr: str :param to_addrs: E-mail address(es) of the receiver(s). :type to_addrs: list or str :param subject: Subject line of the e-mail. :type subject: str :param msg: Message to be sent. :type msg: str :param kwargs: Extra key/value pairs appended to the message body.
[ "Sends", "an", "e", "-", "mail", "to", "the", "provided", "address", "." ]
39ce8987cbc503354cf1f45927344186a8b18363
https://github.com/Frzk/Ellis/blob/39ce8987cbc503354cf1f45927344186a8b18363/ellis_actions/mail.py#L10-L39
train
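A hedged usage sketch for the coroutine above (the addresses, subject, and caught variables are illustrative, and it assumes an SMTP server reachable with the library's default SMTP() settings):

import asyncio
from ellis_actions.mail import send  # module path from this row

asyncio.run(
    send('ellis@example.org', ['admin@example.org'],
         subject='Ellis alert', msg='Rule matched.',
         rule='ssh_bruteforce', host='10.0.0.5')  # extra kwargs are appended to the body
)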
sirfoga/pyhal
hal/ml/correlation.py
CorrelationMatrix.show_correlation_matrix
def show_correlation_matrix(self, correlation_matrix): """Shows the given correlation matrix as an image :param correlation_matrix: Correlation matrix of features """ cr_plot.create_correlation_matrix_plot( correlation_matrix, self.title, self.headers_to_test ) pyplot.show()
python
def show_correlation_matrix(self, correlation_matrix): """Shows the given correlation matrix as an image :param correlation_matrix: Correlation matrix of features """ cr_plot.create_correlation_matrix_plot( correlation_matrix, self.title, self.headers_to_test ) pyplot.show()
[ "def", "show_correlation_matrix", "(", "self", ",", "correlation_matrix", ")", ":", "cr_plot", ".", "create_correlation_matrix_plot", "(", "correlation_matrix", ",", "self", ".", "title", ",", "self", ".", "headers_to_test", ")", "pyplot", ".", "show", "(", ")" ]
Shows the given correlation matrix as an image :param correlation_matrix: Correlation matrix of features
[ "Shows", "the", "given", "correlation", "matrix", "as", "image" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/ml/correlation.py#L42-L50
train
sirfoga/pyhal
hal/ml/correlation.py
CorrelationMatrix.save_to_file
def save_to_file(self, out_file): """Saves the correlation matrix of the selected headers :param out_file: Output file """ correlation_matrix = self.get_correlation_matrix_from_columns() cr_plot.create_correlation_matrix_plot( correlation_matrix, self.title, self.headers_to_test) fig = pyplot.gcf()  # get reference to figure fig.set_size_inches(23.4, 23.4) pyplot.savefig(out_file, dpi=120)
python
def save_to_file(self, out_file): """Saves the correlation matrix of the selected headers :param out_file: Output file """ correlation_matrix = self.get_correlation_matrix_from_columns() cr_plot.create_correlation_matrix_plot( correlation_matrix, self.title, self.headers_to_test) fig = pyplot.gcf()  # get reference to figure fig.set_size_inches(23.4, 23.4) pyplot.savefig(out_file, dpi=120)
[ "def", "save_to_file", "(", "self", ",", "out_file", ")", ":", "correlation_matrix", "=", "self", ".", "get_correlation_matrix_from_columns", "(", ")", "cr_plot", ".", "create_correlation_matrix_plot", "(", "correlation_matrix", ",", "self", ".", "title", ",", "self", ".", "headers_to_test", ")", "fig", "=", "pyplot", ".", "gcf", "(", ")", "# get reference to figure", "fig", ".", "set_size_inches", "(", "23.4", ",", "23.4", ")", "pyplot", ".", "savefig", "(", "out_file", ",", "dpi", "=", "120", ")" ]
Saves the correlation matrix of the selected headers :param out_file: Output file
[ "Saves", "correlation", "matrix", "of", "selected", "headers" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/ml/correlation.py#L77-L88
train
sirfoga/pyhal
hal/ml/correlation.py
CorrelationMatrix.save_correlation_matrix_from_folder
def save_correlation_matrix_from_folder(folder_path): """Saves each file's correlation matrix of common headers :param folder_path: Folder containing logs data """ file_name = "output-" + str(int(time.time())) output_folder = os.path.join(folder_path, file_name) os.makedirs(output_folder) # make necessary folders to create directory for file in list_content(folder_path, False, False): if is_file(file) and str(file).endswith("csv"): print("Analysing file ", str(file)) file_name = Document(file).name.strip() output_file_name = file_name + ".png" # save output as image output_file_path = os.path.join(output_folder, output_file_name) headers, data = CSVParser.get_headers_data(file) # parse matrix = CorrelationMatrix( "Correlation of logs data for file " + file_name, headers, headers, data ) matrix.save_to_file(output_file_path)
python
def save_correlation_matrix_from_folder(folder_path): """Saves each file's correlation matrix of common headers :param folder_path: Folder containing logs data """ file_name = "output-" + str(int(time.time())) output_folder = os.path.join(folder_path, file_name) os.makedirs(output_folder) # make necessary folders to create directory for file in list_content(folder_path, False, False): if is_file(file) and str(file).endswith("csv"): print("Analysing file ", str(file)) file_name = Document(file).name.strip() output_file_name = file_name + ".png" # save output as image output_file_path = os.path.join(output_folder, output_file_name) headers, data = CSVParser.get_headers_data(file) # parse matrix = CorrelationMatrix( "Correlation of logs data for file " + file_name, headers, headers, data ) matrix.save_to_file(output_file_path)
[ "def", "save_correlation_matrix_from_folder", "(", "folder_path", ")", ":", "file_name", "=", "\"output-\"", "+", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", "output_folder", "=", "os", ".", "path", ".", "join", "(", "folder_path", ",", "file_name", ")", "os", ".", "makedirs", "(", "output_folder", ")", "# make necessary folders to create directory", "for", "file", "in", "list_content", "(", "folder_path", ",", "False", ",", "False", ")", ":", "if", "is_file", "(", "file", ")", "and", "str", "(", "file", ")", ".", "endswith", "(", "\"csv\"", ")", ":", "print", "(", "\"Analysing file \"", ",", "str", "(", "file", ")", ")", "file_name", "=", "Document", "(", "file", ")", ".", "name", ".", "strip", "(", ")", "output_file_name", "=", "file_name", "+", "\".png\"", "# save output as image", "output_file_path", "=", "os", ".", "path", ".", "join", "(", "output_folder", ",", "output_file_name", ")", "headers", ",", "data", "=", "CSVParser", ".", "get_headers_data", "(", "file", ")", "# parse", "matrix", "=", "CorrelationMatrix", "(", "\"Correlation of logs data for file \"", "+", "file_name", ",", "headers", ",", "headers", ",", "data", ")", "matrix", ".", "save_to_file", "(", "output_file_path", ")" ]
Saves each file's correlation matrix of common headers :param folder_path: Folder containing logs data
[ "Saves", "each", "file", "s", "correlation", "matrix", "of", "common", "headers" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/ml/correlation.py#L91-L114
train
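A hedged construction sketch mirroring the CorrelationMatrix(...) call inside save_correlation_matrix_from_folder above; the title, headers, toy data, output filename, and the assumption that data is a list of rows are inferences from that call, not confirmed API facts:

from hal.ml.correlation import CorrelationMatrix  # module path from these rows

headers = ['x', 'y']
data = [[1, 2], [2, 4], [3, 6]]  # toy rows: y is perfectly correlated with x
# argument order (title, headers, headers_to_test, data) mirrors the library's own call
matrix = CorrelationMatrix('demo correlation', headers, headers, data)
matrix.save_to_file('demo.png')  # saves a 23.4x23.4 inch figure at 120 dpi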
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/addons/mayamgmt/mayamgmt.py
MayaMGMT.run
def run(self, *args, **kwargs): """Start the tool :returns: None :rtype: None :raises: None """ pm = MayaPluginManager.get() guerilla = pm.get_plugin("GuerillaMGMT") mayawin = maya_main_window() guerilla.run(parent=mayawin)
python
def run(self, *args, **kwargs): """Start the tool :returns: None :rtype: None :raises: None """ pm = MayaPluginManager.get() guerilla = pm.get_plugin("GuerillaMGMT") mayawin = maya_main_window() guerilla.run(parent=mayawin)
[ "def", "run", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pm", "=", "MayaPluginManager", ".", "get", "(", ")", "guerilla", "=", "pm", ".", "get_plugin", "(", "\"GuerillaMGMT\"", ")", "mayawin", "=", "maya_main_window", "(", ")", "guerilla", ".", "run", "(", "parent", "=", "mayawin", ")" ]
Start the tool :returns: None :rtype: None :raises: None
[ "Start", "the", "tool" ]
c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/addons/mayamgmt/mayamgmt.py#L62-L72
train
lsst-sqre/sqre-codekit
codekit/pygithub.py
login_github
def login_github(token_path=None, token=None): """Log into GitHub using an existing token. Parameters ---------- token_path : str, optional Path to the token file. The default token is used otherwise. token: str, optional Literal token string. If specified, this value is used instead of reading from the token_path file. Returns ------- gh : :class:`github.GitHub` instance A GitHub login instance. """ token = codetools.github_token(token_path=token_path, token=token) g = Github(token) debug_ratelimit(g) return g
python
def login_github(token_path=None, token=None): """Log into GitHub using an existing token. Parameters ---------- token_path : str, optional Path to the token file. The default token is used otherwise. token: str, optional Literal token string. If specified, this value is used instead of reading from the token_path file. Returns ------- gh : :class:`github.GitHub` instance A GitHub login instance. """ token = codetools.github_token(token_path=token_path, token=token) g = Github(token) debug_ratelimit(g) return g
[ "def", "login_github", "(", "token_path", "=", "None", ",", "token", "=", "None", ")", ":", "token", "=", "codetools", ".", "github_token", "(", "token_path", "=", "token_path", ",", "token", "=", "token", ")", "g", "=", "Github", "(", "token", ")", "debug_ratelimit", "(", "g", ")", "return", "g" ]
Log into GitHub using an existing token. Parameters ---------- token_path : str, optional Path to the token file. The default token is used otherwise. token: str, optional Literal token string. If specified, this value is used instead of reading from the token_path file. Returns ------- gh : :class:`github.GitHub` instance A GitHub login instance.
[ "Log", "into", "GitHub", "using", "an", "existing", "token", "." ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/pygithub.py#L159-L180
train
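A short sketch of the login helper above (the token placeholder and alternative path are illustrative; debug_ratelimit is already invoked internally, as the code shows):

from codekit.pygithub import login_github

g = login_github(token='<personal access token>')
# alternatively: login_github(token_path='/path/to/token/file')  # path is illustrative
print(g.rate_limiting)  # PyGithub exposes the (remaining, limit) tuple used by debug_ratelimit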
lsst-sqre/sqre-codekit
codekit/pygithub.py
find_tag_by_name
def find_tag_by_name(repo, tag_name, safe=True): """Find tag by name in a github Repository Parameters ---------- repo: :class:`github.Repository` instance tag_name: str Short name of tag (not a fully qualified ref). safe: bool, optional Defaults to `True`. When `True`, `None` is returned on failure. When `False`, an exception will be raised upon failure. Returns ------- gh : :class:`github.GitRef` instance or `None` Raises ------ github.UnknownObjectException If git tag name does not exist in repo. """ tagfmt = 'tags/{ref}'.format(ref=tag_name) try: ref = repo.get_git_ref(tagfmt) if ref and ref.ref: return ref except github.UnknownObjectException: if not safe: raise return None
python
def find_tag_by_name(repo, tag_name, safe=True): """Find tag by name in a github Repository Parameters ---------- repo: :class:`github.Repository` instance tag_name: str Short name of tag (not a fully qualified ref). safe: bool, optional Defaults to `True`. When `True`, `None` is returned on failure. When `False`, an exception will be raised upon failure. Returns ------- gh : :class:`github.GitRef` instance or `None` Raises ------ github.UnknownObjectException If git tag name does not exist in repo. """ tagfmt = 'tags/{ref}'.format(ref=tag_name) try: ref = repo.get_git_ref(tagfmt) if ref and ref.ref: return ref except github.UnknownObjectException: if not safe: raise return None
[ "def", "find_tag_by_name", "(", "repo", ",", "tag_name", ",", "safe", "=", "True", ")", ":", "tagfmt", "=", "'tags/{ref}'", ".", "format", "(", "ref", "=", "tag_name", ")", "try", ":", "ref", "=", "repo", ".", "get_git_ref", "(", "tagfmt", ")", "if", "ref", "and", "ref", ".", "ref", ":", "return", "ref", "except", "github", ".", "UnknownObjectException", ":", "if", "not", "safe", ":", "raise", "return", "None" ]
Find tag by name in a github Repository Parameters ---------- repo: :class:`github.Repository` instance tag_name: str Short name of tag (not a fully qualified ref). safe: bool, optional Defaults to `True`. When `True`, `None` is returned on failure. When `False`, an exception will be raised upon failure. Returns ------- gh : :class:`github.GitRef` instance or `None` Raises ------ github.UnknownObjectException If git tag name does not exist in repo.
[ "Find", "tag", "by", "name", "in", "a", "github", "Repository" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/pygithub.py#L184-L217
train
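A usage sketch combining the two helpers above (the repository and tag names are illustrative; get_repo is a standard PyGithub call):

from codekit.pygithub import login_github, find_tag_by_name

g = login_github(token='<personal access token>')
repo = g.get_repo('lsst-sqre/sqre-codekit')  # repository name is illustrative
ref = find_tag_by_name(repo, 'v1.0.0')  # safe=True by default: returns None when absent
if ref is None:
    print('tag not found')
else:
    print(ref.ref)  # e.g. 'refs/tags/v1.0.0'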
lsst-sqre/sqre-codekit
codekit/pygithub.py
debug_ratelimit
def debug_ratelimit(g): """Log debug of github ratelimit information from last API call Parameters ---------- g: github.MainClass.Github github object """ assert isinstance(g, github.MainClass.Github), type(g) debug("github ratelimit: {rl}".format(rl=g.rate_limiting))
python
def debug_ratelimit(g): """Log debug of github ratelimit information from last API call Parameters ---------- g: github.MainClass.Github github object """ assert isinstance(g, github.MainClass.Github), type(g) debug("github ratelimit: {rl}".format(rl=g.rate_limiting))
[ "def", "debug_ratelimit", "(", "g", ")", ":", "assert", "isinstance", "(", "g", ",", "github", ".", "MainClass", ".", "Github", ")", ",", "type", "(", "g", ")", "debug", "(", "\"github ratelimit: {rl}\"", ".", "format", "(", "rl", "=", "g", ".", "rate_limiting", ")", ")" ]
Log debug of github ratelimit information from last API call Parameters ---------- g: github.MainClass.Github github object
[ "Log", "debug", "of", "github", "ratelimit", "information", "from", "last", "API", "call" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/pygithub.py#L292-L302
train
lsst-sqre/sqre-codekit
codekit/pygithub.py
get_default_ref
def get_default_ref(repo): """Return a `github.GitRef` object for the HEAD of the default branch. Parameters ---------- repo: github.Repository.Repository repo to get default branch head ref from Returns ------- head : :class:`github.GitRef` instance Raises ------ github.RateLimitExceededException codekit.pygithub.CaughtRepositoryError """ assert isinstance(repo, github.Repository.Repository), type(repo) # XXX this probably should be resolved via repos.yaml default_branch = repo.default_branch default_branch_ref = "heads/{ref}".format(ref=default_branch) # if accessing the default branch fails something is seriously wrong... try: head = repo.get_git_ref(default_branch_ref) except github.RateLimitExceededException: raise except github.GithubException as e: msg = "error getting ref: {ref}".format(ref=default_branch_ref) raise CaughtRepositoryError(repo, e, msg) from None return head
python
def get_default_ref(repo): """Return a `github.GitRef` object for the HEAD of the default branch. Parameters ---------- repo: github.Repository.Repository repo to get default branch head ref from Returns ------- head : :class:`github.GitRef` instance Raises ------ github.RateLimitExceededException codekit.pygithub.CaughtRepositoryError """ assert isinstance(repo, github.Repository.Repository), type(repo) # XXX this probably should be resolved via repos.yaml default_branch = repo.default_branch default_branch_ref = "heads/{ref}".format(ref=default_branch) # if accessing the default branch fails something is seriously wrong... try: head = repo.get_git_ref(default_branch_ref) except github.RateLimitExceededException: raise except github.GithubException as e: msg = "error getting ref: {ref}".format(ref=default_branch_ref) raise CaughtRepositoryError(repo, e, msg) from None return head
[ "def", "get_default_ref", "(", "repo", ")", ":", "assert", "isinstance", "(", "repo", ",", "github", ".", "Repository", ".", "Repository", ")", ",", "type", "(", "repo", ")", "# XXX this probably should be resolved via repos.yaml", "default_branch", "=", "repo", ".", "default_branch", "default_branch_ref", "=", "\"heads/{ref}\"", ".", "format", "(", "ref", "=", "default_branch", ")", "# if accessing the default branch fails something is seriously wrong...", "try", ":", "head", "=", "repo", ".", "get_git_ref", "(", "default_branch_ref", ")", "except", "github", ".", "RateLimitExceededException", ":", "raise", "except", "github", ".", "GithubException", "as", "e", ":", "msg", "=", "\"error getting ref: {ref}\"", ".", "format", "(", "ref", "=", "default_branch_ref", ")", "raise", "CaughtRepositoryError", "(", "repo", ",", "e", ",", "msg", ")", "from", "None", "return", "head" ]
Return a `github.GitRef` object for the HEAD of the default branch. Parameters ---------- repo: github.Repository.Repository repo to get default branch head ref from Returns ------- head : :class:`github.GitRef` instance Raises ------ github.RateLimitExceededException codekit.pygithub.CaughtRepositoryError
[ "Return", "a", "github", ".", "GitRef", "object", "for", "the", "HEAD", "of", "the", "default", "branch", "." ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/pygithub.py#L353-L385
train
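And a self-contained sketch for get_default_ref above (the repository name is illustrative; attribute access via .ref matches how find_tag_by_name inspects GitRef objects in these rows):

from codekit.pygithub import login_github, get_default_ref

g = login_github(token='<personal access token>')
repo = g.get_repo('lsst-sqre/sqre-codekit')  # repository name is illustrative
head = get_default_ref(repo)
print(head.ref)  # e.g. 'refs/heads/master'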
usc-isi-i2/dig-crf-tokenizer
digCrfTokenizer/crf_tokenizer.py
main
def main(argv=None): '''this is called if run from command line''' t = CrfTokenizer() print t.tokenize("This is a sentence.") print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setGroupPunctuation(True) t.setRecognizeHtmlTags(True) t.setRecognizeHtmlEntities(True) print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setSkipHtmlTags(True) t.setSkipHtmlEntities(True) print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setTokenPrefix("X:") print t.tokenize("Tokenize with prefixes.") t.setTokenPrefix(None) print t.tokenize("No more prefixes.") t.setRecognizePunctuation(False) print t.tokenize("This is a sentence.") print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") print t.tokenize("A line break goes here\n\t \rand a new line starts") t.setRecognizeLinebreaks(True) print t.tokenize("A line break goes here\n\r \rand a new line starts")
python
def main(argv=None): '''this is called if run from command line''' t = CrfTokenizer() print t.tokenize("This is a sentence.") print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setGroupPunctuation(True) t.setRecognizeHtmlTags(True) t.setRecognizeHtmlEntities(True) print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setSkipHtmlTags(True) t.setSkipHtmlEntities(True) print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") t.setTokenPrefix("X:") print t.tokenize("Tokenize with prefixes.") t.setTokenPrefix(None) print t.tokenize("No more prefixes.") t.setRecognizePunctuation(False) print t.tokenize("This is a sentence.") print t.tokenize("Buy???This...Now!!!") print t.tokenize("The <bold>only</bold> source.") print t.tokenize("The<bold>only</bold>source.") print t.tokenize("Big&gt;little.") print t.tokenize("Big & little.") print t.tokenize("blond&curly.") print t.tokenize("&brokenHtml") print t.tokenize("A line break goes here\n\t \rand a new line starts") t.setRecognizeLinebreaks(True) print t.tokenize("A line break goes here\n\r \rand a new line starts")
[ "def", "main", "(", "argv", "=", "None", ")", ":", "t", "=", "CrfTokenizer", "(", ")", "print", "t", ".", "tokenize", "(", "\"This is a sentence.\"", ")", "print", "t", ".", "tokenize", "(", "\"Buy???This...Now!!!\"", ")", "print", "t", ".", "tokenize", "(", "\"The <bold>only</bold> source.\"", ")", "print", "t", ".", "tokenize", "(", "\"The<bold>only</bold>source.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big&gt;little.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big & little.\"", ")", "print", "t", ".", "tokenize", "(", "\"blond&curly.\"", ")", "print", "t", ".", "tokenize", "(", "\"&brokenHtml\"", ")", "t", ".", "setGroupPunctuation", "(", "True", ")", "t", ".", "setRecognizeHtmlTags", "(", "True", ")", "t", ".", "setRecognizeHtmlEntities", "(", "True", ")", "print", "t", ".", "tokenize", "(", "\"Buy???This...Now!!!\"", ")", "print", "t", ".", "tokenize", "(", "\"The <bold>only</bold> source.\"", ")", "print", "t", ".", "tokenize", "(", "\"The<bold>only</bold>source.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big&gt;little.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big & little.\"", ")", "print", "t", ".", "tokenize", "(", "\"blond&curly.\"", ")", "print", "t", ".", "tokenize", "(", "\"&brokenHtml\"", ")", "t", ".", "setSkipHtmlTags", "(", "True", ")", "t", ".", "setSkipHtmlEntities", "(", "True", ")", "print", "t", ".", "tokenize", "(", "\"Buy???This...Now!!!\"", ")", "print", "t", ".", "tokenize", "(", "\"The <bold>only</bold> source.\"", ")", "print", "t", ".", "tokenize", "(", "\"The<bold>only</bold>source.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big&gt;little.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big & little.\"", ")", "print", "t", ".", "tokenize", "(", "\"blond&curly.\"", ")", "print", "t", ".", "tokenize", "(", "\"&brokenHtml\"", ")", "t", ".", "setTokenPrefix", "(", "\"X:\"", ")", "print", "t", ".", "tokenize", "(", "\"Tokenize with prefixes.\"", ")", "t", ".", "setTokenPrefix", "(", "None", ")", "print", "t", ".", "tokenize", "(", "\"No more prefixes.\"", ")", "t", ".", "setRecognizePunctuation", "(", "False", ")", "print", "t", ".", "tokenize", "(", "\"This is a sentence.\"", ")", "print", "t", ".", "tokenize", "(", "\"Buy???This...Now!!!\"", ")", "print", "t", ".", "tokenize", "(", "\"The <bold>only</bold> source.\"", ")", "print", "t", ".", "tokenize", "(", "\"The<bold>only</bold>source.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big&gt;little.\"", ")", "print", "t", ".", "tokenize", "(", "\"Big & little.\"", ")", "print", "t", ".", "tokenize", "(", "\"blond&curly.\"", ")", "print", "t", ".", "tokenize", "(", "\"&brokenHtml\"", ")", "print", "t", ".", "tokenize", "(", "\"A line break goes here\\n\\t \\rand a new line starts\"", ")", "t", ".", "setRecognizeLinebreaks", "(", "True", ")", "print", "t", ".", "tokenize", "(", "\"A line break goes here\\n\\r \\rand a new line starts\"", ")" ]
this is called if run from command line
[ "this", "is", "called", "if", "run", "from", "command", "line" ]
f06458af40e648a968e547aead4510ff07bb5304
https://github.com/usc-isi-i2/dig-crf-tokenizer/blob/f06458af40e648a968e547aead4510ff07bb5304/digCrfTokenizer/crf_tokenizer.py#L357-L403
train
portfors-lab/sparkle
sparkle/gui/controlwindow.py
ControlWindow.verifyInputs
def verifyInputs(self, mode): """Goes through and checks all stimuli and input settings are valid and consistent. Prompts user with a message if there is a condition that would prevent acquisition. :param mode: The mode of acquisition trying to be run. Options are 'chart', or anything else ('explore', 'protocol', 'calibration') :type mode: str :returns: bool -- Whether all inputs and stimuli are valid """ if len(self._aichans) < 1: failmsg = "Must have at least one input channel selected" QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg) return False if mode == 'chart': if self.ui.aifsSpnbx.value()*self.fscale > 100000: QtGui.QMessageBox.warning(self, "Invalid Input", "Recording samplerate cannot exceed 100kHz for chart acquisition") return False elif mode is not None: # if (1./self.ui.reprateSpnbx.value()) < self.ui.windowszSpnbx.value()*self.tscale + 0.05: # QtGui.QMessageBox.warning(self, "Invalid Input", "A minimum of 50ms time between repetitions required. Current interval {}, required {}".format((1./self.ui.reprateSpnbx.value()), self.ui.windowszSpnbx.value()*self.tscale + 0.05)) # return False if self.ui.tabGroup.currentWidget().objectName() == 'tabExplore': # each widget should be in charge of putting its own stimulus together self.ui.exploreStimEditor.saveToObject() failmsg = self.ui.exploreStimEditor.verify(self.ui.windowszSpnbx.value()) if failmsg: QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False # if selectedStim.intensity() > self.calvals['caldb']: # QtGui.QMessageBox.warning(self, "Invalid Input", # "Intensity must be below calibrated maximum {}dB SPL".format(self.calvals['caldb'])) # return False elif self.ui.tabGroup.currentWidget().objectName() == 'tabProtocol': protocol_model = self.acqmodel.protocol_model() # protocol delegates to each test to verify itself and report failure = protocol_model.verify(float(self.ui.windowszSpnbx.value())) if failure: QtGui.QMessageBox.warning(self, "Invalid Input", failure) return False elif self.ui.tabGroup.currentWidget().objectName() == 'tabCalibrate': if len(self._aichans) > 1: failmsg = "Speaker calibration only supported for single channel, currently {} channels selected; select 1 input channel.".format(len(self._aichans)) QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg) return False # get what stimulus is about to be presented if self.ui.calibrationWidget.ui.savecalCkbx.isChecked() or not self.ui.calibrationWidget.currentSelection() == 'Tone Curve': calibration_stimulus = self.acqmodel.calibration_stimulus('noise') self.ui.calibrationWidget.saveToObject() else: calibration_stimulus = self.acqmodel.calibration_stimulus('tone') failmsg = calibration_stimulus.verify(float(self.ui.windowszSpnbx.value())) if failmsg: QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False # also check that the recording samplerate is high enough in this case failmsg = calibration_stimulus.verifyExpanded(samplerate=self.ui.aifsSpnbx.value()) if failmsg: failmsg = failmsg.replace('Generation', 'Recording') QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False if self.advanced_options['use_attenuator'] and not self.acqmodel.attenuator_connection(): failmsg = "Error connecting to attenuator, make sure it is turned on and connected, and try again" QtGui.QMessageBox.warning(self, "Connection Error", failmsg) return False return True
python
def verifyInputs(self, mode): """Goes through and checks all stimuli and input settings are valid and consistent. Prompts user with a message if there is a condition that would prevent acquisition. :param mode: The mode of acquisition trying to be run. Options are 'chart', or anything else ('explore', 'protocol', 'calibration') :type mode: str :returns: bool -- Whether all inputs and stimuli are valid """ if len(self._aichans) < 1: failmsg = "Must have at least one input channel selected" QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg) return False if mode == 'chart': if self.ui.aifsSpnbx.value()*self.fscale > 100000: QtGui.QMessageBox.warning(self, "Invalid Input", "Recording samplerate cannot exceed 100kHz for chart acquisition") return False elif mode is not None: # if (1./self.ui.reprateSpnbx.value()) < self.ui.windowszSpnbx.value()*self.tscale + 0.05: # QtGui.QMessageBox.warning(self, "Invalid Input", "A minimum of 50ms time between repetitions required. Current interval {}, required {}".format((1./self.ui.reprateSpnbx.value()), self.ui.windowszSpnbx.value()*self.tscale + 0.05)) # return False if self.ui.tabGroup.currentWidget().objectName() == 'tabExplore': # each widget should be in charge of putting its own stimulus together self.ui.exploreStimEditor.saveToObject() failmsg = self.ui.exploreStimEditor.verify(self.ui.windowszSpnbx.value()) if failmsg: QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False # if selectedStim.intensity() > self.calvals['caldb']: # QtGui.QMessageBox.warning(self, "Invalid Input", # "Intensity must be below calibrated maximum {}dB SPL".format(self.calvals['caldb'])) # return False elif self.ui.tabGroup.currentWidget().objectName() == 'tabProtocol': protocol_model = self.acqmodel.protocol_model() # protocol delegates to each test to verify itself and report failure = protocol_model.verify(float(self.ui.windowszSpnbx.value())) if failure: QtGui.QMessageBox.warning(self, "Invalid Input", failure) return False elif self.ui.tabGroup.currentWidget().objectName() == 'tabCalibrate': if len(self._aichans) > 1: failmsg = "Speaker calibration only supported for single channel, currently {} channels selected; select 1 input channel.".format(len(self._aichans)) QtGui.QMessageBox.warning(self, "Invalid Setting", failmsg) return False # get what stimulus is about to be presented if self.ui.calibrationWidget.ui.savecalCkbx.isChecked() or not self.ui.calibrationWidget.currentSelection() == 'Tone Curve': calibration_stimulus = self.acqmodel.calibration_stimulus('noise') self.ui.calibrationWidget.saveToObject() else: calibration_stimulus = self.acqmodel.calibration_stimulus('tone') failmsg = calibration_stimulus.verify(float(self.ui.windowszSpnbx.value())) if failmsg: QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False # also check that the recording samplerate is high enough in this case failmsg = calibration_stimulus.verifyExpanded(samplerate=self.ui.aifsSpnbx.value()) if failmsg: failmsg = failmsg.replace('Generation', 'Recording') QtGui.QMessageBox.warning(self, "Invalid Input", failmsg) return False if self.advanced_options['use_attenuator'] and not self.acqmodel.attenuator_connection(): failmsg = "Error connecting to attenuator, make sure it is turned on and connected, and try again" QtGui.QMessageBox.warning(self, "Connection Error", failmsg) return False return True
[ "def", "verifyInputs", "(", "self", ",", "mode", ")", ":", "if", "len", "(", "self", ".", "_aichans", ")", "<", "1", ":", "failmsg", "=", "\"Must have at least one input channel selected\"", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Setting\"", ",", "failmsg", ")", "return", "False", "if", "mode", "==", "'chart'", ":", "if", "self", ".", "ui", ".", "aifsSpnbx", ".", "value", "(", ")", "*", "self", ".", "fscale", ">", "100000", ":", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Input\"", ",", "\"Recording samplerate cannot exceed 100kHz for chart acquisition\"", ")", "return", "False", "elif", "mode", "is", "not", "None", ":", "# if (1./self.ui.reprateSpnbx.value()) < self.ui.windowszSpnbx.value()*self.tscale + 0.05:", "# QtGui.QMessageBox.warning(self, \"Invalid Input\", \"A minimum of 50ms time between repetitions required. Current interval {}, required {}\".format((1./self.ui.reprateSpnbx.value()), self.ui.windowszSpnbx.value()*self.tscale + 0.05))", "# return False", "if", "self", ".", "ui", ".", "tabGroup", ".", "currentWidget", "(", ")", ".", "objectName", "(", ")", "==", "'tabExplore'", ":", "# each widget should be in charge of putting its own stimulus together", "self", ".", "ui", ".", "exploreStimEditor", ".", "saveToObject", "(", ")", "failmsg", "=", "self", ".", "ui", ".", "exploreStimEditor", ".", "verify", "(", "self", ".", "ui", ".", "windowszSpnbx", ".", "value", "(", ")", ")", "if", "failmsg", ":", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Input\"", ",", "failmsg", ")", "return", "False", "# if selectedStim.intensity() > self.calvals['caldb']:", "# QtGui.QMessageBox.warning(self, \"Invalid Input\",", "# \"Intensity must be below calibrated maximum {}dB SPL\".format(self.calvals['caldb']))", "# return False", "elif", "self", ".", "ui", ".", "tabGroup", ".", "currentWidget", "(", ")", ".", "objectName", "(", ")", "==", "'tabProtocol'", ":", "protocol_model", "=", "self", ".", "acqmodel", ".", "protocol_model", "(", ")", "# protocol delegates to each test to verify itself and report", "failure", "=", "protocol_model", ".", "verify", "(", "float", "(", "self", ".", "ui", ".", "windowszSpnbx", ".", "value", "(", ")", ")", ")", "if", "failure", ":", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Input\"", ",", "failure", ")", "return", "False", "elif", "self", ".", "ui", ".", "tabGroup", ".", "currentWidget", "(", ")", ".", "objectName", "(", ")", "==", "'tabCalibrate'", ":", "if", "len", "(", "self", ".", "_aichans", ")", ">", "1", ":", "failmsg", "=", "\"Speaker calibration only supported for single channel, currently {} channels selected; select 1 input channel.\"", ".", "format", "(", "len", "(", "self", ".", "_aichans", ")", ")", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Setting\"", ",", "failmsg", ")", "return", "False", "# get what stimulus is about to be presented", "if", "self", ".", "ui", ".", "calibrationWidget", ".", "ui", ".", "savecalCkbx", ".", "isChecked", "(", ")", "or", "not", "self", ".", "ui", ".", "calibrationWidget", ".", "currentSelection", "(", ")", "==", "'Tone Curve'", ":", "calibration_stimulus", "=", "self", ".", "acqmodel", ".", "calibration_stimulus", "(", "'noise'", ")", "self", ".", "ui", ".", "calibrationWidget", ".", "saveToObject", "(", ")", "else", ":", "calibration_stimulus", "=", "self", ".", "acqmodel", ".", "calibration_stimulus", "(", "'tone'", ")", "failmsg", "=", "calibration_stimulus", 
".", "verify", "(", "float", "(", "self", ".", "ui", ".", "windowszSpnbx", ".", "value", "(", ")", ")", ")", "if", "failmsg", ":", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Input\"", ",", "failmsg", ")", "return", "False", "# also check that the recording samplerate is high enough in this case", "failmsg", "=", "calibration_stimulus", ".", "verifyExpanded", "(", "samplerate", "=", "self", ".", "ui", ".", "aifsSpnbx", ".", "value", "(", ")", ")", "if", "failmsg", ":", "failmsg", "=", "failmsg", ".", "replace", "(", "'Generation'", ",", "'Recording'", ")", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Invalid Input\"", ",", "failmsg", ")", "return", "False", "if", "self", ".", "advanced_options", "[", "'use_attenuator'", "]", "and", "not", "self", ".", "acqmodel", ".", "attenuator_connection", "(", ")", ":", "failmsg", "=", "\"Error Connection to attenuator, make sure it it turned on and connected, and try again\"", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Connection Error\"", ",", "failmsg", ")", "return", "False", "return", "True" ]
Goes through and checks all stimuli and input settings are valid and consistent. Prompts user with a message if there is a condition that would prevent acquisition. :param mode: The mode of acquisition trying to be run. Options are 'chart', or anything else ('explore', 'protocol', 'calibration') :type mode: str :returns: bool -- Whether all inputs and stimuli are valid
[ "Goes", "through", "and", "checks", "all", "stimuli", "and", "input", "settings", "are", "valid", "and", "consistent", ".", "Prompts", "user", "with", "a", "message", "if", "there", "is", "a", "condition", "that", "would", "prevent", "acquisition", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L93-L159
train
portfors-lab/sparkle
sparkle/gui/controlwindow.py
ControlWindow.updateUnitLabels
def updateUnitLabels(self, tscale, fscale): """When the GUI unit scale changes, it is necessary to update the unit labels on all fields throughout the GUI. This handles the main window, and also notifies other windows to update Only supports conversion between two values: * seconds and milliseconds for time * Hz and kHz for frequency :param tscale: Time scale to update to either 's' or 'ms' :type tscale: str :param fscale: Frequency scale to update to either 'Hz' or 'kHz' :type fscale: str """ AbstractEditorWidget.updateScales(tscale, fscale) SmartDelegate.updateScales(tscale, fscale) # purges stored label references from deleted parent widgets AbstractEditorWidget.purgeDeletedWidgets() self.tscale = tscale # updates labels for components # add the list of all time unit labels out there to our update # list here time_inputs = self.timeInputs + AbstractEditorWidget.tunit_fields # now go through our list of labels and fields and scale/update for field in time_inputs: field.setScale(tscale) self.fscale = fscale # add the list of all frequency unit labels out there to our update # list here frequency_inputs = self.frequencyInputs + AbstractEditorWidget.funit_fields # now go through our list of labels and fields and scale/update for field in frequency_inputs: field.setScale(fscale)
python
def updateUnitLabels(self, tscale, fscale): """When the GUI unit scale changes, it is necessary to update the unit labels on all fields throughout the GUI. This handles the main window, and also notifies other windows to update Only supports conversion between two values: * seconds and milliseconds for time * Hz and kHz for frequency :param tscale: Time scale to update to either 's' or 'ms' :type tscale: str :param fscale: Frequency scale to update to either 'Hz' or 'kHz' :type fscale: str """ AbstractEditorWidget.updateScales(tscale, fscale) SmartDelegate.updateScales(tscale, fscale) # purges stored label references from deleted parent widgets AbstractEditorWidget.purgeDeletedWidgets() self.tscale = tscale # updates labels for components # add the list of all time unit labels out there to our update # list here time_inputs = self.timeInputs + AbstractEditorWidget.tunit_fields # now go through our list of labels and fields and scale/update for field in time_inputs: field.setScale(tscale) self.fscale = fscale # add the list of all frequency unit labels out there to our update # list here frequency_inputs = self.frequencyInputs + AbstractEditorWidget.funit_fields # now go through our list of labels and fields and scale/update for field in frequency_inputs: field.setScale(fscale)
[ "def", "updateUnitLabels", "(", "self", ",", "tscale", ",", "fscale", ")", ":", "AbstractEditorWidget", ".", "updateScales", "(", "tscale", ",", "fscale", ")", "SmartDelegate", ".", "updateScales", "(", "tscale", ",", "fscale", ")", "# purges stored label references from deleted parent widgets", "AbstractEditorWidget", ".", "purgeDeletedWidgets", "(", ")", "self", ".", "tscale", "=", "tscale", "# updates labels for components", "# add the list of all time unit labels out there to our update", "# list here", "time_inputs", "=", "self", ".", "timeInputs", "+", "AbstractEditorWidget", ".", "tunit_fields", "# now go through our list of labels and fields and scale/update", "for", "field", "in", "time_inputs", ":", "field", ".", "setScale", "(", "tscale", ")", "self", ".", "fscale", "=", "fscale", "# add the list of all frequency unit labels out there to our update", "# list here", "frequency_inputs", "=", "self", ".", "frequencyInputs", "+", "AbstractEditorWidget", ".", "funit_fields", "# now go through our list of labels and fields and scale/update", "for", "field", "in", "frequency_inputs", ":", "field", ".", "setScale", "(", "fscale", ")" ]
When the GUI unit scale changes, it is necessary to update the unit labels on all fields throughout the GUI. This handles the main window, and also notifies other windows to update Only supports conversion between two values: * seconds and milliseconds for time * Hz and kHz for frequency :param tscale: Time scale to update to either 's' or 'ms' :type tscale: str :param fscale: Frequency scale to update to either 'Hz' or 'kHz' :type fscale: str
[ "When", "the", "GUI", "unit", "scale", "changes", "it", "is", "neccessary", "to", "update", "the", "unit", "labels", "on", "all", "fields", "throughout", "the", "GUI", ".", "This", "handles", "The", "main", "window", "and", "also", "notifys", "other", "windows", "to", "update" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L161-L201
train
portfors-lab/sparkle
sparkle/gui/controlwindow.py
ControlWindow.reset_device_channels
def reset_device_channels(self): """Updates the input channel selection boxes based on the current device name stored in this object""" # clear boxes first self.ui.aochanBox.clear() devname = self.advanced_options['device_name'] device_list = get_devices() if devname in device_list: cnames = get_ao_chans(devname) self.ui.aochanBox.addItems(cnames) cnames = get_ai_chans(devname) # filter list for channels that are present in current device self._aichans = [chan for chan in self._aichans if chan in cnames] self._aichan_details = {chan: deets for chan, deets in self._aichan_details.items() if chan in cnames} elif devname == '' and len(device_list) > 0: devname = device_list[0] cnames = get_ao_chans(devname) self.ui.aochanBox.addItems(cnames) self.advanced_options['device_name'] = devname self._aichans = [] self._aichan_details = {} else: self._aichans = [] self._aichan_details = {} self.ui.chanNumLbl.setText(str(len(self._aichans))) # remove all plots and re-add from new list self.display.removeResponsePlot(*self.display.responseNameList()) self.display.addResponsePlot(*self._aichans) # update details on plots for name, deets in self._aichan_details.items(): self.display.setThreshold(deets['threshold'], name) self.display.setRasterBounds(deets['raster_bounds'], name) self.display.setAbs(deets['abs'], name) # can't find a function in DAQmx that gets the trigger # channel names, so add manually self.ui.trigchanBox.addItems(['/'+devname+'/PFI0', '/'+devname+'/PFI1'])
python
def reset_device_channels(self): """Updates the input channel selection boxes based on the current device name stored in this object""" # clear boxes first self.ui.aochanBox.clear() devname = self.advanced_options['device_name'] device_list = get_devices() if devname in device_list: cnames = get_ao_chans(devname) self.ui.aochanBox.addItems(cnames) cnames = get_ai_chans(devname) # filter list for channels that are present in current device self._aichans = [chan for chan in self._aichans if chan in cnames] self._aichan_details = {chan: deets for chan, deets in self._aichan_details.items() if chan in cnames} elif devname == '' and len(device_list) > 0: devname = device_list[0] cnames = get_ao_chans(devname) self.ui.aochanBox.addItems(cnames) self.advanced_options['device_name'] = devname self._aichans = [] self._aichan_details = {} else: self._aichans = [] self._aichan_details = {} self.ui.chanNumLbl.setText(str(len(self._aichans))) # remove all plots and re-add from new list self.display.removeResponsePlot(*self.display.responseNameList()) self.display.addResponsePlot(*self._aichans) # update details on plots for name, deets in self._aichan_details.items(): self.display.setThreshold(deets['threshold'], name) self.display.setRasterBounds(deets['raster_bounds'], name) self.display.setAbs(deets['abs'], name) # can't find a function in DAQmx that gets the trigger # channel names, so add manually self.ui.trigchanBox.addItems(['/'+devname+'/PFI0', '/'+devname+'/PFI1'])
[ "def", "reset_device_channels", "(", "self", ")", ":", "# clear boxes first", "self", ".", "ui", ".", "aochanBox", ".", "clear", "(", ")", "devname", "=", "self", ".", "advanced_options", "[", "'device_name'", "]", "device_list", "=", "get_devices", "(", ")", "if", "devname", "in", "device_list", ":", "cnames", "=", "get_ao_chans", "(", "devname", ")", "self", ".", "ui", ".", "aochanBox", ".", "addItems", "(", "cnames", ")", "cnames", "=", "get_ai_chans", "(", "devname", ")", "# filter list for channels that are present in current device", "self", ".", "_aichans", "=", "[", "chan", "for", "chan", "in", "self", ".", "_aichans", "if", "chan", "in", "cnames", "]", "self", ".", "_aichan_details", "=", "{", "chan", ":", "deets", "for", "chan", ",", "deets", "in", "self", ".", "_aichan_details", ".", "items", "(", ")", "if", "chan", "in", "cnames", "}", "elif", "devname", "==", "''", "and", "len", "(", "device_list", ")", ">", "0", ":", "devname", "=", "device_list", "[", "0", "]", "cnames", "=", "get_ao_chans", "(", "devname", ")", "self", ".", "ui", ".", "aochanBox", ".", "addItems", "(", "cnames", ")", "self", ".", "advanced_options", "[", "'device_name'", "]", "=", "devname", "self", ".", "_aichans", "=", "[", "]", "self", ".", "_aichan_details", "=", "{", "}", "else", ":", "self", ".", "_aichans", "=", "[", "]", "self", ".", "_aichan_details", "=", "{", "}", "self", ".", "ui", ".", "chanNumLbl", ".", "setText", "(", "str", "(", "len", "(", "self", ".", "_aichans", ")", ")", ")", "# remove all plots and re-add from new list", "self", ".", "display", ".", "removeResponsePlot", "(", "*", "self", ".", "display", ".", "responseNameList", "(", ")", ")", "self", ".", "display", ".", "addResponsePlot", "(", "*", "self", ".", "_aichans", ")", "# update details on plots", "for", "name", ",", "deets", "in", "self", ".", "_aichan_details", ".", "items", "(", ")", ":", "self", ".", "display", ".", "setThreshold", "(", "deets", "[", "'threshold'", "]", ",", "name", ")", "self", ".", "display", ".", "setRasterBounds", "(", "deets", "[", "'raster_bounds'", "]", ",", "name", ")", "self", ".", "display", ".", "setAbs", "(", "deets", "[", "'abs'", "]", ",", "name", ")", "# can't find a function in DAQmx that gets the trigger", "# channel names, so add manually", "self", ".", "ui", ".", "trigchanBox", ".", "addItems", "(", "[", "'/'", "+", "devname", "+", "'/PFI0'", ",", "'/'", "+", "devname", "+", "'/PFI1'", "]", ")" ]
Updates the input channel selection boxes based on the current device name stored in this object
[ "Updates", "the", "input", "channel", "selection", "boxes", "based", "on", "the", "current", "device", "name", "stored", "in", "this", "object" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L203-L240
train
portfors-lab/sparkle
sparkle/gui/controlwindow.py
ControlWindow.saveInputs
def saveInputs(self, fname): """Save the values in the input fields so they can be loaded next time the GUI is run :param fname: file path of location to store values at :type fname: str """ # save current inputs to file for loading next time if not fname: return appdir = systools.get_appdir() if not os.path.isdir(appdir): os.makedirs(appdir) fname = os.path.join(appdir, fname) savedict = {} savedict['binsz'] = self.ui.binszSpnbx.value() savedict['aifs'] = self.ui.aifsSpnbx.value() savedict['tscale'] = self.tscale savedict['fscale'] = self.fscale savedict['saveformat'] = self.saveformat savedict['ex_nreps'] = self.ui.exploreStimEditor.repCount() savedict['reprate'] = self.ui.reprateSpnbx.value() savedict['windowsz'] = self.ui.windowszSpnbx.value() savedict['specargs'] = self.specArgs savedict['viewSettings'] = self.viewSettings savedict['calvals'] = self.calvals savedict['calparams'] = self.acqmodel.calibration_template() savedict['calreps'] = self.ui.calibrationWidget.ui.nrepsSpnbx.value() savedict['mphonesens'] = self.ui.mphoneSensSpnbx.value() savedict['mphonedb'] = self.ui.mphoneDBSpnbx.value() savedict['vocalpaths'] = Vocalization.paths savedict['aichans'] = self._aichans savedict['aichan_details'] = self._aichan_details # parameter settings -- save all tracks present savedict['explorestims'] = self.ui.exploreStimEditor.saveTemplate() savedict['advanced_options'] = self.advanced_options savedict['stim_view_defaults'] = StimulusView.getDefaults() savedict['tuning_curve'] = TCFactory.defaultInputs # filter out any non-native python types that are not json serializable savedict = convert2native(savedict) try: with open(fname, 'w') as jf: json.dump(savedict, jf) except: logger = logging.getLogger('main') logger.exception("Unable to save app data to file: {}".format(fname))
python
def saveInputs(self, fname):
    """Save the values in the input fields so they can be loaded
    next time the GUI is run

    :param fname: file path of location to store values at
    :type fname: str
    """
    # save current inputs to file for loading next time
    if not fname:
        return
    appdir = systools.get_appdir()
    if not os.path.isdir(appdir):
        os.makedirs(appdir)
    fname = os.path.join(appdir, fname)

    savedict = {}
    savedict['binsz'] = self.ui.binszSpnbx.value()
    savedict['aifs'] = self.ui.aifsSpnbx.value()
    savedict['tscale'] = self.tscale
    savedict['fscale'] = self.fscale
    savedict['saveformat'] = self.saveformat
    savedict['ex_nreps'] = self.ui.exploreStimEditor.repCount()
    savedict['reprate'] = self.ui.reprateSpnbx.value()
    savedict['windowsz'] = self.ui.windowszSpnbx.value()
    savedict['specargs'] = self.specArgs
    savedict['viewSettings'] = self.viewSettings
    savedict['calvals'] = self.calvals
    savedict['calparams'] = self.acqmodel.calibration_template()
    savedict['calreps'] = self.ui.calibrationWidget.ui.nrepsSpnbx.value()
    savedict['mphonesens'] = self.ui.mphoneSensSpnbx.value()
    savedict['mphonedb'] = self.ui.mphoneDBSpnbx.value()
    savedict['vocalpaths'] = Vocalization.paths
    savedict['aichans'] = self._aichans
    savedict['aichan_details'] = self._aichan_details

    # parameter settings -- save all tracks present
    savedict['explorestims'] = self.ui.exploreStimEditor.saveTemplate()

    savedict['advanced_options'] = self.advanced_options
    savedict['stim_view_defaults'] = StimulusView.getDefaults()
    savedict['tuning_curve'] = TCFactory.defaultInputs

    # filter out any non-native python types that are not json serializable
    savedict = convert2native(savedict)

    try:
        with open(fname, 'w') as jf:
            json.dump(savedict, jf)
    except Exception:
        logger = logging.getLogger('main')
        logger.exception("Unable to save app data to file: {}".format(fname))
[ "def", "saveInputs", "(", "self", ",", "fname", ")", ":", "# save current inputs to file for loading next time", "if", "not", "fname", ":", "return", "appdir", "=", "systools", ".", "get_appdir", "(", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "appdir", ")", ":", "os", ".", "makedirs", "(", "appdir", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "appdir", ",", "fname", ")", "savedict", "=", "{", "}", "savedict", "[", "'binsz'", "]", "=", "self", ".", "ui", ".", "binszSpnbx", ".", "value", "(", ")", "savedict", "[", "'aifs'", "]", "=", "self", ".", "ui", ".", "aifsSpnbx", ".", "value", "(", ")", "savedict", "[", "'tscale'", "]", "=", "self", ".", "tscale", "savedict", "[", "'fscale'", "]", "=", "self", ".", "fscale", "savedict", "[", "'saveformat'", "]", "=", "self", ".", "saveformat", "savedict", "[", "'ex_nreps'", "]", "=", "self", ".", "ui", ".", "exploreStimEditor", ".", "repCount", "(", ")", "savedict", "[", "'reprate'", "]", "=", "self", ".", "ui", ".", "reprateSpnbx", ".", "value", "(", ")", "savedict", "[", "'windowsz'", "]", "=", "self", ".", "ui", ".", "windowszSpnbx", ".", "value", "(", ")", "savedict", "[", "'specargs'", "]", "=", "self", ".", "specArgs", "savedict", "[", "'viewSettings'", "]", "=", "self", ".", "viewSettings", "savedict", "[", "'calvals'", "]", "=", "self", ".", "calvals", "savedict", "[", "'calparams'", "]", "=", "self", ".", "acqmodel", ".", "calibration_template", "(", ")", "savedict", "[", "'calreps'", "]", "=", "self", ".", "ui", ".", "calibrationWidget", ".", "ui", ".", "nrepsSpnbx", ".", "value", "(", ")", "savedict", "[", "'mphonesens'", "]", "=", "self", ".", "ui", ".", "mphoneSensSpnbx", ".", "value", "(", ")", "savedict", "[", "'mphonedb'", "]", "=", "self", ".", "ui", ".", "mphoneDBSpnbx", ".", "value", "(", ")", "savedict", "[", "'vocalpaths'", "]", "=", "Vocalization", ".", "paths", "savedict", "[", "'aichans'", "]", "=", "self", ".", "_aichans", "savedict", "[", "'aichan_details'", "]", "=", "self", ".", "_aichan_details", "# parameter settings -- save all tracks present", "savedict", "[", "'explorestims'", "]", "=", "self", ".", "ui", ".", "exploreStimEditor", ".", "saveTemplate", "(", ")", "savedict", "[", "'advanced_options'", "]", "=", "self", ".", "advanced_options", "savedict", "[", "'stim_view_defaults'", "]", "=", "StimulusView", ".", "getDefaults", "(", ")", "savedict", "[", "'tuning_curve'", "]", "=", "TCFactory", ".", "defaultInputs", "# filter out and non-native python types that are not json serializable", "savedict", "=", "convert2native", "(", "savedict", ")", "try", ":", "with", "open", "(", "fname", ",", "'w'", ")", "as", "jf", ":", "json", ".", "dump", "(", "savedict", ",", "jf", ")", "except", ":", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "exception", "(", "\"Unable to save app data to file: {}\"", ".", "format", "(", "fname", ")", ")" ]
Save the values in the input fields so they can be loaded next time the GUI is run :param fname: file path of location to store values at :type fname: str
[ "Save", "the", "values", "in", "the", "input", "fields", "so", "they", "can", "be", "loaded", "next", "time", "the", "GUI", "is", "run" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L242-L293
train
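A minimal standalone sketch of the same persist-settings pattern used above: collect values into a dict and dump them as JSON into an application directory. The directory path, file name, and field names here are placeholders, not part of sparkle's API.

import json
import os

def save_inputs(fname, values):
    # Hypothetical stand-in for systools.get_appdir()
    appdir = os.path.join(os.path.expanduser('~'), '.myapp')
    if not os.path.isdir(appdir):
        os.makedirs(appdir)
    path = os.path.join(appdir, fname)
    try:
        with open(path, 'w') as jf:
            json.dump(values, jf)
    except Exception:
        print("Unable to save app data to file: {}".format(path))

save_inputs('controlinputs.json', {'binsz': 0.005, 'aifs': 100000})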
portfors-lab/sparkle
sparkle/gui/controlwindow.py
ControlWindow.closeEvent
def closeEvent(self, event): """Closes listening threads and saves GUI data for later use. Re-implemented from :qtdoc:`QWidget` """ self.acqmodel.stop_listening() # close listener threads self.saveInputs(self.inputsFilename) # save GUI size settings = QtCore.QSettings("audiolab") settings.setValue("geometry", self.saveGeometry()) settings.setValue("windowState", self.saveState()) logger = logging.getLogger('main') logger.info('All user settings saved') self.garbage_timer.stop() gc.enable()
python
def closeEvent(self, event): """Closes listening threads and saves GUI data for later use. Re-implemented from :qtdoc:`QWidget` """ self.acqmodel.stop_listening() # close listener threads self.saveInputs(self.inputsFilename) # save GUI size settings = QtCore.QSettings("audiolab") settings.setValue("geometry", self.saveGeometry()) settings.setValue("windowState", self.saveState()) logger = logging.getLogger('main') logger.info('All user settings saved') self.garbage_timer.stop() gc.enable()
[ "def", "closeEvent", "(", "self", ",", "event", ")", ":", "self", ".", "acqmodel", ".", "stop_listening", "(", ")", "# close listener threads", "self", ".", "saveInputs", "(", "self", ".", "inputsFilename", ")", "# save GUI size", "settings", "=", "QtCore", ".", "QSettings", "(", "\"audiolab\"", ")", "settings", ".", "setValue", "(", "\"geometry\"", ",", "self", ".", "saveGeometry", "(", ")", ")", "settings", ".", "setValue", "(", "\"windowState\"", ",", "self", ".", "saveState", "(", ")", ")", "logger", "=", "logging", ".", "getLogger", "(", "'main'", ")", "logger", ".", "info", "(", "'All user settings saved'", ")", "self", ".", "garbage_timer", ".", "stop", "(", ")", "gc", ".", "enable", "(", ")" ]
Closes listening threads and saves GUI data for later use. Re-implemented from :qtdoc:`QWidget`
[ "Closes", "listening", "threads", "and", "saves", "GUI", "data", "for", "later", "use", "." ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/controlwindow.py#L399-L415
train
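A minimal sketch of the same closeEvent pattern outside sparkle, assuming PyQt5 is installed; the organization name "myorg" is a placeholder. Overriding closeEvent lets the widget persist its geometry and dock/toolbar state just before it closes.

import sys
from PyQt5 import QtCore, QtWidgets

class Window(QtWidgets.QMainWindow):
    def closeEvent(self, event):
        # Persist window geometry and state before the widget closes
        settings = QtCore.QSettings("myorg")
        settings.setValue("geometry", self.saveGeometry())
        settings.setValue("windowState", self.saveState())
        super().closeEvent(event)

app = QtWidgets.QApplication(sys.argv)
w = Window()
w.show()
sys.exit(app.exec_())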
moin18/utilspie
utilspie/asyncutils/async_utils.py
ordered_async_call
def ordered_async_call(func_list):
    """
    Runs the list of functions asynchronously, returns the responses maintaining the order

    :param func_list: Expects list of lists to be of format
        [[func1, args1, kwargs1], [func2, args2, kwargs2], ...]
    :return: List of output of the functions
        [output1, output2, ...]
    """
    def worker(function, f_args, f_kwargs, queue, index):
        """
        Runs the function and puts the result on the queue, or the exception in case of error
        """
        response = {
            'index': index,  # For tracking the index of each function in actual list.
                             # Since this function is called asynchronously, the order
                             # in the queue may differ
            'data': None,
            'error': None
        }

        # Handle error in the function call
        try:
            response['data'] = function(*f_args, **f_kwargs)
        except Exception as e:
            response['error'] = e  # send back the exception along with the queue

        queue.put(response)

    queue = Queue()  # For passing results back from the worker processes
    processes = [Process(target=worker, args=(func, args, kwargs, queue, i))
                 for i, (func, args, kwargs) in enumerate(func_list)]

    for process in processes:
        process.start()

    response_list = []
    for process in processes:
        # Wait for process to finish
        process.join()

        # Get back the response from the queue
        response = queue.get()
        if response['error']:
            raise response['error']  # Raise exception if the function call failed
        response_list.append(response)

    return [content['data'] for content in sorted(response_list, key=lambda x: x['index'])]
python
def ordered_async_call(func_list):
    """
    Runs the list of functions asynchronously, returns the responses maintaining the order

    :param func_list: Expects list of lists to be of format
        [[func1, args1, kwargs1], [func2, args2, kwargs2], ...]
    :return: List of output of the functions
        [output1, output2, ...]
    """
    def worker(function, f_args, f_kwargs, queue, index):
        """
        Runs the function and puts the result on the queue, or the exception in case of error
        """
        response = {
            'index': index,  # For tracking the index of each function in actual list.
                             # Since this function is called asynchronously, the order
                             # in the queue may differ
            'data': None,
            'error': None
        }

        # Handle error in the function call
        try:
            response['data'] = function(*f_args, **f_kwargs)
        except Exception as e:
            response['error'] = e  # send back the exception along with the queue

        queue.put(response)

    queue = Queue()  # For passing results back from the worker processes
    processes = [Process(target=worker, args=(func, args, kwargs, queue, i))
                 for i, (func, args, kwargs) in enumerate(func_list)]

    for process in processes:
        process.start()

    response_list = []
    for process in processes:
        # Wait for process to finish
        process.join()

        # Get back the response from the queue
        response = queue.get()
        if response['error']:
            raise response['error']  # Raise exception if the function call failed
        response_list.append(response)

    return [content['data'] for content in sorted(response_list, key=lambda x: x['index'])]
[ "def", "ordered_async_call", "(", "func_list", ")", ":", "def", "worker", "(", "function", ",", "f_args", ",", "f_kwargs", ",", "queue", ",", "index", ")", ":", "\"\"\"\n Runs the function and appends the output to list, and the Exception in the case of error\n \"\"\"", "response", "=", "{", "'index'", ":", "index", ",", "# For tracking the index of each function in actual list.", "# Since, this function is called asynchronously, order in", "# queue may differ", "'data'", ":", "None", ",", "'error'", ":", "None", "}", "# Handle error in the function call", "try", ":", "response", "[", "'data'", "]", "=", "function", "(", "*", "f_args", ",", "*", "*", "f_kwargs", ")", "except", "Exception", "as", "e", ":", "response", "[", "'error'", "]", "=", "e", "# send back the exception along with the queue", "queue", ".", "put", "(", "response", ")", "queue", "=", "Queue", "(", ")", "# For preserving state across threads", "processes", "=", "[", "Process", "(", "target", "=", "worker", ",", "args", "=", "(", "func", ",", "args", ",", "kwargs", ",", "queue", ",", "i", ")", ")", "for", "i", ",", "(", "func", ",", "args", ",", "kwargs", ")", "in", "enumerate", "(", "func_list", ")", "]", "for", "process", "in", "processes", ":", "process", ".", "start", "(", ")", "response_list", "=", "[", "]", "for", "process", "in", "processes", ":", "# Wait for process to finish", "process", ".", "join", "(", ")", "# Get back the response from the queue", "response", "=", "queue", ".", "get", "(", ")", "if", "response", "[", "'error'", "]", ":", "raise", "response", "[", "'error'", "]", "# Raise exception if the function call failed", "response_list", ".", "append", "(", "response", ")", "return", "[", "content", "[", "'data'", "]", "for", "content", "in", "sorted", "(", "response_list", ",", "key", "=", "lambda", "x", ":", "x", "[", "'index'", "]", ")", "]" ]
Runs the list of functions asynchronously, returns the responses maintaining the order

:param func_list: Expects list of lists to be of format
    [[func1, args1, kwargs1], [func2, args2, kwargs2], ...]
:return: List of output of the functions
    [output1, output2, ...]
[ "Runs", "the", "list", "of", "function", "asynchronously", "returns", "the", "response", "maintaining", "the", "order" ]
ea96860b93fd058019a829847258e39323fef31f
https://github.com/moin18/utilspie/blob/ea96860b93fd058019a829847258e39323fef31f/utilspie/asyncutils/async_utils.py#L4-L52
train
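Assuming utilspie is installed, usage looks like this; the worker functions are made up for illustration. The functions must be defined at module level so the child processes can pickle them, and the __main__ guard matters because the helper spawns processes (note the helper's nested worker relies on fork-style process creation, so this is Unix-oriented).

from utilspie.asyncutils.async_utils import ordered_async_call

def square(x):
    return x * x

def greet(name, punctuation='!'):
    return 'Hello, ' + name + punctuation

if __name__ == '__main__':
    results = ordered_async_call([
        [square, (3,), {}],
        [greet, ('world',), {'punctuation': '?'}],
    ])
    print(results)  # [9, 'Hello, world?']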
sirfoga/pyhal
hal/internet/utils.py
add_params_to_url
def add_params_to_url(url, params): """Adds params to url :param url: Url :param params: Params to add :return: original url with new params """ url_parts = list(urlparse.urlparse(url)) # get url parts query = dict(urlparse.parse_qsl(url_parts[4])) # get url query query.update(params) # add new params url_parts[4] = urlencode(query) return urlparse.urlunparse(url_parts)
python
def add_params_to_url(url, params): """Adds params to url :param url: Url :param params: Params to add :return: original url with new params """ url_parts = list(urlparse.urlparse(url)) # get url parts query = dict(urlparse.parse_qsl(url_parts[4])) # get url query query.update(params) # add new params url_parts[4] = urlencode(query) return urlparse.urlunparse(url_parts)
[ "def", "add_params_to_url", "(", "url", ",", "params", ")", ":", "url_parts", "=", "list", "(", "urlparse", ".", "urlparse", "(", "url", ")", ")", "# get url parts", "query", "=", "dict", "(", "urlparse", ".", "parse_qsl", "(", "url_parts", "[", "4", "]", ")", ")", "# get url query", "query", ".", "update", "(", "params", ")", "# add new params", "url_parts", "[", "4", "]", "=", "urlencode", "(", "query", ")", "return", "urlparse", ".", "urlunparse", "(", "url_parts", ")" ]
Adds params to url :param url: Url :param params: Params to add :return: original url with new params
[ "Adds", "params", "to", "url" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/utils.py#L15-L26
train
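Assuming pyhal is installed, a quick check of the merge behavior: existing query parameters are kept and the new ones are folded in (parameter order in the result may vary).

from hal.internet.utils import add_params_to_url

url = add_params_to_url('https://example.com/search?q=python', {'page': 2})
print(url)  # e.g. https://example.com/search?q=python&page=2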
sirfoga/pyhal
hal/internet/utils.py
is_internet_on
def is_internet_on(host="8.8.8.8", port=53, timeout=3):
    """Checks if machine has internet connection

    :param host: hostname to test
    :param port: port of hostname
    :param timeout: seconds before discarding connection
    :return: True iff machine has internet connection
    """
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error:
        return False
python
def is_internet_on(host="8.8.8.8", port=53, timeout=3):
    """Checks if machine has internet connection

    :param host: hostname to test
    :param port: port of hostname
    :param timeout: seconds before discarding connection
    :return: True iff machine has internet connection
    """
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error:
        return False
[ "def", "is_internet_on", "(", "host", "=", "\"8.8.8.8\"", ",", "port", "=", "53", ",", "timeout", "=", "3", ")", ":", "socket", ".", "setdefaulttimeout", "(", "timeout", ")", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", ".", "connect", "(", "(", "host", ",", "port", ")", ")" ]
Checks if machine has internet connection :param host: hostname to test :param port: port of hostname :param timeout: seconds before discarding connection :return: True iff machine has internet connection
[ "Checks", "if", "machine", "has", "internet", "connection" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/utils.py#L30-L39
train
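With the corrected return values above (the function as originally published returned None on success and raised on failure, so it never matched its docstring), the check works directly as a boolean. The default probes Google's public DNS on port 53.

from hal.internet.utils import is_internet_on

if is_internet_on(timeout=1):
    print('online')
else:
    print('offline')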
sirfoga/pyhal
hal/internet/utils.py
wait_until_internet
def wait_until_internet(time_between_attempts=3, max_attempts=10): """Waits until machine has internet :param time_between_attempts: seconds between 2 consecutive attempts :param max_attempts: max number of attempts to try :return: True iff there is internet connection """ counter = 0 while not is_internet_on(): time.sleep(time_between_attempts) # wait until internet is on counter += 1 if counter > max_attempts: return False return True
python
def wait_until_internet(time_between_attempts=3, max_attempts=10): """Waits until machine has internet :param time_between_attempts: seconds between 2 consecutive attempts :param max_attempts: max number of attempts to try :return: True iff there is internet connection """ counter = 0 while not is_internet_on(): time.sleep(time_between_attempts) # wait until internet is on counter += 1 if counter > max_attempts: return False return True
[ "def", "wait_until_internet", "(", "time_between_attempts", "=", "3", ",", "max_attempts", "=", "10", ")", ":", "counter", "=", "0", "while", "not", "is_internet_on", "(", ")", ":", "time", ".", "sleep", "(", "time_between_attempts", ")", "# wait until internet is on", "counter", "+=", "1", "if", "counter", ">", "max_attempts", ":", "return", "False", "return", "True" ]
Waits until machine has internet :param time_between_attempts: seconds between 2 consecutive attempts :param max_attempts: max number of attempts to try :return: True iff there is internet connection
[ "Waits", "until", "machine", "has", "internet" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/utils.py#L42-L57
train
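Polling until the connection comes back, bounded by max_attempts; in the worst case this blocks for roughly time_between_attempts * max_attempts seconds. Note this helper only behaves as documented once is_internet_on returns booleans, as in the corrected version above.

from hal.internet.utils import wait_until_internet

if wait_until_internet(time_between_attempts=2, max_attempts=5):
    print('connection available')
else:
    print('gave up after ~10 seconds')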
hozn/keepassdb
keepassdb/util.py
transform_key
def transform_key(startkey, seed_key, seed_rand, rounds): """ This method creates the key to decrypt the database. """ masterkey = startkey aes = AES.new(seed_key, AES.MODE_ECB) # Encrypt the created hash <rounds> times for _i in range(rounds): masterkey = aes.encrypt(masterkey) # Finally, hash it again... masterkey = hashlib.sha256(masterkey).digest() # ...and hash the result together with the randomseed return hashlib.sha256(seed_rand + masterkey).digest()
python
def transform_key(startkey, seed_key, seed_rand, rounds): """ This method creates the key to decrypt the database. """ masterkey = startkey aes = AES.new(seed_key, AES.MODE_ECB) # Encrypt the created hash <rounds> times for _i in range(rounds): masterkey = aes.encrypt(masterkey) # Finally, hash it again... masterkey = hashlib.sha256(masterkey).digest() # ...and hash the result together with the randomseed return hashlib.sha256(seed_rand + masterkey).digest()
[ "def", "transform_key", "(", "startkey", ",", "seed_key", ",", "seed_rand", ",", "rounds", ")", ":", "masterkey", "=", "startkey", "aes", "=", "AES", ".", "new", "(", "seed_key", ",", "AES", ".", "MODE_ECB", ")", "# Encrypt the created hash <rounds> times", "for", "_i", "in", "range", "(", "rounds", ")", ":", "masterkey", "=", "aes", ".", "encrypt", "(", "masterkey", ")", "# Finally, hash it again...", "masterkey", "=", "hashlib", ".", "sha256", "(", "masterkey", ")", ".", "digest", "(", ")", "# ...and hash the result together with the randomseed", "return", "hashlib", ".", "sha256", "(", "seed_rand", "+", "masterkey", ")", ".", "digest", "(", ")" ]
This method creates the key to decrypt the database.
[ "This", "method", "creates", "the", "key", "to", "decrypt", "the", "database", "." ]
cb24985d1ed04e7d7db99ecdddf80dd1a91ee48b
https://github.com/hozn/keepassdb/blob/cb24985d1ed04e7d7db99ecdddf80dd1a91ee48b/keepassdb/util.py#L91-L105
train
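A sketch of calling the key-transformation routine with self-generated inputs, assuming keepassdb and a PyCrypto-compatible AES (e.g. pycryptodome) are installed. The password and round count are placeholders; a real KeePass file supplies seed_key, seed_rand, and rounds from its header.

import hashlib
import os
from keepassdb.util import transform_key

startkey = hashlib.sha256(b'my master password').digest()  # 32 bytes, AES-block aligned
seed_key = os.urandom(32)   # AES key used for the transform rounds
seed_rand = os.urandom(16)  # random seed hashed into the final key
final_key = transform_key(startkey, seed_key, seed_rand, rounds=6000)
print(len(final_key))  # 32 -- a SHA-256 digest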
wylee/runcommands
runcommands/completion/__init__.py
complete
def complete(command_line, current_token, position, shell: arg(choices=('bash', 'fish'))): """Find completions for current command. This assumes that we'll handle all completion logic here and that the shell's automatic file name completion is disabled. Args: command_line: Command line current_token: Token at cursor position: Current cursor position shell: Name of shell """ position = int(position) tokens = shlex.split(command_line[:position]) all_argv, run_argv, command_argv = run.partition_argv(tokens[1:]) run_args = run.parse_args(run_argv) module = run_args.get('commands_module') module = module or DEFAULT_COMMANDS_MODULE module = normalize_path(module) try: collection = Collection.load_from_module(module) except Exception: collection = {} found_command = find_command(collection, tokens) or run if current_token: # Completing either a command name, option name, or path. if current_token.startswith('-'): if current_token not in found_command.option_map: print_command_options(found_command, current_token) else: print_commands(collection, shell) path = os.path.expanduser(current_token) path = os.path.expandvars(path) paths = glob.glob('%s*' % path) if paths: for entry in paths: if os.path.isdir(entry): print('%s/' % entry) else: print(entry) else: # Completing option value. If a value isn't expected, show the # options for the current command and the list of commands # instead. option = found_command.option_map.get(tokens[-1]) if option and option.takes_value: if option.choices: for choice in option.choices: print(choice) else: for entry in os.listdir(): if os.path.isdir(entry): print('%s/' % entry) else: print(entry) else: print_command_options(found_command) print_commands(collection, shell)
python
def complete(command_line, current_token, position, shell: arg(choices=('bash', 'fish'))): """Find completions for current command. This assumes that we'll handle all completion logic here and that the shell's automatic file name completion is disabled. Args: command_line: Command line current_token: Token at cursor position: Current cursor position shell: Name of shell """ position = int(position) tokens = shlex.split(command_line[:position]) all_argv, run_argv, command_argv = run.partition_argv(tokens[1:]) run_args = run.parse_args(run_argv) module = run_args.get('commands_module') module = module or DEFAULT_COMMANDS_MODULE module = normalize_path(module) try: collection = Collection.load_from_module(module) except Exception: collection = {} found_command = find_command(collection, tokens) or run if current_token: # Completing either a command name, option name, or path. if current_token.startswith('-'): if current_token not in found_command.option_map: print_command_options(found_command, current_token) else: print_commands(collection, shell) path = os.path.expanduser(current_token) path = os.path.expandvars(path) paths = glob.glob('%s*' % path) if paths: for entry in paths: if os.path.isdir(entry): print('%s/' % entry) else: print(entry) else: # Completing option value. If a value isn't expected, show the # options for the current command and the list of commands # instead. option = found_command.option_map.get(tokens[-1]) if option and option.takes_value: if option.choices: for choice in option.choices: print(choice) else: for entry in os.listdir(): if os.path.isdir(entry): print('%s/' % entry) else: print(entry) else: print_command_options(found_command) print_commands(collection, shell)
[ "def", "complete", "(", "command_line", ",", "current_token", ",", "position", ",", "shell", ":", "arg", "(", "choices", "=", "(", "'bash'", ",", "'fish'", ")", ")", ")", ":", "position", "=", "int", "(", "position", ")", "tokens", "=", "shlex", ".", "split", "(", "command_line", "[", ":", "position", "]", ")", "all_argv", ",", "run_argv", ",", "command_argv", "=", "run", ".", "partition_argv", "(", "tokens", "[", "1", ":", "]", ")", "run_args", "=", "run", ".", "parse_args", "(", "run_argv", ")", "module", "=", "run_args", ".", "get", "(", "'commands_module'", ")", "module", "=", "module", "or", "DEFAULT_COMMANDS_MODULE", "module", "=", "normalize_path", "(", "module", ")", "try", ":", "collection", "=", "Collection", ".", "load_from_module", "(", "module", ")", "except", "Exception", ":", "collection", "=", "{", "}", "found_command", "=", "find_command", "(", "collection", ",", "tokens", ")", "or", "run", "if", "current_token", ":", "# Completing either a command name, option name, or path.", "if", "current_token", ".", "startswith", "(", "'-'", ")", ":", "if", "current_token", "not", "in", "found_command", ".", "option_map", ":", "print_command_options", "(", "found_command", ",", "current_token", ")", "else", ":", "print_commands", "(", "collection", ",", "shell", ")", "path", "=", "os", ".", "path", ".", "expanduser", "(", "current_token", ")", "path", "=", "os", ".", "path", ".", "expandvars", "(", "path", ")", "paths", "=", "glob", ".", "glob", "(", "'%s*'", "%", "path", ")", "if", "paths", ":", "for", "entry", "in", "paths", ":", "if", "os", ".", "path", ".", "isdir", "(", "entry", ")", ":", "print", "(", "'%s/'", "%", "entry", ")", "else", ":", "print", "(", "entry", ")", "else", ":", "# Completing option value. If a value isn't expected, show the", "# options for the current command and the list of commands", "# instead.", "option", "=", "found_command", ".", "option_map", ".", "get", "(", "tokens", "[", "-", "1", "]", ")", "if", "option", "and", "option", ".", "takes_value", ":", "if", "option", ".", "choices", ":", "for", "choice", "in", "option", ".", "choices", ":", "print", "(", "choice", ")", "else", ":", "for", "entry", "in", "os", ".", "listdir", "(", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "entry", ")", ":", "print", "(", "'%s/'", "%", "entry", ")", "else", ":", "print", "(", "entry", ")", "else", ":", "print_command_options", "(", "found_command", ")", "print_commands", "(", "collection", ",", "shell", ")" ]
Find completions for current command. This assumes that we'll handle all completion logic here and that the shell's automatic file name completion is disabled. Args: command_line: Command line current_token: Token at cursor position: Current cursor position shell: Name of shell
[ "Find", "completions", "for", "current", "command", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/completion/__init__.py#L13-L80
train
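The command is meant to be wired into a shell's completion hook, but it can also be exercised directly for debugging; this sketch assumes runcommands is installed and that a default commands module resolves. position is the cursor offset into the command line, and exact output depends on runcommands internals.

from runcommands.completion import complete

line = 'runcommands --'
complete(line, '--', str(len(line)), shell='bash')  # prints candidate options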
kevinconway/venvctrl
venvctrl/venv/pip.py
PipMixin.install_package
def install_package(self, name, index=None, force=False, update=False):
    """Install a given package.

    Args:
        name (str): The package name to install. This can be any valid
            pip package specification.
        index (str): The URL for a pypi index to use.
        force (bool): Force the reinstall of packages during updates.
        update (bool): Update the package if it is out of date.
    """
    cmd = 'install'
    if force:
        cmd = '{0} {1}'.format(cmd, '--force-reinstall')

    if update:
        cmd = '{0} {1}'.format(cmd, '--upgrade')

    if index:
        cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))

    self.pip('{0} {1}'.format(cmd, name))
python
def install_package(self, name, index=None, force=False, update=False):
    """Install a given package.

    Args:
        name (str): The package name to install. This can be any valid
            pip package specification.
        index (str): The URL for a pypi index to use.
        force (bool): Force the reinstall of packages during updates.
        update (bool): Update the package if it is out of date.
    """
    cmd = 'install'
    if force:
        cmd = '{0} {1}'.format(cmd, '--force-reinstall')

    if update:
        cmd = '{0} {1}'.format(cmd, '--upgrade')

    if index:
        cmd = '{0} {1}'.format(cmd, '--index-url {0}'.format(index))

    self.pip('{0} {1}'.format(cmd, name))
[ "def", "install_package", "(", "self", ",", "name", ",", "index", "=", "None", ",", "force", "=", "False", ",", "update", "=", "False", ")", ":", "cmd", "=", "'install'", "if", "force", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--force-reinstall'", ")", "if", "update", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--update'", ")", "if", "index", ":", "cmd", "=", "'{0} {1}'", ".", "format", "(", "cmd", ",", "'--index-url {0}'", ".", "format", "(", "index", ")", ")", "self", ".", "pip", "(", "'{0} {1}'", ".", "format", "(", "cmd", ",", "name", ")", ")" ]
Install a given package.

Args:
    name (str): The package name to install. This can be any valid
        pip package specification.
    index (str): The URL for a pypi index to use.
    force (bool): Force the reinstall of packages during updates.
    update (bool): Update the package if it is out of date.
[ "Install", "a", "given", "package", "." ]
36d4e0e4d5ebced6385a6ade1198f4769ff2df41
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/pip.py#L27-L50
train
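PipMixin is mixed into venvctrl's virtual-environment wrapper, so the method is normally reached through that object. Assuming the venvctrl package and its documented api module, usage is roughly as follows; the path is a placeholder. (Note the upstream code passed pip the nonexistent --update flag; the fix above uses pip's actual --upgrade flag.)

from venvctrl import api

venv = api.VirtualEnvironment('/path/to/venv')
venv.install_package('requests', update=True)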
kevinconway/venvctrl
venvctrl/venv/pip.py
PipMixin.install_requirements
def install_requirements(self, path, index=None): """Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use. """ cmd = 'install -r {0}'.format(path) if index: cmd = 'install --index-url {0} -r {1}'.format(index, path) self.pip(cmd)
python
def install_requirements(self, path, index=None): """Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use. """ cmd = 'install -r {0}'.format(path) if index: cmd = 'install --index-url {0} -r {1}'.format(index, path) self.pip(cmd)
[ "def", "install_requirements", "(", "self", ",", "path", ",", "index", "=", "None", ")", ":", "cmd", "=", "'install -r {0}'", ".", "format", "(", "path", ")", "if", "index", ":", "cmd", "=", "'install --index-url {0} -r {1}'", ".", "format", "(", "index", ",", "path", ")", "self", ".", "pip", "(", "cmd", ")" ]
Install packages from a requirements.txt file. Args: path (str): The path to the requirements file. index (str): The URL for a pypi index to use.
[ "Install", "packages", "from", "a", "requirements", ".", "txt", "file", "." ]
36d4e0e4d5ebced6385a6ade1198f4769ff2df41
https://github.com/kevinconway/venvctrl/blob/36d4e0e4d5ebced6385a6ade1198f4769ff2df41/venvctrl/venv/pip.py#L52-L64
train
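The same wrapper covers requirements files; the index keyword swaps in a custom PyPI mirror. The paths and URL below are placeholders, assuming the same venvctrl api module as above.

from venvctrl import api

venv = api.VirtualEnvironment('/path/to/venv')
venv.install_requirements('/path/to/requirements.txt',
                          index='https://pypi.example.org/simple')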
sirfoga/pyhal
hal/times/dates.py
Weekday.get_next
def get_next(weekday, including_today=False):
    """Gets next day of week

    :param weekday: day of week
    :param including_today: whether to return today's date if today is
        already the requested weekday
    :return: date of the next occurrence of the given weekday
    """
    now = datetime.datetime.now()
    if now.weekday() == weekday.value and including_today:
        delta = datetime.timedelta(days=0)
    elif now.weekday() == weekday.value and not including_today:
        delta = datetime.timedelta(days=7)
    else:
        delta = datetime.timedelta(
            (7 + weekday.value - now.weekday()) % 7
        )  # delta to the next occurrence
    return Day(now + delta).get_just_date()
python
def get_next(weekday, including_today=False):
    """Gets next day of week

    :param weekday: day of week
    :param including_today: whether to return today's date if today is
        already the requested weekday
    :return: date of the next occurrence of the given weekday
    """
    now = datetime.datetime.now()
    if now.weekday() == weekday.value and including_today:
        delta = datetime.timedelta(days=0)
    elif now.weekday() == weekday.value and not including_today:
        delta = datetime.timedelta(days=7)
    else:
        delta = datetime.timedelta(
            (7 + weekday.value - now.weekday()) % 7
        )  # delta to the next occurrence
    return Day(now + delta).get_just_date()
[ "def", "get_next", "(", "weekday", ",", "including_today", "=", "False", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "now", ".", "weekday", "(", ")", "==", "weekday", ".", "value", "and", "including_today", ":", "delta", "=", "datetime", ".", "timedelta", "(", "days", "=", "0", ")", "elif", "now", ".", "weekday", "(", ")", "==", "weekday", ".", "value", "and", "not", "including_today", ":", "delta", "=", "datetime", ".", "timedelta", "(", "days", "=", "7", ")", "else", ":", "delta", "=", "datetime", ".", "timedelta", "(", "(", "7", "+", "weekday", ".", "value", "-", "now", ".", "weekday", "(", ")", ")", "%", "7", ")", "# times delta to next instance", "return", "Day", "(", "now", "+", "delta", ")", ".", "get_just_date", "(", ")" ]
Gets next day of week

:param weekday: day of week
:param including_today: whether to return today's date if today is
    already the requested weekday
:return: date of the next occurrence of the given weekday
[ "Gets", "next", "day", "of", "week" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/dates.py#L21-L37
train
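Assuming pyhal is installed and Weekday is an enum whose values match datetime.weekday() (Monday = 0), the helper resolves the next calendar date. get_next appears to be a static helper taking the target weekday as its first argument; the MONDAY member name is an assumption here.

from hal.times.dates import Weekday

next_monday = Weekday.get_next(Weekday.MONDAY)
print(next_monday)  # date of the upcoming Monday

today_or_next = Weekday.get_next(Weekday.MONDAY, including_today=True)
print(today_or_next)  # today's date if run on a Monday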