repo (string, 7-55) | path (string, 4-127) | func_name (string, 1-88) | original_string (string, 75-19.8k) | language (1 class) | code (string, 75-19.8k) | code_tokens (list) | docstring (string, 3-17.3k) | docstring_tokens (list) | sha (string, 40) | url (string, 87-242) | partition (1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump.prepare_blobs | def prepare_blobs(self):
"""Populate the blobs"""
self.raw_header = self.extract_header()
if self.cache_enabled:
self._cache_offsets() | python | def prepare_blobs(self):
"""Populate the blobs"""
self.raw_header = self.extract_header()
if self.cache_enabled:
self._cache_offsets() | [
"def",
"prepare_blobs",
"(",
"self",
")",
":",
"self",
".",
"raw_header",
"=",
"self",
".",
"extract_header",
"(",
")",
"if",
"self",
".",
"cache_enabled",
":",
"self",
".",
"_cache_offsets",
"(",
")"
] | Populate the blobs | [
"Populate",
"the",
"blobs"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L163-L167 | train |
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump.extract_header | def extract_header(self):
"""Create a dictionary with the EVT header information"""
self.log.info("Extracting the header")
raw_header = self.raw_header = defaultdict(list)
first_line = self.blob_file.readline()
first_line = try_decode_string(first_line)
self.blob_file.seek(0, 0)
if not first_line.startswith(str('start_run')):
self.log.warning("No header found.")
return raw_header
for line in iter(self.blob_file.readline, ''):
line = try_decode_string(line)
line = line.strip()
try:
tag, value = str(line).split(':')
except ValueError:
continue
raw_header[tag].append(str(value).split())
if line.startswith(str('end_event:')):
self._record_offset()
if self._auto_parse and 'physics' in raw_header:
parsers = [p[0].lower() for p in raw_header['physics']]
self._register_parsers(parsers)
return raw_header
raise ValueError("Incomplete header, no 'end_event' tag found!") | python | def extract_header(self):
"""Create a dictionary with the EVT header information"""
self.log.info("Extracting the header")
raw_header = self.raw_header = defaultdict(list)
first_line = self.blob_file.readline()
first_line = try_decode_string(first_line)
self.blob_file.seek(0, 0)
if not first_line.startswith(str('start_run')):
self.log.warning("No header found.")
return raw_header
for line in iter(self.blob_file.readline, ''):
line = try_decode_string(line)
line = line.strip()
try:
tag, value = str(line).split(':')
except ValueError:
continue
raw_header[tag].append(str(value).split())
if line.startswith(str('end_event:')):
self._record_offset()
if self._auto_parse and 'physics' in raw_header:
parsers = [p[0].lower() for p in raw_header['physics']]
self._register_parsers(parsers)
return raw_header
raise ValueError("Incomplete header, no 'end_event' tag found!") | [
"def",
"extract_header",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Extracting the header\"",
")",
"raw_header",
"=",
"self",
".",
"raw_header",
"=",
"defaultdict",
"(",
"list",
")",
"first_line",
"=",
"self",
".",
"blob_file",
".",
"readline",
"(",
")",
"first_line",
"=",
"try_decode_string",
"(",
"first_line",
")",
"self",
".",
"blob_file",
".",
"seek",
"(",
"0",
",",
"0",
")",
"if",
"not",
"first_line",
".",
"startswith",
"(",
"str",
"(",
"'start_run'",
")",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"No header found.\"",
")",
"return",
"raw_header",
"for",
"line",
"in",
"iter",
"(",
"self",
".",
"blob_file",
".",
"readline",
",",
"''",
")",
":",
"line",
"=",
"try_decode_string",
"(",
"line",
")",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"try",
":",
"tag",
",",
"value",
"=",
"str",
"(",
"line",
")",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
":",
"continue",
"raw_header",
"[",
"tag",
"]",
".",
"append",
"(",
"str",
"(",
"value",
")",
".",
"split",
"(",
")",
")",
"if",
"line",
".",
"startswith",
"(",
"str",
"(",
"'end_event:'",
")",
")",
":",
"self",
".",
"_record_offset",
"(",
")",
"if",
"self",
".",
"_auto_parse",
"and",
"'physics'",
"in",
"raw_header",
":",
"parsers",
"=",
"[",
"p",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"for",
"p",
"in",
"raw_header",
"[",
"'physics'",
"]",
"]",
"self",
".",
"_register_parsers",
"(",
"parsers",
")",
"return",
"raw_header",
"raise",
"ValueError",
"(",
"\"Incomplete header, no 'end_event' tag found!\"",
")"
] | Create a dictionary with the EVT header information | [
"Create",
"a",
"dictionary",
"with",
"the",
"EVT",
"header",
"information"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L169-L193 | train |
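Aside on the `extract_header` row above: it accumulates `tag: value` header lines into a `defaultdict(list)` and stops at the first `end_event:` line. The following is a minimal, self-contained sketch of that parsing pattern; the `parse_evt_header` name and the sample text are illustrative assumptions, not part of km3pipe's API.

```python
from collections import defaultdict

def parse_evt_header(text):
    """Collect 'tag: value' lines into lists of whitespace-split tokens."""
    header = defaultdict(list)
    for line in text.splitlines():
        line = line.strip()
        try:
            tag, value = line.split(':')   # lines without exactly one ':' are skipped
        except ValueError:
            continue
        header[tag].append(value.split())
        if line.startswith('end_event:'):  # header ends at the first event terminator
            break
    return header

sample = "start_run: 1\nphysics: GENHEN 7.2-220514\nend_event:"
print(dict(parse_evt_header(sample)))
# {'start_run': [['1']], 'physics': [['GENHEN', '7.2-220514']], 'end_event': [[]]}
```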
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump.get_blob | def get_blob(self, index):
"""Return a blob with the event at the given index"""
self.log.info("Retrieving blob #{}".format(index))
if index > len(self.event_offsets) - 1:
self.log.info("Index not in cache, caching offsets")
self._cache_offsets(index, verbose=False)
self.blob_file.seek(self.event_offsets[index], 0)
blob = self._create_blob()
if blob is None:
self.log.info("Empty blob created...")
raise IndexError
else:
self.log.debug("Applying parsers...")
for parser in self.parsers:
parser(blob)
self.log.debug("Returning the blob")
return blob | python | def get_blob(self, index):
"""Return a blob with the event at the given index"""
self.log.info("Retrieving blob #{}".format(index))
if index > len(self.event_offsets) - 1:
self.log.info("Index not in cache, caching offsets")
self._cache_offsets(index, verbose=False)
self.blob_file.seek(self.event_offsets[index], 0)
blob = self._create_blob()
if blob is None:
self.log.info("Empty blob created...")
raise IndexError
else:
self.log.debug("Applying parsers...")
for parser in self.parsers:
parser(blob)
self.log.debug("Returning the blob")
return blob | [
"def",
"get_blob",
"(",
"self",
",",
"index",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Retrieving blob #{}\"",
".",
"format",
"(",
"index",
")",
")",
"if",
"index",
">",
"len",
"(",
"self",
".",
"event_offsets",
")",
"-",
"1",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Index not in cache, caching offsets\"",
")",
"self",
".",
"_cache_offsets",
"(",
"index",
",",
"verbose",
"=",
"False",
")",
"self",
".",
"blob_file",
".",
"seek",
"(",
"self",
".",
"event_offsets",
"[",
"index",
"]",
",",
"0",
")",
"blob",
"=",
"self",
".",
"_create_blob",
"(",
")",
"if",
"blob",
"is",
"None",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Empty blob created...\"",
")",
"raise",
"IndexError",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Applying parsers...\"",
")",
"for",
"parser",
"in",
"self",
".",
"parsers",
":",
"parser",
"(",
"blob",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Returning the blob\"",
")",
"return",
"blob"
] | Return a blob with the event at the given index | [
"Return",
"a",
"blob",
"with",
"the",
"event",
"at",
"the",
"given",
"index"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L195-L211 | train |
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump.process | def process(self, blob=None):
"""Pump the next blob to the modules"""
try:
blob = self.get_blob(self.index)
except IndexError:
self.log.info("Got an IndexError, trying the next file")
if (self.basename
or self.filenames) and self.file_index < self.index_stop:
self.file_index += 1
self.log.info("Now at file_index={}".format(self.file_index))
self._reset()
self.blob_file.close()
self.log.info("Resetting blob index to 0")
self.index = 0
file_index = self._get_file_index_str()
if self.filenames:
self.filename = self.filenames[self.file_index - 1]
elif self.basename:
self.filename = "{}{}{}.evt" \
.format(self.basename, file_index, self.suffix)
self.log.info("Next filename: {}".format(self.filename))
self.print("Opening {0}".format(self.filename))
self.open_file(self.filename)
self.prepare_blobs()
try:
blob = self.get_blob(self.index)
except IndexError:
self.log.warning(
"No blob found in file {}".format(self.filename)
)
else:
return blob
self.log.info("No files left, terminating the pipeline")
raise StopIteration
self.index += 1
return blob | python | def process(self, blob=None):
"""Pump the next blob to the modules"""
try:
blob = self.get_blob(self.index)
except IndexError:
self.log.info("Got an IndexError, trying the next file")
if (self.basename
or self.filenames) and self.file_index < self.index_stop:
self.file_index += 1
self.log.info("Now at file_index={}".format(self.file_index))
self._reset()
self.blob_file.close()
self.log.info("Resetting blob index to 0")
self.index = 0
file_index = self._get_file_index_str()
if self.filenames:
self.filename = self.filenames[self.file_index - 1]
elif self.basename:
self.filename = "{}{}{}.evt" \
.format(self.basename, file_index, self.suffix)
self.log.info("Next filename: {}".format(self.filename))
self.print("Opening {0}".format(self.filename))
self.open_file(self.filename)
self.prepare_blobs()
try:
blob = self.get_blob(self.index)
except IndexError:
self.log.warning(
"No blob found in file {}".format(self.filename)
)
else:
return blob
self.log.info("No files left, terminating the pipeline")
raise StopIteration
self.index += 1
return blob | [
"def",
"process",
"(",
"self",
",",
"blob",
"=",
"None",
")",
":",
"try",
":",
"blob",
"=",
"self",
".",
"get_blob",
"(",
"self",
".",
"index",
")",
"except",
"IndexError",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Got an IndexError, trying the next file\"",
")",
"if",
"(",
"self",
".",
"basename",
"or",
"self",
".",
"filenames",
")",
"and",
"self",
".",
"file_index",
"<",
"self",
".",
"index_stop",
":",
"self",
".",
"file_index",
"+=",
"1",
"self",
".",
"log",
".",
"info",
"(",
"\"Now at file_index={}\"",
".",
"format",
"(",
"self",
".",
"file_index",
")",
")",
"self",
".",
"_reset",
"(",
")",
"self",
".",
"blob_file",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Resetting blob index to 0\"",
")",
"self",
".",
"index",
"=",
"0",
"file_index",
"=",
"self",
".",
"_get_file_index_str",
"(",
")",
"if",
"self",
".",
"filenames",
":",
"self",
".",
"filename",
"=",
"self",
".",
"filenames",
"[",
"self",
".",
"file_index",
"-",
"1",
"]",
"elif",
"self",
".",
"basename",
":",
"self",
".",
"filename",
"=",
"\"{}{}{}.evt\"",
".",
"format",
"(",
"self",
".",
"basename",
",",
"file_index",
",",
"self",
".",
"suffix",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Next filename: {}\"",
".",
"format",
"(",
"self",
".",
"filename",
")",
")",
"self",
".",
"print",
"(",
"\"Opening {0}\"",
".",
"format",
"(",
"self",
".",
"filename",
")",
")",
"self",
".",
"open_file",
"(",
"self",
".",
"filename",
")",
"self",
".",
"prepare_blobs",
"(",
")",
"try",
":",
"blob",
"=",
"self",
".",
"get_blob",
"(",
"self",
".",
"index",
")",
"except",
"IndexError",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"No blob found in file {}\"",
".",
"format",
"(",
"self",
".",
"filename",
")",
")",
"else",
":",
"return",
"blob",
"self",
".",
"log",
".",
"info",
"(",
"\"No files left, terminating the pipeline\"",
")",
"raise",
"StopIteration",
"self",
".",
"index",
"+=",
"1",
"return",
"blob"
] | Pump the next blob to the modules | [
"Pump",
"the",
"next",
"blob",
"to",
"the",
"modules"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L213-L250 | train |
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump._cache_offsets | def _cache_offsets(self, up_to_index=None, verbose=True):
"""Cache all event offsets."""
if not up_to_index:
if verbose:
self.print("Caching event file offsets, this may take a bit.")
self.blob_file.seek(0, 0)
self.event_offsets = []
if not self.raw_header:
self.event_offsets.append(0)
else:
self.blob_file.seek(self.event_offsets[-1], 0)
for line in iter(self.blob_file.readline, ''):
line = try_decode_string(line)
if line.startswith('end_event:'):
self._record_offset()
if len(self.event_offsets) % 100 == 0:
if verbose:
print('.', end='')
sys.stdout.flush()
if up_to_index and len(self.event_offsets) >= up_to_index + 1:
return
self.event_offsets.pop() # get rid of the last entry
if not up_to_index:
self.whole_file_cached = True
self.print("\n{0} events indexed.".format(len(self.event_offsets))) | python | def _cache_offsets(self, up_to_index=None, verbose=True):
"""Cache all event offsets."""
if not up_to_index:
if verbose:
self.print("Caching event file offsets, this may take a bit.")
self.blob_file.seek(0, 0)
self.event_offsets = []
if not self.raw_header:
self.event_offsets.append(0)
else:
self.blob_file.seek(self.event_offsets[-1], 0)
for line in iter(self.blob_file.readline, ''):
line = try_decode_string(line)
if line.startswith('end_event:'):
self._record_offset()
if len(self.event_offsets) % 100 == 0:
if verbose:
print('.', end='')
sys.stdout.flush()
if up_to_index and len(self.event_offsets) >= up_to_index + 1:
return
self.event_offsets.pop() # get rid of the last entry
if not up_to_index:
self.whole_file_cached = True
self.print("\n{0} events indexed.".format(len(self.event_offsets))) | [
"def",
"_cache_offsets",
"(",
"self",
",",
"up_to_index",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"not",
"up_to_index",
":",
"if",
"verbose",
":",
"self",
".",
"print",
"(",
"\"Caching event file offsets, this may take a bit.\"",
")",
"self",
".",
"blob_file",
".",
"seek",
"(",
"0",
",",
"0",
")",
"self",
".",
"event_offsets",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"raw_header",
":",
"self",
".",
"event_offsets",
".",
"append",
"(",
"0",
")",
"else",
":",
"self",
".",
"blob_file",
".",
"seek",
"(",
"self",
".",
"event_offsets",
"[",
"-",
"1",
"]",
",",
"0",
")",
"for",
"line",
"in",
"iter",
"(",
"self",
".",
"blob_file",
".",
"readline",
",",
"''",
")",
":",
"line",
"=",
"try_decode_string",
"(",
"line",
")",
"if",
"line",
".",
"startswith",
"(",
"'end_event:'",
")",
":",
"self",
".",
"_record_offset",
"(",
")",
"if",
"len",
"(",
"self",
".",
"event_offsets",
")",
"%",
"100",
"==",
"0",
":",
"if",
"verbose",
":",
"print",
"(",
"'.'",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"up_to_index",
"and",
"len",
"(",
"self",
".",
"event_offsets",
")",
">=",
"up_to_index",
"+",
"1",
":",
"return",
"self",
".",
"event_offsets",
".",
"pop",
"(",
")",
"# get rid of the last entry",
"if",
"not",
"up_to_index",
":",
"self",
".",
"whole_file_cached",
"=",
"True",
"self",
".",
"print",
"(",
"\"\\n{0} events indexed.\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"event_offsets",
")",
")",
")"
] | Cache all event offsets. | [
"Cache",
"all",
"event",
"offsets",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L252-L276 | train |
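To make the offset caching in the `_cache_offsets` row above concrete, here is a small standalone sketch that records the offset reached after each `end_event:` line so individual events can later be re-read with `seek()`. The helper name and the in-memory sample file are assumptions for illustration only; the real method also handles the header case and incremental caching.

```python
import io

def cache_offsets(fileobj):
    """Return the offsets at which each event starts, mirroring the caching loop above."""
    fileobj.seek(0)
    offsets = [0]                              # the first event starts at the beginning
    for line in iter(fileobj.readline, ''):
        if line.startswith('end_event:'):
            offsets.append(fileobj.tell())     # position right after the terminator
    offsets.pop()                              # drop the offset past the final event
    return offsets

f = io.StringIO("start_event: 1 1\nhit: 1 2 3\nend_event:\nstart_event: 2 1\nend_event:\n")
print(cache_offsets(f))                        # [0, 39] -- the second event starts at offset 39
```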
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump._record_offset | def _record_offset(self):
"""Stores the current file pointer position"""
offset = self.blob_file.tell()
self.event_offsets.append(offset) | python | def _record_offset(self):
"""Stores the current file pointer position"""
offset = self.blob_file.tell()
self.event_offsets.append(offset) | [
"def",
"_record_offset",
"(",
"self",
")",
":",
"offset",
"=",
"self",
".",
"blob_file",
".",
"tell",
"(",
")",
"self",
".",
"event_offsets",
".",
"append",
"(",
"offset",
")"
] | Stores the current file pointer position | [
"Stores",
"the",
"current",
"file",
"pointer",
"position"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L278-L281 | train |
tamasgal/km3pipe | km3pipe/io/evt.py | EvtPump._create_blob | def _create_blob(self):
"""Parse the next event from the current file position"""
blob = None
for line in self.blob_file:
line = try_decode_string(line)
line = line.strip()
if line == '':
self.log.info("Ignoring empty line...")
continue
if line.startswith('end_event:') and blob:
blob['raw_header'] = self.raw_header
return blob
try:
tag, values = line.split(':')
except ValueError:
self.log.warning("Ignoring corrupt line: {}".format(line))
continue
try:
values = tuple(split(values.strip(), callback=float))
except ValueError:
self.log.info("Empty value: {}".format(values))
if line.startswith('start_event:'):
blob = Blob()
blob[tag] = tuple(int(v) for v in values)
continue
if tag not in blob:
blob[tag] = []
blob[tag].append(values) | python | def _create_blob(self):
"""Parse the next event from the current file position"""
blob = None
for line in self.blob_file:
line = try_decode_string(line)
line = line.strip()
if line == '':
self.log.info("Ignoring empty line...")
continue
if line.startswith('end_event:') and blob:
blob['raw_header'] = self.raw_header
return blob
try:
tag, values = line.split(':')
except ValueError:
self.log.warning("Ignoring corrupt line: {}".format(line))
continue
try:
values = tuple(split(values.strip(), callback=float))
except ValueError:
self.log.info("Empty value: {}".format(values))
if line.startswith('start_event:'):
blob = Blob()
blob[tag] = tuple(int(v) for v in values)
continue
if tag not in blob:
blob[tag] = []
blob[tag].append(values) | [
"def",
"_create_blob",
"(",
"self",
")",
":",
"blob",
"=",
"None",
"for",
"line",
"in",
"self",
".",
"blob_file",
":",
"line",
"=",
"try_decode_string",
"(",
"line",
")",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"==",
"''",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Ignoring empty line...\"",
")",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'end_event:'",
")",
"and",
"blob",
":",
"blob",
"[",
"'raw_header'",
"]",
"=",
"self",
".",
"raw_header",
"return",
"blob",
"try",
":",
"tag",
",",
"values",
"=",
"line",
".",
"split",
"(",
"':'",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Ignoring corrupt line: {}\"",
".",
"format",
"(",
"line",
")",
")",
"continue",
"try",
":",
"values",
"=",
"tuple",
"(",
"split",
"(",
"values",
".",
"strip",
"(",
")",
",",
"callback",
"=",
"float",
")",
")",
"except",
"ValueError",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Empty value: {}\"",
".",
"format",
"(",
"values",
")",
")",
"if",
"line",
".",
"startswith",
"(",
"'start_event:'",
")",
":",
"blob",
"=",
"Blob",
"(",
")",
"blob",
"[",
"tag",
"]",
"=",
"tuple",
"(",
"int",
"(",
"v",
")",
"for",
"v",
"in",
"values",
")",
"continue",
"if",
"tag",
"not",
"in",
"blob",
":",
"blob",
"[",
"tag",
"]",
"=",
"[",
"]",
"blob",
"[",
"tag",
"]",
".",
"append",
"(",
"values",
")"
] | Parse the next event from the current file position | [
"Parse",
"the",
"next",
"event",
"from",
"the",
"current",
"file",
"position"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/evt.py#L283-L310 | train |
abiiranathan/db2 | db2/__main__.py | runserver | def runserver(project_name):
'''
Runs a python cgi server in a subprocess.
'''
DIR = os.listdir(project_name)
if 'settings.py' not in DIR:
raise NotImplementedError('No file called: settings.py found in %s'%project_name)
CGI_BIN_FOLDER = os.path.join(project_name, 'cgi', 'cgi-bin')
CGI_FOLDER = os.path.join(project_name, 'cgi')
if not os.path.exists(CGI_BIN_FOLDER):
os.makedirs(CGI_BIN_FOLDER)
os.chdir(CGI_FOLDER)
subprocess.Popen("python -m http.server --cgi 8000") | python | def runserver(project_name):
'''
Runs a python cgi server in a subprocess.
'''
DIR = os.listdir(project_name)
if 'settings.py' not in DIR:
raise NotImplementedError('No file called: settings.py found in %s'%project_name)
CGI_BIN_FOLDER = os.path.join(project_name, 'cgi', 'cgi-bin')
CGI_FOLDER = os.path.join(project_name, 'cgi')
if not os.path.exists(CGI_BIN_FOLDER):
os.makedirs(CGI_BIN_FOLDER)
os.chdir(CGI_FOLDER)
subprocess.Popen("python -m http.server --cgi 8000") | [
"def",
"runserver",
"(",
"project_name",
")",
":",
"DIR",
"=",
"os",
".",
"listdir",
"(",
"project_name",
")",
"if",
"'settings.py'",
"not",
"in",
"DIR",
":",
"raise",
"NotImplementedError",
"(",
"'No file called: settings.py found in %s'",
"%",
"project_name",
")",
"CGI_BIN_FOLDER",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_name",
",",
"'cgi'",
",",
"'cgi-bin'",
")",
"CGI_FOLDER",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project_name",
",",
"'cgi'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"CGI_BIN_FOLDER",
")",
":",
"os",
".",
"makedirs",
"(",
"CGI_BIN_FOLDER",
")",
"os",
".",
"chdir",
"(",
"CGI_FOLDER",
")",
"subprocess",
".",
"Popen",
"(",
"\"python -m http.server --cgi 8000\"",
")"
] | Runs a python cgi server in a subprocess. | [
"Runs",
"a",
"python",
"cgi",
"server",
"in",
"a",
"subprocess",
"."
] | 347319e421921517bcae7639f524c3c3eb5446e6 | https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/__main__.py#L44-L59 | train |
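One note on the `runserver` row above: `subprocess.Popen("python -m http.server --cgi 8000")` passes the command as a single string, which only works without `shell=True` on Windows. A hedged, cross-platform variant (an editorial suggestion, not part of db2) would pass the arguments as a list:

```python
import subprocess

# Argument-list form works on Windows and POSIX alike, with no shell involved.
server = subprocess.Popen(["python", "-m", "http.server", "--cgi", "8000"])
```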
PrefPy/prefpy | prefpy/utilityFunction.py | UtilityFunction.getUtility | def getUtility(self, decision, sample, aggregationMode = "avg"):
"""
Get the utility of a given decision given a preference.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar sample: A representation of a preference. We do not assume that it is of a certain
type here and merely pass it to the getUtilities() method.
:ivar str aggregationMode: Identifies the aggregation mode of the utility function when the
decision selects more than one candidate. If the mode is "avg," the utility will be the
average of that of each candidate. If "min," the utility will be the minimum, and if
"max," the utility will be the maximum. By default the aggregation mode will be "avg."
"""
utilities = self.getUtilities(decision, sample)
if aggregationMode == "avg":
utility = numpy.mean(utilities)
elif aggregationMode == "min":
utility = min(utilities)
elif aggregationMode == "max":
utility = max(utilities)
else:
print("ERROR: aggregation mode not recognized")
exit()
return utility | python | def getUtility(self, decision, sample, aggregationMode = "avg"):
"""
Get the utility of a given decision given a preference.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar sample: A representation of a preference. We do not assume that it is of a certain
type here and merely pass it to the getUtilities() method.
:ivar str aggregationMode: Identifies the aggregation mode of the utility function when the
decision selects more than one candidate. If the mode is "avg," the utility will be the
average of that of each candidate. If "min," the utility will be the minimum, and if
"max," the utility will be the maximum. By default the aggregation mode will be "avg."
"""
utilities = self.getUtilities(decision, sample)
if aggregationMode == "avg":
utility = numpy.mean(utilities)
elif aggregationMode == "min":
utility = min(utilities)
elif aggregationMode == "max":
utility = max(utilities)
else:
print("ERROR: aggregation mode not recognized")
exit()
return utility | [
"def",
"getUtility",
"(",
"self",
",",
"decision",
",",
"sample",
",",
"aggregationMode",
"=",
"\"avg\"",
")",
":",
"utilities",
"=",
"self",
".",
"getUtilities",
"(",
"decision",
",",
"sample",
")",
"if",
"aggregationMode",
"==",
"\"avg\"",
":",
"utility",
"=",
"numpy",
".",
"mean",
"(",
"utilities",
")",
"elif",
"aggregationMode",
"==",
"\"min\"",
":",
"utility",
"=",
"min",
"(",
"utilities",
")",
"elif",
"aggregationMode",
"==",
"\"max\"",
":",
"utility",
"=",
"max",
"(",
"utilities",
")",
"else",
":",
"print",
"(",
"\"ERROR: aggregation mode not recognized\"",
")",
"exit",
"(",
")",
"return",
"utility"
] | Get the utility of a given decision given a preference.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar sample: A representation of a preference. We do not assume that it is of a certain
type here and merely pass it to the getUtilities() method.
:ivar str aggregationMode: Identifies the aggregation mode of the utility function when the
decision selects more than one candidate. If the mode is "avg," the utility will be the
average of that of each candidate. If "min," the utility will be the minimum, and if
"max," the utility will be the maximum. By default the aggregation mode will be "avg." | [
"Get",
"the",
"utility",
"of",
"a",
"given",
"decision",
"given",
"a",
"preference",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L13-L37 | train |
PrefPy/prefpy | prefpy/utilityFunction.py | UtilityFunctionMallowsPosScoring.getUtilities | def getUtilities(self, decision, orderVector):
"""
Returns a list of floats that contains the utilities of every candidate in the decision.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<int> orderVector: A list of integer representations for each candidate ordered
from most preferred to least.
"""
scoringVector = self.getScoringVector(orderVector)
utilities = []
for alt in decision:
altPosition = orderVector.index(alt)
utility = float(scoringVector[altPosition])
if self.isLoss == True:
utility = -1*utility
utilities.append(utility)
return utilities | python | def getUtilities(self, decision, orderVector):
"""
Returns a list of floats that contains the utilities of every candidate in the decision.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<int> orderVector: A list of integer representations for each candidate ordered
from most preferred to least.
"""
scoringVector = self.getScoringVector(orderVector)
utilities = []
for alt in decision:
altPosition = orderVector.index(alt)
utility = float(scoringVector[altPosition])
if self.isLoss == True:
utility = -1*utility
utilities.append(utility)
return utilities | [
"def",
"getUtilities",
"(",
"self",
",",
"decision",
",",
"orderVector",
")",
":",
"scoringVector",
"=",
"self",
".",
"getScoringVector",
"(",
"orderVector",
")",
"utilities",
"=",
"[",
"]",
"for",
"alt",
"in",
"decision",
":",
"altPosition",
"=",
"orderVector",
".",
"index",
"(",
"alt",
")",
"utility",
"=",
"float",
"(",
"scoringVector",
"[",
"altPosition",
"]",
")",
"if",
"self",
".",
"isLoss",
"==",
"True",
":",
"utility",
"=",
"-",
"1",
"*",
"utility",
"utilities",
".",
"append",
"(",
"utility",
")",
"return",
"utilities"
] | Returns a list of floats that contains the utilities of every candidate in the decision.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<int> orderVector: A list of integer representations for each candidate ordered
from most preferred to least. | [
"Returns",
"a",
"floats",
"that",
"contains",
"the",
"utilities",
"of",
"every",
"candidate",
"in",
"the",
"decision",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L59-L77 | train |
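To illustrate the positional-scoring utility computed in the row above, here is a tiny standalone example that looks up each decision alternative's position in an order vector and reads off its score. The Borda vector and candidate numbering are assumptions chosen for the example, and prefpy's `getScoringVector` is replaced by a hard-coded list.

```python
order_vector = [3, 1, 2]     # candidate 3 is ranked first, candidate 2 last
scoring_vector = [2, 1, 0]   # Borda scores for positions 0, 1, 2
decision = [1, 2]

# Same lookup as getUtilities(): position in the order vector -> positional score.
utilities = [float(scoring_vector[order_vector.index(alt)]) for alt in decision]
print(utilities)             # [1.0, 0.0]
```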
PrefPy/prefpy | prefpy/utilityFunction.py | UtilityFunctionCondorcetTopK.getUtilities | def getUtilities(self, decision, binaryRelations):
"""
Returns a list of floats that contains the utilities of every candidate in the decision. This was
adapted from code written by Lirong Xia.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<list,int> binaryRelations: A two-dimensional array whose number of rows and
columns is equal to the number of candidates. For each pair of candidates, cand1 and
cand2, binaryRelations[cand1-1][cand2-1] contains 1 if cand1 is ranked above cand2
and 0 otherwise.
"""
m = len(binaryRelations)
utilities = []
for cand in decision:
tops = [cand-1]
index = 0
while index < len(tops):
s = tops[index]
for j in range(m):
if j == s:
continue
if binaryRelations[j][s] > 0:
if j not in tops:
tops.append(j)
index += 1
if len(tops) <= self.k:
if self.isLoss == False:
utilities.append(1.0)
elif self.isLoss == True:
utilities.append(-1.0)
else:
utilities.append(0.0)
return utilities | python | def getUtilities(self, decision, binaryRelations):
"""
Returns a list of floats that contains the utilities of every candidate in the decision. This was
adapted from code written by Lirong Xia.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<list,int> binaryRelations: A two-dimensional array whose number of rows and
columns is equal to the number of candidates. For each pair of candidates, cand1 and
cand2, binaryRelations[cand1-1][cand2-1] contains 1 if cand1 is ranked above cand2
and 0 otherwise.
"""
m = len(binaryRelations)
utilities = []
for cand in decision:
tops = [cand-1]
index = 0
while index < len(tops):
s = tops[index]
for j in range(m):
if j == s:
continue
if binaryRelations[j][s] > 0:
if j not in tops:
tops.append(j)
index += 1
if len(tops) <= self.k:
if self.isLoss == False:
utilities.append(1.0)
elif self.isLoss == True:
utilities.append(-1.0)
else:
utilities.append(0.0)
return utilities | [
"def",
"getUtilities",
"(",
"self",
",",
"decision",
",",
"binaryRelations",
")",
":",
"m",
"=",
"len",
"(",
"binaryRelations",
")",
"utilities",
"=",
"[",
"]",
"for",
"cand",
"in",
"decision",
":",
"tops",
"=",
"[",
"cand",
"-",
"1",
"]",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"tops",
")",
":",
"s",
"=",
"tops",
"[",
"index",
"]",
"for",
"j",
"in",
"range",
"(",
"m",
")",
":",
"if",
"j",
"==",
"s",
":",
"continue",
"if",
"binaryRelations",
"[",
"j",
"]",
"[",
"s",
"]",
">",
"0",
":",
"if",
"j",
"not",
"in",
"tops",
":",
"tops",
".",
"append",
"(",
"j",
")",
"index",
"+=",
"1",
"if",
"len",
"(",
"tops",
")",
"<=",
"self",
".",
"k",
":",
"if",
"self",
".",
"isLoss",
"==",
"False",
":",
"utilities",
".",
"append",
"(",
"1.0",
")",
"elif",
"self",
".",
"isLoss",
"==",
"True",
":",
"utilities",
".",
"append",
"(",
"-",
"1.0",
")",
"else",
":",
"utilities",
".",
"append",
"(",
"0.0",
")",
"return",
"utilities"
] | Returns a list of floats that contains the utilities of every candidate in the decision. This was
adapted from code written by Lirong Xia.
:ivar list<int> decision: Contains a list of integer representations of candidates in the
current decision.
:ivar list<list,int> binaryRelations: A two-dimensional array whose number of rows and
columns is equal to the number of candidates. For each pair of candidates, cand1 and
cand2, binaryRelations[cand1-1][cand2-1] contains 1 if cand1 is ranked above cand2
and 0 otherwise. | [
"Returns",
"a",
"floats",
"that",
"contains",
"the",
"utilities",
"of",
"every",
"candidate",
"in",
"the",
"decision",
".",
"This",
"was",
"adapted",
"from",
"code",
"written",
"by",
"Lirong",
"Xia",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/utilityFunction.py#L140-L174 | train |
tamasgal/km3pipe | km3pipe/config.py | Config.db_credentials | def db_credentials(self):
"""Return username and password for the KM3NeT WebDB."""
try:
username = self.config.get('DB', 'username')
password = self.config.get('DB', 'password')
except Error:
username = input("Please enter your KM3NeT DB username: ")
password = getpass.getpass("Password: ")
return username, password | python | def db_credentials(self):
"""Return username and password for the KM3NeT WebDB."""
try:
username = self.config.get('DB', 'username')
password = self.config.get('DB', 'password')
except Error:
username = input("Please enter your KM3NeT DB username: ")
password = getpass.getpass("Password: ")
return username, password | [
"def",
"db_credentials",
"(",
"self",
")",
":",
"try",
":",
"username",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'DB'",
",",
"'username'",
")",
"password",
"=",
"self",
".",
"config",
".",
"get",
"(",
"'DB'",
",",
"'password'",
")",
"except",
"Error",
":",
"username",
"=",
"input",
"(",
"\"Please enter your KM3NeT DB username: \"",
")",
"password",
"=",
"getpass",
".",
"getpass",
"(",
"\"Password: \"",
")",
"return",
"username",
",",
"password"
] | Return username and password for the KM3NeT WebDB. | [
"Return",
"username",
"and",
"password",
"for",
"the",
"KM3NeT",
"WebDB",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/config.py#L104-L112 | train |
lexibank/pylexibank | src/pylexibank/__main__.py | get_path | def get_path(src): # pragma: no cover
"""
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
"""
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False | python | def get_path(src): # pragma: no cover
"""
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
"""
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False | [
"def",
"get_path",
"(",
"src",
")",
":",
"# pragma: no cover",
"res",
"=",
"None",
"while",
"not",
"res",
":",
"if",
"res",
"is",
"False",
":",
"print",
"(",
"colored",
"(",
"'You must provide a path to an existing directory!'",
",",
"'red'",
")",
")",
"print",
"(",
"'You need a local clone or release of (a fork of) '",
"'https://github.com/{0}'",
".",
"format",
"(",
"src",
")",
")",
"res",
"=",
"input",
"(",
"colored",
"(",
"'Local path to {0}: '",
".",
"format",
"(",
"src",
")",
",",
"'green'",
",",
"attrs",
"=",
"[",
"'blink'",
"]",
")",
")",
"if",
"res",
"and",
"Path",
"(",
"res",
")",
".",
"exists",
"(",
")",
":",
"return",
"Path",
"(",
"res",
")",
".",
"resolve",
"(",
")",
"res",
"=",
"False"
] | Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path | [
"Prompts",
"the",
"user",
"to",
"input",
"a",
"local",
"path",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/__main__.py#L53-L69 | train |
IRC-SPHERE/HyperStream | hyperstream/workflow/workflow_manager.py | WorkflowManager.execute_all | def execute_all(self):
"""
Execute all workflows
"""
for workflow_id in self.workflows:
if self.workflows[workflow_id].online:
for interval in self.workflows[workflow_id].requested_intervals:
logging.info("Executing workflow {} over interval {}".format(workflow_id, interval))
self.workflows[workflow_id].execute(interval) | python | def execute_all(self):
"""
Execute all workflows
"""
for workflow_id in self.workflows:
if self.workflows[workflow_id].online:
for interval in self.workflows[workflow_id].requested_intervals:
logging.info("Executing workflow {} over interval {}".format(workflow_id, interval))
self.workflows[workflow_id].execute(interval) | [
"def",
"execute_all",
"(",
"self",
")",
":",
"for",
"workflow_id",
"in",
"self",
".",
"workflows",
":",
"if",
"self",
".",
"workflows",
"[",
"workflow_id",
"]",
".",
"online",
":",
"for",
"interval",
"in",
"self",
".",
"workflows",
"[",
"workflow_id",
"]",
".",
"requested_intervals",
":",
"logging",
".",
"info",
"(",
"\"Executing workflow {} over interval {}\"",
".",
"format",
"(",
"workflow_id",
",",
"interval",
")",
")",
"self",
".",
"workflows",
"[",
"workflow_id",
"]",
".",
"execute",
"(",
"interval",
")"
] | Execute all workflows | [
"Execute",
"all",
"workflows"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow_manager.py#L350-L358 | train |
IRC-SPHERE/HyperStream | hyperstream/tool/tool.py | Tool.execute | def execute(self, sources, sink, interval, alignment_stream=None):
"""
Execute the tool over the given time interval.
If an alignment stream is given, the output instances will be aligned to this stream
:param sources: The source streams (possibly None)
:param sink: The sink stream
:param alignment_stream: The alignment stream
:param interval: The time interval
:type sources: list[Stream] | tuple[Stream] | None
:type sink: Stream
:type alignment_stream: Stream | None
:type interval: TimeInterval
:return: None
"""
if not isinstance(interval, TimeInterval):
raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
# logging.info(self.message(interval))
if interval.end > sink.channel.up_to_timestamp:
raise StreamNotAvailableError(sink.channel.up_to_timestamp)
required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
document_count = 0
for interval in required_intervals:
for stream_instance in self._execute(
sources=sources, alignment_stream=alignment_stream, interval=interval):
sink.writer(stream_instance)
document_count += 1
sink.calculated_intervals += interval
required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
# raise ToolExecutionError(required_intervals)
logging.error("{} execution error for time interval {} on stream {}".format(
self.name, interval, sink))
if not document_count:
logging.debug("{} did not produce any data for time interval {} on stream {}".format(
self.name, interval, sink))
self.write_to_history(
interval=interval,
tool=self.name,
document_count=document_count
) | python | def execute(self, sources, sink, interval, alignment_stream=None):
"""
Execute the tool over the given time interval.
If an alignment stream is given, the output instances will be aligned to this stream
:param sources: The source streams (possibly None)
:param sink: The sink stream
:param alignment_stream: The alignment stream
:param interval: The time interval
:type sources: list[Stream] | tuple[Stream] | None
:type sink: Stream
:type alignment_stream: Stream | None
:type interval: TimeInterval
:return: None
"""
if not isinstance(interval, TimeInterval):
raise TypeError('Expected TimeInterval, got {}'.format(type(interval)))
# logging.info(self.message(interval))
if interval.end > sink.channel.up_to_timestamp:
raise StreamNotAvailableError(sink.channel.up_to_timestamp)
required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
document_count = 0
for interval in required_intervals:
for stream_instance in self._execute(
sources=sources, alignment_stream=alignment_stream, interval=interval):
sink.writer(stream_instance)
document_count += 1
sink.calculated_intervals += interval
required_intervals = TimeIntervals([interval]) - sink.calculated_intervals
if not required_intervals.is_empty:
# raise ToolExecutionError(required_intervals)
logging.error("{} execution error for time interval {} on stream {}".format(
self.name, interval, sink))
if not document_count:
logging.debug("{} did not produce any data for time interval {} on stream {}".format(
self.name, interval, sink))
self.write_to_history(
interval=interval,
tool=self.name,
document_count=document_count
) | [
"def",
"execute",
"(",
"self",
",",
"sources",
",",
"sink",
",",
"interval",
",",
"alignment_stream",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"interval",
",",
"TimeInterval",
")",
":",
"raise",
"TypeError",
"(",
"'Expected TimeInterval, got {}'",
".",
"format",
"(",
"type",
"(",
"interval",
")",
")",
")",
"# logging.info(self.message(interval))",
"if",
"interval",
".",
"end",
">",
"sink",
".",
"channel",
".",
"up_to_timestamp",
":",
"raise",
"StreamNotAvailableError",
"(",
"sink",
".",
"channel",
".",
"up_to_timestamp",
")",
"required_intervals",
"=",
"TimeIntervals",
"(",
"[",
"interval",
"]",
")",
"-",
"sink",
".",
"calculated_intervals",
"if",
"not",
"required_intervals",
".",
"is_empty",
":",
"document_count",
"=",
"0",
"for",
"interval",
"in",
"required_intervals",
":",
"for",
"stream_instance",
"in",
"self",
".",
"_execute",
"(",
"sources",
"=",
"sources",
",",
"alignment_stream",
"=",
"alignment_stream",
",",
"interval",
"=",
"interval",
")",
":",
"sink",
".",
"writer",
"(",
"stream_instance",
")",
"document_count",
"+=",
"1",
"sink",
".",
"calculated_intervals",
"+=",
"interval",
"required_intervals",
"=",
"TimeIntervals",
"(",
"[",
"interval",
"]",
")",
"-",
"sink",
".",
"calculated_intervals",
"if",
"not",
"required_intervals",
".",
"is_empty",
":",
"# raise ToolExecutionError(required_intervals)",
"logging",
".",
"error",
"(",
"\"{} execution error for time interval {} on stream {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"interval",
",",
"sink",
")",
")",
"if",
"not",
"document_count",
":",
"logging",
".",
"debug",
"(",
"\"{} did not produce any data for time interval {} on stream {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"interval",
",",
"sink",
")",
")",
"self",
".",
"write_to_history",
"(",
"interval",
"=",
"interval",
",",
"tool",
"=",
"self",
".",
"name",
",",
"document_count",
"=",
"document_count",
")"
] | Execute the tool over the given time interval.
If an alignment stream is given, the output instances will be aligned to this stream
:param sources: The source streams (possibly None)
:param sink: The sink stream
:param alignment_stream: The alignment stream
:param interval: The time interval
:type sources: list[Stream] | tuple[Stream] | None
:type sink: Stream
:type alignment_stream: Stream | None
:type interval: TimeInterval
:return: None | [
"Execute",
"the",
"tool",
"over",
"the",
"given",
"time",
"interval",
".",
"If",
"an",
"alignment",
"stream",
"is",
"given",
"the",
"output",
"instances",
"will",
"be",
"aligned",
"to",
"this",
"stream"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/tool/tool.py#L49-L97 | train |
IRC-SPHERE/HyperStream | hyperstream/channels/memory_channel.py | MemoryChannel.create_stream | def create_stream(self, stream_id, sandbox=None):
"""
Must be overridden by deriving classes, must create the stream according to the tool and return its unique
identifier stream_id
"""
if stream_id in self.streams:
raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id))
if sandbox is not None:
raise ValueError("Cannot use sandboxes with memory streams")
stream = Stream(channel=self, stream_id=stream_id, calculated_intervals=None, sandbox=None)
self.streams[stream_id] = stream
self.data[stream_id] = StreamInstanceCollection()
return stream | python | def create_stream(self, stream_id, sandbox=None):
"""
Must be overridden by deriving classes, must create the stream according to the tool and return its unique
identifier stream_id
"""
if stream_id in self.streams:
raise StreamAlreadyExistsError("Stream with id '{}' already exists".format(stream_id))
if sandbox is not None:
raise ValueError("Cannot use sandboxes with memory streams")
stream = Stream(channel=self, stream_id=stream_id, calculated_intervals=None, sandbox=None)
self.streams[stream_id] = stream
self.data[stream_id] = StreamInstanceCollection()
return stream | [
"def",
"create_stream",
"(",
"self",
",",
"stream_id",
",",
"sandbox",
"=",
"None",
")",
":",
"if",
"stream_id",
"in",
"self",
".",
"streams",
":",
"raise",
"StreamAlreadyExistsError",
"(",
"\"Stream with id '{}' already exists\"",
".",
"format",
"(",
"stream_id",
")",
")",
"if",
"sandbox",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot use sandboxes with memory streams\"",
")",
"stream",
"=",
"Stream",
"(",
"channel",
"=",
"self",
",",
"stream_id",
"=",
"stream_id",
",",
"calculated_intervals",
"=",
"None",
",",
"sandbox",
"=",
"None",
")",
"self",
".",
"streams",
"[",
"stream_id",
"]",
"=",
"stream",
"self",
".",
"data",
"[",
"stream_id",
"]",
"=",
"StreamInstanceCollection",
"(",
")",
"return",
"stream"
] | Must be overridden by deriving classes, must create the stream according to the tool and return its unique
identifier stream_id | [
"Must",
"be",
"overridden",
"by",
"deriving",
"classes",
"must",
"create",
"the",
"stream",
"according",
"to",
"the",
"tool",
"and",
"return",
"its",
"unique",
"identifier",
"stream_id"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L44-L59 | train |
IRC-SPHERE/HyperStream | hyperstream/channels/memory_channel.py | MemoryChannel.purge_all | def purge_all(self, remove_definitions=False):
"""
Clears all streams in the channel - use with caution!
:return: None
"""
for stream_id in list(self.streams.keys()):
self.purge_stream(stream_id, remove_definition=remove_definitions) | python | def purge_all(self, remove_definitions=False):
"""
Clears all streams in the channel - use with caution!
:return: None
"""
for stream_id in list(self.streams.keys()):
self.purge_stream(stream_id, remove_definition=remove_definitions) | [
"def",
"purge_all",
"(",
"self",
",",
"remove_definitions",
"=",
"False",
")",
":",
"for",
"stream_id",
"in",
"list",
"(",
"self",
".",
"streams",
".",
"keys",
"(",
")",
")",
":",
"self",
".",
"purge_stream",
"(",
"stream_id",
",",
"remove_definition",
"=",
"remove_definitions",
")"
] | Clears all streams in the channel - use with caution!
:return: None | [
"Clears",
"all",
"streams",
"in",
"the",
"channel",
"-",
"use",
"with",
"caution!"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L61-L68 | train |
IRC-SPHERE/HyperStream | hyperstream/channels/memory_channel.py | ReadOnlyMemoryChannel.update_state | def update_state(self, up_to_timestamp):
"""
Call this function to ensure that the channel is up to date at the time of timestamp.
I.e., all the streams that have been created before or at that timestamp are calculated exactly until
up_to_timestamp.
"""
for stream_id in self.streams:
self.streams[stream_id].calculated_intervals = TimeIntervals([(MIN_DATE, up_to_timestamp)])
self.up_to_timestamp = up_to_timestamp | python | def update_state(self, up_to_timestamp):
"""
Call this function to ensure that the channel is up to date at the time of timestamp.
I.e., all the streams that have been created before or at that timestamp are calculated exactly until
up_to_timestamp.
"""
for stream_id in self.streams:
self.streams[stream_id].calculated_intervals = TimeIntervals([(MIN_DATE, up_to_timestamp)])
self.up_to_timestamp = up_to_timestamp | [
"def",
"update_state",
"(",
"self",
",",
"up_to_timestamp",
")",
":",
"for",
"stream_id",
"in",
"self",
".",
"streams",
":",
"self",
".",
"streams",
"[",
"stream_id",
"]",
".",
"calculated_intervals",
"=",
"TimeIntervals",
"(",
"[",
"(",
"MIN_DATE",
",",
"up_to_timestamp",
")",
"]",
")",
"self",
".",
"up_to_timestamp",
"=",
"up_to_timestamp"
] | Call this function to ensure that the channel is up to date at the time of timestamp.
I.e., all the streams that have been created before or at that timestamp are calculated exactly until
up_to_timestamp. | [
"Call",
"this",
"function",
"to",
"ensure",
"that",
"the",
"channel",
"is",
"up",
"to",
"date",
"at",
"the",
"time",
"of",
"timestamp",
".",
"I",
".",
"e",
".",
"all",
"the",
"streams",
"that",
"have",
"been",
"created",
"before",
"or",
"at",
"that",
"timestamp",
"are",
"calculated",
"exactly",
"until",
"up_to_timestamp",
"."
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/channels/memory_channel.py#L158-L166 | train |
finklabs/korg | korg/pattern.py | PatternRepo.compile_regex | def compile_regex(self, pattern, flags=0):
"""Compile regex from pattern and pattern_dict"""
pattern_re = regex.compile('(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})')
while 1:
matches = [md.groupdict() for md in pattern_re.finditer(pattern)]
if len(matches) == 0:
break
for md in matches:
if md['patname'] in self.pattern_dict:
if md['subname']:
# TODO error if more than one occurance
if '(?P<' in self.pattern_dict[md['patname']]:
# this is not part of the original logstash implementation
# but it might be useful to be able to replace the
# group name used in the pattern
repl = regex.sub('\(\?P<(\w+)>', '(?P<%s>' % md['subname'],
self.pattern_dict[md['patname']], 1)
else:
repl = '(?P<%s>%s)' % (md['subname'],
self.pattern_dict[md['patname']])
else:
repl = self.pattern_dict[md['patname']]
# print "Replacing %s with %s" %(md['substr'], repl)
pattern = pattern.replace(md['substr'], repl)
else:
# print('patname not found')
# maybe missing path entry or missing pattern file?
return
# print 'pattern: %s' % pattern
return regex.compile(pattern, flags) | python | def compile_regex(self, pattern, flags=0):
"""Compile regex from pattern and pattern_dict"""
pattern_re = regex.compile('(?P<substr>%\{(?P<fullname>(?P<patname>\w+)(?::(?P<subname>\w+))?)\})')
while 1:
matches = [md.groupdict() for md in pattern_re.finditer(pattern)]
if len(matches) == 0:
break
for md in matches:
if md['patname'] in self.pattern_dict:
if md['subname']:
# TODO error if more than one occurance
if '(?P<' in self.pattern_dict[md['patname']]:
# this is not part of the original logstash implementation
# but it might be useful to be able to replace the
# group name used in the pattern
repl = regex.sub('\(\?P<(\w+)>', '(?P<%s>' % md['subname'],
self.pattern_dict[md['patname']], 1)
else:
repl = '(?P<%s>%s)' % (md['subname'],
self.pattern_dict[md['patname']])
else:
repl = self.pattern_dict[md['patname']]
# print "Replacing %s with %s" %(md['substr'], repl)
pattern = pattern.replace(md['substr'], repl)
else:
# print('patname not found')
# maybe missing path entry or missing pattern file?
return
# print 'pattern: %s' % pattern
return regex.compile(pattern, flags) | [
"def",
"compile_regex",
"(",
"self",
",",
"pattern",
",",
"flags",
"=",
"0",
")",
":",
"pattern_re",
"=",
"regex",
".",
"compile",
"(",
"'(?P<substr>%\\{(?P<fullname>(?P<patname>\\w+)(?::(?P<subname>\\w+))?)\\})'",
")",
"while",
"1",
":",
"matches",
"=",
"[",
"md",
".",
"groupdict",
"(",
")",
"for",
"md",
"in",
"pattern_re",
".",
"finditer",
"(",
"pattern",
")",
"]",
"if",
"len",
"(",
"matches",
")",
"==",
"0",
":",
"break",
"for",
"md",
"in",
"matches",
":",
"if",
"md",
"[",
"'patname'",
"]",
"in",
"self",
".",
"pattern_dict",
":",
"if",
"md",
"[",
"'subname'",
"]",
":",
"# TODO error if more than one occurance",
"if",
"'(?P<'",
"in",
"self",
".",
"pattern_dict",
"[",
"md",
"[",
"'patname'",
"]",
"]",
":",
"# this is not part of the original logstash implementation ",
"# but it might be useful to be able to replace the",
"# group name used in the pattern",
"repl",
"=",
"regex",
".",
"sub",
"(",
"'\\(\\?P<(\\w+)>'",
",",
"'(?P<%s>'",
"%",
"md",
"[",
"'subname'",
"]",
",",
"self",
".",
"pattern_dict",
"[",
"md",
"[",
"'patname'",
"]",
"]",
",",
"1",
")",
"else",
":",
"repl",
"=",
"'(?P<%s>%s)'",
"%",
"(",
"md",
"[",
"'subname'",
"]",
",",
"self",
".",
"pattern_dict",
"[",
"md",
"[",
"'patname'",
"]",
"]",
")",
"else",
":",
"repl",
"=",
"self",
".",
"pattern_dict",
"[",
"md",
"[",
"'patname'",
"]",
"]",
"# print \"Replacing %s with %s\" %(md['substr'], repl)",
"pattern",
"=",
"pattern",
".",
"replace",
"(",
"md",
"[",
"'substr'",
"]",
",",
"repl",
")",
"else",
":",
"# print('patname not found')",
"# maybe missing path entry or missing pattern file?",
"return",
"# print 'pattern: %s' % pattern",
"return",
"regex",
".",
"compile",
"(",
"pattern",
",",
"flags",
")"
] | Compile regex from pattern and pattern_dict | [
"Compile",
"regex",
"from",
"pattern",
"and",
"pattern_dict"
] | e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f | https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/pattern.py#L27-L56 | train |
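The `compile_regex` row above expands `%{NAME:field}` placeholders into named groups drawn from a pattern dictionary. Below is a minimal sketch of that expansion idea, using only the standard-library `re` module instead of korg's `regex` dependency and a two-entry pattern dictionary chosen for illustration; it handles only the `%{NAME:field}` form, not the bare `%{NAME}` or recursive cases.

```python
import re

pattern_dict = {"NUMBER": r"\d+", "WORD": r"\w+"}

def expand(pattern):
    """Replace each %{NAME:field} with a named group built from pattern_dict[NAME]."""
    def repl(match):
        return "(?P<%s>%s)" % (match.group("sub"), pattern_dict[match.group("pat")])
    return re.sub(r"%\{(?P<pat>\w+):(?P<sub>\w+)\}", repl, pattern)

rx = re.compile(expand("%{WORD:verb} %{NUMBER:count}"))
print(rx.match("retry 3").groupdict())   # {'verb': 'retry', 'count': '3'}
```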
finklabs/korg | korg/pattern.py | PatternRepo._load_patterns | def _load_patterns(self, folders, pattern_dict=None):
"""Load all pattern from all the files in folders"""
if pattern_dict is None:
pattern_dict = {}
for folder in folders:
for file in os.listdir(folder):
if regex.match('^[\w-]+$', file):
self._load_pattern_file(os.path.join(folder, file), pattern_dict)
return pattern_dict | python | def _load_patterns(self, folders, pattern_dict=None):
"""Load all pattern from all the files in folders"""
if pattern_dict is None:
pattern_dict = {}
for folder in folders:
for file in os.listdir(folder):
if regex.match('^[\w-]+$', file):
self._load_pattern_file(os.path.join(folder, file), pattern_dict)
return pattern_dict | [
"def",
"_load_patterns",
"(",
"self",
",",
"folders",
",",
"pattern_dict",
"=",
"None",
")",
":",
"if",
"pattern_dict",
"is",
"None",
":",
"pattern_dict",
"=",
"{",
"}",
"for",
"folder",
"in",
"folders",
":",
"for",
"file",
"in",
"os",
".",
"listdir",
"(",
"folder",
")",
":",
"if",
"regex",
".",
"match",
"(",
"'^[\\w-]+$'",
",",
"file",
")",
":",
"self",
".",
"_load_pattern_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"file",
")",
",",
"pattern_dict",
")",
"return",
"pattern_dict"
] | Load all pattern from all the files in folders | [
"Load",
"all",
"pattern",
"from",
"all",
"the",
"files",
"in",
"folders"
] | e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f | https://github.com/finklabs/korg/blob/e931a673ce4bc79cdf26cb4f697fa23fa8a72e4f/korg/pattern.py#L73-L81 | train |
astooke/gtimer | gtimer/public/io.py | load_pkl | def load_pkl(filenames):
"""
Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object.
"""
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
times = []
for name in filenames:
name = str(name)
with open(name, 'rb') as file:
loaded_obj = pickle.load(file)
if not isinstance(loaded_obj, Times):
raise TypeError("At least one loaded object is not a Times data object.")
times.append(loaded_obj)
return times if len(times) > 1 else times[0] | python | def load_pkl(filenames):
"""
Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object.
"""
if not isinstance(filenames, (list, tuple)):
filenames = [filenames]
times = []
for name in filenames:
name = str(name)
with open(name, 'rb') as file:
loaded_obj = pickle.load(file)
if not isinstance(loaded_obj, Times):
raise TypeError("At least one loaded object is not a Times data object.")
times.append(loaded_obj)
return times if len(times) > 1 else times[0] | [
"def",
"load_pkl",
"(",
"filenames",
")",
":",
"if",
"not",
"isinstance",
"(",
"filenames",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"filenames",
"=",
"[",
"filenames",
"]",
"times",
"=",
"[",
"]",
"for",
"name",
"in",
"filenames",
":",
"name",
"=",
"str",
"(",
"name",
")",
"with",
"open",
"(",
"name",
",",
"'rb'",
")",
"as",
"file",
":",
"loaded_obj",
"=",
"pickle",
".",
"load",
"(",
"file",
")",
"if",
"not",
"isinstance",
"(",
"loaded_obj",
",",
"Times",
")",
":",
"raise",
"TypeError",
"(",
"\"At least one loaded object is not a Times data object.\"",
")",
"times",
".",
"append",
"(",
"loaded_obj",
")",
"return",
"times",
"if",
"len",
"(",
"times",
")",
">",
"1",
"else",
"times",
"[",
"0",
"]"
] | Unpickle file contents.
Args:
filenames (str): Can be one or a list or tuple of filenames to retrieve.
Returns:
Times: A single object, or from a collection of filenames, a list of Times objects.
Raises:
TypeError: If any loaded object is not a Times object. | [
"Unpickle",
"file",
"contents",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/io.py#L170-L193 | train |
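For completeness, the `load_pkl` row above is a thin wrapper over `pickle.load` plus a type check. A standalone round-trip showing the underlying mechanism, with a plain dict and a hypothetical filename standing in for a gtimer `Times` object:

```python
import pickle

with open("times.pkl", "wb") as f:       # hypothetical filename
    pickle.dump({"total": 1.23}, f)      # a dict stands in for a Times object here

with open("times.pkl", "rb") as f:
    print(pickle.load(f))                # {'total': 1.23}
```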
dgomes/pyipma | pyipma/api.py | IPMA_API.retrieve | async def retrieve(self, url, **kwargs):
"""Issue API requests."""
try:
async with self.websession.request('GET', url, **kwargs) as res:
if res.status != 200:
raise Exception("Could not retrieve information from API")
if res.content_type == 'application/json':
return await res.json()
return await res.text()
except aiohttp.ClientError as err:
logging.error(err) | python | async def retrieve(self, url, **kwargs):
"""Issue API requests."""
try:
async with self.websession.request('GET', url, **kwargs) as res:
if res.status != 200:
raise Exception("Could not retrieve information from API")
if res.content_type == 'application/json':
return await res.json()
return await res.text()
except aiohttp.ClientError as err:
logging.error(err) | [
"async",
"def",
"retrieve",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"async",
"with",
"self",
".",
"websession",
".",
"request",
"(",
"'GET'",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
"as",
"res",
":",
"if",
"res",
".",
"status",
"!=",
"200",
":",
"raise",
"Exception",
"(",
"\"Could not retrieve information from API\"",
")",
"if",
"res",
".",
"content_type",
"==",
"'application/json'",
":",
"return",
"await",
"res",
".",
"json",
"(",
")",
"return",
"await",
"res",
".",
"text",
"(",
")",
"except",
"aiohttp",
".",
"ClientError",
"as",
"err",
":",
"logging",
".",
"error",
"(",
"err",
")"
] | Issue API requests. | [
"Issue",
"API",
"requests",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L22-L32 | train |
dgomes/pyipma | pyipma/api.py | IPMA_API._to_number | def _to_number(cls, string):
"""Convert string to int or float."""
try:
if float(string) - int(string) == 0:
return int(string)
return float(string)
except ValueError:
try:
return float(string)
except ValueError:
return string | python | def _to_number(cls, string):
"""Convert string to int or float."""
try:
if float(string) - int(string) == 0:
return int(string)
return float(string)
except ValueError:
try:
return float(string)
except ValueError:
return string | [
"def",
"_to_number",
"(",
"cls",
",",
"string",
")",
":",
"try",
":",
"if",
"float",
"(",
"string",
")",
"-",
"int",
"(",
"string",
")",
"==",
"0",
":",
"return",
"int",
"(",
"string",
")",
"return",
"float",
"(",
"string",
")",
"except",
"ValueError",
":",
"try",
":",
"return",
"float",
"(",
"string",
")",
"except",
"ValueError",
":",
"return",
"string"
] | Convert string to int or float. | [
"Convert",
"string",
"to",
"int",
"or",
"float",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L35-L45 | train |
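The classmethod above has one subtle branch: int(string) raises on strings like "38.0", so integral-looking decimals come back as floats via the fallback. A standalone mirror of the logic, for illustration only (not part of the record):

def to_number(string):
    # mirrors IPMA_API._to_number from the record above
    try:
        if float(string) - int(string) == 0:
            return int(string)
        return float(string)
    except ValueError:
        try:
            return float(string)
        except ValueError:
            return string

assert to_number("38") == 38        # int
assert to_number("38.5") == 38.5    # float
assert to_number("38.0") == 38.0    # float: int("38.0") raises, so the fallback branch runs
assert to_number("N/A") == "N/A"    # non-numeric strings pass through unchanged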
dgomes/pyipma | pyipma/api.py | IPMA_API.stations | async def stations(self):
"""Retrieve stations."""
data = await self.retrieve(API_DISTRITS)
Station = namedtuple('Station', ['latitude', 'longitude',
'idAreaAviso', 'idConselho',
'idDistrito', 'idRegiao',
'globalIdLocal', 'local'])
_stations = []
for station in data['data']:
_station = Station(
self._to_number(station['latitude']),
self._to_number(station['longitude']),
station['idAreaAviso'],
station['idConcelho'],
station['idDistrito'],
station['idRegiao'],
station['globalIdLocal']//100 * 100,
station['local'],
)
_stations.append(_station)
return _stations | python | async def stations(self):
"""Retrieve stations."""
data = await self.retrieve(API_DISTRITS)
Station = namedtuple('Station', ['latitude', 'longitude',
'idAreaAviso', 'idConselho',
'idDistrito', 'idRegiao',
'globalIdLocal', 'local'])
_stations = []
for station in data['data']:
_station = Station(
self._to_number(station['latitude']),
self._to_number(station['longitude']),
station['idAreaAviso'],
station['idConcelho'],
station['idDistrito'],
station['idRegiao'],
station['globalIdLocal']//100 * 100,
station['local'],
)
_stations.append(_station)
return _stations | [
"async",
"def",
"stations",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"retrieve",
"(",
"API_DISTRITS",
")",
"Station",
"=",
"namedtuple",
"(",
"'Station'",
",",
"[",
"'latitude'",
",",
"'longitude'",
",",
"'idAreaAviso'",
",",
"'idConselho'",
",",
"'idDistrito'",
",",
"'idRegiao'",
",",
"'globalIdLocal'",
",",
"'local'",
"]",
")",
"_stations",
"=",
"[",
"]",
"for",
"station",
"in",
"data",
"[",
"'data'",
"]",
":",
"_station",
"=",
"Station",
"(",
"self",
".",
"_to_number",
"(",
"station",
"[",
"'latitude'",
"]",
")",
",",
"self",
".",
"_to_number",
"(",
"station",
"[",
"'longitude'",
"]",
")",
",",
"station",
"[",
"'idAreaAviso'",
"]",
",",
"station",
"[",
"'idConcelho'",
"]",
",",
"station",
"[",
"'idDistrito'",
"]",
",",
"station",
"[",
"'idRegiao'",
"]",
",",
"station",
"[",
"'globalIdLocal'",
"]",
"//",
"100",
"*",
"100",
",",
"station",
"[",
"'local'",
"]",
",",
")",
"_stations",
".",
"append",
"(",
"_station",
")",
"return",
"_stations"
] | Retrieve stations. | [
"Retrieve",
"stations",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L47-L74 | train |
dgomes/pyipma | pyipma/api.py | IPMA_API.weather_type_classe | async def weather_type_classe(self):
"""Retrieve translation for weather type."""
data = await self.retrieve(url=API_WEATHER_TYPE)
self.weather_type = dict()
for _type in data['data']:
self.weather_type[_type['idWeatherType']] = _type['descIdWeatherTypePT']
return self.weather_type | python | async def weather_type_classe(self):
"""Retrieve translation for weather type."""
data = await self.retrieve(url=API_WEATHER_TYPE)
self.weather_type = dict()
for _type in data['data']:
self.weather_type[_type['idWeatherType']] = _type['descIdWeatherTypePT']
return self.weather_type | [
"async",
"def",
"weather_type_classe",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"retrieve",
"(",
"url",
"=",
"API_WEATHER_TYPE",
")",
"self",
".",
"weather_type",
"=",
"dict",
"(",
")",
"for",
"_type",
"in",
"data",
"[",
"'data'",
"]",
":",
"self",
".",
"weather_type",
"[",
"_type",
"[",
"'idWeatherType'",
"]",
"]",
"=",
"_type",
"[",
"'descIdWeatherTypePT'",
"]",
"return",
"self",
".",
"weather_type"
] | Retrieve translation for weather type. | [
"Retrieve",
"translation",
"for",
"weather",
"type",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L99-L109 | train |
dgomes/pyipma | pyipma/api.py | IPMA_API.wind_type_classe | async def wind_type_classe(self):
"""Retrieve translation for wind type."""
data = await self.retrieve(url=API_WIND_TYPE)
self.wind_type = dict()
for _type in data['data']:
self.wind_type[int(_type['classWindSpeed'])] = _type['descClassWindSpeedDailyPT']
return self.wind_type | python | async def wind_type_classe(self):
"""Retrieve translation for wind type."""
data = await self.retrieve(url=API_WIND_TYPE)
self.wind_type = dict()
for _type in data['data']:
self.wind_type[int(_type['classWindSpeed'])] = _type['descClassWindSpeedDailyPT']
return self.wind_type | [
"async",
"def",
"wind_type_classe",
"(",
"self",
")",
":",
"data",
"=",
"await",
"self",
".",
"retrieve",
"(",
"url",
"=",
"API_WIND_TYPE",
")",
"self",
".",
"wind_type",
"=",
"dict",
"(",
")",
"for",
"_type",
"in",
"data",
"[",
"'data'",
"]",
":",
"self",
".",
"wind_type",
"[",
"int",
"(",
"_type",
"[",
"'classWindSpeed'",
"]",
")",
"]",
"=",
"_type",
"[",
"'descClassWindSpeedDailyPT'",
"]",
"return",
"self",
".",
"wind_type"
] | Retrieve translation for wind type. | [
"Retrieve",
"translation",
"for",
"wind",
"type",
"."
] | cd808abeb70dca0e336afdf55bef3f73973eaa71 | https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L111-L121 | train |
jdodds/feather | feather/dispatcher.py | Dispatcher.register | def register(self, plugin):
"""Add the plugin to our set of listeners for each message that it
listens to, tell it to use our messages Queue for communication, and
start it up.
"""
for listener in plugin.listeners:
self.listeners[listener].add(plugin)
self.plugins.add(plugin)
plugin.messenger = self.messages
plugin.start() | python | def register(self, plugin):
"""Add the plugin to our set of listeners for each message that it
listens to, tell it to use our messages Queue for communication, and
start it up.
"""
for listener in plugin.listeners:
self.listeners[listener].add(plugin)
self.plugins.add(plugin)
plugin.messenger = self.messages
plugin.start() | [
"def",
"register",
"(",
"self",
",",
"plugin",
")",
":",
"for",
"listener",
"in",
"plugin",
".",
"listeners",
":",
"self",
".",
"listeners",
"[",
"listener",
"]",
".",
"add",
"(",
"plugin",
")",
"self",
".",
"plugins",
".",
"add",
"(",
"plugin",
")",
"plugin",
".",
"messenger",
"=",
"self",
".",
"messages",
"plugin",
".",
"start",
"(",
")"
] | Add the plugin to our set of listeners for each message that it
listens to, tell it to use our messages Queue for communication, and
start it up. | [
"Add",
"the",
"plugin",
"to",
"our",
"set",
"of",
"listeners",
"for",
"each",
"message",
"that",
"it",
"listens",
"to",
"tell",
"it",
"to",
"use",
"our",
"messages",
"Queue",
"for",
"communication",
"and",
"start",
"it",
"up",
"."
] | 92a9426e692b33c7fddf758df8dbc99a9a1ba8ef | https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/dispatcher.py#L16-L25 | train |
jdodds/feather | feather/dispatcher.py | Dispatcher.start | def start(self):
"""Send 'APP_START' to any plugins that listen for it, and loop around
waiting for messages and sending them to their listening plugins until
it's time to shutdown.
"""
self.recieve('APP_START')
self.alive = True
while self.alive:
message, payload = self.messages.get()
if message == 'APP_STOP':
for plugin in self.plugins:
plugin.recieve('SHUTDOWN')
self.alive = False
else:
self.recieve(message, payload) | python | def start(self):
"""Send 'APP_START' to any plugins that listen for it, and loop around
waiting for messages and sending them to their listening plugins until
it's time to shutdown.
"""
self.recieve('APP_START')
self.alive = True
while self.alive:
message, payload = self.messages.get()
if message == 'APP_STOP':
for plugin in self.plugins:
plugin.recieve('SHUTDOWN')
self.alive = False
else:
self.recieve(message, payload) | [
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"recieve",
"(",
"'APP_START'",
")",
"self",
".",
"alive",
"=",
"True",
"while",
"self",
".",
"alive",
":",
"message",
",",
"payload",
"=",
"self",
".",
"messages",
".",
"get",
"(",
")",
"if",
"message",
"==",
"'APP_STOP'",
":",
"for",
"plugin",
"in",
"self",
".",
"plugins",
":",
"plugin",
".",
"recieve",
"(",
"'SHUTDOWN'",
")",
"self",
".",
"alive",
"=",
"False",
"else",
":",
"self",
".",
"recieve",
"(",
"message",
",",
"payload",
")"
] | Send 'APP_START' to any plugins that listen for it, and loop around
waiting for messages and sending them to their listening plugins until
it's time to shutdown. | [
"Send",
"APP_START",
"to",
"any",
"plugins",
"that",
"listen",
"for",
"it",
"and",
"loop",
"around",
"waiting",
"for",
"messages",
"and",
"sending",
"them",
"to",
"their",
"listening",
"plugins",
"until",
"it",
"s",
"time",
"to",
"shutdown",
"."
] | 92a9426e692b33c7fddf758df8dbc99a9a1ba8ef | https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/dispatcher.py#L27-L41 | train |
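From register() and start() above, a plugin needs a listeners collection, a messenger attribute the dispatcher fills in, a start() method, and a recieve() method (spelled as in the source). The sketch below infers that contract from this record alone; it is not feather's actual Plugin base class, and modelling it as a Thread is only an assumption based on the start() call.

import threading

class EchoPlugin(threading.Thread):
    listeners = {'APP_START'}            # messages this plugin wants routed to it

    def __init__(self):
        super(EchoPlugin, self).__init__()
        self.messenger = None            # set to the dispatcher's Queue by register()

    def recieve(self, message, payload=None):
        print('EchoPlugin saw', message, payload)
        # a real plugin could reply via self.messenger.put(('SOME_MESSAGE', payload))

    def run(self):
        pass                             # background work would go here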
tamasgal/km3pipe | km3pipe/style/__init__.py | ColourCycler.choose | def choose(self, palette):
"""Pick a palette"""
try:
self._cycler = cycle(self.colours[palette])
except KeyError:
raise KeyError(
"Chose one of the following colour palettes: {0}".format(
self.available
)
) | python | def choose(self, palette):
"""Pick a palette"""
try:
self._cycler = cycle(self.colours[palette])
except KeyError:
raise KeyError(
"Chose one of the following colour palettes: {0}".format(
self.available
)
) | [
"def",
"choose",
"(",
"self",
",",
"palette",
")",
":",
"try",
":",
"self",
".",
"_cycler",
"=",
"cycle",
"(",
"self",
".",
"colours",
"[",
"palette",
"]",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Chose one of the following colour palettes: {0}\"",
".",
"format",
"(",
"self",
".",
"available",
")",
")"
] | Pick a palette | [
"Pick",
"a",
"palette"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/style/__init__.py#L50-L59 | train |
tamasgal/km3pipe | km3pipe/style/__init__.py | ColourCycler.refresh_styles | def refresh_styles(self):
"""Load all available styles"""
import matplotlib.pyplot as plt
self.colours = {}
for style in plt.style.available:
try:
style_colours = plt.style.library[style]['axes.prop_cycle']
self.colours[style] = [c['color'] for c in list(style_colours)]
except KeyError:
continue
self.colours['km3pipe'] = [
"#ff7869", "#4babe1", "#96ad3e", "#e4823d", "#5d72b2", "#e2a3c2",
"#fd9844", "#e480e7"
] | python | def refresh_styles(self):
"""Load all available styles"""
import matplotlib.pyplot as plt
self.colours = {}
for style in plt.style.available:
try:
style_colours = plt.style.library[style]['axes.prop_cycle']
self.colours[style] = [c['color'] for c in list(style_colours)]
except KeyError:
continue
self.colours['km3pipe'] = [
"#ff7869", "#4babe1", "#96ad3e", "#e4823d", "#5d72b2", "#e2a3c2",
"#fd9844", "#e480e7"
] | [
"def",
"refresh_styles",
"(",
"self",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"self",
".",
"colours",
"=",
"{",
"}",
"for",
"style",
"in",
"plt",
".",
"style",
".",
"available",
":",
"try",
":",
"style_colours",
"=",
"plt",
".",
"style",
".",
"library",
"[",
"style",
"]",
"[",
"'axes.prop_cycle'",
"]",
"self",
".",
"colours",
"[",
"style",
"]",
"=",
"[",
"c",
"[",
"'color'",
"]",
"for",
"c",
"in",
"list",
"(",
"style_colours",
")",
"]",
"except",
"KeyError",
":",
"continue",
"self",
".",
"colours",
"[",
"'km3pipe'",
"]",
"=",
"[",
"\"#ff7869\"",
",",
"\"#4babe1\"",
",",
"\"#96ad3e\"",
",",
"\"#e4823d\"",
",",
"\"#5d72b2\"",
",",
"\"#e2a3c2\"",
",",
"\"#fd9844\"",
",",
"\"#e480e7\"",
"]"
] | Load all available styles | [
"Load",
"all",
"available",
"styles"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/style/__init__.py#L61-L76 | train |
dsoprea/PySchedules | pyschedules/retrieve.py | get_file_object | def get_file_object(username, password, utc_start=None, utc_stop=None):
"""Make the connection. Return a file-like object."""
if not utc_start:
utc_start = datetime.now()
if not utc_stop:
utc_stop = utc_start + timedelta(days=1)
logging.info("Downloading schedules for username [%s] in range [%s] to "
"[%s]." % (username, utc_start, utc_stop))
replacements = {'start_time': utc_start.strftime('%Y-%m-%dT%H:%M:%SZ'),
'stop_time': utc_stop.strftime('%Y-%m-%dT%H:%M:%SZ')}
soap_message_xml = (soap_message_xml_template % replacements)
authinfo = urllib2.HTTPDigestAuthHandler()
authinfo.add_password(realm, url, username, password)
try:
request = urllib2.Request(url, soap_message_xml, request_headers)
response = urllib2.build_opener(authinfo).open(request)
if response.headers['Content-Encoding'] == 'gzip':
response = GzipStream(response)
except:
logging.exception("Could not acquire connection to Schedules Direct.")
raise
return response | python | def get_file_object(username, password, utc_start=None, utc_stop=None):
"""Make the connection. Return a file-like object."""
if not utc_start:
utc_start = datetime.now()
if not utc_stop:
utc_stop = utc_start + timedelta(days=1)
logging.info("Downloading schedules for username [%s] in range [%s] to "
"[%s]." % (username, utc_start, utc_stop))
replacements = {'start_time': utc_start.strftime('%Y-%m-%dT%H:%M:%SZ'),
'stop_time': utc_stop.strftime('%Y-%m-%dT%H:%M:%SZ')}
soap_message_xml = (soap_message_xml_template % replacements)
authinfo = urllib2.HTTPDigestAuthHandler()
authinfo.add_password(realm, url, username, password)
try:
request = urllib2.Request(url, soap_message_xml, request_headers)
response = urllib2.build_opener(authinfo).open(request)
if response.headers['Content-Encoding'] == 'gzip':
response = GzipStream(response)
except:
logging.exception("Could not acquire connection to Schedules Direct.")
raise
return response | [
"def",
"get_file_object",
"(",
"username",
",",
"password",
",",
"utc_start",
"=",
"None",
",",
"utc_stop",
"=",
"None",
")",
":",
"if",
"not",
"utc_start",
":",
"utc_start",
"=",
"datetime",
".",
"now",
"(",
")",
"if",
"not",
"utc_stop",
":",
"utc_stop",
"=",
"utc_start",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
"logging",
".",
"info",
"(",
"\"Downloading schedules for username [%s] in range [%s] to \"",
"\"[%s].\"",
"%",
"(",
"username",
",",
"utc_start",
",",
"utc_stop",
")",
")",
"replacements",
"=",
"{",
"'start_time'",
":",
"utc_start",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%SZ'",
")",
",",
"'stop_time'",
":",
"utc_stop",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%SZ'",
")",
"}",
"soap_message_xml",
"=",
"(",
"soap_message_xml_template",
"%",
"replacements",
")",
"authinfo",
"=",
"urllib2",
".",
"HTTPDigestAuthHandler",
"(",
")",
"authinfo",
".",
"add_password",
"(",
"realm",
",",
"url",
",",
"username",
",",
"password",
")",
"try",
":",
"request",
"=",
"urllib2",
".",
"Request",
"(",
"url",
",",
"soap_message_xml",
",",
"request_headers",
")",
"response",
"=",
"urllib2",
".",
"build_opener",
"(",
"authinfo",
")",
".",
"open",
"(",
"request",
")",
"if",
"response",
".",
"headers",
"[",
"'Content-Encoding'",
"]",
"==",
"'gzip'",
":",
"response",
"=",
"GzipStream",
"(",
"response",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"\"Could not acquire connection to Schedules Direct.\"",
")",
"raise",
"return",
"response"
] | Make the connection. Return a file-like object. | [
"Make",
"the",
"connection",
".",
"Return",
"a",
"file",
"-",
"like",
"object",
"."
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L51-L81 | train |
dsoprea/PySchedules | pyschedules/retrieve.py | process_file_object | def process_file_object(file_obj, importer, progress):
"""Parse the data using the connected file-like object."""
logging.info("Processing schedule data.")
try:
handler = XmlCallbacks(importer, progress)
parser = sax.make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(handler)
parser.parse(file_obj)
except:
logging.exception("Parse failed.")
raise
logging.info("Schedule data processed.") | python | def process_file_object(file_obj, importer, progress):
"""Parse the data using the connected file-like object."""
logging.info("Processing schedule data.")
try:
handler = XmlCallbacks(importer, progress)
parser = sax.make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(handler)
parser.parse(file_obj)
except:
logging.exception("Parse failed.")
raise
logging.info("Schedule data processed.") | [
"def",
"process_file_object",
"(",
"file_obj",
",",
"importer",
",",
"progress",
")",
":",
"logging",
".",
"info",
"(",
"\"Processing schedule data.\"",
")",
"try",
":",
"handler",
"=",
"XmlCallbacks",
"(",
"importer",
",",
"progress",
")",
"parser",
"=",
"sax",
".",
"make_parser",
"(",
")",
"parser",
".",
"setContentHandler",
"(",
"handler",
")",
"parser",
".",
"setErrorHandler",
"(",
"handler",
")",
"parser",
".",
"parse",
"(",
"file_obj",
")",
"except",
":",
"logging",
".",
"exception",
"(",
"\"Parse failed.\"",
")",
"raise",
"logging",
".",
"info",
"(",
"\"Schedule data processed.\"",
")"
] | Parse the data using the connected file-like object. | [
"Parse",
"the",
"data",
"using",
"the",
"connected",
"file",
"-",
"like",
"object",
"."
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L83-L98 | train |
dsoprea/PySchedules | pyschedules/retrieve.py | parse_schedules | def parse_schedules(username, password, importer, progress, utc_start=None,
utc_stop=None):
"""A utility function to marry the connecting and reading functions."""
file_obj = get_file_object(username, password, utc_start, utc_stop)
process_file_object(file_obj, importer, progress) | python | def parse_schedules(username, password, importer, progress, utc_start=None,
utc_stop=None):
"""A utility function to marry the connecting and reading functions."""
file_obj = get_file_object(username, password, utc_start, utc_stop)
process_file_object(file_obj, importer, progress) | [
"def",
"parse_schedules",
"(",
"username",
",",
"password",
",",
"importer",
",",
"progress",
",",
"utc_start",
"=",
"None",
",",
"utc_stop",
"=",
"None",
")",
":",
"file_obj",
"=",
"get_file_object",
"(",
"username",
",",
"password",
",",
"utc_start",
",",
"utc_stop",
")",
"process_file_object",
"(",
"file_obj",
",",
"importer",
",",
"progress",
")"
] | A utility function to marry the connecting and reading functions. | [
"A",
"utility",
"function",
"to",
"marry",
"the",
"connecting",
"and",
"reading",
"functions",
"."
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/retrieve.py#L100-L105 | train |
tamasgal/km3pipe | km3pipe/utils/km3h5concat.py | km3h5concat | def km3h5concat(input_files, output_file, n_events=None, **kwargs):
"""Concatenate KM3HDF5 files via pipeline."""
from km3pipe import Pipeline # noqa
from km3pipe.io import HDF5Pump, HDF5Sink # noqa
pipe = Pipeline()
pipe.attach(HDF5Pump, filenames=input_files, **kwargs)
pipe.attach(StatusBar, every=250)
pipe.attach(HDF5Sink, filename=output_file, **kwargs)
pipe.drain(n_events) | python | def km3h5concat(input_files, output_file, n_events=None, **kwargs):
"""Concatenate KM3HDF5 files via pipeline."""
from km3pipe import Pipeline # noqa
from km3pipe.io import HDF5Pump, HDF5Sink # noqa
pipe = Pipeline()
pipe.attach(HDF5Pump, filenames=input_files, **kwargs)
pipe.attach(StatusBar, every=250)
pipe.attach(HDF5Sink, filename=output_file, **kwargs)
pipe.drain(n_events) | [
"def",
"km3h5concat",
"(",
"input_files",
",",
"output_file",
",",
"n_events",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"km3pipe",
"import",
"Pipeline",
"# noqa",
"from",
"km3pipe",
".",
"io",
"import",
"HDF5Pump",
",",
"HDF5Sink",
"# noqa",
"pipe",
"=",
"Pipeline",
"(",
")",
"pipe",
".",
"attach",
"(",
"HDF5Pump",
",",
"filenames",
"=",
"input_files",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"attach",
"(",
"StatusBar",
",",
"every",
"=",
"250",
")",
"pipe",
".",
"attach",
"(",
"HDF5Sink",
",",
"filename",
"=",
"output_file",
",",
"*",
"*",
"kwargs",
")",
"pipe",
".",
"drain",
"(",
"n_events",
")"
] | Concatenate KM3HDF5 files via pipeline. | [
"Concatenate",
"KM3HDF5",
"files",
"via",
"pipeline",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/km3h5concat.py#L32-L41 | train |
tamasgal/km3pipe | km3pipe/utils/streamds.py | get_data | def get_data(stream, parameters, fmt):
"""Retrieve data for given stream and parameters, or None if not found"""
sds = kp.db.StreamDS()
if stream not in sds.streams:
log.error("Stream '{}' not found in the database.".format(stream))
return
params = {}
if parameters:
for parameter in parameters:
if '=' not in parameter:
log.error(
"Invalid parameter syntax '{}'\n"
"The correct syntax is 'parameter=value'".
format(parameter)
)
continue
key, value = parameter.split('=')
params[key] = value
data = sds.get(stream, fmt, **params)
if data is not None:
with pd.option_context('display.max_rows', None, 'display.max_columns',
None):
print(data)
else:
sds.help(stream) | python | def get_data(stream, parameters, fmt):
"""Retrieve data for given stream and parameters, or None if not found"""
sds = kp.db.StreamDS()
if stream not in sds.streams:
log.error("Stream '{}' not found in the database.".format(stream))
return
params = {}
if parameters:
for parameter in parameters:
if '=' not in parameter:
log.error(
"Invalid parameter syntax '{}'\n"
"The correct syntax is 'parameter=value'".
format(parameter)
)
continue
key, value = parameter.split('=')
params[key] = value
data = sds.get(stream, fmt, **params)
if data is not None:
with pd.option_context('display.max_rows', None, 'display.max_columns',
None):
print(data)
else:
sds.help(stream) | [
"def",
"get_data",
"(",
"stream",
",",
"parameters",
",",
"fmt",
")",
":",
"sds",
"=",
"kp",
".",
"db",
".",
"StreamDS",
"(",
")",
"if",
"stream",
"not",
"in",
"sds",
".",
"streams",
":",
"log",
".",
"error",
"(",
"\"Stream '{}' not found in the database.\"",
".",
"format",
"(",
"stream",
")",
")",
"return",
"params",
"=",
"{",
"}",
"if",
"parameters",
":",
"for",
"parameter",
"in",
"parameters",
":",
"if",
"'='",
"not",
"in",
"parameter",
":",
"log",
".",
"error",
"(",
"\"Invalid parameter syntax '{}'\\n\"",
"\"The correct syntax is 'parameter=value'\"",
".",
"format",
"(",
"parameter",
")",
")",
"continue",
"key",
",",
"value",
"=",
"parameter",
".",
"split",
"(",
"'='",
")",
"params",
"[",
"key",
"]",
"=",
"value",
"data",
"=",
"sds",
".",
"get",
"(",
"stream",
",",
"fmt",
",",
"*",
"*",
"params",
")",
"if",
"data",
"is",
"not",
"None",
":",
"with",
"pd",
".",
"option_context",
"(",
"'display.max_rows'",
",",
"None",
",",
"'display.max_columns'",
",",
"None",
")",
":",
"print",
"(",
"data",
")",
"else",
":",
"sds",
".",
"help",
"(",
"stream",
")"
] | Retrieve data for given stream and parameters, or None if not found | [
"Retrieve",
"data",
"for",
"given",
"stream",
"and",
"parameters",
"or",
"None",
"if",
"not",
"found"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L56-L80 | train |
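A usage sketch for the 'parameter=value' syntax checked above; the stream name, parameter and format string are placeholders and may not match what the KM3NeT StreamDS service actually exposes.

get_data('runs', ['detid=D_ARCA003', 'minrun=5000'], fmt='pandas')   # valid syntax, forwarded to sds.get()
get_data('runs', ['detid D_ARCA003'], fmt='pandas')                  # missing '=': logged as an error and skipped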
tamasgal/km3pipe | km3pipe/utils/streamds.py | available_streams | def available_streams():
"""Show a short list of available streams."""
sds = kp.db.StreamDS()
print("Available streams: ")
print(', '.join(sorted(sds.streams))) | python | def available_streams():
"""Show a short list of available streams."""
sds = kp.db.StreamDS()
print("Available streams: ")
print(', '.join(sorted(sds.streams))) | [
"def",
"available_streams",
"(",
")",
":",
"sds",
"=",
"kp",
".",
"db",
".",
"StreamDS",
"(",
")",
"print",
"(",
"\"Available streams: \"",
")",
"print",
"(",
"', '",
".",
"join",
"(",
"sorted",
"(",
"sds",
".",
"streams",
")",
")",
")"
] | Show a short list of available streams. | [
"Show",
"a",
"short",
"list",
"of",
"available",
"streams",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L83-L87 | train |
tamasgal/km3pipe | km3pipe/utils/streamds.py | upload_runsummary | def upload_runsummary(csv_filename, dryrun=False):
"""Reads the CSV file and uploads its contents to the runsummary table"""
print("Checking '{}' for consistency.".format(csv_filename))
if not os.path.exists(csv_filename):
log.critical("{} -> file not found.".format(csv_filename))
return
try:
df = pd.read_csv(csv_filename, sep='\t')
except pd.errors.EmptyDataError as e:
log.error(e)
return
cols = set(df.columns)
if not REQUIRED_COLUMNS.issubset(cols):
log.error(
"Missing columns: {}.".format(
', '.join(str(c) for c in REQUIRED_COLUMNS - cols)
)
)
return
parameters = cols - REQUIRED_COLUMNS
if len(parameters) < 1:
log.error("No parameter columns found.")
return
if len(df) == 0:
log.critical("Empty dataset.")
return
print(
"Found data for parameters: {}.".format(
', '.join(str(c) for c in parameters)
)
)
print("Converting CSV data into JSON")
if dryrun:
log.warn("Dryrun: adding 'TEST_' prefix to parameter names")
prefix = "TEST_"
else:
prefix = ""
data = convert_runsummary_to_json(df, prefix=prefix)
print("We have {:.3f} MB to upload.".format(len(data) / 1024**2))
print("Requesting database session.")
db = kp.db.DBManager() # noqa
if kp.db.we_are_in_lyon():
session_cookie = "sid=_kmcprod_134.158_lyo7783844001343100343mcprod1223user" # noqa
else:
session_cookie = kp.config.Config().get('DB', 'session_cookie')
if session_cookie is None:
raise SystemExit("Could not restore DB session.")
log.debug("Using the session cookie: {}".format(session_cookie))
cookie_key, sid = session_cookie.split('=')
print("Uploading the data to the database.")
r = requests.post(
RUNSUMMARY_URL, cookies={cookie_key: sid}, files={'datafile': data}
)
if r.status_code == 200:
log.debug("POST request status code: {}".format(r.status_code))
print("Database response:")
db_answer = json.loads(r.text)
for key, value in db_answer.items():
print(" -> {}: {}".format(key, value))
if db_answer['Result'] == 'OK':
print("Upload successful.")
else:
log.critical("Something went wrong.")
else:
log.error("POST request status code: {}".format(r.status_code))
log.critical("Something went wrong...")
return | python | def upload_runsummary(csv_filename, dryrun=False):
"""Reads the CSV file and uploads its contents to the runsummary table"""
print("Checking '{}' for consistency.".format(csv_filename))
if not os.path.exists(csv_filename):
log.critical("{} -> file not found.".format(csv_filename))
return
try:
df = pd.read_csv(csv_filename, sep='\t')
except pd.errors.EmptyDataError as e:
log.error(e)
return
cols = set(df.columns)
if not REQUIRED_COLUMNS.issubset(cols):
log.error(
"Missing columns: {}.".format(
', '.join(str(c) for c in REQUIRED_COLUMNS - cols)
)
)
return
parameters = cols - REQUIRED_COLUMNS
if len(parameters) < 1:
log.error("No parameter columns found.")
return
if len(df) == 0:
log.critical("Empty dataset.")
return
print(
"Found data for parameters: {}.".format(
', '.join(str(c) for c in parameters)
)
)
print("Converting CSV data into JSON")
if dryrun:
log.warn("Dryrun: adding 'TEST_' prefix to parameter names")
prefix = "TEST_"
else:
prefix = ""
data = convert_runsummary_to_json(df, prefix=prefix)
print("We have {:.3f} MB to upload.".format(len(data) / 1024**2))
print("Requesting database session.")
db = kp.db.DBManager() # noqa
if kp.db.we_are_in_lyon():
session_cookie = "sid=_kmcprod_134.158_lyo7783844001343100343mcprod1223user" # noqa
else:
session_cookie = kp.config.Config().get('DB', 'session_cookie')
if session_cookie is None:
raise SystemExit("Could not restore DB session.")
log.debug("Using the session cookie: {}".format(session_cookie))
cookie_key, sid = session_cookie.split('=')
print("Uploading the data to the database.")
r = requests.post(
RUNSUMMARY_URL, cookies={cookie_key: sid}, files={'datafile': data}
)
if r.status_code == 200:
log.debug("POST request status code: {}".format(r.status_code))
print("Database response:")
db_answer = json.loads(r.text)
for key, value in db_answer.items():
print(" -> {}: {}".format(key, value))
if db_answer['Result'] == 'OK':
print("Upload successful.")
else:
log.critical("Something went wrong.")
else:
log.error("POST request status code: {}".format(r.status_code))
log.critical("Something went wrong...")
return | [
"def",
"upload_runsummary",
"(",
"csv_filename",
",",
"dryrun",
"=",
"False",
")",
":",
"print",
"(",
"\"Checking '{}' for consistency.\"",
".",
"format",
"(",
"csv_filename",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"csv_filename",
")",
":",
"log",
".",
"critical",
"(",
"\"{} -> file not found.\"",
".",
"format",
"(",
"csv_filename",
")",
")",
"return",
"try",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"csv_filename",
",",
"sep",
"=",
"'\\t'",
")",
"except",
"pd",
".",
"errors",
".",
"EmptyDataError",
"as",
"e",
":",
"log",
".",
"error",
"(",
"e",
")",
"return",
"cols",
"=",
"set",
"(",
"df",
".",
"columns",
")",
"if",
"not",
"REQUIRED_COLUMNS",
".",
"issubset",
"(",
"cols",
")",
":",
"log",
".",
"error",
"(",
"\"Missing columns: {}.\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"REQUIRED_COLUMNS",
"-",
"cols",
")",
")",
")",
"return",
"parameters",
"=",
"cols",
"-",
"REQUIRED_COLUMNS",
"if",
"len",
"(",
"parameters",
")",
"<",
"1",
":",
"log",
".",
"error",
"(",
"\"No parameter columns found.\"",
")",
"return",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"log",
".",
"critical",
"(",
"\"Empty dataset.\"",
")",
"return",
"print",
"(",
"\"Found data for parameters: {}.\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"str",
"(",
"c",
")",
"for",
"c",
"in",
"parameters",
")",
")",
")",
"print",
"(",
"\"Converting CSV data into JSON\"",
")",
"if",
"dryrun",
":",
"log",
".",
"warn",
"(",
"\"Dryrun: adding 'TEST_' prefix to parameter names\"",
")",
"prefix",
"=",
"\"TEST_\"",
"else",
":",
"prefix",
"=",
"\"\"",
"data",
"=",
"convert_runsummary_to_json",
"(",
"df",
",",
"prefix",
"=",
"prefix",
")",
"print",
"(",
"\"We have {:.3f} MB to upload.\"",
".",
"format",
"(",
"len",
"(",
"data",
")",
"/",
"1024",
"**",
"2",
")",
")",
"print",
"(",
"\"Requesting database session.\"",
")",
"db",
"=",
"kp",
".",
"db",
".",
"DBManager",
"(",
")",
"# noqa",
"if",
"kp",
".",
"db",
".",
"we_are_in_lyon",
"(",
")",
":",
"session_cookie",
"=",
"\"sid=_kmcprod_134.158_lyo7783844001343100343mcprod1223user\"",
"# noqa",
"else",
":",
"session_cookie",
"=",
"kp",
".",
"config",
".",
"Config",
"(",
")",
".",
"get",
"(",
"'DB'",
",",
"'session_cookie'",
")",
"if",
"session_cookie",
"is",
"None",
":",
"raise",
"SystemExit",
"(",
"\"Could not restore DB session.\"",
")",
"log",
".",
"debug",
"(",
"\"Using the session cookie: {}\"",
".",
"format",
"(",
"session_cookie",
")",
")",
"cookie_key",
",",
"sid",
"=",
"session_cookie",
".",
"split",
"(",
"'='",
")",
"print",
"(",
"\"Uploading the data to the database.\"",
")",
"r",
"=",
"requests",
".",
"post",
"(",
"RUNSUMMARY_URL",
",",
"cookies",
"=",
"{",
"cookie_key",
":",
"sid",
"}",
",",
"files",
"=",
"{",
"'datafile'",
":",
"data",
"}",
")",
"if",
"r",
".",
"status_code",
"==",
"200",
":",
"log",
".",
"debug",
"(",
"\"POST request status code: {}\"",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"print",
"(",
"\"Database response:\"",
")",
"db_answer",
"=",
"json",
".",
"loads",
"(",
"r",
".",
"text",
")",
"for",
"key",
",",
"value",
"in",
"db_answer",
".",
"items",
"(",
")",
":",
"print",
"(",
"\" -> {}: {}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"if",
"db_answer",
"[",
"'Result'",
"]",
"==",
"'OK'",
":",
"print",
"(",
"\"Upload successful.\"",
")",
"else",
":",
"log",
".",
"critical",
"(",
"\"Something went wrong.\"",
")",
"else",
":",
"log",
".",
"error",
"(",
"\"POST request status code: {}\"",
".",
"format",
"(",
"r",
".",
"status_code",
")",
")",
"log",
".",
"critical",
"(",
"\"Something went wrong...\"",
")",
"return"
] | Reads the CSV file and uploads its contents to the runsummary table | [
"Reads",
"the",
"CSV",
"file",
"and",
"uploads",
"its",
"contents",
"to",
"the",
"runsummary",
"table"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L90-L162 | train |
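A sketch of a tab-separated file the function above would accept. REQUIRED_COLUMNS is not defined in this record; judging from the groupby and getattr calls it covers at least det_id, run and source, and every extra column is treated as a float-valued parameter. All values below are placeholders.

import pandas as pd

df = pd.DataFrame({
    'det_id': ['D_ARCA003', 'D_ARCA003'],   # placeholder detector identifier
    'run': [1001, 1002],
    'source': ['qa_script', 'qa_script'],
    'livetime': [3598.2, 3601.0],            # one parameter column; values must be floats
})
df.to_csv('runsummary.csv', sep='\t', index=False)
# upload_runsummary('runsummary.csv', dryrun=True)   # dryrun prefixes parameter names with 'TEST_'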
tamasgal/km3pipe | km3pipe/utils/streamds.py | convert_runsummary_to_json | def convert_runsummary_to_json(
df, comment='Uploaded via km3pipe.StreamDS', prefix='TEST_'
):
"""Convert a Pandas DataFrame with runsummary to JSON for DB upload"""
data_field = []
comment += ", by {}".format(getpass.getuser())
for det_id, det_data in df.groupby('det_id'):
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby('run'):
parameters_field = []
runs_field.append({
"Run": int(run),
"Parameters": parameters_field
})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns:
if parameter_name in REQUIRED_COLUMNS:
continue
if parameter_name not in parameter_dict:
entry = {'Name': prefix + parameter_name, 'Data': []}
parameter_dict[parameter_name] = entry
data_value = getattr(row, parameter_name)
try:
data_value = float(data_value)
except ValueError as e:
log.critical("Data values has to be floats!")
raise ValueError(e)
value = {'S': str(getattr(row, 'source')), 'D': data_value}
parameter_dict[parameter_name]['Data'].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload | python | def convert_runsummary_to_json(
df, comment='Uploaded via km3pipe.StreamDS', prefix='TEST_'
):
"""Convert a Pandas DataFrame with runsummary to JSON for DB upload"""
data_field = []
comment += ", by {}".format(getpass.getuser())
for det_id, det_data in df.groupby('det_id'):
runs_field = []
data_field.append({"DetectorId": det_id, "Runs": runs_field})
for run, run_data in det_data.groupby('run'):
parameters_field = []
runs_field.append({
"Run": int(run),
"Parameters": parameters_field
})
parameter_dict = {}
for row in run_data.itertuples():
for parameter_name in run_data.columns:
if parameter_name in REQUIRED_COLUMNS:
continue
if parameter_name not in parameter_dict:
entry = {'Name': prefix + parameter_name, 'Data': []}
parameter_dict[parameter_name] = entry
data_value = getattr(row, parameter_name)
try:
data_value = float(data_value)
except ValueError as e:
log.critical("Data values has to be floats!")
raise ValueError(e)
value = {'S': str(getattr(row, 'source')), 'D': data_value}
parameter_dict[parameter_name]['Data'].append(value)
for parameter_data in parameter_dict.values():
parameters_field.append(parameter_data)
data_to_upload = {"Comment": comment, "Data": data_field}
file_data_to_upload = json.dumps(data_to_upload)
return file_data_to_upload | [
"def",
"convert_runsummary_to_json",
"(",
"df",
",",
"comment",
"=",
"'Uploaded via km3pipe.StreamDS'",
",",
"prefix",
"=",
"'TEST_'",
")",
":",
"data_field",
"=",
"[",
"]",
"comment",
"+=",
"\", by {}\"",
".",
"format",
"(",
"getpass",
".",
"getuser",
"(",
")",
")",
"for",
"det_id",
",",
"det_data",
"in",
"df",
".",
"groupby",
"(",
"'det_id'",
")",
":",
"runs_field",
"=",
"[",
"]",
"data_field",
".",
"append",
"(",
"{",
"\"DetectorId\"",
":",
"det_id",
",",
"\"Runs\"",
":",
"runs_field",
"}",
")",
"for",
"run",
",",
"run_data",
"in",
"det_data",
".",
"groupby",
"(",
"'run'",
")",
":",
"parameters_field",
"=",
"[",
"]",
"runs_field",
".",
"append",
"(",
"{",
"\"Run\"",
":",
"int",
"(",
"run",
")",
",",
"\"Parameters\"",
":",
"parameters_field",
"}",
")",
"parameter_dict",
"=",
"{",
"}",
"for",
"row",
"in",
"run_data",
".",
"itertuples",
"(",
")",
":",
"for",
"parameter_name",
"in",
"run_data",
".",
"columns",
":",
"if",
"parameter_name",
"in",
"REQUIRED_COLUMNS",
":",
"continue",
"if",
"parameter_name",
"not",
"in",
"parameter_dict",
":",
"entry",
"=",
"{",
"'Name'",
":",
"prefix",
"+",
"parameter_name",
",",
"'Data'",
":",
"[",
"]",
"}",
"parameter_dict",
"[",
"parameter_name",
"]",
"=",
"entry",
"data_value",
"=",
"getattr",
"(",
"row",
",",
"parameter_name",
")",
"try",
":",
"data_value",
"=",
"float",
"(",
"data_value",
")",
"except",
"ValueError",
"as",
"e",
":",
"log",
".",
"critical",
"(",
"\"Data values has to be floats!\"",
")",
"raise",
"ValueError",
"(",
"e",
")",
"value",
"=",
"{",
"'S'",
":",
"str",
"(",
"getattr",
"(",
"row",
",",
"'source'",
")",
")",
",",
"'D'",
":",
"data_value",
"}",
"parameter_dict",
"[",
"parameter_name",
"]",
"[",
"'Data'",
"]",
".",
"append",
"(",
"value",
")",
"for",
"parameter_data",
"in",
"parameter_dict",
".",
"values",
"(",
")",
":",
"parameters_field",
".",
"append",
"(",
"parameter_data",
")",
"data_to_upload",
"=",
"{",
"\"Comment\"",
":",
"comment",
",",
"\"Data\"",
":",
"data_field",
"}",
"file_data_to_upload",
"=",
"json",
".",
"dumps",
"(",
"data_to_upload",
")",
"return",
"file_data_to_upload"
] | Convert a Pandas DataFrame with runsummary to JSON for DB upload | [
"Convert",
"a",
"Pandas",
"DataFrame",
"with",
"runsummary",
"to",
"JSON",
"for",
"DB",
"upload"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L165-L203 | train |
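Reconstructed from the code above, the JSON payload has the nested shape sketched here; the detector ID, run number, parameter name and values are placeholders, and 'TEST_' is just the default prefix.

example_payload = {
    "Comment": "Uploaded via km3pipe.StreamDS, by someuser",
    "Data": [{
        "DetectorId": "D_ARCA003",
        "Runs": [{
            "Run": 1001,
            "Parameters": [{
                "Name": "TEST_livetime",
                "Data": [{"S": "qa_script", "D": 3598.2}],
            }],
        }],
    }],
}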
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallows.calcAcceptanceRatio | def calcAcceptanceRatio(self, V, W):
"""
Given a order vector V and a proposed order vector W, calculate the acceptance ratio for
changing to W when using MCMC.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
:ivar float phi: A value for phi such that 0 <= phi <= 1.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the proposed sample.
"""
acceptanceRatio = 1.0
for comb in itertools.combinations(V, 2):
#Check if comb[0] is ranked before comb[1] in V and W
vIOverJ = 1
wIOverJ = 1
if V.index(comb[0]) > V.index(comb[1]):
vIOverJ = 0
if W.index(comb[0]) > W.index(comb[1]):
wIOverJ = 0
acceptanceRatio = acceptanceRatio * self.phi**(self.wmg[comb[0]][comb[1]]*(vIOverJ-wIOverJ))
return acceptanceRatio | python | def calcAcceptanceRatio(self, V, W):
"""
Given a order vector V and a proposed order vector W, calculate the acceptance ratio for
changing to W when using MCMC.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
:ivar float phi: A value for phi such that 0 <= phi <= 1.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the proposed sample.
"""
acceptanceRatio = 1.0
for comb in itertools.combinations(V, 2):
#Check if comb[0] is ranked before comb[1] in V and W
vIOverJ = 1
wIOverJ = 1
if V.index(comb[0]) > V.index(comb[1]):
vIOverJ = 0
if W.index(comb[0]) > W.index(comb[1]):
wIOverJ = 0
acceptanceRatio = acceptanceRatio * self.phi**(self.wmg[comb[0]][comb[1]]*(vIOverJ-wIOverJ))
return acceptanceRatio | [
"def",
"calcAcceptanceRatio",
"(",
"self",
",",
"V",
",",
"W",
")",
":",
"acceptanceRatio",
"=",
"1.0",
"for",
"comb",
"in",
"itertools",
".",
"combinations",
"(",
"V",
",",
"2",
")",
":",
"#Check if comb[0] is ranked before comb[1] in V and W",
"vIOverJ",
"=",
"1",
"wIOverJ",
"=",
"1",
"if",
"V",
".",
"index",
"(",
"comb",
"[",
"0",
"]",
")",
">",
"V",
".",
"index",
"(",
"comb",
"[",
"1",
"]",
")",
":",
"vIOverJ",
"=",
"0",
"if",
"W",
".",
"index",
"(",
"comb",
"[",
"0",
"]",
")",
">",
"W",
".",
"index",
"(",
"comb",
"[",
"1",
"]",
")",
":",
"wIOverJ",
"=",
"0",
"acceptanceRatio",
"=",
"acceptanceRatio",
"*",
"self",
".",
"phi",
"**",
"(",
"self",
".",
"wmg",
"[",
"comb",
"[",
"0",
"]",
"]",
"[",
"comb",
"[",
"1",
"]",
"]",
"*",
"(",
"vIOverJ",
"-",
"wIOverJ",
")",
")",
"return",
"acceptanceRatio"
] | Given a order vector V and a proposed order vector W, calculate the acceptance ratio for
changing to W when using MCMC.
ivar: dict<int,<dict,<int,int>>> wmg: A two-dimensional dictionary that associates integer
representations of each pair of candidates, cand1 and cand2, with the number of times
cand1 is ranked above cand2 minus the number of times cand2 is ranked above cand1. The
dictionary represents a weighted majority graph for an election.
:ivar float phi: A value for phi such that 0 <= phi <= 1.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the proposed sample. | [
"Given",
"a",
"order",
"vector",
"V",
"and",
"a",
"proposed",
"order",
"vector",
"W",
"calculate",
"the",
"acceptance",
"ratio",
"for",
"changing",
"to",
"W",
"when",
"using",
"MCMC",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L34-L62 | train |
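In formula form, the loop above computes the product over candidate pairs {i, j} of phi**(wmg[i][j] * (indicator that V ranks i over j minus the same indicator for W)), so only pairs whose relative order differs between V and W contribute. A worked sketch with a hypothetical three-candidate weighted majority graph and phi = 0.8:

import itertools

phi = 0.8
wmg = {0: {1: 4, 2: 2}, 1: {0: -4, 2: -1}, 2: {0: -2, 1: 1}}   # hypothetical values
V = [0, 1, 2]
W = [1, 0, 2]          # only the (0, 1) pair is ordered differently

ratio = 1.0
for i, j in itertools.combinations(V, 2):
    v_i_over_j = 1 if V.index(i) < V.index(j) else 0
    w_i_over_j = 1 if W.index(i) < W.index(j) else 0
    ratio *= phi ** (wmg[i][j] * (v_i_over_j - w_i_over_j))

print(ratio)   # = 0.8**4 (about 0.4096); the other two pairs contribute factors of 1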
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsAdjacentPairwiseFlip.getNextSample | def getNextSample(self, V):
"""
Generate the next sample by randomly flipping two adjacent candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
# Select a random alternative in V to switch with its adjacent alternatives.
randPos = random.randint(0, len(V)-2)
W = copy.deepcopy(V)
d = V[randPos]
c = V[randPos+1]
W[randPos] = c
W[randPos+1] = d
# Check whether we should change to the new ranking.
prMW = 1
prMV = 1
prob = min(1.0,(prMW/prMV)*pow(self.phi, self.wmg[d][c]))/2
if random.random() <= prob:
V = W
return V | python | def getNextSample(self, V):
"""
Generate the next sample by randomly flipping two adjacent candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
# Select a random alternative in V to switch with its adjacent alternatives.
randPos = random.randint(0, len(V)-2)
W = copy.deepcopy(V)
d = V[randPos]
c = V[randPos+1]
W[randPos] = c
W[randPos+1] = d
# Check whether we should change to the new ranking.
prMW = 1
prMV = 1
prob = min(1.0,(prMW/prMV)*pow(self.phi, self.wmg[d][c]))/2
if random.random() <= prob:
V = W
return V | [
"def",
"getNextSample",
"(",
"self",
",",
"V",
")",
":",
"# Select a random alternative in V to switch with its adacent alternatives.",
"randPos",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"len",
"(",
"V",
")",
"-",
"2",
")",
"W",
"=",
"copy",
".",
"deepcopy",
"(",
"V",
")",
"d",
"=",
"V",
"[",
"randPos",
"]",
"c",
"=",
"V",
"[",
"randPos",
"+",
"1",
"]",
"W",
"[",
"randPos",
"]",
"=",
"c",
"W",
"[",
"randPos",
"+",
"1",
"]",
"=",
"d",
"# Check whether we should change to the new ranking.",
"prMW",
"=",
"1",
"prMV",
"=",
"1",
"prob",
"=",
"min",
"(",
"1.0",
",",
"(",
"prMW",
"/",
"prMV",
")",
"*",
"pow",
"(",
"self",
".",
"phi",
",",
"self",
".",
"wmg",
"[",
"d",
"]",
"[",
"c",
"]",
")",
")",
"/",
"2",
"if",
"random",
".",
"random",
"(",
")",
"<=",
"prob",
":",
"V",
"=",
"W",
"return",
"V"
] | Generate the next sample by randomly flipping two adjacent candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample. | [
"Generate",
"the",
"next",
"sample",
"by",
"randomly",
"flipping",
"two",
"adjacent",
"candidates",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L66-L89 | train |
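The accept step above reduces to min(1, phi**wmg[d][c]) / 2, where d is the candidate currently ranked just above c and the extra factor of 1/2 keeps the chain lazy (prMW and prMV are both 1). A small numeric sketch with assumed values:

phi = 0.8
wmg_d_c = 3                                   # hypothetical net wins of d over c
prob = min(1.0, phi ** wmg_d_c) / 2
print(prob)                                    # 0.8**3 / 2, about 0.256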
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsRandShuffle.getNextSample | def getNextSample(self, V):
"""
Generate the next sample by randomly shuffling candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
positions = range(0, len(self.wmg))
randPoss = random.sample(positions, self.shuffleSize)
flipSet = copy.deepcopy(randPoss)
randPoss.sort()
W = copy.deepcopy(V)
for j in range(0, self.shuffleSize):
W[randPoss[j]] = V[flipSet[j]]
# Check whether we should change to the new ranking.
prMW = 1.0
prMV = 1.0
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0,(prMW/prMV)*acceptanceRatio)
if random.random() <= prob:
V = W
return V | python | def getNextSample(self, V):
"""
Generate the next sample by randomly shuffling candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
positions = range(0, len(self.wmg))
randPoss = random.sample(positions, self.shuffleSize)
flipSet = copy.deepcopy(randPoss)
randPoss.sort()
W = copy.deepcopy(V)
for j in range(0, self.shuffleSize):
W[randPoss[j]] = V[flipSet[j]]
# Check whether we should change to the new ranking.
prMW = 1.0
prMV = 1.0
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0,(prMW/prMV)*acceptanceRatio)
if random.random() <= prob:
V = W
return V | [
"def",
"getNextSample",
"(",
"self",
",",
"V",
")",
":",
"positions",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"wmg",
")",
")",
"randPoss",
"=",
"random",
".",
"sample",
"(",
"positions",
",",
"self",
".",
"shuffleSize",
")",
"flipSet",
"=",
"copy",
".",
"deepcopy",
"(",
"randPoss",
")",
"randPoss",
".",
"sort",
"(",
")",
"W",
"=",
"copy",
".",
"deepcopy",
"(",
"V",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"self",
".",
"shuffleSize",
")",
":",
"W",
"[",
"randPoss",
"[",
"j",
"]",
"]",
"=",
"V",
"[",
"flipSet",
"[",
"j",
"]",
"]",
"# Check whether we should change to the new ranking.",
"prMW",
"=",
"1.0",
"prMV",
"=",
"1.0",
"acceptanceRatio",
"=",
"self",
".",
"calcAcceptanceRatio",
"(",
"V",
",",
"W",
")",
"prob",
"=",
"min",
"(",
"1.0",
",",
"(",
"prMW",
"/",
"prMV",
")",
"*",
"acceptanceRatio",
")",
"if",
"random",
".",
"random",
"(",
")",
"<=",
"prob",
":",
"V",
"=",
"W",
"return",
"V"
] | Generate the next sample by randomly shuffling candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample. | [
"Generate",
"the",
"next",
"sample",
"by",
"randomly",
"shuffling",
"candidates",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L98-L121 | train |
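The shuffle above draws shuffleSize positions in random order (flipSet), sorts a copy (randPoss), and writes V's candidates from the drawn order into the sorted slots, i.e. it permutes the candidates sitting at the chosen positions. A deterministic sketch with an assumed draw:

import copy

V = [3, 1, 4, 2, 5]
flipSet = [4, 0, 2]                          # pretend random.sample() returned this
randPoss = sorted(copy.deepcopy(flipSet))    # [0, 2, 4]
W = copy.deepcopy(V)
for j in range(len(flipSet)):
    W[randPoss[j]] = V[flipSet[j]]

print(W)   # [5, 1, 3, 2, 4]: the candidates at positions 0, 2 and 4 were permuted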
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsJumpingDistribution.getNextSample | def getNextSample(self, V):
"""
We generate a new ranking based on a Mallows-based jumping distribution. The algorithm is
described in "Bayesian Ordinal Peer Grading" by Raman and Joachims.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
phi = self.phi
wmg = self.wmg
W = []
W.append(V[0])
for j in range(2, len(V)+1):
randomSelect = random.random()
threshold = 0.0
denom = 1.0
for k in range(1, j):
denom = denom + phi**k
for k in range(1, j+1):
numerator = phi**(j - k)
threshold = threshold + numerator/denom
if randomSelect <= threshold:
W.insert(k-1,V[j-1])
break
# Check whether we should change to the new ranking.
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0,acceptanceRatio)
if random.random() <= prob:
V = W
return V | python | def getNextSample(self, V):
"""
We generate a new ranking based on a Mallows-based jumping distribution. The algorithm is
described in "Bayesian Ordinal Peer Grading" by Raman and Joachims.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
phi = self.phi
wmg = self.wmg
W = []
W.append(V[0])
for j in range(2, len(V)+1):
randomSelect = random.random()
threshold = 0.0
denom = 1.0
for k in range(1, j):
denom = denom + phi**k
for k in range(1, j+1):
numerator = phi**(j - k)
threshold = threshold + numerator/denom
if randomSelect <= threshold:
W.insert(k-1,V[j-1])
break
# Check whether we should change to the new ranking.
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0,acceptanceRatio)
if random.random() <= prob:
V = W
return V | [
"def",
"getNextSample",
"(",
"self",
",",
"V",
")",
":",
"phi",
"=",
"self",
".",
"phi",
"wmg",
"=",
"self",
".",
"wmg",
"W",
"=",
"[",
"]",
"W",
".",
"append",
"(",
"V",
"[",
"0",
"]",
")",
"for",
"j",
"in",
"range",
"(",
"2",
",",
"len",
"(",
"V",
")",
"+",
"1",
")",
":",
"randomSelect",
"=",
"random",
".",
"random",
"(",
")",
"threshold",
"=",
"0.0",
"denom",
"=",
"1.0",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"j",
")",
":",
"denom",
"=",
"denom",
"+",
"phi",
"**",
"k",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"j",
"+",
"1",
")",
":",
"numerator",
"=",
"phi",
"**",
"(",
"j",
"-",
"k",
")",
"threshold",
"=",
"threshold",
"+",
"numerator",
"/",
"denom",
"if",
"randomSelect",
"<=",
"threshold",
":",
"W",
".",
"insert",
"(",
"k",
"-",
"1",
",",
"V",
"[",
"j",
"-",
"1",
"]",
")",
"break",
"# Check whether we should change to the new ranking.",
"acceptanceRatio",
"=",
"self",
".",
"calcAcceptanceRatio",
"(",
"V",
",",
"W",
")",
"prob",
"=",
"min",
"(",
"1.0",
",",
"acceptanceRatio",
")",
"if",
"random",
".",
"random",
"(",
")",
"<=",
"prob",
":",
"V",
"=",
"W",
"return",
"V"
] | We generate a new ranking based on a Mallows-based jumping distribution. The algorithm is
described in "Bayesian Ordinal Peer Grading" by Raman and Joachims.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. | [
"We",
"generate",
"a",
"new",
"ranking",
"based",
"on",
"a",
"Mallows",
"-",
"based",
"jumping",
"distribution",
".",
"The",
"algorithm",
"is",
"described",
"in",
"Bayesian",
"Ordinal",
"Peer",
"Grading",
"by",
"Raman",
"and",
"Joachims",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L125-L157 | train |
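The insertion loop above places the j-th candidate of V at position k (1 = top of the partial ranking) with probability phi**(j - k) / (1 + phi + ... + phi**(j - 1)). A worked sketch for j = 3 and phi = 0.5 shows the individual probabilities and that they sum to one:

phi = 0.5
j = 3
denom = sum(phi ** k for k in range(j))               # 1 + 0.5 + 0.25 = 1.75
probs = [phi ** (j - k) / denom for k in range(1, j + 1)]
print(probs)        # [0.1428..., 0.2857..., 0.5714...]; the bottom slot is favoured for phi < 1
print(sum(probs))   # 1.0 (up to floating-point rounding)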
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsPlakettLuce.getNextSample | def getNextSample(self, V):
"""
Given a ranking over the candidates, generate a new ranking by assigning each candidate at
position i a Plakett-Luce weight of phi^i and draw a new ranking.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
W, WProb = self.drawRankingPlakettLuce(V)
VProb = self.calcProbOfVFromW(V, W)
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0, acceptanceRatio * (VProb/WProb))
if random.random() <= prob:
V = W
return V | python | def getNextSample(self, V):
"""
Given a ranking over the candidates, generate a new ranking by assigning each candidate at
position i a Plakett-Luce weight of phi^i and draw a new ranking.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
W, WProb = self.drawRankingPlakettLuce(V)
VProb = self.calcProbOfVFromW(V, W)
acceptanceRatio = self.calcAcceptanceRatio(V, W)
prob = min(1.0, acceptanceRatio * (VProb/WProb))
if random.random() <= prob:
V = W
return V | [
"def",
"getNextSample",
"(",
"self",
",",
"V",
")",
":",
"W",
",",
"WProb",
"=",
"self",
".",
"drawRankingPlakettLuce",
"(",
"V",
")",
"VProb",
"=",
"self",
".",
"calcProbOfVFromW",
"(",
"V",
",",
"W",
")",
"acceptanceRatio",
"=",
"self",
".",
"calcAcceptanceRatio",
"(",
"V",
",",
"W",
")",
"prob",
"=",
"min",
"(",
"1.0",
",",
"acceptanceRatio",
"*",
"(",
"VProb",
"/",
"WProb",
")",
")",
"if",
"random",
".",
"random",
"(",
")",
"<=",
"prob",
":",
"V",
"=",
"W",
"return",
"V"
] | Given a ranking over the candidates, generate a new ranking by assigning each candidate at
position i a Plakett-Luce weight of phi^i and draw a new ranking.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. | [
"Given",
"a",
"ranking",
"over",
"the",
"candidates",
"generate",
"a",
"new",
"ranking",
"by",
"assigning",
"each",
"candidate",
"at",
"position",
"i",
"a",
"Plakett",
"-",
"Luce",
"weight",
"of",
"phi^i",
"and",
"draw",
"a",
"new",
"ranking",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L191-L206 | train |
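Because the Plackett-Luce proposal above is not symmetric, the acceptance step multiplies the Mallows ratio by the reverse-to-forward proposal probability before clipping at 1, a standard Metropolis-Hastings correction. A tiny numeric sketch with assumed values (the helper name is hypothetical):

def mh_acceptance(target_ratio, prob_V_given_W, prob_W_given_V):
    # min(1, acceptanceRatio * P(V | W) / P(W | V)), as in getNextSample above
    return min(1.0, target_ratio * (prob_V_given_W / prob_W_given_V))

print(mh_acceptance(0.4096, 0.02, 0.05))   # 0.4096 * 0.4, about 0.16384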
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsPlakettLuce.calcDrawingProbs | def calcDrawingProbs(self):
"""
Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position.
"""
wmg = self.wmg
phi = self.phi
# We say the weight of the candidate in position i is phi^i.
weights = []
for i in range(0, len(wmg.keys())):
weights.append(phi**i)
# Calculate the probabilty that an item at each weight is drawn.
totalWeight = sum(weights)
for i in range(0, len(wmg.keys())):
weights[i] = weights[i]/totalWeight
return weights | python | def calcDrawingProbs(self):
"""
Returns a vector that contains the probabily of an item being from each position. We say
that every item in a order vector is drawn with weight phi^i where i is its position.
"""
wmg = self.wmg
phi = self.phi
# We say the weight of the candidate in position i is phi^i.
weights = []
for i in range(0, len(wmg.keys())):
weights.append(phi**i)
# Calculate the probabilty that an item at each weight is drawn.
totalWeight = sum(weights)
for i in range(0, len(wmg.keys())):
weights[i] = weights[i]/totalWeight
return weights | [
"def",
"calcDrawingProbs",
"(",
"self",
")",
":",
"wmg",
"=",
"self",
".",
"wmg",
"phi",
"=",
"self",
".",
"phi",
"# We say the weight of the candidate in position i is phi^i.",
"weights",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"wmg",
".",
"keys",
"(",
")",
")",
")",
":",
"weights",
".",
"append",
"(",
"phi",
"**",
"i",
")",
"# Calculate the probabilty that an item at each weight is drawn.",
"totalWeight",
"=",
"sum",
"(",
"weights",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"wmg",
".",
"keys",
"(",
")",
")",
")",
":",
"weights",
"[",
"i",
"]",
"=",
"weights",
"[",
"i",
"]",
"/",
"totalWeight",
"return",
"weights"
] | Returns a vector that contains the probability of an item being drawn from each position. We say
that every item in an order vector is drawn with weight phi^i where i is its position. | [
"Returns",
"a",
"vector",
"that",
"contains",
"the",
"probabily",
"of",
"an",
"item",
"being",
"from",
"each",
"position",
".",
"We",
"say",
"that",
"every",
"item",
"in",
"a",
"order",
"vector",
"is",
"drawn",
"with",
"weight",
"phi^i",
"where",
"i",
"is",
"its",
"position",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L208-L227 | train |
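As a quick numeric illustration of the phi**i weighting used in calcDrawingProbs, assuming a hypothetical dispersion phi = 0.5 and four candidates:

phi = 0.5
weights = [phi ** i for i in range(4)]   # [1.0, 0.5, 0.25, 0.125]
total = sum(weights)                     # 1.875
probs = [w / total for w in weights]     # ~[0.533, 0.267, 0.133, 0.067]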
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsPlakettLuce.drawRankingPlakettLuce | def drawRankingPlakettLuce(self, rankList):
"""
Given an order vector over the candidates, draw candidates to generate a new order vector.
:ivar list<int> rankList: Contains integer representations of each candidate in order of their
rank in a vote, from first to last.
"""
probs = self.plakettLuceProbs
numCands = len(rankList)
newRanking = []
remainingCands = copy.deepcopy(rankList)
probsCopy = copy.deepcopy(self.plakettLuceProbs)
totalProb = sum(probs)
# We will use prob to iteratively calculate the probability that we draw the order vector
# that we end up drawing.
prob = 1.0
while (len(newRanking) < numCands):
# We generate a random number from 0 to 1, and use it to select a candidate.
rand = random.random()
threshold = 0.0
for i in range(0, len(probsCopy)):
threshold = threshold + probsCopy[i]/totalProb
if rand <= threshold:
prob = prob * probsCopy[i]/totalProb
newRanking.append(remainingCands[i])
remainingCands.pop(i)
totalProb = totalProb - probsCopy[i]
probsCopy.pop(i)
break
return newRanking, prob | python | def drawRankingPlakettLuce(self, rankList):
"""
Given an order vector over the candidates, draw candidates to generate a new order vector.
:ivar list<int> rankList: Contains integer representations of each candidate in order of their
rank in a vote, from first to last.
"""
probs = self.plakettLuceProbs
numCands = len(rankList)
newRanking = []
remainingCands = copy.deepcopy(rankList)
probsCopy = copy.deepcopy(self.plakettLuceProbs)
totalProb = sum(probs)
# We will use prob to iteratively calculate the probability that we draw the order vector
# that we end up drawing.
prob = 1.0
while (len(newRanking) < numCands):
# We generate a random number from 0 to 1, and use it to select a candidate.
rand = random.random()
threshold = 0.0
for i in range(0, len(probsCopy)):
threshold = threshold + probsCopy[i]/totalProb
if rand <= threshold:
prob = prob * probsCopy[i]/totalProb
newRanking.append(remainingCands[i])
remainingCands.pop(i)
totalProb = totalProb - probsCopy[i]
probsCopy.pop(i)
break
return newRanking, prob | [
"def",
"drawRankingPlakettLuce",
"(",
"self",
",",
"rankList",
")",
":",
"probs",
"=",
"self",
".",
"plakettLuceProbs",
"numCands",
"=",
"len",
"(",
"rankList",
")",
"newRanking",
"=",
"[",
"]",
"remainingCands",
"=",
"copy",
".",
"deepcopy",
"(",
"rankList",
")",
"probsCopy",
"=",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"plakettLuceProbs",
")",
"totalProb",
"=",
"sum",
"(",
"probs",
")",
"# We will use prob to iteratively calculate the probabilty that we draw the order vector",
"# that we end up drawing.",
"prob",
"=",
"1.0",
"while",
"(",
"len",
"(",
"newRanking",
")",
"<",
"numCands",
")",
":",
"# We generate a random number from 0 to 1, and use it to select a candidate. ",
"rand",
"=",
"random",
".",
"random",
"(",
")",
"threshold",
"=",
"0.0",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"probsCopy",
")",
")",
":",
"threshold",
"=",
"threshold",
"+",
"probsCopy",
"[",
"i",
"]",
"/",
"totalProb",
"if",
"rand",
"<=",
"threshold",
":",
"prob",
"=",
"prob",
"*",
"probsCopy",
"[",
"i",
"]",
"/",
"totalProb",
"newRanking",
".",
"append",
"(",
"remainingCands",
"[",
"i",
"]",
")",
"remainingCands",
".",
"pop",
"(",
"i",
")",
"totalProb",
"=",
"totalProb",
"-",
"probsCopy",
"[",
"i",
"]",
"probsCopy",
".",
"pop",
"(",
"i",
")",
"break",
"return",
"newRanking",
",",
"prob"
] | Given an order vector over the candidates, draw candidates to generate a new order vector.
:ivar list<int> rankList: Contains integer representations of each candidate in order of their
rank in a vote, from first to last. | [
"Given",
"an",
"order",
"vector",
"over",
"the",
"candidates",
"draw",
"candidates",
"to",
"generate",
"a",
"new",
"order",
"vector",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L229-L263 | train |
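drawRankingPlakettLuce above is a sequential weighted draw without replacement. A compact sketch of the same idea follows, assuming weights[i] is the (unnormalised) weight of candidates[i]; it is illustrative only, not a drop-in replacement for the class method.

import random

def draw_without_replacement(candidates, weights):
    candidates, weights = list(candidates), list(weights)
    ranking, prob = [], 1.0
    while candidates:
        total = sum(weights)
        # pick an index proportionally to the remaining weights
        i = random.choices(range(len(candidates)), weights=weights)[0]
        prob *= weights[i] / total
        ranking.append(candidates.pop(i))
        weights.pop(i)
    return ranking, prob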
PrefPy/prefpy | prefpy/mechanismMcmcSampleGenerator.py | MechanismMcmcSampleGeneratorMallowsPlakettLuce.calcProbOfVFromW | def calcProbOfVFromW(self, V, W):
"""
Given an order vector V and an order vector W, calculate the probability that we generate
V as our next sample if our current sample was W.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
weights = range(0, len(V))
i = 0
for alt in W:
weights[alt-1] = self.phi ** i
i = i + 1
# Calculate the probability that we draw V[0], V[1], and so on from W.
prob = 1.0
totalWeight = sum(weights)
for alt in V:
prob = prob * weights[alt-1]/totalWeight
totalWeight = totalWeight - weights[alt-1]
return prob | python | def calcProbOfVFromW(self, V, W):
"""
Given an order vector V and an order vector W, calculate the probability that we generate
V as our next sample if our current sample was W.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
"""
weights = range(0, len(V))
i = 0
for alt in W:
weights[alt-1] = self.phi ** i
i = i + 1
# Calculate the probability that we draw V[0], V[1], and so on from W.
prob = 1.0
totalWeight = sum(weights)
for alt in V:
prob = prob * weights[alt-1]/totalWeight
totalWeight = totalWeight - weights[alt-1]
return prob | [
"def",
"calcProbOfVFromW",
"(",
"self",
",",
"V",
",",
"W",
")",
":",
"weights",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"V",
")",
")",
"i",
"=",
"0",
"for",
"alt",
"in",
"W",
":",
"weights",
"[",
"alt",
"-",
"1",
"]",
"=",
"self",
".",
"phi",
"**",
"i",
"i",
"=",
"i",
"+",
"1",
"# Calculate the probability that we draw V[0], V[1], and so on from W.",
"prob",
"=",
"1.0",
"totalWeight",
"=",
"sum",
"(",
"weights",
")",
"for",
"alt",
"in",
"V",
":",
"prob",
"=",
"prob",
"*",
"weights",
"[",
"alt",
"-",
"1",
"]",
"/",
"totalWeight",
"totalWeight",
"=",
"totalWeight",
"-",
"weights",
"[",
"alt",
"-",
"1",
"]",
"return",
"prob"
] | Given an order vector V and an order vector W, calculate the probability that we generate
V as our next sample if our current sample was W.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last.
:ivar list<int> W: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. | [
"Given",
"a",
"order",
"vector",
"V",
"and",
"an",
"order",
"vector",
"W",
"calculate",
"the",
"probability",
"that",
"we",
"generate",
"V",
"as",
"our",
"next",
"sample",
"if",
"our",
"current",
"sample",
"was",
"W",
"."
] | f395ba3782f05684fa5de0cece387a6da9391d02 | https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/mechanismMcmcSampleGenerator.py#L265-L289 | train |
tamasgal/km3pipe | km3pipe/io/root.py | get_hist | def get_hist(rfile, histname, get_overflow=False):
"""Read a 1D Histogram."""
import root_numpy as rnp
rfile = open_rfile(rfile)
hist = rfile[histname]
xlims = np.array(list(hist.xedges()))
bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
rfile.close()
return bin_values, xlims | python | def get_hist(rfile, histname, get_overflow=False):
"""Read a 1D Histogram."""
import root_numpy as rnp
rfile = open_rfile(rfile)
hist = rfile[histname]
xlims = np.array(list(hist.xedges()))
bin_values = rnp.hist2array(hist, include_overflow=get_overflow)
rfile.close()
return bin_values, xlims | [
"def",
"get_hist",
"(",
"rfile",
",",
"histname",
",",
"get_overflow",
"=",
"False",
")",
":",
"import",
"root_numpy",
"as",
"rnp",
"rfile",
"=",
"open_rfile",
"(",
"rfile",
")",
"hist",
"=",
"rfile",
"[",
"histname",
"]",
"xlims",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"hist",
".",
"xedges",
"(",
")",
")",
")",
"bin_values",
"=",
"rnp",
".",
"hist2array",
"(",
"hist",
",",
"include_overflow",
"=",
"get_overflow",
")",
"rfile",
".",
"close",
"(",
")",
"return",
"bin_values",
",",
"xlims"
] | Read a 1D Histogram. | [
"Read",
"a",
"1D",
"Histogram",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L31-L40 | train |
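A hypothetical get_hist call; 'calib.root' and 'h_tot' are placeholder names, rootpy/root_numpy must be installed, and the plotting step is only one possible way to inspect the result.

import matplotlib.pyplot as plt

bin_values, xlims = get_hist('calib.root', 'h_tot')   # placeholder file and histogram names
plt.step(xlims[:-1], bin_values, where='post')
plt.show()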
tamasgal/km3pipe | km3pipe/io/root.py | interpol_hist2d | def interpol_hist2d(h2d, oversamp_factor=10):
"""Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins.
"""
from rootpy import ROOTError
xlim = h2d.bins(axis=0)
ylim = h2d.bins(axis=1)
xn = h2d.nbins(0)
yn = h2d.nbins(1)
x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor)
y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor)
mat = np.zeros((xn, yn))
for xi in range(xn):
for yi in range(yn):
try:
mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
except ROOTError:
continue
return mat, x, y | python | def interpol_hist2d(h2d, oversamp_factor=10):
"""Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins.
"""
from rootpy import ROOTError
xlim = h2d.bins(axis=0)
ylim = h2d.bins(axis=1)
xn = h2d.nbins(0)
yn = h2d.nbins(1)
x = np.linspace(xlim[0], xlim[1], xn * oversamp_factor)
y = np.linspace(ylim[0], ylim[1], yn * oversamp_factor)
mat = np.zeros((xn, yn))
for xi in range(xn):
for yi in range(yn):
try:
mat[xi, yi] = h2d.interpolate(x[xi], y[yi])
except ROOTError:
continue
return mat, x, y | [
"def",
"interpol_hist2d",
"(",
"h2d",
",",
"oversamp_factor",
"=",
"10",
")",
":",
"from",
"rootpy",
"import",
"ROOTError",
"xlim",
"=",
"h2d",
".",
"bins",
"(",
"axis",
"=",
"0",
")",
"ylim",
"=",
"h2d",
".",
"bins",
"(",
"axis",
"=",
"1",
")",
"xn",
"=",
"h2d",
".",
"nbins",
"(",
"0",
")",
"yn",
"=",
"h2d",
".",
"nbins",
"(",
"1",
")",
"x",
"=",
"np",
".",
"linspace",
"(",
"xlim",
"[",
"0",
"]",
",",
"xlim",
"[",
"1",
"]",
",",
"xn",
"*",
"oversamp_factor",
")",
"y",
"=",
"np",
".",
"linspace",
"(",
"ylim",
"[",
"0",
"]",
",",
"ylim",
"[",
"1",
"]",
",",
"yn",
"*",
"oversamp_factor",
")",
"mat",
"=",
"np",
".",
"zeros",
"(",
"(",
"xn",
",",
"yn",
")",
")",
"for",
"xi",
"in",
"range",
"(",
"xn",
")",
":",
"for",
"yi",
"in",
"range",
"(",
"yn",
")",
":",
"try",
":",
"mat",
"[",
"xi",
",",
"yi",
"]",
"=",
"h2d",
".",
"interpolate",
"(",
"x",
"[",
"xi",
"]",
",",
"y",
"[",
"yi",
"]",
")",
"except",
"ROOTError",
":",
"continue",
"return",
"mat",
",",
"x",
",",
"y"
] | Sample the interpolator of a root 2d hist.
Root's hist2d has a weird internal interpolation routine,
also using neighbouring bins. | [
"Sample",
"the",
"interpolator",
"of",
"a",
"root",
"2d",
"hist",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/root.py#L70-L91 | train |
cprogrammer1994/GLWindow | GLWindow/__init__.py | create_window | def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:
'''
Create the main window.
Args:
size (tuple): The width and height of the window.
samples (int): The number of samples.
Keyword Args:
fullscreen (bool): Fullscreen?
title (bool): The title of the window.
threaded (bool): Threaded?
Returns:
Window: The main window.
'''
if size is None:
width, height = 1280, 720
else:
width, height = size
if samples < 0 or (samples & (samples - 1)) != 0:
raise Exception('Invalid number of samples: %d' % samples)
window = Window.__new__(Window)
window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)
return window | python | def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:
'''
Create the main window.
Args:
size (tuple): The width and height of the window.
samples (int): The number of samples.
Keyword Args:
fullscreen (bool): Fullscreen?
title (str): The title of the window.
threaded (bool): Threaded?
Returns:
Window: The main window.
'''
if size is None:
width, height = 1280, 720
else:
width, height = size
if samples < 0 or (samples & (samples - 1)) != 0:
raise Exception('Invalid number of samples: %d' % samples)
window = Window.__new__(Window)
window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)
return window | [
"def",
"create_window",
"(",
"size",
"=",
"None",
",",
"samples",
"=",
"16",
",",
"*",
",",
"fullscreen",
"=",
"False",
",",
"title",
"=",
"None",
",",
"threaded",
"=",
"True",
")",
"->",
"Window",
":",
"if",
"size",
"is",
"None",
":",
"width",
",",
"height",
"=",
"1280",
",",
"720",
"else",
":",
"width",
",",
"height",
"=",
"size",
"if",
"samples",
"<",
"0",
"or",
"(",
"samples",
"&",
"(",
"samples",
"-",
"1",
")",
")",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Invalid number of samples: %d'",
"%",
"samples",
")",
"window",
"=",
"Window",
".",
"__new__",
"(",
"Window",
")",
"window",
".",
"wnd",
"=",
"glwnd",
".",
"create_window",
"(",
"width",
",",
"height",
",",
"samples",
",",
"fullscreen",
",",
"title",
",",
"threaded",
")",
"return",
"window"
] | Create the main window.
Args:
size (tuple): The width and height of the window.
samples (int): The number of samples.
Keyword Args:
fullscreen (bool): Fullscreen?
title (str): The title of the window.
threaded (bool): Threaded?
Returns:
Window: The main window. | [
"Create",
"the",
"main",
"window",
"."
] | 521e18fcbc15e88d3c1f3547aa313c3a07386ee5 | https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L307-L335 | train |
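A minimal usage sketch for create_window, relying only on the calls that appear in these records (create_window, Window.clear, Window.windowed); the size, sample count and title are made up.

wnd = create_window((1280, 720), samples=4, title='demo')   # hypothetical values
wnd.clear(0.1, 0.1, 0.1, 1.0)   # dark grey background
wnd.windowed((800, 600))        # switch to / resize the windowed client area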
cprogrammer1994/GLWindow | GLWindow/__init__.py | Window.clear | def clear(self, red=0.0, green=0.0, blue=0.0, alpha=0.0) -> None:
'''
Clear the window.
'''
self.wnd.clear(red, green, blue, alpha) | python | def clear(self, red=0.0, green=0.0, blue=0.0, alpha=0.0) -> None:
'''
Clear the window.
'''
self.wnd.clear(red, green, blue, alpha) | [
"def",
"clear",
"(",
"self",
",",
"red",
"=",
"0.0",
",",
"green",
"=",
"0.0",
",",
"blue",
"=",
"0.0",
",",
"alpha",
"=",
"0.0",
")",
"->",
"None",
":",
"self",
".",
"wnd",
".",
"clear",
"(",
"red",
",",
"green",
",",
"blue",
",",
"alpha",
")"
] | Clear the window. | [
"Clear",
"the",
"window",
"."
] | 521e18fcbc15e88d3c1f3547aa313c3a07386ee5 | https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L59-L64 | train |
cprogrammer1994/GLWindow | GLWindow/__init__.py | Window.windowed | def windowed(self, size) -> None:
'''
Set the window to windowed mode.
'''
width, height = size
self.wnd.windowed(width, height) | python | def windowed(self, size) -> None:
'''
Set the window to windowed mode.
'''
width, height = size
self.wnd.windowed(width, height) | [
"def",
"windowed",
"(",
"self",
",",
"size",
")",
"->",
"None",
":",
"width",
",",
"height",
"=",
"size",
"self",
".",
"wnd",
".",
"windowed",
"(",
"width",
",",
"height",
")"
] | Set the window to windowed mode. | [
"Set",
"the",
"window",
"to",
"windowed",
"mode",
"."
] | 521e18fcbc15e88d3c1f3547aa313c3a07386ee5 | https://github.com/cprogrammer1994/GLWindow/blob/521e18fcbc15e88d3c1f3547aa313c3a07386ee5/GLWindow/__init__.py#L73-L80 | train |
developmentseed/sentinel-s3 | sentinel_s3/main.py | product_metadata | def product_metadata(product, dst_folder, counter=None, writers=[file_writer], geometry_check=None):
""" Extract metadata for a specific product """
if not counter:
counter = {
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com'
product_meta_link = '{0}/{1}'.format(s3_url, product['metadata'])
product_info = requests.get(product_meta_link, stream=True)
product_metadata = metadata_to_dict(product_info.raw)
product_metadata['product_meta_link'] = product_meta_link
counter['products'] += 1
for tile in product['tiles']:
tile_info = requests.get('{0}/{1}'.format(s3_url, tile))
try:
metadata = tile_metadata(tile_info.json(), copy(product_metadata), geometry_check)
for w in writers:
w(dst_folder, metadata)
logger.info('Saving to disk: %s' % metadata['tile_name'])
counter['saved_tiles'] += 1
except JSONDecodeError:
logger.warning('Tile: %s was not found and skipped' % tile)
counter['skipped_tiles'] += 1
counter['skipped_tiles_paths'].append(tile)
return counter | python | def product_metadata(product, dst_folder, counter=None, writers=[file_writer], geometry_check=None):
""" Extract metadata for a specific product """
if not counter:
counter = {
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
s3_url = 'http://sentinel-s2-l1c.s3.amazonaws.com'
product_meta_link = '{0}/{1}'.format(s3_url, product['metadata'])
product_info = requests.get(product_meta_link, stream=True)
product_metadata = metadata_to_dict(product_info.raw)
product_metadata['product_meta_link'] = product_meta_link
counter['products'] += 1
for tile in product['tiles']:
tile_info = requests.get('{0}/{1}'.format(s3_url, tile))
try:
metadata = tile_metadata(tile_info.json(), copy(product_metadata), geometry_check)
for w in writers:
w(dst_folder, metadata)
logger.info('Saving to disk: %s' % metadata['tile_name'])
counter['saved_tiles'] += 1
except JSONDecodeError:
logger.warning('Tile: %s was not found and skipped' % tile)
counter['skipped_tiles'] += 1
counter['skipped_tiles_paths'].append(tile)
return counter | [
"def",
"product_metadata",
"(",
"product",
",",
"dst_folder",
",",
"counter",
"=",
"None",
",",
"writers",
"=",
"[",
"file_writer",
"]",
",",
"geometry_check",
"=",
"None",
")",
":",
"if",
"not",
"counter",
":",
"counter",
"=",
"{",
"'products'",
":",
"0",
",",
"'saved_tiles'",
":",
"0",
",",
"'skipped_tiles'",
":",
"0",
",",
"'skipped_tiles_paths'",
":",
"[",
"]",
"}",
"s3_url",
"=",
"'http://sentinel-s2-l1c.s3.amazonaws.com'",
"product_meta_link",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"s3_url",
",",
"product",
"[",
"'metadata'",
"]",
")",
"product_info",
"=",
"requests",
".",
"get",
"(",
"product_meta_link",
",",
"stream",
"=",
"True",
")",
"product_metadata",
"=",
"metadata_to_dict",
"(",
"product_info",
".",
"raw",
")",
"product_metadata",
"[",
"'product_meta_link'",
"]",
"=",
"product_meta_link",
"counter",
"[",
"'products'",
"]",
"+=",
"1",
"for",
"tile",
"in",
"product",
"[",
"'tiles'",
"]",
":",
"tile_info",
"=",
"requests",
".",
"get",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"s3_url",
",",
"tile",
")",
")",
"try",
":",
"metadata",
"=",
"tile_metadata",
"(",
"tile_info",
".",
"json",
"(",
")",
",",
"copy",
"(",
"product_metadata",
")",
",",
"geometry_check",
")",
"for",
"w",
"in",
"writers",
":",
"w",
"(",
"dst_folder",
",",
"metadata",
")",
"logger",
".",
"info",
"(",
"'Saving to disk: %s'",
"%",
"metadata",
"[",
"'tile_name'",
"]",
")",
"counter",
"[",
"'saved_tiles'",
"]",
"+=",
"1",
"except",
"JSONDecodeError",
":",
"logger",
".",
"warning",
"(",
"'Tile: %s was not found and skipped'",
"%",
"tile",
")",
"counter",
"[",
"'skipped_tiles'",
"]",
"+=",
"1",
"counter",
"[",
"'skipped_tiles_paths'",
"]",
".",
"append",
"(",
"tile",
")",
"return",
"counter"
] | Extract metadata for a specific product | [
"Extract",
"metadata",
"for",
"a",
"specific",
"product"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L58-L93 | train |
developmentseed/sentinel-s3 | sentinel_s3/main.py | daily_metadata | def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None,
num_worker_threads=1):
""" Extra metadata for all products in a specific date """
threaded = False
counter = {
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
if num_worker_threads > 1:
threaded = True
queue = Queue()
# create folders
year_dir = os.path.join(dst_folder, str(year))
month_dir = os.path.join(year_dir, str(month))
day_dir = os.path.join(month_dir, str(day))
product_list = get_products_metadata_path(year, month, day)
logger.info('There are %s products in %s-%s-%s' % (len(list(iterkeys(product_list))),
year, month, day))
for name, product in iteritems(product_list):
product_dir = os.path.join(day_dir, name)
if threaded:
queue.put([product, product_dir, counter, writers, geometry_check])
else:
counter = product_metadata(product, product_dir, counter, writers, geometry_check)
if threaded:
def worker():
while not queue.empty():
args = queue.get()
try:
product_metadata(*args)
except Exception:
exc = sys.exc_info()
logger.error('%s tile skipped due to error: %s' % (threading.current_thread().name,
exc[1].__str__()))
args[2]['skipped_tiles'] += 1
queue.task_done()
threads = []
for i in range(num_worker_threads):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
queue.join()
return counter | python | def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None,
num_worker_threads=1):
""" Extra metadata for all products in a specific date """
threaded = False
counter = {
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
if num_worker_threads > 1:
threaded = True
queue = Queue()
# create folders
year_dir = os.path.join(dst_folder, str(year))
month_dir = os.path.join(year_dir, str(month))
day_dir = os.path.join(month_dir, str(day))
product_list = get_products_metadata_path(year, month, day)
logger.info('There are %s products in %s-%s-%s' % (len(list(iterkeys(product_list))),
year, month, day))
for name, product in iteritems(product_list):
product_dir = os.path.join(day_dir, name)
if threaded:
queue.put([product, product_dir, counter, writers, geometry_check])
else:
counter = product_metadata(product, product_dir, counter, writers, geometry_check)
if threaded:
def worker():
while not queue.empty():
args = queue.get()
try:
product_metadata(*args)
except Exception:
exc = sys.exc_info()
logger.error('%s tile skipped due to error: %s' % (threading.current_thread().name,
exc[1].__str__()))
args[2]['skipped_tiles'] += 1
queue.task_done()
threads = []
for i in range(num_worker_threads):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
queue.join()
return counter | [
"def",
"daily_metadata",
"(",
"year",
",",
"month",
",",
"day",
",",
"dst_folder",
",",
"writers",
"=",
"[",
"file_writer",
"]",
",",
"geometry_check",
"=",
"None",
",",
"num_worker_threads",
"=",
"1",
")",
":",
"threaded",
"=",
"False",
"counter",
"=",
"{",
"'products'",
":",
"0",
",",
"'saved_tiles'",
":",
"0",
",",
"'skipped_tiles'",
":",
"0",
",",
"'skipped_tiles_paths'",
":",
"[",
"]",
"}",
"if",
"num_worker_threads",
">",
"1",
":",
"threaded",
"=",
"True",
"queue",
"=",
"Queue",
"(",
")",
"# create folders",
"year_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst_folder",
",",
"str",
"(",
"year",
")",
")",
"month_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"year_dir",
",",
"str",
"(",
"month",
")",
")",
"day_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"month_dir",
",",
"str",
"(",
"day",
")",
")",
"product_list",
"=",
"get_products_metadata_path",
"(",
"year",
",",
"month",
",",
"day",
")",
"logger",
".",
"info",
"(",
"'There are %s products in %s-%s-%s'",
"%",
"(",
"len",
"(",
"list",
"(",
"iterkeys",
"(",
"product_list",
")",
")",
")",
",",
"year",
",",
"month",
",",
"day",
")",
")",
"for",
"name",
",",
"product",
"in",
"iteritems",
"(",
"product_list",
")",
":",
"product_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"day_dir",
",",
"name",
")",
"if",
"threaded",
":",
"queue",
".",
"put",
"(",
"[",
"product",
",",
"product_dir",
",",
"counter",
",",
"writers",
",",
"geometry_check",
"]",
")",
"else",
":",
"counter",
"=",
"product_metadata",
"(",
"product",
",",
"product_dir",
",",
"counter",
",",
"writers",
",",
"geometry_check",
")",
"if",
"threaded",
":",
"def",
"worker",
"(",
")",
":",
"while",
"not",
"queue",
".",
"empty",
"(",
")",
":",
"args",
"=",
"queue",
".",
"get",
"(",
")",
"try",
":",
"product_metadata",
"(",
"*",
"args",
")",
"except",
"Exception",
":",
"exc",
"=",
"sys",
".",
"exc_info",
"(",
")",
"logger",
".",
"error",
"(",
"'%s tile skipped due to error: %s'",
"%",
"(",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
",",
"exc",
"[",
"1",
"]",
".",
"__str__",
"(",
")",
")",
")",
"args",
"[",
"2",
"]",
"[",
"'skipped_tiles'",
"]",
"+=",
"1",
"queue",
".",
"task_done",
"(",
")",
"threads",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"num_worker_threads",
")",
":",
"t",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"worker",
")",
"t",
".",
"start",
"(",
")",
"threads",
".",
"append",
"(",
"t",
")",
"queue",
".",
"join",
"(",
")",
"return",
"counter"
] | Extract metadata for all products in a specific date | [
"Extra",
"metadata",
"for",
"all",
"products",
"in",
"a",
"specific",
"date"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L102-L158 | train |
developmentseed/sentinel-s3 | sentinel_s3/main.py | range_metadata | def range_metadata(start, end, dst_folder, num_worker_threads=0, writers=[file_writer], geometry_check=None):
""" Extra metadata for all products in a date range """
assert isinstance(start, date)
assert isinstance(end, date)
delta = end - start
dates = []
for i in range(delta.days + 1):
dates.append(start + timedelta(days=i))
days = len(dates)
total_counter = {
'days': days,
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
def update_counter(counter):
for key in iterkeys(total_counter):
if key in counter:
total_counter[key] += counter[key]
for d in dates:
logger.info('Getting metadata of {0}-{1}-{2}'.format(d.year, d.month, d.day))
update_counter(daily_metadata(d.year, d.month, d.day, dst_folder, writers, geometry_check,
num_worker_threads))
return total_counter | python | def range_metadata(start, end, dst_folder, num_worker_threads=0, writers=[file_writer], geometry_check=None):
""" Extra metadata for all products in a date range """
assert isinstance(start, date)
assert isinstance(end, date)
delta = end - start
dates = []
for i in range(delta.days + 1):
dates.append(start + timedelta(days=i))
days = len(dates)
total_counter = {
'days': days,
'products': 0,
'saved_tiles': 0,
'skipped_tiles': 0,
'skipped_tiles_paths': []
}
def update_counter(counter):
for key in iterkeys(total_counter):
if key in counter:
total_counter[key] += counter[key]
for d in dates:
logger.info('Getting metadata of {0}-{1}-{2}'.format(d.year, d.month, d.day))
update_counter(daily_metadata(d.year, d.month, d.day, dst_folder, writers, geometry_check,
num_worker_threads))
return total_counter | [
"def",
"range_metadata",
"(",
"start",
",",
"end",
",",
"dst_folder",
",",
"num_worker_threads",
"=",
"0",
",",
"writers",
"=",
"[",
"file_writer",
"]",
",",
"geometry_check",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"start",
",",
"date",
")",
"assert",
"isinstance",
"(",
"end",
",",
"date",
")",
"delta",
"=",
"end",
"-",
"start",
"dates",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"delta",
".",
"days",
"+",
"1",
")",
":",
"dates",
".",
"append",
"(",
"start",
"+",
"timedelta",
"(",
"days",
"=",
"i",
")",
")",
"days",
"=",
"len",
"(",
"dates",
")",
"total_counter",
"=",
"{",
"'days'",
":",
"days",
",",
"'products'",
":",
"0",
",",
"'saved_tiles'",
":",
"0",
",",
"'skipped_tiles'",
":",
"0",
",",
"'skipped_tiles_paths'",
":",
"[",
"]",
"}",
"def",
"update_counter",
"(",
"counter",
")",
":",
"for",
"key",
"in",
"iterkeys",
"(",
"total_counter",
")",
":",
"if",
"key",
"in",
"counter",
":",
"total_counter",
"[",
"key",
"]",
"+=",
"counter",
"[",
"key",
"]",
"for",
"d",
"in",
"dates",
":",
"logger",
".",
"info",
"(",
"'Getting metadata of {0}-{1}-{2}'",
".",
"format",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")",
")",
"update_counter",
"(",
"daily_metadata",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
",",
"dst_folder",
",",
"writers",
",",
"geometry_check",
",",
"num_worker_threads",
")",
")",
"return",
"total_counter"
] | Extract metadata for all products in a date range | [
"Extra",
"metadata",
"for",
"all",
"products",
"in",
"a",
"date",
"range"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/main.py#L161-L194 | train |
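A hypothetical range_metadata invocation; the date range and destination folder are made up, and it assumes file_writer is importable from the same module (it is the default writer).

from datetime import date

stats = range_metadata(date(2018, 1, 1), date(2018, 1, 7), 'metadata',
                       num_worker_threads=4, writers=[file_writer])
# total_counter keys come from the function above
print(stats['products'], stats['saved_tiles'], stats['skipped_tiles'])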
NaPs/Kolekto | kolekto/tmdb_proxy.py | get_on_tmdb | def get_on_tmdb(uri, **kwargs):
""" Get a resource on TMDB.
"""
kwargs['api_key'] = app.config['TMDB_API_KEY']
response = requests_session.get((TMDB_API_URL + uri).encode('utf8'), params=kwargs)
response.raise_for_status()
return json.loads(response.text) | python | def get_on_tmdb(uri, **kwargs):
""" Get a resource on TMDB.
"""
kwargs['api_key'] = app.config['TMDB_API_KEY']
response = requests_session.get((TMDB_API_URL + uri).encode('utf8'), params=kwargs)
response.raise_for_status()
return json.loads(response.text) | [
"def",
"get_on_tmdb",
"(",
"uri",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'api_key'",
"]",
"=",
"app",
".",
"config",
"[",
"'TMDB_API_KEY'",
"]",
"response",
"=",
"requests_session",
".",
"get",
"(",
"(",
"TMDB_API_URL",
"+",
"uri",
")",
".",
"encode",
"(",
"'utf8'",
")",
",",
"params",
"=",
"kwargs",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")"
] | Get a resource on TMDB. | [
"Get",
"a",
"resource",
"on",
"TMDB",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L40-L46 | train |
NaPs/Kolekto | kolekto/tmdb_proxy.py | search | def search():
""" Search a movie on TMDB.
"""
redis_key = 's_%s' % request.args['query'].lower()
cached = redis_ro_conn.get(redis_key)
if cached:
return Response(cached)
else:
try:
found = get_on_tmdb(u'/search/movie', query=request.args['query'])
movies = []
for movie in found['results']:
cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])
year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None
movies.append({'title': movie['original_title'],
'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
'year': year,
'_tmdb_id': movie['id']})
except requests.HTTPError as err:
return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
json_response = json.dumps({'movies': movies})
redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
return Response(json_response) | python | def search():
""" Search a movie on TMDB.
"""
redis_key = 's_%s' % request.args['query'].lower()
cached = redis_ro_conn.get(redis_key)
if cached:
return Response(cached)
else:
try:
found = get_on_tmdb(u'/search/movie', query=request.args['query'])
movies = []
for movie in found['results']:
cast = get_on_tmdb(u'/movie/%s/casts' % movie['id'])
year = datetime.strptime(movie['release_date'], '%Y-%m-%d').year if movie['release_date'] else None
movies.append({'title': movie['original_title'],
'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
'year': year,
'_tmdb_id': movie['id']})
except requests.HTTPError as err:
return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
json_response = json.dumps({'movies': movies})
redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
return Response(json_response) | [
"def",
"search",
"(",
")",
":",
"redis_key",
"=",
"'s_%s'",
"%",
"request",
".",
"args",
"[",
"'query'",
"]",
".",
"lower",
"(",
")",
"cached",
"=",
"redis_ro_conn",
".",
"get",
"(",
"redis_key",
")",
"if",
"cached",
":",
"return",
"Response",
"(",
"cached",
")",
"else",
":",
"try",
":",
"found",
"=",
"get_on_tmdb",
"(",
"u'/search/movie'",
",",
"query",
"=",
"request",
".",
"args",
"[",
"'query'",
"]",
")",
"movies",
"=",
"[",
"]",
"for",
"movie",
"in",
"found",
"[",
"'results'",
"]",
":",
"cast",
"=",
"get_on_tmdb",
"(",
"u'/movie/%s/casts'",
"%",
"movie",
"[",
"'id'",
"]",
")",
"year",
"=",
"datetime",
".",
"strptime",
"(",
"movie",
"[",
"'release_date'",
"]",
",",
"'%Y-%m-%d'",
")",
".",
"year",
"if",
"movie",
"[",
"'release_date'",
"]",
"else",
"None",
"movies",
".",
"append",
"(",
"{",
"'title'",
":",
"movie",
"[",
"'original_title'",
"]",
",",
"'directors'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"cast",
"[",
"'crew'",
"]",
"if",
"x",
"[",
"'department'",
"]",
"==",
"'Directing'",
"and",
"x",
"[",
"'job'",
"]",
"==",
"'Director'",
"]",
",",
"'year'",
":",
"year",
",",
"'_tmdb_id'",
":",
"movie",
"[",
"'id'",
"]",
"}",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"err",
":",
"return",
"Response",
"(",
"'TMDB API error: %s'",
"%",
"str",
"(",
"err",
")",
",",
"status",
"=",
"err",
".",
"response",
".",
"status_code",
")",
"json_response",
"=",
"json",
".",
"dumps",
"(",
"{",
"'movies'",
":",
"movies",
"}",
")",
"redis_conn",
".",
"setex",
"(",
"redis_key",
",",
"app",
".",
"config",
"[",
"'CACHE_TTL'",
"]",
",",
"json_response",
")",
"return",
"Response",
"(",
"json_response",
")"
] | Search a movie on TMDB. | [
"Search",
"a",
"movie",
"on",
"TMDB",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L50-L72 | train |
NaPs/Kolekto | kolekto/tmdb_proxy.py | get_movie | def get_movie(tmdb_id):
""" Get informations about a movie using its tmdb id.
"""
redis_key = 'm_%s' % tmdb_id
cached = redis_ro_conn.get(redis_key)
if cached:
return Response(cached)
else:
try:
details = get_on_tmdb(u'/movie/%d' % tmdb_id)
cast = get_on_tmdb(u'/movie/%d/casts' % tmdb_id)
alternative = get_on_tmdb(u'/movie/%d/alternative_titles' % tmdb_id)
except requests.HTTPError as err:
return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
movie = {'title': details['original_title'],
'score': details['popularity'],
'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
'writers': [x['name'] for x in cast['crew'] if x['department'] == 'Writing'],
'cast': [x['name'] for x in cast['cast']],
'genres': [x['name'] for x in details['genres']],
'countries': [x['name'] for x in details['production_countries']],
'tmdb_votes': int(round(details.get('vote_average', 0) * 0.5)),
'_tmdb_id': tmdb_id}
if details.get('release_date'):
movie['year'] = datetime.strptime(details['release_date'], '%Y-%m-%d').year
if details.get('belongs_to_collection'):
movie['collection'] = details['belongs_to_collection']['name']
for alt in alternative['titles']:
movie['title_%s' % alt['iso_3166_1'].lower()] = alt['title']
json_response = json.dumps({'movie': movie})
redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
return Response(json_response) | python | def get_movie(tmdb_id):
""" Get informations about a movie using its tmdb id.
"""
redis_key = 'm_%s' % tmdb_id
cached = redis_ro_conn.get(redis_key)
if cached:
return Response(cached)
else:
try:
details = get_on_tmdb(u'/movie/%d' % tmdb_id)
cast = get_on_tmdb(u'/movie/%d/casts' % tmdb_id)
alternative = get_on_tmdb(u'/movie/%d/alternative_titles' % tmdb_id)
except requests.HTTPError as err:
return Response('TMDB API error: %s' % str(err), status=err.response.status_code)
movie = {'title': details['original_title'],
'score': details['popularity'],
'directors': [x['name'] for x in cast['crew'] if x['department'] == 'Directing' and x['job'] == 'Director'],
'writers': [x['name'] for x in cast['crew'] if x['department'] == 'Writing'],
'cast': [x['name'] for x in cast['cast']],
'genres': [x['name'] for x in details['genres']],
'countries': [x['name'] for x in details['production_countries']],
'tmdb_votes': int(round(details.get('vote_average', 0) * 0.5)),
'_tmdb_id': tmdb_id}
if details.get('release_date'):
movie['year'] = datetime.strptime(details['release_date'], '%Y-%m-%d').year
if details.get('belongs_to_collection'):
movie['collection'] = details['belongs_to_collection']['name']
for alt in alternative['titles']:
movie['title_%s' % alt['iso_3166_1'].lower()] = alt['title']
json_response = json.dumps({'movie': movie})
redis_conn.setex(redis_key, app.config['CACHE_TTL'], json_response)
return Response(json_response) | [
"def",
"get_movie",
"(",
"tmdb_id",
")",
":",
"redis_key",
"=",
"'m_%s'",
"%",
"tmdb_id",
"cached",
"=",
"redis_ro_conn",
".",
"get",
"(",
"redis_key",
")",
"if",
"cached",
":",
"return",
"Response",
"(",
"cached",
")",
"else",
":",
"try",
":",
"details",
"=",
"get_on_tmdb",
"(",
"u'/movie/%d'",
"%",
"tmdb_id",
")",
"cast",
"=",
"get_on_tmdb",
"(",
"u'/movie/%d/casts'",
"%",
"tmdb_id",
")",
"alternative",
"=",
"get_on_tmdb",
"(",
"u'/movie/%d/alternative_titles'",
"%",
"tmdb_id",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"err",
":",
"return",
"Response",
"(",
"'TMDB API error: %s'",
"%",
"str",
"(",
"err",
")",
",",
"status",
"=",
"err",
".",
"response",
".",
"status_code",
")",
"movie",
"=",
"{",
"'title'",
":",
"details",
"[",
"'original_title'",
"]",
",",
"'score'",
":",
"details",
"[",
"'popularity'",
"]",
",",
"'directors'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"cast",
"[",
"'crew'",
"]",
"if",
"x",
"[",
"'department'",
"]",
"==",
"'Directing'",
"and",
"x",
"[",
"'job'",
"]",
"==",
"'Director'",
"]",
",",
"'writers'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"cast",
"[",
"'crew'",
"]",
"if",
"x",
"[",
"'department'",
"]",
"==",
"'Writing'",
"]",
",",
"'cast'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"cast",
"[",
"'cast'",
"]",
"]",
",",
"'genres'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"details",
"[",
"'genres'",
"]",
"]",
",",
"'countries'",
":",
"[",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"details",
"[",
"'production_countries'",
"]",
"]",
",",
"'tmdb_votes'",
":",
"int",
"(",
"round",
"(",
"details",
".",
"get",
"(",
"'vote_average'",
",",
"0",
")",
"*",
"0.5",
")",
")",
",",
"'_tmdb_id'",
":",
"tmdb_id",
"}",
"if",
"details",
".",
"get",
"(",
"'release_date'",
")",
":",
"movie",
"[",
"'year'",
"]",
"=",
"datetime",
".",
"strptime",
"(",
"details",
"[",
"'release_date'",
"]",
",",
"'%Y-%m-%d'",
")",
".",
"year",
"if",
"details",
".",
"get",
"(",
"'belongs_to_collection'",
")",
":",
"movie",
"[",
"'collection'",
"]",
"=",
"details",
"[",
"'belongs_to_collection'",
"]",
"[",
"'name'",
"]",
"for",
"alt",
"in",
"alternative",
"[",
"'titles'",
"]",
":",
"movie",
"[",
"'title_%s'",
"%",
"alt",
"[",
"'iso_3166_1'",
"]",
".",
"lower",
"(",
")",
"]",
"=",
"alt",
"[",
"'title'",
"]",
"json_response",
"=",
"json",
".",
"dumps",
"(",
"{",
"'movie'",
":",
"movie",
"}",
")",
"redis_conn",
".",
"setex",
"(",
"redis_key",
",",
"app",
".",
"config",
"[",
"'CACHE_TTL'",
"]",
",",
"json_response",
")",
"return",
"Response",
"(",
"json_response",
")"
] | Get information about a movie using its tmdb id. | [
"Get",
"informations",
"about",
"a",
"movie",
"using",
"its",
"tmdb",
"id",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/tmdb_proxy.py#L76-L107 | train |
LeadPages/gcloud_requests | gcloud_requests/proxy.py | RequestsProxy._handle_response_error | def _handle_response_error(self, response, retries, **kwargs):
r"""Provides a way for each connection wrapper to handle error
responses.
Parameters:
response(Response): An instance of :class:`.requests.Response`.
retries(int): The number of times :meth:`.request` has been
called so far.
\**kwargs: The parameters with which :meth:`.request` was
called. The `retries` parameter is excluded from `kwargs`
intentionally.
Returns:
requests.Response
"""
error = self._convert_response_to_error(response)
if error is None:
return response
max_retries = self._max_retries_for_error(error)
if max_retries is None or retries >= max_retries:
return response
backoff = min(0.0625 * 2 ** retries, 1.0)
self.logger.warning("Sleeping for %r before retrying failed request...", backoff)
time.sleep(backoff)
retries += 1
self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, max_retries)
return self.request(retries=retries, **kwargs) | python | def _handle_response_error(self, response, retries, **kwargs):
r"""Provides a way for each connection wrapper to handle error
responses.
Parameters:
response(Response): An instance of :class:`.requests.Response`.
retries(int): The number of times :meth:`.request` has been
called so far.
\**kwargs: The parameters with which :meth:`.request` was
called. The `retries` parameter is excluded from `kwargs`
intentionally.
Returns:
requests.Response
"""
error = self._convert_response_to_error(response)
if error is None:
return response
max_retries = self._max_retries_for_error(error)
if max_retries is None or retries >= max_retries:
return response
backoff = min(0.0625 * 2 ** retries, 1.0)
self.logger.warning("Sleeping for %r before retrying failed request...", backoff)
time.sleep(backoff)
retries += 1
self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, max_retries)
return self.request(retries=retries, **kwargs) | [
"def",
"_handle_response_error",
"(",
"self",
",",
"response",
",",
"retries",
",",
"*",
"*",
"kwargs",
")",
":",
"error",
"=",
"self",
".",
"_convert_response_to_error",
"(",
"response",
")",
"if",
"error",
"is",
"None",
":",
"return",
"response",
"max_retries",
"=",
"self",
".",
"_max_retries_for_error",
"(",
"error",
")",
"if",
"max_retries",
"is",
"None",
"or",
"retries",
">=",
"max_retries",
":",
"return",
"response",
"backoff",
"=",
"min",
"(",
"0.0625",
"*",
"2",
"**",
"retries",
",",
"1.0",
")",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Sleeping for %r before retrying failed request...\"",
",",
"backoff",
")",
"time",
".",
"sleep",
"(",
"backoff",
")",
"retries",
"+=",
"1",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Retrying failed request. Attempt %d/%d.\"",
",",
"retries",
",",
"max_retries",
")",
"return",
"self",
".",
"request",
"(",
"retries",
"=",
"retries",
",",
"*",
"*",
"kwargs",
")"
] | r"""Provides a way for each connection wrapper to handle error
responses.
Parameters:
response(Response): An instance of :class:`.requests.Response`.
retries(int): The number of times :meth:`.request` has been
called so far.
\**kwargs: The parameters with which :meth:`.request` was
called. The `retries` parameter is excluded from `kwargs`
intentionally.
Returns:
requests.Response | [
"r",
"Provides",
"a",
"way",
"for",
"each",
"connection",
"wrapper",
"to",
"handle",
"error",
"responses",
"."
] | 8933363c4e9fa1e5ec0e90d683fca8ef8a949752 | https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/proxy.py#L132-L162 | train |
LeadPages/gcloud_requests | gcloud_requests/proxy.py | RequestsProxy._convert_response_to_error | def _convert_response_to_error(self, response):
"""Subclasses may override this method in order to influence
how errors are parsed from the response.
Parameters:
response(Response): The response object.
Returns:
object or None: Any object for which a max retry count can
be retrieved or None if the error cannot be handled.
"""
content_type = response.headers.get("content-type", "")
if "application/x-protobuf" in content_type:
self.logger.debug("Decoding protobuf response.")
data = status_pb2.Status.FromString(response.content)
status = self._PB_ERROR_CODES.get(data.code)
error = {"status": status}
return error
elif "application/json" in content_type:
self.logger.debug("Decoding json response.")
data = response.json()
error = data.get("error")
if not error or not isinstance(error, dict):
self.logger.warning("Unexpected error response: %r", data)
return None
return error
self.logger.warning("Unexpected response: %r", response.text)
return None | python | def _convert_response_to_error(self, response):
"""Subclasses may override this method in order to influence
how errors are parsed from the response.
Parameters:
response(Response): The response object.
Returns:
object or None: Any object for which a max retry count can
be retrieved or None if the error cannot be handled.
"""
content_type = response.headers.get("content-type", "")
if "application/x-protobuf" in content_type:
self.logger.debug("Decoding protobuf response.")
data = status_pb2.Status.FromString(response.content)
status = self._PB_ERROR_CODES.get(data.code)
error = {"status": status}
return error
elif "application/json" in content_type:
self.logger.debug("Decoding json response.")
data = response.json()
error = data.get("error")
if not error or not isinstance(error, dict):
self.logger.warning("Unexpected error response: %r", data)
return None
return error
self.logger.warning("Unexpected response: %r", response.text)
return None | [
"def",
"_convert_response_to_error",
"(",
"self",
",",
"response",
")",
":",
"content_type",
"=",
"response",
".",
"headers",
".",
"get",
"(",
"\"content-type\"",
",",
"\"\"",
")",
"if",
"\"application/x-protobuf\"",
"in",
"content_type",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Decoding protobuf response.\"",
")",
"data",
"=",
"status_pb2",
".",
"Status",
".",
"FromString",
"(",
"response",
".",
"content",
")",
"status",
"=",
"self",
".",
"_PB_ERROR_CODES",
".",
"get",
"(",
"data",
".",
"code",
")",
"error",
"=",
"{",
"\"status\"",
":",
"status",
"}",
"return",
"error",
"elif",
"\"application/json\"",
"in",
"content_type",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Decoding json response.\"",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"error",
"=",
"data",
".",
"get",
"(",
"\"error\"",
")",
"if",
"not",
"error",
"or",
"not",
"isinstance",
"(",
"error",
",",
"dict",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Unexpected error response: %r\"",
",",
"data",
")",
"return",
"None",
"return",
"error",
"self",
".",
"logger",
".",
"warning",
"(",
"\"Unexpected response: %r\"",
",",
"response",
".",
"text",
")",
"return",
"None"
] | Subclasses may override this method in order to influence
how errors are parsed from the response.
Parameters:
response(Response): The response object.
Returns:
object or None: Any object for which a max retry count can
be retrieved or None if the error cannot be handled. | [
"Subclasses",
"may",
"override",
"this",
"method",
"in",
"order",
"to",
"influence",
"how",
"errors",
"are",
"parsed",
"from",
"the",
"response",
"."
] | 8933363c4e9fa1e5ec0e90d683fca8ef8a949752 | https://github.com/LeadPages/gcloud_requests/blob/8933363c4e9fa1e5ec0e90d683fca8ef8a949752/gcloud_requests/proxy.py#L164-L193 | train |
NaPs/Kolekto | kolekto/pattern.py | parse_pattern | def parse_pattern(format_string, env, wrapper=lambda x, y: y):
""" Parse the format_string and return prepared data according to the env.
Pick each field found in the format_string from the env(ironment), apply
the wrapper on each data and return a mapping between field-to-replace and
values for each.
"""
formatter = Formatter()
fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None]
prepared_env = {}
# Create a prepared environment with only used fields, all as list:
for field in fields:
# Search for a movie attribute for each alternative field separated
# by a pipe sign:
for field_alt in (x.strip() for x in field.split('|')):
# Handle default values (enclosed by quotes):
if field_alt[0] in '\'"' and field_alt[-1] in '\'"':
field_values = field_alt[1:-1]
else:
field_values = env.get(field_alt)
if field_values is not None:
break
else:
field_values = []
if not isinstance(field_values, list):
field_values = [field_values]
prepared_env[field] = wrapper(field_alt, field_values)
return prepared_env | python | def parse_pattern(format_string, env, wrapper=lambda x, y: y):
""" Parse the format_string and return prepared data according to the env.
Pick each field found in the format_string from the env(ironment), apply
the wrapper to each value and return a mapping between field-to-replace and
values for each.
"""
formatter = Formatter()
fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None]
prepared_env = {}
# Create a prepared environment with only used fields, all as list:
for field in fields:
# Search for a movie attribute for each alternative field separated
# by a pipe sign:
for field_alt in (x.strip() for x in field.split('|')):
# Handle default values (enclosed by quotes):
if field_alt[0] in '\'"' and field_alt[-1] in '\'"':
field_values = field_alt[1:-1]
else:
field_values = env.get(field_alt)
if field_values is not None:
break
else:
field_values = []
if not isinstance(field_values, list):
field_values = [field_values]
prepared_env[field] = wrapper(field_alt, field_values)
return prepared_env | [
"def",
"parse_pattern",
"(",
"format_string",
",",
"env",
",",
"wrapper",
"=",
"lambda",
"x",
",",
"y",
":",
"y",
")",
":",
"formatter",
"=",
"Formatter",
"(",
")",
"fields",
"=",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"formatter",
".",
"parse",
"(",
"format_string",
")",
"if",
"x",
"[",
"1",
"]",
"is",
"not",
"None",
"]",
"prepared_env",
"=",
"{",
"}",
"# Create a prepared environment with only used fields, all as list:",
"for",
"field",
"in",
"fields",
":",
"# Search for a movie attribute for each alternative field separated",
"# by a pipe sign:",
"for",
"field_alt",
"in",
"(",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"field",
".",
"split",
"(",
"'|'",
")",
")",
":",
"# Handle default values (enclosed by quotes):",
"if",
"field_alt",
"[",
"0",
"]",
"in",
"'\\'\"'",
"and",
"field_alt",
"[",
"-",
"1",
"]",
"in",
"'\\'\"'",
":",
"field_values",
"=",
"field_alt",
"[",
"1",
":",
"-",
"1",
"]",
"else",
":",
"field_values",
"=",
"env",
".",
"get",
"(",
"field_alt",
")",
"if",
"field_values",
"is",
"not",
"None",
":",
"break",
"else",
":",
"field_values",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"field_values",
",",
"list",
")",
":",
"field_values",
"=",
"[",
"field_values",
"]",
"prepared_env",
"[",
"field",
"]",
"=",
"wrapper",
"(",
"field_alt",
",",
"field_values",
")",
"return",
"prepared_env"
] | Parse the format_string and return prepared data according to the env.
Pick each field found in the format_string from the env(ironment), apply
the wrapper to each value and return a mapping between field-to-replace and
values for each. | [
"Parse",
"the",
"format_string",
"and",
"return",
"prepared",
"data",
"according",
"to",
"the",
"env",
"."
] | 29c5469da8782780a06bf9a76c59414bb6fd8fe3 | https://github.com/NaPs/Kolekto/blob/29c5469da8782780a06bf9a76c59414bb6fd8fe3/kolekto/pattern.py#L7-L38 | train |
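A worked parse_pattern example with the default wrapper; values come back wrapped in lists, and a quoted alternative acts as a fallback for a missing field. The format string and environment are made up.

env = {'title': 'Alien', 'directors': ['Ridley Scott']}
parse_pattern('{title} ({year|"unknown"}) by {directors}', env)
# -> {'title': ['Alien'], 'year|"unknown"': ['unknown'], 'directors': ['Ridley Scott']}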
tamasgal/km3pipe | km3pipe/stats.py | perc | def perc(arr, p=95, **kwargs):
"""Create symmetric percentiles, with ``p`` coverage."""
offset = (100 - p) / 2
return np.percentile(arr, (offset, 100 - offset), **kwargs) | python | def perc(arr, p=95, **kwargs):
"""Create symmetric percentiles, with ``p`` coverage."""
offset = (100 - p) / 2
return np.percentile(arr, (offset, 100 - offset), **kwargs) | [
"def",
"perc",
"(",
"arr",
",",
"p",
"=",
"95",
",",
"*",
"*",
"kwargs",
")",
":",
"offset",
"=",
"(",
"100",
"-",
"p",
")",
"/",
"2",
"return",
"np",
".",
"percentile",
"(",
"arr",
",",
"(",
"offset",
",",
"100",
"-",
"offset",
")",
",",
"*",
"*",
"kwargs",
")"
] | Create symmetric percentiles, with ``p`` coverage. | [
"Create",
"symmetric",
"percentiles",
"with",
"p",
"coverage",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L143-L146 | train |
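A quick check of perc on a toy array; with p=90 it returns the 5th and 95th percentiles, i.e. the bounds of the central 90%.

import numpy as np

arr = np.arange(101)       # 0, 1, ..., 100
lo, hi = perc(arr, p=90)   # -> (5.0, 95.0)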
tamasgal/km3pipe | km3pipe/stats.py | resample_1d | def resample_1d(arr, n_out=None, random_state=None):
"""Resample an array, with replacement.
Parameters
==========
arr: np.ndarray
The array is resampled along the first axis.
n_out: int, optional
Number of samples to return. If not specified,
return ``len(arr)`` samples.
"""
if random_state is None:
random_state = np.random.RandomState()
arr = np.atleast_1d(arr)
n = len(arr)
if n_out is None:
n_out = n
idx = random_state.randint(0, n, size=n_out)
return arr[idx] | python | def resample_1d(arr, n_out=None, random_state=None):
"""Resample an array, with replacement.
Parameters
==========
arr: np.ndarray
The array is resampled along the first axis.
n_out: int, optional
Number of samples to return. If not specified,
return ``len(arr)`` samples.
"""
if random_state is None:
random_state = np.random.RandomState()
arr = np.atleast_1d(arr)
n = len(arr)
if n_out is None:
n_out = n
idx = random_state.randint(0, n, size=n_out)
return arr[idx] | [
"def",
"resample_1d",
"(",
"arr",
",",
"n_out",
"=",
"None",
",",
"random_state",
"=",
"None",
")",
":",
"if",
"random_state",
"is",
"None",
":",
"random_state",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
"arr",
"=",
"np",
".",
"atleast_1d",
"(",
"arr",
")",
"n",
"=",
"len",
"(",
"arr",
")",
"if",
"n_out",
"is",
"None",
":",
"n_out",
"=",
"n",
"idx",
"=",
"random_state",
".",
"randint",
"(",
"0",
",",
"n",
",",
"size",
"=",
"n",
")",
"return",
"arr",
"[",
"idx",
"]"
] | Resample an array, with replacement.
Parameters
==========
arr: np.ndarray
The array is resampled along the first axis.
n_out: int, optional
Number of samples to return. If not specified,
return ``len(arr)`` samples. | [
"Resample",
"an",
"array",
"with",
"replacement",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L149-L167 | train |
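resample_1d draws with replacement, so duplicates in the output are expected; passing a seeded RandomState keeps the draw reproducible. The toy array below is made up.

import numpy as np

rs = np.random.RandomState(42)
data = np.array([1, 2, 3, 4, 5])
boot = resample_1d(data, random_state=rs)   # five values drawn from data, with replacement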
tamasgal/km3pipe | km3pipe/stats.py | bootstrap_params | def bootstrap_params(rv_cont, data, n_iter=5, **kwargs):
"""Bootstrap the fit params of a distribution.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=5]
Number of bootstrap iterations.
"""
fit_res = []
for _ in range(n_iter):
params = rv_cont.fit(resample_1d(data, **kwargs))
fit_res.append(params)
fit_res = np.array(fit_res)
return fit_res | python | def bootstrap_params(rv_cont, data, n_iter=5, **kwargs):
"""Bootstrap the fit params of a distribution.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=5]
Number of bootstrap iterations.
"""
fit_res = []
for _ in range(n_iter):
params = rv_cont.fit(resample_1d(data, **kwargs))
fit_res.append(params)
fit_res = np.array(fit_res)
return fit_res | [
"def",
"bootstrap_params",
"(",
"rv_cont",
",",
"data",
",",
"n_iter",
"=",
"5",
",",
"*",
"*",
"kwargs",
")",
":",
"fit_res",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"n_iter",
")",
":",
"params",
"=",
"rv_cont",
".",
"fit",
"(",
"resample_1d",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
")",
"fit_res",
".",
"append",
"(",
"params",
")",
"fit_res",
"=",
"np",
".",
"array",
"(",
"fit_res",
")",
"return",
"fit_res"
] | Bootstrap the fit params of a distribution.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=5]
Number of bootstrap iterations. | [
"Bootstrap",
"the",
"fit",
"params",
"of",
"a",
"distribution",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L170-L187 | train |
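A bootstrap_params example using scipy.stats.norm as the rv_continuous instance; the toy data are made up. Each row of the result is one fitted (loc, scale) pair.

import numpy as np
import scipy.stats

data = np.random.normal(loc=2.0, scale=0.5, size=1000)
params = bootstrap_params(scipy.stats.norm, data, n_iter=20)
print(params.shape)   # (20, 2): one (loc, scale) pair per bootstrap iteration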
tamasgal/km3pipe | km3pipe/stats.py | param_describe | def param_describe(params, quant=95, axis=0):
"""Get mean + quantile range from bootstrapped params."""
par = np.mean(params, axis=axis)
lo, up = perc(quant)
p_up = np.percentile(params, up, axis=axis)
p_lo = np.percentile(params, lo, axis=axis)
return par, p_lo, p_up | python | def param_describe(params, quant=95, axis=0):
"""Get mean + quantile range from bootstrapped params."""
par = np.mean(params, axis=axis)
lo, up = perc(quant)
p_up = np.percentile(params, up, axis=axis)
p_lo = np.percentile(params, lo, axis=axis)
return par, p_lo, p_up | [
"def",
"param_describe",
"(",
"params",
",",
"quant",
"=",
"95",
",",
"axis",
"=",
"0",
")",
":",
"par",
"=",
"np",
".",
"mean",
"(",
"params",
",",
"axis",
"=",
"axis",
")",
"lo",
",",
"up",
"=",
"perc",
"(",
"quant",
")",
"p_up",
"=",
"np",
".",
"percentile",
"(",
"params",
",",
"up",
",",
"axis",
"=",
"axis",
")",
"p_lo",
"=",
"np",
".",
"percentile",
"(",
"params",
",",
"lo",
",",
"axis",
"=",
"axis",
")",
"return",
"par",
",",
"p_lo",
",",
"p_up"
] | Get mean + quantile range from bootstrapped params. | [
"Get",
"mean",
"+",
"quantile",
"range",
"from",
"bootstrapped",
"params",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L190-L196 | train |
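The summary above boils down to a mean plus symmetric percentiles; a plain numpy check of that reduction, where perc(95) is assumed to map to the (2.5, 97.5) percentile pair used internally:
import numpy as np
params = np.random.RandomState(1).normal(size=(100, 2))  # stand-in for bootstrapped fit params
par = np.mean(params, axis=0)
p_lo = np.percentile(params, 2.5, axis=0)   # lower edge of the 95% interval
p_up = np.percentile(params, 97.5, axis=0)  # upper edge
print(par, p_lo, p_up)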
tamasgal/km3pipe | km3pipe/stats.py | bootstrap_fit | def bootstrap_fit(
rv_cont, data, n_iter=10, quant=95, print_params=True, **kwargs
):
"""Bootstrap a distribution fit + get confidence intervals for the params.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution which to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=10]
Number of bootstrap iterations.
quant: int [default=95]
percentile of the confidence limits (default is 95, i.e. 2.5%-97.5%)
print_params: bool [default=True]
Print a fit summary.
"""
fit_params = bootstrap_params(rv_cont, data, n_iter)
par, lo, up = param_describe(fit_params, quant=quant)
names = param_names(rv_cont)
maxlen = max([len(s) for s in names])
print("--------------")
print(rv_cont.name)
print("--------------")
for i, name in enumerate(names):
print(
"{nam:>{fill}}: {mean:+.3f} ∈ "
"[{lo:+.3f}, {up:+.3f}] ({q}%)".format(
nam=name,
fill=maxlen,
mean=par[i],
lo=lo[i],
up=up[i],
q=quant
)
)
out = {
'mean': par,
'lower limit': lo,
'upper limit': up,
}
return out | python | def bootstrap_fit(
rv_cont, data, n_iter=10, quant=95, print_params=True, **kwargs
):
"""Bootstrap a distribution fit + get confidence intervals for the params.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution which to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=10]
Number of bootstrap iterations.
quant: int [default=95]
percentile of the confidence limits (default is 95, i.e. 2.5%-97.5%)
print_params: bool [default=True]
Print a fit summary.
"""
fit_params = bootstrap_params(rv_cont, data, n_iter)
par, lo, up = param_describe(fit_params, quant=quant)
names = param_names(rv_cont)
maxlen = max([len(s) for s in names])
print("--------------")
print(rv_cont.name)
print("--------------")
for i, name in enumerate(names):
print(
"{nam:>{fill}}: {mean:+.3f} ∈ "
"[{lo:+.3f}, {up:+.3f}] ({q}%)".format(
nam=name,
fill=maxlen,
mean=par[i],
lo=lo[i],
up=up[i],
q=quant
)
)
out = {
'mean': par,
'lower limit': lo,
'upper limit': up,
}
return out | [
"def",
"bootstrap_fit",
"(",
"rv_cont",
",",
"data",
",",
"n_iter",
"=",
"10",
",",
"quant",
"=",
"95",
",",
"print_params",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"fit_params",
"=",
"bootstrap_params",
"(",
"rv_cont",
",",
"data",
",",
"n_iter",
")",
"par",
",",
"lo",
",",
"up",
"=",
"param_describe",
"(",
"fit_params",
",",
"quant",
"=",
"quant",
")",
"names",
"=",
"param_names",
"(",
"rv_cont",
")",
"maxlen",
"=",
"max",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"names",
"]",
")",
"print",
"(",
"\"--------------\"",
")",
"print",
"(",
"rv_cont",
".",
"name",
")",
"print",
"(",
"\"--------------\"",
")",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"names",
")",
":",
"print",
"(",
"\"{nam:>{fill}}: {mean:+.3f} ∈ \"",
"\"[{lo:+.3f}, {up:+.3f}] ({q}%)\"",
".",
"format",
"(",
"nam",
"=",
"name",
",",
"fill",
"=",
"maxlen",
",",
"mean",
"=",
"par",
"[",
"i",
"]",
",",
"lo",
"=",
"lo",
"[",
"i",
"]",
",",
"up",
"=",
"up",
"[",
"i",
"]",
",",
"q",
"=",
"quant",
")",
")",
"out",
"=",
"{",
"'mean'",
":",
"par",
",",
"'lower limit'",
":",
"lo",
",",
"'upper limit'",
":",
"up",
",",
"}",
"return",
"out"
] | Bootstrap a distribution fit + get confidence intervals for the params.
Parameters
==========
rv_cont: scipy.stats.rv_continuous instance
The distribution which to fit.
data: array-like, 1d
The data on which to fit.
n_iter: int [default=10]
Number of bootstrap iterations.
quant: int [default=95]
percentile of the confidence limits (default is 95, i.e. 2.5%-97.5%)
print_params: bool [default=True]
Print a fit summary. | [
"Bootstrap",
"a",
"distribution",
"fit",
"+",
"get",
"confidence",
"intervals",
"for",
"the",
"params",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L199-L241 | train |
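A usage sketch for the full bootstrap-fit summary above; the distribution and data are placeholders and the import path is assumed:
import numpy as np
from scipy import stats
from km3pipe.stats import bootstrap_fit  # assumed import path
data = np.random.RandomState(2).normal(loc=0.5, scale=1.5, size=1000)
result = bootstrap_fit(stats.norm, data, n_iter=10, quant=95)
print(result['mean'], result['lower limit'], result['upper limit'])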
tamasgal/km3pipe | km3pipe/stats.py | rv_kde.rvs | def rvs(self, *args, **kwargs):
"""Draw Random Variates.
Parameters
----------
size: int, optional (default=1)
random_state: optional (default=None)
"""
# TODO REVERSE THIS FUCK PYTHON2
size = kwargs.pop('size', 1)
random_state = kwargs.pop('random_state', None)
# don't ask me why it uses `self._size`
return self._kde.sample(n_samples=size, random_state=random_state) | python | def rvs(self, *args, **kwargs):
"""Draw Random Variates.
Parameters
----------
size: int, optional (default=1)
random_state: optional (default=None)
"""
# TODO REVERSE THIS FUCK PYTHON2
size = kwargs.pop('size', 1)
random_state = kwargs.pop('random_state', None)
# don't ask me why it uses `self._size`
return self._kde.sample(n_samples=size, random_state=random_state) | [
"def",
"rvs",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO REVERSE THIS FUCK PYTHON2",
"size",
"=",
"kwargs",
".",
"pop",
"(",
"'size'",
",",
"1",
")",
"random_state",
"=",
"kwargs",
".",
"pop",
"(",
"'size'",
",",
"None",
")",
"# don't ask me why it uses `self._size`",
"return",
"self",
".",
"_kde",
".",
"sample",
"(",
"n_samples",
"=",
"size",
",",
"random_state",
"=",
"random_state",
")"
] | Draw Random Variates.
Parameters
----------
size: int, optional (default=1)
random_state: optional (default=None) | [
"Draw",
"Random",
"Variates",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/stats.py#L99-L111 | train |
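The sampling call above delegates to scikit-learn's KernelDensity.sample; a stand-alone sketch of that underlying mechanism (bandwidth and data are arbitrary):
import numpy as np
from sklearn.neighbors import KernelDensity
data = np.random.RandomState(3).normal(size=(200, 1))  # KernelDensity expects 2D input
kde = KernelDensity(bandwidth=0.5).fit(data)
samples = kde.sample(n_samples=10, random_state=42)
print(samples.shape)  # (10, 1)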
tamasgal/km3pipe | km3pipe/utils/i3shower2hdf5.py | main | def main():
"""Entry point when running as script from commandline."""
from docopt import docopt
args = docopt(__doc__)
infile = args['INFILE']
outfile = args['OUTFILE']
i3extract(infile, outfile) | python | def main():
"""Entry point when running as script from commandline."""
from docopt import docopt
args = docopt(__doc__)
infile = args['INFILE']
outfile = args['OUTFILE']
i3extract(infile, outfile) | [
"def",
"main",
"(",
")",
":",
"from",
"docopt",
"import",
"docopt",
"args",
"=",
"docopt",
"(",
"__doc__",
")",
"infile",
"=",
"args",
"[",
"'INFILE'",
"]",
"outfile",
"=",
"args",
"[",
"'OUTFILE'",
"]",
"i3extract",
"(",
"infile",
",",
"outfile",
")"
] | Entry point when running as script from commandline. | [
"Entry",
"point",
"when",
"running",
"as",
"script",
"from",
"commandline",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/i3shower2hdf5.py#L348-L354 | train |
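The entry point above follows the usual docopt pattern; a minimal self-contained example of that pattern (the usage string and extract function are illustrative, not the real i3shower2hdf5 interface):
"""Usage: convert.py INFILE OUTFILE"""
from docopt import docopt
def extract(infile, outfile):
    print("would convert", infile, "->", outfile)
if __name__ == '__main__':
    args = docopt(__doc__)
    extract(args['INFILE'], args['OUTFILE'])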
IRC-SPHERE/HyperStream | hyperstream/client.py | Client.connect | def connect(self, server_config):
"""Connect using the configuration given
:param server_config: The server configuration
"""
if 'connection_string' in server_config:
self.client = pymongo.MongoClient(
server_config['connection_string'])
self.db = self.client[server_config['db']]
else:
self.client = pymongo.MongoClient(
server_config['host'],
server_config['port'],
tz_aware=self.get_config_value('tz_aware', True))
self.db = self.client[server_config['db']]
if ('authentication_database' in server_config and
server_config['authentication_database']):
self.db.authenticate(
server_config['username'], server_config['password'],
source=server_config['authentication_database'])
else:
if 'username' in server_config:
if 'password' in server_config:
self.db.authenticate(server_config['username'],
server_config['password'])
else:
self.db.authenticate(server_config['username'])
# Mongo Engine connection
d = dict((k, v) for k, v in server_config.items()
if k not in ['modalities', 'summaries'])
if 'authentication_database' in d:
d['authentication_source'] = d['authentication_database']
del d['authentication_database']
self.session = connect(alias="hyperstream", **d)
# TODO: This sets the default connection of mongoengine, but seems to be a bit of a hack
if "default" not in connection._connections:
connection._connections["default"] = connection._connections["hyperstream"]
connection._connection_settings["default"] = connection._connection_settings["hyperstream"] | python | def connect(self, server_config):
"""Connect using the configuration given
:param server_config: The server configuration
"""
if 'connection_string' in server_config:
self.client = pymongo.MongoClient(
server_config['connection_string'])
self.db = self.client[server_config['db']]
else:
self.client = pymongo.MongoClient(
server_config['host'],
server_config['port'],
tz_aware=self.get_config_value('tz_aware', True))
self.db = self.client[server_config['db']]
if ('authentication_database' in server_config and
server_config['authentication_database']):
self.db.authenticate(
server_config['username'], server_config['password'],
source=server_config['authentication_database'])
else:
if 'username' in server_config:
if 'password' in server_config:
self.db.authenticate(server_config['username'],
server_config['password'])
else:
self.db.authenticate(server_config['username'])
# Mongo Engine connection
d = dict((k, v) for k, v in server_config.items()
if k not in ['modalities', 'summaries'])
if 'authentication_database' in d:
d['authentication_source'] = d['authentication_database']
del d['authentication_database']
self.session = connect(alias="hyperstream", **d)
# TODO: This sets the default connection of mongoengine, but seems to be a bit of a hack
if "default" not in connection._connections:
connection._connections["default"] = connection._connections["hyperstream"]
connection._connection_settings["default"] = connection._connection_settings["hyperstream"] | [
"def",
"connect",
"(",
"self",
",",
"server_config",
")",
":",
"if",
"'connection_string'",
"in",
"server_config",
":",
"self",
".",
"client",
"=",
"pymongo",
".",
"MongoClient",
"(",
"server_config",
"[",
"'connection_string'",
"]",
")",
"self",
".",
"db",
"=",
"self",
".",
"client",
"[",
"server_config",
"[",
"'db'",
"]",
"]",
"else",
":",
"self",
".",
"client",
"=",
"pymongo",
".",
"MongoClient",
"(",
"server_config",
"[",
"'host'",
"]",
",",
"server_config",
"[",
"'port'",
"]",
",",
"tz_aware",
"=",
"self",
".",
"get_config_value",
"(",
"'tz_aware'",
",",
"True",
")",
")",
"self",
".",
"db",
"=",
"self",
".",
"client",
"[",
"server_config",
"[",
"'db'",
"]",
"]",
"if",
"(",
"'authentication_database'",
"in",
"server_config",
"and",
"server_config",
"[",
"'authentication_database'",
"]",
")",
":",
"self",
".",
"db",
".",
"authenticate",
"(",
"server_config",
"[",
"'username'",
"]",
",",
"server_config",
"[",
"'password'",
"]",
",",
"source",
"=",
"server_config",
"[",
"'authentication_database'",
"]",
")",
"else",
":",
"if",
"'username'",
"in",
"server_config",
":",
"if",
"'password'",
"in",
"server_config",
":",
"self",
".",
"db",
".",
"authenticate",
"(",
"server_config",
"[",
"'username'",
"]",
",",
"server_config",
"[",
"'password'",
"]",
")",
"else",
":",
"self",
".",
"db",
".",
"authenticate",
"(",
"server_config",
"[",
"'username'",
"]",
")",
"# Mongo Engine connection",
"d",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"server_config",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"'modalities'",
",",
"'summaries'",
"]",
")",
"if",
"'authentication_database'",
"in",
"d",
":",
"d",
"[",
"'authentication_source'",
"]",
"=",
"d",
"[",
"'authentication_database'",
"]",
"del",
"d",
"[",
"'authentication_database'",
"]",
"self",
".",
"session",
"=",
"connect",
"(",
"alias",
"=",
"\"hyperstream\"",
",",
"*",
"*",
"d",
")",
"# TODO: This sets the default connection of mongoengine, but seems to be a bit of a hack",
"if",
"\"default\"",
"not",
"in",
"connection",
".",
"_connections",
":",
"connection",
".",
"_connections",
"[",
"\"default\"",
"]",
"=",
"connection",
".",
"_connections",
"[",
"\"hyperstream\"",
"]",
"connection",
".",
"_connection_settings",
"[",
"\"default\"",
"]",
"=",
"connection",
".",
"_connection_settings",
"[",
"\"hyperstream\"",
"]"
] | Connect using the configuration given
:param server_config: The server configuration | [
"Connect",
"using",
"the",
"configuration",
"given"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/client.py#L65-L107 | train |
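A reduced sketch of the two-layer connection logic above: a raw pymongo client for direct database access plus a named mongoengine alias; host, port and database name are placeholders:
import pymongo
from mongoengine import connect
server_config = {'host': 'localhost', 'port': 27017, 'db': 'hyperstream'}
client = pymongo.MongoClient(server_config['host'], server_config['port'], tz_aware=True)
db = client[server_config['db']]
session = connect(alias="hyperstream", **server_config)  # registers the named mongoengine connection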
tamasgal/km3pipe | km3pipe/utils/ptconcat.py | ptconcat | def ptconcat(output_file, input_files, overwrite=False):
"""Concatenate HDF5 Files"""
filt = tb.Filters(
complevel=5, shuffle=True, fletcher32=True, complib='zlib'
)
out_tabs = {}
dt_file = input_files[0]
log.info("Reading data struct '%s'..." % dt_file)
h5struc = tb.open_file(dt_file, 'r')
log.info("Opening output file '%s'..." % output_file)
if overwrite:
outmode = 'w'
else:
outmode = 'a'
h5out = tb.open_file(output_file, outmode)
for node in h5struc.walk_nodes('/', classname='Table'):
path = node._v_pathname
log.debug(path)
dtype = node.dtype
p, n = os.path.split(path)
out_tabs[path] = h5out.create_table(
p, n, description=dtype, filters=filt, createparents=True
)
h5struc.close()
for fname in input_files:
log.info('Reading %s...' % fname)
h5 = tb.open_file(fname)
for path, out in out_tabs.items():
tab = h5.get_node(path)
out.append(tab[:])
h5.close()
h5out.close() | python | def ptconcat(output_file, input_files, overwrite=False):
"""Concatenate HDF5 Files"""
filt = tb.Filters(
complevel=5, shuffle=True, fletcher32=True, complib='zlib'
)
out_tabs = {}
dt_file = input_files[0]
log.info("Reading data struct '%s'..." % dt_file)
h5struc = tb.open_file(dt_file, 'r')
log.info("Opening output file '%s'..." % output_file)
if overwrite:
outmode = 'w'
else:
outmode = 'a'
h5out = tb.open_file(output_file, outmode)
for node in h5struc.walk_nodes('/', classname='Table'):
path = node._v_pathname
log.debug(path)
dtype = node.dtype
p, n = os.path.split(path)
out_tabs[path] = h5out.create_table(
p, n, description=dtype, filters=filt, createparents=True
)
h5struc.close()
for fname in input_files:
log.info('Reading %s...' % fname)
h5 = tb.open_file(fname)
for path, out in out_tabs.items():
tab = h5.get_node(path)
out.append(tab[:])
h5.close()
h5out.close() | [
"def",
"ptconcat",
"(",
"output_file",
",",
"input_files",
",",
"overwrite",
"=",
"False",
")",
":",
"filt",
"=",
"tb",
".",
"Filters",
"(",
"complevel",
"=",
"5",
",",
"shuffle",
"=",
"True",
",",
"fletcher32",
"=",
"True",
",",
"complib",
"=",
"'zlib'",
")",
"out_tabs",
"=",
"{",
"}",
"dt_file",
"=",
"input_files",
"[",
"0",
"]",
"log",
".",
"info",
"(",
"\"Reading data struct '%s'...\"",
"%",
"dt_file",
")",
"h5struc",
"=",
"tb",
".",
"open_file",
"(",
"dt_file",
",",
"'r'",
")",
"log",
".",
"info",
"(",
"\"Opening output file '%s'...\"",
"%",
"output_file",
")",
"if",
"overwrite",
":",
"outmode",
"=",
"'w'",
"else",
":",
"outmode",
"=",
"'a'",
"h5out",
"=",
"tb",
".",
"open_file",
"(",
"output_file",
",",
"outmode",
")",
"for",
"node",
"in",
"h5struc",
".",
"walk_nodes",
"(",
"'/'",
",",
"classname",
"=",
"'Table'",
")",
":",
"path",
"=",
"node",
".",
"_v_pathname",
"log",
".",
"debug",
"(",
"path",
")",
"dtype",
"=",
"node",
".",
"dtype",
"p",
",",
"n",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"out_tabs",
"[",
"path",
"]",
"=",
"h5out",
".",
"create_table",
"(",
"p",
",",
"n",
",",
"description",
"=",
"dtype",
",",
"filters",
"=",
"filt",
",",
"createparents",
"=",
"True",
")",
"h5struc",
".",
"close",
"(",
")",
"for",
"fname",
"in",
"input_files",
":",
"log",
".",
"info",
"(",
"'Reading %s...'",
"%",
"fname",
")",
"h5",
"=",
"tb",
".",
"open_file",
"(",
"fname",
")",
"for",
"path",
",",
"out",
"in",
"out_tabs",
".",
"items",
"(",
")",
":",
"tab",
"=",
"h5",
".",
"get_node",
"(",
"path",
")",
"out",
".",
"append",
"(",
"tab",
"[",
":",
"]",
")",
"h5",
".",
"close",
"(",
")",
"h5out",
".",
"close",
"(",
")"
] | Concatenate HDF5 Files | [
"Concatenate",
"HDF5",
"Files"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/ptconcat.py#L36-L68 | train |
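Invocation sketch for the concatenation helper above; the import path and file names are placeholders:
from km3pipe.utils.ptconcat import ptconcat  # assumed import path (km3pipe/utils/ptconcat.py)
ptconcat('merged.h5', ['run1.h5', 'run2.h5', 'run3.h5'], overwrite=True)
# every table found in run1.h5 is created in merged.h5 and filled from all input files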
tamasgal/km3pipe | km3modules/k40.py | calibrate_dom | def calibrate_dom(
dom_id,
data,
detector,
livetime=None,
fit_ang_dist=False,
scale_mc_to_data=True,
ad_fit_shape='pexp',
fit_background=True,
ctmin=-1.
):
"""Calibrate intra DOM PMT time offsets, efficiencies and sigmas
Parameters
----------
dom_id: DOM ID
data: dict of coincidences or root or hdf5 file
detector: instance of detector class
livetime: data-taking duration [s]
fit_ang_dist: if True, fit the angular distribution to the data instead of using the fixed Monte Carlo shape
scale_mc_to_data: scale the fixed Monte Carlo angular distribution to the data
Returns
-------
return_data: dictionary with fit results
"""
if isinstance(data, str):
filename = data
loaders = {
'.h5': load_k40_coincidences_from_hdf5,
'.root': load_k40_coincidences_from_rootfile
}
try:
loader = loaders[os.path.splitext(filename)[1]]
except KeyError:
log.critical('File format not supported.')
raise IOError
else:
data, livetime = loader(filename, dom_id)
combs = np.array(list(combinations(range(31), 2)))
angles = calculate_angles(detector, combs)
cos_angles = np.cos(angles)
angles = angles[cos_angles >= ctmin]
data = data[cos_angles >= ctmin]
combs = combs[cos_angles >= ctmin]
try:
fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
rates, means, sigmas, popts, pcovs = fit_res
except:
return 0
rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
# mean_errors = np.array([np.diag(pc)[0] for pc in pcovs])
scale_factor = None
if fit_ang_dist:
fit_res = fit_angular_distribution(
angles, rates, rate_errors, shape=ad_fit_shape
)
fitted_rates, exp_popts, exp_pcov = fit_res
else:
mc_fitted_rates = exponential_polinomial(np.cos(angles), *MC_ANG_DIST)
if scale_mc_to_data:
scale_factor = np.mean(rates[angles < 1.5]) / \
np.mean(mc_fitted_rates[angles < 1.5])
else:
scale_factor = 1.
fitted_rates = mc_fitted_rates * scale_factor
exp_popts = []
exp_pcov = []
print('Using angular distribution from Monte Carlo')
# t0_weights = np.array([0. if a>1. else 1. for a in angles])
if not fit_background:
minimize_weights = calculate_weights(fitted_rates, data)
else:
minimize_weights = fitted_rates
opt_t0s = minimize_t0s(means, minimize_weights, combs)
opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
corrected_means = correct_means(means, opt_t0s.x, combs)
corrected_rates = correct_rates(rates, opt_qes.x, combs)
rms_means, rms_corrected_means = calculate_rms_means(
means, corrected_means
)
rms_rates, rms_corrected_rates = calculate_rms_rates(
rates, fitted_rates, corrected_rates
)
cos_angles = np.cos(angles)
return_data = {
'opt_t0s': opt_t0s,
'opt_qes': opt_qes,
'data': data,
'means': means,
'rates': rates,
'fitted_rates': fitted_rates,
'angles': angles,
'corrected_means': corrected_means,
'corrected_rates': corrected_rates,
'rms_means': rms_means,
'rms_corrected_means': rms_corrected_means,
'rms_rates': rms_rates,
'rms_corrected_rates': rms_corrected_rates,
'gaussian_popts': popts,
'livetime': livetime,
'exp_popts': exp_popts,
'exp_pcov': exp_pcov,
'scale_factor': scale_factor,
'opt_sigmas': opt_sigmas,
'sigmas': sigmas,
'combs': combs
}
return return_data | python | def calibrate_dom(
dom_id,
data,
detector,
livetime=None,
fit_ang_dist=False,
scale_mc_to_data=True,
ad_fit_shape='pexp',
fit_background=True,
ctmin=-1.
):
"""Calibrate intra DOM PMT time offsets, efficiencies and sigmas
Parameters
----------
dom_id: DOM ID
data: dict of coincidences or root or hdf5 file
detector: instance of detector class
livetime: data-taking duration [s]
fit_ang_dist: if True, fit the angular distribution to the data instead of using the fixed Monte Carlo shape
scale_mc_to_data: scale the fixed Monte Carlo angular distribution to the data
Returns
-------
return_data: dictionary with fit results
"""
if isinstance(data, str):
filename = data
loaders = {
'.h5': load_k40_coincidences_from_hdf5,
'.root': load_k40_coincidences_from_rootfile
}
try:
loader = loaders[os.path.splitext(filename)[1]]
except KeyError:
log.critical('File format not supported.')
raise IOError
else:
data, livetime = loader(filename, dom_id)
combs = np.array(list(combinations(range(31), 2)))
angles = calculate_angles(detector, combs)
cos_angles = np.cos(angles)
angles = angles[cos_angles >= ctmin]
data = data[cos_angles >= ctmin]
combs = combs[cos_angles >= ctmin]
try:
fit_res = fit_delta_ts(data, livetime, fit_background=fit_background)
rates, means, sigmas, popts, pcovs = fit_res
except:
return 0
rate_errors = np.array([np.diag(pc)[2] for pc in pcovs])
# mean_errors = np.array([np.diag(pc)[0] for pc in pcovs])
scale_factor = None
if fit_ang_dist:
fit_res = fit_angular_distribution(
angles, rates, rate_errors, shape=ad_fit_shape
)
fitted_rates, exp_popts, exp_pcov = fit_res
else:
mc_fitted_rates = exponential_polinomial(np.cos(angles), *MC_ANG_DIST)
if scale_mc_to_data:
scale_factor = np.mean(rates[angles < 1.5]) / \
np.mean(mc_fitted_rates[angles < 1.5])
else:
scale_factor = 1.
fitted_rates = mc_fitted_rates * scale_factor
exp_popts = []
exp_pcov = []
print('Using angular distribution from Monte Carlo')
# t0_weights = np.array([0. if a>1. else 1. for a in angles])
if not fit_background:
minimize_weights = calculate_weights(fitted_rates, data)
else:
minimize_weights = fitted_rates
opt_t0s = minimize_t0s(means, minimize_weights, combs)
opt_sigmas = minimize_sigmas(sigmas, minimize_weights, combs)
opt_qes = minimize_qes(fitted_rates, rates, minimize_weights, combs)
corrected_means = correct_means(means, opt_t0s.x, combs)
corrected_rates = correct_rates(rates, opt_qes.x, combs)
rms_means, rms_corrected_means = calculate_rms_means(
means, corrected_means
)
rms_rates, rms_corrected_rates = calculate_rms_rates(
rates, fitted_rates, corrected_rates
)
cos_angles = np.cos(angles)
return_data = {
'opt_t0s': opt_t0s,
'opt_qes': opt_qes,
'data': data,
'means': means,
'rates': rates,
'fitted_rates': fitted_rates,
'angles': angles,
'corrected_means': corrected_means,
'corrected_rates': corrected_rates,
'rms_means': rms_means,
'rms_corrected_means': rms_corrected_means,
'rms_rates': rms_rates,
'rms_corrected_rates': rms_corrected_rates,
'gaussian_popts': popts,
'livetime': livetime,
'exp_popts': exp_popts,
'exp_pcov': exp_pcov,
'scale_factor': scale_factor,
'opt_sigmas': opt_sigmas,
'sigmas': sigmas,
'combs': combs
}
return return_data | [
"def",
"calibrate_dom",
"(",
"dom_id",
",",
"data",
",",
"detector",
",",
"livetime",
"=",
"None",
",",
"fit_ang_dist",
"=",
"False",
",",
"scale_mc_to_data",
"=",
"True",
",",
"ad_fit_shape",
"=",
"'pexp'",
",",
"fit_background",
"=",
"True",
",",
"ctmin",
"=",
"-",
"1.",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"filename",
"=",
"data",
"loaders",
"=",
"{",
"'.h5'",
":",
"load_k40_coincidences_from_hdf5",
",",
"'.root'",
":",
"load_k40_coincidences_from_rootfile",
"}",
"try",
":",
"loader",
"=",
"loaders",
"[",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
"]",
"except",
"KeyError",
":",
"log",
".",
"critical",
"(",
"'File format not supported.'",
")",
"raise",
"IOError",
"else",
":",
"data",
",",
"livetime",
"=",
"loader",
"(",
"filename",
",",
"dom_id",
")",
"combs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"combinations",
"(",
"range",
"(",
"31",
")",
",",
"2",
")",
")",
")",
"angles",
"=",
"calculate_angles",
"(",
"detector",
",",
"combs",
")",
"cos_angles",
"=",
"np",
".",
"cos",
"(",
"angles",
")",
"angles",
"=",
"angles",
"[",
"cos_angles",
">=",
"ctmin",
"]",
"data",
"=",
"data",
"[",
"cos_angles",
">=",
"ctmin",
"]",
"combs",
"=",
"combs",
"[",
"cos_angles",
">=",
"ctmin",
"]",
"try",
":",
"fit_res",
"=",
"fit_delta_ts",
"(",
"data",
",",
"livetime",
",",
"fit_background",
"=",
"fit_background",
")",
"rates",
",",
"means",
",",
"sigmas",
",",
"popts",
",",
"pcovs",
"=",
"fit_res",
"except",
":",
"return",
"0",
"rate_errors",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"diag",
"(",
"pc",
")",
"[",
"2",
"]",
"for",
"pc",
"in",
"pcovs",
"]",
")",
"# mean_errors = np.array([np.diag(pc)[0] for pc in pcovs])",
"scale_factor",
"=",
"None",
"if",
"fit_ang_dist",
":",
"fit_res",
"=",
"fit_angular_distribution",
"(",
"angles",
",",
"rates",
",",
"rate_errors",
",",
"shape",
"=",
"ad_fit_shape",
")",
"fitted_rates",
",",
"exp_popts",
",",
"exp_pcov",
"=",
"fit_res",
"else",
":",
"mc_fitted_rates",
"=",
"exponential_polinomial",
"(",
"np",
".",
"cos",
"(",
"angles",
")",
",",
"*",
"MC_ANG_DIST",
")",
"if",
"scale_mc_to_data",
":",
"scale_factor",
"=",
"np",
".",
"mean",
"(",
"rates",
"[",
"angles",
"<",
"1.5",
"]",
")",
"/",
"np",
".",
"mean",
"(",
"mc_fitted_rates",
"[",
"angles",
"<",
"1.5",
"]",
")",
"else",
":",
"scale_factor",
"=",
"1.",
"fitted_rates",
"=",
"mc_fitted_rates",
"*",
"scale_factor",
"exp_popts",
"=",
"[",
"]",
"exp_pcov",
"=",
"[",
"]",
"print",
"(",
"'Using angular distribution from Monte Carlo'",
")",
"# t0_weights = np.array([0. if a>1. else 1. for a in angles])",
"if",
"not",
"fit_background",
":",
"minimize_weights",
"=",
"calculate_weights",
"(",
"fitted_rates",
",",
"data",
")",
"else",
":",
"minimize_weights",
"=",
"fitted_rates",
"opt_t0s",
"=",
"minimize_t0s",
"(",
"means",
",",
"minimize_weights",
",",
"combs",
")",
"opt_sigmas",
"=",
"minimize_sigmas",
"(",
"sigmas",
",",
"minimize_weights",
",",
"combs",
")",
"opt_qes",
"=",
"minimize_qes",
"(",
"fitted_rates",
",",
"rates",
",",
"minimize_weights",
",",
"combs",
")",
"corrected_means",
"=",
"correct_means",
"(",
"means",
",",
"opt_t0s",
".",
"x",
",",
"combs",
")",
"corrected_rates",
"=",
"correct_rates",
"(",
"rates",
",",
"opt_qes",
".",
"x",
",",
"combs",
")",
"rms_means",
",",
"rms_corrected_means",
"=",
"calculate_rms_means",
"(",
"means",
",",
"corrected_means",
")",
"rms_rates",
",",
"rms_corrected_rates",
"=",
"calculate_rms_rates",
"(",
"rates",
",",
"fitted_rates",
",",
"corrected_rates",
")",
"cos_angles",
"=",
"np",
".",
"cos",
"(",
"angles",
")",
"return_data",
"=",
"{",
"'opt_t0s'",
":",
"opt_t0s",
",",
"'opt_qes'",
":",
"opt_qes",
",",
"'data'",
":",
"data",
",",
"'means'",
":",
"means",
",",
"'rates'",
":",
"rates",
",",
"'fitted_rates'",
":",
"fitted_rates",
",",
"'angles'",
":",
"angles",
",",
"'corrected_means'",
":",
"corrected_means",
",",
"'corrected_rates'",
":",
"corrected_rates",
",",
"'rms_means'",
":",
"rms_means",
",",
"'rms_corrected_means'",
":",
"rms_corrected_means",
",",
"'rms_rates'",
":",
"rms_rates",
",",
"'rms_corrected_rates'",
":",
"rms_corrected_rates",
",",
"'gaussian_popts'",
":",
"popts",
",",
"'livetime'",
":",
"livetime",
",",
"'exp_popts'",
":",
"exp_popts",
",",
"'exp_pcov'",
":",
"exp_pcov",
",",
"'scale_factor'",
":",
"scale_factor",
",",
"'opt_sigmas'",
":",
"opt_sigmas",
",",
"'sigmas'",
":",
"sigmas",
",",
"'combs'",
":",
"combs",
"}",
"return",
"return_data"
] | Calibrate intra DOM PMT time offsets, efficiencies and sigmas
Parameters
----------
dom_id: DOM ID
data: dict of coincidences or root or hdf5 file
detector: instance of detector class
livetime: data-taking duration [s]
fit_ang_dist: if True, fit the angular distribution to the data instead of using the fixed Monte Carlo shape
scale_mc_to_data: scale the fixed Monte Carlo angular distribution to the data
Returns
-------
return_data: dictionary with fit results | [
"Calibrate",
"intra",
"DOM",
"PMT",
"time",
"offsets",
"efficiencies",
"and",
"sigmas"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L382-L498 | train |
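A hedged invocation sketch for the calibration driver above; the detector file, DOM ID and HDF5 input are placeholders, and the Detector constructor argument is an assumption:
import km3pipe as kp
from km3modules.k40 import calibrate_dom  # assumed import path
detector = kp.hardware.Detector(filename='orca.detx')  # hypothetical .detx file
fit = calibrate_dom(806451572, 'k40_counts.h5', detector, ctmin=-1.0)
print(fit['opt_t0s'].x)  # intra-DOM time offsets
print(fit['opt_qes'].x)  # relative PMT efficiencies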
tamasgal/km3pipe | km3modules/k40.py | load_k40_coincidences_from_hdf5 | def load_k40_coincidences_from_hdf5(filename, dom_id):
"""Load k40 coincidences from hdf5 file
Parameters
----------
filename: filename of hdf5 file
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
livetime: duration of data-taking
"""
with h5py.File(filename, 'r') as h5f:
data = h5f['/k40counts/{0}'.format(dom_id)]
livetime = data.attrs['livetime']
data = np.array(data)
return data, livetime | python | def load_k40_coincidences_from_hdf5(filename, dom_id):
"""Load k40 coincidences from hdf5 file
Parameters
----------
filename: filename of hdf5 file
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
livetime: duration of data-taking
"""
with h5py.File(filename, 'r') as h5f:
data = h5f['/k40counts/{0}'.format(dom_id)]
livetime = data.attrs['livetime']
data = np.array(data)
return data, livetime | [
"def",
"load_k40_coincidences_from_hdf5",
"(",
"filename",
",",
"dom_id",
")",
":",
"with",
"h5py",
".",
"File",
"(",
"filename",
",",
"'r'",
")",
"as",
"h5f",
":",
"data",
"=",
"h5f",
"[",
"'/k40counts/{0}'",
".",
"format",
"(",
"dom_id",
")",
"]",
"livetime",
"=",
"data",
".",
"attrs",
"[",
"'livetime'",
"]",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
"return",
"data",
",",
"livetime"
] | Load k40 coincidences from hdf5 file
Parameters
----------
filename: filename of hdf5 file
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
livetime: duration of data-taking | [
"Load",
"k40",
"coincidences",
"from",
"hdf5",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L507-L526 | train |
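The same read pattern in isolation with h5py: open the file, take the per-DOM dataset and its livetime attribute; file name and DOM ID are placeholders:
import h5py
import numpy as np
dom_id = 806451572  # hypothetical DOM ID
with h5py.File('k40_counts.h5', 'r') as h5f:
    dset = h5f['/k40counts/{0}'.format(dom_id)]
    livetime = dset.attrs['livetime']
    data = np.array(dset)
print(data.shape, livetime)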
tamasgal/km3pipe | km3modules/k40.py | load_k40_coincidences_from_rootfile | def load_k40_coincidences_from_rootfile(filename, dom_id):
"""Load k40 coincidences from JMonitorK40 ROOT file
Parameters
----------
filename: root file produced by JMonitorK40
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
dom_weight: weight to apply to coincidences to get rate in Hz
"""
from ROOT import TFile
root_file_monitor = TFile(filename, "READ")
dom_name = str(dom_id) + ".2S"
histo_2d_monitor = root_file_monitor.Get(dom_name)
data = []
for c in range(1, histo_2d_monitor.GetNbinsX() + 1):
combination = []
for b in range(1, histo_2d_monitor.GetNbinsY() + 1):
combination.append(histo_2d_monitor.GetBinContent(c, b))
data.append(combination)
weights = {}
weights_histo = root_file_monitor.Get('weights_hist')
try:
for i in range(1, weights_histo.GetNbinsX() + 1):
# we have to read all the entries, unfortunately
weight = weights_histo.GetBinContent(i)
label = weights_histo.GetXaxis().GetBinLabel(i)
weights[label[3:]] = weight
dom_weight = weights[str(dom_id)]
except AttributeError:
log.info("Weights histogram broken or not found, setting weight to 1.")
dom_weight = 1.
return np.array(data), dom_weight | python | def load_k40_coincidences_from_rootfile(filename, dom_id):
"""Load k40 coincidences from JMonitorK40 ROOT file
Parameters
----------
filename: root file produced by JMonitorK40
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
dom_weight: weight to apply to coincidences to get rate in Hz
"""
from ROOT import TFile
root_file_monitor = TFile(filename, "READ")
dom_name = str(dom_id) + ".2S"
histo_2d_monitor = root_file_monitor.Get(dom_name)
data = []
for c in range(1, histo_2d_monitor.GetNbinsX() + 1):
combination = []
for b in range(1, histo_2d_monitor.GetNbinsY() + 1):
combination.append(histo_2d_monitor.GetBinContent(c, b))
data.append(combination)
weights = {}
weights_histo = root_file_monitor.Get('weights_hist')
try:
for i in range(1, weights_histo.GetNbinsX() + 1):
# we have to read all the entries, unfortunately
weight = weights_histo.GetBinContent(i)
label = weights_histo.GetXaxis().GetBinLabel(i)
weights[label[3:]] = weight
dom_weight = weights[str(dom_id)]
except AttributeError:
log.info("Weights histogram broken or not found, setting weight to 1.")
dom_weight = 1.
return np.array(data), dom_weight | [
"def",
"load_k40_coincidences_from_rootfile",
"(",
"filename",
",",
"dom_id",
")",
":",
"from",
"ROOT",
"import",
"TFile",
"root_file_monitor",
"=",
"TFile",
"(",
"filename",
",",
"\"READ\"",
")",
"dom_name",
"=",
"str",
"(",
"dom_id",
")",
"+",
"\".2S\"",
"histo_2d_monitor",
"=",
"root_file_monitor",
".",
"Get",
"(",
"dom_name",
")",
"data",
"=",
"[",
"]",
"for",
"c",
"in",
"range",
"(",
"1",
",",
"histo_2d_monitor",
".",
"GetNbinsX",
"(",
")",
"+",
"1",
")",
":",
"combination",
"=",
"[",
"]",
"for",
"b",
"in",
"range",
"(",
"1",
",",
"histo_2d_monitor",
".",
"GetNbinsY",
"(",
")",
"+",
"1",
")",
":",
"combination",
".",
"append",
"(",
"histo_2d_monitor",
".",
"GetBinContent",
"(",
"c",
",",
"b",
")",
")",
"data",
".",
"append",
"(",
"combination",
")",
"weights",
"=",
"{",
"}",
"weights_histo",
"=",
"root_file_monitor",
".",
"Get",
"(",
"'weights_hist'",
")",
"try",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"weights_histo",
".",
"GetNbinsX",
"(",
")",
"+",
"1",
")",
":",
"# we have to read all the entries, unfortunately",
"weight",
"=",
"weights_histo",
".",
"GetBinContent",
"(",
"i",
")",
"label",
"=",
"weights_histo",
".",
"GetXaxis",
"(",
")",
".",
"GetBinLabel",
"(",
"i",
")",
"weights",
"[",
"label",
"[",
"3",
":",
"]",
"]",
"=",
"weight",
"dom_weight",
"=",
"weights",
"[",
"str",
"(",
"dom_id",
")",
"]",
"except",
"AttributeError",
":",
"log",
".",
"info",
"(",
"\"Weights histogram broken or not found, setting weight to 1.\"",
")",
"dom_weight",
"=",
"1.",
"return",
"np",
".",
"array",
"(",
"data",
")",
",",
"dom_weight"
] | Load k40 coincidences from JMonitorK40 ROOT file
Parameters
----------
filename: root file produced by JMonitorK40
dom_id: DOM ID
Returns
-------
data: numpy array of coincidences
dom_weight: weight to apply to coincidences to get rate in Hz | [
"Load",
"k40",
"coincidences",
"from",
"JMonitorK40",
"ROOT",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L529-L566 | train |
tamasgal/km3pipe | km3modules/k40.py | calculate_angles | def calculate_angles(detector, combs):
"""Calculates angles between PMT combinations according to positions in
detector_file
Parameters
----------
detector: detector instance providing the PMT positions (loaded from a .detx file)
combs: pmt combinations
Returns
-------
angles: numpy array of angles between all PMT combinations
"""
angles = []
pmt_angles = detector.pmt_angles
for first, second in combs:
angles.append(
kp.math.angle_between(
np.array(pmt_angles[first]), np.array(pmt_angles[second])
)
)
return np.array(angles) | python | def calculate_angles(detector, combs):
"""Calculates angles between PMT combinations according to positions in
detector_file
Parameters
----------
detector: detector instance providing the PMT positions (loaded from a .detx file)
combs: pmt combinations
Returns
-------
angles: numpy array of angles between all PMT combinations
"""
angles = []
pmt_angles = detector.pmt_angles
for first, second in combs:
angles.append(
kp.math.angle_between(
np.array(pmt_angles[first]), np.array(pmt_angles[second])
)
)
return np.array(angles) | [
"def",
"calculate_angles",
"(",
"detector",
",",
"combs",
")",
":",
"angles",
"=",
"[",
"]",
"pmt_angles",
"=",
"detector",
".",
"pmt_angles",
"for",
"first",
",",
"second",
"in",
"combs",
":",
"angles",
".",
"append",
"(",
"kp",
".",
"math",
".",
"angle_between",
"(",
"np",
".",
"array",
"(",
"pmt_angles",
"[",
"first",
"]",
")",
",",
"np",
".",
"array",
"(",
"pmt_angles",
"[",
"second",
"]",
")",
")",
")",
"return",
"np",
".",
"array",
"(",
"angles",
")"
] | Calculates angles between PMT combinations according to positions in
detector_file
Parameters
----------
detector: detector instance providing the PMT positions (loaded from a .detx file)
combs: pmt combinations
Returns
-------
angles: numpy array of angles between all PMT combinations | [
"Calculates",
"angles",
"between",
"PMT",
"combinations",
"according",
"to",
"positions",
"in",
"detector_file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L635-L657 | train |
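Each entry above is the opening angle between two PMT direction vectors; a small numpy check of that operation, with a helper that is assumed to behave like kp.math.angle_between:
import numpy as np
def angle_between(v1, v2):
    # arccos of the dot product of the unit vectors, clipped for numerical safety
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    return np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))
print(angle_between(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])))  # ~pi/2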
tamasgal/km3pipe | km3modules/k40.py | fit_angular_distribution | def fit_angular_distribution(angles, rates, rate_errors, shape='pexp'):
"""Fits angular distribution of rates.
Parameters
----------
rates: numpy array
with rates for all PMT combinations
angles: numpy array
with angles for all PMT combinations
shape:
which function to fit; exp for exponential or pexp for
exponential_polinomial
Returns
-------
fitted_rates: numpy array of fitted rates (fit_function(angles, popt...))
"""
if shape == 'exp':
fit_function = exponential
# p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122]
if shape == 'pexp':
fit_function = exponential_polinomial
# p0 = [0.34921202, 2.8629577]
cos_angles = np.cos(angles)
popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
fitted_rates = fit_function(cos_angles, *popt)
return fitted_rates, popt, pcov | python | def fit_angular_distribution(angles, rates, rate_errors, shape='pexp'):
"""Fits angular distribution of rates.
Parameters
----------
rates: numpy array
with rates for all PMT combinations
angles: numpy array
with angles for all PMT combinations
shape:
which function to fit; exp for exponential or pexp for
exponential_polinomial
Returns
-------
fitted_rates: numpy array of fitted rates (fit_function(angles, popt...))
"""
if shape == 'exp':
fit_function = exponential
# p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122]
if shape == 'pexp':
fit_function = exponential_polinomial
# p0 = [0.34921202, 2.8629577]
cos_angles = np.cos(angles)
popt, pcov = optimize.curve_fit(fit_function, cos_angles, rates)
fitted_rates = fit_function(cos_angles, *popt)
return fitted_rates, popt, pcov | [
"def",
"fit_angular_distribution",
"(",
"angles",
",",
"rates",
",",
"rate_errors",
",",
"shape",
"=",
"'pexp'",
")",
":",
"if",
"shape",
"==",
"'exp'",
":",
"fit_function",
"=",
"exponential",
"# p0 = [-0.91871169, 2.72224241, -1.19065965, 1.48054122]",
"if",
"shape",
"==",
"'pexp'",
":",
"fit_function",
"=",
"exponential_polinomial",
"# p0 = [0.34921202, 2.8629577]",
"cos_angles",
"=",
"np",
".",
"cos",
"(",
"angles",
")",
"popt",
",",
"pcov",
"=",
"optimize",
".",
"curve_fit",
"(",
"fit_function",
",",
"cos_angles",
",",
"rates",
")",
"fitted_rates",
"=",
"fit_function",
"(",
"cos_angles",
",",
"*",
"popt",
")",
"return",
"fitted_rates",
",",
"popt",
",",
"pcov"
] | Fits angular distribution of rates.
Parameters
----------
rates: numpy array
with rates for all PMT combinations
angles: numpy array
with angles for all PMT combinations
shape:
which function to fit; exp for exponential or pexp for
exponential_polinomial
Returns
-------
fitted_rates: numpy array of fitted rates (fit_function(angles, popt...)) | [
"Fits",
"angular",
"distribution",
"of",
"rates",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L668-L696 | train |
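A stand-alone sketch of the fit above with scipy's curve_fit; the exponential model, fake rates and starting values are illustrative assumptions, not the real exponential_polinomial definition:
import numpy as np
from scipy import optimize
def model(cos_angle, a, b):
    # illustrative stand-in for the exponential-polynomial shape
    return np.exp(a * cos_angle + b)
cos_angles = np.linspace(-1, 1, 50)
rates = model(cos_angles, 0.35, 2.9) * np.random.RandomState(4).normal(1.0, 0.02, 50)
popt, pcov = optimize.curve_fit(model, cos_angles, rates, p0=(0.3, 3.0))
fitted_rates = model(cos_angles, *popt)
print(popt)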
tamasgal/km3pipe | km3modules/k40.py | minimize_t0s | def minimize_t0s(means, weights, combs):
"""Varies t0s to minimize the deviation of the gaussian means from zero.
Parameters
----------
means: numpy array of means of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_t0s: optimal t0 values for all PMTs
"""
def make_quality_function(means, weights, combs):
def quality_function(t0s):
sq_sum = 0
for mean, comb, weight in zip(means, combs, weights):
sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(means, weights, combs)
# t0s = np.zeros(31)
t0s = np.random.rand(31)
bounds = [(0, 0)] + [(-10., 10.)] * 30
opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
return opt_t0s | python | def minimize_t0s(means, weights, combs):
"""Varies t0s to minimize the deviation of the gaussian means from zero.
Parameters
----------
means: numpy array of means of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_t0s: optimal t0 values for all PMTs
"""
def make_quality_function(means, weights, combs):
def quality_function(t0s):
sq_sum = 0
for mean, comb, weight in zip(means, combs, weights):
sq_sum += ((mean - (t0s[comb[1]] - t0s[comb[0]])) * weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(means, weights, combs)
# t0s = np.zeros(31)
t0s = np.random.rand(31)
bounds = [(0, 0)] + [(-10., 10.)] * 30
opt_t0s = optimize.minimize(qfunc, t0s, bounds=bounds)
return opt_t0s | [
"def",
"minimize_t0s",
"(",
"means",
",",
"weights",
",",
"combs",
")",
":",
"def",
"make_quality_function",
"(",
"means",
",",
"weights",
",",
"combs",
")",
":",
"def",
"quality_function",
"(",
"t0s",
")",
":",
"sq_sum",
"=",
"0",
"for",
"mean",
",",
"comb",
",",
"weight",
"in",
"zip",
"(",
"means",
",",
"combs",
",",
"weights",
")",
":",
"sq_sum",
"+=",
"(",
"(",
"mean",
"-",
"(",
"t0s",
"[",
"comb",
"[",
"1",
"]",
"]",
"-",
"t0s",
"[",
"comb",
"[",
"0",
"]",
"]",
")",
")",
"*",
"weight",
")",
"**",
"2",
"return",
"sq_sum",
"return",
"quality_function",
"qfunc",
"=",
"make_quality_function",
"(",
"means",
",",
"weights",
",",
"combs",
")",
"# t0s = np.zeros(31)",
"t0s",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"31",
")",
"bounds",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"+",
"[",
"(",
"-",
"10.",
",",
"10.",
")",
"]",
"*",
"30",
"opt_t0s",
"=",
"optimize",
".",
"minimize",
"(",
"qfunc",
",",
"t0s",
",",
"bounds",
"=",
"bounds",
")",
"return",
"opt_t0s"
] | Varies t0s to minimize the deviation of the gaussian means from zero.
Parameters
----------
means: numpy array of means of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_t0s: optimal t0 values for all PMTs | [
"Varies",
"t0s",
"to",
"minimize",
"the",
"deviation",
"of",
"the",
"gaussian",
"means",
"from",
"zero",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L699-L728 | train |
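The closure-plus-scipy.optimize.minimize pattern above, reduced to a toy problem with three PMTs; the bounds pin the first offset to zero and all numbers are made up:
import numpy as np
from scipy import optimize
combs = np.array([(0, 1), (0, 2), (1, 2)])
means = np.array([1.0, 3.0, 2.0])  # measured peak positions per PMT pair
weights = np.ones(len(combs))
def quality_function(t0s):
    return sum(((m - (t0s[b] - t0s[a])) * w)**2
               for m, (a, b), w in zip(means, combs, weights))
bounds = [(0, 0)] + [(-10., 10.)] * 2  # first t0 fixed at zero
res = optimize.minimize(quality_function, np.random.rand(3), bounds=bounds)
print(res.x)  # expected to be close to [0, 1, 3]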
tamasgal/km3pipe | km3modules/k40.py | minimize_qes | def minimize_qes(fitted_rates, rates, weights, combs):
"""Varies QEs to minimize the deviation of the rates from the fitted_rates.
Parameters
----------
fitted_rates: numpy array of fitted rates from fit_angular_distribution
rates: numpy array of rates of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_qes: optimal qe values for all PMTs
"""
def make_quality_function(fitted_rates, rates, weights, combs):
def quality_function(qes):
sq_sum = 0
for fitted_rate, comb, rate, weight \
in zip(fitted_rates, combs, rates, weights):
sq_sum += ((rate / qes[comb[0]] / qes[comb[1]] - fitted_rate) *
weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(fitted_rates, rates, weights, combs)
qes = np.ones(31)
bounds = [(0.1, 2.)] * 31
opt_qes = optimize.minimize(qfunc, qes, bounds=bounds)
return opt_qes | python | def minimize_qes(fitted_rates, rates, weights, combs):
"""Varies QEs to minimize the deviation of the rates from the fitted_rates.
Parameters
----------
fitted_rates: numpy array of fitted rates from fit_angular_distribution
rates: numpy array of rates of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_qes: optimal qe values for all PMTs
"""
def make_quality_function(fitted_rates, rates, weights, combs):
def quality_function(qes):
sq_sum = 0
for fitted_rate, comb, rate, weight \
in zip(fitted_rates, combs, rates, weights):
sq_sum += ((rate / qes[comb[0]] / qes[comb[1]] - fitted_rate) *
weight)**2
return sq_sum
return quality_function
qfunc = make_quality_function(fitted_rates, rates, weights, combs)
qes = np.ones(31)
bounds = [(0.1, 2.)] * 31
opt_qes = optimize.minimize(qfunc, qes, bounds=bounds)
return opt_qes | [
"def",
"minimize_qes",
"(",
"fitted_rates",
",",
"rates",
",",
"weights",
",",
"combs",
")",
":",
"def",
"make_quality_function",
"(",
"fitted_rates",
",",
"rates",
",",
"weights",
",",
"combs",
")",
":",
"def",
"quality_function",
"(",
"qes",
")",
":",
"sq_sum",
"=",
"0",
"for",
"fitted_rate",
",",
"comb",
",",
"rate",
",",
"weight",
"in",
"zip",
"(",
"fitted_rates",
",",
"combs",
",",
"rates",
",",
"weights",
")",
":",
"sq_sum",
"+=",
"(",
"(",
"rate",
"/",
"qes",
"[",
"comb",
"[",
"0",
"]",
"]",
"/",
"qes",
"[",
"comb",
"[",
"1",
"]",
"]",
"-",
"fitted_rate",
")",
"*",
"weight",
")",
"**",
"2",
"return",
"sq_sum",
"return",
"quality_function",
"qfunc",
"=",
"make_quality_function",
"(",
"fitted_rates",
",",
"rates",
",",
"weights",
",",
"combs",
")",
"qes",
"=",
"np",
".",
"ones",
"(",
"31",
")",
"bounds",
"=",
"[",
"(",
"0.1",
",",
"2.",
")",
"]",
"*",
"31",
"opt_qes",
"=",
"optimize",
".",
"minimize",
"(",
"qfunc",
",",
"qes",
",",
"bounds",
"=",
"bounds",
")",
"return",
"opt_qes"
] | Varies QEs to minimize the deviation of the rates from the fitted_rates.
Parameters
----------
fitted_rates: numpy array of fitted rates from fit_angular_distribution
rates: numpy array of rates of all PMT combinations
weights: numpy array of weights for the squared sum
combs: pmt combinations to use for minimization
Returns
-------
opt_qes: optimal qe values for all PMTs | [
"Varies",
"QEs",
"to",
"minimize",
"the",
"deviation",
"of",
"the",
"rates",
"from",
"the",
"fitted_rates",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L764-L795 | train |
tamasgal/km3pipe | km3modules/k40.py | correct_means | def correct_means(means, opt_t0s, combs):
"""Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs
"""
corrected_means = np.array([(opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
for mean, comb in zip(means, combs)])
return corrected_means | python | def correct_means(means, opt_t0s, combs):
"""Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs
"""
corrected_means = np.array([(opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
for mean, comb in zip(means, combs)])
return corrected_means | [
"def",
"correct_means",
"(",
"means",
",",
"opt_t0s",
",",
"combs",
")",
":",
"corrected_means",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"opt_t0s",
"[",
"comb",
"[",
"1",
"]",
"]",
"-",
"opt_t0s",
"[",
"comb",
"[",
"0",
"]",
"]",
")",
"-",
"mean",
"for",
"mean",
",",
"comb",
"in",
"zip",
"(",
"means",
",",
"combs",
")",
"]",
")",
"return",
"corrected_means"
] | Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs | [
"Applies",
"optimal",
"t0s",
"to",
"gaussians",
"means",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L798-L816 | train |
tamasgal/km3pipe | km3modules/k40.py | correct_rates | def correct_rates(rates, opt_qes, combs):
"""Applies optimal qes to rates.
Should be closer to fitted_rates afterwards.
Parameters
----------
rates: numpy array of rates of all PMT combinations
opt_qes: numpy array of optimal qe values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_rates: numpy array of corrected rates for all PMT combinations
"""
corrected_rates = np.array([
rate / opt_qes[comb[0]] / opt_qes[comb[1]]
for rate, comb in zip(rates, combs)
])
return corrected_rates | python | def correct_rates(rates, opt_qes, combs):
"""Applies optimal qes to rates.
Should be closer to fitted_rates afterwards.
Parameters
----------
rates: numpy array of rates of all PMT combinations
opt_qes: numpy array of optimal qe values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_rates: numpy array of corrected rates for all PMT combinations
"""
corrected_rates = np.array([
rate / opt_qes[comb[0]] / opt_qes[comb[1]]
for rate, comb in zip(rates, combs)
])
return corrected_rates | [
"def",
"correct_rates",
"(",
"rates",
",",
"opt_qes",
",",
"combs",
")",
":",
"corrected_rates",
"=",
"np",
".",
"array",
"(",
"[",
"rate",
"/",
"opt_qes",
"[",
"comb",
"[",
"0",
"]",
"]",
"/",
"opt_qes",
"[",
"comb",
"[",
"1",
"]",
"]",
"for",
"rate",
",",
"comb",
"in",
"zip",
"(",
"rates",
",",
"combs",
")",
"]",
")",
"return",
"corrected_rates"
] | Applies optimal qes to rates.
Should be closer to fitted_rates afterwards.
Parameters
----------
rates: numpy array of rates of all PMT combinations
opt_qes: numpy array of optimal qe values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_rates: numpy array of corrected rates for all PMT combinations | [
"Applies",
"optimal",
"qes",
"to",
"rates",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L819-L839 | train |
tamasgal/km3pipe | km3modules/k40.py | calculate_rms_means | def calculate_rms_means(means, corrected_means):
"""Calculates RMS of means from zero before and after correction
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
corrected_means: numpy array of corrected gaussian means for all PMT combs
Returns
-------
rms_means: RMS of means from zero
rms_corrected_means: RMS of corrected_means from zero
"""
rms_means = np.sqrt(np.mean((means - 0)**2))
rms_corrected_means = np.sqrt(np.mean((corrected_means - 0)**2))
return rms_means, rms_corrected_means | python | def calculate_rms_means(means, corrected_means):
"""Calculates RMS of means from zero before and after correction
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
corrected_means: numpy array of corrected gaussian means for all PMT combs
Returns
-------
rms_means: RMS of means from zero
rms_corrected_means: RMS of corrected_means from zero
"""
rms_means = np.sqrt(np.mean((means - 0)**2))
rms_corrected_means = np.sqrt(np.mean((corrected_means - 0)**2))
return rms_means, rms_corrected_means | [
"def",
"calculate_rms_means",
"(",
"means",
",",
"corrected_means",
")",
":",
"rms_means",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"(",
"means",
"-",
"0",
")",
"**",
"2",
")",
")",
"rms_corrected_means",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"(",
"corrected_means",
"-",
"0",
")",
"**",
"2",
")",
")",
"return",
"rms_means",
",",
"rms_corrected_means"
] | Calculates RMS of means from zero before and after correction
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
corrected_means: numpy array of corrected gaussian means for all PMT combs
Returns
-------
rms_means: RMS of means from zero
rms_corrected_means: RMS of corrected_means from zero | [
"Calculates",
"RMS",
"of",
"means",
"from",
"zero",
"before",
"and",
"after",
"correction"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L842-L857 | train |
tamasgal/km3pipe | km3modules/k40.py | calculate_rms_rates | def calculate_rms_rates(rates, fitted_rates, corrected_rates):
"""Calculates RMS of rates from fitted_rates before and after correction
Parameters
----------
rates: numpy array of rates of all PMT combinations
corrected_rates: numpy array of corrected rates for all PMT combinations
Returns
-------
rms_rates: RMS of rates from fitted_rates
rms_corrected_rates: RMS of corrected_rates from fitted_rates
"""
rms_rates = np.sqrt(np.mean((rates - fitted_rates)**2))
rms_corrected_rates = np.sqrt(np.mean((corrected_rates - fitted_rates)**2))
return rms_rates, rms_corrected_rates | python | def calculate_rms_rates(rates, fitted_rates, corrected_rates):
"""Calculates RMS of rates from fitted_rates before and after correction
Parameters
----------
rates: numpy array of rates of all PMT combinations
corrected_rates: numpy array of corrected rates for all PMT combinations
Returns
-------
rms_rates: RMS of rates from fitted_rates
rms_corrected_rates: RMS of corrected_rates from fitted_rates
"""
rms_rates = np.sqrt(np.mean((rates - fitted_rates)**2))
rms_corrected_rates = np.sqrt(np.mean((corrected_rates - fitted_rates)**2))
return rms_rates, rms_corrected_rates | [
"def",
"calculate_rms_rates",
"(",
"rates",
",",
"fitted_rates",
",",
"corrected_rates",
")",
":",
"rms_rates",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"(",
"rates",
"-",
"fitted_rates",
")",
"**",
"2",
")",
")",
"rms_corrected_rates",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"(",
"corrected_rates",
"-",
"fitted_rates",
")",
"**",
"2",
")",
")",
"return",
"rms_rates",
",",
"rms_corrected_rates"
] | Calculates RMS of rates from fitted_rates before and after correction
Parameters
----------
rates: numpy array of rates of all PMT combinations
corrected_rates: numpy array of corrected rates for all PMT combinations
Returns
-------
rms_rates: RMS of rates from fitted_rates
rms_corrected_rates: RMS of corrected_ratesrates from fitted_rates | [
"Calculates",
"RMS",
"of",
"rates",
"from",
"fitted_rates",
"before",
"and",
"after",
"correction"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L860-L875 | train |
tamasgal/km3pipe | km3modules/k40.py | add_to_twofold_matrix | def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
"""Add counts to twofold coincidences for a given `tmax`.
Parameters
----------
times: np.ndarray of hit times (int32)
tdcs: np.ndarray of channel_ids (uint8)
mat: ref to a np.array((465, tmax * 2 + 1))
tmax: int (time window)
Returns
-------
mat: coincidence matrix (np.array((465, tmax * 2 + 1)))
"""
h_idx = 0 # index of initial hit
c_idx = 0 # index of coincident candidate hit
n_hits = len(times)
multiplicity = 0
while h_idx <= n_hits:
c_idx = h_idx + 1
if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
multiplicity = 2
c_idx += 1
while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
c_idx += 1
multiplicity += 1
if multiplicity != 2:
h_idx = c_idx
continue
c_idx -= 1
h_tdc = tdcs[h_idx]
c_tdc = tdcs[c_idx]
h_time = times[h_idx]
c_time = times[c_idx]
if h_tdc != c_tdc:
dt = int(c_time - h_time)
if h_tdc > c_tdc:
mat[get_comb_index(c_tdc, h_tdc), -dt + tmax] += 1
else:
mat[get_comb_index(h_tdc, c_tdc), dt + tmax] += 1
h_idx = c_idx | python | def add_to_twofold_matrix(times, tdcs, mat, tmax=10):
"""Add counts to twofold coincidences for a given `tmax`.
Parameters
----------
times: np.ndarray of hit times (int32)
tdcs: np.ndarray of channel_ids (uint8)
mat: ref to a np.array((465, tmax * 2 + 1))
tmax: int (time window)
Returns
-------
mat: coincidence matrix (np.array((465, tmax * 2 + 1)))
"""
h_idx = 0 # index of initial hit
c_idx = 0 # index of coincident candidate hit
n_hits = len(times)
multiplicity = 0
while h_idx <= n_hits:
c_idx = h_idx + 1
if (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
multiplicity = 2
c_idx += 1
while (c_idx < n_hits) and (times[c_idx] - times[h_idx] <= tmax):
c_idx += 1
multiplicity += 1
if multiplicity != 2:
h_idx = c_idx
continue
c_idx -= 1
h_tdc = tdcs[h_idx]
c_tdc = tdcs[c_idx]
h_time = times[h_idx]
c_time = times[c_idx]
if h_tdc != c_tdc:
dt = int(c_time - h_time)
if h_tdc > c_tdc:
mat[get_comb_index(c_tdc, h_tdc), -dt + tmax] += 1
else:
mat[get_comb_index(h_tdc, c_tdc), dt + tmax] += 1
h_idx = c_idx | [
"def",
"add_to_twofold_matrix",
"(",
"times",
",",
"tdcs",
",",
"mat",
",",
"tmax",
"=",
"10",
")",
":",
"h_idx",
"=",
"0",
"# index of initial hit",
"c_idx",
"=",
"0",
"# index of coincident candidate hit",
"n_hits",
"=",
"len",
"(",
"times",
")",
"multiplicity",
"=",
"0",
"while",
"h_idx",
"<=",
"n_hits",
":",
"c_idx",
"=",
"h_idx",
"+",
"1",
"if",
"(",
"c_idx",
"<",
"n_hits",
")",
"and",
"(",
"times",
"[",
"c_idx",
"]",
"-",
"times",
"[",
"h_idx",
"]",
"<=",
"tmax",
")",
":",
"multiplicity",
"=",
"2",
"c_idx",
"+=",
"1",
"while",
"(",
"c_idx",
"<",
"n_hits",
")",
"and",
"(",
"times",
"[",
"c_idx",
"]",
"-",
"times",
"[",
"h_idx",
"]",
"<=",
"tmax",
")",
":",
"c_idx",
"+=",
"1",
"multiplicity",
"+=",
"1",
"if",
"multiplicity",
"!=",
"2",
":",
"h_idx",
"=",
"c_idx",
"continue",
"c_idx",
"-=",
"1",
"h_tdc",
"=",
"tdcs",
"[",
"h_idx",
"]",
"c_tdc",
"=",
"tdcs",
"[",
"c_idx",
"]",
"h_time",
"=",
"times",
"[",
"h_idx",
"]",
"c_time",
"=",
"times",
"[",
"c_idx",
"]",
"if",
"h_tdc",
"!=",
"c_tdc",
":",
"dt",
"=",
"int",
"(",
"c_time",
"-",
"h_time",
")",
"if",
"h_tdc",
">",
"c_tdc",
":",
"mat",
"[",
"get_comb_index",
"(",
"c_tdc",
",",
"h_tdc",
")",
",",
"-",
"dt",
"+",
"tmax",
"]",
"+=",
"1",
"else",
":",
"mat",
"[",
"get_comb_index",
"(",
"h_tdc",
",",
"c_tdc",
")",
",",
"dt",
"+",
"tmax",
"]",
"+=",
"1",
"h_idx",
"=",
"c_idx"
] | Add counts to twofold coincidences for a given `tmax`.
Parameters
----------
times: np.ndarray of hit times (int32)
tdcs: np.ndarray of channel_ids (uint8)
mat: ref to a np.array((465, tmax * 2 + 1))
tmax: int (time window)
Returns
-------
mat: coincidence matrix (np.array((465, tmax * 2 + 1))) | [
"Add",
"counts",
"to",
"twofold",
"coincidences",
"for",
"a",
"given",
"tmax",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L885-L926 | train |
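A hedged usage sketch for `add_to_twofold_matrix`; the hit times and channel ids below are made up, and the matrix shape simply follows the docstring above (465 PMT combinations, 2*tmax+1 time bins):

import numpy as np
from km3modules.k40 import add_to_twofold_matrix

tmax = 10
mat = np.zeros((465, tmax * 2 + 1))
times = np.array([100, 104, 230, 500, 503], dtype=np.int32)  # sorted hit times [ns]
tdcs = np.array([3, 17, 3, 5, 9], dtype=np.uint8)            # channel ids on one DOM

add_to_twofold_matrix(times, tdcs, mat, tmax=tmax)
print(mat.sum())  # total number of twofold coincidences counted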
tamasgal/km3pipe | km3modules/k40.py | TwofoldCounter.reset | def reset(self):
"""Reset coincidence counter"""
self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
self.n_timeslices = defaultdict(int) | python | def reset(self):
"""Reset coincidence counter"""
self.counts = defaultdict(partial(np.zeros, (465, self.tmax * 2 + 1)))
self.n_timeslices = defaultdict(int) | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"counts",
"=",
"defaultdict",
"(",
"partial",
"(",
"np",
".",
"zeros",
",",
"(",
"465",
",",
"self",
".",
"tmax",
"*",
"2",
"+",
"1",
")",
")",
")",
"self",
".",
"n_timeslices",
"=",
"defaultdict",
"(",
"int",
")"
] | Reset coincidence counter | [
"Reset",
"coincidence",
"counter"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L247-L250 | train |
tamasgal/km3pipe | km3modules/k40.py | TwofoldCounter.dump | def dump(self):
"""Write coincidence counts into a Python pickle"""
self.print("Dumping data to {}".format(self.dump_filename))
pickle.dump({
'data': self.counts,
'livetime': self.get_livetime()
}, open(self.dump_filename, "wb")) | python | def dump(self):
"""Write coincidence counts into a Python pickle"""
self.print("Dumping data to {}".format(self.dump_filename))
pickle.dump({
'data': self.counts,
'livetime': self.get_livetime()
}, open(self.dump_filename, "wb")) | [
"def",
"dump",
"(",
"self",
")",
":",
"self",
".",
"print",
"(",
"\"Dumping data to {}\"",
".",
"format",
"(",
"self",
".",
"dump_filename",
")",
")",
"pickle",
".",
"dump",
"(",
"{",
"'data'",
":",
"self",
".",
"counts",
",",
"'livetime'",
":",
"self",
".",
"get_livetime",
"(",
")",
"}",
",",
"open",
"(",
"self",
".",
"dump_filename",
",",
"\"wb\"",
")",
")"
] | Write coincidence counts into a Python pickle | [
"Write",
"coincidence",
"counts",
"into",
"a",
"Python",
"pickle"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L283-L289 | train |
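Reading the pickle written by `TwofoldCounter.dump` back in; the filename is hypothetical, since `dump_filename` is set elsewhere in the class:

import pickle

with open("twofold_counts.p", "rb") as fobj:
    dumped = pickle.load(fobj)

counts = dumped['data']        # the `self.counts` mapping of coincidence matrices
livetime = dumped['livetime']  # value returned by `self.get_livetime()`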
ioos/pyoos | pyoos/parsers/ioos/one/describe_sensor.py | DescribeSensor.get_named_by_definition | def get_named_by_definition(cls, element_list, string_def):
"""Attempts to get an IOOS definition from a list of xml elements"""
try:
return next(
(
st.value
for st in element_list
if st.definition == string_def
)
)
except Exception:
return None | python | def get_named_by_definition(cls, element_list, string_def):
"""Attempts to get an IOOS definition from a list of xml elements"""
try:
return next(
(
st.value
for st in element_list
if st.definition == string_def
)
)
except Exception:
return None | [
"def",
"get_named_by_definition",
"(",
"cls",
",",
"element_list",
",",
"string_def",
")",
":",
"try",
":",
"return",
"next",
"(",
"(",
"st",
".",
"value",
"for",
"st",
"in",
"element_list",
"if",
"st",
".",
"definition",
"==",
"string_def",
")",
")",
"except",
"Exception",
":",
"return",
"None"
] | Attempts to get an IOOS definition from a list of xml elements | [
"Attempts",
"to",
"get",
"an",
"IOOS",
"definition",
"from",
"a",
"list",
"of",
"xml",
"elements"
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/describe_sensor.py#L37-L48 | train |
ioos/pyoos | pyoos/parsers/ioos/one/describe_sensor.py | DescribeSensor.get_ioos_def | def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
) | python | def get_ioos_def(self, ident, elem_type, ont):
"""Gets a definition given an identifier and where to search for it"""
if elem_type == "identifier":
getter_fn = self.system.get_identifiers_by_name
elif elem_type == "classifier":
getter_fn = self.system.get_classifiers_by_name
else:
raise ValueError("Unknown element type '{}'".format(elem_type))
return DescribeSensor.get_named_by_definition(
getter_fn(ident), urljoin(ont, ident)
) | [
"def",
"get_ioos_def",
"(",
"self",
",",
"ident",
",",
"elem_type",
",",
"ont",
")",
":",
"if",
"elem_type",
"==",
"\"identifier\"",
":",
"getter_fn",
"=",
"self",
".",
"system",
".",
"get_identifiers_by_name",
"elif",
"elem_type",
"==",
"\"classifier\"",
":",
"getter_fn",
"=",
"self",
".",
"system",
".",
"get_classifiers_by_name",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown element type '{}'\"",
".",
"format",
"(",
"elem_type",
")",
")",
"return",
"DescribeSensor",
".",
"get_named_by_definition",
"(",
"getter_fn",
"(",
"ident",
")",
",",
"urljoin",
"(",
"ont",
",",
"ident",
")",
")"
] | Gets a definition given an identifier and where to search for it | [
"Gets",
"a",
"definition",
"given",
"an",
"identifier",
"and",
"where",
"to",
"search",
"for",
"it"
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/parsers/ioos/one/describe_sensor.py#L50-L60 | train |
mouse-reeve/horoscope-generator | horoscope_generator/HoroscopeGenerator.py | get_sentence | def get_sentence(start=None, depth=7):
''' follow the grammatical patterns to generate a random sentence '''
if not GRAMMAR:
return 'Please set a GRAMMAR file'
start = start if start else GRAMMAR.start()
if isinstance(start, Nonterminal):
productions = GRAMMAR.productions(start)
if not depth:
# time to break the cycle
terminals = [p for p in productions if not isinstance(start, Nonterminal)]
if len(terminals):
production = terminals
production = random.choice(productions)
sentence = []
for piece in production.rhs():
sentence += get_sentence(start=piece, depth=depth-1)
return sentence
else:
return [start] | python | def get_sentence(start=None, depth=7):
''' follow the grammatical patterns to generate a random sentence '''
if not GRAMMAR:
return 'Please set a GRAMMAR file'
start = start if start else GRAMMAR.start()
if isinstance(start, Nonterminal):
productions = GRAMMAR.productions(start)
if not depth:
# time to break the cycle
terminals = [p for p in productions if not isinstance(start, Nonterminal)]
if len(terminals):
production = terminals
production = random.choice(productions)
sentence = []
for piece in production.rhs():
sentence += get_sentence(start=piece, depth=depth-1)
return sentence
else:
return [start] | [
"def",
"get_sentence",
"(",
"start",
"=",
"None",
",",
"depth",
"=",
"7",
")",
":",
"if",
"not",
"GRAMMAR",
":",
"return",
"'Please set a GRAMMAR file'",
"start",
"=",
"start",
"if",
"start",
"else",
"GRAMMAR",
".",
"start",
"(",
")",
"if",
"isinstance",
"(",
"start",
",",
"Nonterminal",
")",
":",
"productions",
"=",
"GRAMMAR",
".",
"productions",
"(",
"start",
")",
"if",
"not",
"depth",
":",
"# time to break the cycle",
"terminals",
"=",
"[",
"p",
"for",
"p",
"in",
"productions",
"if",
"not",
"isinstance",
"(",
"start",
",",
"Nonterminal",
")",
"]",
"if",
"len",
"(",
"terminals",
")",
":",
"production",
"=",
"terminals",
"production",
"=",
"random",
".",
"choice",
"(",
"productions",
")",
"sentence",
"=",
"[",
"]",
"for",
"piece",
"in",
"production",
".",
"rhs",
"(",
")",
":",
"sentence",
"+=",
"get_sentence",
"(",
"start",
"=",
"piece",
",",
"depth",
"=",
"depth",
"-",
"1",
")",
"return",
"sentence",
"else",
":",
"return",
"[",
"start",
"]"
] | follow the grammatical patterns to generate a random sentence | [
"follow",
"the",
"grammatical",
"patterns",
"to",
"generate",
"a",
"random",
"sentence"
] | 01acf298116745ded5819d348c28a98a7492ccf3 | https://github.com/mouse-reeve/horoscope-generator/blob/01acf298116745ded5819d348c28a98a7492ccf3/horoscope_generator/HoroscopeGenerator.py#L17-L38 | train |
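A sketch of driving `get_sentence` with a toy grammar; the real module ships its own GRAMMAR, so assigning one here is only for illustration (the function's use of `GRAMMAR.start()` and `GRAMMAR.productions()` matches an nltk CFG):

import nltk
from horoscope_generator import HoroscopeGenerator

# hypothetical toy grammar, not the project's horoscope grammar
HoroscopeGenerator.GRAMMAR = nltk.CFG.fromstring("""
S -> NP VP
NP -> 'the' 'stars'
VP -> 'favor' NP
""")

words = HoroscopeGenerator.get_sentence()
print(words)  # -> ['the', 'stars', 'favor', 'the', 'stars']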
mouse-reeve/horoscope-generator | horoscope_generator/HoroscopeGenerator.py | format_sentence | def format_sentence(sentence):
''' fix display formatting of a sentence array '''
for index, word in enumerate(sentence):
if word == 'a' and index + 1 < len(sentence) and \
re.match(r'^[aeiou]', sentence[index + 1]) and not \
re.match(r'^uni', sentence[index + 1]):
sentence[index] = 'an'
text = ' '.join(sentence)
text = '%s%s' % (text[0].upper(), text[1:])
text = text.replace(' ,', ',')
return '%s.' % text | python | def format_sentence(sentence):
''' fix display formatting of a sentence array '''
for index, word in enumerate(sentence):
if word == 'a' and index + 1 < len(sentence) and \
re.match(r'^[aeiou]', sentence[index + 1]) and not \
re.match(r'^uni', sentence[index + 1]):
sentence[index] = 'an'
text = ' '.join(sentence)
text = '%s%s' % (text[0].upper(), text[1:])
text = text.replace(' ,', ',')
return '%s.' % text | [
"def",
"format_sentence",
"(",
"sentence",
")",
":",
"for",
"index",
",",
"word",
"in",
"enumerate",
"(",
"sentence",
")",
":",
"if",
"word",
"==",
"'a'",
"and",
"index",
"+",
"1",
"<",
"len",
"(",
"sentence",
")",
"and",
"re",
".",
"match",
"(",
"r'^[aeiou]'",
",",
"sentence",
"[",
"index",
"+",
"1",
"]",
")",
"and",
"not",
"re",
".",
"match",
"(",
"r'^uni'",
",",
"sentence",
"[",
"index",
"+",
"1",
"]",
")",
":",
"sentence",
"[",
"index",
"]",
"=",
"'an'",
"text",
"=",
"' '",
".",
"join",
"(",
"sentence",
")",
"text",
"=",
"'%s%s'",
"%",
"(",
"text",
"[",
"0",
"]",
".",
"upper",
"(",
")",
",",
"text",
"[",
"1",
":",
"]",
")",
"text",
"=",
"text",
".",
"replace",
"(",
"' ,'",
",",
"','",
")",
"return",
"'%s.'",
"%",
"text"
] | fix display formatting of a sentence array | [
"fix",
"display",
"formatting",
"of",
"a",
"sentence",
"array"
] | 01acf298116745ded5819d348c28a98a7492ccf3 | https://github.com/mouse-reeve/horoscope-generator/blob/01acf298116745ded5819d348c28a98a7492ccf3/horoscope_generator/HoroscopeGenerator.py#L40-L50 | train |
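`format_sentence` can be exercised on its own; the word list below is invented:

from horoscope_generator.HoroscopeGenerator import format_sentence

words = ['a', 'auspicious', 'alignment', 'favors', 'you']
print(format_sentence(words))  # 'An auspicious alignment favors you.'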
dsoprea/PySchedules | pyschedules/examples/read.py | EntityTrigger.new_station | def new_station(self, _id, callSign, name, affiliate, fccChannelNumber):
"""Callback run for each new station"""
if self.__v_station:
# [Station: 11440, WFLX, WFLX, Fox Affiliate, 29]
# [Station: 11836, WSCV, WSCV, TELEMUNDO (HBC) Affiliate, 51]
# [Station: 11867, TBS, Turner Broadcasting System, Satellite, None]
# [Station: 11869, WTCE, WTCE, Independent, 21]
# [Station: 11924, WTVX, WTVX, CW Affiliate, 34]
# [Station: 11991, WXEL, WXEL, PBS Affiliate, 42]
# [Station: 12131, TOON, Cartoon Network, Satellite, None]
# [Station: 12444, ESPN2, ESPN2, Sports Satellite, None]
# [Station: 12471, WFGC, WFGC, Independent, 61]
# [Station: 16046, TVNI, TV Chile Internacional, Latin American Satellite, None]
# [Station: 22233, GOAC020, Government Access - GOAC020, Cablecast, None]
print("[Station: %s, %s, %s, %s, %s]" %
(_id, callSign, name, affiliate, fccChannelNumber)) | python | def new_station(self, _id, callSign, name, affiliate, fccChannelNumber):
"""Callback run for each new station"""
if self.__v_station:
# [Station: 11440, WFLX, WFLX, Fox Affiliate, 29]
# [Station: 11836, WSCV, WSCV, TELEMUNDO (HBC) Affiliate, 51]
# [Station: 11867, TBS, Turner Broadcasting System, Satellite, None]
# [Station: 11869, WTCE, WTCE, Independent, 21]
# [Station: 11924, WTVX, WTVX, CW Affiliate, 34]
# [Station: 11991, WXEL, WXEL, PBS Affiliate, 42]
# [Station: 12131, TOON, Cartoon Network, Satellite, None]
# [Station: 12444, ESPN2, ESPN2, Sports Satellite, None]
# [Station: 12471, WFGC, WFGC, Independent, 61]
# [Station: 16046, TVNI, TV Chile Internacional, Latin American Satellite, None]
# [Station: 22233, GOAC020, Government Access - GOAC020, Cablecast, None]
print("[Station: %s, %s, %s, %s, %s]" %
(_id, callSign, name, affiliate, fccChannelNumber)) | [
"def",
"new_station",
"(",
"self",
",",
"_id",
",",
"callSign",
",",
"name",
",",
"affiliate",
",",
"fccChannelNumber",
")",
":",
"if",
"self",
".",
"__v_station",
":",
"# [Station: 11440, WFLX, WFLX, Fox Affiliate, 29]",
"# [Station: 11836, WSCV, WSCV, TELEMUNDO (HBC) Affiliate, 51]",
"# [Station: 11867, TBS, Turner Broadcasting System, Satellite, None]",
"# [Station: 11869, WTCE, WTCE, Independent, 21]",
"# [Station: 11924, WTVX, WTVX, CW Affiliate, 34]",
"# [Station: 11991, WXEL, WXEL, PBS Affiliate, 42]",
"# [Station: 12131, TOON, Cartoon Network, Satellite, None]",
"# [Station: 12444, ESPN2, ESPN2, Sports Satellite, None]",
"# [Station: 12471, WFGC, WFGC, Independent, 61]",
"# [Station: 16046, TVNI, TV Chile Internacional, Latin American Satellite, None]",
"# [Station: 22233, GOAC020, Government Access - GOAC020, Cablecast, None]",
"print",
"(",
"\"[Station: %s, %s, %s, %s, %s]\"",
"%",
"(",
"_id",
",",
"callSign",
",",
"name",
",",
"affiliate",
",",
"fccChannelNumber",
")",
")"
] | Callback run for each new station | [
"Callback",
"run",
"for",
"each",
"new",
"station"
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L37-L53 | train |
dsoprea/PySchedules | pyschedules/examples/read.py | EntityTrigger.new_lineup | def new_lineup(self, name, location, device, _type, postalCode, _id):
"""Callback run for each new lineup"""
if self.__v_lineup:
# [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X]
print("[Lineup: %s, %s, %s, %s, %s, %s]" %
(name, location, device, _type, postalCode, _id)) | python | def new_lineup(self, name, location, device, _type, postalCode, _id):
"""Callback run for each new lineup"""
if self.__v_lineup:
# [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X]
print("[Lineup: %s, %s, %s, %s, %s, %s]" %
(name, location, device, _type, postalCode, _id)) | [
"def",
"new_lineup",
"(",
"self",
",",
"name",
",",
"location",
",",
"device",
",",
"_type",
",",
"postalCode",
",",
"_id",
")",
":",
"if",
"self",
".",
"__v_lineup",
":",
"# [Lineup: Comcast West Palm Beach /Palm Beach Co., West Palm Beach, Digital, CableDigital, 33436, FL09567:X]",
"print",
"(",
"\"[Lineup: %s, %s, %s, %s, %s, %s]\"",
"%",
"(",
"name",
",",
"location",
",",
"device",
",",
"_type",
",",
"postalCode",
",",
"_id",
")",
")"
] | Callback run for each new lineup | [
"Callback",
"run",
"for",
"each",
"new",
"lineup"
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L55-L61 | train |
dsoprea/PySchedules | pyschedules/examples/read.py | EntityTrigger.new_genre | def new_genre(self, program, genre, relevance):
"""Callback run for each new program genre entry"""
if self.__v_genre:
# [Genre: SP002709210000, Sports event, 0]
# [Genre: SP002709210000, Basketball, 1]
# [Genre: SP002737310000, Sports event, 0]
# [Genre: SP002737310000, Basketball, 1]
# [Genre: SH016761790000, News, 0]
# [Genre: SH016761790000, Talk, 1]
# [Genre: SH016761790000, Interview, 2]
# [Genre: SH016761790000, Politics, 3]
print("[Genre: %s, %s, %s]" % (program, genre, relevance)) | python | def new_genre(self, program, genre, relevance):
"""Callback run for each new program genre entry"""
if self.__v_genre:
# [Genre: SP002709210000, Sports event, 0]
# [Genre: SP002709210000, Basketball, 1]
# [Genre: SP002737310000, Sports event, 0]
# [Genre: SP002737310000, Basketball, 1]
# [Genre: SH016761790000, News, 0]
# [Genre: SH016761790000, Talk, 1]
# [Genre: SH016761790000, Interview, 2]
# [Genre: SH016761790000, Politics, 3]
print("[Genre: %s, %s, %s]" % (program, genre, relevance)) | [
"def",
"new_genre",
"(",
"self",
",",
"program",
",",
"genre",
",",
"relevance",
")",
":",
"if",
"self",
".",
"__v_genre",
":",
"# [Genre: SP002709210000, Sports event, 0]",
"# [Genre: SP002709210000, Basketball, 1]",
"# [Genre: SP002737310000, Sports event, 0]",
"# [Genre: SP002737310000, Basketball, 1]",
"# [Genre: SH016761790000, News, 0]",
"# [Genre: SH016761790000, Talk, 1]",
"# [Genre: SH016761790000, Interview, 2]",
"# [Genre: SH016761790000, Politics, 3]",
"print",
"(",
"\"[Genre: %s, %s, %s]\"",
"%",
"(",
"program",
",",
"genre",
",",
"relevance",
")",
")"
] | Callback run for each new program genre entry | [
"Callback",
"run",
"for",
"each",
"new",
"program",
"genre",
"entry"
] | e5aae988fad90217f72db45f93bf69839f4d75e7 | https://github.com/dsoprea/PySchedules/blob/e5aae988fad90217f72db45f93bf69839f4d75e7/pyschedules/examples/read.py#L108-L120 | train |
tamasgal/km3pipe | km3pipe/shell.py | qsub | def qsub(script, job_name, dryrun=False, *args, **kwargs):
"""Submit a job via qsub."""
print("Preparing job script...")
job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
env = os.environ.copy()
if dryrun:
print(
"This is a dry run! Here is the generated job file, which will "
"not be submitted:"
)
print(job_string)
else:
print("Calling qsub with the generated job script.")
p = subprocess.Popen(
'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
)
p.communicate(input=bytes(job_string.encode('ascii'))) | python | def qsub(script, job_name, dryrun=False, *args, **kwargs):
"""Submit a job via qsub."""
print("Preparing job script...")
job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
env = os.environ.copy()
if dryrun:
print(
"This is a dry run! Here is the generated job file, which will "
"not be submitted:"
)
print(job_string)
else:
print("Calling qsub with the generated job script.")
p = subprocess.Popen(
'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
)
p.communicate(input=bytes(job_string.encode('ascii'))) | [
"def",
"qsub",
"(",
"script",
",",
"job_name",
",",
"dryrun",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"\"Preparing job script...\"",
")",
"job_string",
"=",
"gen_job",
"(",
"script",
"=",
"script",
",",
"job_name",
"=",
"job_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"if",
"dryrun",
":",
"print",
"(",
"\"This is a dry run! Here is the generated job file, which will \"",
"\"not be submitted:\"",
")",
"print",
"(",
"job_string",
")",
"else",
":",
"print",
"(",
"\"Calling qsub with the generated job script.\"",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"'qsub -V'",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"env",
"=",
"env",
",",
"shell",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
"input",
"=",
"bytes",
"(",
"job_string",
".",
"encode",
"(",
"'ascii'",
")",
")",
")"
] | Submit a job via qsub. | [
"Submit",
"a",
"job",
"via",
"qsub",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L66-L82 | train |
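A dry-run sketch for `qsub`; `Script` comes from the same km3pipe.shell module (its `add` method is used by `iget` further down), and the keyword arguments are forwarded to `gen_job`:

from km3pipe.shell import Script, qsub

script = Script()
script.add("echo 'hello from the batch farm'")  # assumed to append one shell line

# dryrun=True only prints the generated job file instead of submitting it
qsub(script, "hello_job", dryrun=True, walltime="00:05:00", vmem="2G")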
tamasgal/km3pipe | km3pipe/shell.py | gen_job | def gen_job(
script,
job_name,
log_path='qlogs',
group='km3net',
platform='cl7',
walltime='00:10:00',
vmem='8G',
fsize='8G',
shell=None,
email=None,
send_mail='n',
job_array_start=1,
job_array_stop=None,
job_array_step=1,
irods=False,
sps=True,
hpss=False,
xrootd=False,
dcache=False,
oracle=False,
split_array_logs=False
):
"""Generate a job script."""
if shell is None:
shell = os.environ['SHELL']
if email is None:
email = os.environ['USER'] + '@km3net.de'
if isinstance(script, Script):
script = str(script)
log_path = os.path.join(os.getcwd(), log_path)
if job_array_stop is not None:
job_array_option = "#$ -t {}-{}:{}" \
.format(job_array_start, job_array_stop,
job_array_step)
else:
job_array_option = "#"
if split_array_logs:
task_name = '_$TASK_ID'
else:
task_name = ''
job_string = JOB_TEMPLATE.format(
script=script,
email=email,
send_mail=send_mail,
log_path=log_path,
job_name=job_name,
group=group,
walltime=walltime,
vmem=vmem,
fsize=fsize,
irods=irods,
sps=sps,
hpss=hpss,
xrootd=xrootd,
dcache=dcache,
oracle=oracle,
shell=shell,
platform=platform,
job_array_option=job_array_option,
task_name=task_name
)
return job_string | python | def gen_job(
script,
job_name,
log_path='qlogs',
group='km3net',
platform='cl7',
walltime='00:10:00',
vmem='8G',
fsize='8G',
shell=None,
email=None,
send_mail='n',
job_array_start=1,
job_array_stop=None,
job_array_step=1,
irods=False,
sps=True,
hpss=False,
xrootd=False,
dcache=False,
oracle=False,
split_array_logs=False
):
"""Generate a job script."""
if shell is None:
shell = os.environ['SHELL']
if email is None:
email = os.environ['USER'] + '@km3net.de'
if isinstance(script, Script):
script = str(script)
log_path = os.path.join(os.getcwd(), log_path)
if job_array_stop is not None:
job_array_option = "#$ -t {}-{}:{}" \
.format(job_array_start, job_array_stop,
job_array_step)
else:
job_array_option = "#"
if split_array_logs:
task_name = '_$TASK_ID'
else:
task_name = ''
job_string = JOB_TEMPLATE.format(
script=script,
email=email,
send_mail=send_mail,
log_path=log_path,
job_name=job_name,
group=group,
walltime=walltime,
vmem=vmem,
fsize=fsize,
irods=irods,
sps=sps,
hpss=hpss,
xrootd=xrootd,
dcache=dcache,
oracle=oracle,
shell=shell,
platform=platform,
job_array_option=job_array_option,
task_name=task_name
)
return job_string | [
"def",
"gen_job",
"(",
"script",
",",
"job_name",
",",
"log_path",
"=",
"'qlogs'",
",",
"group",
"=",
"'km3net'",
",",
"platform",
"=",
"'cl7'",
",",
"walltime",
"=",
"'00:10:00'",
",",
"vmem",
"=",
"'8G'",
",",
"fsize",
"=",
"'8G'",
",",
"shell",
"=",
"None",
",",
"email",
"=",
"None",
",",
"send_mail",
"=",
"'n'",
",",
"job_array_start",
"=",
"1",
",",
"job_array_stop",
"=",
"None",
",",
"job_array_step",
"=",
"1",
",",
"irods",
"=",
"False",
",",
"sps",
"=",
"True",
",",
"hpss",
"=",
"False",
",",
"xrootd",
"=",
"False",
",",
"dcache",
"=",
"False",
",",
"oracle",
"=",
"False",
",",
"split_array_logs",
"=",
"False",
")",
":",
"if",
"shell",
"is",
"None",
":",
"shell",
"=",
"os",
".",
"environ",
"[",
"'SHELL'",
"]",
"if",
"email",
"is",
"None",
":",
"email",
"=",
"os",
".",
"environ",
"[",
"'USER'",
"]",
"+",
"'@km3net.de'",
"if",
"isinstance",
"(",
"script",
",",
"Script",
")",
":",
"script",
"=",
"str",
"(",
"script",
")",
"log_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"log_path",
")",
"if",
"job_array_stop",
"is",
"not",
"None",
":",
"job_array_option",
"=",
"\"#$ -t {}-{}:{}\"",
".",
"format",
"(",
"job_array_start",
",",
"job_array_stop",
",",
"job_array_step",
")",
"else",
":",
"job_array_option",
"=",
"\"#\"",
"if",
"split_array_logs",
":",
"task_name",
"=",
"'_$TASK_ID'",
"else",
":",
"task_name",
"=",
"''",
"job_string",
"=",
"JOB_TEMPLATE",
".",
"format",
"(",
"script",
"=",
"script",
",",
"email",
"=",
"email",
",",
"send_mail",
"=",
"send_mail",
",",
"log_path",
"=",
"log_path",
",",
"job_name",
"=",
"job_name",
",",
"group",
"=",
"group",
",",
"walltime",
"=",
"walltime",
",",
"vmem",
"=",
"vmem",
",",
"fsize",
"=",
"fsize",
",",
"irods",
"=",
"irods",
",",
"sps",
"=",
"sps",
",",
"hpss",
"=",
"hpss",
",",
"xrootd",
"=",
"xrootd",
",",
"dcache",
"=",
"dcache",
",",
"oracle",
"=",
"oracle",
",",
"shell",
"=",
"shell",
",",
"platform",
"=",
"platform",
",",
"job_array_option",
"=",
"job_array_option",
",",
"task_name",
"=",
"task_name",
")",
"return",
"job_string"
] | Generate a job script. | [
"Generate",
"a",
"job",
"script",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L85-L147 | train |
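`gen_job` can also be used without submitting, for example to write the job file to disk; the payload command and filenames are placeholders:

from km3pipe.shell import gen_job

job_text = gen_job(
    script="python process_run.py",   # hypothetical payload command
    job_name="process_run",
    walltime="02:00:00",
    vmem="4G",
    job_array_stop=10,                # adds an SGE task-array option 1-10:1
)

with open("job.sh", "w") as fobj:
    fobj.write(job_text)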
tamasgal/km3pipe | km3pipe/shell.py | get_jpp_env | def get_jpp_env(jpp_dir):
"""Return the environment dict of a loaded Jpp env.
The returned env can be passed to `subprocess.Popen("J...", env=env)`
to execute Jpp commands.
"""
env = {
v[0]: ''.join(v[1:])
for v in [
l.split('=') for l in os.popen(
"source {0}/setenv.sh {0} && env".format(jpp_dir)
).read().split('\n') if '=' in l
]
}
return env | python | def get_jpp_env(jpp_dir):
"""Return the environment dict of a loaded Jpp env.
The returned env can be passed to `subprocess.Popen("J...", env=env)`
to execute Jpp commands.
"""
env = {
v[0]: ''.join(v[1:])
for v in [
l.split('=') for l in os.popen(
"source {0}/setenv.sh {0} && env".format(jpp_dir)
).read().split('\n') if '=' in l
]
}
return env | [
"def",
"get_jpp_env",
"(",
"jpp_dir",
")",
":",
"env",
"=",
"{",
"v",
"[",
"0",
"]",
":",
"''",
".",
"join",
"(",
"v",
"[",
"1",
":",
"]",
")",
"for",
"v",
"in",
"[",
"l",
".",
"split",
"(",
"'='",
")",
"for",
"l",
"in",
"os",
".",
"popen",
"(",
"\"source {0}/setenv.sh {0} && env\"",
".",
"format",
"(",
"jpp_dir",
")",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"'='",
"in",
"l",
"]",
"}",
"return",
"env"
] | Return the environment dict of a loaded Jpp env.
The returned env can be passed to `subprocess.Popen("J...", env=env)`
to execute Jpp commands. | [
"Return",
"the",
"environment",
"dict",
"of",
"a",
"loaded",
"Jpp",
"env",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L150-L165 | train |
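A sketch of the pattern described in the `get_jpp_env` docstring; the Jpp install path and the tool name are placeholders:

import subprocess
from km3pipe.shell import get_jpp_env

env = get_jpp_env("/opt/jpp")                       # hypothetical Jpp directory
subprocess.Popen("JTool -h", shell=True, env=env)   # stand-in for any Jpp command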
tamasgal/km3pipe | km3pipe/shell.py | Script.iget | def iget(self, irods_path, attempts=1, pause=15):
"""Add an iget command to retrieve a file from iRODS.
Parameters
----------
irods_path: str
Filepath which should be fetched using iget
attempts: int (default: 1)
Number of retries, if iRODS access fails
pause: int (default: 15)
Pause between two access attempts in seconds
"""
if attempts > 1:
cmd = """ for i in {{1..{0}}}; do
ret=$(iget -v {1} 2>&1)
echo $ret
if [[ $ret == *"ERROR"* ]]; then
echo "Attempt $i failed"
else
break
fi
sleep {2}s
done """
cmd = lstrip(cmd)
cmd = cmd.format(attempts, irods_path, pause)
self.add(cmd)
else:
self.add('iget -v "{}"'.format(irods_path)) | python | def iget(self, irods_path, attempts=1, pause=15):
"""Add an iget command to retrieve a file from iRODS.
Parameters
----------
irods_path: str
Filepath which should be fetched using iget
attempts: int (default: 1)
Number of retries, if iRODS access fails
pause: int (default: 15)
Pause between two access attempts in seconds
"""
if attempts > 1:
cmd = """ for i in {{1..{0}}}; do
ret=$(iget -v {1} 2>&1)
echo $ret
if [[ $ret == *"ERROR"* ]]; then
echo "Attempt $i failed"
else
break
fi
sleep {2}s
done """
cmd = lstrip(cmd)
cmd = cmd.format(attempts, irods_path, pause)
self.add(cmd)
else:
self.add('iget -v "{}"'.format(irods_path)) | [
"def",
"iget",
"(",
"self",
",",
"irods_path",
",",
"attempts",
"=",
"1",
",",
"pause",
"=",
"15",
")",
":",
"if",
"attempts",
">",
"1",
":",
"cmd",
"=",
"\"\"\" for i in {{1..{0}}}; do\n ret=$(iget -v {1} 2>&1)\n echo $ret\n if [[ $ret == *\"ERROR\"* ]]; then\n echo \"Attempt $i failed\"\n else\n break\n fi\n sleep {2}s\n done \"\"\"",
"cmd",
"=",
"lstrip",
"(",
"cmd",
")",
"cmd",
"=",
"cmd",
".",
"format",
"(",
"attempts",
",",
"irods_path",
",",
"pause",
")",
"self",
".",
"add",
"(",
"cmd",
")",
"else",
":",
"self",
".",
"add",
"(",
"'iget -v \"{}\"'",
".",
"format",
"(",
"irods_path",
")",
")"
] | Add an iget command to retrieve a file from iRODS.
Parameters
----------
irods_path: str
Filepath which should be fetched using iget
attempts: int (default: 1)
Number of retries, if iRODS access fails
pause: int (default: 15)
Pause between two access attempts in seconds | [
"Add",
"an",
"iget",
"command",
"to",
"retrieve",
"a",
"file",
"from",
"iRODS",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L198-L225 | train |
tamasgal/km3pipe | km3pipe/shell.py | Script._add_two_argument_command | def _add_two_argument_command(self, command, arg1, arg2):
"""Helper function for two-argument commands"""
self.lines.append("{} {} {}".format(command, arg1, arg2)) | python | def _add_two_argument_command(self, command, arg1, arg2):
"""Helper function for two-argument commands"""
self.lines.append("{} {} {}".format(command, arg1, arg2)) | [
"def",
"_add_two_argument_command",
"(",
"self",
",",
"command",
",",
"arg1",
",",
"arg2",
")",
":",
"self",
".",
"lines",
".",
"append",
"(",
"\"{} {} {}\"",
".",
"format",
"(",
"command",
",",
"arg1",
",",
"arg2",
")",
")"
] | Helper function for two-argument commands | [
"Helper",
"function",
"for",
"two",
"-",
"argument",
"commands"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/shell.py#L227-L229 | train |
dlbroadfoot/pygogogate2 | pygogogate2/__init__.py | Gogogate2API.get_devices | def get_devices(self):
"""List all garage door devices."""
devices = self.make_request('["{username}","{password}","info","",""]'.format(
username=self.username,
password=self.password))
if devices != False:
garage_doors = []
try:
self.apicode = devices.find('apicode').text
self._device_states = {}
for doorNum in range(1, 4):
door = devices.find('door' + str(doorNum))
doorName = door.find('name').text
if doorName:
dev = {'door': doorNum, 'name': doorName}
for id in ['mode', 'sensor', 'status', 'sensorid', 'temperature', 'voltage',
'camera', 'events', 'permission']:
item = door.find(id)
if item is not None:
dev[id] = item.text
garage_state = door.find('status').text
dev['status'] = self.DOOR_STATE[garage_state]
self._device_states[doorNum] = self.DOOR_STATE[garage_state]
garage_doors.append(dev)
return garage_doors
except TypeError as ex:
print(ex)
return False
else:
return False; | python | def get_devices(self):
"""List all garage door devices."""
devices = self.make_request('["{username}","{password}","info","",""]'.format(
username=self.username,
password=self.password))
if devices != False:
garage_doors = []
try:
self.apicode = devices.find('apicode').text
self._device_states = {}
for doorNum in range(1, 4):
door = devices.find('door' + str(doorNum))
doorName = door.find('name').text
if doorName:
dev = {'door': doorNum, 'name': doorName}
for id in ['mode', 'sensor', 'status', 'sensorid', 'temperature', 'voltage',
'camera', 'events', 'permission']:
item = door.find(id)
if item is not None:
dev[id] = item.text
garage_state = door.find('status').text
dev['status'] = self.DOOR_STATE[garage_state]
self._device_states[doorNum] = self.DOOR_STATE[garage_state]
garage_doors.append(dev)
return garage_doors
except TypeError as ex:
print(ex)
return False
else:
return False; | [
"def",
"get_devices",
"(",
"self",
")",
":",
"devices",
"=",
"self",
".",
"make_request",
"(",
"'[\"{username}\",\"{password}\",\"info\",\"\",\"\"]'",
".",
"format",
"(",
"username",
"=",
"self",
".",
"username",
",",
"password",
"=",
"self",
".",
"password",
")",
")",
"if",
"devices",
"!=",
"False",
":",
"garage_doors",
"=",
"[",
"]",
"try",
":",
"self",
".",
"apicode",
"=",
"devices",
".",
"find",
"(",
"'apicode'",
")",
".",
"text",
"self",
".",
"_device_states",
"=",
"{",
"}",
"for",
"doorNum",
"in",
"range",
"(",
"1",
",",
"4",
")",
":",
"door",
"=",
"devices",
".",
"find",
"(",
"'door'",
"+",
"str",
"(",
"doorNum",
")",
")",
"doorName",
"=",
"door",
".",
"find",
"(",
"'name'",
")",
".",
"text",
"if",
"doorName",
":",
"dev",
"=",
"{",
"'door'",
":",
"doorNum",
",",
"'name'",
":",
"doorName",
"}",
"for",
"id",
"in",
"[",
"'mode'",
",",
"'sensor'",
",",
"'status'",
",",
"'sensorid'",
",",
"'temperature'",
",",
"'voltage'",
",",
"'camera'",
",",
"'events'",
",",
"'permission'",
"]",
":",
"item",
"=",
"door",
".",
"find",
"(",
"id",
")",
"if",
"item",
"is",
"not",
"None",
":",
"dev",
"[",
"id",
"]",
"=",
"item",
".",
"text",
"garage_state",
"=",
"door",
".",
"find",
"(",
"'status'",
")",
".",
"text",
"dev",
"[",
"'status'",
"]",
"=",
"self",
".",
"DOOR_STATE",
"[",
"garage_state",
"]",
"self",
".",
"_device_states",
"[",
"doorNum",
"]",
"=",
"self",
".",
"DOOR_STATE",
"[",
"garage_state",
"]",
"garage_doors",
".",
"append",
"(",
"dev",
")",
"return",
"garage_doors",
"except",
"TypeError",
"as",
"ex",
":",
"print",
"(",
"ex",
")",
"return",
"False",
"else",
":",
"return",
"False"
] | List all garage door devices. | [
"List",
"all",
"garage",
"door",
"devices",
"."
] | 3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406 | https://github.com/dlbroadfoot/pygogogate2/blob/3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406/pygogogate2/__init__.py#L70-L102 | train |
dlbroadfoot/pygogogate2 | pygogogate2/__init__.py | Gogogate2API.get_status | def get_status(self, device_id):
"""List only MyQ garage door devices."""
devices = self.get_devices()
if devices != False:
for device in devices:
if device['door'] == device_id:
return device['status']
return False | python | def get_status(self, device_id):
"""List only MyQ garage door devices."""
devices = self.get_devices()
if devices != False:
for device in devices:
if device['door'] == device_id:
return device['status']
return False | [
"def",
"get_status",
"(",
"self",
",",
"device_id",
")",
":",
"devices",
"=",
"self",
".",
"get_devices",
"(",
")",
"if",
"devices",
"!=",
"False",
":",
"for",
"device",
"in",
"devices",
":",
"if",
"device",
"[",
"'door'",
"]",
"==",
"device_id",
":",
"return",
"device",
"[",
"'status'",
"]",
"return",
"False"
] | List only MyQ garage door devices. | [
"List",
"only",
"MyQ",
"garage",
"door",
"devices",
"."
] | 3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406 | https://github.com/dlbroadfoot/pygogogate2/blob/3cc0a5d9e493024eeb0c07b39b2b90f7b5b7b406/pygogogate2/__init__.py#L105-L114 | train |
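Using the two Gogogate2API records above together; the constructor arguments are not shown in these records, so `api` is assumed to be an already-initialised instance:

# `api` is an already-constructed Gogogate2API instance (construction not shown here)
doors = api.get_devices()
if doors:
    for door in doors:
        print(door['door'], door['name'], door['status'])

print(api.get_status(1))  # status string for door 1, or False on failure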
lexibank/pylexibank | src/pylexibank/transcription.py | analyze | def analyze(segments, analysis, lookup=dict(bipa={}, dolgo={})):
"""
Test a sequence for compatibility with CLPA and LingPy.
:param analysis: Pass a `TranscriptionAnalysis` instance for cumulative reporting.
"""
# raise a ValueError in case of empty segments/strings
if not segments:
raise ValueError('Empty sequence.')
# test if at least one element in `segments` has information
# (helps to catch really badly formed input, such as ['\n']
if not [segment for segment in segments if segment.strip()]:
raise ValueError('No information in the sequence.')
# build the phonologic and sound class analyses
try:
bipa_analysis, sc_analysis = [], []
for s in segments:
a = lookup['bipa'].get(s)
if a is None:
a = lookup['bipa'].setdefault(s, BIPA[s])
bipa_analysis.append(a)
sc = lookup['dolgo'].get(s)
if sc is None:
sc = lookup['dolgo'].setdefault(s, BIPA.translate(s, DOLGO))
sc_analysis.append(sc)
except: # noqa
print(segments)
raise
# compute general errors; this loop must take place outside the
# following one because the code for computing single errors (either
# in `bipa_analysis` or in `soundclass_analysis`) is unnecessarily
# complicated
for sound_bipa, sound_class in zip(bipa_analysis, sc_analysis):
if isinstance(sound_bipa, pyclts.models.UnknownSound) or sound_class == '?':
analysis.general_errors += 1
# iterate over the segments and analyses, updating counts of occurrences
# and specific errors
for segment, sound_bipa, sound_class in zip(segments, bipa_analysis, sc_analysis):
# update the segment count
analysis.segments.update([segment])
# add an error if we got an unknown sound, otherwise just append
# the `replacements` dictionary
if isinstance(sound_bipa, pyclts.models.UnknownSound):
analysis.bipa_errors.add(segment)
else:
analysis.replacements[sound_bipa.source].add(sound_bipa.__unicode__())
# update sound class errors, if any
if sound_class == '?':
analysis.sclass_errors.add(segment)
return segments, bipa_analysis, sc_analysis, analysis | python | def analyze(segments, analysis, lookup=dict(bipa={}, dolgo={})):
"""
Test a sequence for compatibility with CLPA and LingPy.
:param analysis: Pass a `TranscriptionAnalysis` instance for cumulative reporting.
"""
# raise a ValueError in case of empty segments/strings
if not segments:
raise ValueError('Empty sequence.')
# test if at least one element in `segments` has information
# (helps to catch really badly formed input, such as ['\n']
if not [segment for segment in segments if segment.strip()]:
raise ValueError('No information in the sequence.')
# build the phonologic and sound class analyses
try:
bipa_analysis, sc_analysis = [], []
for s in segments:
a = lookup['bipa'].get(s)
if a is None:
a = lookup['bipa'].setdefault(s, BIPA[s])
bipa_analysis.append(a)
sc = lookup['dolgo'].get(s)
if sc is None:
sc = lookup['dolgo'].setdefault(s, BIPA.translate(s, DOLGO))
sc_analysis.append(sc)
except: # noqa
print(segments)
raise
# compute general errors; this loop must take place outside the
# following one because the code for computing single errors (either
# in `bipa_analysis` or in `soundclass_analysis`) is unnecessarily
# complicated
for sound_bipa, sound_class in zip(bipa_analysis, sc_analysis):
if isinstance(sound_bipa, pyclts.models.UnknownSound) or sound_class == '?':
analysis.general_errors += 1
# iterate over the segments and analyses, updating counts of occurrences
# and specific errors
for segment, sound_bipa, sound_class in zip(segments, bipa_analysis, sc_analysis):
# update the segment count
analysis.segments.update([segment])
# add an error if we got an unknown sound, otherwise just append
# the `replacements` dictionary
if isinstance(sound_bipa, pyclts.models.UnknownSound):
analysis.bipa_errors.add(segment)
else:
analysis.replacements[sound_bipa.source].add(sound_bipa.__unicode__())
# update sound class errors, if any
if sound_class == '?':
analysis.sclass_errors.add(segment)
return segments, bipa_analysis, sc_analysis, analysis | [
"def",
"analyze",
"(",
"segments",
",",
"analysis",
",",
"lookup",
"=",
"dict",
"(",
"bipa",
"=",
"{",
"}",
",",
"dolgo",
"=",
"{",
"}",
")",
")",
":",
"# raise a ValueError in case of empty segments/strings",
"if",
"not",
"segments",
":",
"raise",
"ValueError",
"(",
"'Empty sequence.'",
")",
"# test if at least one element in `segments` has information",
"# (helps to catch really badly formed input, such as ['\\n']",
"if",
"not",
"[",
"segment",
"for",
"segment",
"in",
"segments",
"if",
"segment",
".",
"strip",
"(",
")",
"]",
":",
"raise",
"ValueError",
"(",
"'No information in the sequence.'",
")",
"# build the phonologic and sound class analyses",
"try",
":",
"bipa_analysis",
",",
"sc_analysis",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"s",
"in",
"segments",
":",
"a",
"=",
"lookup",
"[",
"'bipa'",
"]",
".",
"get",
"(",
"s",
")",
"if",
"a",
"is",
"None",
":",
"a",
"=",
"lookup",
"[",
"'bipa'",
"]",
".",
"setdefault",
"(",
"s",
",",
"BIPA",
"[",
"s",
"]",
")",
"bipa_analysis",
".",
"append",
"(",
"a",
")",
"sc",
"=",
"lookup",
"[",
"'dolgo'",
"]",
".",
"get",
"(",
"s",
")",
"if",
"sc",
"is",
"None",
":",
"sc",
"=",
"lookup",
"[",
"'dolgo'",
"]",
".",
"setdefault",
"(",
"s",
",",
"BIPA",
".",
"translate",
"(",
"s",
",",
"DOLGO",
")",
")",
"sc_analysis",
".",
"append",
"(",
"sc",
")",
"except",
":",
"# noqa",
"print",
"(",
"segments",
")",
"raise",
"# compute general errors; this loop must take place outside the",
"# following one because the code for computing single errors (either",
"# in `bipa_analysis` or in `soundclass_analysis`) is unnecessary",
"# complicated",
"for",
"sound_bipa",
",",
"sound_class",
"in",
"zip",
"(",
"bipa_analysis",
",",
"sc_analysis",
")",
":",
"if",
"isinstance",
"(",
"sound_bipa",
",",
"pyclts",
".",
"models",
".",
"UnknownSound",
")",
"or",
"sound_class",
"==",
"'?'",
":",
"analysis",
".",
"general_errors",
"+=",
"1",
"# iterate over the segments and analyses, updating counts of occurrences",
"# and specific errors",
"for",
"segment",
",",
"sound_bipa",
",",
"sound_class",
"in",
"zip",
"(",
"segments",
",",
"bipa_analysis",
",",
"sc_analysis",
")",
":",
"# update the segment count",
"analysis",
".",
"segments",
".",
"update",
"(",
"[",
"segment",
"]",
")",
"# add an error if we got an unknown sound, otherwise just append",
"# the `replacements` dictionary",
"if",
"isinstance",
"(",
"sound_bipa",
",",
"pyclts",
".",
"models",
".",
"UnknownSound",
")",
":",
"analysis",
".",
"bipa_errors",
".",
"add",
"(",
"segment",
")",
"else",
":",
"analysis",
".",
"replacements",
"[",
"sound_bipa",
".",
"source",
"]",
".",
"add",
"(",
"sound_bipa",
".",
"__unicode__",
"(",
")",
")",
"# update sound class errors, if any",
"if",
"sound_class",
"==",
"'?'",
":",
"analysis",
".",
"sclass_errors",
".",
"add",
"(",
"segment",
")",
"return",
"segments",
",",
"bipa_analysis",
",",
"sc_analysis",
",",
"analysis"
] | Test a sequence for compatibility with CLPA and LingPy.
:param analysis: Pass a `TranscriptionAnalysis` instance for cumulative reporting. | [
"Test",
"a",
"sequence",
"for",
"compatibility",
"with",
"CLPA",
"and",
"LingPy",
"."
] | c28e7f122f20de1232623dd7003cb5b01535e581 | https://github.com/lexibank/pylexibank/blob/c28e7f122f20de1232623dd7003cb5b01535e581/src/pylexibank/transcription.py#L37-L94 | train |
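A hedged sketch for `analyze`; the docstring above asks for a `TranscriptionAnalysis` instance, whose exact import path is assumed here, and the segment list is an arbitrary example:

# class name taken from the docstring above; its import path is an assumption
from pylexibank.transcription import analyze, TranscriptionAnalysis

analysis = TranscriptionAnalysis()
segments, bipa_sounds, sound_classes, analysis = analyze(['t', 'a', 'k', 'i'], analysis)

print(sound_classes)                 # one Dolgopolsky sound class per segment
print(analysis.general_errors,       # segments unknown to BIPA or the sound-class map
      sorted(analysis.bipa_errors))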
tamasgal/km3pipe | km3pipe/mc.py | most_energetic | def most_energetic(df):
"""Grab most energetic particle from mc_tracks dataframe."""
idx = df.groupby(['event_id'])['energy'].transform(max) == df['energy']
return df[idx].reindex() | python | def most_energetic(df):
"""Grab most energetic particle from mc_tracks dataframe."""
idx = df.groupby(['event_id'])['energy'].transform(max) == df['energy']
return df[idx].reindex() | [
"def",
"most_energetic",
"(",
"df",
")",
":",
"idx",
"=",
"df",
".",
"groupby",
"(",
"[",
"'event_id'",
"]",
")",
"[",
"'energy'",
"]",
".",
"transform",
"(",
"max",
")",
"==",
"df",
"[",
"'energy'",
"]",
"return",
"df",
"[",
"idx",
"]",
".",
"reindex",
"(",
")"
] | Grab most energetic particle from mc_tracks dataframe. | [
"Grab",
"most",
"energetic",
"particle",
"from",
"mc_tracks",
"dataframe",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/mc.py#L101-L104 | train |
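A small pandas example for `most_energetic`; the track table below is fabricated:

import pandas as pd
from km3pipe.mc import most_energetic

mc_tracks = pd.DataFrame({
    'event_id': [0, 0, 1, 1],
    'energy': [10.0, 55.0, 7.5, 3.2],
})

# keeps, for every event_id, the row with the highest energy
print(most_energetic(mc_tracks))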
tamasgal/km3pipe | km3pipe/controlhost.py | Client._connect | def _connect(self):
"""Connect to JLigier"""
log.debug("Connecting to JLigier")
self.socket = socket.socket()
self.socket.connect((self.host, self.port)) | python | def _connect(self):
"""Connect to JLigier"""
log.debug("Connecting to JLigier")
self.socket = socket.socket()
self.socket.connect((self.host, self.port)) | [
"def",
"_connect",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Connecting to JLigier\"",
")",
"self",
".",
"socket",
"=",
"socket",
".",
"socket",
"(",
")",
"self",
".",
"socket",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")"
] | Connect to JLigier | [
"Connect",
"to",
"JLigier"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/controlhost.py#L124-L128 | train |