Each record below is one row of the dataset: a `repo | path | func_name | url | partition` line followed by the function's Python source with its docstring. The `language` column is `python` and the `partition` column is `train` for every row; the commit `sha` is embedded in each url.
JensRantil/rewind | rewind/server/eventstores.py | RotatedEventStore.rotate | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L785-L797 | train

```python
def rotate(self):
    """Rotate the files to disk.

    This is done by calling `store.close()` on each store, bumping the
    batchno and reopening the stores using their factories.
    """
    self._logger.info('Rotating data files. New batch number will be: %s',
                      self.batchno + 1)
    self.estore.close()
    self.estore = None
    self.batchno += 1
    self.estore = self._open_event_store()
```

"""Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories.
"""
self._logger.info('Rotating data files. New batch number will be: %s',
self.batchno + 1)
self.estore.close()
self.estore = None
self.batchno += 1
self.estore = self._open_event_store() | [
"def",
"rotate",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Rotating data files. New batch number will be: %s'",
",",
"self",
".",
"batchno",
"+",
"1",
")",
"self",
".",
"estore",
".",
"close",
"(",
")",
"self",
".",
"estore",
"=",
"None",
"self",
".",
"batchno",
"+=",
"1",
"self",
".",
"estore",
"=",
"self",
".",
"_open_event_store",
"(",
")"
] | Rotate the files to disk.
This is done by calling `store.close()` on each store, bumping the
batchno and reopening the stores using their factories. | [
"Rotate",
"the",
"files",
"to",
"disk",
"."
] | 7f645d20186c1db55cfe53a0310c9fd6292f91ea | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L785-L797 | train |
JensRantil/rewind | rewind/server/eventstores.py | RotatedEventStore._find_batch_containing_event | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L803-L823 | train

```python
def _find_batch_containing_event(self, uuid):
    """Find the batch number that contains a certain event.

    Parameters:
    uuid    -- the event uuid to search for.
    returns -- a batch number, or None if not found.
    """
    if self.estore.key_exists(uuid):
        # Reusing already opened DB if possible
        return self.batchno
    else:
        for batchno in range(self.batchno - 1, -1, -1):
            # Iterating backwards here because we are more likely to find
            # the event in a later archive than in an earlier one.
            db = self._open_event_store(batchno)
            with contextlib.closing(db):
                if db.key_exists(uuid):
                    return batchno
        return None
```

"""Find the batch number that contains a certain event.
Parameters:
uuid -- the event uuid to search for.
returns -- a batch number, or None if not found.
"""
if self.estore.key_exists(uuid):
# Reusing already opened DB if possible
return self.batchno
else:
for batchno in range(self.batchno - 1, -1, -1):
# Iterating backwards here because we are more likely to find
# the event in an later archive, than earlier.
db = self._open_event_store(batchno)
with contextlib.closing(db):
if db.key_exists(uuid):
return batchno
return None | [
"def",
"_find_batch_containing_event",
"(",
"self",
",",
"uuid",
")",
":",
"if",
"self",
".",
"estore",
".",
"key_exists",
"(",
"uuid",
")",
":",
"# Reusing already opened DB if possible",
"return",
"self",
".",
"batchno",
"else",
":",
"for",
"batchno",
"in",
"range",
"(",
"self",
".",
"batchno",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"# Iterating backwards here because we are more likely to find",
"# the event in an later archive, than earlier.",
"db",
"=",
"self",
".",
"_open_event_store",
"(",
"batchno",
")",
"with",
"contextlib",
".",
"closing",
"(",
"db",
")",
":",
"if",
"db",
".",
"key_exists",
"(",
"uuid",
")",
":",
"return",
"batchno",
"return",
"None"
] | Find the batch number that contains a certain event.
Parameters:
uuid -- the event uuid to search for.
returns -- a batch number, or None if not found. | [
"Find",
"the",
"batch",
"number",
"that",
"contains",
"a",
"certain",
"event",
"."
] | 7f645d20186c1db55cfe53a0310c9fd6292f91ea | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L803-L823 | train |
JensRantil/rewind | rewind/server/eventstores.py | SyncedRotationEventStores.from_config | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L915-L951 | train

```python
def from_config(config, **options):
    """Instantiate a `SyncedRotationEventStores` from config.

    Parameters:
    config    -- the configuration file options read from file(s).
    **options -- various options given to the specific event store. A
                 warning will be logged for every extra non-recognized
                 option. The only required key is 'storage-backends'.
    returns   -- a newly instantiated `SyncedRotationEventStores`.
    """
    required_args = ('storage-backends',)
    optional_args = {'events_per_batch': 25000}
    rconfig.check_config_options("SyncedRotationEventStores",
                                 required_args,
                                 tuple(optional_args.keys()), options)

    if "events_per_batch" in options:
        events_per_batch = int(options["events_per_batch"])
    else:
        events_per_batch = optional_args["events_per_batch"]

    estore = SyncedRotationEventStores(events_per_batch)
    for section in options['storage-backends'].split(' '):
        try:
            substore = rconfig.construct_eventstore(config, section)
            estore.add_rotated_store(substore)
        except Exception:
            _logger.exception('Could not instantiate substore from'
                              ' section %s', section)
            estore.close()
            raise
    return estore
```

"""Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `SyncedRotationEventStores`.
"""
required_args = ('storage-backends',)
optional_args = {'events_per_batch': 25000}
rconfig.check_config_options("SyncedRotationEventStores",
required_args,
tuple(optional_args.keys()), options)
if "events_per_batch" in options:
events_per_batch = int(options["events_per_batch"])
else:
events_per_batch = optional_args["events_per_batch"]
estore = SyncedRotationEventStores(events_per_batch)
for section in options['storage-backends'].split(' '):
try:
substore = rconfig.construct_eventstore(config, section)
estore.add_rotated_store(substore)
except Exception as e:
_logger.exception('Could not instantiate substore from'
' section %s', section)
estore.close()
raise
return estore | [
"def",
"from_config",
"(",
"config",
",",
"*",
"*",
"options",
")",
":",
"required_args",
"=",
"(",
"'storage-backends'",
",",
")",
"optional_args",
"=",
"{",
"'events_per_batch'",
":",
"25000",
"}",
"rconfig",
".",
"check_config_options",
"(",
"\"SyncedRotationEventStores\"",
",",
"required_args",
",",
"tuple",
"(",
"optional_args",
".",
"keys",
"(",
")",
")",
",",
"options",
")",
"if",
"\"events_per_batch\"",
"in",
"options",
":",
"events_per_batch",
"=",
"int",
"(",
"options",
"[",
"\"events_per_batch\"",
"]",
")",
"else",
":",
"events_per_batch",
"=",
"optional_args",
"[",
"\"events_per_batch\"",
"]",
"estore",
"=",
"SyncedRotationEventStores",
"(",
"events_per_batch",
")",
"for",
"section",
"in",
"options",
"[",
"'storage-backends'",
"]",
".",
"split",
"(",
"' '",
")",
":",
"try",
":",
"substore",
"=",
"rconfig",
".",
"construct_eventstore",
"(",
"config",
",",
"section",
")",
"estore",
".",
"add_rotated_store",
"(",
"substore",
")",
"except",
"Exception",
"as",
"e",
":",
"_logger",
".",
"exception",
"(",
"'Could not instantiate substore from'",
"' section %s'",
",",
"section",
")",
"estore",
".",
"close",
"(",
")",
"raise",
"return",
"estore"
] | Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `SyncedRotationEventStores`. | [
"Instantiate",
"an",
"SyncedRotationEventStores",
"from",
"config",
"."
] | 7f645d20186c1db55cfe53a0310c9fd6292f91ea | https://github.com/JensRantil/rewind/blob/7f645d20186c1db55cfe53a0310c9fd6292f91ea/rewind/server/eventstores.py#L915-L951 | train |
tehmaze/natural | natural/data.py | hexdump | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L39-L75 | train

```python
def hexdump(stream):
    '''
    Display stream contents in hexadecimal and ASCII format. The ``stream``
    specified must either be a file-like object that supports the ``read``
    method to receive bytes, or it can be a string.

    To dump a file::

        >>> hexdump(file(filename))  # doctest: +SKIP

    Or to dump stdin::

        >>> import sys
        >>> hexdump(sys.stdin)  # doctest: +SKIP

    :param stream: stream input
    '''
    if isinstance(stream, six.string_types):
        # wrap plain strings so they can be read in 16-byte chunks
        stream = BytesIO(stream)
    row = 0
    while True:
        data = stream.read(16)
        if not data:
            break
        # NOTE: str.encode('hex') works on Python 2 only; on Python 3 the
        # equivalent is binascii.hexlify(data).decode('ascii')
        hextets = data.encode('hex').ljust(32)
        canonical = printable(data)
        print('%08x %s %s |%s|' % (
            row * 16,
            ' '.join(hextets[x:x + 2] for x in range(0x00, 0x10, 2)),
            ' '.join(hextets[x:x + 2] for x in range(0x10, 0x20, 2)),
            canonical,
        ))
        row += 1
```

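A quick usage sketch, assuming `hexdump`, the `printable` helper shown next, and the module constant `PRINTABLE` from `natural/data.py` are in scope, and assuming Python 2 semantics (where `str.encode('hex')` is available):

```python
# Each output row shows the offset, sixteen hex bytes in two groups of
# eight, and a printable-ASCII gutter between pipes.
hexdump('hello world, this is hexdump\n')
```
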
tehmaze/natural | natural/data.py | printable | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L78-L97 | train

```python
def printable(sequence):
    '''
    Return a printable string from the input ``sequence``

    :param sequence: byte or string sequence

    >>> print(printable('\\x1b[1;34mtest\\x1b[0m'))
    .[1;34mtest.[0m
    >>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........'
    True
    >>> print(printable('12345678'))
    12345678
    >>> print(printable('testing\\n'))
    testing.
    '''
    # ``PRINTABLE`` is a module-level constant listing the characters that
    # may be shown verbatim; anything else is replaced by a dot
    return ''.join(c if c in PRINTABLE else '.' for c in sequence)
```

tehmaze/natural | natural/data.py | sparkline | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/data.py#L100-L117 | train

```python
def sparkline(data):
    '''
    Return a spark line for the given data set.

    :value data: sequence of numeric values

    >>> print sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6])  # doctest: +SKIP
    ▁▂▃▅▆█▆▅▃▁▆█
    '''
    min_value = float(min(data))
    max_value = float(max(data))
    # size of one bucket; note this raises ZeroDivisionError when every
    # value in ``data`` is equal (max == min)
    steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
    return ''.join([
        SPARKCHAR[int((float(value) - min_value) / steps)]
        for value in data
    ])
```

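A self-contained sketch of the same logic; the `SPARKCHAR` value here is an assumption (eight Unicode block elements, consistent with the doctest above) and may differ from the constant the module actually defines:

```python
SPARKCHAR = '▁▂▃▄▅▆▇█'  # assumed: one glyph per level, lowest to highest


def sparkline(data):
    # same body as above, repeated so this sketch runs on its own
    min_value = float(min(data))
    max_value = float(max(data))
    steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
    return ''.join(SPARKCHAR[int((float(value) - min_value) / steps)]
                   for value in data)


print(sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6]))  # ▁▂▃▅▆█▆▅▃▁▆█
```
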
yeraydiazdiaz/lunr.py | lunr/languages/stemmer.py | get_language_stemmer | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L1-L10 | train

```python
def get_language_stemmer(language):
    """Retrieves the SnowballStemmer for a particular language.

    Args:
        language (str): ISO-639-1 code of the language.
    """
    from lunr.languages import SUPPORTED_LANGUAGES
    from nltk.stem.snowball import SnowballStemmer

    return SnowballStemmer(SUPPORTED_LANGUAGES[language])
```

"""Retrieves the SnowballStemmer for a particular language.
Args:
language (str): ISO-639-1 code of the language.
"""
from lunr.languages import SUPPORTED_LANGUAGES
from nltk.stem.snowball import SnowballStemmer
return SnowballStemmer(SUPPORTED_LANGUAGES[language]) | [
"def",
"get_language_stemmer",
"(",
"language",
")",
":",
"from",
"lunr",
".",
"languages",
"import",
"SUPPORTED_LANGUAGES",
"from",
"nltk",
".",
"stem",
".",
"snowball",
"import",
"SnowballStemmer",
"return",
"SnowballStemmer",
"(",
"SUPPORTED_LANGUAGES",
"[",
"language",
"]",
")"
] | Retrieves the SnowballStemmer for a particular language.
Args:
language (str): ISO-639-1 code of the language. | [
"Retrieves",
"the",
"SnowballStemmer",
"for",
"a",
"particular",
"language",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L1-L10 | train |
yeraydiazdiaz/lunr.py | lunr/languages/stemmer.py | nltk_stemmer | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L13-L27 | train

```python
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrapper around a NLTK SnowballStemmer, which includes stop words for
    each language.

    Args:
        stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
        token (lunr.Token): The token to stem.
        i (int): The index of the token in a set.
        tokens (list): A list of tokens representing the set.
    """

    def wrapped_stem(token, metadata=None):
        return stemmer.stem(token)

    return token.update(wrapped_stem)
```

"""Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set.
"""
def wrapped_stem(token, metadata=None):
return stemmer.stem(token)
return token.update(wrapped_stem) | [
"def",
"nltk_stemmer",
"(",
"stemmer",
",",
"token",
",",
"i",
"=",
"None",
",",
"tokens",
"=",
"None",
")",
":",
"def",
"wrapped_stem",
"(",
"token",
",",
"metadata",
"=",
"None",
")",
":",
"return",
"stemmer",
".",
"stem",
"(",
"token",
")",
"return",
"token",
".",
"update",
"(",
"wrapped_stem",
")"
] | Wrapper around a NLTK SnowballStemmer, which includes stop words for
each language.
Args:
stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
token (lunr.Token): The token to stem.
i (int): The index of the token in a set.
tokens (list): A list of tokens representing the set. | [
"Wrapper",
"around",
"a",
"NLTK",
"SnowballStemmer",
"which",
"includes",
"stop",
"words",
"for",
"each",
"language",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L13-L27 | train |
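A hedged sketch of how these two helpers are presumably combined: `functools.partial` binds the stemmer so the result has the `(token, i, tokens)` signature a lunr pipeline expects. It assumes `lunr` (with its language extras) and `nltk` are installed, and that `Token` lives at `lunr.token`:

```python
from functools import partial

from lunr.languages.stemmer import get_language_stemmer, nltk_stemmer
from lunr.token import Token

stemmer = get_language_stemmer('es')       # Spanish SnowballStemmer
stem_es = partial(nltk_stemmer, stemmer)   # now matches (token, i, tokens)

print(stem_es(Token('corriendo'), 0, []))  # a stemmed token, e.g. 'corr'
```
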
Othernet-Project/sqlize | sqlize/builder.py | is_seq | https://github.com/Othernet-Project/sqlize/blob/f32cb38e4245800ece339b998ae6647c207a8ca5/sqlize/builder.py#L26-L32 | train

```python
def is_seq(obj):
    """ Returns True if object is not a string but is iterable """
    # ``basestring`` assumes Python 2, or a Python 3 compatibility alias
    # defined elsewhere in the module
    if not hasattr(obj, '__iter__'):
        return False
    if isinstance(obj, basestring):
        return False
    return True
```

""" Returns True if object is not a string but is iterable """
if not hasattr(obj, '__iter__'):
return False
if isinstance(obj, basestring):
return False
return True | [
"def",
"is_seq",
"(",
"obj",
")",
":",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'__iter__'",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"obj",
",",
"basestring",
")",
":",
"return",
"False",
"return",
"True"
] | Returns True if object is not a string but is iterable | [
"Returns",
"True",
"if",
"object",
"is",
"not",
"a",
"string",
"but",
"is",
"iterable"
] | f32cb38e4245800ece339b998ae6647c207a8ca5 | https://github.com/Othernet-Project/sqlize/blob/f32cb38e4245800ece339b998ae6647c207a8ca5/sqlize/builder.py#L26-L32 | train |
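A few checks that follow directly from the definition, assuming `is_seq` is in scope (on Python 3 an alias such as `basestring = str` would be needed first):

```python
assert is_seq([1, 2, 3])   # lists are iterable and not strings
assert is_seq({'a': 1})    # so are dicts and tuples
assert not is_seq('abc')   # strings are explicitly excluded
assert not is_seq(42)      # ints have no __iter__
```
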
jaraco/jaraco.mongodb | jaraco/mongodb/migration.py | Manager.register | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L69-L76 | train

```python
def register(cls, func):
    """
    Decorate a migration function with this method
    to make it available for migrating cases.
    """
    cls._add_version_info(func)
    cls._upgrade_funcs.add(func)
    return func
```

"""
Decorate a migration function with this method
to make it available for migrating cases.
"""
cls._add_version_info(func)
cls._upgrade_funcs.add(func)
return func | [
"def",
"register",
"(",
"cls",
",",
"func",
")",
":",
"cls",
".",
"_add_version_info",
"(",
"func",
")",
"cls",
".",
"_upgrade_funcs",
".",
"add",
"(",
"func",
")",
"return",
"func"
] | Decorate a migration function with this method
to make it available for migrating cases. | [
"Decorate",
"a",
"migration",
"function",
"with",
"this",
"method",
"to",
"make",
"it",
"available",
"for",
"migrating",
"cases",
"."
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L69-L76 | train |
jaraco/jaraco.mongodb | jaraco/mongodb/migration.py | Manager._add_version_info | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L79-L87 | train

```python
def _add_version_info(func):
    """
    Add .source and .target attributes to the registered function.
    """
    pattern = r'v(?P<source>\d+)_to_(?P<target>\d+)$'
    match = re.match(pattern, func.__name__)
    if not match:
        raise ValueError("migration function name must match " + pattern)
    func.source, func.target = map(int, match.groups())
```

"""
Add .source and .target attributes to the registered function.
"""
pattern = r'v(?P<source>\d+)_to_(?P<target>\d+)$'
match = re.match(pattern, func.__name__)
if not match:
raise ValueError("migration function name must match " + pattern)
func.source, func.target = map(int, match.groups()) | [
"def",
"_add_version_info",
"(",
"func",
")",
":",
"pattern",
"=",
"r'v(?P<source>\\d+)_to_(?P<target>\\d+)$'",
"match",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"func",
".",
"__name__",
")",
"if",
"not",
"match",
":",
"raise",
"ValueError",
"(",
"\"migration function name must match \"",
"+",
"pattern",
")",
"func",
".",
"source",
",",
"func",
".",
"target",
"=",
"map",
"(",
"int",
",",
"match",
".",
"groups",
"(",
")",
")"
] | Add .source and .target attributes to the registered function. | [
"Add",
".",
"source",
"and",
".",
"target",
"attributes",
"to",
"the",
"registered",
"function",
"."
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L79-L87 | train |
jaraco/jaraco.mongodb | jaraco/mongodb/migration.py | Manager.migrate_doc | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L89-L99 | train

```python
def migrate_doc(self, doc):
    """
    Migrate the doc from its current version to the target version
    and return it.
    """
    orig_ver = doc.get(self.version_attribute_name, 0)
    funcs = self._get_migrate_funcs(orig_ver, self.target_version)
    for func in funcs:
        func(self, doc)
        doc[self.version_attribute_name] = func.target
    return doc
```

"""
Migrate the doc from its current version to the target version
and return it.
"""
orig_ver = doc.get(self.version_attribute_name, 0)
funcs = self._get_migrate_funcs(orig_ver, self.target_version)
for func in funcs:
func(self, doc)
doc[self.version_attribute_name] = func.target
return doc | [
"def",
"migrate_doc",
"(",
"self",
",",
"doc",
")",
":",
"orig_ver",
"=",
"doc",
".",
"get",
"(",
"self",
".",
"version_attribute_name",
",",
"0",
")",
"funcs",
"=",
"self",
".",
"_get_migrate_funcs",
"(",
"orig_ver",
",",
"self",
".",
"target_version",
")",
"for",
"func",
"in",
"funcs",
":",
"func",
"(",
"self",
",",
"doc",
")",
"doc",
"[",
"self",
".",
"version_attribute_name",
"]",
"=",
"func",
".",
"target",
"return",
"doc"
] | Migrate the doc from its current version to the target version
and return it. | [
"Migrate",
"the",
"doc",
"from",
"its",
"current",
"version",
"to",
"the",
"target",
"version",
"and",
"return",
"it",
"."
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L89-L99 | train |
jaraco/jaraco.mongodb | jaraco/mongodb/migration.py | Manager._get_func | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L132-L145 | train

```python
def _get_func(cls, source_ver, target_ver):
    """
    Return exactly one function to convert from source to target
    """
    matches = (
        func for func in cls._upgrade_funcs
        if func.source == source_ver and func.target == target_ver
    )
    try:
        match, = matches
    except ValueError:
        raise ValueError(
            f"No migration from {source_ver} to {target_ver}")
    return match
```

"""
Return exactly one function to convert from source to target
"""
matches = (
func for func in cls._upgrade_funcs
if func.source == source_ver and func.target == target_ver
)
try:
match, = matches
except ValueError:
raise ValueError(
f"No migration from {source_ver} to {target_ver}")
return match | [
"def",
"_get_func",
"(",
"cls",
",",
"source_ver",
",",
"target_ver",
")",
":",
"matches",
"=",
"(",
"func",
"for",
"func",
"in",
"cls",
".",
"_upgrade_funcs",
"if",
"func",
".",
"source",
"==",
"source_ver",
"and",
"func",
".",
"target",
"==",
"target_ver",
")",
"try",
":",
"match",
",",
"=",
"matches",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"f\"No migration from {source_ver} to {target_ver}\"",
")",
"return",
"match"
] | Return exactly one function to convert from source to target | [
"Return",
"exactly",
"one",
"function",
"to",
"convert",
"from",
"source",
"to",
"target"
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/migration.py#L132-L145 | train |
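A hedged end-to-end sketch of the `Manager` API above. The subclass attributes (`version_attribute_name`, `target_version`, a fresh `_upgrade_funcs` registry) and the no-argument constructor are assumptions inferred from the methods shown, not confirmed defaults of `jaraco.mongodb.migration.Manager`:

```python
from jaraco.mongodb.migration import Manager


class DocManager(Manager):
    version_attribute_name = 'version'  # assumed attribute name
    target_version = 2                  # assumed attribute name
    _upgrade_funcs = set()              # assumed per-subclass registry


@DocManager.register
def v0_to_1(manager, doc):
    doc['name'] = doc.pop('title', '')


@DocManager.register
def v1_to_2(manager, doc):
    doc['name'] = doc['name'].strip()


# each step runs in order and stamps the document with its target version
print(DocManager().migrate_doc({'title': ' widget '}))
# -> {'name': 'widget', 'version': 2}
```
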
senaite/senaite.api | src/senaite/api/__init__.py | get_uid | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L366-L378 | train

```python
def get_uid(brain_or_object):
    """Get the Plone UID for this object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Plone UID
    :rtype: string
    """
    if is_portal(brain_or_object):
        return '0'
    if is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID"):
        return brain_or_object.UID
    return get_object(brain_or_object).UID()
```

"""Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string
"""
if is_portal(brain_or_object):
return '0'
if is_brain(brain_or_object) and base_hasattr(brain_or_object, "UID"):
return brain_or_object.UID
return get_object(brain_or_object).UID() | [
"def",
"get_uid",
"(",
"brain_or_object",
")",
":",
"if",
"is_portal",
"(",
"brain_or_object",
")",
":",
"return",
"'0'",
"if",
"is_brain",
"(",
"brain_or_object",
")",
"and",
"base_hasattr",
"(",
"brain_or_object",
",",
"\"UID\"",
")",
":",
"return",
"brain_or_object",
".",
"UID",
"return",
"get_object",
"(",
"brain_or_object",
")",
".",
"UID",
"(",
")"
] | Get the Plone UID for this object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Plone UID
:rtype: string | [
"Get",
"the",
"Plone",
"UID",
"for",
"this",
"object"
] | c79c53abcbe6e3a5ab3ced86d2f455275efa20cf | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L366-L378 | train |
senaite/senaite.api | src/senaite/api/__init__.py | get_icon | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L394-L417 | train

```python
def get_icon(brain_or_object, html_tag=True):
    """Get the icon of the content object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param html_tag: A value of 'True' returns the HTML tag, else the image url
    :type html_tag: bool
    :returns: HTML '<img>' tag if 'html_tag' is True else the image url
    :rtype: string
    """
    # Manual approach, because `plone.app.layout.getIcon` does not work
    # reliably for Bika contents coming from catalogs other than the
    # `portal_catalog`
    portal_types = get_tool("portal_types")
    fti = portal_types.getTypeInfo(brain_or_object.portal_type)
    icon = fti.getIcon()
    if not icon:
        return ""
    url = "%s/%s" % (get_url(get_portal()), icon)
    if not html_tag:
        return url
    tag = '<img width="16" height="16" src="{url}" title="{title}" />'.format(
        url=url, title=get_title(brain_or_object))
    return tag
```

"""Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string
"""
# Manual approach, because `plone.app.layout.getIcon` does not reliable
# work for Bika Contents coming from other catalogs than the
# `portal_catalog`
portal_types = get_tool("portal_types")
fti = portal_types.getTypeInfo(brain_or_object.portal_type)
icon = fti.getIcon()
if not icon:
return ""
url = "%s/%s" % (get_url(get_portal()), icon)
if not html_tag:
return url
tag = '<img width="16" height="16" src="{url}" title="{title}" />'.format(
url=url, title=get_title(brain_or_object))
return tag | [
"def",
"get_icon",
"(",
"brain_or_object",
",",
"html_tag",
"=",
"True",
")",
":",
"# Manual approach, because `plone.app.layout.getIcon` does not reliable",
"# work for Bika Contents coming from other catalogs than the",
"# `portal_catalog`",
"portal_types",
"=",
"get_tool",
"(",
"\"portal_types\"",
")",
"fti",
"=",
"portal_types",
".",
"getTypeInfo",
"(",
"brain_or_object",
".",
"portal_type",
")",
"icon",
"=",
"fti",
".",
"getIcon",
"(",
")",
"if",
"not",
"icon",
":",
"return",
"\"\"",
"url",
"=",
"\"%s/%s\"",
"%",
"(",
"get_url",
"(",
"get_portal",
"(",
")",
")",
",",
"icon",
")",
"if",
"not",
"html_tag",
":",
"return",
"url",
"tag",
"=",
"'<img width=\"16\" height=\"16\" src=\"{url}\" title=\"{title}\" />'",
".",
"format",
"(",
"url",
"=",
"url",
",",
"title",
"=",
"get_title",
"(",
"brain_or_object",
")",
")",
"return",
"tag"
] | Get the icon of the content object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param html_tag: A value of 'True' returns the HTML tag, else the image url
:type html_tag: bool
:returns: HTML '<img>' tag if 'html_tag' is True else the image url
:rtype: string | [
"Get",
"the",
"icon",
"of",
"the",
"content",
"object"
] | c79c53abcbe6e3a5ab3ced86d2f455275efa20cf | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L394-L417 | train |
senaite/senaite.api | src/senaite/api/__init__.py | get_review_history | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L658-L681 | train

```python
def get_review_history(brain_or_object, rev=True):
    """Get the review history for the given brain or context.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Workflow history
    :rtype: [{}, ...]
    """
    obj = get_object(brain_or_object)
    review_history = []
    try:
        workflow = get_tool("portal_workflow")
        review_history = workflow.getInfoFor(obj, 'review_history')
    except WorkflowException as e:
        message = str(e)
        logger.error("Cannot retrieve review_history on {}: {}".format(
            obj, message))
    if not isinstance(review_history, (list, tuple)):
        logger.error("get_review_history: expected list, received {}".format(
            review_history))
        review_history = []
    if rev is True:
        review_history.reverse()
    return review_history
```

"""Get the review history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: [{}, ...]
"""
obj = get_object(brain_or_object)
review_history = []
try:
workflow = get_tool("portal_workflow")
review_history = workflow.getInfoFor(obj, 'review_history')
except WorkflowException as e:
message = str(e)
logger.error("Cannot retrieve review_history on {}: {}".format(
obj, message))
if not isinstance(review_history, (list, tuple)):
logger.error("get_review_history: expected list, recieved {}".format(
review_history))
review_history = []
if rev is True:
review_history.reverse()
return review_history | [
"def",
"get_review_history",
"(",
"brain_or_object",
",",
"rev",
"=",
"True",
")",
":",
"obj",
"=",
"get_object",
"(",
"brain_or_object",
")",
"review_history",
"=",
"[",
"]",
"try",
":",
"workflow",
"=",
"get_tool",
"(",
"\"portal_workflow\"",
")",
"review_history",
"=",
"workflow",
".",
"getInfoFor",
"(",
"obj",
",",
"'review_history'",
")",
"except",
"WorkflowException",
"as",
"e",
":",
"message",
"=",
"str",
"(",
"e",
")",
"logger",
".",
"error",
"(",
"\"Cannot retrieve review_history on {}: {}\"",
".",
"format",
"(",
"obj",
",",
"message",
")",
")",
"if",
"not",
"isinstance",
"(",
"review_history",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"logger",
".",
"error",
"(",
"\"get_review_history: expected list, recieved {}\"",
".",
"format",
"(",
"review_history",
")",
")",
"review_history",
"=",
"[",
"]",
"if",
"rev",
"is",
"True",
":",
"review_history",
".",
"reverse",
"(",
")",
"return",
"review_history"
] | Get the review history for the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Workflow history
:rtype: [{}, ...] | [
"Get",
"the",
"review",
"history",
"for",
"the",
"given",
"brain",
"or",
"context",
"."
] | c79c53abcbe6e3a5ab3ced86d2f455275efa20cf | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L658-L681 | train |
senaite/senaite.api | src/senaite/api/__init__.py | get_cancellation_status | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L759-L772 | train

```python
def get_cancellation_status(brain_or_object, default="active"):
    """Get the `cancellation_state` of an object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Value of the cancellation_state variable
    :rtype: String
    """
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "cancellation_state", default)
    workflows = get_workflows_for(brain_or_object)
    if 'bika_cancellation_workflow' not in workflows:
        return default
    return get_workflow_status_of(brain_or_object, 'cancellation_state')
```

"""Get the `cancellation_state` of an object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Value of the review_status variable
:rtype: String
"""
if is_brain(brain_or_object):
return getattr(brain_or_object, "cancellation_state", default)
workflows = get_workflows_for(brain_or_object)
if 'bika_cancellation_workflow' not in workflows:
return default
return get_workflow_status_of(brain_or_object, 'cancellation_state') | [
"def",
"get_cancellation_status",
"(",
"brain_or_object",
",",
"default",
"=",
"\"active\"",
")",
":",
"if",
"is_brain",
"(",
"brain_or_object",
")",
":",
"return",
"getattr",
"(",
"brain_or_object",
",",
"\"cancellation_state\"",
",",
"default",
")",
"workflows",
"=",
"get_workflows_for",
"(",
"brain_or_object",
")",
"if",
"'bika_cancellation_workflow'",
"not",
"in",
"workflows",
":",
"return",
"default",
"return",
"get_workflow_status_of",
"(",
"brain_or_object",
",",
"'cancellation_state'",
")"
] | Get the `cancellation_state` of an object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Value of the review_status variable
:rtype: String | [
"Get",
"the",
"cancellation_state",
"of",
"an",
"object"
] | c79c53abcbe6e3a5ab3ced86d2f455275efa20cf | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L759-L772 | train |
senaite/senaite.api | src/senaite/api/__init__.py | get_inactive_status | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L792-L805 | train

```python
def get_inactive_status(brain_or_object, default="active"):
    """Get the `inactive_state` of an object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Value of the inactive_state variable
    :rtype: String
    """
    if is_brain(brain_or_object):
        return getattr(brain_or_object, "inactive_state", default)
    workflows = get_workflows_for(brain_or_object)
    if 'bika_inactive_workflow' not in workflows:
        return default
    return get_workflow_status_of(brain_or_object, 'inactive_state')
```

"""Get the `cancellation_state` of an objct
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Value of the review_status variable
:rtype: String
"""
if is_brain(brain_or_object):
return getattr(brain_or_object, "inactive_state", default)
workflows = get_workflows_for(brain_or_object)
if 'bika_inactive_workflow' not in workflows:
return default
return get_workflow_status_of(brain_or_object, 'inactive_state') | [
"def",
"get_inactive_status",
"(",
"brain_or_object",
",",
"default",
"=",
"\"active\"",
")",
":",
"if",
"is_brain",
"(",
"brain_or_object",
")",
":",
"return",
"getattr",
"(",
"brain_or_object",
",",
"\"inactive_state\"",
",",
"default",
")",
"workflows",
"=",
"get_workflows_for",
"(",
"brain_or_object",
")",
"if",
"'bika_inactive_workflow'",
"not",
"in",
"workflows",
":",
"return",
"default",
"return",
"get_workflow_status_of",
"(",
"brain_or_object",
",",
"'inactive_state'",
")"
] | Get the `cancellation_state` of an objct
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Value of the review_status variable
:rtype: String | [
"Get",
"the",
"cancellation_state",
"of",
"an",
"objct"
] | c79c53abcbe6e3a5ab3ced86d2f455275efa20cf | https://github.com/senaite/senaite.api/blob/c79c53abcbe6e3a5ab3ced86d2f455275efa20cf/src/senaite/api/__init__.py#L792-L805 | train |
wroberts/fsed | fsed/fsed.py | set_log_level | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L23-L41 | train

```python
def set_log_level(verbose, quiet):
    '''
    Sets the logging level of the script based on command line options.

    Arguments:
    - `verbose`:
    - `quiet`:
    '''
    if quiet:
        verbose = -1
    if verbose < 0:
        verbose = logging.CRITICAL
    elif verbose == 0:
        verbose = logging.WARNING
    elif verbose == 1:
        verbose = logging.INFO
    elif 1 < verbose:
        verbose = logging.DEBUG
    LOGGER.setLevel(verbose)
```

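A quick check of the mapping, assuming `fsed` is installed and that the module logger is importable as `fsed.fsed.LOGGER`:

```python
import logging

from fsed.fsed import LOGGER, set_log_level

set_log_level(verbose=2, quiet=False)  # e.g. -vv on the command line
assert LOGGER.level == logging.DEBUG

set_log_level(verbose=1, quiet=True)   # --quiet overrides --verbose
assert LOGGER.level == logging.CRITICAL
```
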
wroberts/fsed | fsed/fsed.py | detect_pattern_format | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L43-L65 | train

```python
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    '''
    Automatically detects the pattern file format, and determines
    whether the Aho-Corasick string matching should pay attention to
    word boundaries or not.

    Arguments:
    - `pattern_filename`:
    - `encoding`:
    - `on_word_boundaries`:
    '''
    tsv = True
    boundaries = on_word_boundaries
    with open_file(pattern_filename) as input_file:
        for line in input_file:
            line = line.decode(encoding)
            if line.count('\t') != 1:
                tsv = False
            if '\\b' in line:
                boundaries = True
            if boundaries and not tsv:
                break
    return tsv, boundaries
```

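A hedged sketch of format detection on a small pattern file (assumes `fsed` is installed; the temporary file is closed before `detect_pattern_format` reopens it by name):

```python
import tempfile

from fsed.fsed import detect_pattern_format

with tempfile.NamedTemporaryFile('w', delete=False) as fp:
    fp.write('colour\tcolor\n\\bcat\\b\tdog\n')

tsv, boundaries = detect_pattern_format(fp.name, 'utf-8', False)
print(tsv, boundaries)  # True True: two-column lines, and \b markers present
```
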
wroberts/fsed | fsed/fsed.py | sub_escapes | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L67-L82 | train

```python
def sub_escapes(sval):
    '''
    Process escaped characters in ``sval``.

    Arguments:
    - `sval`:
    '''
    sval = sval.replace('\\a', '\a')
    # \b is deliberately mapped to NUL, the internal word-boundary marker
    sval = sval.replace('\\b', '\x00')
    sval = sval.replace('\\f', '\f')
    sval = sval.replace('\\n', '\n')
    sval = sval.replace('\\r', '\r')
    sval = sval.replace('\\t', '\t')
    sval = sval.replace('\\v', '\v')
    sval = sval.replace('\\\\', '\\')
    return sval
```

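Two checks that follow from the replacement table above, assuming `sub_escapes` is in scope; the NUL output for `\b` is presumably what `boundary_transform` later consumes as its word-boundary marker:

```python
assert sub_escapes(r'foo\tbar\n') == 'foo\tbar\n'  # ordinary C-style escapes
assert sub_escapes(r'\bcat\b') == '\x00cat\x00'    # \b becomes the NUL marker
```
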
wroberts/fsed | fsed/fsed.py | build_trie | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L84-L148 | train

```python
def build_trie(pattern_filename, pattern_format, encoding, on_word_boundaries):
    '''
    Constructs a finite state machine for performing string rewriting.

    Arguments:
    - `pattern_filename`:
    - `pattern_format`:
    - `encoding`:
    - `on_word_boundaries`:
    '''
    boundaries = on_word_boundaries
    if pattern_format == 'auto' or not on_word_boundaries:
        tsv, boundaries = detect_pattern_format(pattern_filename, encoding,
                                                on_word_boundaries)
        if pattern_format == 'auto':
            if tsv:
                pattern_format = 'tsv'
            else:
                pattern_format = 'sed'
    trie = fsed.ahocorasick.AhoCorasickTrie()
    num_candidates = 0
    with open_file(pattern_filename) as pattern_file:
        for lineno, line in enumerate(pattern_file):
            line = line.decode(encoding).rstrip('\n')
            if not line.strip():
                continue
            # decode the line
            if pattern_format == 'tsv':
                fields = line.split('\t')
                if len(fields) != 2:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in tab-separated format): {}').format(
                                        lineno, line))
                    continue
                before, after = fields
            elif pattern_format == 'sed':
                before = after = None
                line = line.lstrip()
                if line[0] == 's':
                    delim = line[1]
                    # delim might be a regex special character;
                    # escape it if necessary
                    if delim in '.^$*+?[](){}|\\':
                        delim = '\\' + delim
                    fields = re.split(r'(?<!\\){}'.format(delim), line)
                    if len(fields) == 4:
                        before, after = fields[1], fields[2]
                        before = re.sub(r'(?<!\\)\\{}'.format(delim), delim, before)
                        after = re.sub(r'(?<!\\)\\{}'.format(delim), delim, after)
                if before is None or after is None:
                    LOGGER.warning(('skipping line {} of pattern file (not '
                                    'in sed format): {}').format(lineno, line))
                    continue
            num_candidates += 1
            if on_word_boundaries and before != before.strip():
                LOGGER.warning(('before pattern on line {} has padded '
                                'whitespace; this may interact strangely '
                                'with the --words option: {}').format(
                                    lineno, line))
            before = sub_escapes(before)
            after = sub_escapes(after)
            if boundaries:
                before = fsed.ahocorasick.boundary_transform(before,
                                                             on_word_boundaries)
            trie[before] = after
    LOGGER.info('{} patterns loaded from {}'.format(num_candidates,
                                                    pattern_filename))
    return trie, boundaries
```

wroberts/fsed | fsed/fsed.py | warn_prefix_values | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L150-L160 | train

```python
def warn_prefix_values(trie):
    '''
    Prints warning messages for every node that has both a value and a
    longest_prefix.
    '''
    for current, _parent in trie.dfs():
        if current.has_value and current.longest_prefix is not None:
            LOGGER.warn(('pattern {} (value {}) is a superstring of pattern '
                         '{} (value {}) and will never be matched').format(
                             current.prefix, current.value,
                             current.longest_prefix.prefix,
                             current.longest_prefix.value))
```

wroberts/fsed | fsed/fsed.py | rewrite_str_with_trie | https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L162-L180 | train

```python
def rewrite_str_with_trie(sval, trie, boundaries=False, slow=False):
    '''
    Rewrites a string using the given trie object.

    Arguments:
    - `sval`:
    - `trie`:
    - `boundaries`:
    - `slow`:
    '''
    if boundaries:
        sval = fsed.ahocorasick.boundary_transform(sval)
    if slow:
        sval = trie.replace(sval)
    else:
        sval = trie.greedy_replace(sval)
    if boundaries:
        sval = ''.join(fsed.ahocorasick.boundary_untransform(sval))
    return sval
```

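Tying the fsed pieces together, a hedged end-to-end sketch (assumes `fsed` is installed; file handling simplified for illustration):

```python
import tempfile

from fsed.fsed import build_trie, rewrite_str_with_trie

# two-column (tsv) pattern file: "before<TAB>after" per line
with tempfile.NamedTemporaryFile('w', delete=False) as fp:
    fp.write('colour\tcolor\nfavourite\tfavorite\n')

trie, boundaries = build_trie(fp.name, 'auto', 'utf-8', on_word_boundaries=True)
print(rewrite_str_with_trie('my favourite colour', trie, boundaries))
# -> 'my favorite color'
```
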
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.register_function | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L34-L40 | train

```python
def register_function(cls, fn, label):
    """Register a function with the pipeline."""
    if label in cls.registered_functions:
        log.warning("Overwriting existing registered function %s", label)

    fn.label = label
    cls.registered_functions[fn.label] = fn
```

"""Register a function with the pipeline."""
if label in cls.registered_functions:
log.warning("Overwriting existing registered function %s", label)
fn.label = label
cls.registered_functions[fn.label] = fn | [
"def",
"register_function",
"(",
"cls",
",",
"fn",
",",
"label",
")",
":",
"if",
"label",
"in",
"cls",
".",
"registered_functions",
":",
"log",
".",
"warning",
"(",
"\"Overwriting existing registered function %s\"",
",",
"label",
")",
"fn",
".",
"label",
"=",
"label",
"cls",
".",
"registered_functions",
"[",
"fn",
".",
"label",
"]",
"=",
"fn"
] | Register a function with the pipeline. | [
"Register",
"a",
"function",
"with",
"the",
"pipeline",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L34-L40 | train |
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.load | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L43-L56 | train

```python
def load(cls, serialised):
    """Loads a previously serialised pipeline."""
    pipeline = cls()
    for fn_name in serialised:
        try:
            fn = cls.registered_functions[fn_name]
        except KeyError:
            raise BaseLunrException(
                "Cannot load unregistered function {}".format(fn_name)
            )
        else:
            pipeline.add(fn)

    return pipeline
```

"""Loads a previously serialised pipeline."""
pipeline = cls()
for fn_name in serialised:
try:
fn = cls.registered_functions[fn_name]
except KeyError:
raise BaseLunrException(
"Cannot load unregistered function ".format(fn_name)
)
else:
pipeline.add(fn)
return pipeline | [
"def",
"load",
"(",
"cls",
",",
"serialised",
")",
":",
"pipeline",
"=",
"cls",
"(",
")",
"for",
"fn_name",
"in",
"serialised",
":",
"try",
":",
"fn",
"=",
"cls",
".",
"registered_functions",
"[",
"fn_name",
"]",
"except",
"KeyError",
":",
"raise",
"BaseLunrException",
"(",
"\"Cannot load unregistered function \"",
".",
"format",
"(",
"fn_name",
")",
")",
"else",
":",
"pipeline",
".",
"add",
"(",
"fn",
")",
"return",
"pipeline"
] | Loads a previously serialised pipeline. | [
"Loads",
"a",
"previously",
"serialised",
"pipeline",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L43-L56 | train |
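Round-tripping through `load` (a sketch reusing the "dropShortTokens" registration from the previous example; `load` expects the list of labels a matching serialise step would produce, and unknown labels raise BaseLunrException per the code above):
pipeline = Pipeline.load(["dropShortTokens"])
print([fn.label for fn in pipeline._stack])  # ['dropShortTokens']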
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.add | def add(self, *args):
"""Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set
"""
for fn in args:
self.warn_if_function_not_registered(fn)
self._stack.append(fn) | python | def add(self, *args):
"""Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set
"""
for fn in args:
self.warn_if_function_not_registered(fn)
self._stack.append(fn) | [
"def",
"add",
"(",
"self",
",",
"*",
"args",
")",
":",
"for",
"fn",
"in",
"args",
":",
"self",
".",
"warn_if_function_not_registered",
"(",
"fn",
")",
"self",
".",
"_stack",
".",
"append",
"(",
"fn",
")"
] | Adds new functions to the end of the pipeline.
Functions must accept three arguments:
- Token: A lunr.Token object which will be updated
- i: The index of the token in the set
- tokens: A list of tokens representing the set | [
"Adds",
"new",
"functions",
"to",
"the",
"end",
"of",
"the",
"pipeline",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L58-L68 | train |
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.after | def after(self, existing_fn, new_fn):
"""Adds a single function after a function that already exists in the
pipeline."""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index + 1, new_fn)
except ValueError as e:
six.raise_from(BaseLunrException("Cannot find existing_fn"), e) | python | def after(self, existing_fn, new_fn):
"""Adds a single function after a function that already exists in the
pipeline."""
self.warn_if_function_not_registered(new_fn)
try:
index = self._stack.index(existing_fn)
self._stack.insert(index + 1, new_fn)
except ValueError as e:
six.raise_from(BaseLunrException("Cannot find existing_fn"), e) | [
"def",
"after",
"(",
"self",
",",
"existing_fn",
",",
"new_fn",
")",
":",
"self",
".",
"warn_if_function_not_registered",
"(",
"new_fn",
")",
"try",
":",
"index",
"=",
"self",
".",
"_stack",
".",
"index",
"(",
"existing_fn",
")",
"self",
".",
"_stack",
".",
"insert",
"(",
"index",
"+",
"1",
",",
"new_fn",
")",
"except",
"ValueError",
"as",
"e",
":",
"six",
".",
"raise_from",
"(",
"BaseLunrException",
"(",
"\"Cannot find existing_fn\"",
")",
",",
"e",
")"
] | Adds a single function after a function that already exists in the
pipeline. | [
"Adds",
"a",
"single",
"function",
"after",
"a",
"function",
"that",
"already",
"exists",
"in",
"the",
"pipeline",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L81-L89 | train |
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.run | def run(self, tokens):
"""Runs the current list of functions that make up the pipeline against
the passed tokens."""
for fn in self._stack:
results = []
for i, token in enumerate(tokens):
# JS ignores additional arguments to the functions but we
# force pipeline functions to declare (token, i, tokens)
# or *args
result = fn(token, i, tokens)
if not result:
continue
if isinstance(result, (list, tuple)): # simulate Array.concat
results.extend(result)
else:
results.append(result)
tokens = results
return tokens | python | def run(self, tokens):
"""Runs the current list of functions that make up the pipeline against
the passed tokens."""
for fn in self._stack:
results = []
for i, token in enumerate(tokens):
# JS ignores additional arguments to the functions but we
# force pipeline functions to declare (token, i, tokens)
# or *args
result = fn(token, i, tokens)
if not result:
continue
if isinstance(result, (list, tuple)): # simulate Array.concat
results.extend(result)
else:
results.append(result)
tokens = results
return tokens | [
"def",
"run",
"(",
"self",
",",
"tokens",
")",
":",
"for",
"fn",
"in",
"self",
".",
"_stack",
":",
"results",
"=",
"[",
"]",
"for",
"i",
",",
"token",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"# JS ignores additional arguments to the functions but we",
"# force pipeline functions to declare (token, i, tokens)",
"# or *args",
"result",
"=",
"fn",
"(",
"token",
",",
"i",
",",
"tokens",
")",
"if",
"not",
"result",
":",
"continue",
"if",
"isinstance",
"(",
"result",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# simulate Array.concat",
"results",
".",
"extend",
"(",
"result",
")",
"else",
":",
"results",
".",
"append",
"(",
"result",
")",
"tokens",
"=",
"results",
"return",
"tokens"
] | Runs the current list of functions that make up the pipeline against
the passed tokens. | [
"Runs",
"the",
"current",
"list",
"of",
"functions",
"that",
"make",
"up",
"the",
"pipeline",
"against",
"the",
"passed",
"tokens",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L110-L128 | train |
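A sketch of the three return conventions `run` supports: a token (kept), a list (spliced into the stream, mirroring JavaScript's Array.concat), or a falsy value (dropped). Plain strings stand in for lunr.Token objects here:
pipeline = Pipeline()

def split_hyphenated(token, i, tokens):
    # A list return is flattened into the output stream.
    parts = str(token).split("-")
    return parts if len(parts) > 1 else token

Pipeline.register_function(split_hyphenated, "splitHyphenated")
pipeline.add(split_hyphenated)
print(pipeline.run(["full-text", "search"]))  # ['full', 'text', 'search']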
yeraydiazdiaz/lunr.py | lunr/pipeline.py | Pipeline.run_string | def run_string(self, string, metadata=None):
"""Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings."""
token = Token(string, metadata)
return [str(tkn) for tkn in self.run([token])] | python | def run_string(self, string, metadata=None):
"""Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings."""
token = Token(string, metadata)
return [str(tkn) for tkn in self.run([token])] | [
"def",
"run_string",
"(",
"self",
",",
"string",
",",
"metadata",
"=",
"None",
")",
":",
"token",
"=",
"Token",
"(",
"string",
",",
"metadata",
")",
"return",
"[",
"str",
"(",
"tkn",
")",
"for",
"tkn",
"in",
"self",
".",
"run",
"(",
"[",
"token",
"]",
")",
"]"
] | Convenience method for passing a string through a pipeline and
getting strings out. This method takes care of wrapping the passed
string in a token and mapping the resulting tokens back to strings. | [
"Convenience",
"method",
"for",
"passing",
"a",
"string",
"through",
"a",
"pipeline",
"and",
"getting",
"strings",
"out",
".",
"This",
"method",
"takes",
"care",
"of",
"wrapping",
"the",
"passed",
"string",
"in",
"a",
"token",
"and",
"mapping",
"the",
"resulting",
"tokens",
"back",
"to",
"strings",
"."
] | 28ec3f6d4888295eed730211ee9617aa488d6ba3 | https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/pipeline.py#L130-L135 | train |
jaraco/jaraco.mongodb | jaraco/mongodb/pmxbot.py | get_client | def get_client():
"""
Use the same MongoDB client as pmxbot if available.
"""
with contextlib.suppress(Exception):
store = Storage.from_URI()
assert isinstance(store, pmxbot.storage.MongoDBStorage)
return store.db.database.client | python | def get_client():
"""
Use the same MongoDB client as pmxbot if available.
"""
with contextlib.suppress(Exception):
store = Storage.from_URI()
assert isinstance(store, pmxbot.storage.MongoDBStorage)
return store.db.database.client | [
"def",
"get_client",
"(",
")",
":",
"with",
"contextlib",
".",
"suppress",
"(",
"Exception",
")",
":",
"store",
"=",
"Storage",
".",
"from_URI",
"(",
")",
"assert",
"isinstance",
"(",
"store",
",",
"pmxbot",
".",
"storage",
".",
"MongoDBStorage",
")",
"return",
"store",
".",
"db",
".",
"database",
".",
"client"
] | Use the same MongoDB client as pmxbot if available. | [
"Use",
"the",
"same",
"MongoDB",
"client",
"as",
"pmxbot",
"if",
"available",
"."
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/pmxbot.py#L13-L20 | train |
jaraco/jaraco.mongodb | jaraco/mongodb/sharding.py | create_db_in_shard | def create_db_in_shard(db_name, shard, client=None):
"""
In a sharded cluster, create a database in a particular shard.
"""
client = client or pymongo.MongoClient()
# flush the router config to ensure it's not stale
res = client.admin.command('flushRouterConfig')
if not res.get('ok'):
raise RuntimeError("unable to flush router config")
if shard not in get_ids(client.config.shards):
raise ValueError(f"Unknown shard {shard}")
if db_name in get_ids(client.config.databases):
raise ValueError("database already exists")
# MongoDB doesn't have a 'create database' command, so insert an
# item into a collection and then drop the collection.
client[db_name].foo.insert({'foo': 1})
client[db_name].foo.drop()
if client[db_name].collection_names():
raise ValueError("database has collections")
primary = client['config'].databases.find_one(db_name)['primary']
if primary != shard:
res = client.admin.command(
'movePrimary', value=db_name, to=shard)
if not res.get('ok'):
raise RuntimeError(str(res))
return (
f"Successfully created {db_name} in {shard} via {client.nodes} "
f"from {hostname}") | python | def create_db_in_shard(db_name, shard, client=None):
"""
In a sharded cluster, create a database in a particular shard.
"""
client = client or pymongo.MongoClient()
# flush the router config to ensure it's not stale
res = client.admin.command('flushRouterConfig')
if not res.get('ok'):
raise RuntimeError("unable to flush router config")
if shard not in get_ids(client.config.shards):
raise ValueError(f"Unknown shard {shard}")
if db_name in get_ids(client.config.databases):
raise ValueError("database already exists")
# MongoDB doesn't have a 'create database' command, so insert an
# item into a collection and then drop the collection.
client[db_name].foo.insert({'foo': 1})
client[db_name].foo.drop()
if client[db_name].collection_names():
raise ValueError("database has collections")
primary = client['config'].databases.find_one(db_name)['primary']
if primary != shard:
res = client.admin.command(
'movePrimary', value=db_name, to=shard)
if not res.get('ok'):
raise RuntimeError(str(res))
return (
f"Successfully created {db_name} in {shard} via {client.nodes} "
f"from {hostname}") | [
"def",
"create_db_in_shard",
"(",
"db_name",
",",
"shard",
",",
"client",
"=",
"None",
")",
":",
"client",
"=",
"client",
"or",
"pymongo",
".",
"MongoClient",
"(",
")",
"# flush the router config to ensure it's not stale",
"res",
"=",
"client",
".",
"admin",
".",
"command",
"(",
"'flushRouterConfig'",
")",
"if",
"not",
"res",
".",
"get",
"(",
"'ok'",
")",
":",
"raise",
"RuntimeError",
"(",
"\"unable to flush router config\"",
")",
"if",
"shard",
"not",
"in",
"get_ids",
"(",
"client",
".",
"config",
".",
"shards",
")",
":",
"raise",
"ValueError",
"(",
"f\"Unknown shard {shard}\"",
")",
"if",
"db_name",
"in",
"get_ids",
"(",
"client",
".",
"config",
".",
"databases",
")",
":",
"raise",
"ValueError",
"(",
"\"database already exists\"",
")",
"# MongoDB doesn't have a 'create database' command, so insert an",
"# item into a collection and then drop the collection.",
"client",
"[",
"db_name",
"]",
".",
"foo",
".",
"insert",
"(",
"{",
"'foo'",
":",
"1",
"}",
")",
"client",
"[",
"db_name",
"]",
".",
"foo",
".",
"drop",
"(",
")",
"if",
"client",
"[",
"db_name",
"]",
".",
"collection_names",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"database has collections\"",
")",
"primary",
"=",
"client",
"[",
"'config'",
"]",
".",
"databases",
".",
"find_one",
"(",
"db_name",
")",
"[",
"'primary'",
"]",
"if",
"primary",
"!=",
"shard",
":",
"res",
"=",
"client",
".",
"admin",
".",
"command",
"(",
"'movePrimary'",
",",
"value",
"=",
"db_name",
",",
"to",
"=",
"shard",
")",
"if",
"not",
"res",
".",
"get",
"(",
"'ok'",
")",
":",
"raise",
"RuntimeError",
"(",
"str",
"(",
"res",
")",
")",
"return",
"(",
"f\"Successfully created {db_name} in {shard} via {client.nodes} \"",
"f\"from {hostname}\"",
")"
] | In a sharded cluster, create a database in a particular shard. | [
"In",
"a",
"sharded",
"cluster",
"create",
"a",
"database",
"in",
"a",
"particular",
"shard",
"."
] | 280f17894941f4babf2e97db033dbb1fd2b9f705 | https://github.com/jaraco/jaraco.mongodb/blob/280f17894941f4babf2e97db033dbb1fd2b9f705/jaraco/mongodb/sharding.py#L16-L43 | train |
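A hedged invocation sketch: this needs a live mongos router of a sharded cluster, and the URI, database name and shard name below are assumptions:
import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017")  # a mongos router
# 'analytics' and 'shard0001' are hypothetical names.
print(create_db_in_shard("analytics", "shard0001", client=client))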
tehmaze/natural | natural/util.py | luhn_checksum | def luhn_checksum(number, chars=DIGITS):
'''
Calculates the Luhn checksum for `number`
:param number: string or int
:param chars: string
>>> luhn_checksum(1234)
4
'''
length = len(chars)
number = [chars.index(n) for n in reversed(str(number))]
return (
sum(number[::2]) +
sum(sum(divmod(i * 2, length)) for i in number[1::2])
) % length | python | def luhn_checksum(number, chars=DIGITS):
'''
Calculates the Luhn checksum for `number`
:param number: string or int
:param chars: string
>>> luhn_checksum(1234)
4
'''
length = len(chars)
number = [chars.index(n) for n in reversed(str(number))]
return (
sum(number[::2]) +
sum(sum(divmod(i * 2, length)) for i in number[1::2])
) % length | [
"def",
"luhn_checksum",
"(",
"number",
",",
"chars",
"=",
"DIGITS",
")",
":",
"length",
"=",
"len",
"(",
"chars",
")",
"number",
"=",
"[",
"chars",
".",
"index",
"(",
"n",
")",
"for",
"n",
"in",
"reversed",
"(",
"str",
"(",
"number",
")",
")",
"]",
"return",
"(",
"sum",
"(",
"number",
"[",
":",
":",
"2",
"]",
")",
"+",
"sum",
"(",
"sum",
"(",
"divmod",
"(",
"i",
"*",
"2",
",",
"length",
")",
")",
"for",
"i",
"in",
"number",
"[",
"1",
":",
":",
"2",
"]",
")",
")",
"%",
"length"
] | Calculates the Luhn checksum for `number`
:param number: string or int
:param chars: string
>>> luhn_checksum(1234)
4 | [
"Calculates",
"the",
"Luhn",
"checksum",
"for",
"number"
] | d7a1fc9de712f9bcf68884a80826a7977df356fb | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L9-L25 | train |
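Working the doctest by hand for the default DIGITS alphabet (base 10): the reversed digits of 1234 are [4, 3, 2, 1]; even positions contribute 4 + 2 = 6, odd positions are doubled and digit-summed (3*2=6 gives 6, 1*2=2 gives 2) for 8, and (6 + 8) % 10 = 4:
digits = [4, 3, 2, 1]                 # str(1234), reversed
even = sum(digits[::2])               # 4 + 2 = 6
odd = sum(sum(divmod(d * 2, 10)) for d in digits[1::2])  # 6 + 2 = 8
print((even + odd) % 10)              # 4, matching luhn_checksum(1234)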
tehmaze/natural | natural/util.py | luhn_calc | def luhn_calc(number, chars=DIGITS):
'''
Calculate the Luhn check digit for ``number``.
:param number: string
:param chars: string
>>> luhn_calc('42')
'2'
'''
checksum = luhn_checksum(str(number) + chars[0], chars)
return chars[-checksum] | python | def luhn_calc(number, chars=DIGITS):
'''
Calculate the Luhn check digit for ``number``.
:param number: string
:param chars: string
>>> luhn_calc('42')
'2'
'''
checksum = luhn_checksum(str(number) + chars[0], chars)
return chars[-checksum] | [
"def",
"luhn_calc",
"(",
"number",
",",
"chars",
"=",
"DIGITS",
")",
":",
"checksum",
"=",
"luhn_checksum",
"(",
"str",
"(",
"number",
")",
"+",
"chars",
"[",
"0",
"]",
",",
"chars",
")",
"return",
"chars",
"[",
"-",
"checksum",
"]"
] | Calculate the Luhn check digit for ``number``.
:param number: string
:param chars: string
>>> luhn_calc('42')
'2' | [
"Calculate",
"the",
"Luhn",
"check",
"digit",
"for",
"number",
"."
] | d7a1fc9de712f9bcf68884a80826a7977df356fb | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L28-L41 | train |
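The computed digit makes the extended number's checksum zero, which is the Luhn validity condition; a quick self-check:
number = '42'
digit = luhn_calc(number)                  # '2', per the doctest
assert luhn_checksum(number + digit) == 0  # '422' passes Luhn validation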
tehmaze/natural | natural/util.py | to_decimal | def to_decimal(number, strip='- '):
'''
Converts a number to a string of decimals in base 10.
>>> to_decimal(123)
'123'
>>> to_decimal('o123')
'83'
>>> to_decimal('b101010')
'42'
>>> to_decimal('0x2a')
'42'
'''
if isinstance(number, six.integer_types):
return str(number)
number = str(number)
number = re.sub(r'[%s]' % re.escape(strip), '', number)
# hexadecimal
if number.startswith('0x'):
return to_decimal(int(number[2:], 16))
# octal
elif number.startswith('o'):
return to_decimal(int(number[1:], 8))
# binary
elif number.startswith('b'):
return to_decimal(int(number[1:], 2))
else:
return str(int(number)) | python | def to_decimal(number, strip='- '):
'''
Converts a number to a string of decimals in base 10.
>>> to_decimal(123)
'123'
>>> to_decimal('o123')
'83'
>>> to_decimal('b101010')
'42'
>>> to_decimal('0x2a')
'42'
'''
if isinstance(number, six.integer_types):
return str(number)
number = str(number)
number = re.sub(r'[%s]' % re.escape(strip), '', number)
# hexadecimal
if number.startswith('0x'):
return to_decimal(int(number[2:], 16))
# octal
elif number.startswith('o'):
return to_decimal(int(number[1:], 8))
# binary
elif number.startswith('b'):
return to_decimal(int(number[1:], 2))
else:
return str(int(number)) | [
"def",
"to_decimal",
"(",
"number",
",",
"strip",
"=",
"'- '",
")",
":",
"if",
"isinstance",
"(",
"number",
",",
"six",
".",
"integer_types",
")",
":",
"return",
"str",
"(",
"number",
")",
"number",
"=",
"str",
"(",
"number",
")",
"number",
"=",
"re",
".",
"sub",
"(",
"r'[%s]'",
"%",
"re",
".",
"escape",
"(",
"strip",
")",
",",
"''",
",",
"number",
")",
"# hexadecimal",
"if",
"number",
".",
"startswith",
"(",
"'0x'",
")",
":",
"return",
"to_decimal",
"(",
"int",
"(",
"number",
"[",
"2",
":",
"]",
",",
"16",
")",
")",
"# octal",
"elif",
"number",
".",
"startswith",
"(",
"'o'",
")",
":",
"return",
"to_decimal",
"(",
"int",
"(",
"number",
"[",
"1",
":",
"]",
",",
"8",
")",
")",
"# binary",
"elif",
"number",
".",
"startswith",
"(",
"'b'",
")",
":",
"return",
"to_decimal",
"(",
"int",
"(",
"number",
"[",
"1",
":",
"]",
",",
"2",
")",
")",
"else",
":",
"return",
"str",
"(",
"int",
"(",
"number",
")",
")"
] | Converts a number to a string of decimals in base 10.
>>> to_decimal(123)
'123'
>>> to_decimal('o123')
'83'
>>> to_decimal('b101010')
'42'
>>> to_decimal('0x2a')
'42' | [
"Converts",
"a",
"number",
"to",
"a",
"string",
"of",
"decimals",
"in",
"base",
"10",
"."
] | d7a1fc9de712f9bcf68884a80826a7977df356fb | https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/util.py#L71-L103 | train |
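A few usage sketches, including the default `strip` behaviour that removes dashes and spaces before parsing:
print(to_decimal('0x2a'))          # '42'  (hexadecimal)
print(to_decimal('o123'))          # '83'  (octal)
print(to_decimal('1234-5678 90'))  # '1234567890' (separators stripped)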
druids/django-chamber | chamber/utils/__init__.py | get_class_method | def get_class_method(cls_or_inst, method_name):
"""
Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with
properties and cached properties.
"""
cls = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
meth = getattr(cls, method_name, None)
if isinstance(meth, property):
meth = meth.fget
elif isinstance(meth, cached_property):
meth = meth.func
return meth | python | def get_class_method(cls_or_inst, method_name):
"""
Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with
properties and cached properties.
"""
cls = cls_or_inst if isinstance(cls_or_inst, type) else cls_or_inst.__class__
meth = getattr(cls, method_name, None)
if isinstance(meth, property):
meth = meth.fget
elif isinstance(meth, cached_property):
meth = meth.func
return meth | [
"def",
"get_class_method",
"(",
"cls_or_inst",
",",
"method_name",
")",
":",
"cls",
"=",
"cls_or_inst",
"if",
"isinstance",
"(",
"cls_or_inst",
",",
"type",
")",
"else",
"cls_or_inst",
".",
"__class__",
"meth",
"=",
"getattr",
"(",
"cls",
",",
"method_name",
",",
"None",
")",
"if",
"isinstance",
"(",
"meth",
",",
"property",
")",
":",
"meth",
"=",
"meth",
".",
"fget",
"elif",
"isinstance",
"(",
"meth",
",",
"cached_property",
")",
":",
"meth",
"=",
"meth",
".",
"func",
"return",
"meth"
] | Returns a method from a given class or instance. When the method does not exist, it returns `None`. Also works with
properties and cached properties. | [
"Returns",
"a",
"method",
"from",
"a",
"given",
"class",
"or",
"instance",
".",
"When",
"the",
"method",
"doest",
"not",
"exist",
"it",
"returns",
"None",
".",
"Also",
"works",
"with",
"properties",
"and",
"cached",
"properties",
"."
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/__init__.py#L17-L28 | train |
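A self-contained sketch of the unwrapping behaviour (in the real module `cached_property` is Django's; a plain property suffices to illustrate):
class Invoice:
    def total(self):
        return 42

    @property
    def label(self):
        return 'INV'

print(get_class_method(Invoice, 'total'))    # the function `total`
print(get_class_method(Invoice, 'label'))    # the property getter, not 'INV'
print(get_class_method(Invoice(), 'label'))  # instances work the same way
print(get_class_method(Invoice, 'missing'))  # None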
guaix-ucm/numina | numina/frame/combine.py | manage_fits | def manage_fits(list_of_frame):
"""Manage a list of FITS resources"""
import astropy.io.fits as fits
import numina.types.dataframe as df
refs = []
for frame in list_of_frame:
if isinstance(frame, str):
ref = fits.open(frame)
refs.append(ref)
elif isinstance(frame, fits.HDUList):
refs.append(frame)
elif isinstance(frame, df.DataFrame):
ref = frame.open()
refs.append(ref)
else:
refs.append(frame)
try:
yield refs
finally:
# release
for obj in refs:
obj.close() | python | def manage_fits(list_of_frame):
"""Manage a list of FITS resources"""
import astropy.io.fits as fits
import numina.types.dataframe as df
refs = []
for frame in list_of_frame:
if isinstance(frame, str):
ref = fits.open(frame)
refs.append(ref)
elif isinstance(frame, fits.HDUList):
refs.append(frame)
elif isinstance(frame, df.DataFrame):
ref = frame.open()
refs.append(ref)
else:
refs.append(frame)
try:
yield refs
finally:
# release
for obj in refs:
obj.close() | [
"def",
"manage_fits",
"(",
"list_of_frame",
")",
":",
"import",
"astropy",
".",
"io",
".",
"fits",
"as",
"fits",
"import",
"numina",
".",
"types",
".",
"dataframe",
"as",
"df",
"refs",
"=",
"[",
"]",
"for",
"frame",
"in",
"list_of_frame",
":",
"if",
"isinstance",
"(",
"frame",
",",
"str",
")",
":",
"ref",
"=",
"fits",
".",
"open",
"(",
"frame",
")",
"refs",
".",
"append",
"(",
"ref",
")",
"elif",
"isinstance",
"(",
"frame",
",",
"fits",
".",
"HDUList",
")",
":",
"refs",
".",
"append",
"(",
"frame",
")",
"elif",
"isinstance",
"(",
"frame",
",",
"df",
".",
"DataFrame",
")",
":",
"ref",
"=",
"frame",
".",
"open",
"(",
")",
"refs",
".",
"append",
"(",
"ref",
")",
"else",
":",
"refs",
".",
"append",
"(",
"frame",
")",
"try",
":",
"yield",
"refs",
"finally",
":",
"# release",
"for",
"obj",
"in",
"refs",
":",
"obj",
".",
"close",
"(",
")"
] | Manage a list of FITS resources | [
"Manage",
"a",
"list",
"of",
"FITS",
"resources"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/frame/combine.py#L185-L208 | train |
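A usage sketch: since `manage_fits` is a generator with try/finally cleanup, it is presumably wrapped as a context manager in the source (the decorator falls outside the snippet above); the filenames here are hypothetical:
import contextlib

manage = contextlib.contextmanager(manage_fits)
with manage(['frame1.fits', 'frame2.fits']) as hdulists:  # hypothetical files
    for hdul in hdulists:
        print(len(hdul))
# every opened resource is closed on exit, even if the body raises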
guaix-ucm/numina | numina/array/display/logging_from_debugplot.py | logging_from_debugplot | def logging_from_debugplot(debugplot):
"""Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
if isinstance(debugplot, int):
if abs(debugplot) >= 10:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
else:
raise ValueError("Unexpected debugplot=" + str(debugplot)) | python | def logging_from_debugplot(debugplot):
"""Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
"""
if isinstance(debugplot, int):
if abs(debugplot) >= 10:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
else:
raise ValueError("Unexpected debugplot=" + str(debugplot)) | [
"def",
"logging_from_debugplot",
"(",
"debugplot",
")",
":",
"if",
"isinstance",
"(",
"debugplot",
",",
"int",
")",
":",
"if",
"abs",
"(",
"debugplot",
")",
">=",
"10",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
")",
"else",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected debugplot=\"",
"+",
"str",
"(",
"debugplot",
")",
")"
] | Set debugging level based on debugplot value.
Parameters
----------
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'. | [
"Set",
"debugging",
"level",
"based",
"on",
"debugplot",
"value",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/logging_from_debugplot.py#L26-L42 | train |
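A two-line sketch of the threshold (note that logging.basicConfig only takes effect on the first call in a process):
logging_from_debugplot(22)   # abs(22) >= 10 -> DEBUG-level logging
logging_from_debugplot(1)    # otherwise     -> INFO-level logging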
guaix-ucm/numina | numina/array/display/ximplot.py | ximplot | def ximplot(ycut, title=None, show=True, plot_bbox=(0, 0),
geometry=(0, 0, 640, 480), tight_layout=True,
debugplot=None):
"""Auxiliary function to display 1d plot.
Parameters
----------
ycut : 1d numpy array, float
Array to be displayed.
title : string
Plot title.
show : bool
If True, the function shows the displayed image. Otherwise
plt.show() is expected to be executed outside.
plot_bbox : tuple (2 integers)
If tuple is (0,0), the plot is displayed with image
coordinates (indices corresponding to the numpy array).
Otherwise, the bounding box of the image is read from this
tuple, assuming (nc1,nc2). In this case, the coordinates
indicate pixels.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
tight_layout : bool
If True, and show=True, a tight display layout is set.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False.
"""
# protections
if type(ycut) is not np.ndarray:
raise ValueError("ycut=" + str(ycut) +
" must be a numpy.ndarray")
elif ycut.ndim != 1:
raise ValueError("ycut.ndim=" + str(ycut.ndim) +
" must be 1")
# read bounding box limits
nc1, nc2 = plot_bbox
plot_coord = (nc1 == 0 and nc2 == 0)
naxis1_ = ycut.size
if not plot_coord:
# check that ycut size corresponds to bounding box size
if naxis1_ != nc2 - nc1 + 1:
raise ValueError("ycut.size=" + str(ycut.size) +
" does not correspond to bounding box size")
# display image
from numina.array.display.matplotlib_qt import plt
if not show:
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.autoscale(False)
ymin = ycut.min()
ymax = ycut.max()
if plot_coord:
xmin = -0.5
xmax = (naxis1_ - 1) + 0.5
xcut = np.arange(naxis1_, dtype=float)
ax.set_xlabel('image array index in the X direction')
ax.set_ylabel('pixel value')
else:
xmin = float(nc1) - 0.5
xmax = float(nc2) + 0.5
xcut = np.linspace(start=nc1, stop=nc2, num=nc2 - nc1 + 1)
ax.set_xlabel('image pixel in the X direction')
ax.set_ylabel('pixel value')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.plot(xcut, ycut, '-')
if title is not None:
ax.set_title(title)
# set the geometry
set_window_geometry(geometry)
if show:
pause_debugplot(debugplot, pltshow=show, tight_layout=tight_layout)
else:
if tight_layout:
plt.tight_layout()
# return axes
return ax | python | def ximplot(ycut, title=None, show=True, plot_bbox=(0, 0),
geometry=(0, 0, 640, 480), tight_layout=True,
debugplot=None):
"""Auxiliary function to display 1d plot.
Parameters
----------
ycut : 1d numpy array, float
Array to be displayed.
title : string
Plot title.
show : bool
If True, the function shows the displayed image. Otherwise
plt.show() is expected to be executed outside.
plot_bbox : tuple (2 integers)
If tuple is (0,0), the plot is displayed with image
coordinates (indices corresponding to the numpy array).
Otherwise, the bounding box of the image is read from this
tuple, assuming (nc1,nc2). In this case, the coordinates
indicate pixels.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
tight_layout : bool
If True, and show=True, a tight display layout is set.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False.
"""
# protections
if type(ycut) is not np.ndarray:
raise ValueError("ycut=" + str(ycut) +
" must be a numpy.ndarray")
elif ycut.ndim != 1:
raise ValueError("ycut.ndim=" + str(ycut.ndim) +
" must be 1")
# read bounding box limits
nc1, nc2 = plot_bbox
plot_coord = (nc1 == 0 and nc2 == 0)
naxis1_ = ycut.size
if not plot_coord:
# check that ycut size corresponds to bounding box size
if naxis1_ != nc2 - nc1 + 1:
raise ValueError("ycut.size=" + str(ycut.size) +
" does not correspond to bounding box size")
# display image
from numina.array.display.matplotlib_qt import plt
if not show:
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.autoscale(False)
ymin = ycut.min()
ymax = ycut.max()
if plot_coord:
xmin = -0.5
xmax = (naxis1_ - 1) + 0.5
xcut = np.arange(naxis1_, dtype=float)
ax.set_xlabel('image array index in the X direction')
ax.set_ylabel('pixel value')
else:
xmin = float(nc1) - 0.5
xmax = float(nc2) + 0.5
xcut = np.linspace(start=nc1, stop=nc2, num=nc2 - nc1 + 1)
ax.set_xlabel('image pixel in the X direction')
ax.set_ylabel('pixel value')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.plot(xcut, ycut, '-')
if title is not None:
ax.set_title(title)
# set the geometry
set_window_geometry(geometry)
if show:
pause_debugplot(debugplot, pltshow=show, tight_layout=tight_layout)
else:
if tight_layout:
plt.tight_layout()
# return axes
return ax | [
"def",
"ximplot",
"(",
"ycut",
",",
"title",
"=",
"None",
",",
"show",
"=",
"True",
",",
"plot_bbox",
"=",
"(",
"0",
",",
"0",
")",
",",
"geometry",
"=",
"(",
"0",
",",
"0",
",",
"640",
",",
"480",
")",
",",
"tight_layout",
"=",
"True",
",",
"debugplot",
"=",
"None",
")",
":",
"# protections",
"if",
"type",
"(",
"ycut",
")",
"is",
"not",
"np",
".",
"ndarray",
":",
"raise",
"ValueError",
"(",
"\"ycut=\"",
"+",
"str",
"(",
"ycut",
")",
"+",
"\" must be a numpy.ndarray\"",
")",
"elif",
"ycut",
".",
"ndim",
"is",
"not",
"1",
":",
"raise",
"ValueError",
"(",
"\"ycut.ndim=\"",
"+",
"str",
"(",
"ycut",
".",
"dim",
")",
"+",
"\" must be 1\"",
")",
"# read bounding box limits",
"nc1",
",",
"nc2",
"=",
"plot_bbox",
"plot_coord",
"=",
"(",
"nc1",
"==",
"0",
"and",
"nc2",
"==",
"0",
")",
"naxis1_",
"=",
"ycut",
".",
"size",
"if",
"not",
"plot_coord",
":",
"# check that ycut size corresponds to bounding box size",
"if",
"naxis1_",
"!=",
"nc2",
"-",
"nc1",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"ycut.size=\"",
"+",
"str",
"(",
"ycut",
".",
"size",
")",
"+",
"\" does not correspond to bounding box size\"",
")",
"# display image",
"from",
"numina",
".",
"array",
".",
"display",
".",
"matplotlib_qt",
"import",
"plt",
"if",
"not",
"show",
":",
"plt",
".",
"ioff",
"(",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"ax",
".",
"autoscale",
"(",
"False",
")",
"ymin",
"=",
"ycut",
".",
"min",
"(",
")",
"ymax",
"=",
"ycut",
".",
"max",
"(",
")",
"if",
"plot_coord",
":",
"xmin",
"=",
"-",
"0.5",
"xmax",
"=",
"(",
"naxis1_",
"-",
"1",
")",
"+",
"0.5",
"xcut",
"=",
"np",
".",
"arange",
"(",
"naxis1_",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"ax",
".",
"set_xlabel",
"(",
"'image array index in the X direction'",
")",
"ax",
".",
"set_ylabel",
"(",
"'pixel value'",
")",
"else",
":",
"xmin",
"=",
"float",
"(",
"nc1",
")",
"-",
"0.5",
"xmax",
"=",
"float",
"(",
"nc2",
")",
"+",
"0.5",
"xcut",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"nc1",
",",
"stop",
"=",
"nc2",
",",
"num",
"=",
"nc2",
"-",
"nc1",
"+",
"1",
")",
"ax",
".",
"set_xlabel",
"(",
"'image pixel in the X direction'",
")",
"ax",
".",
"set_ylabel",
"(",
"'pixel value'",
")",
"ax",
".",
"set_xlim",
"(",
"xmin",
",",
"xmax",
")",
"ax",
".",
"set_ylim",
"(",
"ymin",
",",
"ymax",
")",
"ax",
".",
"plot",
"(",
"xcut",
",",
"ycut",
",",
"'-'",
")",
"if",
"title",
"is",
"not",
"None",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"# set the geometry",
"set_window_geometry",
"(",
"geometry",
")",
"if",
"show",
":",
"pause_debugplot",
"(",
"debugplot",
",",
"pltshow",
"=",
"show",
",",
"tight_layout",
"=",
"tight_layout",
")",
"else",
":",
"if",
"tight_layout",
":",
"plt",
".",
"tight_layout",
"(",
")",
"# return axes",
"return",
"ax"
] | Auxiliary function to display 1d plot.
Parameters
----------
ycut : 1d numpy array, float
Array to be displayed.
title : string
Plot title.
show : bool
If True, the function shows the displayed image. Otherwise
plt.show() is expected to be executed outside.
plot_bbox : tuple (2 integers)
If tuple is (0,0), the plot is displayed with image
coordinates (indices corresponding to the numpy array).
Otherwise, the bounding box of the image is read from this
tuple, assuming (nc1,nc2). In this case, the coordinates
indicate pixels.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
tight_layout : bool
If True, and show=True, a tight display layout is set.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False. | [
"Auxiliary",
"function",
"to",
"display",
"1d",
"plot",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/ximplot.py#L19-L112 | train |
guaix-ucm/numina | numina/array/wavecalib/resample.py | oversample1d | def oversample1d(sp, crval1, cdelt1, oversampling=1, debugplot=0):
"""Oversample spectrum.
Parameters
----------
sp : numpy array
Spectrum to be oversampled.
crval1 : float
Abscissae of the center of the first pixel in the original
spectrum 'sp'.
cdelt1 : float
Abscissae increment corresponding to 1 pixel in the original
spectrum 'sp'.
oversampling : int
Oversampling value per pixel.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_over : numpy array
Oversampled data array.
crval1_over : float
Abscissae of the center of the first pixel in the oversampled
spectrum.
cdelt1_over : float
Abscissae increment corresponding to 1 pixel in the oversampled
spectrum.
"""
if sp.ndim != 1:
raise ValueError('Unexpected array dimensions')
naxis1 = sp.size
naxis1_over = naxis1 * oversampling
cdelt1_over = cdelt1 / oversampling
xmin = crval1 - cdelt1/2 # left border of first pixel
crval1_over = xmin + cdelt1_over / 2
sp_over = np.zeros(naxis1_over)
for i in range(naxis1):
i1 = i * oversampling
i2 = i1 + oversampling
sp_over[i1:i2] = sp[i]
if abs(debugplot) in (21, 22):
crvaln = crval1 + (naxis1 - 1) * cdelt1
crvaln_over = crval1_over + (naxis1_over - 1) * cdelt1_over
xover = np.linspace(crval1_over, crvaln_over, naxis1_over)
ax = ximplotxy(np.linspace(crval1, crvaln, naxis1), sp, 'bo',
label='original', show=False)
ax.plot(xover, sp_over, 'r+', label='resampled')
pause_debugplot(debugplot, pltshow=True)
return sp_over, crval1_over, cdelt1_over | python | def oversample1d(sp, crval1, cdelt1, oversampling=1, debugplot=0):
"""Oversample spectrum.
Parameters
----------
sp : numpy array
Spectrum to be oversampled.
crval1 : float
Abscissae of the center of the first pixel in the original
spectrum 'sp'.
cdelt1 : float
Abscissae increment corresponding to 1 pixel in the original
spectrum 'sp'.
oversampling : int
Oversampling value per pixel.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_over : numpy array
Oversampled data array.
crval1_over : float
Abscissae of the center of the first pixel in the oversampled
spectrum.
cdelt1_over : float
Abscissae increment corresponding to 1 pixel in the oversampled
spectrum.
"""
if sp.ndim != 1:
raise ValueError('Unexpected array dimensions')
naxis1 = sp.size
naxis1_over = naxis1 * oversampling
cdelt1_over = cdelt1 / oversampling
xmin = crval1 - cdelt1/2 # left border of first pixel
crval1_over = xmin + cdelt1_over / 2
sp_over = np.zeros(naxis1_over)
for i in range(naxis1):
i1 = i * oversampling
i2 = i1 + oversampling
sp_over[i1:i2] = sp[i]
if abs(debugplot) in (21, 22):
crvaln = crval1 + (naxis1 - 1) * cdelt1
crvaln_over = crval1_over + (naxis1_over - 1) * cdelt1_over
xover = np.linspace(crval1_over, crvaln_over, naxis1_over)
ax = ximplotxy(np.linspace(crval1, crvaln, naxis1), sp, 'bo',
label='original', show=False)
ax.plot(xover, sp_over, 'r+', label='resampled')
pause_debugplot(debugplot, pltshow=True)
return sp_over, crval1_over, cdelt1_over | [
"def",
"oversample1d",
"(",
"sp",
",",
"crval1",
",",
"cdelt1",
",",
"oversampling",
"=",
"1",
",",
"debugplot",
"=",
"0",
")",
":",
"if",
"sp",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'Unexpected array dimensions'",
")",
"naxis1",
"=",
"sp",
".",
"size",
"naxis1_over",
"=",
"naxis1",
"*",
"oversampling",
"cdelt1_over",
"=",
"cdelt1",
"/",
"oversampling",
"xmin",
"=",
"crval1",
"-",
"cdelt1",
"/",
"2",
"# left border of first pixel",
"crval1_over",
"=",
"xmin",
"+",
"cdelt1_over",
"/",
"2",
"sp_over",
"=",
"np",
".",
"zeros",
"(",
"naxis1_over",
")",
"for",
"i",
"in",
"range",
"(",
"naxis1",
")",
":",
"i1",
"=",
"i",
"*",
"oversampling",
"i2",
"=",
"i1",
"+",
"oversampling",
"sp_over",
"[",
"i1",
":",
"i2",
"]",
"=",
"sp",
"[",
"i",
"]",
"if",
"abs",
"(",
"debugplot",
")",
"in",
"(",
"21",
",",
"22",
")",
":",
"crvaln",
"=",
"crval1",
"+",
"(",
"naxis1",
"-",
"1",
")",
"*",
"cdelt1",
"crvaln_over",
"=",
"crval1_over",
"+",
"(",
"naxis1_over",
"-",
"1",
")",
"*",
"cdelt1_over",
"xover",
"=",
"np",
".",
"linspace",
"(",
"crval1_over",
",",
"crvaln_over",
",",
"naxis1_over",
")",
"ax",
"=",
"ximplotxy",
"(",
"np",
".",
"linspace",
"(",
"crval1",
",",
"crvaln",
",",
"naxis1",
")",
",",
"sp",
",",
"'bo'",
",",
"label",
"=",
"'original'",
",",
"show",
"=",
"False",
")",
"ax",
".",
"plot",
"(",
"xover",
",",
"sp_over",
",",
"'r+'",
",",
"label",
"=",
"'resampled'",
")",
"pause_debugplot",
"(",
"debugplot",
",",
"pltshow",
"=",
"True",
")",
"return",
"sp_over",
",",
"crval1_over",
",",
"cdelt1_over"
] | Oversample spectrum.
Parameters
----------
sp : numpy array
Spectrum to be oversampled.
crval1 : float
Abscissae of the center of the first pixel in the original
spectrum 'sp'.
cdelt1 : float
Abscissae increment corresponding to 1 pixel in the original
spectrum 'sp'.
oversampling : int
Oversampling value per pixel.
debugplot : int
Debugging level for messages and plots. For details see
'numina.array.display.pause_debugplot.py'.
Returns
-------
sp_over : numpy array
Oversampled data array.
crval1_over : float
Abscissae of the center of the first pixel in the oversampled
spectrum.
cdelt1_over : float
Abscissae increment corresponding to 1 pixel in the oversampled
spectrum. | [
"Oversample",
"spectrum",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/resample.py#L22-L78 | train |
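Each original pixel value is repeated `oversampling` times and the abscissa grid is refined so pixel borders line up; a quick check that the integrated signal (value times step) is preserved:
import numpy as np

sp = np.array([1.0, 3.0, 2.0])
sp_over, crval1_over, cdelt1_over = oversample1d(
    sp, crval1=100.0, cdelt1=2.0, oversampling=4)
print(sp_over)       # each input value repeated 4 times
print(crval1_over)   # 99.25, first subpixel centre inside pixel [99, 101]
print(cdelt1_over)   # 0.5
print(sp.sum() * 2.0 == sp_over.sum() * 0.5)  # True, flux per unit x kept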
guaix-ucm/numina | numina/array/wavecalib/resample.py | map_borders | def map_borders(wls):
"""Compute borders of pixels for interpolation.
The border of the pixel is assumed to be midway of the wls
"""
midpt_wl = 0.5 * (wls[1:] + wls[:-1])
all_borders = np.zeros((wls.shape[0] + 1,))
all_borders[1:-1] = midpt_wl
all_borders[0] = 2 * wls[0] - midpt_wl[0]
all_borders[-1] = 2 * wls[-1] - midpt_wl[-1]
return all_borders | python | def map_borders(wls):
"""Compute borders of pixels for interpolation.
The border of each pixel is assumed to lie midway between adjacent wls values
"""
midpt_wl = 0.5 * (wls[1:] + wls[:-1])
all_borders = np.zeros((wls.shape[0] + 1,))
all_borders[1:-1] = midpt_wl
all_borders[0] = 2 * wls[0] - midpt_wl[0]
all_borders[-1] = 2 * wls[-1] - midpt_wl[-1]
return all_borders | [
"def",
"map_borders",
"(",
"wls",
")",
":",
"midpt_wl",
"=",
"0.5",
"*",
"(",
"wls",
"[",
"1",
":",
"]",
"+",
"wls",
"[",
":",
"-",
"1",
"]",
")",
"all_borders",
"=",
"np",
".",
"zeros",
"(",
"(",
"wls",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
",",
")",
")",
"all_borders",
"[",
"1",
":",
"-",
"1",
"]",
"=",
"midpt_wl",
"all_borders",
"[",
"0",
"]",
"=",
"2",
"*",
"wls",
"[",
"0",
"]",
"-",
"midpt_wl",
"[",
"0",
"]",
"all_borders",
"[",
"-",
"1",
"]",
"=",
"2",
"*",
"wls",
"[",
"-",
"1",
"]",
"-",
"midpt_wl",
"[",
"-",
"1",
"]",
"return",
"all_borders"
] | Compute borders of pixels for interpolation.
The border of each pixel is assumed to lie midway between adjacent wls values | [
"Compute",
"borders",
"of",
"pixels",
"for",
"interpolation",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/wavecalib/resample.py#L215-L225 | train |
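Worked example: interior borders are midpoints, and the two outer borders are reflected so the first and last wavelengths stay centred in their pixels:
import numpy as np

wls = np.array([1.0, 2.0, 4.0])
print(map_borders(wls))   # [0.5 1.5 3.  5. ]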
guaix-ucm/numina | numina/util/objimport.py | import_object | def import_object(path):
"""Import an object given its fully qualified name."""
spl = path.split('.')
if len(spl) == 1:
return importlib.import_module(path)
# avoid last part for the moment
cls = spl[-1]
mods = '.'.join(spl[:-1])
mm = importlib.import_module(mods)
# try to get the last part as an attribute
try:
obj = getattr(mm, cls)
return obj
except AttributeError:
pass
# Try to import the last part
rr = importlib.import_module(path)
return rr | python | def import_object(path):
"""Import an object given its fully qualified name."""
spl = path.split('.')
if len(spl) == 1:
return importlib.import_module(path)
# avoid last part for the moment
cls = spl[-1]
mods = '.'.join(spl[:-1])
mm = importlib.import_module(mods)
# try to get the last part as an attribute
try:
obj = getattr(mm, cls)
return obj
except AttributeError:
pass
# Try to import the last part
rr = importlib.import_module(path)
return rr | [
"def",
"import_object",
"(",
"path",
")",
":",
"spl",
"=",
"path",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"spl",
")",
"==",
"1",
":",
"return",
"importlib",
".",
"import_module",
"(",
"path",
")",
"# avoid last part for the moment",
"cls",
"=",
"spl",
"[",
"-",
"1",
"]",
"mods",
"=",
"'.'",
".",
"join",
"(",
"spl",
"[",
":",
"-",
"1",
"]",
")",
"mm",
"=",
"importlib",
".",
"import_module",
"(",
"mods",
")",
"# try to get the last part as an attribute",
"try",
":",
"obj",
"=",
"getattr",
"(",
"mm",
",",
"cls",
")",
"return",
"obj",
"except",
"AttributeError",
":",
"pass",
"# Try to import the last part",
"rr",
"=",
"importlib",
".",
"import_module",
"(",
"path",
")",
"return",
"rr"
] | Import an object given its fully qualified name. | [
"Import",
"an",
"object",
"given",
"its",
"fully",
"qualified",
"name",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/objimport.py#L17-L36 | train |
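Usage sketch covering both resolution paths, attribute lookup first with module import as the fallback:
print(import_object('os.path'))                   # a module
print(import_object('os.path.join'))              # attribute of a module
print(import_object('json.decoder.JSONDecoder'))  # a class, via getattr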
ckan/losser | losser/cli.py | make_parser | def make_parser(add_help=True, exclude_args=None):
"""Return an argparse.ArgumentParser object with losser's arguments.
Other projects can call this to get an ArgumentParser with losser's
command line interface to use as a parent parser for their own parser.
For example::
parent_parser = losser.cli.make_parser(
add_help=False, exclude_args=["-i"])
parser = argparse.ArgumentParser(
description="Export datasets from a CKAN site to JSON or CSV.",
parents=[parent_parser])
parser.add_argument(...
:param add_help: Whether or not to add losser's help text to the parser.
Pass add_help=False if you want to use your own help text in a child
parser.
:type add_help: bool
:param exclude_args: List of losser command-line arguments to exclude, use
this to exclude any default losser arguments that you don't want in
your own command. For example: exclude_args=["-i", "--max-length"].
:type exclude_args: list of strings
"""
if exclude_args is None:
exclude_args = []
parser = argparse.ArgumentParser(add_help=add_help)
parser.description = ("Filter, transform and export a list of JSON "
"objects on stdin to JSON or CSV on stdout")
if "--columns" not in exclude_args:
parser.add_argument(
"--columns", dest="columns_file",
help="the JSON file specifying the columns to be output",
)
if ("-i" not in exclude_args) and ("--input" not in exclude_args):
parser.add_argument(
"-i", "--input",
help="read input from the given file instead of from stdin",
dest='input_data', # Because input is a Python builtin.
)
if ("-c" not in exclude_args) and ("--column" not in exclude_args):
parser.add_argument("-c", "--column", action=ColumnsAction)
if "--pattern" not in exclude_args:
parser.add_argument("--pattern", action=ColumnsAction, nargs='+')
if "--max-length" not in exclude_args:
parser.add_argument("--max-length", action=ColumnsAction)
if "--strip" not in exclude_args:
parser.add_argument("--strip", nargs="?", action=ColumnsAction)
if "--deduplicate" not in exclude_args:
parser.add_argument("--deduplicate", nargs='?', action=ColumnsAction)
if "--case-sensitive" not in exclude_args:
parser.add_argument(
"--case-sensitive", nargs='?', action=ColumnsAction)
if "--unique" not in exclude_args:
parser.add_argument("--unique", nargs="?", action=ColumnsAction)
if ("-p" not in exclude_args) and ("--pretty" not in exclude_args):
parser.add_argument("-p", "--pretty", action="store_true")
return parser | python | def make_parser(add_help=True, exclude_args=None):
"""Return an argparse.ArgumentParser object with losser's arguments.
Other projects can call this to get an ArgumentParser with losser's
command line interface to use as a parent parser for their own parser.
For example::
parent_parser = losser.cli.make_parser(
add_help=False, exclude_args=["-i"])
parser = argparse.ArgumentParser(
description="Export datasets from a CKAN site to JSON or CSV.",
parents=[parent_parser])
parser.add_argument(...
:param add_help: Whether or not to add losser's help text to the parser.
Pass add_help=False if you want to use your own help text in a child
parser.
:type add_help: bool
:param exclude_args: List of losser command-line arguments to exclude, use
this to exclude any default losser arguments that you don't want in
your own command. For example: exclude_args=["-i", "--max-length"].
:type exclude_args: list of strings
"""
if exclude_args is None:
exclude_args = []
parser = argparse.ArgumentParser(add_help=add_help)
parser.description = ("Filter, transform and export a list of JSON "
"objects on stdin to JSON or CSV on stdout")
if "--columns" not in exclude_args:
parser.add_argument(
"--columns", dest="columns_file",
help="the JSON file specifying the columns to be output",
)
if ("-i" not in exclude_args) and ("--input" not in exclude_args):
parser.add_argument(
"-i", "--input",
help="read input from the given file instead of from stdin",
dest='input_data', # Because input is a Python builtin.
)
if ("-c" not in exclude_args) and ("--column" not in exclude_args):
parser.add_argument("-c", "--column", action=ColumnsAction)
if "--pattern" not in exclude_args:
parser.add_argument("--pattern", action=ColumnsAction, nargs='+')
if "--max-length" not in exclude_args:
parser.add_argument("--max-length", action=ColumnsAction)
if "--strip" not in exclude_args:
parser.add_argument("--strip", nargs="?", action=ColumnsAction)
if "--deduplicate" not in exclude_args:
parser.add_argument("--deduplicate", nargs='?', action=ColumnsAction)
if "--case-sensitive" not in exclude_args:
parser.add_argument(
"--case-sensitive", nargs='?', action=ColumnsAction)
if "--unique" not in exclude_args:
parser.add_argument("--unique", nargs="?", action=ColumnsAction)
if ("-p" not in exclude_args) and ("--pretty" not in exclude_args):
parser.add_argument("-p", "--pretty", action="store_true")
return parser | [
"def",
"make_parser",
"(",
"add_help",
"=",
"True",
",",
"exclude_args",
"=",
"None",
")",
":",
"if",
"exclude_args",
"is",
"None",
":",
"exclude_args",
"=",
"[",
"]",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"add_help",
")",
"parser",
".",
"description",
"=",
"(",
"\"Filter, transform and export a list of JSON \"",
"\"objects on stdin to JSON or CSV on stdout\"",
")",
"if",
"\"--columns\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--columns\"",
",",
"dest",
"=",
"\"columns_file\"",
",",
"help",
"=",
"\"the JSON file specifying the columns to be output\"",
",",
")",
"if",
"(",
"\"-i\"",
"not",
"in",
"exclude_args",
")",
"and",
"(",
"\"--input\"",
"not",
"in",
"exclude_args",
")",
":",
"parser",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--input\"",
",",
"help",
"=",
"\"read input from the given file instead of from stdin\"",
",",
"dest",
"=",
"'input_data'",
",",
"# Because input is a Python builtin.",
")",
"if",
"(",
"\"-c\"",
"not",
"in",
"exclude_args",
")",
"and",
"(",
"\"--column\"",
"not",
"in",
"exclude_args",
")",
":",
"parser",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--column\"",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"\"--pattern\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--pattern\"",
",",
"action",
"=",
"ColumnsAction",
",",
"nargs",
"=",
"'+'",
")",
"if",
"\"--max-length\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--max-length\"",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"\"--strip\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--strip\"",
",",
"nargs",
"=",
"\"?\"",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"\"--deduplicate\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--deduplicate\"",
",",
"nargs",
"=",
"'?'",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"\"--case-sensitive\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--case-sensitive\"",
",",
"nargs",
"=",
"'?'",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"\"--unique\"",
"not",
"in",
"exclude_args",
":",
"parser",
".",
"add_argument",
"(",
"\"--unique\"",
",",
"nargs",
"=",
"\"?\"",
",",
"action",
"=",
"ColumnsAction",
")",
"if",
"(",
"\"-p\"",
"not",
"in",
"exclude_args",
")",
"and",
"(",
"\"--pretty\"",
"not",
"in",
"exclude_args",
")",
":",
"parser",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--pretty\"",
",",
"action",
"=",
"\"store_true\"",
")",
"return",
"parser"
] | Return an argparse.ArgumentParser object with losser's arguments.
Other projects can call this to get an ArgumentParser with losser's
command line interface to use as a parent parser for their own parser.
For example::
parent_parser = losser.cli.make_parser(
add_help=False, exclude_args=["-i"])
parser = argparse.ArgumentParser(
description="Export datasets from a CKAN site to JSON or CSV.",
parents=[parent_parser])
parser.add_argument(...
:param add_help: Whether or not to add losser's help text to the parser.
Pass add_help=False if you want to use your own help text in a child
parser.
:type add_help: bool
:param exclude_args: List of losser command-line arguments to exclude, use
this to exclude any default losser arguments that you don't want in
your own command. For example: exclude_args=["-i", "--max-length"].
:type exclude_args: list of strings | [
"Return",
"an",
"argparse",
".",
"ArgumentParser",
"object",
"with",
"losser",
"s",
"arguments",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L153-L211 | train |
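A sketch of embedding this parser in another tool, following the docstring's own example (the description text and the extra option are illustrative):
import argparse
import losser.cli

parent = losser.cli.make_parser(add_help=False, exclude_args=["-i"])
parser = argparse.ArgumentParser(
    description="Export datasets from a CKAN site to JSON or CSV.",
    parents=[parent])
parser.add_argument("--url")  # a tool-specific extra option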
ckan/losser | losser/cli.py | parse | def parse(parser=None, args=None):
"""Parse the command line arguments, return an argparse namespace object.
Other projects can call this function and pass in their own ArgumentParser
object (which should have a losser ArgumentParser from make_parser() above
as parent) to do the argument parsing and get the result (this does some
custom post-processing, beyond what argparse's parse_args() does). For
example::
parent_parser = losser.cli.make_parser(...)
parser = argparse.ArgumentParser(parents=[parent_parser])
parser.add_argument(...)
try:
parsed_args = losser.cli.parse(parser=parser)
except losser.cli.CommandLineError as err:
...
:raises CommandLineError: If something went wrong during command-line
parsing. If the exception has a non-empty .message attribute it
contains an error message that hasn't been printed to stdout yet,
otherwise any error message has already been printed.
:raises CommandLineExit: If the result of command-line parsing means that
the command should exit without continuing, but this is not because of
an error (for example if the user passed --help). Any help text will
already have been written to stdout, the exit code that the process
should exit with is in the exception's .code attribute.
CommandLineExit is a subclass of CommandLineError above.
"""
if not parser:
parser = make_parser()
try:
parsed_args = parser.parse_args(args)
except SystemExit as err:
raise CommandLineExit(err.code)
try:
columns = parsed_args.columns
except AttributeError:
columns = collections.OrderedDict()
parsed_args.columns = columns
for title, spec in columns.items():
if "pattern" not in spec:
raise ColumnWithoutPatternError(
'Column "{0}" needs a pattern'.format(title))
# Change length-1 patterns into strings (not lists of one string).
if len(spec["pattern"]) == 1:
spec["pattern"] = spec["pattern"][0]
if columns and parsed_args.columns_file:
raise ColumnsAndColumnsFileError(
"You can't use the --column and --columns options together (yet)")
elif parsed_args.columns_file and not columns:
parsed_args.columns = parsed_args.columns_file
elif (not columns) and (not parsed_args.columns_file):
# Crash if no columns specified.
# In the future we'll support simply converting all JSON fields to CSV
# columns if no columns are specified, and this will be removed.
raise NoColumnsError(
"You must give either a --columns or at least one -c/--column "
"argument")
else:
assert columns
return parsed_args | python | def parse(parser=None, args=None):
"""Parse the command line arguments, return an argparse namespace object.
Other projects can call this function and pass in their own ArgumentParser
object (which should have a losser ArgumentParser from make_parser() above
as parent) to do the argument parsing and get the result (this does some
custom post-processing, beyond what argparse's parse_args() does). For
example::
parent_parser = losser.cli.make_parser(...)
parser = argparse.ArgumentParser(parents=[parent_parser])
parser.add_argument(...)
try:
parsed_args = losser.cli.parse(parser=parser)
except losser.cli.CommandLineError as err:
...
:raises CommandLineError: If something went wrong during command-line
parsing. If the exception has a non-empty .message attribute it
contains an error message that hasn't been printed to stdout yet,
otherwise any error message has already been printed.
:raises CommandLineExit: If the result of command-line parsing means that
the command should exit without continuing, but this is not because of
an error (for example if the user passed --help). Any help text will
already have been written to stdout, the exit code that the process
should exit with is in the exception's .code attribute.
CommandLineExit is a subclass of CommandLineError above.
"""
if not parser:
parser = make_parser()
try:
parsed_args = parser.parse_args(args)
except SystemExit as err:
raise CommandLineExit(err.code)
try:
columns = parsed_args.columns
except AttributeError:
columns = collections.OrderedDict()
parsed_args.columns = columns
for title, spec in columns.items():
if "pattern" not in spec:
raise ColumnWithoutPatternError(
'Column "{0}" needs a pattern'.format(title))
# Change length-1 patterns into strings (not lists of one string).
if len(spec["pattern"]) == 1:
spec["pattern"] = spec["pattern"][0]
if columns and parsed_args.columns_file:
raise ColumnsAndColumnsFileError(
"You can't use the --column and --columns options together (yet)")
elif parsed_args.columns_file and not columns:
parsed_args.columns = parsed_args.columns_file
elif (not columns) and (not parsed_args.columns_file):
# Crash if no columns specified.
# In the future we'll support simply converting all JSON fields to CSV
# columns if no columns are specified, and this will be removed.
raise NoColumnsError(
"You must give either a --columns or at least one -c/--column "
"argument")
else:
assert columns
return parsed_args | [
"def",
"parse",
"(",
"parser",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"if",
"not",
"parser",
":",
"parser",
"=",
"make_parser",
"(",
")",
"try",
":",
"parsed_args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
")",
"except",
"SystemExit",
"as",
"err",
":",
"raise",
"CommandLineExit",
"(",
"err",
".",
"code",
")",
"try",
":",
"columns",
"=",
"parsed_args",
".",
"columns",
"except",
"AttributeError",
":",
"columns",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"parsed_args",
".",
"columns",
"=",
"columns",
"for",
"title",
",",
"spec",
"in",
"columns",
".",
"items",
"(",
")",
":",
"if",
"\"pattern\"",
"not",
"in",
"spec",
":",
"raise",
"ColumnWithoutPatternError",
"(",
"'Column \"{0}\" needs a pattern'",
".",
"format",
"(",
"title",
")",
")",
"# Change length-1 patterns into strings (not lists of one string).",
"if",
"len",
"(",
"spec",
"[",
"\"pattern\"",
"]",
")",
"==",
"1",
":",
"spec",
"[",
"\"pattern\"",
"]",
"=",
"spec",
"[",
"\"pattern\"",
"]",
"[",
"0",
"]",
"if",
"columns",
"and",
"parsed_args",
".",
"columns_file",
":",
"raise",
"ColumnsAndColumnsFileError",
"(",
"\"You can't use the --column and --columns options together (yet)\"",
")",
"elif",
"parsed_args",
".",
"columns_file",
"and",
"not",
"columns",
":",
"parsed_args",
".",
"columns",
"=",
"parsed_args",
".",
"columns_file",
"elif",
"(",
"not",
"columns",
")",
"and",
"(",
"not",
"parsed_args",
".",
"columns_file",
")",
":",
"# Crash if no columns specified.",
"# In the future we'll support simply converting all JSON fields to CSV",
"# columns if no columns are specified, and this will be removed.",
"raise",
"NoColumnsError",
"(",
"\"You must give either a --columns or at least one -c/--column \"",
"\"argument\"",
")",
"else",
":",
"assert",
"columns",
"return",
"parsed_args"
] | Parse the command line arguments, return an argparse namespace object.
Other projects can call this function and pass in their own ArgumentParser
object (which should have a losser ArgumentParser from make_parser() above
as a parent) to do the argument parsing and get the result (this does some
custom post-processing, beyond what argparse's parse_args() does). For
example::
parent_parser = losser.cli.make_parser(...)
parser = argparse.ArgumentParser(parents=[parent_parser])
parser.add_argument(...)
try:
parsed_args = losser.cli.parse(parser=parser)
except losser.cli.CommandLineError as err:
...
:raises CommandLineError: If something went wrong during command-line
parsing. If the exception has a non-empty .message attribute it
contains an error message that hasn't been printed to stdout yet,
otherwise any error message has already been printed.
:raises CommandLineExit: If the result of command-line parsing means that
the command should exit without continuing, but this is not because of
an error (for example if the user passed --help). Any help text will
already have been written to stdout, the exit code that the process
should exit with is in the exception's .code attribute.
CommandLineExit is a subclass of CommandLineError above. | [
"Parse",
"the",
"command",
"line",
"arguments",
"return",
"an",
"argparse",
"namespace",
"object",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L214-L282 | train |
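A minimal sketch of the error-handling contract documented above, using only what the docstring guarantees (the parse(args=...) signature, CommandLineExit.code, and an optional CommandLineError.message); nothing else is assumed:
import sys
import losser.cli

try:
    parsed = losser.cli.parse(args=sys.argv[1:])
except losser.cli.CommandLineExit as exit_:
    # e.g. --help was passed; any help text is already on stdout.
    sys.exit(exit_.code)
except losser.cli.CommandLineError as err:
    message = getattr(err, "message", None)
    if message:
        sys.stderr.write(message + "\n")
    sys.exit(1)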
ckan/losser | losser/cli.py | do | def do(parser=None, args=None, in_=None, table_function=None):
"""Read command-line args and stdin, return the result.
Read the command line arguments and the input data from stdin, pass them to
the table() function to do the filter and transform, and return the string
of CSV- or JSON-formatted text that should be written to stdout.
Note that although the output data is returned rather than written to
stdout, this function may write error messages or help text to stdout
(for example if there's an error with the command-line parsing).
:raises CommandLineError: see parse() above for details
"""
in_ = in_ or sys.stdin
table_function = table_function or losser.table
parsed_args = parse(parser=parser, args=args)
# Read the input data from stdin or a file.
if parsed_args.input_data:
input_data = open(parsed_args.input_data, 'r').read()
else:
input_data = in_.read()
dicts = json.loads(input_data)
csv_string = table_function(dicts, parsed_args.columns, csv=True,
pretty=parsed_args.pretty)
return csv_string | python | def do(parser=None, args=None, in_=None, table_function=None):
"""Read command-line args and stdin, return the result.
Read the command line arguments and the input data from stdin, pass them to
the table() function to do the filter and transform, and return the string
of CSV- or JSON-formatted text that should be written to stdout.
Note that although the output data is returned rather than written to
stdout, this function may write error messages or help text to stdout
(for example if there's an error with the command-line parsing).
:raises CommandLineError: see parse() above for details
"""
in_ = in_ or sys.stdin
table_function = table_function or losser.table
parsed_args = parse(parser=parser, args=args)
# Read the input data from stdin or a file.
if parsed_args.input_data:
input_data = open(parsed_args.input_data, 'r').read()
else:
input_data = in_.read()
dicts = json.loads(input_data)
csv_string = table_function(dicts, parsed_args.columns, csv=True,
pretty=parsed_args.pretty)
return csv_string | [
"def",
"do",
"(",
"parser",
"=",
"None",
",",
"args",
"=",
"None",
",",
"in_",
"=",
"None",
",",
"table_function",
"=",
"None",
")",
":",
"in_",
"=",
"in_",
"or",
"sys",
".",
"stdin",
"table_function",
"=",
"table_function",
"or",
"losser",
".",
"table",
"parsed_args",
"=",
"parse",
"(",
"parser",
"=",
"parser",
",",
"args",
"=",
"args",
")",
"# Read the input data from stdin or a file.",
"if",
"parsed_args",
".",
"input_data",
":",
"input_data",
"=",
"open",
"(",
"parsed_args",
".",
"input_data",
",",
"'r'",
")",
".",
"read",
"(",
")",
"else",
":",
"input_data",
"=",
"in_",
".",
"read",
"(",
")",
"dicts",
"=",
"json",
".",
"loads",
"(",
"input_data",
")",
"csv_string",
"=",
"table_function",
"(",
"dicts",
",",
"parsed_args",
".",
"columns",
",",
"csv",
"=",
"True",
",",
"pretty",
"=",
"parsed_args",
".",
"pretty",
")",
"return",
"csv_string"
] | Read command-line args and stdin, return the result.
Read the command line arguments and the input data from stdin, pass them to
the table() function to do the filter and transform, and return the string
of CSV- or JSON-formatted text that should be written to stdout.
Note that although the output data is returned rather than written to
stdout, this function may write error messages or help text to stdout
(for example if there's an error with the command-line parsing).
:raises CommandLineError: see parse() above for details | [
"Read",
"command",
"-",
"line",
"args",
"and",
"stdin",
"return",
"the",
"result",
"."
] | fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f | https://github.com/ckan/losser/blob/fd0832d9fa93cabe9ce9a9153dc923f2cf39cb5f/losser/cli.py#L285-L314 | train |
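A hedged sketch of exercising do() without a real stdin, through the in_ and table_function injection points visible in the signature; the --columns value is a made-up path, and it is an assumption here that the columns file is only opened later rather than at parse time:
import io
import losser.cli

def stub_table(dicts, columns, csv=True, pretty=False):
    # Stand-in for losser.table(): ignore the columns, echo fixed CSV.
    return "title\r\nA dataset\r\n"

out = losser.cli.do(args=["--columns", "columns.json"],
                    in_=io.StringIO('[{"title": "A dataset"}]'),
                    table_function=stub_table)
print(out)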
guaix-ucm/numina | numina/instrument/simulation/atmosphere.py | generate_gaussian_profile | def generate_gaussian_profile(seeing_fwhm):
"""Generate a normalized Gaussian profile from its FWHM"""
FWHM_G = 2 * math.sqrt(2 * math.log(2))
sigma = seeing_fwhm / FWHM_G
amplitude = 1.0 / (2 * math.pi * sigma * sigma)
seeing_model = Gaussian2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
x_stddev=sigma,
y_stddev=sigma)
return seeing_model | python | def generate_gaussian_profile(seeing_fwhm):
"""Generate a normalized Gaussian profile from its FWHM"""
FWHM_G = 2 * math.sqrt(2 * math.log(2))
sigma = seeing_fwhm / FWHM_G
amplitude = 1.0 / (2 * math.pi * sigma * sigma)
seeing_model = Gaussian2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
x_stddev=sigma,
y_stddev=sigma)
return seeing_model | [
"def",
"generate_gaussian_profile",
"(",
"seeing_fwhm",
")",
":",
"FWHM_G",
"=",
"2",
"*",
"math",
".",
"sqrt",
"(",
"2",
"*",
"math",
".",
"log",
"(",
"2",
")",
")",
"sigma",
"=",
"seeing_fwhm",
"/",
"FWHM_G",
"amplitude",
"=",
"1.0",
"/",
"(",
"2",
"*",
"math",
".",
"pi",
"*",
"sigma",
"*",
"sigma",
")",
"seeing_model",
"=",
"Gaussian2D",
"(",
"amplitude",
"=",
"amplitude",
",",
"x_mean",
"=",
"0.0",
",",
"y_mean",
"=",
"0.0",
",",
"x_stddev",
"=",
"sigma",
",",
"y_stddev",
"=",
"sigma",
")",
"return",
"seeing_model"
] | Generate a normalized Gaussian profile from its FWHM | [
"Generate",
"a",
"normalized",
"Gaussian",
"profile",
"from",
"its",
"FWHM"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/atmosphere.py#L65-L75 | train |
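The amplitude makes the profile integrate to one: a circular Gaussian integrates to 2*pi*sigma**2*amplitude, hence amplitude = 1/(2*pi*sigma**2). A quick numerical check, assuming Gaussian2D is astropy's model (the parameter names above match it) and picking grid limits ad hoc:
import math
import numpy as np
from astropy.modeling.models import Gaussian2D

fwhm = 1.0
sigma = fwhm / (2 * math.sqrt(2 * math.log(2)))
model = Gaussian2D(amplitude=1.0 / (2 * math.pi * sigma ** 2),
                   x_mean=0.0, y_mean=0.0, x_stddev=sigma, y_stddev=sigma)

x, y = np.meshgrid(np.linspace(-5, 5, 501), np.linspace(-5, 5, 501))
step = x[0, 1] - x[0, 0]
print(model(x, y).sum() * step ** 2)   # ~1.0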
guaix-ucm/numina | numina/instrument/simulation/atmosphere.py | generate_moffat_profile | def generate_moffat_profile(seeing_fwhm, alpha):
"""Generate a normalized Moffat profile from its FWHM and alpha"""
scale = 2 * math.sqrt(2**(1.0 / alpha) - 1)
gamma = seeing_fwhm / scale
amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
seeing_model = Moffat2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
gamma=gamma,
alpha=alpha)
return seeing_model | python | def generate_moffat_profile(seeing_fwhm, alpha):
"""Generate a normalized Moffat profile from its FWHM and alpha"""
scale = 2 * math.sqrt(2**(1.0 / alpha) - 1)
gamma = seeing_fwhm / scale
amplitude = 1.0 / math.pi * (alpha - 1) / gamma**2
seeing_model = Moffat2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
gamma=gamma,
alpha=alpha)
return seeing_model | [
"def",
"generate_moffat_profile",
"(",
"seeing_fwhm",
",",
"alpha",
")",
":",
"scale",
"=",
"2",
"*",
"math",
".",
"sqrt",
"(",
"2",
"**",
"(",
"1.0",
"/",
"alpha",
")",
"-",
"1",
")",
"gamma",
"=",
"seeing_fwhm",
"/",
"scale",
"amplitude",
"=",
"1.0",
"/",
"math",
".",
"pi",
"*",
"(",
"alpha",
"-",
"1",
")",
"/",
"gamma",
"**",
"2",
"seeing_model",
"=",
"Moffat2D",
"(",
"amplitude",
"=",
"amplitude",
",",
"x_mean",
"=",
"0.0",
",",
"y_mean",
"=",
"0.0",
",",
"gamma",
"=",
"gamma",
",",
"alpha",
"=",
"alpha",
")",
"return",
"seeing_model"
] | Generate a normalized Moffat profile from its FWHM and alpha | [
"Generate",
"a",
"normalized",
"Moffat",
"profile",
"from",
"its",
"FWHM",
"and",
"alpha"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/simulation/atmosphere.py#L78-L89 | train |
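Here the scale factor encodes FWHM = 2*gamma*sqrt(2**(1/alpha) - 1), and since a Moffat profile integrates to pi*gamma**2*amplitude/(alpha - 1) for alpha > 1, the amplitude above normalizes the total flux to one. A numerical check with astropy's Moffat2D, whose centre parameters are named x_0/y_0 rather than the x_mean/y_mean used above (so the snippet presumably relies on a numina-local model):
import math
import numpy as np
from astropy.modeling.models import Moffat2D

fwhm, alpha = 1.0, 2.5
gamma = fwhm / (2 * math.sqrt(2 ** (1.0 / alpha) - 1))
model = Moffat2D(amplitude=(alpha - 1) / (math.pi * gamma ** 2),
                 x_0=0.0, y_0=0.0, gamma=gamma, alpha=alpha)

x, y = np.meshgrid(np.linspace(-20, 20, 2001), np.linspace(-20, 20, 2001))
step = x[0, 1] - x[0, 0]
print(model(x, y).sum() * step ** 2)       # ~1.0 for alpha > 1
print(model(fwhm / 2, 0) / model(0, 0))    # 0.5 at half the FWHM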
druids/django-chamber | chamber/models/__init__.py | field_to_dict | def field_to_dict(field, instance):
"""
Converts a model field to a dictionary
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
return (many_to_many_field_to_dict(field, instance) if isinstance(field, ManyToManyField)
else field.value_from_object(instance)) | python | def field_to_dict(field, instance):
"""
Converts a model field to a dictionary
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
return (many_to_many_field_to_dict(field, instance) if isinstance(field, ManyToManyField)
else field.value_from_object(instance)) | [
"def",
"field_to_dict",
"(",
"field",
",",
"instance",
")",
":",
"# avoid a circular import",
"from",
"django",
".",
"db",
".",
"models",
".",
"fields",
".",
"related",
"import",
"ManyToManyField",
"return",
"(",
"many_to_many_field_to_dict",
"(",
"field",
",",
"instance",
")",
"if",
"isinstance",
"(",
"field",
",",
"ManyToManyField",
")",
"else",
"field",
".",
"value_from_object",
"(",
"instance",
")",
")"
] | Converts a model field to a dictionary | [
"Converts",
"a",
"model",
"field",
"to",
"a",
"dictionary"
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L37-L45 | train |
druids/django-chamber | chamber/models/__init__.py | model_to_dict | def model_to_dict(instance, fields=None, exclude=None):
"""
The same implementation as django model_to_dict but editable fields are allowed
"""
return {
field.name: field_to_dict(field, instance)
for field in chain(instance._meta.concrete_fields, instance._meta.many_to_many) # pylint: disable=W0212
if not should_exclude_field(field, fields, exclude)
} | python | def model_to_dict(instance, fields=None, exclude=None):
"""
The same implementation as django model_to_dict but editable fields are allowed
"""
return {
field.name: field_to_dict(field, instance)
for field in chain(instance._meta.concrete_fields, instance._meta.many_to_many) # pylint: disable=W0212
if not should_exclude_field(field, fields, exclude)
} | [
"def",
"model_to_dict",
"(",
"instance",
",",
"fields",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"return",
"{",
"field",
".",
"name",
":",
"field_to_dict",
"(",
"field",
",",
"instance",
")",
"for",
"field",
"in",
"chain",
"(",
"instance",
".",
"_meta",
".",
"concrete_fields",
",",
"instance",
".",
"_meta",
".",
"many_to_many",
")",
"# pylint: disable=W0212",
"if",
"not",
"should_exclude_field",
"(",
"field",
",",
"fields",
",",
"exclude",
")",
"}"
] | The same implementation as django model_to_dict but editable fields are allowed | [
"The",
"same",
"implementation",
"as",
"django",
"model_to_dict",
"but",
"editable",
"fields",
"are",
"allowed"
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L70-L79 | train |
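A hypothetical usage inside a configured Django project; unlike django.forms.models.model_to_dict, this version keeps non-editable fields. The Article model and its field names are invented for illustration:
from chamber.models import model_to_dict

article = Article.objects.get(pk=1)            # hypothetical model
data = model_to_dict(article, exclude=("id",))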
druids/django-chamber | chamber/models/__init__.py | SmartQuerySet.change_and_save | def change_and_save(self, update_only_changed_fields=False, **changed_fields):
"""
Changes the given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
"""
bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
return self.filter() | python | def change_and_save(self, update_only_changed_fields=False, **changed_fields):
"""
Changes the given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
"""
bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
return self.filter() | [
"def",
"change_and_save",
"(",
"self",
",",
"update_only_changed_fields",
"=",
"False",
",",
"*",
"*",
"changed_fields",
")",
":",
"bulk_change_and_save",
"(",
"self",
",",
"update_only_changed_fields",
"=",
"update_only_changed_fields",
",",
"*",
"*",
"changed_fields",
")",
"return",
"self",
".",
"filter",
"(",
")"
] | Changes the given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset. | [
"Changes",
"a",
"given",
"changed_fields",
"on",
"each",
"object",
"in",
"the",
"queryset",
"saves",
"objects",
"and",
"returns",
"the",
"changed",
"objects",
"in",
"the",
"queryset",
"."
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/models/__init__.py#L258-L264 | train |
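A hypothetical call inside a Django project whose model manager returns SmartQuerySet; the Invoice model and its state field are invented:
Invoice.objects.filter(state="new").change_and_save(
    state="processed", update_only_changed_fields=True)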
guaix-ucm/numina | numina/array/bbox.py | BoundingBox.extent | def extent(self):
"""Helper for matplotlib imshow"""
return (
self.intervals[1].pix1 - 0.5,
self.intervals[1].pix2 - 0.5,
self.intervals[0].pix1 - 0.5,
self.intervals[0].pix2 - 0.5,
) | python | def extent(self):
"""Helper for matplotlib imshow"""
return (
self.intervals[1].pix1 - 0.5,
self.intervals[1].pix2 - 0.5,
self.intervals[0].pix1 - 0.5,
self.intervals[0].pix2 - 0.5,
) | [
"def",
"extent",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"intervals",
"[",
"1",
"]",
".",
"pix1",
"-",
"0.5",
",",
"self",
".",
"intervals",
"[",
"1",
"]",
".",
"pix2",
"-",
"0.5",
",",
"self",
".",
"intervals",
"[",
"0",
"]",
".",
"pix1",
"-",
"0.5",
",",
"self",
".",
"intervals",
"[",
"0",
"]",
".",
"pix2",
"-",
"0.5",
",",
")"
] | Helper for matplotlib imshow | [
"Helper",
"for",
"matplotlib",
"imshow"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/bbox.py#L214-L221 | train |
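extent() yields matplotlib's (left, right, bottom, top) tuple, with the -0.5 shifts placing pixel centres on integer coordinates (intervals[1] is the x axis, intervals[0] the y axis, and pix2 apparently points one past the last pixel). The same convention illustrated without numina:
import numpy as np
import matplotlib.pyplot as plt

data = np.arange(12).reshape(3, 4)             # 4 pixels in x, 3 in y
extent = (0 - 0.5, 4 - 0.5, 0 - 0.5, 3 - 0.5)  # mirrors BoundingBox.extent
plt.imshow(data, origin="lower", extent=extent)
plt.show()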
guaix-ucm/numina | numina/instrument/components/detector.py | DetectorBase.readout | def readout(self):
"""Readout the detector."""
elec = self.simulate_poisson_variate()
elec_pre = self.saturate(elec)
elec_f = self.pre_readout(elec_pre)
adu_r = self.base_readout(elec_f)
adu_p = self.post_readout(adu_r)
self.clean_up()
return adu_p | python | def readout(self):
"""Readout the detector."""
elec = self.simulate_poisson_variate()
elec_pre = self.saturate(elec)
elec_f = self.pre_readout(elec_pre)
adu_r = self.base_readout(elec_f)
adu_p = self.post_readout(adu_r)
self.clean_up()
return adu_p | [
"def",
"readout",
"(",
"self",
")",
":",
"elec",
"=",
"self",
".",
"simulate_poisson_variate",
"(",
")",
"elec_pre",
"=",
"self",
".",
"saturate",
"(",
"elec",
")",
"elec_f",
"=",
"self",
".",
"pre_readout",
"(",
"elec_pre",
")",
"adu_r",
"=",
"self",
".",
"base_readout",
"(",
"elec_f",
")",
"adu_p",
"=",
"self",
".",
"post_readout",
"(",
"adu_r",
")",
"self",
".",
"clean_up",
"(",
")",
"return",
"adu_p"
] | Readout the detector. | [
"Readout",
"the",
"detector",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/instrument/components/detector.py#L94-L109 | train |
guaix-ucm/numina | numina/util/parser.py | parse_arg_line | def parse_arg_line(fargs):
"""parse limited form of arguments of function
in the form a=1, b='c'
as a dictionary
"""
# Convert to literal dict
fargs = fargs.strip()
if fargs == '':
return {}
pairs = [s.strip() for s in fargs.split(',')]
# find first "="
result = []
for p in pairs:
fe = p.find("=")
if fe == -1:
# no equal
raise ValueError("malformed")
key = p[:fe]
val = p[fe + 1:]
tok = "'{}': {}".format(key, val)
result.append(tok)
tokj = ','.join(result)
result = "{{ {0} }}".format(tokj)
state = ast.literal_eval(result)
return state | python | def parse_arg_line(fargs):
"""parse limited form of arguments of function
in the form a=1, b='c'
as a dictionary
"""
# Convert to literal dict
fargs = fargs.strip()
if fargs == '':
return {}
pairs = [s.strip() for s in fargs.split(',')]
# find first "="
result = []
for p in pairs:
fe = p.find("=")
if fe == -1:
# no equal
raise ValueError("malformed")
key = p[:fe]
val = p[fe + 1:]
tok = "'{}': {}".format(key, val)
result.append(tok)
tokj = ','.join(result)
result = "{{ {0} }}".format(tokj)
state = ast.literal_eval(result)
return state | [
"def",
"parse_arg_line",
"(",
"fargs",
")",
":",
"# Convert to literal dict",
"fargs",
"=",
"fargs",
".",
"strip",
"(",
")",
"if",
"fargs",
"==",
"''",
":",
"return",
"{",
"}",
"pairs",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"fargs",
".",
"split",
"(",
"','",
")",
"]",
"# find first \"=\"",
"result",
"=",
"[",
"]",
"for",
"p",
"in",
"pairs",
":",
"fe",
"=",
"p",
".",
"find",
"(",
"\"=\"",
")",
"if",
"fe",
"==",
"-",
"1",
":",
"# no equal",
"raise",
"ValueError",
"(",
"\"malformed\"",
")",
"key",
"=",
"p",
"[",
":",
"fe",
"]",
"val",
"=",
"p",
"[",
"fe",
"+",
"1",
":",
"]",
"tok",
"=",
"\"'{}': {}\"",
".",
"format",
"(",
"key",
",",
"val",
")",
"result",
".",
"append",
"(",
"tok",
")",
"tokj",
"=",
"','",
".",
"join",
"(",
"result",
")",
"result",
"=",
"\"{{ {0} }}\"",
".",
"format",
"(",
"tokj",
")",
"state",
"=",
"ast",
".",
"literal_eval",
"(",
"result",
")",
"return",
"state"
] | parse a limited form of arguments of a function
in the form a=1, b='c'
as a dictionary | [
"parse",
"limited",
"form",
"of",
"arguments",
"of",
"function"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/util/parser.py#L27-L54 | train |
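Concretely, the rewrite into a dict literal behaves like this (splitting on ',' also means values that themselves contain commas fall outside the supported form):
>>> from numina.util.parser import parse_arg_line
>>> parse_arg_line("a=1, b='c'")
{'a': 1, 'b': 'c'}
>>> parse_arg_line("")
{}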
druids/django-chamber | chamber/formatters/__init__.py | natural_number_with_currency | def natural_number_with_currency(number, currency, show_decimal_place=True, use_nbsp=True):
"""
Return a given `number` formatted as a price for humans.
"""
humanized = '{} {}'.format(
numberformat.format(
number=number,
decimal_sep=',',
decimal_pos=2 if show_decimal_place else 0,
grouping=3,
thousand_sep=' ',
force_grouping=True
),
force_text(currency)
)
return mark_safe(humanized.replace(' ', '\u00a0')) if use_nbsp else humanized | python | def natural_number_with_currency(number, currency, show_decimal_place=True, use_nbsp=True):
"""
Return a given `number` formatted as a price for humans.
"""
humanized = '{} {}'.format(
numberformat.format(
number=number,
decimal_sep=',',
decimal_pos=2 if show_decimal_place else 0,
grouping=3,
thousand_sep=' ',
force_grouping=True
),
force_text(currency)
)
return mark_safe(humanized.replace(' ', '\u00a0')) if use_nbsp else humanized | [
"def",
"natural_number_with_currency",
"(",
"number",
",",
"currency",
",",
"show_decimal_place",
"=",
"True",
",",
"use_nbsp",
"=",
"True",
")",
":",
"humanized",
"=",
"'{} {}'",
".",
"format",
"(",
"numberformat",
".",
"format",
"(",
"number",
"=",
"number",
",",
"decimal_sep",
"=",
"','",
",",
"decimal_pos",
"=",
"2",
"if",
"show_decimal_place",
"else",
"0",
",",
"grouping",
"=",
"3",
",",
"thousand_sep",
"=",
"' '",
",",
"force_grouping",
"=",
"True",
")",
",",
"force_text",
"(",
"currency",
")",
")",
"return",
"mark_safe",
"(",
"humanized",
".",
"replace",
"(",
"' '",
",",
"'\\u00a0'",
")",
")",
"if",
"use_nbsp",
"else",
"humanized"
] | Return a given `number` formatted as a price for humans. | [
"Return",
"a",
"given",
"number",
"formatter",
"a",
"price",
"for",
"humans",
"."
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/formatters/__init__.py#L6-L21 | train |
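Expected behaviour with the hard-coded ',' decimal separator and space grouping, inside a configured Django project; the first two outputs assume Django's numberformat truncates rather than rounds extra decimals, and \xa0 is the non-breaking space:
>>> from chamber.formatters import natural_number_with_currency
>>> natural_number_with_currency(1234567.89, "CZK", use_nbsp=False)
'1 234 567,89 CZK'
>>> natural_number_with_currency(1234567.89, "CZK", show_decimal_place=False, use_nbsp=False)
'1 234 567 CZK'
>>> natural_number_with_currency(42, "CZK")
'42,00\xa0CZK'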
guaix-ucm/numina | numina/types/frame.py | DataFrameType.extract_db_info | def extract_db_info(self, obj, keys):
"""Extract tags from serialized file"""
objl = self.convert(obj)
result = super(DataFrameType, self).extract_db_info(objl, keys)
ext = self.datamodel.extractor_map['fits']
if objl:
with objl.open() as hdulist:
for field in keys:
result[field] = ext.extract(field, hdulist)
tags = result['tags']
for field in self.tags_keys:
tags[field] = ext.extract(field, hdulist)
return result
else:
return result | python | def extract_db_info(self, obj, keys):
"""Extract tags from serialized file"""
objl = self.convert(obj)
result = super(DataFrameType, self).extract_db_info(objl, keys)
ext = self.datamodel.extractor_map['fits']
if objl:
with objl.open() as hdulist:
for field in keys:
result[field] = ext.extract(field, hdulist)
tags = result['tags']
for field in self.tags_keys:
tags[field] = ext.extract(field, hdulist)
return result
else:
return result | [
"def",
"extract_db_info",
"(",
"self",
",",
"obj",
",",
"keys",
")",
":",
"objl",
"=",
"self",
".",
"convert",
"(",
"obj",
")",
"result",
"=",
"super",
"(",
"DataFrameType",
",",
"self",
")",
".",
"extract_db_info",
"(",
"objl",
",",
"keys",
")",
"ext",
"=",
"self",
".",
"datamodel",
".",
"extractor_map",
"[",
"'fits'",
"]",
"if",
"objl",
":",
"with",
"objl",
".",
"open",
"(",
")",
"as",
"hdulist",
":",
"for",
"field",
"in",
"keys",
":",
"result",
"[",
"field",
"]",
"=",
"ext",
".",
"extract",
"(",
"field",
",",
"hdulist",
")",
"tags",
"=",
"result",
"[",
"'tags'",
"]",
"for",
"field",
"in",
"self",
".",
"tags_keys",
":",
"tags",
"[",
"field",
"]",
"=",
"ext",
".",
"extract",
"(",
"field",
",",
"hdulist",
")",
"return",
"result",
"else",
":",
"return",
"result"
] | Extract tags from serialized file | [
"Extract",
"tags",
"from",
"serialized",
"file"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/types/frame.py#L101-L119 | train |
guaix-ucm/numina | numina/array/display/iofunctions.py | readc | def readc(prompt, default=None, valid=None, question_mark=True):
"""Return a single character read from keyboard
Parameters
----------
prompt : str
Prompt string.
default : str
Default value.
valid : str
String providing valid characters. If None, all characters are
valid (default).
question_mark : bool
If True, display question mark after prompt.
Returns
-------
cresult : str
Read value.
"""
cresult = None # Avoid PyCharm warning
# question mark
if question_mark:
cquestion_mark = ' ? '
else:
cquestion_mark = ''
# main loop
loop = True
while loop:
# display prompt
if default is None:
print(prompt + cquestion_mark, end='')
sys.stdout.flush()
else:
print(prompt + ' [' + str(default) + ']' + cquestion_mark, end='')
sys.stdout.flush()
# read user's input
cresult = sys.stdin.readline().strip()
if cresult == '' and default is not None:
cresult = str(default)
if len(cresult) == 1:
# check that all the characters are valid
loop = False
if valid is not None:
for c in cresult:
if c not in str(valid):
print('*** Error: invalid characters found.')
print('*** Valid characters are:', valid)
print('*** Try again!')
loop = True
else:
print('*** Error: invalid string length. Try again!')
return cresult | python | def readc(prompt, default=None, valid=None, question_mark=True):
"""Return a single character read from keyboard
Parameters
----------
prompt : str
Prompt string.
default : str
Default value.
valid : str
String providing valid characters. If None, all characters are
valid (default).
question_mark : bool
If True, display question mark after prompt.
Returns
-------
cresult : str
Read value.
"""
cresult = None # Avoid PyCharm warning
# question mark
if question_mark:
cquestion_mark = ' ? '
else:
cquestion_mark = ''
# main loop
loop = True
while loop:
# display prompt
if default is None:
print(prompt + cquestion_mark, end='')
sys.stdout.flush()
else:
print(prompt + ' [' + str(default) + ']' + cquestion_mark, end='')
sys.stdout.flush()
# read user's input
cresult = sys.stdin.readline().strip()
if cresult == '' and default is not None:
cresult = str(default)
if len(cresult) == 1:
# check that all the characters are valid
loop = False
if valid is not None:
for c in cresult:
if c not in str(valid):
print('*** Error: invalid characters found.')
print('*** Valid characters are:', valid)
print('*** Try again!')
loop = True
else:
print('*** Error: invalid string length. Try again!')
return cresult | [
"def",
"readc",
"(",
"prompt",
",",
"default",
"=",
"None",
",",
"valid",
"=",
"None",
",",
"question_mark",
"=",
"True",
")",
":",
"cresult",
"=",
"None",
"# Avoid PyCharm warning",
"# question mark",
"if",
"question_mark",
":",
"cquestion_mark",
"=",
"' ? '",
"else",
":",
"cquestion_mark",
"=",
"''",
"# main loop",
"loop",
"=",
"True",
"while",
"loop",
":",
"# display prompt",
"if",
"default",
"is",
"None",
":",
"print",
"(",
"prompt",
"+",
"cquestion_mark",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"else",
":",
"print",
"(",
"prompt",
"+",
"' ['",
"+",
"str",
"(",
"default",
")",
"+",
"']'",
"+",
"cquestion_mark",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"# read user's input",
"cresult",
"=",
"sys",
".",
"stdin",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"cresult",
"==",
"''",
"and",
"default",
"is",
"not",
"None",
":",
"cresult",
"=",
"str",
"(",
"default",
")",
"if",
"len",
"(",
"cresult",
")",
"==",
"1",
":",
"# check that all the characters are valid",
"loop",
"=",
"False",
"if",
"valid",
"is",
"not",
"None",
":",
"for",
"c",
"in",
"cresult",
":",
"if",
"c",
"not",
"in",
"str",
"(",
"valid",
")",
":",
"print",
"(",
"'*** Error: invalid characters found.'",
")",
"print",
"(",
"'*** Valid characters are:'",
",",
"valid",
")",
"print",
"(",
"'*** Try again!'",
")",
"loop",
"=",
"True",
"else",
":",
"print",
"(",
"'*** Error: invalid string length. Try again!'",
")",
"return",
"cresult"
] | Return a single character read from keyboard
Parameters
----------
prompt : str
Prompt string.
default : str
Default value.
valid : str
String providing valid characters. If None, all characters are
valid (default).
question_mark : bool
If True, display question mark after prompt.
Returns
-------
cresult : str
Read value. | [
"Return",
"a",
"single",
"character",
"read",
"from",
"keyboard"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/iofunctions.py#L7-L67 | train |
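A typical interactive call, sketched: ask for a yes/no answer where a bare RETURN accepts the default 'y':
from numina.array.display.iofunctions import readc

answer = readc("Overwrite existing file (y/n)", default="y", valid="yn")
if answer == "y":
    pass  # proceed with the overwrite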
guaix-ucm/numina | numina/array/display/iofunctions.py | read_value | def read_value(ftype, prompt, default=None, minval=None, maxval=None,
allowed_single_chars=None, question_mark=True):
"""Return value read from keyboard
Parameters
----------
ftype : int() or float()
Function defining the expected type.
prompt : str
Prompt string.
default : int or None
Default value.
minval : int or None
Mininum allowed value.
maxval : int or None
Maximum allowed value.
allowed_single_chars : str
String containing allowed valid characters.
question_mark : bool
If True, display question mark after prompt.
Returns
-------
result : integer, float or str
Integer, float of single character.
"""
# avoid PyCharm warning 'might be referenced before assignment'
result = None
# question mark
if question_mark:
cquestion_mark = ' ? '
else:
cquestion_mark = ''
# check minimum value
if minval is not None:
try:
iminval = ftype(minval)
except ValueError:
raise ValueError("'" + str(minval) + "' cannot " +
"be used as an minval in readi()")
else:
iminval = None
# check maximum value
if maxval is not None:
try:
imaxval = ftype(maxval)
except ValueError:
raise ValueError("'" + str(maxval) + "' cannot " +
"be used as an maxval in readi()")
else:
imaxval = None
# minimum and maximum values
if minval is None and maxval is None:
cminmax = ''
elif minval is None:
cminmax = ' (number <= ' + str(imaxval) + ')'
elif maxval is None:
cminmax = ' (number >= ' + str(iminval) + ')'
else:
cminmax = ' (' + str(minval) + ' <= number <= ' + str(maxval) + ')'
# main loop
loop = True
while loop:
# display prompt
if default is None:
print(prompt + cminmax + cquestion_mark, end='')
sys.stdout.flush()
else:
print(prompt + cminmax + ' [' + str(default) + ']' +
cquestion_mark, end='')
sys.stdout.flush()
# read user's input
cresult = sys.stdin.readline().strip()
if cresult == '' and default is not None:
cresult = str(default)
# if valid allowed single character, return character
if len(cresult) == 1:
if allowed_single_chars is not None:
if cresult in allowed_single_chars:
return cresult
# convert to ftype value
try:
result = ftype(cresult)
except ValueError:
print("*** Error: invalid " + str(ftype) + " value. Try again!")
else:
# check number is within expected range
if minval is None and maxval is None:
loop = False
elif minval is None:
if result <= imaxval:
loop = False
else:
print("*** Error: number out of range. Try again!")
elif maxval is None:
if result >= iminval:
loop = False
else:
print("*** Error: number out of range. Try again!")
else:
if iminval <= result <= imaxval:
loop = False
else:
print("*** Error: number out of range. Try again!")
return result | python | def read_value(ftype, prompt, default=None, minval=None, maxval=None,
allowed_single_chars=None, question_mark=True):
"""Return value read from keyboard
Parameters
----------
ftype : int() or float()
Function defining the expected type.
prompt : str
Prompt string.
default : int or None
Default value.
minval : int or None
Minimum allowed value.
maxval : int or None
Maximum allowed value.
allowed_single_chars : str
String containing allowed valid characters.
question_mark : bool
If True, display question mark after prompt.
Returns
-------
result : integer, float or str
Integer, float or single character.
"""
# avoid PyCharm warning 'might be referenced before assignment'
result = None
# question mark
if question_mark:
cquestion_mark = ' ? '
else:
cquestion_mark = ''
# check minimum value
if minval is not None:
try:
iminval = ftype(minval)
except ValueError:
raise ValueError("'" + str(minval) + "' cannot " +
"be used as an minval in readi()")
else:
iminval = None
# check maximum value
if maxval is not None:
try:
imaxval = ftype(maxval)
except ValueError:
raise ValueError("'" + str(maxval) + "' cannot " +
"be used as an maxval in readi()")
else:
imaxval = None
# minimum and maximum values
if minval is None and maxval is None:
cminmax = ''
elif minval is None:
cminmax = ' (number <= ' + str(imaxval) + ')'
elif maxval is None:
cminmax = ' (number >= ' + str(iminval) + ')'
else:
cminmax = ' (' + str(minval) + ' <= number <= ' + str(maxval) + ')'
# main loop
loop = True
while loop:
# display prompt
if default is None:
print(prompt + cminmax + cquestion_mark, end='')
sys.stdout.flush()
else:
print(prompt + cminmax + ' [' + str(default) + ']' +
cquestion_mark, end='')
sys.stdout.flush()
# read user's input
cresult = sys.stdin.readline().strip()
if cresult == '' and default is not None:
cresult = str(default)
# if valid allowed single character, return character
if len(cresult) == 1:
if allowed_single_chars is not None:
if cresult in allowed_single_chars:
return cresult
# convert to ftype value
try:
result = ftype(cresult)
except ValueError:
print("*** Error: invalid " + str(ftype) + " value. Try again!")
else:
# check number is within expected range
if minval is None and maxval is None:
loop = False
elif minval is None:
if result <= imaxval:
loop = False
else:
print("*** Error: number out of range. Try again!")
elif maxval is None:
if result >= iminval:
loop = False
else:
print("*** Error: number out of range. Try again!")
else:
if iminval <= result <= imaxval:
loop = False
else:
print("*** Error: number out of range. Try again!")
return result | [
"def",
"read_value",
"(",
"ftype",
",",
"prompt",
",",
"default",
"=",
"None",
",",
"minval",
"=",
"None",
",",
"maxval",
"=",
"None",
",",
"allowed_single_chars",
"=",
"None",
",",
"question_mark",
"=",
"True",
")",
":",
"# avoid PyCharm warning 'might be referenced before assignment'",
"result",
"=",
"None",
"# question mark",
"if",
"question_mark",
":",
"cquestion_mark",
"=",
"' ? '",
"else",
":",
"cquestion_mark",
"=",
"''",
"# check minimum value",
"if",
"minval",
"is",
"not",
"None",
":",
"try",
":",
"iminval",
"=",
"ftype",
"(",
"minval",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"'\"",
"+",
"str",
"(",
"minval",
")",
"+",
"\"' cannot \"",
"+",
"\"be used as an minval in readi()\"",
")",
"else",
":",
"iminval",
"=",
"None",
"# check maximum value",
"if",
"maxval",
"is",
"not",
"None",
":",
"try",
":",
"imaxval",
"=",
"ftype",
"(",
"maxval",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"'\"",
"+",
"str",
"(",
"maxval",
")",
"+",
"\"' cannot \"",
"+",
"\"be used as an maxval in readi()\"",
")",
"else",
":",
"imaxval",
"=",
"None",
"# minimum and maximum values",
"if",
"minval",
"is",
"None",
"and",
"maxval",
"is",
"None",
":",
"cminmax",
"=",
"''",
"elif",
"minval",
"is",
"None",
":",
"cminmax",
"=",
"' (number <= '",
"+",
"str",
"(",
"imaxval",
")",
"+",
"')'",
"elif",
"maxval",
"is",
"None",
":",
"cminmax",
"=",
"' (number >= '",
"+",
"str",
"(",
"iminval",
")",
"+",
"')'",
"else",
":",
"cminmax",
"=",
"' ('",
"+",
"str",
"(",
"minval",
")",
"+",
"' <= number <= '",
"+",
"str",
"(",
"maxval",
")",
"+",
"')'",
"# main loop",
"loop",
"=",
"True",
"while",
"loop",
":",
"# display prompt",
"if",
"default",
"is",
"None",
":",
"print",
"(",
"prompt",
"+",
"cminmax",
"+",
"cquestion_mark",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"else",
":",
"print",
"(",
"prompt",
"+",
"cminmax",
"+",
"' ['",
"+",
"str",
"(",
"default",
")",
"+",
"']'",
"+",
"cquestion_mark",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"# read user's input",
"cresult",
"=",
"sys",
".",
"stdin",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"cresult",
"==",
"''",
"and",
"default",
"is",
"not",
"None",
":",
"cresult",
"=",
"str",
"(",
"default",
")",
"# if valid allowed single character, return character",
"if",
"len",
"(",
"cresult",
")",
"==",
"1",
":",
"if",
"allowed_single_chars",
"is",
"not",
"None",
":",
"if",
"cresult",
"in",
"allowed_single_chars",
":",
"return",
"cresult",
"# convert to ftype value",
"try",
":",
"result",
"=",
"ftype",
"(",
"cresult",
")",
"except",
"ValueError",
":",
"print",
"(",
"\"*** Error: invalid \"",
"+",
"str",
"(",
"ftype",
")",
"+",
"\" value. Try again!\"",
")",
"else",
":",
"# check number is within expected range",
"if",
"minval",
"is",
"None",
"and",
"maxval",
"is",
"None",
":",
"loop",
"=",
"False",
"elif",
"minval",
"is",
"None",
":",
"if",
"result",
"<=",
"imaxval",
":",
"loop",
"=",
"False",
"else",
":",
"print",
"(",
"\"*** Error: number out of range. Try again!\"",
")",
"elif",
"maxval",
"is",
"None",
":",
"if",
"result",
">=",
"iminval",
":",
"loop",
"=",
"False",
"else",
":",
"print",
"(",
"\"*** Error: number out of range. Try again!\"",
")",
"else",
":",
"if",
"iminval",
"<=",
"result",
"<=",
"imaxval",
":",
"loop",
"=",
"False",
"else",
":",
"print",
"(",
"\"*** Error: number out of range. Try again!\"",
")",
"return",
"result"
] | Return value read from keyboard
Parameters
----------
ftype : int() or float()
Function defining the expected type.
prompt : str
Prompt string.
default : int or None
Default value.
minval : int or None
Mininum allowed value.
maxval : int or None
Maximum allowed value.
allowed_single_chars : str
String containing allowed valid characters.
question_mark : bool
If True, display question mark after prompt.
Returns
-------
result : integer, float or str
Integer, float or single character. | [
"Return",
"value",
"read",
"from",
"keyboard"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/iofunctions.py#L140-L256 | train |
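For instance, prompting for an integer in [1, 100] while still allowing a single 'q' to escape; a bare RETURN yields the default:
from numina.array.display.iofunctions import read_value

value = read_value(int, "Number of iterations", default=10,
                   minval=1, maxval=100, allowed_single_chars="q")
if value == "q":
    pass  # the user chose to quit instead of entering a number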
guaix-ucm/numina | numina/core/pipeline.py | Pipeline.load_product_object | def load_product_object(self, name):
"""Load product object, according to name"""
product_entry = self.products[name]
product = self._get_base_object(product_entry)
return product | python | def load_product_object(self, name):
"""Load product object, according to name"""
product_entry = self.products[name]
product = self._get_base_object(product_entry)
return product | [
"def",
"load_product_object",
"(",
"self",
",",
"name",
")",
":",
"product_entry",
"=",
"self",
".",
"products",
"[",
"name",
"]",
"product",
"=",
"self",
".",
"_get_base_object",
"(",
"product_entry",
")",
"return",
"product"
] | Load product object according to name | [
"Load",
"product",
"object",
"according",
"to",
"name"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L87-L94 | train |
guaix-ucm/numina | numina/core/pipeline.py | Pipeline.depsolve | def depsolve(self):
"""Load all recipes to search for products"""
# load everything
requires = {}
provides = {}
for mode, r in self.recipes.items():
l = self.load_recipe_object(mode)
for field, vv in l.requirements().items():
if vv.type.isproduct():
name = vv.type.name()
pe = ProductEntry(name, mode, field)
requires[name] = pe
for field, vv in l.products().items():
if vv.type.isproduct():
name = vv.type.name()
pe = ProductEntry(name, mode, field)
provides[name] = pe
return requires, provides | python | def depsolve(self):
"""Load all recipes to search for products"""
# load everything
requires = {}
provides = {}
for mode, r in self.recipes.items():
l = self.load_recipe_object(mode)
for field, vv in l.requirements().items():
if vv.type.isproduct():
name = vv.type.name()
pe = ProductEntry(name, mode, field)
requires[name] = pe
for field, vv in l.products().items():
if vv.type.isproduct():
name = vv.type.name()
pe = ProductEntry(name, mode, field)
provides[name] = pe
return requires, provides | [
"def",
"depsolve",
"(",
"self",
")",
":",
"# load everything",
"requires",
"=",
"{",
"}",
"provides",
"=",
"{",
"}",
"for",
"mode",
",",
"r",
"in",
"self",
".",
"recipes",
".",
"items",
"(",
")",
":",
"l",
"=",
"self",
".",
"load_recipe_object",
"(",
"mode",
")",
"for",
"field",
",",
"vv",
"in",
"l",
".",
"requirements",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"vv",
".",
"type",
".",
"isproduct",
"(",
")",
":",
"name",
"=",
"vv",
".",
"type",
".",
"name",
"(",
")",
"pe",
"=",
"ProductEntry",
"(",
"name",
",",
"mode",
",",
"field",
")",
"requires",
"[",
"name",
"]",
"=",
"pe",
"for",
"field",
",",
"vv",
"in",
"l",
".",
"products",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"vv",
".",
"type",
".",
"isproduct",
"(",
")",
":",
"name",
"=",
"vv",
".",
"type",
".",
"name",
"(",
")",
"pe",
"=",
"ProductEntry",
"(",
"name",
",",
"mode",
",",
"field",
")",
"provides",
"[",
"name",
"]",
"=",
"pe",
"return",
"requires",
",",
"provides"
] | Load all recipes to search for products | [
"Load",
"all",
"recipes",
"to",
"search",
"for",
"products"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L109-L129 | train |
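A sketch of the shape of the two maps depsolve() returns; the product, mode, and field names below are invented:
requires, provides = pipeline.depsolve()
# provides might look like:
#   {'MasterBias': ProductEntry('MasterBias', 'bias', 'master_bias')}
# and requires maps each product name to the mode/field that consumes it.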
guaix-ucm/numina | numina/core/pipeline.py | InstrumentDRP.search_mode_provides | def search_mode_provides(self, product, pipeline='default'):
"""Search the mode that provides a given product"""
pipeline = self.pipelines[pipeline]
for obj, mode, field in self.iterate_mode_provides(self.modes, pipeline):
# extract name from obj
if obj.name() == product:
return ProductEntry(obj.name(), mode.key, field)
else:
raise ValueError('no mode provides %s' % product) | python | def search_mode_provides(self, product, pipeline='default'):
"""Search the mode that provides a given product"""
pipeline = self.pipelines[pipeline]
for obj, mode, field in self.iterate_mode_provides(self.modes, pipeline):
# extract name from obj
if obj.name() == product:
return ProductEntry(obj.name(), mode.key, field)
else:
raise ValueError('no mode provides %s' % product) | [
"def",
"search_mode_provides",
"(",
"self",
",",
"product",
",",
"pipeline",
"=",
"'default'",
")",
":",
"pipeline",
"=",
"self",
".",
"pipelines",
"[",
"pipeline",
"]",
"for",
"obj",
",",
"mode",
",",
"field",
"in",
"self",
".",
"iterate_mode_provides",
"(",
"self",
".",
"modes",
",",
"pipeline",
")",
":",
"# extract name from obj",
"if",
"obj",
".",
"name",
"(",
")",
"==",
"product",
":",
"return",
"ProductEntry",
"(",
"obj",
".",
"name",
"(",
")",
",",
"mode",
".",
"key",
",",
"field",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'no mode provides %s'",
"%",
"product",
")"
] | Search the mode that provides a given product | [
"Search",
"the",
"mode",
"that",
"provides",
"a",
"given",
"product"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L220-L229 | train |
guaix-ucm/numina | numina/core/pipeline.py | InstrumentDRP.select_configuration | def select_configuration(self, obresult):
"""Select instrument configuration based on OB"""
logger = logging.getLogger(__name__)
logger.debug('calling default configuration selector')
# get first possible image
ref = obresult.get_sample_frame()
extr = self.datamodel.extractor_map['fits']
if ref:
# get INSCONF configuration
result = extr.extract('insconf', ref)
if result:
# found the keyword, try to match
logger.debug('found insconf config uuid=%s', result)
# Use insconf as uuid key
if result in self.configurations:
return self.configurations[result]
else:
# Additional check for conf.name
for conf in self.configurations.values():
if conf.name == result:
return conf
else:
raise KeyError('insconf {} does not match any config'.format(result))
# If not, try to match by DATE
date_obs = extr.extract('observation_date', ref)
for key, conf in self.configurations.items():
if key == 'default':
# skip default
continue
if conf.date_end is not None:
upper_t = date_obs < conf.date_end
else:
upper_t = True
if upper_t and (date_obs >= conf.date_start):
logger.debug('found date match, config uuid=%s', key)
return conf
else:
logger.debug('no match, using default configuration')
return self.configurations['default'] | python | def select_configuration(self, obresult):
"""Select instrument configuration based on OB"""
logger = logging.getLogger(__name__)
logger.debug('calling default configuration selector')
# get first possible image
ref = obresult.get_sample_frame()
extr = self.datamodel.extractor_map['fits']
if ref:
# get INSCONF configuration
result = extr.extract('insconf', ref)
if result:
# found the keyword, try to match
logger.debug('found insconf config uuid=%s', result)
# Use insconf as uuid key
if result in self.configurations:
return self.configurations[result]
else:
# Additional check for conf.name
for conf in self.configurations.values():
if conf.name == result:
return conf
else:
raise KeyError('insconf {} does not match any config'.format(result))
# If not, try to match by DATE
date_obs = extr.extract('observation_date', ref)
for key, conf in self.configurations.items():
if key == 'default':
# skip default
continue
if conf.date_end is not None:
upper_t = date_obs < conf.date_end
else:
upper_t = True
if upper_t and (date_obs >= conf.date_start):
logger.debug('found date match, config uuid=%s', key)
return conf
else:
logger.debug('no match, using default configuration')
return self.configurations['default'] | [
"def",
"select_configuration",
"(",
"self",
",",
"obresult",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'calling default configuration selector'",
")",
"# get first possible image",
"ref",
"=",
"obresult",
".",
"get_sample_frame",
"(",
")",
"extr",
"=",
"self",
".",
"datamodel",
".",
"extractor_map",
"[",
"'fits'",
"]",
"if",
"ref",
":",
"# get INSCONF configuration",
"result",
"=",
"extr",
".",
"extract",
"(",
"'insconf'",
",",
"ref",
")",
"if",
"result",
":",
"# found the keyword, try to match",
"logger",
".",
"debug",
"(",
"'found insconf config uuid=%s'",
",",
"result",
")",
"# Use insconf as uuid key",
"if",
"result",
"in",
"self",
".",
"configurations",
":",
"return",
"self",
".",
"configurations",
"[",
"result",
"]",
"else",
":",
"# Additional check for conf.name",
"for",
"conf",
"in",
"self",
".",
"configurations",
".",
"values",
"(",
")",
":",
"if",
"conf",
".",
"name",
"==",
"result",
":",
"return",
"conf",
"else",
":",
"raise",
"KeyError",
"(",
"'insconf {} does not match any config'",
".",
"format",
"(",
"result",
")",
")",
"# If not, try to match by DATE",
"date_obs",
"=",
"extr",
".",
"extract",
"(",
"'observation_date'",
",",
"ref",
")",
"for",
"key",
",",
"conf",
"in",
"self",
".",
"configurations",
".",
"items",
"(",
")",
":",
"if",
"key",
"==",
"'default'",
":",
"# skip default",
"continue",
"if",
"conf",
".",
"date_end",
"is",
"not",
"None",
":",
"upper_t",
"=",
"date_obs",
"<",
"conf",
".",
"date_end",
"else",
":",
"upper_t",
"=",
"True",
"if",
"upper_t",
"and",
"(",
"date_obs",
">=",
"conf",
".",
"date_start",
")",
":",
"logger",
".",
"debug",
"(",
"'found date match, config uuid=%s'",
",",
"key",
")",
"return",
"conf",
"else",
":",
"logger",
".",
"debug",
"(",
"'no match, using default configuration'",
")",
"return",
"self",
".",
"configurations",
"[",
"'default'",
"]"
] | Select instrument configuration based on OB | [
"Select",
"instrument",
"configuration",
"based",
"on",
"OB"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L251-L292 | train |
guaix-ucm/numina | numina/core/pipeline.py | InstrumentDRP.select_profile | def select_profile(self, obresult):
"""Select instrument profile based on OB"""
logger = logging.getLogger(__name__)
logger.debug('calling default profile selector')
# check configuration
insconf = obresult.configuration
if insconf != 'default':
key = insconf
date_obs = None
keyname = 'uuid'
else:
# get first possible image
ref = obresult.get_sample_frame()
if ref is None:
key = obresult.instrument
date_obs = None
keyname = 'name'
else:
extr = self.datamodel.extractor_map['fits']
date_obs = extr.extract('observation_date', ref)
key = extr.extract('insconf', ref)
if key is not None:
keyname = 'uuid'
else:
key = extr.extract('instrument', ref)
keyname = 'name'
return key, date_obs, keyname | python | def select_profile(self, obresult):
"""Select instrument profile based on OB"""
logger = logging.getLogger(__name__)
logger.debug('calling default profile selector')
# check configuration
insconf = obresult.configuration
if insconf != 'default':
key = insconf
date_obs = None
keyname = 'uuid'
else:
# get first possible image
ref = obresult.get_sample_frame()
if ref is None:
key = obresult.instrument
date_obs = None
keyname = 'name'
else:
extr = self.datamodel.extractor_map['fits']
date_obs = extr.extract('observation_date', ref)
key = extr.extract('insconf', ref)
if key is not None:
keyname = 'uuid'
else:
key = extr.extract('instrument', ref)
keyname = 'name'
return key, date_obs, keyname | [
"def",
"select_profile",
"(",
"self",
",",
"obresult",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"'calling default profile selector'",
")",
"# check configuration",
"insconf",
"=",
"obresult",
".",
"configuration",
"if",
"insconf",
"!=",
"'default'",
":",
"key",
"=",
"insconf",
"date_obs",
"=",
"None",
"keyname",
"=",
"'uuid'",
"else",
":",
"# get first possible image",
"ref",
"=",
"obresult",
".",
"get_sample_frame",
"(",
")",
"if",
"ref",
"is",
"None",
":",
"key",
"=",
"obresult",
".",
"instrument",
"date_obs",
"=",
"None",
"keyname",
"=",
"'name'",
"else",
":",
"extr",
"=",
"self",
".",
"datamodel",
".",
"extractor_map",
"[",
"'fits'",
"]",
"date_obs",
"=",
"extr",
".",
"extract",
"(",
"'observation_date'",
",",
"ref",
")",
"key",
"=",
"extr",
".",
"extract",
"(",
"'insconf'",
",",
"ref",
")",
"if",
"key",
"is",
"not",
"None",
":",
"keyname",
"=",
"'uuid'",
"else",
":",
"key",
"=",
"extr",
".",
"extract",
"(",
"'instrument'",
",",
"ref",
")",
"keyname",
"=",
"'name'",
"return",
"key",
",",
"date_obs",
",",
"keyname"
] | Select instrument profile based on OB | [
"Select",
"instrument",
"profile",
"based",
"on",
"OB"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L294-L323 | train |
guaix-ucm/numina | numina/core/pipeline.py | InstrumentDRP.get_recipe_object | def get_recipe_object(self, mode_name, pipeline_name='default'):
"""Build a recipe object from a given mode name"""
active_mode = self.modes[mode_name]
active_pipeline = self.pipelines[pipeline_name]
recipe = active_pipeline.get_recipe_object(active_mode)
return recipe | python | def get_recipe_object(self, mode_name, pipeline_name='default'):
"""Build a recipe object from a given mode name"""
active_mode = self.modes[mode_name]
active_pipeline = self.pipelines[pipeline_name]
recipe = active_pipeline.get_recipe_object(active_mode)
return recipe | [
"def",
"get_recipe_object",
"(",
"self",
",",
"mode_name",
",",
"pipeline_name",
"=",
"'default'",
")",
":",
"active_mode",
"=",
"self",
".",
"modes",
"[",
"mode_name",
"]",
"active_pipeline",
"=",
"self",
".",
"pipelines",
"[",
"pipeline_name",
"]",
"recipe",
"=",
"active_pipeline",
".",
"get_recipe_object",
"(",
"active_mode",
")",
"return",
"recipe"
] | Build a recipe object from a given mode name | [
"Build",
"a",
"recipe",
"object",
"from",
"a",
"given",
"mode",
"name"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/pipeline.py#L325-L330 | train |
guaix-ucm/numina | numina/array/display/pause_debugplot.py | pause_debugplot | def pause_debugplot(debugplot, optional_prompt=None, pltshow=False,
tight_layout=True):
"""Ask the user to press RETURN to continue after plotting.
Parameters
----------
debugplot : int
Determines whether intermediate computations and/or plots
are displayed:
00 : no debug, no plots
01 : no debug, plots without pauses
02 : no debug, plots with pauses
10 : debug, no plots
11 : debug, plots without pauses
12 : debug, plots with pauses
21 : debug, extra plots without pauses
22 : debug, extra plots with pauses
NOTE: negative values are also valid and indicate that a call
to plt.close() is also performed
optional_prompt : string
Optional prompt.
pltshow : bool
If True, a call to plt.show() is also performed.
tight_layout : bool
If True, and pltshow=True, a call to plt.tight_layout() is
also performed.
"""
if debugplot not in DEBUGPLOT_CODES:
raise ValueError('Invalid debugplot value:', debugplot)
if debugplot < 0:
debugplot_ = -debugplot
pltclose = True
else:
debugplot_ = debugplot
pltclose = False
if pltshow:
if debugplot_ in [1, 2, 11, 12, 21, 22]:
if tight_layout:
plt.tight_layout()
if debugplot_ in [1, 11, 21]:
plt.show(block=False)
plt.pause(0.2)
elif debugplot_ in [2, 12, 22]:
print('Press "q" to continue...', end='')
sys.stdout.flush()
plt.show()
print('')
else:
if debugplot_ in [2, 12, 22]:
if optional_prompt is None:
print('Press <RETURN> to continue...', end='')
else:
print(optional_prompt, end='')
sys.stdout.flush()
cdummy = sys.stdin.readline().strip()
if debugplot_ in [1, 2, 11, 12, 21, 22] and pltclose:
plt.close() | python | def pause_debugplot(debugplot, optional_prompt=None, pltshow=False,
tight_layout=True):
"""Ask the user to press RETURN to continue after plotting.
Parameters
----------
debugplot : int
Determines whether intermediate computations and/or plots
are displayed:
00 : no debug, no plots
01 : no debug, plots without pauses
02 : no debug, plots with pauses
10 : debug, no plots
11 : debug, plots without pauses
12 : debug, plots with pauses
21 : debug, extra plots without pauses
22 : debug, extra plots with pauses
NOTE: negative values are also valid and indicate that a call
to plt.close() is also performed
optional_prompt : string
Optional prompt.
pltshow : bool
If True, a call to plt.show() is also performed.
tight_layout : bool
If True, and pltshow=True, a call to plt.tight_layout() is
also performed.
"""
if debugplot not in DEBUGPLOT_CODES:
raise ValueError('Invalid debugplot value:', debugplot)
if debugplot < 0:
debugplot_ = -debugplot
pltclose = True
else:
debugplot_ = debugplot
pltclose = False
if pltshow:
if debugplot_ in [1, 2, 11, 12, 21, 22]:
if tight_layout:
plt.tight_layout()
if debugplot_ in [1, 11, 21]:
plt.show(block=False)
plt.pause(0.2)
elif debugplot_ in [2, 12, 22]:
print('Press "q" to continue...', end='')
sys.stdout.flush()
plt.show()
print('')
else:
if debugplot_ in [2, 12, 22]:
if optional_prompt is None:
print('Press <RETURN> to continue...', end='')
else:
print(optional_prompt, end='')
sys.stdout.flush()
cdummy = sys.stdin.readline().strip()
if debugplot_ in [1, 2, 11, 12, 21, 22] and pltclose:
plt.close() | [
"def",
"pause_debugplot",
"(",
"debugplot",
",",
"optional_prompt",
"=",
"None",
",",
"pltshow",
"=",
"False",
",",
"tight_layout",
"=",
"True",
")",
":",
"if",
"debugplot",
"not",
"in",
"DEBUGPLOT_CODES",
":",
"raise",
"ValueError",
"(",
"'Invalid debugplot value:'",
",",
"debugplot",
")",
"if",
"debugplot",
"<",
"0",
":",
"debugplot_",
"=",
"-",
"debugplot",
"pltclose",
"=",
"True",
"else",
":",
"debugplot_",
"=",
"debugplot",
"pltclose",
"=",
"False",
"if",
"pltshow",
":",
"if",
"debugplot_",
"in",
"[",
"1",
",",
"2",
",",
"11",
",",
"12",
",",
"21",
",",
"22",
"]",
":",
"if",
"tight_layout",
":",
"plt",
".",
"tight_layout",
"(",
")",
"if",
"debugplot_",
"in",
"[",
"1",
",",
"11",
",",
"21",
"]",
":",
"plt",
".",
"show",
"(",
"block",
"=",
"False",
")",
"plt",
".",
"pause",
"(",
"0.2",
")",
"elif",
"debugplot_",
"in",
"[",
"2",
",",
"12",
",",
"22",
"]",
":",
"print",
"(",
"'Press \"q\" to continue...'",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"plt",
".",
"show",
"(",
")",
"print",
"(",
"''",
")",
"else",
":",
"if",
"debugplot_",
"in",
"[",
"2",
",",
"12",
",",
"22",
"]",
":",
"if",
"optional_prompt",
"is",
"None",
":",
"print",
"(",
"'Press <RETURN> to continue...'",
",",
"end",
"=",
"''",
")",
"else",
":",
"print",
"(",
"optional_prompt",
",",
"end",
"=",
"''",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"cdummy",
"=",
"sys",
".",
"stdin",
".",
"readline",
"(",
")",
".",
"strip",
"(",
")",
"if",
"debugplot_",
"in",
"[",
"1",
",",
"2",
",",
"11",
",",
"12",
",",
"21",
",",
"22",
"]",
"and",
"pltclose",
":",
"plt",
".",
"close",
"(",
")"
] | Ask the user to press RETURN to continue after plotting.
Parameters
----------
debugplot : int
Determines whether intermediate computations and/or plots
are displayed:
00 : no debug, no plots
01 : no debug, plots without pauses
02 : no debug, plots with pauses
10 : debug, no plots
11 : debug, plots without pauses
12 : debug, plots with pauses
21 : debug, extra plots without pauses
22 : debug, extra plots with pauses
NOTE: negative values are also valid and indicate that a call
to plt.close() is also performed
optional_prompt : string
Optional prompt.
pltshow : bool
If True, a call to plt.show() is also performed.
tight_layout : bool
If True, and pltshow=True, a call to plt.tight_layout() is
also performed. | [
"Ask",
"the",
"user",
"to",
"press",
"RETURN",
"to",
"continue",
"after",
"plotting",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/pause_debugplot.py#L21-L82 | train |
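A sketch of the usual calling pattern: build a plot, then let the debugplot code decide whether to show it, pause, and/or close it (12 means debug plus plots with pauses; -12 would additionally close the figure):
import numpy as np
import matplotlib.pyplot as plt
from numina.array.display.pause_debugplot import pause_debugplot

debugplot = 12
plt.plot(np.arange(10))
pause_debugplot(debugplot, pltshow=True)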
guaix-ucm/numina | numina/array/mode.py | mode_half_sample | def mode_half_sample(a, is_sorted=False):
'''
Estimate the mode using the Half Sample mode.
A method to estimate the mode, as described in
D. R. Bickel and R. FrΓΌhwirth (contributed equally),
"On a fast, robust estimator of the mode: Comparisons to other
robust estimators with applications,"
Computational Statistics and Data Analysis 50, 3500-3530 (2006).
Example
=======
>> import numpy as np
>> np.random.seed(1392838)
>> a = np.random.normal(1000, 200, size=1000)
>> a[:100] = np.random.normal(2000, 300, size=100)
>> b = np.sort(a)
>> mode_half_sample(b, is_sorted=True)
1041.9327885039545
'''
a = np.asanyarray(a)
if not is_sorted:
sdata = np.sort(a)
else:
sdata = a
n = len(sdata)
if n == 1:
return sdata[0]
elif n == 2:
return 0.5 * (sdata[0] + sdata[1])
elif n == 3:
ind = -sdata[0] + 2 * sdata[1] - sdata[2]
if ind < 0:
return 0.5 * (sdata[0] + sdata[1])
elif ind > 0:
return 0.5 * (sdata[1] + sdata[2])
else:
return sdata[1]
else:
N = int(math.ceil(n / 2.0))
w = sdata[(N-1):] - sdata[:(n-N+1)]
ar = w.argmin()
return mode_half_sample(sdata[ar:ar+N], is_sorted=True) | python | def mode_half_sample(a, is_sorted=False):
'''
Estimate the mode using the Half Sample mode.
A method to estimate the mode, as described in
D. R. Bickel and R. Frühwirth (contributed equally),
"On a fast, robust estimator of the mode: Comparisons to other
robust estimators with applications,"
Computational Statistics and Data Analysis 50, 3500-3530 (2006).
Example
=======
>> import numpy as np
>> np.random.seed(1392838)
>> a = np.random.normal(1000, 200, size=1000)
>> a[:100] = np.random.normal(2000, 300, size=100)
>> b = np.sort(a)
>> mode_half_sample(b, is_sorted=True)
1041.9327885039545
'''
a = np.asanyarray(a)
if not is_sorted:
sdata = np.sort(a)
else:
sdata = a
n = len(sdata)
if n == 1:
return sdata[0]
elif n == 2:
return 0.5 * (sdata[0] + sdata[1])
elif n == 3:
ind = -sdata[0] + 2 * sdata[1] - sdata[2]
if ind < 0:
return 0.5 * (sdata[0] + sdata[1])
elif ind > 0:
return 0.5 * (sdata[1] + sdata[2])
else:
return sdata[1]
else:
N = int(math.ceil(n / 2.0))
w = sdata[(N-1):] - sdata[:(n-N+1)]
ar = w.argmin()
return mode_half_sample(sdata[ar:ar+N], is_sorted=True) | [
"def",
"mode_half_sample",
"(",
"a",
",",
"is_sorted",
"=",
"False",
")",
":",
"a",
"=",
"np",
".",
"asanyarray",
"(",
"a",
")",
"if",
"not",
"is_sorted",
":",
"sdata",
"=",
"np",
".",
"sort",
"(",
"a",
")",
"else",
":",
"sdata",
"=",
"a",
"n",
"=",
"len",
"(",
"sdata",
")",
"if",
"n",
"==",
"1",
":",
"return",
"sdata",
"[",
"0",
"]",
"elif",
"n",
"==",
"2",
":",
"return",
"0.5",
"*",
"(",
"sdata",
"[",
"0",
"]",
"+",
"sdata",
"[",
"1",
"]",
")",
"elif",
"n",
"==",
"3",
":",
"ind",
"=",
"-",
"sdata",
"[",
"0",
"]",
"+",
"2",
"*",
"sdata",
"[",
"1",
"]",
"-",
"sdata",
"[",
"2",
"]",
"if",
"ind",
"<",
"0",
":",
"return",
"0.5",
"*",
"(",
"sdata",
"[",
"0",
"]",
"+",
"sdata",
"[",
"1",
"]",
")",
"elif",
"ind",
">",
"0",
":",
"return",
"0.5",
"*",
"(",
"sdata",
"[",
"1",
"]",
"+",
"sdata",
"[",
"2",
"]",
")",
"else",
":",
"return",
"sdata",
"[",
"1",
"]",
"else",
":",
"N",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"n",
"/",
"2.0",
")",
")",
"w",
"=",
"sdata",
"[",
"(",
"N",
"-",
"1",
")",
":",
"]",
"-",
"sdata",
"[",
":",
"(",
"n",
"-",
"N",
"+",
"1",
")",
"]",
"ar",
"=",
"w",
".",
"argmin",
"(",
")",
"return",
"mode_half_sample",
"(",
"sdata",
"[",
"ar",
":",
"ar",
"+",
"N",
"]",
",",
"is_sorted",
"=",
"True",
")"
] | Estimate the mode using the Half Sample mode.
A method to estimate the mode, as described in
D. R. Bickel and R. Frühwirth (contributed equally),
"On a fast, robust estimator of the mode: Comparisons to other
robust estimators with applications,"
Computational Statistics and Data Analysis 50, 3500-3530 (2006).
Example
=======
>> import numpy as np
>> np.random.seed(1392838)
>> a = np.random.normal(1000, 200, size=1000)
>> a[:100] = np.random.normal(2000, 300, size=100)
>> b = np.sort(a)
>> mode_half_sample(b, is_sorted=True)
1041.9327885039545 | [
"Estimate",
"the",
"mode",
"using",
"the",
"Half",
"Sample",
"mode",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/mode.py#L21-L69 | train |
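A short sketch contrasting the half-sample mode with the mean on contaminated data; the printed values are illustrative, not exact.

import numpy as np
from numina.array.mode import mode_half_sample

np.random.seed(0)
a = np.concatenate([np.random.normal(10.0, 1.0, size=900),
                    np.random.normal(50.0, 5.0, size=100)])
# The 10% contaminating population drags the mean far above the main
# peak, while the recursive half-sample windows home in on it.
print(a.mean())             # roughly 14, pulled up by the outliers
print(mode_half_sample(a))  # close to 10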
guaix-ucm/numina | numina/array/display/overplot_ds9reg.py | overplot_ds9reg | def overplot_ds9reg(filename, ax):
"""Overplot a ds9 region file.
Parameters
----------
filename : str
File name of the ds9 region file.
ax : matplotlib axes instance
Matplotlib axes instance.
"""
# read ds9 region file
with open(filename) as f:
file_content = f.read().splitlines()
# check first line
first_line = file_content[0]
if "# Region file format: DS9" not in first_line:
raise ValueError("Unrecognized ds9 region file format")
for line in file_content:
if line[0:4] == "line":
line_fields = line.split()
x1 = float(line_fields[1])
y1 = float(line_fields[2])
x2 = float(line_fields[3])
y2 = float(line_fields[4])
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.plot([x1, x2], [y1, y2], '-', color=color)
elif line[0:4] == "text":
line_fields = line.split()
x0 = float(line_fields[1])
y0 = float(line_fields[2])
text = line_fields[3][1:-1]
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.text(x0, y0, text, fontsize=8,
bbox=dict(boxstyle="round,pad=0.1", fc="white", ec="grey", ),
color=color, fontweight='bold', backgroundcolor='white',
ha='center')
else:
# ignore
pass | python | def overplot_ds9reg(filename, ax):
"""Overplot a ds9 region file.
Parameters
----------
filename : str
File name of the ds9 region file.
ax : matplotlib axes instance
Matplotlib axes instance.
"""
# read ds9 region file
with open(filename) as f:
file_content = f.read().splitlines()
# check first line
first_line = file_content[0]
if "# Region file format: DS9" not in first_line:
raise ValueError("Unrecognized ds9 region file format")
for line in file_content:
if line[0:4] == "line":
line_fields = line.split()
x1 = float(line_fields[1])
y1 = float(line_fields[2])
x2 = float(line_fields[3])
y2 = float(line_fields[4])
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.plot([x1, x2], [y1, y2], '-', color=color)
elif line[0:4] == "text":
line_fields = line.split()
x0 = float(line_fields[1])
y0 = float(line_fields[2])
text = line_fields[3][1:-1]
if "color" in line:
i = line.find("color=")
color = line[i+6:i+13]
else:
color = "green"
ax.text(x0, y0, text, fontsize=8,
bbox=dict(boxstyle="round,pad=0.1", fc="white", ec="grey", ),
color=color, fontweight='bold', backgroundcolor='white',
ha='center')
else:
# ignore
pass | [
"def",
"overplot_ds9reg",
"(",
"filename",
",",
"ax",
")",
":",
"# read ds9 region file",
"with",
"open",
"(",
"filename",
")",
"as",
"f",
":",
"file_content",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"# check first line",
"first_line",
"=",
"file_content",
"[",
"0",
"]",
"if",
"\"# Region file format: DS9\"",
"not",
"in",
"first_line",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized ds9 region file format\"",
")",
"for",
"line",
"in",
"file_content",
":",
"if",
"line",
"[",
"0",
":",
"4",
"]",
"==",
"\"line\"",
":",
"line_fields",
"=",
"line",
".",
"split",
"(",
")",
"x1",
"=",
"float",
"(",
"line_fields",
"[",
"1",
"]",
")",
"y1",
"=",
"float",
"(",
"line_fields",
"[",
"2",
"]",
")",
"x2",
"=",
"float",
"(",
"line_fields",
"[",
"3",
"]",
")",
"y2",
"=",
"float",
"(",
"line_fields",
"[",
"4",
"]",
")",
"if",
"\"color\"",
"in",
"line",
":",
"i",
"=",
"line",
".",
"find",
"(",
"\"color=\"",
")",
"color",
"=",
"line",
"[",
"i",
"+",
"6",
":",
"i",
"+",
"13",
"]",
"else",
":",
"color",
"=",
"\"green\"",
"ax",
".",
"plot",
"(",
"[",
"x1",
",",
"x2",
"]",
",",
"[",
"y1",
",",
"y2",
"]",
",",
"'-'",
",",
"color",
"=",
"color",
")",
"elif",
"line",
"[",
"0",
":",
"4",
"]",
"==",
"\"text\"",
":",
"line_fields",
"=",
"line",
".",
"split",
"(",
")",
"x0",
"=",
"float",
"(",
"line_fields",
"[",
"1",
"]",
")",
"y0",
"=",
"float",
"(",
"line_fields",
"[",
"2",
"]",
")",
"text",
"=",
"line_fields",
"[",
"3",
"]",
"[",
"1",
":",
"-",
"1",
"]",
"if",
"\"color\"",
"in",
"line",
":",
"i",
"=",
"line",
".",
"find",
"(",
"\"color=\"",
")",
"color",
"=",
"line",
"[",
"i",
"+",
"6",
":",
"i",
"+",
"13",
"]",
"else",
":",
"color",
"=",
"\"green\"",
"ax",
".",
"text",
"(",
"x0",
",",
"y0",
",",
"text",
",",
"fontsize",
"=",
"8",
",",
"bbox",
"=",
"dict",
"(",
"boxstyle",
"=",
"\"round,pad=0.1\"",
",",
"fc",
"=",
"\"white\"",
",",
"ec",
"=",
"\"grey\"",
",",
")",
",",
"color",
"=",
"color",
",",
"fontweight",
"=",
"'bold'",
",",
"backgroundcolor",
"=",
"'white'",
",",
"ha",
"=",
"'center'",
")",
"else",
":",
"# ignore",
"pass"
] | Overplot a ds9 region file.
Parameters
----------
filename : str
File name of the ds9 region file.
ax : matplotlib axes instance
Matplotlib axes instance. | [
"Overplot",
"a",
"ds9",
"region",
"file",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/display/overplot_ds9reg.py#L14-L64 | train |
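A self-contained sketch that writes a tiny region file in the whitespace-separated layout this parser expects (standard ds9 files use the line(x1,y1,x2,y2) syntax, which this reader does not handle) and overplots it:

import matplotlib.pyplot as plt
from numina.array.display.overplot_ds9reg import overplot_ds9reg

# minimal file in the format the parser above expects
with open('demo.reg', 'w') as f:
    f.write('# Region file format: DS9 version 4.1\n')
    f.write('line 10 10 90 90 # color=#ff0000\n')
    f.write('text 50 55 {label}\n')

fig, ax = plt.subplots()
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
overplot_ds9reg('demo.reg', ax)
plt.show()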
guaix-ucm/numina | numina/array/peaks/peakdet.py | find_peaks_indexes | def find_peaks_indexes(arr, window_width=5, threshold=0.0, fpeak=0):
"""Find indexes of peaks in a 1d array.
Note that window_width must be an odd number. The function imposes that the
fluxes in the window_width /2 points to the left (and right) of the peak
decrease monotonically as one moves away from the peak, except that
it allows fpeak constant values around the peak.
Parameters
----------
arr : 1d numpy array
Input 1D spectrum.
window_width : int
Width of the window where the peak must be found. This number must be
odd.
threshold : float
Minimum signal in the peak (optional).
fpeak : int
Number of equal values around the peak
Returns
-------
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks have been found.
"""
_check_window_width(window_width)
if (fpeak < 0 or fpeak + 1 >= window_width):
raise ValueError('fpeak must be in the range 0 to window_width - 2')
kernel_peak = kernel_peak_function(threshold, fpeak)
out = generic_filter(arr, kernel_peak, window_width, mode="reflect")
result, = numpy.nonzero(out)
return filter_array_margins(arr, result, window_width) | python | def find_peaks_indexes(arr, window_width=5, threshold=0.0, fpeak=0):
"""Find indexes of peaks in a 1d array.
Note that window_width must be an odd number. The function imposes that the
fluxes in the window_width /2 points to the left (and right) of the peak
decrease monotonically as one moves away from the peak, except that
it allows fpeak constant values around the peak.
Parameters
----------
arr : 1d numpy array
Input 1D spectrum.
window_width : int
Width of the window where the peak must be found. This number must be
odd.
threshold : float
Minimum signal in the peak (optional).
fpeak : int
Number of equal values around the peak
Returns
-------
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks have been found.
"""
_check_window_width(window_width)
if (fpeak < 0 or fpeak + 1 >= window_width):
raise ValueError('fpeak must be in the range 0 to window_width - 2')
kernel_peak = kernel_peak_function(threshold, fpeak)
out = generic_filter(arr, kernel_peak, window_width, mode="reflect")
result, = numpy.nonzero(out)
return filter_array_margins(arr, result, window_width) | [
"def",
"find_peaks_indexes",
"(",
"arr",
",",
"window_width",
"=",
"5",
",",
"threshold",
"=",
"0.0",
",",
"fpeak",
"=",
"0",
")",
":",
"_check_window_width",
"(",
"window_width",
")",
"if",
"(",
"fpeak",
"<",
"0",
"or",
"fpeak",
"+",
"1",
">=",
"window_width",
")",
":",
"raise",
"ValueError",
"(",
"'fpeak must be in the range 0- window_width - 2'",
")",
"kernel_peak",
"=",
"kernel_peak_function",
"(",
"threshold",
",",
"fpeak",
")",
"out",
"=",
"generic_filter",
"(",
"arr",
",",
"kernel_peak",
",",
"window_width",
",",
"mode",
"=",
"\"reflect\"",
")",
"result",
",",
"=",
"numpy",
".",
"nonzero",
"(",
"out",
")",
"return",
"filter_array_margins",
"(",
"arr",
",",
"result",
",",
"window_width",
")"
] | Find indexes of peaks in a 1d array.
Note that window_width must be an odd number. The function imposes that the
fluxes in the window_width /2 points to the left (and right) of the peak
decrease monotonically as one moves away from the peak, except that
it allows fpeak constant values around the peak.
Parameters
----------
arr : 1d numpy array
Input 1D spectrum.
window_width : int
Width of the window where the peak must be found. This number must be
odd.
threshold : float
Minimum signal in the peak (optional).
fpeak : int
Number of equal values around the peak
Returns
-------
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks have been found. | [
"Find",
"indexes",
"of",
"peaks",
"in",
"a",
"1d",
"array",
"."
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/peaks/peakdet.py#L61-L98 | train |
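A sketch with a synthetic two-line spectrum; the Gaussian profiles satisfy the monotonic-decrease condition inside the window:

import numpy as np
from numina.array.peaks.peakdet import find_peaks_indexes

x = np.arange(100, dtype=float)
spectrum = (np.exp(-0.5 * ((x - 30.0) / 2.0) ** 2) +
            2.0 * np.exp(-0.5 * ((x - 70.0) / 2.0) ** 2))
ipeaks = find_peaks_indexes(spectrum, window_width=5, threshold=0.1)
print(ipeaks)  # expected: array([30, 70])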
guaix-ucm/numina | numina/array/peaks/peakdet.py | refine_peaks | def refine_peaks(arr, ipeaks, window_width):
"""Refine the peak location previously found by find_peaks_indexes
Parameters
----------
arr : 1d numpy array, float
Input 1D spectrum.
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks were initially found.
window_width : int
Width of the window where the peak must be found.
Returns
-------
xc, yc: tuple
X-coordinates in which the refined peaks have been found,
interpolated Y-coordinates
"""
_check_window_width(window_width)
step = window_width // 2
ipeaks = filter_array_margins(arr, ipeaks, window_width)
winoff = numpy.arange(-step, step+1, dtype='int')
peakwin = ipeaks[:, numpy.newaxis] + winoff
ycols = arr[peakwin]
ww = return_weights(window_width)
coff2 = numpy.dot(ww, ycols.T)
uc = -0.5 * coff2[1] / coff2[2]
yc = coff2[0] + uc * (coff2[1] + coff2[2] * uc)
xc = ipeaks + 0.5 * (window_width-1) * uc
return xc, yc | python | def refine_peaks(arr, ipeaks, window_width):
"""Refine the peak location previously found by find_peaks_indexes
Parameters
----------
arr : 1d numpy array, float
Input 1D spectrum.
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks were initially found.
window_width : int
Width of the window where the peak must be found.
Returns
-------
xc, yc: tuple
X-coordinates in which the refined peaks have been found,
interpolated Y-coordinates
"""
_check_window_width(window_width)
step = window_width // 2
ipeaks = filter_array_margins(arr, ipeaks, window_width)
winoff = numpy.arange(-step, step+1, dtype='int')
peakwin = ipeaks[:, numpy.newaxis] + winoff
ycols = arr[peakwin]
ww = return_weights(window_width)
coff2 = numpy.dot(ww, ycols.T)
uc = -0.5 * coff2[1] / coff2[2]
yc = coff2[0] + uc * (coff2[1] + coff2[2] * uc)
xc = ipeaks + 0.5 * (window_width-1) * uc
return xc, yc | [
"def",
"refine_peaks",
"(",
"arr",
",",
"ipeaks",
",",
"window_width",
")",
":",
"_check_window_width",
"(",
"window_width",
")",
"step",
"=",
"window_width",
"//",
"2",
"ipeaks",
"=",
"filter_array_margins",
"(",
"arr",
",",
"ipeaks",
",",
"window_width",
")",
"winoff",
"=",
"numpy",
".",
"arange",
"(",
"-",
"step",
",",
"step",
"+",
"1",
",",
"dtype",
"=",
"'int'",
")",
"peakwin",
"=",
"ipeaks",
"[",
":",
",",
"numpy",
".",
"newaxis",
"]",
"+",
"winoff",
"ycols",
"=",
"arr",
"[",
"peakwin",
"]",
"ww",
"=",
"return_weights",
"(",
"window_width",
")",
"coff2",
"=",
"numpy",
".",
"dot",
"(",
"ww",
",",
"ycols",
".",
"T",
")",
"uc",
"=",
"-",
"0.5",
"*",
"coff2",
"[",
"1",
"]",
"/",
"coff2",
"[",
"2",
"]",
"yc",
"=",
"coff2",
"[",
"0",
"]",
"+",
"uc",
"*",
"(",
"coff2",
"[",
"1",
"]",
"+",
"coff2",
"[",
"2",
"]",
"*",
"uc",
")",
"xc",
"=",
"ipeaks",
"+",
"0.5",
"*",
"(",
"window_width",
"-",
"1",
")",
"*",
"uc",
"return",
"xc",
",",
"yc"
] | Refine the peak location previously found by find_peaks_indexes
Parameters
----------
arr : 1d numpy array, float
Input 1D spectrum.
ipeaks : 1d numpy array (int)
Indices of the input array arr in which the peaks were initially found.
window_width : int
Width of the window where the peak must be found.
Returns
-------
xc, yc: tuple
X-coordinates in which the refined peaks have been found,
interpolated Y-coordinates | [
"Refine",
"the",
"peak",
"location",
"previously",
"found",
"by",
"find_peaks_indexes"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/peaks/peakdet.py#L137-L174 | train |
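The two functions are designed to be chained: integer candidates from find_peaks_indexes are refined to sub-pixel positions by the parabolic fit above. A sketch:

import numpy as np
from numina.array.peaks.peakdet import find_peaks_indexes, refine_peaks

x = np.arange(200, dtype=float)
spectrum = np.exp(-0.5 * ((x - 80.3) / 3.0) ** 2)
ipeaks = find_peaks_indexes(spectrum, window_width=7, threshold=0.1)
xc, yc = refine_peaks(spectrum, ipeaks, window_width=7)
print(ipeaks, xc)  # integer candidate at 80, refined position near 80.3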
guaix-ucm/numina | numina/user/clirun.py | complete_config | def complete_config(config):
"""Complete config with default values"""
if not config.has_section('run'):
config.add_section('run')
values = {
'basedir': os.getcwd(),
'task_control': 'control.yaml',
}
for k, v in values.items():
if not config.has_option('run', k):
config.set('run', k, v)
return config | python | def complete_config(config):
"""Complete config with default values"""
if not config.has_section('run'):
config.add_section('run')
values = {
'basedir': os.getcwd(),
'task_control': 'control.yaml',
}
for k, v in values.items():
if not config.has_option('run', k):
config.set('run', k, v)
return config | [
"def",
"complete_config",
"(",
"config",
")",
":",
"if",
"not",
"config",
".",
"has_section",
"(",
"'run'",
")",
":",
"config",
".",
"add_section",
"(",
"'run'",
")",
"values",
"=",
"{",
"'basedir'",
":",
"os",
".",
"getcwd",
"(",
")",
",",
"'task_control'",
":",
"'control.yaml'",
",",
"}",
"for",
"k",
",",
"v",
"in",
"values",
".",
"items",
"(",
")",
":",
"if",
"not",
"config",
".",
"has_option",
"(",
"'run'",
",",
"k",
")",
":",
"config",
".",
"set",
"(",
"'run'",
",",
"k",
",",
"v",
")",
"return",
"config"
] | Complete config with default values | [
"Complete",
"config",
"with",
"default",
"values"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/user/clirun.py#L17-L32 | train |
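A quick sketch of the defaulting behaviour with an empty parser:

try:
    import configparser                   # Python 3
except ImportError:
    import ConfigParser as configparser   # Python 2

from numina.user.clirun import complete_config

config = complete_config(configparser.ConfigParser())
print(config.get('run', 'task_control'))  # 'control.yaml'
print(config.get('run', 'basedir'))       # current working directory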
guaix-ucm/numina | numina/array/recenter.py | centering_centroid | def centering_centroid(data, xi, yi, box, nloop=10, toldist=1e-3,
maxdist=10.0):
'''
returns x, y, background, status, message
status is:
* 0: not recentering
* 1: recentering successful
* 2: maximum distance reached
* 3: not converged
'''
# Store original center
cxy = (xi, yi)
origin = (xi, yi)
# initial background
back = 0.0
if nloop == 0:
return xi, yi, 0.0, 0, 'not recentering'
for i in range(nloop):
nxy, back = _centering_centroid_loop_xy(data, cxy, box)
# _logger.debug('new center is %s', ncenter)
# if we are too far away from the initial point, break
dst = distance.euclidean(origin, nxy)
if dst > maxdist:
msg = 'maximum distance (%5.2f) from origin reached' % maxdist
return cxy[0], cxy[1], back, 2, msg
# check convergence
dst = distance.euclidean(nxy, cxy)
if dst < toldist:
return nxy[0], nxy[1], back, 1, 'converged in iteration %i' % i
else:
cxy = nxy
return nxy[0], nxy[1], back, 3, 'not converged in %i iterations' % nloop | python | def centering_centroid(data, xi, yi, box, nloop=10, toldist=1e-3,
maxdist=10.0):
'''
returns x, y, background, status, message
status is:
* 0: not recentering
* 1: recentering successful
* 2: maximum distance reached
* 3: not converged
'''
# Store original center
cxy = (xi, yi)
origin = (xi, yi)
# initial background
back = 0.0
if nloop == 0:
return xi, yi, 0.0, 0, 'not recentering'
for i in range(nloop):
nxy, back = _centering_centroid_loop_xy(data, cxy, box)
# _logger.debug('new center is %s', ncenter)
# if we are too far away from the initial point, break
dst = distance.euclidean(origin, nxy)
if dst > maxdist:
msg = 'maximum distance (%5.2f) from origin reached' % maxdist
return cxy[0], cxy[1], back, 2, msg
# check convergence
dst = distance.euclidean(nxy, cxy)
if dst < toldist:
return nxy[0], nxy[1], back, 1, 'converged in iteration %i' % i
else:
cxy = nxy
return nxy[0], nxy[1], back, 3, 'not converged in %i iterations' % nloop | [
"def",
"centering_centroid",
"(",
"data",
",",
"xi",
",",
"yi",
",",
"box",
",",
"nloop",
"=",
"10",
",",
"toldist",
"=",
"1e-3",
",",
"maxdist",
"=",
"10.0",
")",
":",
"# Store original center",
"cxy",
"=",
"(",
"xi",
",",
"yi",
")",
"origin",
"=",
"(",
"xi",
",",
"yi",
")",
"# initial background",
"back",
"=",
"0.0",
"if",
"nloop",
"==",
"0",
":",
"return",
"xi",
",",
"yi",
",",
"0.0",
",",
"0",
",",
"'not recentering'",
"for",
"i",
"in",
"range",
"(",
"nloop",
")",
":",
"nxy",
",",
"back",
"=",
"_centering_centroid_loop_xy",
"(",
"data",
",",
"cxy",
",",
"box",
")",
"# _logger.debug('new center is %s', ncenter)",
"# if we are to far away from the initial point, break",
"dst",
"=",
"distance",
".",
"euclidean",
"(",
"origin",
",",
"nxy",
")",
"if",
"dst",
">",
"maxdist",
":",
"msg",
"=",
"'maximum distance (%5.2f) from origin reached'",
"%",
"maxdist",
"return",
"cxy",
"[",
"0",
"]",
",",
"cxy",
"[",
"1",
"]",
",",
"back",
",",
"2",
",",
"msg",
"# check convergence",
"dst",
"=",
"distance",
".",
"euclidean",
"(",
"nxy",
",",
"cxy",
")",
"if",
"dst",
"<",
"toldist",
":",
"return",
"nxy",
"[",
"0",
"]",
",",
"nxy",
"[",
"1",
"]",
",",
"back",
",",
"1",
",",
"'converged in iteration %i'",
"%",
"i",
"else",
":",
"cxy",
"=",
"nxy",
"return",
"nxy",
"[",
"0",
"]",
",",
"nxy",
"[",
"1",
"]",
",",
"back",
",",
"3",
",",
"'not converged in %i iterations'",
"%",
"nloop"
] | returns x, y, background, status, message
status is:
* 0: not recentering
* 1: recentering successful
* 2: maximum distance reached
* 3: not converged | [
"returns",
"x",
"y",
"background",
"status",
"message"
] | 6c829495df8937f77c2de9383c1038ffb3e713e3 | https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/array/recenter.py#L57-L94 | train |
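A sketch on a synthetic Gaussian spot. The meaning of box is an assumption here (taken as the half-size of the centroiding window); the exact convention lives in the private helper _centering_centroid_loop_xy and should be checked there.

import numpy as np
from numina.array.recenter import centering_centroid

# synthetic image with a Gaussian blob centred near (x, y) = (52, 48)
yy, xx = np.mgrid[0:100, 0:100]
data = np.exp(-0.5 * (((xx - 52.0) / 3.0) ** 2 +
                      ((yy - 48.0) / 3.0) ** 2))

# start a couple of pixels off; box assumed to be the window half-size
x, y, back, status, msg = centering_centroid(data, 50.0, 50.0, box=(6, 6))
print(x, y, status, msg)  # status 1 expected: converged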
twiebe/Flask-CacheControl | src/flask_cachecontrol/cache.py | cache_for | def cache_for(**timedelta_kw):
"""
Set Cache-Control headers and Expires-header.
Expects a timedelta instance.
"""
max_age_timedelta = timedelta(**timedelta_kw)
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersFromTimedeltaCallback(max_age_timedelta)
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | python | def cache_for(**timedelta_kw):
"""
Set Cache-Control headers and Expires-header.
Expects a timedelta instance.
"""
max_age_timedelta = timedelta(**timedelta_kw)
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersFromTimedeltaCallback(max_age_timedelta)
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | [
"def",
"cache_for",
"(",
"*",
"*",
"timedelta_kw",
")",
":",
"max_age_timedelta",
"=",
"timedelta",
"(",
"*",
"*",
"timedelta_kw",
")",
"def",
"decorate_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorate_func_call",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"callback",
"=",
"SetCacheControlHeadersFromTimedeltaCallback",
"(",
"max_age_timedelta",
")",
"registry_provider",
"=",
"AfterThisRequestCallbackRegistryProvider",
"(",
")",
"registry",
"=",
"registry_provider",
".",
"provide",
"(",
")",
"registry",
".",
"add",
"(",
"callback",
")",
"return",
"func",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"return",
"decorate_func_call",
"return",
"decorate_func"
] | Set Cache-Control headers and Expires-header.
Expects a timedelta instance. | [
"Set",
"Cache",
"-",
"Control",
"headers",
"and",
"Expires",
"-",
"header",
"."
] | 8376156fafe3358b5a1201d348afb12994172962 | https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L20-L37 | train |
twiebe/Flask-CacheControl | src/flask_cachecontrol/cache.py | cache | def cache(*cache_control_items, **cache_control_kw):
"""
Set Cache-Control headers.
Expects keyword arguments and/or an item list.
Each pair is used to set Flask Response.cache_control attributes,
where the key is the attribute name and the value is its value.
Use True as value for attributes without values.
In case of an invalid attribute, CacheControlAttributeInvalidError
will be raised.
"""
cache_control_kw.update(cache_control_items)
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersCallback(**cache_control_kw)
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | python | def cache(*cache_control_items, **cache_control_kw):
"""
Set Cache-Control headers.
Expects keyword arguments and/or an item list.
Each pair is used to set Flask Response.cache_control attributes,
where the key is the attribute name and the value is its value.
Use True as value for attributes without values.
In case of an invalid attribute, CacheControlAttributeInvalidError
will be raised.
"""
cache_control_kw.update(cache_control_items)
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersCallback(**cache_control_kw)
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | [
"def",
"cache",
"(",
"*",
"cache_control_items",
",",
"*",
"*",
"cache_control_kw",
")",
":",
"cache_control_kw",
".",
"update",
"(",
"cache_control_items",
")",
"def",
"decorate_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorate_func_call",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"callback",
"=",
"SetCacheControlHeadersCallback",
"(",
"*",
"*",
"cache_control_kw",
")",
"registry_provider",
"=",
"AfterThisRequestCallbackRegistryProvider",
"(",
")",
"registry",
"=",
"registry_provider",
".",
"provide",
"(",
")",
"registry",
".",
"add",
"(",
"callback",
")",
"return",
"func",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"return",
"decorate_func_call",
"return",
"decorate_func"
] | Set Cache-Control headers.
Expects keyword arguments and/or an item list.
Each pair is used to set Flask Response.cache_control attributes,
where the key is the attribute name and the value is its value.
Use True as value for attributes without values.
In case of an invalid attribute, CacheControlAttributeInvalidError
will be raised.
"Set",
"Cache",
"-",
"Control",
"headers",
"."
] | 8376156fafe3358b5a1201d348afb12994172962 | https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L41-L66 | train |
twiebe/Flask-CacheControl | src/flask_cachecontrol/cache.py | dont_cache | def dont_cache():
"""
Set Cache-Control headers for no caching
Will generate proxy-revalidate, no-cache, no-store, must-revalidate,
max-age=0.
"""
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersForNoCachingCallback()
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | python | def dont_cache():
"""
Set Cache-Control headers for no caching
Will generate proxy-revalidate, no-cache, no-store, must-revalidate,
max-age=0.
"""
def decorate_func(func):
@wraps(func)
def decorate_func_call(*a, **kw):
callback = SetCacheControlHeadersForNoCachingCallback()
registry_provider = AfterThisRequestCallbackRegistryProvider()
registry = registry_provider.provide()
registry.add(callback)
return func(*a, **kw)
return decorate_func_call
return decorate_func | [
"def",
"dont_cache",
"(",
")",
":",
"def",
"decorate_func",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"decorate_func_call",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
":",
"callback",
"=",
"SetCacheControlHeadersForNoCachingCallback",
"(",
")",
"registry_provider",
"=",
"AfterThisRequestCallbackRegistryProvider",
"(",
")",
"registry",
"=",
"registry_provider",
".",
"provide",
"(",
")",
"registry",
".",
"add",
"(",
"callback",
")",
"return",
"func",
"(",
"*",
"a",
",",
"*",
"*",
"kw",
")",
"return",
"decorate_func_call",
"return",
"decorate_func"
] | Set Cache-Control headers for no caching
Will generate proxy-revalidate, no-cache, no-store, must-revalidate,
max-age=0. | [
"Set",
"Cache",
"-",
"Control",
"headers",
"for",
"no",
"caching"
] | 8376156fafe3358b5a1201d348afb12994172962 | https://github.com/twiebe/Flask-CacheControl/blob/8376156fafe3358b5a1201d348afb12994172962/src/flask_cachecontrol/cache.py#L70-L86 | train |
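A combined usage sketch for the three decorators above. It assumes they are re-exported at package level, and that (depending on the package version) the FlaskCacheControl extension may first need to be registered on the app so the after-request callbacks fire.

from flask import Flask
from flask_cachecontrol import cache, cache_for, dont_cache

app = Flask(__name__)

@app.route('/three-hours')
@cache_for(hours=3)
def view_cached():
    return 'Cache-Control and Expires set three hours ahead'

@app.route('/tuned')
@cache(max_age=600, public=True)
def view_tuned():
    return 'explicit Cache-Control attributes'

@app.route('/fresh')
@dont_cache()
def view_fresh():
    return 'no-cache, no-store, must-revalidate, max-age=0'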
inspirehep/inspire-schemas | inspire_schemas/utils.py | filter_empty_parameters | def filter_empty_parameters(func):
"""Decorator that is filtering empty parameters.
:param func: function that you want wrapping
:type func: function
"""
@wraps(func)
def func_wrapper(self, *args, **kwargs):
my_kwargs = {key: value for key, value in kwargs.items()
if value not in EMPTIES}
args_is_empty = all(arg in EMPTIES for arg in args)
if (
{'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
) and args_is_empty:
return
return func(self, *args, **my_kwargs)
return func_wrapper | python | def filter_empty_parameters(func):
"""Decorator that is filtering empty parameters.
:param func: function that you want wrapping
:type func: function
"""
@wraps(func)
def func_wrapper(self, *args, **kwargs):
my_kwargs = {key: value for key, value in kwargs.items()
if value not in EMPTIES}
args_is_empty = all(arg in EMPTIES for arg in args)
if (
{'source', 'material'}.issuperset(my_kwargs) or not my_kwargs
) and args_is_empty:
return
return func(self, *args, **my_kwargs)
return func_wrapper | [
"def",
"filter_empty_parameters",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"func_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"my_kwargs",
"=",
"{",
"key",
":",
"value",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"value",
"not",
"in",
"EMPTIES",
"}",
"args_is_empty",
"=",
"all",
"(",
"arg",
"in",
"EMPTIES",
"for",
"arg",
"in",
"args",
")",
"if",
"(",
"{",
"'source'",
",",
"'material'",
"}",
".",
"issuperset",
"(",
"my_kwargs",
")",
"or",
"not",
"my_kwargs",
")",
"and",
"args_is_empty",
":",
"return",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"my_kwargs",
")",
"return",
"func_wrapper"
] | Decorator that filters empty parameters.
:param func: function that you want to wrap
:type func: function | [
"Decorator",
"that",
"is",
"filtering",
"empty",
"parameters",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L330-L348 | train |
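A sketch of the filtering behaviour, assuming EMPTIES covers the usual empty values (None, '', [], {}):

from inspire_schemas.utils import filter_empty_parameters

class Builder(object):
    @filter_empty_parameters
    def add_keyword(self, value=None, source=None):
        print(value, source)

b = Builder()
b.add_keyword(value='QCD', source='arxiv')  # runs: value is substantive
b.add_keyword(value='', source='arxiv')     # skipped: only 'source' is left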
inspirehep/inspire-schemas | inspire_schemas/utils.py | author_id_normalize_and_schema | def author_id_normalize_and_schema(uid, schema=None):
"""Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raises:
UnknownUIDSchema: if the schema cannot be definitively guessed from the UID
SchemaUIDConflict: if the specified schema does not match the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
regex, template = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid'))
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return idutils.normalize_orcid(uid), 'ORCID'
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
raise UnknownUIDSchema(uid)
if schema:
normalized_uid = _get_uid_normalized_in_schema(uid, schema)
if normalized_uid:
return normalized_uid, schema
else:
raise SchemaUIDConflict(schema, uid)
match_schema, normalized_uid = None, None
for candidate_schema in _RE_AUTHORS_UID:
candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
if candidate_uid:
if match_schema:
# Valid against more than one candidate schema, ambiguous
raise UnknownUIDSchema(uid)
match_schema = candidate_schema
normalized_uid = candidate_uid
if match_schema:
return normalized_uid, match_schema
# No guesses have been found
raise UnknownUIDSchema(uid) | python | def author_id_normalize_and_schema(uid, schema=None):
"""Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raises:
UnknownUIDSchema: if the schema cannot be definitively guessed from the UID
SchemaUIDConflict: if the specified schema does not match the given UID
"""
def _get_uid_normalized_in_schema(_uid, _schema):
regex, template = _RE_AUTHORS_UID[_schema]
match = regex.match(_uid)
if match:
return template.format(match.group('uid'))
if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
return idutils.normalize_orcid(uid), 'ORCID'
if schema and schema not in _RE_AUTHORS_UID:
# Schema explicitly specified, but this function can't handle it
raise UnknownUIDSchema(uid)
if schema:
normalized_uid = _get_uid_normalized_in_schema(uid, schema)
if normalized_uid:
return normalized_uid, schema
else:
raise SchemaUIDConflict(schema, uid)
match_schema, normalized_uid = None, None
for candidate_schema in _RE_AUTHORS_UID:
candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
if candidate_uid:
if match_schema:
# Valid against more than one candidate schema, ambiguous
raise UnknownUIDSchema(uid)
match_schema = candidate_schema
normalized_uid = candidate_uid
if match_schema:
return normalized_uid, match_schema
# No guesses have been found
raise UnknownUIDSchema(uid) | [
"def",
"author_id_normalize_and_schema",
"(",
"uid",
",",
"schema",
"=",
"None",
")",
":",
"def",
"_get_uid_normalized_in_schema",
"(",
"_uid",
",",
"_schema",
")",
":",
"regex",
",",
"template",
"=",
"_RE_AUTHORS_UID",
"[",
"_schema",
"]",
"match",
"=",
"regex",
".",
"match",
"(",
"_uid",
")",
"if",
"match",
":",
"return",
"template",
".",
"format",
"(",
"match",
".",
"group",
"(",
"'uid'",
")",
")",
"if",
"idutils",
".",
"is_orcid",
"(",
"uid",
")",
"and",
"schema",
"in",
"(",
"None",
",",
"'ORCID'",
")",
":",
"return",
"idutils",
".",
"normalize_orcid",
"(",
"uid",
")",
",",
"'ORCID'",
"if",
"schema",
"and",
"schema",
"not",
"in",
"_RE_AUTHORS_UID",
":",
"# Schema explicitly specified, but this function can't handle it",
"raise",
"UnknownUIDSchema",
"(",
"uid",
")",
"if",
"schema",
":",
"normalized_uid",
"=",
"_get_uid_normalized_in_schema",
"(",
"uid",
",",
"schema",
")",
"if",
"normalized_uid",
":",
"return",
"normalized_uid",
",",
"schema",
"else",
":",
"raise",
"SchemaUIDConflict",
"(",
"schema",
",",
"uid",
")",
"match_schema",
",",
"normalized_uid",
"=",
"None",
",",
"None",
"for",
"candidate_schema",
"in",
"_RE_AUTHORS_UID",
":",
"candidate_uid",
"=",
"_get_uid_normalized_in_schema",
"(",
"uid",
",",
"candidate_schema",
")",
"if",
"candidate_uid",
":",
"if",
"match_schema",
":",
"# Valid against more than one candidate schema, ambiguous",
"raise",
"UnknownUIDSchema",
"(",
"uid",
")",
"match_schema",
"=",
"candidate_schema",
"normalized_uid",
"=",
"candidate_uid",
"if",
"match_schema",
":",
"return",
"normalized_uid",
",",
"match_schema",
"# No guessess have been found",
"raise",
"UnknownUIDSchema",
"(",
"uid",
")"
] | Detect and normalize an author UID schema.
Args:
uid (string): a UID string
schema (string): try to resolve to schema
Returns:
Tuple[string, string]: a tuple (uid, schema) where:
- uid: the UID normalized to comply with the id.json schema
- schema: a schema of the UID or *None* if not recognised
Raises:
UnknownUIDSchema: if the schema cannot be definitively guessed from the UID
SchemaUIDConflict: if the specified schema does not match the given UID
"Detect",
"and",
"normalize",
"an",
"author",
"UID",
"schema",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L351-L401 | train |
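A usage sketch: the first call relies only on the idutils ORCID validation visible above, while the failure case depends on the contents of _RE_AUTHORS_UID, so the exact exception is an assumption.

from inspire_schemas.utils import author_id_normalize_and_schema

# a checksum-valid ORCID is detected and normalized without a hint
print(author_id_normalize_and_schema('0000-0002-1825-0097'))
# -> ('0000-0002-1825-0097', 'ORCID')

try:
    author_id_normalize_and_schema('not-a-known-uid')
except Exception as err:  # UnknownUIDSchema in this sketch
    print(type(err).__name__)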
inspirehep/inspire-schemas | inspire_schemas/utils.py | normalize_arxiv_category | def normalize_arxiv_category(category):
"""Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
needed. If the category is obsolete, it also gets converted to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA'
"""
category = _NEW_CATEGORIES.get(category.lower(), category)
for valid_category in valid_arxiv_categories():
if (category.lower() == valid_category.lower() or
category.lower().replace('-', '.') == valid_category.lower()):
return valid_category
return category | python | def normalize_arxiv_category(category):
"""Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
needed. If the category is obsolete, it also gets converted to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA'
"""
category = _NEW_CATEGORIES.get(category.lower(), category)
for valid_category in valid_arxiv_categories():
if (category.lower() == valid_category.lower() or
category.lower().replace('-', '.') == valid_category.lower()):
return valid_category
return category | [
"def",
"normalize_arxiv_category",
"(",
"category",
")",
":",
"category",
"=",
"_NEW_CATEGORIES",
".",
"get",
"(",
"category",
".",
"lower",
"(",
")",
",",
"category",
")",
"for",
"valid_category",
"in",
"valid_arxiv_categories",
"(",
")",
":",
"if",
"(",
"category",
".",
"lower",
"(",
")",
"==",
"valid_category",
".",
"lower",
"(",
")",
"or",
"category",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"'.'",
")",
"==",
"valid_category",
".",
"lower",
"(",
")",
")",
":",
"return",
"valid_category",
"return",
"category"
] | Normalize arXiv category to be schema compliant.
This properly capitalizes the category and replaces the dash by a dot if
needed. If the category is obsolete, it also gets converted to its
current equivalent.
Example:
>>> from inspire_schemas.utils import normalize_arxiv_category
>>> normalize_arxiv_category('funct-an') # doctest: +SKIP
u'math.FA' | [
"Normalize",
"arXiv",
"category",
"to",
"be",
"schema",
"compliant",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L404-L422 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | valid_arxiv_categories | def valid_arxiv_categories():
"""List of all arXiv categories that ever existed.
Example:
>>> from inspire_schemas.utils import valid_arxiv_categories
>>> 'funct-an' in valid_arxiv_categories()
True
"""
schema = load_schema('elements/arxiv_categories')
categories = schema['enum']
categories.extend(_NEW_CATEGORIES.keys())
return categories | python | def valid_arxiv_categories():
"""List of all arXiv categories that ever existed.
Example:
>>> from inspire_schemas.utils import valid_arxiv_categories
>>> 'funct-an' in valid_arxiv_categories()
True
"""
schema = load_schema('elements/arxiv_categories')
categories = schema['enum']
categories.extend(_NEW_CATEGORIES.keys())
return categories | [
"def",
"valid_arxiv_categories",
"(",
")",
":",
"schema",
"=",
"load_schema",
"(",
"'elements/arxiv_categories'",
")",
"categories",
"=",
"schema",
"[",
"'enum'",
"]",
"categories",
".",
"extend",
"(",
"_NEW_CATEGORIES",
".",
"keys",
"(",
")",
")",
"return",
"categories"
] | List of all arXiv categories that ever existed.
Example:
>>> from inspire_schemas.utils import valid_arxiv_categories
>>> 'funct-an' in valid_arxiv_categories()
True | [
"List",
"of",
"all",
"arXiv",
"categories",
"that",
"ever",
"existed",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L425-L438 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | classify_field | def classify_field(value):
"""Normalize ``value`` to an Inspire category.
Args:
value(str): an Inspire category to properly case, or an arXiv category
to translate to the corresponding Inspire category.
Returns:
str: ``None`` if ``value`` is not a non-empty string,
otherwise the corresponding Inspire category.
"""
if not (isinstance(value, six.string_types) and value):
return
schema = load_schema('elements/inspire_field')
inspire_categories = schema['properties']['term']['enum']
for inspire_category in inspire_categories:
if value.upper() == inspire_category.upper():
return inspire_category
category = normalize_arxiv_category(value)
return ARXIV_TO_INSPIRE_CATEGORY_MAPPING.get(category, 'Other') | python | def classify_field(value):
"""Normalize ``value`` to an Inspire category.
Args:
value(str): an Inspire category to properly case, or an arXiv category
to translate to the corresponding Inspire category.
Returns:
str: ``None`` if ``value`` is not a non-empty string,
otherwise the corresponding Inspire category.
"""
if not (isinstance(value, six.string_types) and value):
return
schema = load_schema('elements/inspire_field')
inspire_categories = schema['properties']['term']['enum']
for inspire_category in inspire_categories:
if value.upper() == inspire_category.upper():
return inspire_category
category = normalize_arxiv_category(value)
return ARXIV_TO_INSPIRE_CATEGORY_MAPPING.get(category, 'Other') | [
"def",
"classify_field",
"(",
"value",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
"and",
"value",
")",
":",
"return",
"schema",
"=",
"load_schema",
"(",
"'elements/inspire_field'",
")",
"inspire_categories",
"=",
"schema",
"[",
"'properties'",
"]",
"[",
"'term'",
"]",
"[",
"'enum'",
"]",
"for",
"inspire_category",
"in",
"inspire_categories",
":",
"if",
"value",
".",
"upper",
"(",
")",
"==",
"inspire_category",
".",
"upper",
"(",
")",
":",
"return",
"inspire_category",
"category",
"=",
"normalize_arxiv_category",
"(",
"value",
")",
"return",
"ARXIV_TO_INSPIRE_CATEGORY_MAPPING",
".",
"get",
"(",
"category",
",",
"'Other'",
")"
] | Normalize ``value`` to an Inspire category.
Args:
value(str): an Inspire category to properly case, or an arXiv category
to translate to the corresponding Inspire category.
Returns:
str: ``None`` if ``value`` is not a non-empty string,
otherwise the corresponding Inspire category. | [
"Normalize",
"value",
"to",
"an",
"Inspire",
"category",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L441-L464 | train |
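A short sketch; the exact outputs depend on ARXIV_TO_INSPIRE_CATEGORY_MAPPING and the inspire_field enum, so the comments are indicative only.

from inspire_schemas.utils import classify_field

print(classify_field('hep-ph'))          # arXiv category mapped to INSPIRE
print(classify_field('experiment-hep'))  # already an INSPIRE term, re-cased
print(classify_field(''))                # None: empty input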
inspirehep/inspire-schemas | inspire_schemas/utils.py | split_pubnote | def split_pubnote(pubnote_str):
"""Split pubnote into journal information."""
pubnote = {}
parts = pubnote_str.split(',')
if len(parts) > 2:
pubnote['journal_title'] = parts[0]
pubnote['journal_volume'] = parts[1]
pubnote['page_start'], pubnote['page_end'], pubnote['artid'] = split_page_artid(parts[2])
return {key: val for (key, val) in six.iteritems(pubnote) if val is not None} | python | def split_pubnote(pubnote_str):
"""Split pubnote into journal information."""
pubnote = {}
parts = pubnote_str.split(',')
if len(parts) > 2:
pubnote['journal_title'] = parts[0]
pubnote['journal_volume'] = parts[1]
pubnote['page_start'], pubnote['page_end'], pubnote['artid'] = split_page_artid(parts[2])
return {key: val for (key, val) in six.iteritems(pubnote) if val is not None} | [
"def",
"split_pubnote",
"(",
"pubnote_str",
")",
":",
"pubnote",
"=",
"{",
"}",
"parts",
"=",
"pubnote_str",
".",
"split",
"(",
"','",
")",
"if",
"len",
"(",
"parts",
")",
">",
"2",
":",
"pubnote",
"[",
"'journal_title'",
"]",
"=",
"parts",
"[",
"0",
"]",
"pubnote",
"[",
"'journal_volume'",
"]",
"=",
"parts",
"[",
"1",
"]",
"pubnote",
"[",
"'page_start'",
"]",
",",
"pubnote",
"[",
"'page_end'",
"]",
",",
"pubnote",
"[",
"'artid'",
"]",
"=",
"split_page_artid",
"(",
"parts",
"[",
"2",
"]",
")",
"return",
"{",
"key",
":",
"val",
"for",
"(",
"key",
",",
"val",
")",
"in",
"six",
".",
"iteritems",
"(",
"pubnote",
")",
"if",
"val",
"is",
"not",
"None",
"}"
] | Split pubnote into journal information. | [
"Split",
"pubnote",
"into",
"journal",
"information",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L501-L511 | train |
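A sketch; how the third field is split between page_start, page_end and artid is delegated to split_page_artid, so the commented results are indicative.

from inspire_schemas.utils import split_pubnote

print(split_pubnote('Phys.Rev.,D95,114008'))
# journal_title='Phys.Rev.', journal_volume='D95', and the third
# field parsed by split_page_artid (an article ID here)

print(split_pubnote('Nucl.Phys.,B360,362-396'))
# page range: page_start/page_end populated instead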
inspirehep/inspire-schemas | inspire_schemas/utils.py | get_schema_path | def get_schema_path(schema, resolved=False):
"""Retrieve the installed path for the given schema.
Args:
schema(str): relative or absolute url of the schema to validate, for
example, 'records/authors.json' or 'jobs.json', or just the name of the
schema, like 'jobs'.
resolved(bool): if True, the returned path points to a fully resolved
schema, that is to the schema with all `$ref` replaced by their
targets.
Returns:
str: path to the given schema name.
Raises:
SchemaNotFound: if no schema could be found.
"""
def _strip_first_path_elem(path):
"""Pass doctests.
Strip the first element of the given path, returning an empty string if
there are no more elements. For example, 'something/other' will end up
as 'other', but passing then 'other' will return ''
"""
stripped_path = path.split(os.path.sep, 1)[1:]
return ''.join(stripped_path)
def _schema_to_normalized_path(schema):
"""Pass doctests.
Extracts the path from the url, makes sure to get rid of any '..' in
the path and adds the json extension if not there.
"""
path = os.path.normpath(os.path.sep + urlsplit(schema).path)
if path.startswith(os.path.sep):
path = path[1:]
if not path.endswith('.json'):
path += '.json'
return path
path = _schema_to_normalized_path(schema)
while path:
if resolved:
schema_path = os.path.abspath(os.path.join(_resolved_schema_root_path, path))
else:
schema_path = os.path.abspath(os.path.join(_schema_root_path, path))
if os.path.exists(schema_path):
return os.path.abspath(schema_path)
path = _strip_first_path_elem(path)
raise SchemaNotFound(schema=schema) | python | def get_schema_path(schema, resolved=False):
"""Retrieve the installed path for the given schema.
Args:
schema(str): relative or absolute url of the schema to validate, for
example, 'records/authors.json' or 'jobs.json', or just the name of the
schema, like 'jobs'.
resolved(bool): if True, the returned path points to a fully resolved
schema, that is to the schema with all `$ref` replaced by their
targets.
Returns:
str: path to the given schema name.
Raises:
SchemaNotFound: if no schema could be found.
"""
def _strip_first_path_elem(path):
"""Pass doctests.
Strip the first element of the given path, returning an empty string if
there are no more elements. For example, 'something/other' will end up
as 'other', but passing then 'other' will return ''
"""
stripped_path = path.split(os.path.sep, 1)[1:]
return ''.join(stripped_path)
def _schema_to_normalized_path(schema):
"""Pass doctests.
Extracts the path from the url, makes sure to get rid of any '..' in
the path and adds the json extension if not there.
"""
path = os.path.normpath(os.path.sep + urlsplit(schema).path)
if path.startswith(os.path.sep):
path = path[1:]
if not path.endswith('.json'):
path += '.json'
return path
path = _schema_to_normalized_path(schema)
while path:
if resolved:
schema_path = os.path.abspath(os.path.join(_resolved_schema_root_path, path))
else:
schema_path = os.path.abspath(os.path.join(_schema_root_path, path))
if os.path.exists(schema_path):
return os.path.abspath(schema_path)
path = _strip_first_path_elem(path)
raise SchemaNotFound(schema=schema) | [
"def",
"get_schema_path",
"(",
"schema",
",",
"resolved",
"=",
"False",
")",
":",
"def",
"_strip_first_path_elem",
"(",
"path",
")",
":",
"\"\"\"Pass doctests.\n\n Strip the first element of the given path, returning an empty string if\n there are no more elements. For example, 'something/other' will end up\n as 'other', but passing then 'other' will return ''\n \"\"\"",
"stripped_path",
"=",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
",",
"1",
")",
"[",
"1",
":",
"]",
"return",
"''",
".",
"join",
"(",
"stripped_path",
")",
"def",
"_schema_to_normalized_path",
"(",
"schema",
")",
":",
"\"\"\"Pass doctests.\n\n Extracts the path from the url, makes sure to get rid of any '..' in\n the path and adds the json extension if not there.\n \"\"\"",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"sep",
"+",
"urlsplit",
"(",
"schema",
")",
".",
"path",
")",
"if",
"path",
".",
"startswith",
"(",
"os",
".",
"path",
".",
"sep",
")",
":",
"path",
"=",
"path",
"[",
"1",
":",
"]",
"if",
"not",
"path",
".",
"endswith",
"(",
"'.json'",
")",
":",
"path",
"+=",
"'.json'",
"return",
"path",
"path",
"=",
"_schema_to_normalized_path",
"(",
"schema",
")",
"while",
"path",
":",
"if",
"resolved",
":",
"schema_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"_resolved_schema_root_path",
",",
"path",
")",
")",
"else",
":",
"schema_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"_schema_root_path",
",",
"path",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"schema_path",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"schema_path",
")",
"path",
"=",
"_strip_first_path_elem",
"(",
"path",
")",
"raise",
"SchemaNotFound",
"(",
"schema",
"=",
"schema",
")"
] | Retrieve the installed path for the given schema.
Args:
schema(str): relative or absolute url of the schema to validate, for
example, 'records/authors.json' or 'jobs.json', or just the name of the
schema, like 'jobs'.
resolved(bool): if True, the returned path points to a fully resolved
schema, that is to the schema with all `$ref` replaced by their
targets.
Returns:
str: path to the given schema name.
Raises:
SchemaNotFound: if no schema could be found. | [
"Retrieve",
"the",
"installed",
"path",
"for",
"the",
"given",
"schema",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L545-L598 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | load_schema | def load_schema(schema_name, resolved=False):
"""Load the given schema from wherever it's installed.
Args:
schema_name(str): Name of the schema to load, for example 'authors'.
resolved(bool): If True will return the resolved schema, that is with
all the $refs replaced by their targets.
Returns:
dict: the schema with the given name.
"""
schema_data = ''
with open(get_schema_path(schema_name, resolved)) as schema_fd:
schema_data = json.loads(schema_fd.read())
return schema_data | python | def load_schema(schema_name, resolved=False):
"""Load the given schema from wherever it's installed.
Args:
schema_name(str): Name of the schema to load, for example 'authors'.
resolved(bool): If True will return the resolved schema, that is with
all the $refs replaced by their targets.
Returns:
dict: the schema with the given name.
"""
schema_data = ''
with open(get_schema_path(schema_name, resolved)) as schema_fd:
schema_data = json.loads(schema_fd.read())
return schema_data | [
"def",
"load_schema",
"(",
"schema_name",
",",
"resolved",
"=",
"False",
")",
":",
"schema_data",
"=",
"''",
"with",
"open",
"(",
"get_schema_path",
"(",
"schema_name",
",",
"resolved",
")",
")",
"as",
"schema_fd",
":",
"schema_data",
"=",
"json",
".",
"loads",
"(",
"schema_fd",
".",
"read",
"(",
")",
")",
"return",
"schema_data"
] | Load the given schema from wherever it's installed.
Args:
schema_name(str): Name of the schema to load, for example 'authors'.
resolved(bool): If True will return the resolved schema, that is with
all the $refs replaced by their targets.
Returns:
dict: the schema with the given name. | [
"Load",
"the",
"given",
"schema",
"from",
"wherever",
"it",
"s",
"installed",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L601-L616 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | _load_schema_for_record | def _load_schema_for_record(data, schema=None):
"""Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
"""
if schema is None:
if '$schema' not in data:
raise SchemaKeyNotFound(data=data)
schema = data['$schema']
if isinstance(schema, six.string_types):
schema = load_schema(schema_name=schema)
return schema | python | def _load_schema_for_record(data, schema=None):
"""Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
"""
if schema is None:
if '$schema' not in data:
raise SchemaKeyNotFound(data=data)
schema = data['$schema']
if isinstance(schema, six.string_types):
schema = load_schema(schema_name=schema)
return schema | [
"def",
"_load_schema_for_record",
"(",
"data",
",",
"schema",
"=",
"None",
")",
":",
"if",
"schema",
"is",
"None",
":",
"if",
"'$schema'",
"not",
"in",
"data",
":",
"raise",
"SchemaKeyNotFound",
"(",
"data",
"=",
"data",
")",
"schema",
"=",
"data",
"[",
"'$schema'",
"]",
"if",
"isinstance",
"(",
"schema",
",",
"six",
".",
"string_types",
")",
":",
"schema",
"=",
"load_schema",
"(",
"schema_name",
"=",
"schema",
")",
"return",
"schema"
] | Load the schema from a given record.
Args:
data (dict): record data.
schema (Union[dict, str]): schema to validate against.
Returns:
dict: the loaded schema.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid. | [
"Load",
"the",
"schema",
"from",
"a",
"given",
"record",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L627-L650 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | validate | def validate(data, schema=None):
"""Validate the given dictionary against the given schema.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
jsonschema.ValidationError: if the data is invalid.
"""
schema = _load_schema_for_record(data, schema)
return jsonschema_validate(
instance=data,
schema=schema,
resolver=LocalRefResolver.from_schema(schema),
format_checker=inspire_format_checker,
) | python | def validate(data, schema=None):
"""Validate the given dictionary against the given schema.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
jsonschema.ValidationError: if the data is invalid.
"""
schema = _load_schema_for_record(data, schema)
return jsonschema_validate(
instance=data,
schema=schema,
resolver=LocalRefResolver.from_schema(schema),
format_checker=inspire_format_checker,
) | [
"def",
"validate",
"(",
"data",
",",
"schema",
"=",
"None",
")",
":",
"schema",
"=",
"_load_schema_for_record",
"(",
"data",
",",
"schema",
")",
"return",
"jsonschema_validate",
"(",
"instance",
"=",
"data",
",",
"schema",
"=",
"schema",
",",
"resolver",
"=",
"LocalRefResolver",
".",
"from_schema",
"(",
"schema",
")",
",",
"format_checker",
"=",
"inspire_format_checker",
",",
")"
] | Validate the given dictionary against the given schema.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
jsonschema.ValidationError: if the data is invalid. | [
"Validate",
"the",
"given",
"dictionary",
"against",
"the",
"given",
"schema",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L653-L678 | train |
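A hedged example of calling ``validate`` with an explicit schema name; the data dict and the schema name 'hep' are hypothetical:

from jsonschema import ValidationError
from inspire_schemas.utils import validate

data = {'titles': [{'title': 'An example title'}]}
try:
    validate(data, schema='hep')  # a string schema is loaded internally
except ValidationError as exc:
    print('record is invalid:', exc.message)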
inspirehep/inspire-schemas | inspire_schemas/utils.py | get_validation_errors | def get_validation_errors(data, schema=None):
"""Validation errors for a given record.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Yields:
jsonschema.exceptions.ValidationError: validation errors.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
"""
schema = _load_schema_for_record(data, schema)
errors = Draft4Validator(
schema,
resolver=LocalRefResolver.from_schema(schema),
format_checker=inspire_format_checker
)
return errors.iter_errors(data) | python | def get_validation_errors(data, schema=None):
"""Validation errors for a given record.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Yields:
jsonschema.exceptions.ValidationError: validation errors.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid.
"""
schema = _load_schema_for_record(data, schema)
errors = Draft4Validator(
schema,
resolver=LocalRefResolver.from_schema(schema),
format_checker=inspire_format_checker
)
return errors.iter_errors(data) | [
"def",
"get_validation_errors",
"(",
"data",
",",
"schema",
"=",
"None",
")",
":",
"schema",
"=",
"_load_schema_for_record",
"(",
"data",
",",
"schema",
")",
"errors",
"=",
"Draft4Validator",
"(",
"schema",
",",
"resolver",
"=",
"LocalRefResolver",
".",
"from_schema",
"(",
"schema",
")",
",",
"format_checker",
"=",
"inspire_format_checker",
")",
"return",
"errors",
".",
"iter_errors",
"(",
"data",
")"
] | Validation errors for a given record.
Args:
data (dict): record to validate.
schema (Union[dict, str]): schema to validate against. If it is a
string, it is interpreted as the name of the schema to load (e.g.
``authors`` or ``jobs``). If it is ``None``, the schema is taken
from ``data['$schema']``. If it is a dictionary, it is used
directly.
Yields:
jsonschema.exceptions.ValidationError: validation errors.
Raises:
SchemaNotFound: if the given schema was not found.
SchemaKeyNotFound: if ``schema`` is ``None`` and no ``$schema`` key was
found in ``data``.
jsonschema.SchemaError: if the schema is invalid. | [
"Validation",
"errors",
"for",
"a",
"given",
"record",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L681-L707 | train |
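Unlike ``validate``, this record's function reports every violation instead of raising on the first one. A sketch under the same assumptions as above:

from inspire_schemas.utils import get_validation_errors

data = {'titles': 'not-a-list'}  # deliberately malformed
for error in get_validation_errors(data, schema='hep'):
    # each item is a jsonschema.exceptions.ValidationError
    print(error.message)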
inspirehep/inspire-schemas | inspire_schemas/utils.py | normalize_collaboration | def normalize_collaboration(collaboration):
"""Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS and ATLAS Collaborations')
['CMS', 'ATLAS']
"""
if not collaboration:
return []
collaboration = collaboration.strip()
if collaboration.startswith('(') and collaboration.endswith(')'):
collaboration = collaboration[1:-1]
collaborations = _RE_AND.split(collaboration)
collaborations = (_RE_COLLABORATION_LEADING.sub('', collab)
for collab in collaborations)
collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab)
for collab in collaborations)
return [collab.strip() for collab in collaborations] | python | def normalize_collaboration(collaboration):
"""Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS and ATLAS Collaborations')
['CMS', 'ATLAS']
"""
if not collaboration:
return []
collaboration = collaboration.strip()
if collaboration.startswith('(') and collaboration.endswith(')'):
collaboration = collaboration[1:-1]
collaborations = _RE_AND.split(collaboration)
collaborations = (_RE_COLLABORATION_LEADING.sub('', collab)
for collab in collaborations)
collaborations = (_RE_COLLABORATION_TRAILING.sub('', collab)
for collab in collaborations)
return [collab.strip() for collab in collaborations] | [
"def",
"normalize_collaboration",
"(",
"collaboration",
")",
":",
"if",
"not",
"collaboration",
":",
"return",
"[",
"]",
"collaboration",
"=",
"collaboration",
".",
"strip",
"(",
")",
"if",
"collaboration",
".",
"startswith",
"(",
"'('",
")",
"and",
"collaboration",
".",
"endswith",
"(",
"')'",
")",
":",
"collaboration",
"=",
"collaboration",
"[",
"1",
":",
"-",
"1",
"]",
"collaborations",
"=",
"_RE_AND",
".",
"split",
"(",
"collaboration",
")",
"collaborations",
"=",
"(",
"_RE_COLLABORATION_LEADING",
".",
"sub",
"(",
"''",
",",
"collab",
")",
"for",
"collab",
"in",
"collaborations",
")",
"collaborations",
"=",
"(",
"_RE_COLLABORATION_TRAILING",
".",
"sub",
"(",
"''",
",",
"collab",
")",
"for",
"collab",
"in",
"collaborations",
")",
"return",
"[",
"collab",
".",
"strip",
"(",
")",
"for",
"collab",
"in",
"collaborations",
"]"
] | Normalize collaboration string.
Args:
collaboration: a string containing collaboration(s) or None
Returns:
list: List of extracted and normalized collaborations
Examples:
>>> from inspire_schemas.utils import normalize_collaboration
>>> normalize_collaboration('for the CMS and ATLAS Collaborations')
['CMS', 'ATLAS'] | [
"Normalize",
"collaboration",
"string",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L710-L737 | train |
inspirehep/inspire-schemas | inspire_schemas/utils.py | get_license_from_url | def get_license_from_url(url):
"""Get the license abbreviation from an URL.
Args:
url(str): canonical url of the license.
Returns:
str: the corresponding license abbreviation.
Raises:
ValueError: when the url is not recognized
"""
if not url:
return
split_url = urlsplit(url, scheme='http')
if split_url.netloc.lower() == 'creativecommons.org':
if 'publicdomain' in split_url.path:
match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)
if match is None:
license = ['public domain']
else:
license = ['CC0']
license.extend(part for part in match.groups() if part)
else:
license = ['CC']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part.upper() for part in match.groups() if part)
elif split_url.netloc == 'arxiv.org':
license = ['arXiv']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part for part in match.groups() if part)
else:
raise ValueError('Unknown license URL')
return u' '.join(license) | python | def get_license_from_url(url):
"""Get the license abbreviation from an URL.
Args:
url(str): canonical url of the license.
Returns:
str: the corresponding license abbreviation.
Raises:
ValueError: when the url is not recognized
"""
if not url:
return
split_url = urlsplit(url, scheme='http')
if split_url.netloc.lower() == 'creativecommons.org':
if 'publicdomain' in split_url.path:
match = _RE_PUBLIC_DOMAIN_URL.match(split_url.path)
if match is None:
license = ['public domain']
else:
license = ['CC0']
license.extend(part for part in match.groups() if part)
else:
license = ['CC']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part.upper() for part in match.groups() if part)
elif split_url.netloc == 'arxiv.org':
license = ['arXiv']
match = _RE_LICENSE_URL.match(split_url.path)
license.extend(part for part in match.groups() if part)
else:
raise ValueError('Unknown license URL')
return u' '.join(license) | [
"def",
"get_license_from_url",
"(",
"url",
")",
":",
"if",
"not",
"url",
":",
"return",
"split_url",
"=",
"urlsplit",
"(",
"url",
",",
"scheme",
"=",
"'http'",
")",
"if",
"split_url",
".",
"netloc",
".",
"lower",
"(",
")",
"==",
"'creativecommons.org'",
":",
"if",
"'publicdomain'",
"in",
"split_url",
".",
"path",
":",
"match",
"=",
"_RE_PUBLIC_DOMAIN_URL",
".",
"match",
"(",
"split_url",
".",
"path",
")",
"if",
"match",
"is",
"None",
":",
"license",
"=",
"[",
"'public domain'",
"]",
"else",
":",
"license",
"=",
"[",
"'CC0'",
"]",
"license",
".",
"extend",
"(",
"part",
"for",
"part",
"in",
"match",
".",
"groups",
"(",
")",
"if",
"part",
")",
"else",
":",
"license",
"=",
"[",
"'CC'",
"]",
"match",
"=",
"_RE_LICENSE_URL",
".",
"match",
"(",
"split_url",
".",
"path",
")",
"license",
".",
"extend",
"(",
"part",
".",
"upper",
"(",
")",
"for",
"part",
"in",
"match",
".",
"groups",
"(",
")",
"if",
"part",
")",
"elif",
"split_url",
".",
"netloc",
"==",
"'arxiv.org'",
":",
"license",
"=",
"[",
"'arXiv'",
"]",
"match",
"=",
"_RE_LICENSE_URL",
".",
"match",
"(",
"split_url",
".",
"path",
")",
"license",
".",
"extend",
"(",
"part",
"for",
"part",
"in",
"match",
".",
"groups",
"(",
")",
"if",
"part",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown license URL'",
")",
"return",
"u' '",
".",
"join",
"(",
"license",
")"
] | Get the license abbreviation from a URL.
Args:
url(str): canonical url of the license.
Returns:
str: the corresponding license abbreviation.
Raises:
ValueError: when the url is not recognized | [
"Get",
"the",
"license",
"abbreviation",
"from",
"an",
"URL",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L740-L776 | train |
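A sketch of the URL-to-abbreviation mapping above; the exact output strings depend on the module's private regexes, so the expected values in the comments are assumptions:

from inspire_schemas.utils import get_license_from_url

print(get_license_from_url('https://creativecommons.org/licenses/by/4.0/'))
# expected: a 'CC BY 4.0'-style abbreviation
print(get_license_from_url(None))  # returns None via the early exit
# Any other host raises ValueError('Unknown license URL'):
# get_license_from_url('https://example.org/license')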
inspirehep/inspire-schemas | inspire_schemas/utils.py | convert_old_publication_info_to_new | def convert_old_publication_info_to_new(publication_infos):
"""Convert a ``publication_info`` value from the old format to the new.
On Legacy different series of the same journal were modeled by adding the
letter part of the name to the journal volume. For example, a paper published
in Physical Review D contained::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.',
'journal_volume': 'D43',
},
],
}
On Labs we instead represent each series with a different journal record. As
a consequence, the above example becomes::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.D',
'journal_volume': '43',
},
],
}
This function handles this translation from the old format to the new. Please
also see the tests for various edge cases that this function also handles.
Args:
publication_infos: a ``publication_info`` in the old format.
Returns:
list(dict): a ``publication_info`` in the new format.
"""
result = []
hidden_publication_infos = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_OLD_TO_NEW[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue
except KeyError:
pass
journal_volume = _publication_info.get('journal_volume')
if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and journal_volume and len(journal_volume) == 4:
try:
was_last_century = int(journal_volume[:2]) > 50
except ValueError:
pass
else:
_publication_info['year'] = int('19' + journal_volume[:2] if was_last_century else '20' + journal_volume[:2])
_publication_info['journal_volume'] = journal_volume[2:]
result.append(_publication_info)
continue
if journal_title and journal_volume and journal_title.lower() not in JOURNALS_IGNORED_IN_OLD_TO_NEW:
volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER.match(journal_volume)
volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER.match(journal_volume)
match = volume_starts_with_a_letter or volume_ends_with_a_letter
if match:
_publication_info.pop('journal_record', None)
if journal_title in _JOURNALS_RENAMED_OLD_TO_NEW.values():
_publication_info['journal_title'] = journal_title
else:
_publication_info['journal_title'] = ''.join([
journal_title,
'' if journal_title.endswith('.') else ' ',
match.group('letter'),
])
_publication_info['journal_volume'] = match.group('volume')
hidden = _publication_info.pop('hidden', None)
if hidden:
hidden_publication_infos.append(_publication_info)
else:
result.append(_publication_info)
for publication_info in hidden_publication_infos:
if publication_info not in result:
publication_info['hidden'] = True
result.append(publication_info)
return result | python | def convert_old_publication_info_to_new(publication_infos):
"""Convert a ``publication_info`` value from the old format to the new.
On Legacy different series of the same journal were modeled by adding the
letter part of the name to the journal volume. For example, a paper published
in Physical Review D contained::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.',
'journal_volume': 'D43',
},
],
}
On Labs we instead represent each series with a different journal record. As
a consequence, the above example becomes::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.D',
'journal_volume': '43',
},
],
}
This function handles this translation from the old format to the new. Please
also see the tests for various edge cases that this function also handles.
Args:
publication_infos: a ``publication_info`` in the old format.
Returns:
list(dict): a ``publication_info`` in the new format.
"""
result = []
hidden_publication_infos = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_OLD_TO_NEW[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue
except KeyError:
pass
journal_volume = _publication_info.get('journal_volume')
if journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and journal_volume and len(journal_volume) == 4:
try:
was_last_century = int(journal_volume[:2]) > 50
except ValueError:
pass
else:
_publication_info['year'] = int('19' + journal_volume[:2] if was_last_century else '20' + journal_volume[:2])
_publication_info['journal_volume'] = journal_volume[2:]
result.append(_publication_info)
continue
if journal_title and journal_volume and journal_title.lower() not in JOURNALS_IGNORED_IN_OLD_TO_NEW:
volume_starts_with_a_letter = _RE_VOLUME_STARTS_WITH_A_LETTER.match(journal_volume)
volume_ends_with_a_letter = _RE_VOLUME_ENDS_WITH_A_LETTER.match(journal_volume)
match = volume_starts_with_a_letter or volume_ends_with_a_letter
if match:
_publication_info.pop('journal_record', None)
if journal_title in _JOURNALS_RENAMED_OLD_TO_NEW.values():
_publication_info['journal_title'] = journal_title
else:
_publication_info['journal_title'] = ''.join([
journal_title,
'' if journal_title.endswith('.') else ' ',
match.group('letter'),
])
_publication_info['journal_volume'] = match.group('volume')
hidden = _publication_info.pop('hidden', None)
if hidden:
hidden_publication_infos.append(_publication_info)
else:
result.append(_publication_info)
for publication_info in hidden_publication_infos:
if publication_info not in result:
publication_info['hidden'] = True
result.append(publication_info)
return result | [
"def",
"convert_old_publication_info_to_new",
"(",
"publication_infos",
")",
":",
"result",
"=",
"[",
"]",
"hidden_publication_infos",
"=",
"[",
"]",
"for",
"publication_info",
"in",
"publication_infos",
":",
"_publication_info",
"=",
"copy",
".",
"deepcopy",
"(",
"publication_info",
")",
"journal_title",
"=",
"_publication_info",
".",
"get",
"(",
"'journal_title'",
")",
"try",
":",
"journal_title",
"=",
"_JOURNALS_RENAMED_OLD_TO_NEW",
"[",
"journal_title",
"]",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"journal_title",
"result",
".",
"append",
"(",
"_publication_info",
")",
"continue",
"except",
"KeyError",
":",
"pass",
"journal_volume",
"=",
"_publication_info",
".",
"get",
"(",
"'journal_volume'",
")",
"if",
"journal_title",
"in",
"_JOURNALS_WITH_YEAR_ADDED_TO_VOLUME",
"and",
"journal_volume",
"and",
"len",
"(",
"journal_volume",
")",
"==",
"4",
":",
"try",
":",
"was_last_century",
"=",
"int",
"(",
"journal_volume",
"[",
":",
"2",
"]",
")",
">",
"50",
"except",
"ValueError",
":",
"pass",
"else",
":",
"_publication_info",
"[",
"'year'",
"]",
"=",
"int",
"(",
"'19'",
"+",
"journal_volume",
"[",
":",
"2",
"]",
"if",
"was_last_century",
"else",
"'20'",
"+",
"journal_volume",
"[",
":",
"2",
"]",
")",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"journal_volume",
"[",
"2",
":",
"]",
"result",
".",
"append",
"(",
"_publication_info",
")",
"continue",
"if",
"journal_title",
"and",
"journal_volume",
"and",
"journal_title",
".",
"lower",
"(",
")",
"not",
"in",
"JOURNALS_IGNORED_IN_OLD_TO_NEW",
":",
"volume_starts_with_a_letter",
"=",
"_RE_VOLUME_STARTS_WITH_A_LETTER",
".",
"match",
"(",
"journal_volume",
")",
"volume_ends_with_a_letter",
"=",
"_RE_VOLUME_ENDS_WITH_A_LETTER",
".",
"match",
"(",
"journal_volume",
")",
"match",
"=",
"volume_starts_with_a_letter",
"or",
"volume_ends_with_a_letter",
"if",
"match",
":",
"_publication_info",
".",
"pop",
"(",
"'journal_record'",
",",
"None",
")",
"if",
"journal_title",
"in",
"_JOURNALS_RENAMED_OLD_TO_NEW",
".",
"values",
"(",
")",
":",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"journal_title",
"else",
":",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"''",
".",
"join",
"(",
"[",
"journal_title",
",",
"''",
"if",
"journal_title",
".",
"endswith",
"(",
"'.'",
")",
"else",
"' '",
",",
"match",
".",
"group",
"(",
"'letter'",
")",
",",
"]",
")",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"match",
".",
"group",
"(",
"'volume'",
")",
"hidden",
"=",
"_publication_info",
".",
"pop",
"(",
"'hidden'",
",",
"None",
")",
"if",
"hidden",
":",
"hidden_publication_infos",
".",
"append",
"(",
"_publication_info",
")",
"else",
":",
"result",
".",
"append",
"(",
"_publication_info",
")",
"for",
"publication_info",
"in",
"hidden_publication_infos",
":",
"if",
"publication_info",
"not",
"in",
"result",
":",
"publication_info",
"[",
"'hidden'",
"]",
"=",
"True",
"result",
".",
"append",
"(",
"publication_info",
")",
"return",
"result"
] | Convert a ``publication_info`` value from the old format to the new.
On Legacy different series of the same journal were modeled by adding the
letter part of the name to the journal volume. For example, a paper published
in Physical Review D contained::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.',
'journal_volume': 'D43',
},
],
}
On Labs we instead represent each series with a different journal record. As
a consequence, the above example becomes::
{
'publication_info': [
{
'journal_title': 'Phys.Rev.D',
'journal_volume': '43',
},
],
}
This function handles this translation from the old format to the new. Please
also see the tests for various edge cases that this function also handles.
Args:
publication_infos: a ``publication_info`` in the old format.
Returns:
list(dict): a ``publication_info`` in the new format. | [
"Convert",
"a",
"publication_info",
"value",
"from",
"the",
"old",
"format",
"to",
"the",
"new",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L779-L872 | train |
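A runnable restatement of the docstring's own Phys.Rev. example:

from inspire_schemas.utils import convert_old_publication_info_to_new

old = [{'journal_title': 'Phys.Rev.', 'journal_volume': 'D43'}]
new = convert_old_publication_info_to_new(old)
# per the docstring: [{'journal_title': 'Phys.Rev.D', 'journal_volume': '43'}]
print(new)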
inspirehep/inspire-schemas | inspire_schemas/utils.py | convert_new_publication_info_to_old | def convert_new_publication_info_to_old(publication_infos):
"""Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format.
"""
def _needs_a_hidden_pubnote(journal_title, journal_volume):
return (
journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and
journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
)
result = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue
except KeyError:
pass
journal_volume = _publication_info.get('journal_volume')
year = _publication_info.get('year')
if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and
journal_volume and len(journal_volume) == 2):
two_digit_year = str(year)[2:]
_publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
result.append(_publication_info)
continue
if journal_title and journal_volume:
match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = journal_volume + match.group('letter')
result.append(_publication_info)
_publication_info = copy.deepcopy(publication_info)
_publication_info['hidden'] = True
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
result.append(_publication_info)
return result | python | def convert_new_publication_info_to_old(publication_infos):
"""Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format.
"""
def _needs_a_hidden_pubnote(journal_title, journal_volume):
return (
journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE and
journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
)
result = []
for publication_info in publication_infos:
_publication_info = copy.deepcopy(publication_info)
journal_title = _publication_info.get('journal_title')
try:
journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
_publication_info['journal_title'] = journal_title
result.append(_publication_info)
continue
except KeyError:
pass
journal_volume = _publication_info.get('journal_volume')
year = _publication_info.get('year')
if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year and
journal_volume and len(journal_volume) == 2):
two_digit_year = str(year)[2:]
_publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
result.append(_publication_info)
continue
if journal_title and journal_volume:
match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = journal_volume + match.group('letter')
result.append(_publication_info)
_publication_info = copy.deepcopy(publication_info)
_publication_info['hidden'] = True
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
_publication_info['journal_title'] = match.group('title')
_publication_info['journal_volume'] = match.group('letter') + journal_volume
result.append(_publication_info)
return result | [
"def",
"convert_new_publication_info_to_old",
"(",
"publication_infos",
")",
":",
"def",
"_needs_a_hidden_pubnote",
"(",
"journal_title",
",",
"journal_volume",
")",
":",
"return",
"(",
"journal_title",
"in",
"_JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE",
"and",
"journal_volume",
"in",
"_JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE",
"[",
"journal_title",
"]",
")",
"result",
"=",
"[",
"]",
"for",
"publication_info",
"in",
"publication_infos",
":",
"_publication_info",
"=",
"copy",
".",
"deepcopy",
"(",
"publication_info",
")",
"journal_title",
"=",
"_publication_info",
".",
"get",
"(",
"'journal_title'",
")",
"try",
":",
"journal_title",
"=",
"_JOURNALS_RENAMED_NEW_TO_OLD",
"[",
"journal_title",
"]",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"journal_title",
"result",
".",
"append",
"(",
"_publication_info",
")",
"continue",
"except",
"KeyError",
":",
"pass",
"journal_volume",
"=",
"_publication_info",
".",
"get",
"(",
"'journal_volume'",
")",
"year",
"=",
"_publication_info",
".",
"get",
"(",
"'year'",
")",
"if",
"(",
"journal_title",
"in",
"_JOURNALS_WITH_YEAR_ADDED_TO_VOLUME",
"and",
"year",
"and",
"journal_volume",
"and",
"len",
"(",
"journal_volume",
")",
"==",
"2",
")",
":",
"two_digit_year",
"=",
"str",
"(",
"year",
")",
"[",
"2",
":",
"]",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"''",
".",
"join",
"(",
"[",
"two_digit_year",
",",
"journal_volume",
"]",
")",
"result",
".",
"append",
"(",
"_publication_info",
")",
"continue",
"if",
"journal_title",
"and",
"journal_volume",
":",
"match",
"=",
"_RE_TITLE_ENDS_WITH_A_LETTER",
".",
"match",
"(",
"journal_title",
")",
"if",
"match",
"and",
"_needs_a_hidden_pubnote",
"(",
"journal_title",
",",
"journal_volume",
")",
":",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"match",
".",
"group",
"(",
"'title'",
")",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"journal_volume",
"+",
"match",
".",
"group",
"(",
"'letter'",
")",
"result",
".",
"append",
"(",
"_publication_info",
")",
"_publication_info",
"=",
"copy",
".",
"deepcopy",
"(",
"publication_info",
")",
"_publication_info",
"[",
"'hidden'",
"]",
"=",
"True",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"match",
".",
"group",
"(",
"'title'",
")",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"match",
".",
"group",
"(",
"'letter'",
")",
"+",
"journal_volume",
"elif",
"match",
"and",
"journal_title",
"not",
"in",
"_JOURNALS_ALREADY_ENDING_WITH_A_LETTER",
":",
"_publication_info",
"[",
"'journal_title'",
"]",
"=",
"match",
".",
"group",
"(",
"'title'",
")",
"_publication_info",
"[",
"'journal_volume'",
"]",
"=",
"match",
".",
"group",
"(",
"'letter'",
")",
"+",
"journal_volume",
"result",
".",
"append",
"(",
"_publication_info",
")",
"return",
"result"
] | Convert back a ``publication_info`` value from the new format to the old.
Does the inverse transformation of :func:`convert_old_publication_info_to_new`,
to be used whenever we are sending back records from Labs to Legacy.
Args:
publication_infos: a ``publication_info`` in the new format.
Returns:
list(dict): a ``publication_info`` in the old format. | [
"Convert",
"back",
"a",
"publication_info",
"value",
"from",
"the",
"new",
"format",
"to",
"the",
"old",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L875-L934 | train |
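The inverse direction, again following the documented Phys.Rev. example; the exact round-trip result depends on the module's private journal tables, so the comment is an assumption:

from inspire_schemas.utils import convert_new_publication_info_to_old

new = [{'journal_title': 'Phys.Rev.D', 'journal_volume': '43'}]
old = convert_new_publication_info_to_old(new)
# expected: [{'journal_title': 'Phys.Rev.', 'journal_volume': 'D43'}]
print(old)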
inspirehep/inspire-schemas | inspire_schemas/utils.py | fix_reference_url | def fix_reference_url(url):
"""Used to parse an incorect url to try to fix it with the most common ocurrences for errors.
If the fixed url is still incorrect, it returns ``None``.
Returns:
String containing the fixed url or the original one if it could not be fixed.
"""
new_url = url
new_url = fix_url_bars_instead_of_slashes(new_url)
new_url = fix_url_add_http_if_missing(new_url)
new_url = fix_url_replace_tilde(new_url)
try:
rfc3987.parse(new_url, rule="URI")
return new_url
except ValueError:
return url | python | def fix_reference_url(url):
"""Used to parse an incorect url to try to fix it with the most common ocurrences for errors.
If the fixed url is still incorrect, it returns ``None``.
Returns:
String containing the fixed url or the original one if it could not be fixed.
"""
new_url = url
new_url = fix_url_bars_instead_of_slashes(new_url)
new_url = fix_url_add_http_if_missing(new_url)
new_url = fix_url_replace_tilde(new_url)
try:
rfc3987.parse(new_url, rule="URI")
return new_url
except ValueError:
return url | [
"def",
"fix_reference_url",
"(",
"url",
")",
":",
"new_url",
"=",
"url",
"new_url",
"=",
"fix_url_bars_instead_of_slashes",
"(",
"new_url",
")",
"new_url",
"=",
"fix_url_add_http_if_missing",
"(",
"new_url",
")",
"new_url",
"=",
"fix_url_replace_tilde",
"(",
"new_url",
")",
"try",
":",
"rfc3987",
".",
"parse",
"(",
"new_url",
",",
"rule",
"=",
"\"URI\"",
")",
"return",
"new_url",
"except",
"ValueError",
":",
"return",
"url"
] | Used to parse an incorrect url to try to fix it with the most common occurrences for errors.
If the fixed url is still incorrect, it returns the original url.
Returns:
String containing the fixed url or the original one if it could not be fixed. | [
"Used",
"to",
"parse",
"an",
"incorect",
"url",
"to",
"try",
"to",
"fix",
"it",
"with",
"the",
"most",
"common",
"ocurrences",
"for",
"errors",
".",
"If",
"the",
"fixed",
"url",
"is",
"still",
"incorrect",
"it",
"returns",
"None",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L957-L976 | train |
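A sketch of the repair pipeline above; the sample URLs are hypothetical:

from inspire_schemas.utils import fix_reference_url

# The three fix_url_* helpers run in sequence; if the result still fails
# RFC 3987 parsing, the original string comes back unchanged.
print(fix_reference_url('www.example.org/paper'))  # likely gains 'http://'
print(fix_reference_url('not a url at all'))       # returned as-is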
inspirehep/inspire-schemas | inspire_schemas/utils.py | is_arxiv | def is_arxiv(obj):
"""Return ``True`` if ``obj`` contains an arXiv identifier.
The ``idutils`` library's ``is_arxiv`` function has been
modified here to work with two regular expressions instead
of three and adding a check for valid arxiv categories only"""
arxiv_test = obj.split()
if not arxiv_test:
return False
matched_arxiv = (RE_ARXIV_PRE_2007_CLASS.match(arxiv_test[0]) or
RE_ARXIV_POST_2007_CLASS.match(arxiv_test[0]))
if not matched_arxiv:
return False
if not matched_arxiv.group('category'):
return True
valid_arxiv_categories_lower = [category.lower() for category in valid_arxiv_categories()]
category = matched_arxiv.group('category').lower()
return (category in valid_arxiv_categories_lower or
category.replace('-', '.') in valid_arxiv_categories_lower) | python | def is_arxiv(obj):
"""Return ``True`` if ``obj`` contains an arXiv identifier.
The ``idutils`` library's ``is_arxiv`` function has been
modified here to work with two regular expressions instead
of three, and to add a check for valid arXiv categories only."""
arxiv_test = obj.split()
if not arxiv_test:
return False
matched_arxiv = (RE_ARXIV_PRE_2007_CLASS.match(arxiv_test[0]) or
RE_ARXIV_POST_2007_CLASS.match(arxiv_test[0]))
if not matched_arxiv:
return False
if not matched_arxiv.group('category'):
return True
valid_arxiv_categories_lower = [category.lower() for category in valid_arxiv_categories()]
category = matched_arxiv.group('category').lower()
return (category in valid_arxiv_categories_lower or
category.replace('-', '.') in valid_arxiv_categories_lower) | [
"def",
"is_arxiv",
"(",
"obj",
")",
":",
"arxiv_test",
"=",
"obj",
".",
"split",
"(",
")",
"if",
"not",
"arxiv_test",
":",
"return",
"False",
"matched_arxiv",
"=",
"(",
"RE_ARXIV_PRE_2007_CLASS",
".",
"match",
"(",
"arxiv_test",
"[",
"0",
"]",
")",
"or",
"RE_ARXIV_POST_2007_CLASS",
".",
"match",
"(",
"arxiv_test",
"[",
"0",
"]",
")",
")",
"if",
"not",
"matched_arxiv",
":",
"return",
"False",
"if",
"not",
"matched_arxiv",
".",
"group",
"(",
"'category'",
")",
":",
"return",
"True",
"valid_arxiv_categories_lower",
"=",
"[",
"category",
".",
"lower",
"(",
")",
"for",
"category",
"in",
"valid_arxiv_categories",
"(",
")",
"]",
"category",
"=",
"matched_arxiv",
".",
"group",
"(",
"'category'",
")",
".",
"lower",
"(",
")",
"return",
"(",
"category",
"in",
"valid_arxiv_categories_lower",
"or",
"category",
".",
"replace",
"(",
"'-'",
",",
"'.'",
")",
"in",
"valid_arxiv_categories_lower",
")"
] | Return ``True`` if ``obj`` contains an arXiv identifier.
The ``idutils`` library's ``is_arxiv`` function has been
modified here to work with two regular expressions instead
of three, and to add a check for valid arXiv categories only. | [
"Return",
"True",
"if",
"obj",
"contains",
"an",
"arXiv",
"identifier",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L987-L1009 | train |
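Illustrative calls; whether each shape is accepted ultimately depends on the module's two regexes and its category list, so the expected results are assumptions:

from inspire_schemas.utils import is_arxiv

print(is_arxiv('1901.00123'))      # post-2007 identifier -> True
print(is_arxiv('hep-th/9901001'))  # pre-2007 identifier with category -> True
print(is_arxiv('not-an-id'))       # -> False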
inspirehep/inspire-schemas | inspire_schemas/utils.py | normalize_arxiv | def normalize_arxiv(obj):
"""Return a normalized arXiv identifier from ``obj``."""
obj = obj.split()[0]
matched_arxiv_pre = RE_ARXIV_PRE_2007_CLASS.match(obj)
if matched_arxiv_pre:
return ('/'.join(matched_arxiv_pre.group("extraidentifier", "identifier"))).lower()
matched_arxiv_post = RE_ARXIV_POST_2007_CLASS.match(obj)
if matched_arxiv_post:
return matched_arxiv_post.group("identifier")
return None | python | def normalize_arxiv(obj):
"""Return a normalized arXiv identifier from ``obj``."""
obj = obj.split()[0]
matched_arxiv_pre = RE_ARXIV_PRE_2007_CLASS.match(obj)
if matched_arxiv_pre:
return ('/'.join(matched_arxiv_pre.group("extraidentifier", "identifier"))).lower()
matched_arxiv_post = RE_ARXIV_POST_2007_CLASS.match(obj)
if matched_arxiv_post:
return matched_arxiv_post.group("identifier")
return None | [
"def",
"normalize_arxiv",
"(",
"obj",
")",
":",
"obj",
"=",
"obj",
".",
"split",
"(",
")",
"[",
"0",
"]",
"matched_arxiv_pre",
"=",
"RE_ARXIV_PRE_2007_CLASS",
".",
"match",
"(",
"obj",
")",
"if",
"matched_arxiv_pre",
":",
"return",
"(",
"'/'",
".",
"join",
"(",
"matched_arxiv_pre",
".",
"group",
"(",
"\"extraidentifier\"",
",",
"\"identifier\"",
")",
")",
")",
".",
"lower",
"(",
")",
"matched_arxiv_post",
"=",
"RE_ARXIV_POST_2007_CLASS",
".",
"match",
"(",
"obj",
")",
"if",
"matched_arxiv_post",
":",
"return",
"matched_arxiv_post",
".",
"group",
"(",
"\"identifier\"",
")",
"return",
"None"
] | Return a normalized arXiv identifier from ``obj``. | [
"Return",
"a",
"normalized",
"arXiv",
"identifier",
"from",
"obj",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L1012-L1024 | train |
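A companion sketch for the normalizer, with the same caveat about the regexes:

from inspire_schemas.utils import normalize_arxiv

print(normalize_arxiv('hep-th/9901001'))  # expected: 'hep-th/9901001', lowercased
print(normalize_arxiv('1901.00123'))      # expected: the bare post-2007 number
print(normalize_arxiv('something else'))  # None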
inspirehep/inspire-schemas | inspire_schemas/utils.py | LocalRefResolver.resolve_remote | def resolve_remote(self, uri):
"""Resolve a uri or relative path to a schema."""
try:
return super(LocalRefResolver, self).resolve_remote(uri)
except ValueError:
return super(LocalRefResolver, self).resolve_remote(
'file://' + get_schema_path(uri.rsplit('.json', 1)[0])
) | python | def resolve_remote(self, uri):
"""Resolve a uri or relative path to a schema."""
try:
return super(LocalRefResolver, self).resolve_remote(uri)
except ValueError:
return super(LocalRefResolver, self).resolve_remote(
'file://' + get_schema_path(uri.rsplit('.json', 1)[0])
) | [
"def",
"resolve_remote",
"(",
"self",
",",
"uri",
")",
":",
"try",
":",
"return",
"super",
"(",
"LocalRefResolver",
",",
"self",
")",
".",
"resolve_remote",
"(",
"uri",
")",
"except",
"ValueError",
":",
"return",
"super",
"(",
"LocalRefResolver",
",",
"self",
")",
".",
"resolve_remote",
"(",
"'file://'",
"+",
"get_schema_path",
"(",
"uri",
".",
"rsplit",
"(",
"'.json'",
",",
"1",
")",
"[",
"0",
"]",
")",
")"
] | Resolve a uri or relative path to a schema. | [
"Resolve",
"a",
"uri",
"or",
"relative",
"path",
"to",
"a",
"schema",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/utils.py#L535-L542 | train |
pylp/pylp | pylp/lib/file.py | File.set_path | def set_path(self, path):
"""Set the path of the file."""
if os.path.isabs(path):
path = os.path.normpath(os.path.join(self.cwd, path))
self.path = path
self.relative = os.path.relpath(self.path, self.base) | python | def set_path(self, path):
"""Set the path of the file."""
if os.path.isabs(path):
path = os.path.normpath(os.path.join(self.cwd, path))
self.path = path
self.relative = os.path.relpath(self.path, self.base) | [
"def",
"set_path",
"(",
"self",
",",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cwd",
",",
"path",
")",
")",
"self",
".",
"path",
"=",
"path",
"self",
".",
"relative",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"self",
".",
"path",
",",
"self",
".",
"base",
")"
] | Set the path of the file. | [
"Set",
"the",
"path",
"of",
"the",
"file",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/file.py#L44-L50 | train |
pylp/pylp | pylp/lib/file.py | File.clone | def clone(self, path = None, *, with_contents = True, **options):
"""Clone the file."""
file = File(path if path else self.path, cwd=options.get("cwd", self.cwd))
file.base = options.get("base", self.base)
if with_contents:
file.contents = options.get("contents", self.contents)
return file | python | def clone(self, path = None, *, with_contents = True, **options):
"""Clone the file."""
file = File(path if path else self.path, cwd=options.get("cwd", self.cwd))
file.base = options.get("base", self.base)
if with_contents:
file.contents = options.get("contents", self.contents)
return file | [
"def",
"clone",
"(",
"self",
",",
"path",
"=",
"None",
",",
"*",
",",
"with_contents",
"=",
"True",
",",
"*",
"*",
"options",
")",
":",
"file",
"=",
"File",
"(",
"path",
"if",
"path",
"else",
"self",
".",
"path",
",",
"cwd",
"=",
"options",
".",
"get",
"(",
"\"cwd\"",
",",
"self",
".",
"cwd",
")",
")",
"file",
".",
"base",
"=",
"options",
".",
"get",
"(",
"\"base\"",
",",
"self",
".",
"base",
")",
"if",
"with_contents",
":",
"file",
".",
"contents",
"=",
"options",
".",
"get",
"(",
"\"contents\"",
",",
"self",
".",
"contents",
")",
"return",
"file"
] | Clone the file. | [
"Clone",
"the",
"file",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/lib/file.py#L53-L61 | train |
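A combined sketch for the two pylp ``File`` records above (``set_path`` and ``clone``). The ``File(path, cwd=...)`` constructor signature is inferred from ``clone``'s body and is an assumption; note also that ``set_path`` joins ``cwd`` only when the path is already absolute, which looks inverted relative to its apparent intent, so treat the path handling as illustrative:

from pylp.lib.file import File

f = File('src/app.py', cwd='/project')  # signature inferred from clone()
f.set_path('build/app.py')              # updates f.path and f.relative
copy = f.clone(with_contents=False, base='/project/build')
print(copy.path, copy.relative)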
pylp/pylp | pylp/cli/cli.py | launch_cli | def launch_cli():
"""Launch the CLI."""
# Create the CLI argument parser
parser = argparse.ArgumentParser(
prog="pylp",
description="Call some tasks defined in your pylpfile."
)
# Version of Pylp
parser.add_argument("-v", "--version",
action="version",
version="Pylp %s" % version,
help="get the Pylp version and exit"
)
# Set the pylpfile location
parser.add_argument('--pylpfile',
nargs=1,
help="manually set path of pylpfile",
metavar="<path>"
)
# Set the current working directory
parser.add_argument('--cwd',
nargs=1,
help="manually set the CWD",
metavar="<dir path>"
)
# Force Pylp to not display colors
parser.add_argument('--no-color',
action="store_false",
help="force Pylp to not display colors"
)
# Disable logging
parser.add_argument('--silent',
action="store_true",
help="disable all Pylp logging"
)
# List of tasks to execute
parser.add_argument('tasks',
nargs="*",
default=["default"],
help="tasks to execute (if none, execute the 'default' task)",
metavar="<task>"
)
# Parse the CLI arguments
args = parser.parse_args()
# Current working directory (CWD)
if args.cwd:
config.cwd = args.cwd[0]
else:
config.cwd = os.getcwd()
# Get the pylpfile location
if args.pylpfile:
pylpfile = args.pylpfile[0]
if not args.pylpfile:
pylpfile = path.join(config.cwd, "pylpfile.py")
elif not args.cwd:
config.cwd = path.dirname(pylpfile)
# Must the terminal have colors?
config.color = args.no_color
# Must Pylp be silent (no logging)?
config.silent = args.silent
# Execute the pylpfile
run(pylpfile, args.tasks) | python | def launch_cli():
"""Launch the CLI."""
# Create the CLI argument parser
parser = argparse.ArgumentParser(
prog="pylp",
description="Call some tasks defined in your pylpfile."
)
# Version of Pylp
parser.add_argument("-v", "--version",
action="version",
version="Pylp %s" % version,
help="get the Pylp version and exit"
)
# Set the pylpfile location
parser.add_argument('--pylpfile',
nargs=1,
help="manually set path of pylpfile",
metavar="<path>"
)
# Set the current working directory
parser.add_argument('--cwd',
nargs=1,
help="manually set the CWD",
metavar="<dir path>"
)
# Force Pylp to not display colors
parser.add_argument('--no-color',
action="store_false",
help="force Pylp to not display colors"
)
# Disable logging
parser.add_argument('--silent',
action="store_true",
help="disable all Pylp logging"
)
# List of tasks to execute
parser.add_argument('tasks',
nargs="*",
default=["default"],
help="tasks to execute (if none, execute the 'default' task)",
metavar="<task>"
)
# Parse the CLI arguments
args = parser.parse_args()
# Current working directory (CWD)
if args.cwd:
config.cwd = args.cwd[0]
else:
config.cwd = os.getcwd()
# Get the pylpfile location
if args.pylpfile:
pylpfile = args.pylpfile[0]
if not args.pylpfile:
pylpfile = path.join(config.cwd, "pylpfile.py")
elif not args.cwd:
config.cwd = path.dirname(pylpfile)
# Must the terminal have colors?
config.color = args.no_color
# Must Pylp be silent (no logging)?
config.silent = args.silent
# Execute the pylpfile
run(pylpfile, args.tasks) | [
"def",
"launch_cli",
"(",
")",
":",
"# Create the CLI argument parser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"pylp\"",
",",
"description",
"=",
"\"Call some tasks defined in your pylpfile.\"",
")",
"# Version of Pylp",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"Pylp %s\"",
"%",
"version",
",",
"help",
"=",
"\"get the Pylp version and exit\"",
")",
"# Set the pylpfile location",
"parser",
".",
"add_argument",
"(",
"'--pylpfile'",
",",
"nargs",
"=",
"1",
",",
"help",
"=",
"\"manually set path of pylpfile\"",
",",
"metavar",
"=",
"\"<path>\"",
")",
"# Set the pylpfile location",
"parser",
".",
"add_argument",
"(",
"'--cwd'",
",",
"nargs",
"=",
"1",
",",
"help",
"=",
"\"manually set the CWD\"",
",",
"metavar",
"=",
"\"<dir path>\"",
")",
"# Force Pylp to not display colors",
"parser",
".",
"add_argument",
"(",
"'--no-color'",
",",
"action",
"=",
"\"store_false\"",
",",
"help",
"=",
"\"force Pylp to not display colors\"",
")",
"# Disable logging",
"parser",
".",
"add_argument",
"(",
"'--silent'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"disable all Pylp logging\"",
")",
"# List of tasks to execute",
"parser",
".",
"add_argument",
"(",
"'tasks'",
",",
"nargs",
"=",
"\"*\"",
",",
"default",
"=",
"[",
"\"default\"",
"]",
",",
"help",
"=",
"\"tasks to execute (if none, execute the 'default' task)\"",
",",
"metavar",
"=",
"\"<task>\"",
")",
"# Parse the CLI arguments",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Current working directory (CWD)",
"if",
"args",
".",
"cwd",
":",
"config",
".",
"cwd",
"=",
"args",
".",
"cwd",
"[",
"0",
"]",
"else",
":",
"config",
".",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"# Get the pylpfile location",
"if",
"args",
".",
"pylpfile",
":",
"pylpfile",
"=",
"args",
".",
"pylpfile",
"[",
"0",
"]",
"if",
"not",
"args",
".",
"pylpfile",
":",
"pylpfile",
"=",
"path",
".",
"join",
"(",
"config",
".",
"cwd",
",",
"\"pylpfile.py\"",
")",
"elif",
"not",
"args",
".",
"cwd",
":",
"config",
".",
"cwd",
"=",
"path",
".",
"dirname",
"(",
"pylpfile",
")",
"# Must the terminal have colors?",
"config",
".",
"color",
"=",
"args",
".",
"no_color",
"# Must Pylp be silent (no logging)?",
"config",
".",
"silent",
"=",
"args",
".",
"silent",
"# Execute the pylpfile",
"run",
"(",
"pylpfile",
",",
"args",
".",
"tasks",
")"
] | Launch the CLI. | [
"Launch",
"the",
"CLI",
"."
] | 7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4 | https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/cli.py#L29-L110 | train |
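The CLI entry point reads ``sys.argv`` via argparse, so a sketch can drive it by patching argv; the flag names come directly from the record, while the import path and task names are assumptions:

import sys
from pylp.cli.cli import launch_cli

sys.argv = ['pylp', '--cwd', '/project', '--silent', 'build', 'test']
launch_cli()  # executes the 'build' and 'test' tasks from /project/pylpfile.py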
inspirehep/inspire-schemas | inspire_schemas/builders/signatures.py | SignatureBuilder.add_affiliation | def add_affiliation(self, value, curated_relation=None, record=None):
"""Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference
"""
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation) | python | def add_affiliation(self, value, curated_relation=None, record=None):
"""Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference
"""
if value:
affiliation = {
'value': value
}
if record:
affiliation['record'] = record
if curated_relation is not None:
affiliation['curated_relation'] = curated_relation
self._ensure_list_field('affiliations', affiliation) | [
"def",
"add_affiliation",
"(",
"self",
",",
"value",
",",
"curated_relation",
"=",
"None",
",",
"record",
"=",
"None",
")",
":",
"if",
"value",
":",
"affiliation",
"=",
"{",
"'value'",
":",
"value",
"}",
"if",
"record",
":",
"affiliation",
"[",
"'record'",
"]",
"=",
"record",
"if",
"curated_relation",
"is",
"not",
"None",
":",
"affiliation",
"[",
"'curated_relation'",
"]",
"=",
"curated_relation",
"self",
".",
"_ensure_list_field",
"(",
"'affiliations'",
",",
"affiliation",
")"
] | Add an affiliation.
Args:
value (string): affiliation value
curated_relation (bool): is relation curated
record (dict): affiliation JSON reference | [
"Add",
"an",
"affiliation",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/signatures.py#L69-L85 | train |
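A hedged sketch for the builder record above; constructing ``SignatureBuilder`` with no arguments and the ``$ref`` value are assumptions:

from inspire_schemas.builders.signatures import SignatureBuilder

builder = SignatureBuilder()
builder.add_affiliation(
    'CERN',
    curated_relation=True,
    record={'$ref': 'https://example.org/api/institutions/902725'},  # hypothetical
)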
inspirehep/inspire-schemas | inspire_schemas/builders/signatures.py | SignatureBuilder.set_uid | def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema do not match
"""
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
# the UID is invalid
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]
self._add_uid(uid, schema) | python | def set_uid(self, uid, schema=None):
"""Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema do not match
"""
try:
uid, schema = author_id_normalize_and_schema(uid, schema)
except UnknownUIDSchema:
# Explicit schema wasn't provided, and the UID is too little
# to figure out the schema of it, this however doesn't mean
# the UID is invalid
pass
self._ensure_field('ids', [])
self.obj['ids'] = [id_ for id_ in self.obj['ids'] if id_.get('schema') != schema]
self._add_uid(uid, schema) | [
"def",
"set_uid",
"(",
"self",
",",
"uid",
",",
"schema",
"=",
"None",
")",
":",
"try",
":",
"uid",
",",
"schema",
"=",
"author_id_normalize_and_schema",
"(",
"uid",
",",
"schema",
")",
"except",
"UnknownUIDSchema",
":",
"# Explicit schema wasn't provided, and the UID is too little",
"# to figure out the schema of it, this however doesn't mean",
"# the UID is invalid",
"pass",
"self",
".",
"_ensure_field",
"(",
"'ids'",
",",
"[",
"]",
")",
"self",
".",
"obj",
"[",
"'ids'",
"]",
"=",
"[",
"id_",
"for",
"id_",
"in",
"self",
".",
"obj",
"[",
"'ids'",
"]",
"if",
"id_",
".",
"get",
"(",
"'schema'",
")",
"!=",
"schema",
"]",
"self",
".",
"_add_uid",
"(",
"uid",
",",
"schema",
")"
] | Set a unique ID.
If a UID of a given schema already exists in a record it will
be overwritten, otherwise it will be appended to the record.
Args:
uid (string): unique identifier.
schema (Optional[string]): schema of the unique identifier. If
``None``, the schema will be guessed based on the shape of
``uid``.
Raises:
SchemaUIDConflict: if UID and schema do not match | [
"Set",
"a",
"unique",
"ID",
"."
] | 34bc124b62fba565b6b40d1a3c15103a23a05edb | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/signatures.py#L111-L136 | train |
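Continuing the builder sketch: ``set_uid`` replaces any earlier ID of the same schema. The identifier values below are hypothetical (ORCID-shaped strings, so the schema is guessed):

builder.set_uid('0000-0002-1825-0097')  # schema inferred from the UID's shape
builder.set_uid('0000-0002-1825-0098')  # same schema, so it overwrites the first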
druids/django-chamber | chamber/utils/decorators.py | singleton | def singleton(klass):
"""
Create singleton from class
"""
instances = {}
def getinstance(*args, **kwargs):
if klass not in instances:
instances[klass] = klass(*args, **kwargs)
return instances[klass]
return wraps(klass)(getinstance) | python | def singleton(klass):
"""
Create singleton from class
"""
instances = {}
def getinstance(*args, **kwargs):
if klass not in instances:
instances[klass] = klass(*args, **kwargs)
return instances[klass]
return wraps(klass)(getinstance) | [
"def",
"singleton",
"(",
"klass",
")",
":",
"instances",
"=",
"{",
"}",
"def",
"getinstance",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"klass",
"not",
"in",
"instances",
":",
"instances",
"[",
"klass",
"]",
"=",
"klass",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"instances",
"[",
"klass",
"]",
"return",
"wraps",
"(",
"klass",
")",
"(",
"getinstance",
")"
] | Create singleton from class | [
"Create",
"singleton",
"from",
"class"
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/decorators.py#L13-L23 | train |
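A self-contained example of the decorator:

from chamber.utils.decorators import singleton

@singleton
class Config:
    def __init__(self):
        self.values = {}

assert Config() is Config()  # every call yields the same instance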
druids/django-chamber | chamber/utils/decorators.py | translation_activate_block | def translation_activate_block(function=None, language=None):
"""
Activate language only for one method or function
"""
def _translation_activate_block(function):
def _decorator(*args, **kwargs):
tmp_language = translation.get_language()
try:
translation.activate(language or settings.LANGUAGE_CODE)
return function(*args, **kwargs)
finally:
translation.activate(tmp_language)
return wraps(function)(_decorator)
if function:
return _translation_activate_block(function)
else:
return _translation_activate_block | python | def translation_activate_block(function=None, language=None):
"""
Activate language only for one method or function
"""
def _translation_activate_block(function):
def _decorator(*args, **kwargs):
tmp_language = translation.get_language()
try:
translation.activate(language or settings.LANGUAGE_CODE)
return function(*args, **kwargs)
finally:
translation.activate(tmp_language)
return wraps(function)(_decorator)
if function:
return _translation_activate_block(function)
else:
return _translation_activate_block | [
"def",
"translation_activate_block",
"(",
"function",
"=",
"None",
",",
"language",
"=",
"None",
")",
":",
"def",
"_translation_activate_block",
"(",
"function",
")",
":",
"def",
"_decorator",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"tmp_language",
"=",
"translation",
".",
"get_language",
"(",
")",
"try",
":",
"translation",
".",
"activate",
"(",
"language",
"or",
"settings",
".",
"LANGUAGE_CODE",
")",
"return",
"function",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"translation",
".",
"activate",
"(",
"tmp_language",
")",
"return",
"wraps",
"(",
"function",
")",
"(",
"_decorator",
")",
"if",
"function",
":",
"return",
"_translation_activate_block",
"(",
"function",
")",
"else",
":",
"return",
"_translation_activate_block"
] | Activate language only for one method or function | [
"Activate",
"language",
"only",
"for",
"one",
"method",
"or",
"function"
] | eef4169923557e96877a664fa254e8c0814f3f23 | https://github.com/druids/django-chamber/blob/eef4169923557e96877a664fa254e8c0814f3f23/chamber/utils/decorators.py#L26-L44 | train |
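Both calling conventions supported by the decorator, in a sketch; the decorated bodies are placeholders and assume Django settings are configured:

from chamber.utils.decorators import translation_activate_block

@translation_activate_block(language='en')
def render_email():
    ...  # runs with 'en' active; the previous language is restored afterwards

@translation_activate_block  # bare form falls back to settings.LANGUAGE_CODE
def render_invoice():
    ...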
bachya/pyopenuv | pyopenuv/client.py | Client.uv_protection_window | async def uv_protection_window(
self, low: float = 3.5, high: float = 3.5) -> dict:
"""Get data on when a UV protection window is."""
return await self.request(
'get', 'protection', params={
'from': str(low),
'to': str(high)
}) | python | async def uv_protection_window(
self, low: float = 3.5, high: float = 3.5) -> dict:
"""Get data on when a UV protection window is."""
return await self.request(
'get', 'protection', params={
'from': str(low),
'to': str(high)
}) | [
"async",
"def",
"uv_protection_window",
"(",
"self",
",",
"low",
":",
"float",
"=",
"3.5",
",",
"high",
":",
"float",
"=",
"3.5",
")",
"->",
"dict",
":",
"return",
"await",
"self",
".",
"request",
"(",
"'get'",
",",
"'protection'",
",",
"params",
"=",
"{",
"'from'",
":",
"str",
"(",
"low",
")",
",",
"'to'",
":",
"str",
"(",
"high",
")",
"}",
")"
] | Get data on when a UV protection window applies. | [
"Get",
"data",
"on",
"when",
"a",
"UV",
"protection",
"window",
"is",
"."
] | f7c2f9dd99dd4e3b8b1f9e501ea17ce62a7ace46 | https://github.com/bachya/pyopenuv/blob/f7c2f9dd99dd4e3b8b1f9e501ea17ce62a7ace46/pyopenuv/client.py#L69-L76 | train |
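An async usage sketch; the ``Client`` constructor arguments are not shown in this record, so the signature below (API key, latitude, longitude) is an assumption:

import asyncio
from pyopenuv.client import Client

async def main():
    client = Client('<api key>', 40.0, -105.0)  # assumed constructor signature
    window = await client.uv_protection_window(low=3.5, high=3.5)
    print(window)

asyncio.run(main())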