repo | path | func_name | original_string | language | sha | url | partition
---|---|---|---|---|---|---|---
openvax/varlens | varlens/read_evidence/pileup_collection.py | PileupCollection.filter | def filter(self,
drop_duplicates=False,
drop_improper_mate_pairs=False,
min_mapping_quality=None,
min_base_quality=None,
filters=None):
'''
Return a new PileupCollection that includes only pileup elements
satisfying the specified criteria.
Parameters
----------
drop_duplicates (optional, default False) : boolean
Remove alignments with the is_duplicate flag.
drop_improper_mate_pairs (optional, default False) : boolean
Retain only alignments that have mapped mate pairs, where one
alignment in the pair is on the forward strand and the other
is on the reverse.
min_mapping_quality (optional) : int
If specified, retain only alignments with mapping quality >= the
specified threshold.
min_base_quality (optional) : int
If specified, retain only pileup elements where the base quality
for the bases aligning to the pileup's locus are >= the specified
threshold.
filters (optional) : list of PileupElement -> bool functions
User-specified filter functions to apply. Each function is called on
every PileupElement and should return True if the element should
be retained.
Returns
-------
A new PileupCollection that includes the subset of the pileup elements
matching all the specified filters.
'''
if filters is None:
filters = []
if drop_duplicates:
filters.append(lambda e: not e.alignment.is_duplicate)
if drop_improper_mate_pairs:
filters.append(lambda e: e.alignment.is_proper_pair)
if min_mapping_quality is not None:
filters.append(
lambda e: e.alignment.mapping_quality >= min_mapping_quality)
if min_base_quality is not None:
filters.append(
lambda e: e.min_base_quality >= min_base_quality)
pileups = OrderedDict(
(locus, pileup.filter(filters))
for (locus, pileup)
in self.pileups.items())
return PileupCollection(pileups=pileups, parent=self) | python | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L445-L501 | train |
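A short, hedged usage sketch for the `filter` API in this row, combined with the `from_bam` constructor shown two rows below; the BAM path, the locus, and the `varlens.locus` import path are illustrative assumptions, not part of the dataset:

```python
# Hedged sketch: collect pileups for one locus, then narrow them with filter().
from varlens.locus import Locus  # import path assumed
from varlens.read_evidence.pileup_collection import PileupCollection

loci = [Locus.from_interbase_coordinates("chr1", 1_000_000)]
collection = PileupCollection.from_bam("sample.bam", loci)  # file name illustrative

# Each keyword below appends one predicate to the filter list.
filtered = collection.filter(
    drop_duplicates=True,
    drop_improper_mate_pairs=True,
    min_mapping_quality=30,
    min_base_quality=20,
)
```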
openvax/varlens | varlens/read_evidence/pileup_collection.py | PileupCollection.merge | def merge(self, *others):
'''
Return a new PileupCollection that is the union of self and the other
specified collections.
'''
new_pileups = {}
for collection in (self,) + others:
for (locus, pileup) in collection.pileups.items():
if locus in new_pileups:
new_pileups[locus].update(pileup)
else:
new_pileups[locus] = Pileup(locus, pileup.elements)
return PileupCollection(new_pileups, parent=self) | python | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L503-L515 | train |
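Continuing the previous sketch (same imports and `loci`), a minimal illustration of `merge` pooling evidence from two samples:

```python
tumor = PileupCollection.from_bam("tumor.bam", loci)    # file names illustrative
normal = PileupCollection.from_bam("normal.bam", loci)
combined = tumor.merge(normal)  # elements at shared loci are unioned
```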
openvax/varlens | varlens/read_evidence/pileup_collection.py | PileupCollection.from_bam | def from_bam(pysam_samfile, loci, normalized_contig_names=True):
'''
Create a PileupCollection for a set of loci from a BAM file.
Parameters
----------
pysam_samfile : `pysam.Samfile` instance, or filename string
to a BAM file. The BAM file must be indexed.
loci : list of Locus instances
Loci to collect pileups for.
normalized_contig_names : whether the contig names have been normalized
(e.g. pyensembl removes the 'chr' prefix). Set to true to
de-normalize the names when querying the BAM file.
Returns
-------
PileupCollection instance containing pileups for the specified loci.
All alignments in the BAM file are included (e.g. duplicate reads,
secondary alignments, etc.). See `PileupCollection.filter` if these
need to be removed.
'''
loci = [to_locus(obj) for obj in loci]
close_on_completion = False
if typechecks.is_string(pysam_samfile):
pysam_samfile = Samfile(pysam_samfile)
close_on_completion = True
try:
# Map from pyensembl normalized chromosome names used in Variant to
# the names used in the BAM file.
if normalized_contig_names:
chromosome_name_map = {}
for name in pysam_samfile.references:
normalized = pyensembl.locus.normalize_chromosome(name)
chromosome_name_map[normalized] = name
chromosome_name_map[name] = name
else:
chromosome_name_map = None
result = PileupCollection({})
# Optimization: we sort variants so our BAM reads are localized.
locus_iterator = itertools.chain.from_iterable(
(Locus.from_interbase_coordinates(locus_interval.contig, pos)
for pos
in locus_interval.positions)
for locus_interval in sorted(loci))
for locus in locus_iterator:
result.pileups[locus] = Pileup(locus, [])
if normalized_contig_names:
try:
chromosome = chromosome_name_map[locus.contig]
except KeyError:
logging.warning("No such contig in bam: %s", locus.contig)
continue
else:
chromosome = locus.contig
columns = pysam_samfile.pileup(
chromosome,
locus.position,
locus.position + 1, # exclusive, 0-indexed
truncate=True,
stepper="nofilter")
try:
column = next(columns)
except StopIteration:
# No reads align to this locus.
continue
# Note that storing the pileups here is necessary, since the
# subsequent assertion will invalidate our column.
pileups = column.pileups
assert list(columns) == [] # column is invalid after this.
for pileup_read in pileups:
if not pileup_read.is_refskip:
element = PileupElement.from_pysam_alignment(
locus, pileup_read)
result.pileups[locus].append(element)
return result
finally:
if close_on_completion:
pysam_samfile.close() | python | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/read_evidence/pileup_collection.py#L518-L603 | train |
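`from_bam` requires an indexed BAM per its docstring; a hedged sketch of preparing the index with pysam before handing the file over (continuing the earlier sketch's `loci`; the file name is illustrative):

```python
import pysam

pysam.index("sample.bam")  # writes sample.bam.bai so pileup queries can seek

with pysam.Samfile("sample.bam") as bam:
    collection = PileupCollection.from_bam(bam, loci)
```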
inveniosoftware/invenio-query-parser | invenio_query_parser/contrib/elasticsearch/__init__.py | invenio_query_factory | def invenio_query_factory(parser=None, walkers=None):
"""Create a parser returning Elastic Search DSL query instance."""
parser = parser or Main
walkers = walkers or [PypegConverter()]
walkers.append(ElasticSearchDSL())
def invenio_query(pattern):
query = pypeg2.parse(pattern, parser, whitespace="")
for walker in walkers:
query = query.accept(walker)
return query
return invenio_query | python | 21a2c36318003ff52d2e18e7196bb420db8ecb4b | https://github.com/inveniosoftware/invenio-query-parser/blob/21a2c36318003ff52d2e18e7196bb420db8ecb4b/invenio_query_parser/contrib/elasticsearch/__init__.py#L34-L45 | train |
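A hedged usage sketch of the factory above; the query string and its Invenio-style syntax are illustrative assumptions:

```python
from invenio_query_parser.contrib.elasticsearch import invenio_query_factory

parse = invenio_query_factory()                      # default parser and walkers
es_query = parse("title:neutrino AND author:smith")  # -> Elasticsearch DSL query
```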
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_timeseries_profile.py | NCEITimeSeriesProfileOrthogonalBase.check_dimensions | def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a timeseries-profile-orthogonal dataset.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are timeseries-profile-orthogonal feature types')
message = '{} must be a valid timeseries-profile-orthogonal feature type. It must have dimensions of (station, time, z).'
message += ' If it\'s a single station, it must have dimensions (time, z). x and y dimensions must be scalar or have'
message += ' dimensions (station). time must be a coordinate variable with dimension (time) and z must be a'
message += ' coordinate variable with dimension (z).'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_timeseries_profile_single_station(dataset, variable)
is_valid = is_valid or util.is_timeseries_profile_multi_station(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable)
)
results.append(required_ctx.to_result())
return results | python | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_timeseries_profile.py#L21-L43 | train |
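A hedged sketch of running this check directly; instantiating the checker with no arguments and the file name are assumptions:

```python
import netCDF4
from cc_plugin_ncei.ncei_timeseries_profile import NCEITimeSeriesProfileOrthogonalBase

dataset = netCDF4.Dataset("timeseries_profile.nc")  # file name illustrative
checker = NCEITimeSeriesProfileOrthogonalBase()     # constructor args assumed
for result in checker.check_dimensions(dataset):
    print(result)
```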
cloudmesh-cmd3/cmd3 | fabfile/doc.py | theme | def theme(name='readthedocs'):
"""set name to 'bootstrap' in case you want to use bootstrap.
This also requires the template sto be in the main dir"""
os.environ['SPHINX_THEME'] = name
if os.environ['SPHINX_THEME'] == 'bootstrap':
local('cp docs/source/_templates/layout_bootstrap.html docs/source/_templates/layout.html')
elif name == 'readthedocs':
return
else:
local('cp docs/source/_templates/layout_simple.html docs/source/_templates/layout.html') | python | 92e33c96032fd3921f159198a0e57917c4dc34ed | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/fabfile/doc.py#L57-L67 | train |
cloudmesh-cmd3/cmd3 | fabfile/doc.py | html | def html(theme_name='readthedocs'):
"""build the doc locally and view"""
# disable Flask RSTPAGES due to sphinx incompatibility
os.environ['RSTPAGES'] = 'FALSE'
theme(theme_name)
api()
man()
clean()
local("cd docs; make html")
local("fab security.check")
local("touch docs/build/html/.nojekyll") | python | def html(theme_name='readthedocs'):
# disable Flask RSTPAGES due to sphinx incompatibility
os.environ['RSTPAGES'] = 'FALSE'
theme(theme_name)
api()
man()
"""build the doc locally and view"""
clean()
local("cd docs; make html")
local("fab security.check")
local("touch docs/build/html/.nojekyll") | [
"def",
"html",
"(",
"theme_name",
"=",
"'readthedocs'",
")",
":",
"# disable Flask RSTPAGES due to sphinx incompatibility",
"os",
".",
"environ",
"[",
"'RSTPAGES'",
"]",
"=",
"'FALSE'",
"theme",
"(",
"theme_name",
")",
"api",
"(",
")",
"man",
"(",
")",
"clean",
"(",
")",
"local",
"(",
"\"cd docs; make html\"",
")",
"local",
"(",
"\"fab security.check\"",
")",
"local",
"(",
"\"touch docs/build/html/.nojekyll\"",
")"
] | build the doc locally and view | [
"build",
"the",
"doc",
"locally",
"and",
"view"
] | 92e33c96032fd3921f159198a0e57917c4dc34ed | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/fabfile/doc.py#L70-L80 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | sign_message | def sign_message(body: ByteString, secret: Text) -> Text:
"""
Compute a message's signature.
"""
return 'sha1={}'.format(
hmac.new(secret.encode(), body, sha1).hexdigest()
) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L112-L119 | train |
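A self-contained sketch of the verification counterpart to `sign_message`, mirroring the constant-time comparison that `receive_events` performs further down; the secret and body values are made up:

```python
import hmac
from hashlib import sha1

def verify_signature(body: bytes, secret: str, header_value: str) -> bool:
    # Recompute 'sha1=<hexdigest>' and compare in constant time.
    expected = 'sha1=' + hmac.new(secret.encode(), body, sha1).hexdigest()
    return hmac.compare_digest(header_value, expected)

body = b'{"object": "page"}'
sig = 'sha1=' + hmac.new(b'app-secret', body, sha1).hexdigest()
assert verify_signature(body, 'app-secret', sig)
```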
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | FacebookUser._get_user | async def _get_user(self):
"""
Get the user dict from cache or query it from the platform if missing.
"""
if self._cache is None:
try:
self._cache = \
await self.facebook.get_user(self.fbid, self.page_id)
except PlatformOperationError:
self._cache = {}
return self._cache | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L146-L157 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | FacebookUser.get_friendly_name | async def get_friendly_name(self) -> Text:
"""
The friendly name is mapped to Facebook's first name. If the first
name is missing, use the last name.
"""
u = await self._get_user()
f = u.get('first_name', '').strip()
l = u.get('last_name', '').strip()
return f or l | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L171-L180 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | FacebookUser.get_gender | async def get_gender(self) -> User.Gender:
"""
Get the gender from Facebook.
"""
u = await self._get_user()
try:
return User.Gender(u.get('gender'))
except ValueError:
return User.Gender.unknown | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L182-L191 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | FacebookMessage.get_user | def get_user(self) -> FacebookUser:
"""
Generate a Facebook user instance
"""
return FacebookUser(
self._event['sender']['id'],
self.get_page_id(),
self._facebook,
self,
) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L244-L253 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | FacebookMessage.get_layers | def get_layers(self) -> List[BaseLayer]:
"""
Return all layers that can be found in the message.
"""
out = []
msg = self._event.get('message', {})
if 'text' in msg:
out.append(lyr.RawText(msg['text']))
for attachment in msg.get('attachments') or []:
if attachment['type'] == 'image':
out.append(lyr.Image(UrlMedia(attachment['payload']['url'])))
elif attachment['type'] == 'audio':
out.append(lyr.Audio(UrlMedia(attachment['payload']['url'])))
elif attachment['type'] == 'file':
out.append(lyr.File(UrlMedia(attachment['payload']['url'])))
elif attachment['type'] == 'video':
out.append(lyr.Video(UrlMedia(attachment['payload']['url'])))
elif attachment['type'] == 'location':
# noinspection PyArgumentList
out.append(lyr.Location(lyr.Location.Point(
lat=attachment['payload']['coordinates']['lat'],
lon=attachment['payload']['coordinates']['long'],
)))
if 'quick_reply' in msg:
out.append(QuickReply(msg['quick_reply']['payload']))
if 'postback' in self._event:
payload = ujson.loads(self._event['postback']['payload'])
out.append(lyr.Postback(payload))
if 'optin' in self._event:
out.append(OptIn(self._event['optin']['ref']))
return out | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L261-L297 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.verify_token | def verify_token(self):
"""
Automatically generated secure verify token
"""
h = sha256()
h.update(self.app_access_token.encode())
return h.hexdigest() | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L385-L392 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.hook_up | def hook_up(self, router: UrlDispatcher):
"""
Dynamically hooks the right webhook paths
"""
router.add_get(self.webhook_path, self.check_hook)
router.add_post(self.webhook_path, self.receive_events) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L410-L416 | train |
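A hedged sketch of wiring `hook_up` into an aiohttp application; how `Facebook` is constructed and configured is an assumption:

```python
from aiohttp import web

app = web.Application()
fb = Facebook()          # constructor/configuration details assumed
fb.hook_up(app.router)   # registers GET (check_hook) and POST (receive_events)
web.run_app(app, port=8080)
```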
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.check_hook | async def check_hook(self, request: HttpRequest):
"""
Called when Facebook checks the hook
"""
verify_token = request.query.get('hub.verify_token')
if not verify_token:
return json_response({
'error': 'No verification token was provided',
}, status=400)
if verify_token == self.verify_token:
return Response(text=request.query.get('hub.challenge', ''))
return json_response({
'error': 'could not find the page token in the configuration',
}) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L418-L435 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.receive_events | async def receive_events(self, request: HttpRequest):
"""
Events received from Facebook
"""
body = await request.read()
s = self.settings()
try:
content = ujson.loads(body)
except ValueError:
return json_response({
'error': True,
'message': 'Cannot decode body'
}, status=400)
secret = s['app_secret']
actual_sig = request.headers['X-Hub-Signature']
expected_sig = sign_message(body, secret)
if not hmac.compare_digest(actual_sig, expected_sig):
return json_response({
'error': True,
'message': 'Invalid signature',
}, status=401)
for entry in content['entry']:
for raw_message in entry.get('messaging', []):
message = FacebookMessage(raw_message, self)
await self.handle_event(message)
return json_response({
'ok': True,
}) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L437-L470 | train |
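For reference, the minimal webhook payload shape implied by the parsing above (`entry[].messaging[]`, plus the `sender.id` and `message.text` fields used by `get_user` and `get_layers`); every value is made up:

```python
example_payload = {
    "entry": [{
        "messaging": [{
            "sender": {"id": "1234567890"},
            "message": {"text": "hello"},
        }],
    }],
}
```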
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._deferred_init | async def _deferred_init(self):
"""
Run those things in a separate task as they are not required for the
bot to work and they take a lot of time to run.
"""
await self._check_subscriptions()
await self._set_whitelist()
await self._set_get_started()
await self._set_greeting_text()
await self._set_persistent_menu() | python | async def _deferred_init(self):
"""
Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run.
"""
await self._check_subscriptions()
await self._set_whitelist()
await self._set_get_started()
await self._set_greeting_text()
await self._set_persistent_menu() | [
"async",
"def",
"_deferred_init",
"(",
"self",
")",
":",
"await",
"self",
".",
"_check_subscriptions",
"(",
")",
"await",
"self",
".",
"_set_whitelist",
"(",
")",
"await",
"self",
".",
"_set_get_started",
"(",
")",
"await",
"self",
".",
"_set_greeting_text",
"(",
")",
"await",
"self",
".",
"_set_persistent_menu",
"(",
")"
] | Run those things in a sepearate tasks as they are not required for the
bot to work and they take a lot of time to run. | [
"Run",
"those",
"things",
"in",
"a",
"sepearate",
"tasks",
"as",
"they",
"are",
"not",
"required",
"for",
"the",
"bot",
"to",
"work",
"and",
"they",
"take",
"a",
"lot",
"of",
"time",
"to",
"run",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L472-L482 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send_to_messenger_profile | async def _send_to_messenger_profile(self, page, content):
"""
The messenger profile API handles all meta-information about the bot,
like the menu. This allows submitting data to this API endpoint.
:param page: page dict from the configuration
:param content: content to be sent to Facebook (as dict)
"""
log_name = ', '.join(repr(x) for x in content.keys())
page_id = page['page_id']
current = await self._get_messenger_profile(page, content.keys())
if dict_is_subset(content, current):
logger.info('Page %s: %s is already up to date', page_id, log_name)
return
params = {
'access_token': page['page_token'],
}
headers = {
'content-type': 'application/json',
}
post = self.session.post(
PROFILE_ENDPOINT,
params=params,
headers=headers,
data=ujson.dumps(content)
)
# noinspection PyBroadException
try:
async with post as r:
await self._handle_fb_response(r)
except Exception:
logger.exception('Page %s: %s could not be set', page_id, log_name)
reporter.report()
else:
logger.info('Page %s: %s was updated', page_id, log_name) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L507-L548 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._set_get_started | async def _set_get_started(self):
"""
Set the "get started" action for the configured page.
"""
page = self.settings()
if 'get_started' in page:
payload = page['get_started']
else:
payload = {'action': 'get_started'}
await self._send_to_messenger_profile(page, {
'get_started': {
'payload': ujson.dumps(payload),
},
})
logger.info('Get started set for page %s', page['page_id']) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L550-L568 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._set_greeting_text | async def _set_greeting_text(self):
"""
Set the greeting text of the page
"""
page = self.settings()
if 'greeting' in page:
await self._send_to_messenger_profile(page, {
'greeting': page['greeting'],
})
logger.info('Greeting text set for page %s', page['page_id']) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L570-L582 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._set_persistent_menu | async def _set_persistent_menu(self):
"""
Define the persistent menu for the configured page
"""
page = self.settings()
if 'menu' in page:
await self._send_to_messenger_profile(page, {
'persistent_menu': page['menu'],
})
logger.info('Set menu for page %s', page['page_id']) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L584-L596 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._set_whitelist | async def _set_whitelist(self):
"""
Whitelist domains for the messenger extensions
"""
page = self.settings()
if 'whitelist' in page:
await self._send_to_messenger_profile(page, {
'whitelisted_domains': page['whitelist'],
})
logger.info('Whitelisted %s for page %s',
page['whitelist'],
page['page_id']) | python | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L598-L612 | train |
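Pulling together the keys that the methods above read from `self.settings()`, a hedged illustration of the page configuration's shape; every value, and the structure of the optional entries, is made up:

```python
page_settings = {
    'app_id': '123456789',                 # used to build the subscriptions endpoint
    'app_secret': 'shhh',                  # checked against X-Hub-Signature
    'page_id': '987654321',
    'page_token': 'EAAB...',               # access token for profile API calls
    'greeting': [{'locale': 'default', 'text': 'Hi!'}],  # optional; forwarded verbatim
    'menu': [],                            # optional persistent_menu payload
    'whitelist': ['https://example.com'],  # optional messenger-extension domains
}
```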
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._get_subscriptions_endpoint | def _get_subscriptions_endpoint(self):
"""
Generates the URL and tokens for the subscriptions endpoint
"""
s = self.settings()
params = {
'access_token': self.app_access_token,
}
return (
GRAPH_ENDPOINT.format(f'{s["app_id"]}/subscriptions'),
params,
) | python | def _get_subscriptions_endpoint(self):
"""
Generates the URL and tokens for the subscriptions endpoint
"""
s = self.settings()
params = {
'access_token': self.app_access_token,
}
return (
GRAPH_ENDPOINT.format(f'{s["app_id"]}/subscriptions'),
params,
) | [
"def",
"_get_subscriptions_endpoint",
"(",
"self",
")",
":",
"s",
"=",
"self",
".",
"settings",
"(",
")",
"params",
"=",
"{",
"'access_token'",
":",
"self",
".",
"app_access_token",
",",
"}",
"return",
"(",
"GRAPH_ENDPOINT",
".",
"format",
"(",
"f'{s[\"app_id\"]}/subscriptions'",
")",
",",
"params",
",",
")"
] | Generates the URL and tokens for the subscriptions endpoint | [
"Generates",
"the",
"URL",
"and",
"tokens",
"for",
"the",
"subscriptions",
"endpoint"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L614-L628 | train |
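`_get_subscriptions_endpoint` relies on a module-level `GRAPH_ENDPOINT` format string that is not shown in this excerpt. A sketch of how the pieces combine, assuming a plausible value for the constant (the real value and Graph API version live elsewhere in platform.py):

GRAPH_ENDPOINT = 'https://graph.facebook.com/v2.6/{}'  # assumed value

app_id = '123456'  # hypothetical
url = GRAPH_ENDPOINT.format(f'{app_id}/subscriptions')
print(url)  # https://graph.facebook.com/v2.6/123456/subscriptions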
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._get_subscriptions | async def _get_subscriptions(self) -> Tuple[Set[Text], Text]:
"""
List the subscriptions currently active
"""
url, params = self._get_subscriptions_endpoint()
get = self.session.get(url, params=params)
async with get as r:
await self._handle_fb_response(r)
data = await r.json()
for scope in data['data']:
if scope['object'] == 'page':
return (
set(x['name'] for x in scope['fields']),
scope['callback_url'],
)
return set(), '' | python | async def _get_subscriptions(self) -> Tuple[Set[Text], Text]:
"""
List the subscriptions currently active
"""
url, params = self._get_subscriptions_endpoint()
get = self.session.get(url, params=params)
async with get as r:
await self._handle_fb_response(r)
data = await r.json()
for scope in data['data']:
if scope['object'] == 'page':
return (
set(x['name'] for x in scope['fields']),
scope['callback_url'],
)
return set(), '' | [
"async",
"def",
"_get_subscriptions",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Set",
"[",
"Text",
"]",
",",
"Text",
"]",
":",
"url",
",",
"params",
"=",
"self",
".",
"_get_subscriptions_endpoint",
"(",
")",
"get",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"async",
"with",
"get",
"as",
"r",
":",
"await",
"self",
".",
"_handle_fb_response",
"(",
"r",
")",
"data",
"=",
"await",
"r",
".",
"json",
"(",
")",
"for",
"scope",
"in",
"data",
"[",
"'data'",
"]",
":",
"if",
"scope",
"[",
"'object'",
"]",
"==",
"'page'",
":",
"return",
"(",
"set",
"(",
"x",
"[",
"'name'",
"]",
"for",
"x",
"in",
"scope",
"[",
"'fields'",
"]",
")",
",",
"scope",
"[",
"'callback_url'",
"]",
",",
")",
"return",
"set",
"(",
")",
",",
"''"
] | List the subscriptions currently active | [
"List",
"the",
"subscriptions",
"currently",
"active"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L630-L650 | train |
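The parsing loop in `_get_subscriptions` implies a response shaped roughly like the payload below. This is an illustrative dict, not a verbatim Graph API response:

data = {
    'data': [{
        'object': 'page',
        'callback_url': 'https://bot.example.com/hooks/facebook',
        'fields': [{'name': 'messages'}, {'name': 'messaging_postbacks'}],
    }],
}

for scope in data['data']:
    if scope['object'] == 'page':
        print(set(x['name'] for x in scope['fields']), scope['callback_url'])
        # {'messages', 'messaging_postbacks'} https://bot.example.com/hooks/facebook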
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._set_subscriptions | async def _set_subscriptions(self, subscriptions):
"""
Set the subscriptions to a specific list of values
"""
url, params = self._get_subscriptions_endpoint()
data = {
'object': 'page',
'callback_url': self.webhook_url,
'fields': ', '.join(subscriptions),
'verify_token': self.verify_token,
}
headers = {
'Content-Type': 'application/json',
}
post = self.session.post(
url,
params=params,
data=ujson.dumps(data),
headers=headers,
)
async with post as r:
await self._handle_fb_response(r)
data = await r.json() | python | async def _set_subscriptions(self, subscriptions):
"""
Set the subscriptions to a specific list of values
"""
url, params = self._get_subscriptions_endpoint()
data = {
'object': 'page',
'callback_url': self.webhook_url,
'fields': ', '.join(subscriptions),
'verify_token': self.verify_token,
}
headers = {
'Content-Type': 'application/json',
}
post = self.session.post(
url,
params=params,
data=ujson.dumps(data),
headers=headers,
)
async with post as r:
await self._handle_fb_response(r)
data = await r.json() | [
"async",
"def",
"_set_subscriptions",
"(",
"self",
",",
"subscriptions",
")",
":",
"url",
",",
"params",
"=",
"self",
".",
"_get_subscriptions_endpoint",
"(",
")",
"data",
"=",
"{",
"'object'",
":",
"'page'",
",",
"'callback_url'",
":",
"self",
".",
"webhook_url",
",",
"'fields'",
":",
"', '",
".",
"join",
"(",
"subscriptions",
")",
",",
"'verify_token'",
":",
"self",
".",
"verify_token",
",",
"}",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
",",
"}",
"post",
"=",
"self",
".",
"session",
".",
"post",
"(",
"url",
",",
"params",
"=",
"params",
",",
"data",
"=",
"ujson",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"headers",
",",
")",
"async",
"with",
"post",
"as",
"r",
":",
"await",
"self",
".",
"_handle_fb_response",
"(",
"r",
")",
"data",
"=",
"await",
"r",
".",
"json",
"(",
")"
] | Set the subscriptions to a specific list of values | [
"Set",
"the",
"subscriptions",
"to",
"a",
"specific",
"list",
"of",
"values"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L652-L679 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._check_subscriptions | async def _check_subscriptions(self):
"""
Checks that all expected subscriptions are active
"""
subscribed, url = await self._get_subscriptions()
expect = set(settings.FACEBOOK_SUBSCRIPTIONS)
if (expect - subscribed) or url != self.webhook_url:
await self._set_subscriptions(expect | subscribed)
logger.info('Updated webhook subscriptions')
else:
logger.info('No need to update webhook subscriptions') | python | async def _check_subscriptions(self):
"""
Checks that all expected subscriptions are active
"""
subscribed, url = await self._get_subscriptions()
expect = set(settings.FACEBOOK_SUBSCRIPTIONS)
if (expect - subscribed) or url != self.webhook_url:
await self._set_subscriptions(expect | subscribed)
logger.info('Updated webhook subscriptions')
else:
logger.info('No need to update webhook subscriptions') | [
"async",
"def",
"_check_subscriptions",
"(",
"self",
")",
":",
"subscribed",
",",
"url",
"=",
"await",
"self",
".",
"_get_subscriptions",
"(",
")",
"expect",
"=",
"set",
"(",
"settings",
".",
"FACEBOOK_SUBSCRIPTIONS",
")",
"if",
"(",
"expect",
"-",
"subscribed",
")",
"or",
"url",
"!=",
"self",
".",
"webhook_url",
":",
"await",
"self",
".",
"_set_subscriptions",
"(",
"expect",
"|",
"subscribed",
")",
"logger",
".",
"info",
"(",
"'Updated webhook subscriptions'",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'No need to update webhook subscriptions'",
")"
] | Checks that all expected subscriptions are active | [
"Checks",
"that",
"all",
"subscriptions",
"are",
"subscribed"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L681-L693 | train |
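The update condition in `_check_subscriptions` is plain set arithmetic: the missing fields are `expect - subscribed`, and the POST sends the union so fields that are already active are never dropped. A standalone sketch:

expect = {'messages', 'messaging_postbacks'}  # from settings
subscribed = {'messages', 'message_reads'}    # reported by the API

print(expect - subscribed)  # {'messaging_postbacks'} -> update needed
print(expect | subscribed)  # {'message_reads', 'messages', 'messaging_postbacks'}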
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.handle_event | async def handle_event(self, event: FacebookMessage):
"""
Handle an incoming message from Facebook.
"""
responder = FacebookResponder(self)
await self._notify(event, responder) | python | async def handle_event(self, event: FacebookMessage):
"""
Handle an incoming message from Facebook.
"""
responder = FacebookResponder(self)
await self._notify(event, responder) | [
"async",
"def",
"handle_event",
"(",
"self",
",",
"event",
":",
"FacebookMessage",
")",
":",
"responder",
"=",
"FacebookResponder",
"(",
"self",
")",
"await",
"self",
".",
"_notify",
"(",
"event",
",",
"responder",
")"
] | Handle an incoming message from Facebook. | [
"Handle",
"an",
"incoming",
"message",
"from",
"Facebook",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L695-L700 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._access_token | def _access_token(self, request: Request=None, page_id: Text=''):
"""
Guess the access token for that specific request.
"""
if not page_id:
msg = request.message # type: FacebookMessage
page_id = msg.get_page_id()
page = self.settings()
if page['page_id'] == page_id:
return page['page_token']
raise PlatformOperationError('Trying to get access token of the '
'page "{}", which is not configured.'
.format(page_id)) | python | def _access_token(self, request: Request=None, page_id: Text=''):
"""
Guess the access token for that specific request.
"""
if not page_id:
msg = request.message # type: FacebookMessage
page_id = msg.get_page_id()
page = self.settings()
if page['page_id'] == page_id:
return page['page_token']
raise PlatformOperationError('Trying to get access token of the '
'page "{}", which is not configured.'
.format(page_id)) | [
"def",
"_access_token",
"(",
"self",
",",
"request",
":",
"Request",
"=",
"None",
",",
"page_id",
":",
"Text",
"=",
"''",
")",
":",
"if",
"not",
"page_id",
":",
"msg",
"=",
"request",
".",
"message",
"# type: FacebookMessage",
"page_id",
"=",
"msg",
".",
"get_page_id",
"(",
")",
"page",
"=",
"self",
".",
"settings",
"(",
")",
"if",
"page",
"[",
"'page_id'",
"]",
"==",
"page_id",
":",
"return",
"page",
"[",
"'page_token'",
"]",
"raise",
"PlatformOperationError",
"(",
"'Trying to get access token of the '",
"'page \"{}\", which is not configured.'",
".",
"format",
"(",
"page_id",
")",
")"
] | Guess the access token for that specific request. | [
"Guess",
"the",
"access",
"token",
"for",
"that",
"specific",
"request",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L702-L718 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._make_qr | async def _make_qr(self,
qr: QuickRepliesList.BaseOption,
request: Request):
"""
Generate a single quick reply's content.
"""
if isinstance(qr, QuickRepliesList.TextOption):
return {
'content_type': 'text',
'title': await render(qr.text, request),
'payload': qr.slug,
}
elif isinstance(qr, QuickRepliesList.LocationOption):
return {
'content_type': 'location',
} | python | async def _make_qr(self,
qr: QuickRepliesList.BaseOption,
request: Request):
"""
Generate a single quick reply's content.
"""
if isinstance(qr, QuickRepliesList.TextOption):
return {
'content_type': 'text',
'title': await render(qr.text, request),
'payload': qr.slug,
}
elif isinstance(qr, QuickRepliesList.LocationOption):
return {
'content_type': 'location',
} | [
"async",
"def",
"_make_qr",
"(",
"self",
",",
"qr",
":",
"QuickRepliesList",
".",
"BaseOption",
",",
"request",
":",
"Request",
")",
":",
"if",
"isinstance",
"(",
"qr",
",",
"QuickRepliesList",
".",
"TextOption",
")",
":",
"return",
"{",
"'content_type'",
":",
"'text'",
",",
"'title'",
":",
"await",
"render",
"(",
"qr",
".",
"text",
",",
"request",
")",
",",
"'payload'",
":",
"qr",
".",
"slug",
",",
"}",
"elif",
"isinstance",
"(",
"qr",
",",
"QuickRepliesList",
".",
"LocationOption",
")",
":",
"return",
"{",
"'content_type'",
":",
"'location'",
",",
"}"
] | Generate a single quick reply's content. | [
"Generate",
"a",
"single",
"quick",
"reply",
"s",
"content",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L720-L736 | train |
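For reference, the two branches of `_make_qr` produce quick-reply dicts like the ones below; the title and slug are hypothetical:

text_qr = {
    'content_type': 'text',
    'title': 'Yes',            # rendered from qr.text
    'payload': 'confirm_yes',  # qr.slug
}
location_qr = {'content_type': 'location'}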
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send_text | async def _send_text(self, request: Request, stack: Stack):
"""
Send text layers to the user. Each layer will go in its own bubble.
Also, Facebook limits messages to 320 chars, so if any message is
longer than that it will be split into as many messages as needed to
be accepted by Facebook.
"""
parts = []
for layer in stack.layers:
if isinstance(layer, lyr.MultiText):
lines = await render(layer.text, request, multi_line=True)
for line in lines:
for part in wrap(line, 320):
parts.append(part)
elif isinstance(layer, (lyr.Text, lyr.RawText)):
text = await render(layer.text, request)
for part in wrap(text, 320):
parts.append(part)
for part in parts[:-1]:
await self._send(request, {
'text': part,
}, stack)
part = parts[-1]
msg = {
'text': part,
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | python | async def _send_text(self, request: Request, stack: Stack):
"""
Send text layers to the user. Each layer will go in its own bubble.
Also, Facebook limits messages to 320 chars, so if any message is
longer than that it will be split into as many messages as needed to
be accepted by Facebook.
"""
parts = []
for layer in stack.layers:
if isinstance(layer, lyr.MultiText):
lines = await render(layer.text, request, multi_line=True)
for line in lines:
for part in wrap(line, 320):
parts.append(part)
elif isinstance(layer, (lyr.Text, lyr.RawText)):
text = await render(layer.text, request)
for part in wrap(text, 320):
parts.append(part)
for part in parts[:-1]:
await self._send(request, {
'text': part,
}, stack)
part = parts[-1]
msg = {
'text': part,
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | [
"async",
"def",
"_send_text",
"(",
"self",
",",
"request",
":",
"Request",
",",
"stack",
":",
"Stack",
")",
":",
"parts",
"=",
"[",
"]",
"for",
"layer",
"in",
"stack",
".",
"layers",
":",
"if",
"isinstance",
"(",
"layer",
",",
"lyr",
".",
"MultiText",
")",
":",
"lines",
"=",
"await",
"render",
"(",
"layer",
".",
"text",
",",
"request",
",",
"multi_line",
"=",
"True",
")",
"for",
"line",
"in",
"lines",
":",
"for",
"part",
"in",
"wrap",
"(",
"line",
",",
"320",
")",
":",
"parts",
".",
"append",
"(",
"part",
")",
"elif",
"isinstance",
"(",
"layer",
",",
"(",
"lyr",
".",
"Text",
",",
"lyr",
".",
"RawText",
")",
")",
":",
"text",
"=",
"await",
"render",
"(",
"layer",
".",
"text",
",",
"request",
")",
"for",
"part",
"in",
"wrap",
"(",
"text",
",",
"320",
")",
":",
"parts",
".",
"append",
"(",
"part",
")",
"for",
"part",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"await",
"self",
".",
"_send",
"(",
"request",
",",
"{",
"'text'",
":",
"part",
",",
"}",
",",
"stack",
")",
"part",
"=",
"parts",
"[",
"-",
"1",
"]",
"msg",
"=",
"{",
"'text'",
":",
"part",
",",
"}",
"await",
"self",
".",
"_add_qr",
"(",
"stack",
",",
"msg",
",",
"request",
")",
"await",
"self",
".",
"_send",
"(",
"request",
",",
"msg",
",",
"stack",
")"
] | Send text layers to the user. Each layer will go in its own bubble.
Also, Facebook limits messages to 320 chars, so if any message is
longer than that it will be split into as many messages as needed to
be accepted by Facebook. | [
"Send",
"text",
"layers",
"to",
"the",
"user",
".",
"Each",
"layer",
"will",
"go",
"in",
"its",
"own",
"bubble",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L749-L783 | train |
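The 320-character splitting in `_send_text` presumably comes from `textwrap.wrap` (the import is not visible in this excerpt). A standalone sketch of the behavior:

from textwrap import wrap

text = 'word ' * 100               # ~500 characters
parts = wrap(text, 320)            # splits on whitespace, chunks <= 320 chars
print(len(parts))                  # 2
print(max(len(p) for p in parts))  # 319

Note that the method indexes `parts[-1]` unconditionally, so it assumes the stack carries at least one non-empty text layer.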
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send_generic_template | async def _send_generic_template(self, request: Request, stack: Stack):
"""
Generates and sends a generic template.
"""
gt = stack.get_layer(GenericTemplate)
payload = await gt.serialize(request)
msg = {
'attachment': {
'type': 'template',
'payload': payload
}
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | python | async def _send_generic_template(self, request: Request, stack: Stack):
"""
Generates and sends a generic template.
"""
gt = stack.get_layer(GenericTemplate)
payload = await gt.serialize(request)
msg = {
'attachment': {
'type': 'template',
'payload': payload
}
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | [
"async",
"def",
"_send_generic_template",
"(",
"self",
",",
"request",
":",
"Request",
",",
"stack",
":",
"Stack",
")",
":",
"gt",
"=",
"stack",
".",
"get_layer",
"(",
"GenericTemplate",
")",
"payload",
"=",
"await",
"gt",
".",
"serialize",
"(",
"request",
")",
"msg",
"=",
"{",
"'attachment'",
":",
"{",
"'type'",
":",
"'template'",
",",
"'payload'",
":",
"payload",
"}",
"}",
"await",
"self",
".",
"_add_qr",
"(",
"stack",
",",
"msg",
",",
"request",
")",
"await",
"self",
".",
"_send",
"(",
"request",
",",
"msg",
",",
"stack",
")"
] | Generates and sends a generic template. | [
"Generates",
"and",
"send",
"a",
"generic",
"template",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L785-L801 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send_button_template | async def _send_button_template(self, request: Request, stack: Stack):
"""
Generates and sends a button template.
"""
gt = stack.get_layer(ButtonTemplate)
payload = {
'template_type': 'button',
'text': await render(gt.text, request),
'buttons': [await b.serialize(request) for b in gt.buttons],
}
msg = {
'attachment': {
'type': 'template',
'payload': payload
}
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | python | async def _send_button_template(self, request: Request, stack: Stack):
"""
Generates and sends a button template.
"""
gt = stack.get_layer(ButtonTemplate)
payload = {
'template_type': 'button',
'text': await render(gt.text, request),
'buttons': [await b.serialize(request) for b in gt.buttons],
}
msg = {
'attachment': {
'type': 'template',
'payload': payload
}
}
await self._add_qr(stack, msg, request)
await self._send(request, msg, stack) | [
"async",
"def",
"_send_button_template",
"(",
"self",
",",
"request",
":",
"Request",
",",
"stack",
":",
"Stack",
")",
":",
"gt",
"=",
"stack",
".",
"get_layer",
"(",
"ButtonTemplate",
")",
"payload",
"=",
"{",
"'template_type'",
":",
"'button'",
",",
"'text'",
":",
"await",
"render",
"(",
"gt",
".",
"text",
",",
"request",
")",
",",
"'buttons'",
":",
"[",
"await",
"b",
".",
"serialize",
"(",
"request",
")",
"for",
"b",
"in",
"gt",
".",
"buttons",
"]",
",",
"}",
"msg",
"=",
"{",
"'attachment'",
":",
"{",
"'type'",
":",
"'template'",
",",
"'payload'",
":",
"payload",
"}",
"}",
"await",
"self",
".",
"_add_qr",
"(",
"stack",
",",
"msg",
",",
"request",
")",
"await",
"self",
".",
"_send",
"(",
"request",
",",
"msg",
",",
"stack",
")"
] | Generates and sends a button template. | [
"Generates",
"and",
"send",
"a",
"button",
"template",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L803-L824 | train |
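Together with the attachment wrapper, `_send_button_template` ends up posting a message shaped like this (text and button content hypothetical):

msg = {
    'attachment': {
        'type': 'template',
        'payload': {
            'template_type': 'button',
            'text': 'What would you like to do?',
            'buttons': [
                {'type': 'postback', 'title': 'Help', 'payload': 'HELP'},
            ],
        },
    },
}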
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send_typing | async def _send_typing(self, request: Request, stack: Stack):
"""
Send typing indications to Facebook
"""
active = stack.get_layer(lyr.Typing).active
msg = ujson.dumps({
'recipient': {
'id': request.conversation.fbid,
},
'sender_action': 'typing_on' if active else 'typing_off',
})
headers = {
'content-type': 'application/json',
}
params = {
'access_token': self._access_token(request),
}
post = self.session.post(
MESSAGES_ENDPOINT,
params=params,
data=msg,
headers=headers,
)
logger.debug('Sending: %s', msg)
async with post as r:
await self._handle_fb_response(r) | python | async def _send_typing(self, request: Request, stack: Stack):
"""
Send typing indications to Facebook
"""
active = stack.get_layer(lyr.Typing).active
msg = ujson.dumps({
'recipient': {
'id': request.conversation.fbid,
},
'sender_action': 'typing_on' if active else 'typing_off',
})
headers = {
'content-type': 'application/json',
}
params = {
'access_token': self._access_token(request),
}
post = self.session.post(
MESSAGES_ENDPOINT,
params=params,
data=msg,
headers=headers,
)
logger.debug('Sending: %s', msg)
async with post as r:
await self._handle_fb_response(r) | [
"async",
"def",
"_send_typing",
"(",
"self",
",",
"request",
":",
"Request",
",",
"stack",
":",
"Stack",
")",
":",
"active",
"=",
"stack",
".",
"get_layer",
"(",
"lyr",
".",
"Typing",
")",
".",
"active",
"msg",
"=",
"ujson",
".",
"dumps",
"(",
"{",
"'recipient'",
":",
"{",
"'id'",
":",
"request",
".",
"conversation",
".",
"fbid",
",",
"}",
",",
"'sender_action'",
":",
"'typing_on'",
"if",
"active",
"else",
"'typing_off'",
",",
"}",
")",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
",",
"}",
"params",
"=",
"{",
"'access_token'",
":",
"self",
".",
"_access_token",
"(",
"request",
")",
",",
"}",
"post",
"=",
"self",
".",
"session",
".",
"post",
"(",
"MESSAGES_ENDPOINT",
",",
"params",
"=",
"params",
",",
"data",
"=",
"msg",
",",
"headers",
"=",
"headers",
",",
")",
"logger",
".",
"debug",
"(",
"'Sending: %s'",
",",
"msg",
")",
"async",
"with",
"post",
"as",
"r",
":",
"await",
"self",
".",
"_handle_fb_response",
"(",
"r",
")"
] | Send typing indications to Facebook | [
"Send",
"to",
"Facebook",
"typing",
"indications"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L858-L890 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._handle_fb_response | async def _handle_fb_response(self, response: aiohttp.ClientResponse):
"""
Check that Facebook was OK with the API call we just made and raise
an exception if it failed.
"""
ok = response.status == 200
if not ok:
# noinspection PyBroadException
try:
error = (await response.json())['error']['message']
except Exception:
error = '(nothing)'
raise PlatformOperationError('Facebook says: "{}"'
.format(error)) | python | async def _handle_fb_response(self, response: aiohttp.ClientResponse):
"""
Check that Facebook was OK with the API call we just made and raise
an exception if it failed.
"""
ok = response.status == 200
if not ok:
# noinspection PyBroadException
try:
error = (await response.json())['error']['message']
except Exception:
error = '(nothing)'
raise PlatformOperationError('Facebook says: "{}"'
.format(error)) | [
"async",
"def",
"_handle_fb_response",
"(",
"self",
",",
"response",
":",
"aiohttp",
".",
"ClientResponse",
")",
":",
"ok",
"=",
"response",
".",
"status",
"==",
"200",
"if",
"not",
"ok",
":",
"# noinspection PyBroadException",
"try",
":",
"error",
"=",
"(",
"await",
"response",
".",
"json",
"(",
")",
")",
"[",
"'error'",
"]",
"[",
"'message'",
"]",
"except",
"Exception",
":",
"error",
"=",
"'(nothing)'",
"raise",
"PlatformOperationError",
"(",
"'Facebook says: \"{}\"'",
".",
"format",
"(",
"error",
")",
")"
] | Check that Facebook was OK with the API call we just made and raise
an exception if it failed. | [
"Check",
"that",
"Facebook",
"was",
"OK",
"with",
"the",
"API",
"call",
"we",
"just",
"made",
"and",
"raise",
"an",
"exception",
"if",
"it",
"failed",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L892-L908 | train |
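The error path of `_handle_fb_response` expects the standard Graph error envelope. The same extraction on a plain dict, outside the async context:

body = {'error': {'message': 'Invalid OAuth access token.', 'code': 190}}

try:
    error = body['error']['message']
except Exception:
    error = '(nothing)'
print('Facebook says: "{}"'.format(error))  # Facebook says: "Invalid OAuth access token."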
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._send | async def _send(self,
request: Request,
content: Dict[Text, Any],
stack: Stack):
"""
Actually proceed to sending the message to the Facebook API.
"""
msg = {
'recipient': {
'id': request.conversation.fbid,
},
'message': content,
}
if stack and stack.has_layer(MessagingType):
mt = stack.get_layer(MessagingType)
else:
mt = MessagingType(response=True)
msg.update(mt.serialize())
msg_json = ujson.dumps(msg)
headers = {
'content-type': 'application/json',
}
params = {
'access_token': self._access_token(request),
}
post = self.session.post(
MESSAGES_ENDPOINT,
params=params,
data=msg_json,
headers=headers,
)
logger.debug('Sending: %s', msg_json)
async with post as r:
await self._handle_fb_response(r) | python | async def _send(self,
request: Request,
content: Dict[Text, Any],
stack: Stack):
"""
Actually proceed to sending the message to the Facebook API.
"""
msg = {
'recipient': {
'id': request.conversation.fbid,
},
'message': content,
}
if stack and stack.has_layer(MessagingType):
mt = stack.get_layer(MessagingType)
else:
mt = MessagingType(response=True)
msg.update(mt.serialize())
msg_json = ujson.dumps(msg)
headers = {
'content-type': 'application/json',
}
params = {
'access_token': self._access_token(request),
}
post = self.session.post(
MESSAGES_ENDPOINT,
params=params,
data=msg_json,
headers=headers,
)
logger.debug('Sending: %s', msg_json)
async with post as r:
await self._handle_fb_response(r) | [
"async",
"def",
"_send",
"(",
"self",
",",
"request",
":",
"Request",
",",
"content",
":",
"Dict",
"[",
"Text",
",",
"Any",
"]",
",",
"stack",
":",
"Stack",
")",
":",
"msg",
"=",
"{",
"'recipient'",
":",
"{",
"'id'",
":",
"request",
".",
"conversation",
".",
"fbid",
",",
"}",
",",
"'message'",
":",
"content",
",",
"}",
"if",
"stack",
"and",
"stack",
".",
"has_layer",
"(",
"MessagingType",
")",
":",
"mt",
"=",
"stack",
".",
"get_layer",
"(",
"MessagingType",
")",
"else",
":",
"mt",
"=",
"MessagingType",
"(",
"response",
"=",
"True",
")",
"msg",
".",
"update",
"(",
"mt",
".",
"serialize",
"(",
")",
")",
"msg_json",
"=",
"ujson",
".",
"dumps",
"(",
"msg",
")",
"headers",
"=",
"{",
"'content-type'",
":",
"'application/json'",
",",
"}",
"params",
"=",
"{",
"'access_token'",
":",
"self",
".",
"_access_token",
"(",
"request",
")",
",",
"}",
"post",
"=",
"self",
".",
"session",
".",
"post",
"(",
"MESSAGES_ENDPOINT",
",",
"params",
"=",
"params",
",",
"data",
"=",
"msg_json",
",",
"headers",
"=",
"headers",
",",
")",
"logger",
".",
"debug",
"(",
"'Sending: %s'",
",",
"msg_json",
")",
"async",
"with",
"post",
"as",
"r",
":",
"await",
"self",
".",
"_handle_fb_response",
"(",
"r",
")"
] | Actually proceed to sending the message to the Facebook API. | [
"Actually",
"proceed",
"to",
"sending",
"the",
"message",
"to",
"the",
"Facebook",
"API",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L910-L951 | train |
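Combined with the default `MessagingType(response=True)`, the final wire payload built by `_send` looks roughly like the dict below. The `'messaging_type': 'RESPONSE'` key is an assumption about what `serialize()` returns, based on Facebook's messaging_type field:

msg = {
    'recipient': {'id': '<PSID>'},  # request.conversation.fbid
    'message': {'text': 'Hello!'},
    'messaging_type': 'RESPONSE',   # assumed serialize() output
}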
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.get_user | async def get_user(self, user_id, page_id):
"""
Query a user from the API and return its JSON
"""
access_token = self._access_token(page_id=page_id)
params = {
'fields': 'first_name,last_name,profile_pic,locale,timezone'
',gender',
'access_token': access_token,
}
url = GRAPH_ENDPOINT.format(user_id)
get = self.session.get(url, params=params)
async with get as r:
await self._handle_fb_response(r)
return await r.json() | python | async def get_user(self, user_id, page_id):
"""
Query a user from the API and return its JSON
"""
access_token = self._access_token(page_id=page_id)
params = {
'fields': 'first_name,last_name,profile_pic,locale,timezone'
',gender',
'access_token': access_token,
}
url = GRAPH_ENDPOINT.format(user_id)
get = self.session.get(url, params=params)
async with get as r:
await self._handle_fb_response(r)
return await r.json() | [
"async",
"def",
"get_user",
"(",
"self",
",",
"user_id",
",",
"page_id",
")",
":",
"access_token",
"=",
"self",
".",
"_access_token",
"(",
"page_id",
"=",
"page_id",
")",
"params",
"=",
"{",
"'fields'",
":",
"'first_name,last_name,profile_pic,locale,timezone'",
"',gender'",
",",
"'access_token'",
":",
"access_token",
",",
"}",
"url",
"=",
"GRAPH_ENDPOINT",
".",
"format",
"(",
"user_id",
")",
"get",
"=",
"self",
".",
"session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
")",
"async",
"with",
"get",
"as",
"r",
":",
"await",
"self",
".",
"_handle_fb_response",
"(",
"r",
")",
"return",
"await",
"r",
".",
"json",
"(",
")"
] | Query a user from the API and return its JSON | [
"Query",
"a",
"user",
"from",
"the",
"API",
"and",
"return",
"its",
"JSON"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L953-L971 | train |
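Given the `fields` parameter above, a successful `get_user` call resolves to a dict along these lines; all values are hypothetical:

user = {
    'first_name': 'Ada',
    'last_name': 'Lovelace',
    'profile_pic': 'https://example.com/pic.jpg',
    'locale': 'en_GB',
    'timezone': 0,
    'gender': 'female',
}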
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook.ensure_usable_media | async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
"""
So far, let's just accept URL media. We'll see in the future how it
goes.
"""
if not isinstance(media, UrlMedia):
raise ValueError('Facebook platform only accepts URL media')
return media | python | async def ensure_usable_media(self, media: BaseMedia) -> UrlMedia:
"""
So far, let's just accept URL media. We'll see in the future how it
goes.
"""
if not isinstance(media, UrlMedia):
raise ValueError('Facebook platform only accepts URL media')
return media | [
"async",
"def",
"ensure_usable_media",
"(",
"self",
",",
"media",
":",
"BaseMedia",
")",
"->",
"UrlMedia",
":",
"if",
"not",
"isinstance",
"(",
"media",
",",
"UrlMedia",
")",
":",
"raise",
"ValueError",
"(",
"'Facebook platform only accepts URL media'",
")",
"return",
"media"
] | So far, let's just accept URL media. We'll see in the future how it
goes. | [
"So",
"far",
"let",
"s",
"just",
"accept",
"URL",
"media",
".",
"We",
"ll",
"see",
"in",
"the",
"future",
"how",
"it",
"goes",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L973-L982 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._make_fake_message | def _make_fake_message(self, user_id, page_id, payload):
"""
Creates a fake message for the given user_id. It contains a postback
with the given payload.
"""
event = {
'sender': {
'id': user_id,
},
'recipient': {
'id': page_id,
},
'postback': {
'payload': ujson.dumps(payload),
},
}
return FacebookMessage(event, self, False) | python | def _make_fake_message(self, user_id, page_id, payload):
"""
Creates a fake message for the given user_id. It contains a postback
with the given payload.
"""
event = {
'sender': {
'id': user_id,
},
'recipient': {
'id': page_id,
},
'postback': {
'payload': ujson.dumps(payload),
},
}
return FacebookMessage(event, self, False) | [
"def",
"_make_fake_message",
"(",
"self",
",",
"user_id",
",",
"page_id",
",",
"payload",
")",
":",
"event",
"=",
"{",
"'sender'",
":",
"{",
"'id'",
":",
"user_id",
",",
"}",
",",
"'recipient'",
":",
"{",
"'id'",
":",
"page_id",
",",
"}",
",",
"'postback'",
":",
"{",
"'payload'",
":",
"ujson",
".",
"dumps",
"(",
"payload",
")",
",",
"}",
",",
"}",
"return",
"FacebookMessage",
"(",
"event",
",",
"self",
",",
"False",
")"
] | Creates a fake message for the given user_id. It contains a postback
with the given payload. | [
"Creates",
"a",
"fake",
"message",
"for",
"the",
"given",
"user_id",
".",
"It",
"contains",
"a",
"postback",
"with",
"the",
"given",
"payload",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L984-L1002 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._message_from_sr | def _message_from_sr(self, token: Text, payload: Any) \
-> Optional[BaseMessage]:
"""
Tries to verify the signed request
"""
page = self.settings()
secret = page['app_secret']
try:
sr_data = SignedRequest.parse(token, secret)
except (TypeError, ValueError, SignedRequestError) as e:
return
return self._make_fake_message(
sr_data['psid'],
page['page_id'],
payload,
) | python | def _message_from_sr(self, token: Text, payload: Any) \
-> Optional[BaseMessage]:
"""
Tries to verify the signed request
"""
page = self.settings()
secret = page['app_secret']
try:
sr_data = SignedRequest.parse(token, secret)
except (TypeError, ValueError, SignedRequestError) as e:
return
return self._make_fake_message(
sr_data['psid'],
page['page_id'],
payload,
) | [
"def",
"_message_from_sr",
"(",
"self",
",",
"token",
":",
"Text",
",",
"payload",
":",
"Any",
")",
"->",
"Optional",
"[",
"BaseMessage",
"]",
":",
"page",
"=",
"self",
".",
"settings",
"(",
")",
"secret",
"=",
"page",
"[",
"'app_secret'",
"]",
"try",
":",
"sr_data",
"=",
"SignedRequest",
".",
"parse",
"(",
"token",
",",
"secret",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"SignedRequestError",
")",
"as",
"e",
":",
"return",
"return",
"self",
".",
"_make_fake_message",
"(",
"sr_data",
"[",
"'psid'",
"]",
",",
"page",
"[",
"'page_id'",
"]",
",",
"payload",
",",
")"
] | Tries to verify the signed request | [
"Tries",
"to",
"verify",
"the",
"signed",
"request"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L1004-L1022 | train |
BernardFW/bernard | src/bernard/platforms/facebook/platform.py | Facebook._message_from_token | def _message_from_token(self, token: Text, payload: Any) \
-> Optional[BaseMessage]:
"""
Analyzes a signed token and generates the matching message
"""
try:
tk = jwt.decode(token, settings.WEBVIEW_SECRET_KEY)
except jwt.InvalidTokenError:
return
try:
user_id = tk['fb_psid']
assert isinstance(user_id, Text)
page_id = tk['fb_pid']
assert isinstance(page_id, Text)
except (KeyError, AssertionError):
return
if self.settings()['page_id'] == page_id:
return self._make_fake_message(user_id, page_id, payload) | python | def _message_from_token(self, token: Text, payload: Any) \
-> Optional[BaseMessage]:
"""
Analyzes a signed token and generates the matching message
"""
try:
tk = jwt.decode(token, settings.WEBVIEW_SECRET_KEY)
except jwt.InvalidTokenError:
return
try:
user_id = tk['fb_psid']
assert isinstance(user_id, Text)
page_id = tk['fb_pid']
assert isinstance(page_id, Text)
except (KeyError, AssertionError):
return
if self.settings()['page_id'] == page_id:
return self._make_fake_message(user_id, page_id, payload) | [
"def",
"_message_from_token",
"(",
"self",
",",
"token",
":",
"Text",
",",
"payload",
":",
"Any",
")",
"->",
"Optional",
"[",
"BaseMessage",
"]",
":",
"try",
":",
"tk",
"=",
"jwt",
".",
"decode",
"(",
"token",
",",
"settings",
".",
"WEBVIEW_SECRET_KEY",
")",
"except",
"jwt",
".",
"InvalidTokenError",
":",
"return",
"try",
":",
"user_id",
"=",
"tk",
"[",
"'fb_psid'",
"]",
"assert",
"isinstance",
"(",
"user_id",
",",
"Text",
")",
"page_id",
"=",
"tk",
"[",
"'fb_pid'",
"]",
"assert",
"isinstance",
"(",
"page_id",
",",
"Text",
")",
"except",
"(",
"KeyError",
",",
"AssertionError",
")",
":",
"return",
"if",
"self",
".",
"settings",
"(",
")",
"[",
"'page_id'",
"]",
"==",
"page_id",
":",
"return",
"self",
".",
"_make_fake_message",
"(",
"user_id",
",",
"page_id",
",",
"payload",
")"
] | Analyzes a signed token and generates the matching message | [
"Analyzes",
"a",
"signed",
"token",
"and",
"generates",
"the",
"matching",
"message"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L1024-L1044 | train |
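A token accepted by `_message_from_token` can be produced with PyJWT. The claim names come from the code; the secret is hypothetical, and note that PyJWT 2.x requires the algorithm to be spelled out, while the `jwt.decode(token, key)` call above reflects the older 1.x API:

import jwt  # PyJWT

secret = 'webview-secret'  # stands in for settings.WEBVIEW_SECRET_KEY
token = jwt.encode({'fb_psid': '<PSID>', 'fb_pid': '<PAGE-ID>'}, secret,
                   algorithm='HS256')
claims = jwt.decode(token, secret, algorithms=['HS256'])
print(claims['fb_psid'], claims['fb_pid'])  # <PSID> <PAGE-ID>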
BernardFW/bernard | src/bernard/engine/request.py | Request.get_trans_reg | def get_trans_reg(self, name: Text, default: Any=None) -> Any:
"""
Convenience function to access the transition register of a specific
kind.
:param name: Name of the register you want to see
:param default: What to return by default
"""
tr = self.register.get(Register.TRANSITION, {})
return tr.get(name, default) | python | def get_trans_reg(self, name: Text, default: Any=None) -> Any:
"""
Convenience function to access the transition register of a specific
kind.
:param name: Name of the register you want to see
:param default: What to return by default
"""
tr = self.register.get(Register.TRANSITION, {})
return tr.get(name, default) | [
"def",
"get_trans_reg",
"(",
"self",
",",
"name",
":",
"Text",
",",
"default",
":",
"Any",
"=",
"None",
")",
"->",
"Any",
":",
"tr",
"=",
"self",
".",
"register",
".",
"get",
"(",
"Register",
".",
"TRANSITION",
",",
"{",
"}",
")",
"return",
"tr",
".",
"get",
"(",
"name",
",",
"default",
")"
] | Convenience function to access the transition register of a specific
kind.
:param name: Name of the register you want to see
:param default: What to return by default | [
"Convenience",
"function",
"to",
"access",
"the",
"transition",
"register",
"of",
"a",
"specific",
"kind",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/request.py#L189-L199 | train |
BernardFW/bernard | src/bernard/engine/request.py | Request.get_locale | async def get_locale(self) -> Text:
"""
Get the locale to use for this request. It's either the overridden
locale or the locale provided by the platform.
:return: Locale to use for this request
"""
if self._locale_override:
return self._locale_override
else:
return await self.user.get_locale() | python | async def get_locale(self) -> Text:
"""
Get the locale to use for this request. It's either the overridden
locale or the locale provided by the platform.
:return: Locale to use for this request
"""
if self._locale_override:
return self._locale_override
else:
return await self.user.get_locale() | [
"async",
"def",
"get_locale",
"(",
"self",
")",
"->",
"Text",
":",
"if",
"self",
".",
"_locale_override",
":",
"return",
"self",
".",
"_locale_override",
"else",
":",
"return",
"await",
"self",
".",
"user",
".",
"get_locale",
"(",
")"
] | Get the locale to use for this request. It's either the overridden
locale or the locale provided by the platform.
:return: Locale to use for this request | [
"Get",
"the",
"locale",
"to",
"use",
"for",
"this",
"request",
".",
"It",
"s",
"either",
"the",
"overridden",
"locale",
"or",
"the",
"locale",
"provided",
"by",
"the",
"platform",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/request.py#L229-L240 | train |
BernardFW/bernard | src/bernard/engine/request.py | Request.get_trans_flags | async def get_trans_flags(self) -> 'Flags':
"""
Gives middlewares a chance to make the translation flags
"""
from bernard.middleware import MiddlewareManager
async def make_flags(request: Request) -> 'Flags':
return {}
mf = MiddlewareManager.instance().get('make_trans_flags', make_flags)
return await mf(self) | python | async def get_trans_flags(self) -> 'Flags':
"""
Gives middlewares a chance to make the translation flags
"""
from bernard.middleware import MiddlewareManager
async def make_flags(request: Request) -> 'Flags':
return {}
mf = MiddlewareManager.instance().get('make_trans_flags', make_flags)
return await mf(self) | [
"async",
"def",
"get_trans_flags",
"(",
"self",
")",
"->",
"'Flags'",
":",
"from",
"bernard",
".",
"middleware",
"import",
"MiddlewareManager",
"async",
"def",
"make_flags",
"(",
"request",
":",
"Request",
")",
"->",
"'Flags'",
":",
"return",
"{",
"}",
"mf",
"=",
"MiddlewareManager",
".",
"instance",
"(",
")",
".",
"get",
"(",
"'make_trans_flags'",
",",
"make_flags",
")",
"return",
"await",
"mf",
"(",
"self",
")"
] | Gives middlewares a chance to make the translation flags | [
"Gives",
"a",
"chance",
"to",
"middlewares",
"to",
"make",
"the",
"translation",
"flags"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/request.py#L242-L253 | train |
BernardFW/bernard | src/bernard/engine/request.py | Request.sign_url | async def sign_url(self, url, method=HASH):
"""
Sign a URL with this request's auth token
"""
token = await self.get_token()
if method == self.QUERY:
return patch_qs(url, {
settings.WEBVIEW_TOKEN_KEY: token,
})
elif method == self.HASH:
hash_id = 5
p = list(urlparse(url))
p[hash_id] = quote(token)
return urlunparse(p)
else:
raise ValueError(f'Invalid signing method "{method}"') | python | async def sign_url(self, url, method=HASH):
"""
Sign a URL with this request's auth token
"""
token = await self.get_token()
if method == self.QUERY:
return patch_qs(url, {
settings.WEBVIEW_TOKEN_KEY: token,
})
elif method == self.HASH:
hash_id = 5
p = list(urlparse(url))
p[hash_id] = quote(token)
return urlunparse(p)
else:
raise ValueError(f'Invalid signing method "{method}"') | [
"async",
"def",
"sign_url",
"(",
"self",
",",
"url",
",",
"method",
"=",
"HASH",
")",
":",
"token",
"=",
"await",
"self",
".",
"get_token",
"(",
")",
"if",
"method",
"==",
"self",
".",
"QUERY",
":",
"return",
"patch_qs",
"(",
"url",
",",
"{",
"settings",
".",
"WEBVIEW_TOKEN_KEY",
":",
"token",
",",
"}",
")",
"elif",
"method",
"==",
"self",
".",
"HASH",
":",
"hash_id",
"=",
"5",
"p",
"=",
"list",
"(",
"urlparse",
"(",
"url",
")",
")",
"p",
"[",
"hash_id",
"]",
"=",
"quote",
"(",
"token",
")",
"return",
"urlunparse",
"(",
"p",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f'Invalid signing method \"{method}\"'",
")"
] | Sign a URL with this request's auth token | [
"Sign",
"an",
"URL",
"with",
"this",
"request",
"s",
"auth",
"token"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/request.py#L262-L279 | train |
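Index 5 of `urlparse`'s six-component result is the fragment, which is why the `HASH` method above lands the token after `#`. The same stdlib calls in isolation:

from urllib.parse import urlparse, urlunparse, quote

token = 'abc def'  # hypothetical token
p = list(urlparse('https://example.com/webview?x=1'))
p[5] = quote(token)  # component 5 is the fragment
print(urlunparse(p))  # https://example.com/webview?x=1#abc%20def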
BernardFW/bernard | src/bernard/layers/stack.py | Stack.layers | def layers(self, value: List['BaseLayer']):
"""
Perform a copy of the layers list in order to avoid the list changing
without updating the index.
Then update the index.
"""
self._layers = list(value) # type: List[BaseLayer]
self._index = self._make_index()
self._transformed = {} | python | def layers(self, value: List['BaseLayer']):
"""
Perform a copy of the layers list in order to avoid the list changing
without updating the index.
Then update the index.
"""
self._layers = list(value) # type: List[BaseLayer]
self._index = self._make_index()
self._transformed = {} | [
"def",
"layers",
"(",
"self",
",",
"value",
":",
"List",
"[",
"'BaseLayer'",
"]",
")",
":",
"self",
".",
"_layers",
"=",
"list",
"(",
"value",
")",
"# type: List[BaseLayer]",
"self",
".",
"_index",
"=",
"self",
".",
"_make_index",
"(",
")",
"self",
".",
"_transformed",
"=",
"{",
"}"
] | Perform a copy of the layers list in order to avoid the list changing
without updating the index.
Then update the index. | [
"Perform",
"a",
"copy",
"of",
"the",
"layers",
"list",
"in",
"order",
"to",
"avoid",
"the",
"list",
"changing",
"without",
"updating",
"the",
"index",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L66-L75 | train |
BernardFW/bernard | src/bernard/layers/stack.py | Stack._make_index | def _make_index(self):
"""
Perform the index computation. It groups layers by type into a
dictionary, to allow quick access.
"""
out = {}
for layer in self._layers:
cls = layer.__class__
out[cls] = out.get(cls, []) + [layer]
return out | python | def _make_index(self):
"""
Perform the index computation. It groups layers by type into a
dictionary, to allow quick access.
"""
out = {}
for layer in self._layers:
cls = layer.__class__
out[cls] = out.get(cls, []) + [layer]
return out | [
"def",
"_make_index",
"(",
"self",
")",
":",
"out",
"=",
"{",
"}",
"for",
"layer",
"in",
"self",
".",
"_layers",
":",
"cls",
"=",
"layer",
".",
"__class__",
"out",
"[",
"cls",
"]",
"=",
"out",
".",
"get",
"(",
"cls",
",",
"[",
"]",
")",
"+",
"[",
"layer",
"]",
"return",
"out"
] | Perform the index computation. It groups layers by type into a
dictionary, to allow quick access. | [
"Perform",
"the",
"index",
"computation",
".",
"It",
"groups",
"layers",
"by",
"type",
"into",
"a",
"dictionary",
"to",
"allow",
"quick",
"access",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L77-L89 | train |
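The index built by `_make_index` is a plain class-to-instances grouping. The same idiom on toy objects:

class A: pass
class B: pass

layers = [A(), B(), A()]
out = {}
for layer in layers:
    cls = layer.__class__
    out[cls] = out.get(cls, []) + [layer]
print({k.__name__: len(v) for k, v in out.items()})  # {'A': 2, 'B': 1}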
BernardFW/bernard | src/bernard/layers/stack.py | Stack.has_layer | def has_layer(self, class_: Type[L], became: bool=True) -> bool:
"""
Test the presence of a given layer type.
:param class_: Layer class you're interested in.
:param became: Allow transformed layers in results
"""
return (class_ in self._index or
(became and class_ in self._transformed)) | python | def has_layer(self, class_: Type[L], became: bool=True) -> bool:
"""
Test the presence of a given layer type.
:param class_: Layer class you're interested in.
:param became: Allow transformed layers in results
"""
return (class_ in self._index or
(became and class_ in self._transformed)) | [
"def",
"has_layer",
"(",
"self",
",",
"class_",
":",
"Type",
"[",
"L",
"]",
",",
"became",
":",
"bool",
"=",
"True",
")",
"->",
"bool",
":",
"return",
"(",
"class_",
"in",
"self",
".",
"_index",
"or",
"(",
"became",
"and",
"class_",
"in",
"self",
".",
"_transformed",
")",
")"
] | Test the presence of a given layer type.
:param class_: Layer class you're interested in.
:param became: Allow transformed layers in results | [
"Test",
"the",
"presence",
"of",
"a",
"given",
"layer",
"type",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L101-L110 | train |
BernardFW/bernard | src/bernard/layers/stack.py | Stack.get_layer | def get_layer(self, class_: Type[L], became: bool=True) -> L:
"""
Return the first layer of a given class. If that layer is not present,
then raise a KeyError.
:param class_: class of the expected layer
:param became: Allow transformed layers in results
"""
try:
return self._index[class_][0]
except KeyError:
if became:
return self._transformed[class_][0]
else:
raise | python | def get_layer(self, class_: Type[L], became: bool=True) -> L:
"""
Return the first layer of a given class. If that layer is not present,
then raise a KeyError.
:param class_: class of the expected layer
:param became: Allow transformed layers in results
"""
try:
return self._index[class_][0]
except KeyError:
if became:
return self._transformed[class_][0]
else:
raise | [
"def",
"get_layer",
"(",
"self",
",",
"class_",
":",
"Type",
"[",
"L",
"]",
",",
"became",
":",
"bool",
"=",
"True",
")",
"->",
"L",
":",
"try",
":",
"return",
"self",
".",
"_index",
"[",
"class_",
"]",
"[",
"0",
"]",
"except",
"KeyError",
":",
"if",
"became",
":",
"return",
"self",
".",
"_transformed",
"[",
"class_",
"]",
"[",
"0",
"]",
"else",
":",
"raise"
] | Return the first layer of a given class. If that layer is not present,
then raise a KeyError.
:param class_: class of the expected layer
:param became: Allow transformed layers in results | [
"Return",
"the",
"first",
"layer",
"of",
"a",
"given",
"class",
".",
"If",
"that",
"layer",
"is",
"not",
"present",
"then",
"raise",
"a",
"KeyError",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L112-L127 | train |
BernardFW/bernard | src/bernard/layers/stack.py | Stack.get_layers | def get_layers(self, class_: Type[L], became: bool=True) -> List[L]:
"""
Returns the list of layers of a given class. If no layers are present
then the list will be empty.
:param class_: class of the expected layers
:param became: Allow transformed layers in results
"""
out = self._index.get(class_, [])
if became:
out += self._transformed.get(class_, [])
return out | python | def get_layers(self, class_: Type[L], became: bool=True) -> List[L]:
"""
Returns the list of layers of a given class. If no layers are present
then the list will be empty.
:param class_: class of the expected layers
:param became: Allow transformed layers in results
"""
out = self._index.get(class_, [])
if became:
out += self._transformed.get(class_, [])
return out | [
"def",
"get_layers",
"(",
"self",
",",
"class_",
":",
"Type",
"[",
"L",
"]",
",",
"became",
":",
"bool",
"=",
"True",
")",
"->",
"List",
"[",
"L",
"]",
":",
"out",
"=",
"self",
".",
"_index",
".",
"get",
"(",
"class_",
",",
"[",
"]",
")",
"if",
"became",
":",
"out",
"+=",
"self",
".",
"_transformed",
".",
"get",
"(",
"class_",
",",
"[",
"]",
")",
"return",
"out"
] | Returns the list of layers of a given class. If no layers are present
then the list will be empty.
:param class_: class of the expected layers
:param became: Allow transformed layers in results | [
"Returns",
"the",
"list",
"of",
"layers",
"of",
"a",
"given",
"class",
".",
"If",
"no",
"layers",
"are",
"present",
"then",
"the",
"list",
"will",
"be",
"empty",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/layers/stack.py#L129-L143 | train |
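Taken together, `has_layer`, `get_layer` and `get_layers` behave like dictionary lookups over that index. A toy stand-in that mimics the three accessors (ignoring the `became`/transformed-layer handling):

class TinyStack:
    """Minimal stand-in for Stack, index-lookup behavior only."""
    def __init__(self, layers):
        self._index = {}
        for layer in layers:
            self._index.setdefault(type(layer), []).append(layer)

    def has_layer(self, class_):
        return class_ in self._index

    def get_layer(self, class_):
        return self._index[class_][0]  # raises KeyError if absent

    def get_layers(self, class_):
        return self._index.get(class_, [])

class Text: pass
class Image: pass

s = TinyStack([Text(), Text()])
print(s.has_layer(Text), s.has_layer(Image))  # True False
print(len(s.get_layers(Text)))                # 2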
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_trajectory.py | NCEITrajectoryBase.check_trajectory_id | def check_trajectory_id(self, dataset):
'''
Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
# No need to check
exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
if not trajectory_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
test_ctx.assert_true(
getattr(trajectory_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results | python | def check_trajectory_id(self, dataset):
'''
Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
exists_ctx = TestCtx(BaseCheck.MEDIUM, 'Variable defining "trajectory_id" exists')
trajectory_ids = dataset.get_variables_by_attributes(cf_role='trajectory_id')
# No need to check
exists_ctx.assert_true(trajectory_ids, 'variable defining cf_role="trajectory_id" exists')
if not trajectory_ids:
return exists_ctx.to_result()
results.append(exists_ctx.to_result())
test_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended attributes for the {} variable'.format(trajectory_ids[0].name))
test_ctx.assert_true(
getattr(trajectory_ids[0], 'long_name', '') != "",
"long_name attribute should exist and not be empty"
)
results.append(test_ctx.to_result())
return results | [
"def",
"check_trajectory_id",
"(",
"self",
",",
"dataset",
")",
":",
"results",
"=",
"[",
"]",
"exists_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"MEDIUM",
",",
"'Variable defining \"trajectory_id\" exists'",
")",
"trajectory_ids",
"=",
"dataset",
".",
"get_variables_by_attributes",
"(",
"cf_role",
"=",
"'trajectory_id'",
")",
"# No need to check",
"exists_ctx",
".",
"assert_true",
"(",
"trajectory_ids",
",",
"'variable defining cf_role=\"trajectory_id\" exists'",
")",
"if",
"not",
"trajectory_ids",
":",
"return",
"exists_ctx",
".",
"to_result",
"(",
")",
"results",
".",
"append",
"(",
"exists_ctx",
".",
"to_result",
"(",
")",
")",
"test_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"MEDIUM",
",",
"'Recommended attributes for the {} variable'",
".",
"format",
"(",
"trajectory_ids",
"[",
"0",
"]",
".",
"name",
")",
")",
"test_ctx",
".",
"assert_true",
"(",
"getattr",
"(",
"trajectory_ids",
"[",
"0",
"]",
",",
"'long_name'",
",",
"''",
")",
"!=",
"\"\"",
",",
"\"long_name attribute should exist and not be empty\"",
")",
"results",
".",
"append",
"(",
"test_ctx",
".",
"to_result",
"(",
")",
")",
"return",
"results"
] | Checks that if a variable exists for the trajectory id it has the appropriate attributes
:param netCDF4.Dataset dataset: An open netCDF dataset | [
"Checks",
"that",
"if",
"a",
"variable",
"exists",
"for",
"the",
"trajectory",
"id",
"it",
"has",
"the",
"appropriate",
"attributes"
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_trajectory.py#L41-L61 | train |
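A variable that passes `check_trajectory_id` carries `cf_role = "trajectory_id"` and a non-empty `long_name`. A sketch of such a file built in memory with the netCDF4 library (names and the diskless flag are illustrative):

from netCDF4 import Dataset

nc = Dataset('trajectory.nc', 'w', diskless=True)
nc.createDimension('trajectory', 1)
v = nc.createVariable('trajectory', 'i4', ('trajectory',))
v.cf_role = 'trajectory_id'
v.long_name = 'trajectory identifier'
print(nc.get_variables_by_attributes(cf_role='trajectory_id'))  # [<... trajectory ...>]
nc.close()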
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_trajectory.py | NCEITrajectory1_1.check_required_attributes | def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
'cdm_data_type attribute must be set to Trajectory'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'trajectory',
'featureType attribute must be set to trajectory'
)
results.append(required_ctx.to_result())
return results | python | def check_required_attributes(self, dataset):
'''
Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
results = []
required_ctx = TestCtx(BaseCheck.HIGH, 'Required Global Attributes for Trajectory dataset')
required_ctx.assert_true(
getattr(dataset, 'nodc_template_version', '').lower() == self.valid_templates[0].lower(),
'nodc_template_version attribute must be {}'.format(self.valid_templates[0])
)
required_ctx.assert_true(
getattr(dataset, 'cdm_data_type', '') == 'Trajectory',
'cdm_data_type attribute must be set to Trajectory'
)
required_ctx.assert_true(
getattr(dataset, 'featureType', '') == 'trajectory',
'featureType attribute must be set to trajectory'
)
results.append(required_ctx.to_result())
return results | [
"def",
"check_required_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"results",
"=",
"[",
"]",
"required_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'Required Global Attributes for Trajectory dataset'",
")",
"required_ctx",
".",
"assert_true",
"(",
"getattr",
"(",
"dataset",
",",
"'nodc_template_version'",
",",
"''",
")",
".",
"lower",
"(",
")",
"==",
"self",
".",
"valid_templates",
"[",
"0",
"]",
".",
"lower",
"(",
")",
",",
"'nodc_template_version attribute must be {}'",
".",
"format",
"(",
"self",
".",
"valid_templates",
"[",
"0",
"]",
")",
")",
"required_ctx",
".",
"assert_true",
"(",
"getattr",
"(",
"dataset",
",",
"'cdm_data_type'",
",",
"''",
")",
"==",
"'Trajectory'",
",",
"'cdm_data_type attribute must be set to Trajectory'",
")",
"required_ctx",
".",
"assert_true",
"(",
"getattr",
"(",
"dataset",
",",
"'featureType'",
",",
"''",
")",
"==",
"'trajectory'",
",",
"'featureType attribute must be set to trajectory'",
")",
"results",
".",
"append",
"(",
"required_ctx",
".",
"to_result",
"(",
")",
")",
"return",
"results"
] | Feature type specific check of global required and highly recommended attributes.
:param netCDF4.Dataset dataset: An open netCDF dataset | [
"Feature",
"type",
"specific",
"check",
"of",
"global",
"required",
"and",
"highly",
"recommended",
"attributes",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_trajectory.py#L92-L113 | train |
jpscaletti/authcode | authcode/auth_authentication_mixin.py | AuthenticationMixin.login | def login(self, user, remember=True, session=None):
"""Sets the current user UID in the session.
Instead of just storing the user's id, it generates a hash from the
password *salt*. That way, an admin or the user herself can invalidate
the login on other computers just by changing (or re-saving)
her password.
"""
logger = logging.getLogger(__name__)
logger.debug(u'User `{0}` logged in'.format(user.login))
if session is None:
session = self.session
session['permanent'] = remember
session[self.session_key] = user.get_uhmac()
if callable(getattr(session, 'save', None)):
session.save() | python | def login(self, user, remember=True, session=None):
"""Sets the current user UID in the session.
Instead of just storing the user's id, it generates a hash from the
password *salt*. That way, an admin or the user herself can invalidate
the login on other computers just by changing (or re-saving)
her password.
"""
logger = logging.getLogger(__name__)
logger.debug(u'User `{0}` logged in'.format(user.login))
if session is None:
session = self.session
session['permanent'] = remember
session[self.session_key] = user.get_uhmac()
if callable(getattr(session, 'save', None)):
session.save() | [
"def",
"login",
"(",
"self",
",",
"user",
",",
"remember",
"=",
"True",
",",
"session",
"=",
"None",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"debug",
"(",
"u'User `{0}` logged in'",
".",
"format",
"(",
"user",
".",
"login",
")",
")",
"if",
"session",
"is",
"None",
":",
"session",
"=",
"self",
".",
"session",
"session",
"[",
"'permanent'",
"]",
"=",
"remember",
"session",
"[",
"self",
".",
"session_key",
"]",
"=",
"user",
".",
"get_uhmac",
"(",
")",
"if",
"callable",
"(",
"getattr",
"(",
"session",
",",
"'save'",
",",
"None",
")",
")",
":",
"session",
".",
"save",
"(",
")"
] | Sets the current user UID in the session.
Instead of just storing the user's id, it generates a hash from the
password *salt*. That way, an admin or the user herself can invalidate
the login on other computers just by changing (or re-saving)
her password. | [
"Sets",
"the",
"current",
"user",
"UID",
"in",
"the",
"session",
"."
] | 91529b6d0caec07d1452758d937e1e0745826139 | https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_authentication_mixin.py#L130-L146 | train |
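The docstring's point — storing a salt-derived MAC instead of the raw user id, so that re-saving the password invalidates every stored session — can be sketched in a few lines. `get_uhmac` here is a hypothetical stand-in, not authcode's actual implementation:

```python
import hashlib
import hmac

def get_uhmac(user_id, password_salt):
    # Hypothetical stand-in for user.get_uhmac(): a MAC over the user id
    # keyed by the password salt, so a new salt yields a new value.
    return hmac.new(password_salt, str(user_id).encode(), hashlib.sha256).hexdigest()

session = {}
session['permanent'] = True                     # the `remember` flag
session['_uhmac'] = get_uhmac(42, b'old-salt')  # the session_key entry

# After the user (or an admin) re-saves the password, the stored value no
# longer matches, so the login is rejected on the next request.
assert session['_uhmac'] != get_uhmac(42, b'new-salt')
```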
jasonrbriggs/proton | python/proton/xmlutils.py | index | def index(elem):
'''
Return the index position of an element in the children of a parent.
'''
parent = elem.getparent()
for x in range(0, len(parent.getchildren())):
if parent.getchildren()[x] == elem:
return x
return -1 | python | def index(elem):
'''
Return the index position of an element in the children of a parent.
'''
parent = elem.getparent()
for x in range(0, len(parent.getchildren())):
if parent.getchildren()[x] == elem:
return x
return -1 | [
"def",
"index",
"(",
"elem",
")",
":",
"parent",
"=",
"elem",
".",
"getparent",
"(",
")",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"parent",
".",
"getchildren",
"(",
")",
")",
")",
":",
"if",
"parent",
".",
"getchildren",
"(",
")",
"[",
"x",
"]",
"==",
"elem",
":",
"return",
"x",
"return",
"-",
"1"
] | Return the index position of an element in the children of a parent. | [
"Return",
"the",
"index",
"position",
"of",
"an",
"element",
"in",
"the",
"children",
"of",
"a",
"parent",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L18-L26 | train |
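A quick usage sketch with lxml (which supplies the `getparent()` used above). Note the function assumes the element actually has a parent — calling it on the root would raise `AttributeError`, because `getparent()` returns `None` there:

```python
from lxml import etree

root = etree.fromstring('<ul><li>a</li><li>b</li><li>c</li></ul>')
print(index(root[1]))  # 1: second child of its parent
print(index(root[2]))  # 2
# index(root) would fail: root.getparent() is None, so parent.getchildren()
# raises AttributeError rather than returning -1.
```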
jasonrbriggs/proton | python/proton/xmlutils.py | replaceelement | def replaceelement(oldelem, newelem):
'''
Given a parent element, replace oldelem with newelem.
'''
parent = oldelem.getparent()
if parent is not None:
size = len(parent.getchildren())
for x in range(0, size):
if parent.getchildren()[x] == oldelem:
parent.remove(oldelem)
parent.insert(x, newelem) | python | def replaceelement(oldelem, newelem):
'''
Given a parent element, replace oldelem with newelem.
'''
parent = oldelem.getparent()
if parent is not None:
size = len(parent.getchildren())
for x in range(0, size):
if parent.getchildren()[x] == oldelem:
parent.remove(oldelem)
parent.insert(x, newelem) | [
"def",
"replaceelement",
"(",
"oldelem",
",",
"newelem",
")",
":",
"parent",
"=",
"oldelem",
".",
"getparent",
"(",
")",
"if",
"parent",
"is",
"not",
"None",
":",
"size",
"=",
"len",
"(",
"parent",
".",
"getchildren",
"(",
")",
")",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"size",
")",
":",
"if",
"parent",
".",
"getchildren",
"(",
")",
"[",
"x",
"]",
"==",
"oldelem",
":",
"parent",
".",
"remove",
"(",
"oldelem",
")",
"parent",
".",
"insert",
"(",
"x",
",",
"newelem",
")"
] | Given a parent element, replace oldelem with newelem. | [
"Given",
"a",
"parent",
"element",
"replace",
"oldelem",
"with",
"newelem",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L29-L39 | train |
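For example, swapping out the first child of a small lxml tree keeps the remaining siblings in place:

```python
from lxml import etree

root = etree.fromstring('<doc><old/><keep/></doc>')
# Replace <old/> with a fresh <new/> element at the same index.
replaceelement(root[0], etree.Element('new'))
print(etree.tostring(root))  # b'<doc><new/><keep/></doc>'
```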
jasonrbriggs/proton | python/proton/xmlutils.py | parseelement | def parseelement(elem):
'''
Convert the content of an element into more ElementTree structures.
We do this because sometimes we want to set xml as the content of an element.
'''
xml = '<%(tag)s>%(content)s</%(tag)s>' % {'tag' : elem.tag, 'content' : elem.text}
et = etree.fromstring(xml)
replaceelement(elem, et) | python | def parseelement(elem):
'''
Convert the content of an element into more ElementTree structures.
We do this because sometimes we want to set xml as the content of an element.
'''
xml = '<%(tag)s>%(content)s</%(tag)s>' % {'tag' : elem.tag, 'content' : elem.text}
et = etree.fromstring(xml)
replaceelement(elem, et) | [
"def",
"parseelement",
"(",
"elem",
")",
":",
"xml",
"=",
"'<%(tag)s>%(content)s</%(tag)s>'",
"%",
"{",
"'tag'",
":",
"elem",
".",
"tag",
",",
"'content'",
":",
"elem",
".",
"text",
"}",
"et",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
")",
"replaceelement",
"(",
"elem",
",",
"et",
")"
] | Convert the content of an element into more ElementTree structures.
We do this because sometimes we want to set xml as the content of an element. | [
"Convert",
"the",
"content",
"of",
"an",
"element",
"into",
"more",
"ElementTree",
"structures",
".",
"We",
"do",
"this",
"because",
"sometimes",
"we",
"want",
"to",
"set",
"xml",
"as",
"the",
"content",
"of",
"an",
"element",
"."
] | e734734750797ef0caaa1680379e07b86d7a53e3 | https://github.com/jasonrbriggs/proton/blob/e734734750797ef0caaa1680379e07b86d7a53e3/python/proton/xmlutils.py#L42-L49 | train |
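In use, an element whose text is escaped markup gets re-parsed into real child elements. Two caveats follow from how the wrapper string is built: the original element's attributes and tail are not carried over, and the text must itself be well-formed XML:

```python
from lxml import etree

root = etree.fromstring('<page><body>&lt;p&gt;hello&lt;/p&gt;</body></page>')
# root[0].text is the plain string '<p>hello</p>'; parseelement turns it
# into actual element structure and swaps it in via replaceelement.
parseelement(root[0])
print(etree.tostring(root))  # b'<page><body><p>hello</p></body></page>'
```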
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_base.py | BaseNCEICheck._check_min_max_range | def _check_min_max_range(self, var, test_ctx):
"""
Checks that either both valid_min and valid_max exist, or valid_range
exists.
"""
if 'valid_range' in var.ncattrs():
test_ctx.assert_true(var.valid_range.dtype == var.dtype and
len(var.valid_range) == 2
and var.valid_range[0] <= var.valid_range[1],
"valid_range must be a two element vector of min followed by max with the same data type as {}".format(var.name)
)
else:
for bound in ('valid_min', 'valid_max'):
v_bound = getattr(var, bound, '')
warn_msg = '{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'.format(bound, var.name)
# need to special case str attributes since they aren't directly
# comparable to numpy dtypes
if isinstance(v_bound, six.string_types):
test_ctx.assert_true(v_bound != '' and
var.dtype.char == 'S', warn_msg)
# otherwise compare the numpy types directly
else:
test_ctx.assert_true(v_bound.dtype == var.dtype, warn_msg)
return test_ctx | python | def _check_min_max_range(self, var, test_ctx):
"""
Checks that either both valid_min and valid_max exist, or valid_range
exists.
"""
if 'valid_range' in var.ncattrs():
test_ctx.assert_true(var.valid_range.dtype == var.dtype and
len(var.valid_range) == 2
and var.valid_range[0] <= var.valid_range[1],
"valid_range must be a two element vector of min followed by max with the same data type as {}".format(var.name)
)
else:
for bound in ('valid_min', 'valid_max'):
v_bound = getattr(var, bound, '')
warn_msg = '{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'.format(bound, var.name)
# need to special case str attributes since they aren't directly
# comparable to numpy dtypes
if isinstance(v_bound, six.string_types):
test_ctx.assert_true(v_bound != '' and
var.dtype.char == 'S', warn_msg)
# otherwise compare the numpy types directly
else:
test_ctx.assert_true(v_bound.dtype == var.dtype, warn_msg)
return test_ctx | [
"def",
"_check_min_max_range",
"(",
"self",
",",
"var",
",",
"test_ctx",
")",
":",
"if",
"'valid_range'",
"in",
"var",
".",
"ncattrs",
"(",
")",
":",
"test_ctx",
".",
"assert_true",
"(",
"var",
".",
"valid_range",
".",
"dtype",
"==",
"var",
".",
"dtype",
"and",
"len",
"(",
"var",
".",
"valid_range",
")",
"==",
"2",
"and",
"var",
".",
"valid_range",
"[",
"0",
"]",
"<=",
"var",
".",
"valid_range",
"[",
"1",
"]",
",",
"\"valid_range must be a two element vector of min followed by max with the same data type as {}\"",
".",
"format",
"(",
"var",
".",
"name",
")",
")",
"else",
":",
"for",
"bound",
"in",
"(",
"'valid_min'",
",",
"'valid_max'",
")",
":",
"v_bound",
"=",
"getattr",
"(",
"var",
",",
"bound",
",",
"''",
")",
"warn_msg",
"=",
"'{} attribute should exist, have the same type as {}, and not be empty or valid_range should be defined'",
".",
"format",
"(",
"bound",
",",
"var",
".",
"name",
")",
"# need to special case str attributes since they aren't directly",
"# comparable to numpy dtypes",
"if",
"isinstance",
"(",
"v_bound",
",",
"six",
".",
"string_types",
")",
":",
"test_ctx",
".",
"assert_true",
"(",
"v_bound",
"!=",
"''",
"and",
"var",
".",
"dtype",
".",
"char",
"==",
"'S'",
",",
"warn_msg",
")",
"# otherwise compare the numpy types directly",
"else",
":",
"test_ctx",
".",
"assert_true",
"(",
"v_bound",
".",
"dtype",
"==",
"var",
".",
"dtype",
",",
"warn_msg",
")",
"return",
"test_ctx"
] | Checks that either both valid_min and valid_max exist, or valid_range
exists. | [
"Checks",
"that",
"either",
"both",
"valid_min",
"and",
"valid_max",
"exist",
"or",
"valid_range",
"exists",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L92-L115 | train |
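The dtype rule this check enforces can be seen with plain numpy values (illustrative numbers, not tied to any real file): `valid_range` must share the variable's dtype, hold exactly two elements, and be ordered min ≤ max.

```python
import numpy as np

var_dtype = np.dtype('float32')                 # the variable's dtype
valid_range = np.array([0.0, 50.0], 'float32')  # two elements, min <= max

ok = (valid_range.dtype == var_dtype
      and len(valid_range) == 2
      and valid_range[0] <= valid_range[1])
print(ok)  # True; a float64 valid_range on this variable would fail
```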
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_base.py | NCEI1_1Check.check_base_required_attributes | def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
metadata_conventions = getattr(dataset, 'Metadata_Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
accepted_conventions = 'CF-1.6'
test_ctx.assert_true(conventions == accepted_conventions,
'Conventions attribute is missing or is not equal to CF-1.6: {}'.format(conventions))
test_ctx.assert_true(metadata_conventions == 'Unidata Dataset Discovery v1.0',
"Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}".format(metadata_conventions))
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
test_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
test_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
return test_ctx.to_result() | python | def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
metadata_conventions = getattr(dataset, 'Metadata_Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
accepted_conventions = 'CF-1.6'
test_ctx.assert_true(conventions == accepted_conventions,
'Conventions attribute is missing or is not equal to CF-1.6: {}'.format(conventions))
test_ctx.assert_true(metadata_conventions == 'Unidata Dataset Discovery v1.0',
"Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}".format(metadata_conventions))
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
test_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
test_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
return test_ctx.to_result() | [
"def",
"check_base_required_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"test_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'Required global attributes'",
")",
"conventions",
"=",
"getattr",
"(",
"dataset",
",",
"'Conventions'",
",",
"''",
")",
"metadata_conventions",
"=",
"getattr",
"(",
"dataset",
",",
"'Metadata_Conventions'",
",",
"''",
")",
"feature_type",
"=",
"getattr",
"(",
"dataset",
",",
"'featureType'",
",",
"''",
")",
"cdm_data_type",
"=",
"getattr",
"(",
"dataset",
",",
"'cdm_data_type'",
",",
"''",
")",
"standard_name_vocab",
"=",
"getattr",
"(",
"dataset",
",",
"'standard_name_vocabulary'",
",",
"''",
")",
"accepted_conventions",
"=",
"'CF-1.6'",
"test_ctx",
".",
"assert_true",
"(",
"conventions",
"==",
"accepted_conventions",
",",
"'Conventions attribute is missing or is not equal to CF-1.6: {}'",
".",
"format",
"(",
"conventions",
")",
")",
"test_ctx",
".",
"assert_true",
"(",
"metadata_conventions",
"==",
"'Unidata Dataset Discovery v1.0'",
",",
"\"Metadata_Conventions attribute is required to be 'Unidata Dataset Discovery v1.0': {}\"",
".",
"format",
"(",
"metadata_conventions",
")",
")",
"test_ctx",
".",
"assert_true",
"(",
"feature_type",
"in",
"[",
"'point'",
",",
"'timeSeries'",
",",
"'trajectory'",
",",
"'profile'",
",",
"'timeSeriesProfile'",
",",
"'trajectoryProfile'",
"]",
",",
"'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'",
".",
"format",
"(",
"feature_type",
")",
")",
"test_ctx",
".",
"assert_true",
"(",
"cdm_data_type",
".",
"lower",
"(",
")",
"in",
"[",
"'grid'",
",",
"'image'",
",",
"'point'",
",",
"'radial'",
",",
"'station'",
",",
"'swath'",
",",
"'trajectory'",
"]",
",",
"'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'",
".",
"format",
"(",
"cdm_data_type",
")",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'[sS]tandard [nN]ame [tT]able'",
")",
"test_ctx",
".",
"assert_true",
"(",
"regex",
".",
"search",
"(",
"standard_name_vocab",
")",
",",
"\"standard_name_vocabulary doesn't contain 'Standard Name Table': {}\"",
".",
"format",
"(",
"standard_name_vocab",
")",
")",
"return",
"test_ctx",
".",
"to_result",
"(",
")"
] | Check the global required and highly recommended attributes for 1.1 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6" ; //......................................... REQUIRED - Always try to use latest value. (CF)
:Metadata_Conventions = "Unidata Dataset Discovery v1.0" ; //........ REQUIRED - Do not change. (ACDD)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:nodc_template_version = "NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NODC)
:standard_name_vocabulary = "NetCDF Climate and Forecast (CF) Metadata Convention Standard Name Table "X"" ; //........ REQUIRED - If using CF standard name attribute for variables. "X" denotes the table number (ACDD) | [
"Check",
"the",
"global",
"required",
"and",
"highly",
"recommended",
"attributes",
"for",
"1",
".",
"1",
"templates",
".",
"These",
"go",
"an",
"extra",
"step",
"besides",
"just",
"checking",
"that",
"they",
"exist",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L419-L457 | train |
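The vocabulary test at the end is just a case-tolerant substring match, so both the long NODC wording and a terse "CF standard name table vNN" pass:

```python
import re

regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
for vocab in ('NetCDF Climate and Forecast (CF) Metadata Convention '
              'Standard Name Table v28',
              'CF standard name table v28',
              'CF-1.6'):
    print(bool(regex.search(vocab)))  # True, True, False
```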
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_base.py | NCEI2_0Check.check_base_required_attributes | def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
# Define conventions
accepted_conventions = ['CF-1.6', 'ACDD-1.3']
dataset_conventions = conventions.replace(' ', '').split(',')
for accepted_convention in accepted_conventions:
if accepted_convention not in dataset_conventions:
test_ctx.assert_true(False, 'Conventions attribute is missing or is not equal to "CF-1.6, ACDD-1.3": {}'.format(conventions))
break
else:
test_ctx.assert_true(True, '')
# Check feature types
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
return test_ctx.to_result() | python | def check_base_required_attributes(self, dataset):
'''
Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD)
'''
test_ctx = TestCtx(BaseCheck.HIGH, 'Required global attributes')
conventions = getattr(dataset, 'Conventions', '')
feature_type = getattr(dataset, 'featureType', '')
# Define conventions
accepted_conventions = ['CF-1.6', 'ACDD-1.3']
dataset_conventions = conventions.replace(' ', '').split(',')
for accepted_convention in accepted_conventions:
if accepted_convention not in dataset_conventions:
test_ctx.assert_true(False, 'Conventions attribute is missing or is not equal to "CF-1.6, ACDD-1.3": {}'.format(conventions))
break
else:
test_ctx.assert_true(True, '')
# Check feature types
test_ctx.assert_true(feature_type in ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile'],
'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'.format(feature_type))
return test_ctx.to_result() | [
"def",
"check_base_required_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"test_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'Required global attributes'",
")",
"conventions",
"=",
"getattr",
"(",
"dataset",
",",
"'Conventions'",
",",
"''",
")",
"feature_type",
"=",
"getattr",
"(",
"dataset",
",",
"'featureType'",
",",
"''",
")",
"# Define conventions",
"accepted_conventions",
"=",
"[",
"'CF-1.6'",
",",
"'ACDD-1.3'",
"]",
"dataset_conventions",
"=",
"conventions",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
"for",
"accepted_convention",
"in",
"accepted_conventions",
":",
"if",
"accepted_convention",
"not",
"in",
"dataset_conventions",
":",
"test_ctx",
".",
"assert_true",
"(",
"False",
",",
"'Conventions attribute is missing or is not equal to \"CF-1.6, ACDD-1.3\": {}'",
".",
"format",
"(",
"conventions",
")",
")",
"break",
"else",
":",
"test_ctx",
".",
"assert_true",
"(",
"True",
",",
"''",
")",
"# Check feature types",
"test_ctx",
".",
"assert_true",
"(",
"feature_type",
"in",
"[",
"'point'",
",",
"'timeSeries'",
",",
"'trajectory'",
",",
"'profile'",
",",
"'timeSeriesProfile'",
",",
"'trajectoryProfile'",
"]",
",",
"'Feature type must be one of point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile: {}'",
".",
"format",
"(",
"feature_type",
")",
")",
"return",
"test_ctx",
".",
"to_result",
"(",
")"
] | Check the global required and highly recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:Conventions = "CF-1.6, ACDD-1.3" ; //............................... REQUIRED - Always try to use latest value. (CF)
:featureType = "timeSeries" ; //..................................... REQUIRED - CF attribute for identifying the featureType.
:cdm_data_type = "Station" ; //...................................... REQUIRED (ACDD)
:ncei_template_version = "NCEI_NetCDF_TimeSeries_Orthogonal_Template_v1.1" ; //....... REQUIRED (NCEI)
:title = "" ; //............................................... HIGHLY RECOMMENDED - Provide a useful title for the data in the file. (ACDD)
:summary = "" ; //............................................. HIGHLY RECOMMENDED - Provide a useful summary or abstract for the data in the file. (ACDD)
:keywords = "" ; //............................................ HIGHLY RECOMMENDED - A comma separated list of keywords coming from the keywords_vocabulary. (ACDD)
:Conventions = "CF-1.6, ACDD-1.3" ; //......................... HIGHLY RECOMMENDED - A comma separated list of the conventions being followed. Always try to use latest version. (CF/ACDD) | [
"Check",
"the",
"global",
"required",
"and",
"highly",
"recommended",
"attributes",
"for",
"2",
".",
"0",
"templates",
".",
"These",
"go",
"an",
"extra",
"step",
"besides",
"just",
"checking",
"that",
"they",
"exist",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L738-L773 | train |
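The Conventions handling above is order-insensitive and tolerates extra entries; a condensed equivalent of the split-and-scan, without the for/else bookkeeping:

```python
def conventions_ok(conventions):
    # Strip spaces, split on commas, and require both mandated conventions.
    present = conventions.replace(' ', '').split(',')
    return all(c in present for c in ('CF-1.6', 'ACDD-1.3'))

print(conventions_ok('CF-1.6, ACDD-1.3'))          # True
print(conventions_ok('ACDD-1.3,CF-1.6,IOOS-1.2'))  # True: order/extras OK
print(conventions_ok('CF-1.6'))                    # False: ACDD-1.3 missing
```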
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_base.py | NCEI2_0Check.check_recommended_global_attributes | def check_recommended_global_attributes(self, dataset):
'''
Check the global recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:id = "" ; //.................................................. RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.ncei). (ACDD)
:history = "" ; //............................................. RECOMMENDED - Provides an audit trail for modifications to the original data. (ACDD)
:source = "" ; //.............................................. RECOMMENDED - The method of production of the original data. (CF)
:processing_level = "" ; //.................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:comment = "" ; //............................................. RECOMMENDED - Provide useful additional information here. (CF)
:acknowledgment = "" ; //...................................... RECOMMENDED - A place to acknowledge various types of support for the project that produced this data. (ACDD)
:license = "" ; //............................................. RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:standard_name_vocabulary = "CF Standard Name Table vNN" ; //.. RECOMMENDED - If using CF standard name attribute for variables. Replace NN with the CF standard name table number (CF)
:date_created = "" ; //........................................ RECOMMENDED - Creation date of this version of the data(netCDF). Use ISO 8601:2004 for date and time. (ACDD)
:creator_name = "" ; //........................................ RECOMMENDED - The name of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_email = "" ; //....................................... RECOMMENDED - The email address of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_url = "" ; //......................................... RECOMMENDED - The URL of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:institution = "" ; //......................................... RECOMMENDED -The name of the institution principally responsible for originating this data.. An institution attribute can be used for each variable if variables come from more than one institution. (CF/ACDD)
:project = "" ; //............................................. RECOMMENDED - The name of the project(s) principally responsible for originating this data. Multiple projects can be separated by commas. (ACDD)
:publisher_name = "" ; //...................................... RECOMMENDED - The name of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_email = "" ; //..................................... RECOMMENDED - The email address of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_url = "" ; //....................................... RECOMMENDED - The URL of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:geospatial_bounds = "" ; //................................... RECOMMENDED - Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format. (ACDD)
:geospatial_bounds_crs = "" ; //............................... RECOMMENDED - The coordinate reference system (CRS) of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_bounds_vertical_crs = "" ; //...................... RECOMMENDED - The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_lat_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower latitude limit. (ACDD)
:geospatial_lat_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper latitude limit. (ACDD)
:geospatial_lon_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower longitude limit. (ACDD)
:geospatial_lon_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper longitude limit. (ACDD)
:geospatial_vertical_min = 0.0d ; //........................... RECOMMENDED - Describes the numerically smaller vertical limit. (ACDD)
:geospatial_vertical_max = 0.0d ; //........................... RECOMMENDED - Describes the numerically larger vertical limit. (ACDD)
:geospatial_vertical_positive = "" ; //........................ RECOMMENDED - Use "up" or "down". (ACDD)
:time_coverage_start = "" ; //................................. RECOMMENDED - Describes the time of the first data point in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_end = "" ; //................................... RECOMMENDED - Describes the time of the last data point in the data set. Use ISO 8601:2004 for date and time.(ACDD)
:time_coverage_duration = "" ; //.............................. RECOMMENDED - Describes the duration of the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_resolution = "" ; //............................ RECOMMENDED - Describes the targeted time period between each value in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:uuid = "" ; //................................................ RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NCEI)
:sea_name = "" ; //............................................ RECOMMENDED - The names of the sea in which the data were collected. Use NCEI sea names table. (NCEI)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
recommended_ctx.assert_true(
sea.lower() in sea_names,
'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
)
# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))
# I hate english.
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
recommended_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
if hasattr(dataset, 'comment'):
recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')
return recommended_ctx.to_result() | python | def check_recommended_global_attributes(self, dataset):
'''
Check the global recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:id = "" ; //.................................................. RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.ncei). (ACDD)
:history = "" ; //............................................. RECOMMENDED - Provides an audit trail for modifications to the original data. (ACDD)
:source = "" ; //.............................................. RECOMMENDED - The method of production of the original data. (CF)
:processing_level = "" ; //.................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:comment = "" ; //............................................. RECOMMENDED - Provide useful additional information here. (CF)
:acknowledgment = "" ; //...................................... RECOMMENDED - A place to acknowledge various types of support for the project that produced this data. (ACDD)
:license = "" ; //............................................. RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:standard_name_vocabulary = "CF Standard Name Table vNN" ; //.. RECOMMENDED - If using CF standard name attribute for variables. Replace NN with the CF standard name table number (CF)
:date_created = "" ; //........................................ RECOMMENDED - Creation date of this version of the data(netCDF). Use ISO 8601:2004 for date and time. (ACDD)
:creator_name = "" ; //........................................ RECOMMENDED - The name of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_email = "" ; //....................................... RECOMMENDED - The email address of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_url = "" ; //......................................... RECOMMENDED - The URL of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:institution = "" ; //......................................... RECOMMENDED -The name of the institution principally responsible for originating this data.. An institution attribute can be used for each variable if variables come from more than one institution. (CF/ACDD)
:project = "" ; //............................................. RECOMMENDED - The name of the project(s) principally responsible for originating this data. Multiple projects can be separated by commas. (ACDD)
:publisher_name = "" ; //...................................... RECOMMENDED - The name of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_email = "" ; //..................................... RECOMMENDED - The email address of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_url = "" ; //....................................... RECOMMENDED - The URL of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:geospatial_bounds = "" ; //................................... RECOMMENDED - Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format. (ACDD)
:geospatial_bounds_crs = "" ; //............................... RECOMMENDED - The coordinate reference system (CRS) of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_bounds_vertical_crs = "" ; //...................... RECOMMENDED - The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_lat_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower latitude limit. (ACDD)
:geospatial_lat_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper latitude limit. (ACDD)
:geospatial_lon_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower longitude limit. (ACDD)
:geospatial_lon_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper longitude limit. (ACDD)
:geospatial_vertical_min = 0.0d ; //........................... RECOMMENDED - Describes the numerically smaller vertical limit. (ACDD)
:geospatial_vertical_max = 0.0d ; //........................... RECOMMENDED - Describes the numerically larger vertical limit. (ACDD)
:geospatial_vertical_positive = "" ; //........................ RECOMMENDED - Use "up" or "down". (ACDD)
:time_coverage_start = "" ; //................................. RECOMMENDED - Describes the time of the first data point in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_end = "" ; //................................... RECOMMENDED - Describes the time of the last data point in the data set. Use ISO 8601:2004 for date and time.(ACDD)
:time_coverage_duration = "" ; //.............................. RECOMMENDED - Describes the duration of the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_resolution = "" ; //............................ RECOMMENDED - Describes the targeted time period between each value in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:uuid = "" ; //................................................ RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NCEI)
:sea_name = "" ; //............................................ RECOMMENDED - The names of the sea in which the data were collected. Use NCEI sea names table. (NCEI)
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended global attributes')
sea_names = [sn.lower() for sn in util.get_sea_names()]
sea_name = getattr(dataset, 'sea_name', '')
sea_name = sea_name.replace(', ', ',')
sea_name = sea_name.split(',') if sea_name else []
for sea in sea_name:
recommended_ctx.assert_true(
sea.lower() in sea_names,
'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'.format(sea)
)
# Parse dates, check for ISO 8601
for attr in ['time_coverage_start', 'time_coverage_end', 'date_created', 'date_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
recommended_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
recommended_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
value = getattr(dataset, 'geospatial_vertical_positive', '')
recommended_ctx.assert_true(value.lower() in ['up', 'down'], 'geospatial_vertical_positive attribute should be up or down: {}'.format(value))
# I hate english.
ack_exists = any((getattr(dataset, attr, '') != '' for attr in ['acknowledgment', 'acknowledgement']))
recommended_ctx.assert_true(ack_exists, 'acknowledgement attribute should exist and not be empty')
standard_name_vocab = getattr(dataset, 'standard_name_vocabulary', '')
regex = re.compile(r'[sS]tandard [nN]ame [tT]able')
recommended_ctx.assert_true(regex.search(standard_name_vocab),
"standard_name_vocabulary doesn't contain 'Standard Name Table': {}".format(standard_name_vocab))
if hasattr(dataset, 'comment'):
recommended_ctx.assert_true(getattr(dataset, 'comment', '') != '', 'comment attribute should not be empty if specified')
return recommended_ctx.to_result() | [
"def",
"check_recommended_global_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"recommended_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"MEDIUM",
",",
"'Recommended global attributes'",
")",
"sea_names",
"=",
"[",
"sn",
".",
"lower",
"(",
")",
"for",
"sn",
"in",
"util",
".",
"get_sea_names",
"(",
")",
"]",
"sea_name",
"=",
"getattr",
"(",
"dataset",
",",
"'sea_name'",
",",
"''",
")",
"sea_name",
"=",
"sea_name",
".",
"replace",
"(",
"', '",
",",
"','",
")",
"sea_name",
"=",
"sea_name",
".",
"split",
"(",
"','",
")",
"if",
"sea_name",
"else",
"[",
"]",
"for",
"sea",
"in",
"sea_name",
":",
"recommended_ctx",
".",
"assert_true",
"(",
"sea",
".",
"lower",
"(",
")",
"in",
"sea_names",
",",
"'sea_name attribute should exist and should be from the NODC sea names list: {} is not a valid sea name'",
".",
"format",
"(",
"sea",
")",
")",
"# Parse dates, check for ISO 8601",
"for",
"attr",
"in",
"[",
"'time_coverage_start'",
",",
"'time_coverage_end'",
",",
"'date_created'",
",",
"'date_modified'",
"]",
":",
"attr_value",
"=",
"getattr",
"(",
"dataset",
",",
"attr",
",",
"''",
")",
"try",
":",
"parse_datetime",
"(",
"attr_value",
")",
"recommended_ctx",
".",
"assert_true",
"(",
"True",
",",
"''",
")",
"# Score it True!",
"except",
"ISO8601Error",
":",
"recommended_ctx",
".",
"assert_true",
"(",
"False",
",",
"'{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'",
".",
"format",
"(",
"attr",
",",
"attr_value",
")",
")",
"value",
"=",
"getattr",
"(",
"dataset",
",",
"'geospatial_vertical_positive'",
",",
"''",
")",
"recommended_ctx",
".",
"assert_true",
"(",
"value",
".",
"lower",
"(",
")",
"in",
"[",
"'up'",
",",
"'down'",
"]",
",",
"'geospatial_vertical_positive attribute should be up or down: {}'",
".",
"format",
"(",
"value",
")",
")",
"# I hate english.",
"ack_exists",
"=",
"any",
"(",
"(",
"getattr",
"(",
"dataset",
",",
"attr",
",",
"''",
")",
"!=",
"''",
"for",
"attr",
"in",
"[",
"'acknowledgment'",
",",
"'acknowledgement'",
"]",
")",
")",
"recommended_ctx",
".",
"assert_true",
"(",
"ack_exists",
",",
"'acknowledgement attribute should exist and not be empty'",
")",
"standard_name_vocab",
"=",
"getattr",
"(",
"dataset",
",",
"'standard_name_vocabulary'",
",",
"''",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'[sS]tandard [nN]ame [tT]able'",
")",
"recommended_ctx",
".",
"assert_true",
"(",
"regex",
".",
"search",
"(",
"standard_name_vocab",
")",
",",
"\"standard_name_vocabulary doesn't contain 'Standard Name Table': {}\"",
".",
"format",
"(",
"standard_name_vocab",
")",
")",
"if",
"hasattr",
"(",
"dataset",
",",
"'comment'",
")",
":",
"recommended_ctx",
".",
"assert_true",
"(",
"getattr",
"(",
"dataset",
",",
"'comment'",
",",
"''",
")",
"!=",
"''",
",",
"'comment attribute should not be empty if specified'",
")",
"return",
"recommended_ctx",
".",
"to_result",
"(",
")"
] | Check the global recommended attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:id = "" ; //.................................................. RECOMMENDED - Should be a human readable unique identifier for data set. (ACDD)
:naming_authority = "" ; //.................................... RECOMMENDED - Backward URL of institution (for example, gov.noaa.ncei). (ACDD)
:history = "" ; //............................................. RECOMMENDED - Provides an audit trail for modifications to the original data. (ACDD)
:source = "" ; //.............................................. RECOMMENDED - The method of production of the original data. (CF)
:processing_level = "" ; //.................................... RECOMMENDED - Provide a description of the processing or quality control level of the data. (ACDD)
:comment = "" ; //............................................. RECOMMENDED - Provide useful additional information here. (CF)
:acknowledgment = "" ; //...................................... RECOMMENDED - A place to acknowledge various types of support for the project that produced this data. (ACDD)
:license = "" ; //............................................. RECOMMENDED - Describe the restrictions to data access and distribution. (ACDD)
:standard_name_vocabulary = "CF Standard Name Table vNN" ; //.. RECOMMENDED - If using CF standard name attribute for variables. Replace NN with the CF standard name table number (CF)
:date_created = "" ; //........................................ RECOMMENDED - Creation date of this version of the data(netCDF). Use ISO 8601:2004 for date and time. (ACDD)
:creator_name = "" ; //........................................ RECOMMENDED - The name of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_email = "" ; //....................................... RECOMMENDED - The email address of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:creator_url = "" ; //......................................... RECOMMENDED - The URL of the person (or other creator type specified by the creator_type attribute) principally responsible for creating this data. (ACDD)
:institution = "" ; //......................................... RECOMMENDED -The name of the institution principally responsible for originating this data.. An institution attribute can be used for each variable if variables come from more than one institution. (CF/ACDD)
:project = "" ; //............................................. RECOMMENDED - The name of the project(s) principally responsible for originating this data. Multiple projects can be separated by commas. (ACDD)
:publisher_name = "" ; //...................................... RECOMMENDED - The name of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_email = "" ; //..................................... RECOMMENDED - The email address of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:publisher_url = "" ; //....................................... RECOMMENDED - The URL of the person (or other entity specified by the publisher_type attribute) responsible for publishing the data file or product to users, with its current metadata and format. (ACDD)
:geospatial_bounds = "" ; //................................... RECOMMENDED - Describes the data's 2D or 3D geospatial extent in OGC's Well-Known Text (WKT) Geometry format. (ACDD)
:geospatial_bounds_crs = "" ; //............................... RECOMMENDED - The coordinate reference system (CRS) of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_bounds_vertical_crs = "" ; //...................... RECOMMENDED - The vertical coordinate reference system (CRS) for the Z axis of the point coordinates in the geospatial_bounds attribute. (ACDD)
:geospatial_lat_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower latitude limit. (ACDD)
:geospatial_lat_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper latitude limit. (ACDD)
:geospatial_lon_min = 0.0d ; //................................ RECOMMENDED - Describes a simple lower longitude limit. (ACDD)
:geospatial_lon_max = 0.0d ; //................................ RECOMMENDED - Describes a simple upper longitude limit. (ACDD)
:geospatial_vertical_min = 0.0d ; //........................... RECOMMENDED - Describes the numerically smaller vertical limit. (ACDD)
:geospatial_vertical_max = 0.0d ; //........................... RECOMMENDED - Describes the numerically larger vertical limit. (ACDD)
:geospatial_vertical_positive = "" ; //........................ RECOMMENDED - Use "up" or "down". (ACDD)
:time_coverage_start = "" ; //................................. RECOMMENDED - Describes the time of the first data point in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_end = "" ; //................................... RECOMMENDED - Describes the time of the last data point in the data set. Use ISO 8601:2004 for date and time.(ACDD)
:time_coverage_duration = "" ; //.............................. RECOMMENDED - Describes the duration of the data set. Use ISO 8601:2004 for date and time. (ACDD)
:time_coverage_resolution = "" ; //............................ RECOMMENDED - Describes the targeted time period between each value in the data set. Use ISO 8601:2004 for date and time. (ACDD)
:uuid = "" ; //................................................ RECOMMENDED - Machine readable unique identifier for each file. A new uuid is created whenever the file is changed. (NCEI)
:sea_name = "" ; //............................................ RECOMMENDED - The names of the sea in which the data were collected. Use NCEI sea names table. (NCEI) | [
"Check",
"the",
"global",
"recommended",
"attributes",
"for",
"2",
".",
"0",
"templates",
".",
"These",
"go",
"an",
"extra",
"step",
"besides",
"just",
"checking",
"that",
"they",
"exist",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L775-L853 | train |
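The date checks hinge on `parse_datetime` raising `ISO8601Error` — presumably from the `isodate` package, which the surrounding module appears to import. A minimal sketch of that try/except scoring pattern:

```python
from isodate import ISO8601Error, parse_datetime

for attr, value in [('time_coverage_start', '2016-01-01T00:00:00Z'),
                    ('date_created', 'January 1st, 2016')]:
    try:
        parse_datetime(value)               # valid ISO 8601 scores a pass
        print(attr, 'is valid ISO 8601')
    except ISO8601Error:                    # anything else is reported
        print('{} should be ISO-8601, currently: {}'.format(attr, value))
```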
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_base.py | NCEI2_0Check.check_base_suggested_attributes | def check_base_suggested_attributes(self, dataset):
'''
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
'''
suggested_ctx = TestCtx(BaseCheck.LOW, 'Suggested global attributes')
# Do any of the variables define platform ?
platform_name = getattr(dataset, 'platform', '')
suggested_ctx.assert_true(platform_name != '', 'platform should exist and point to a term in :platform_vocabulary.')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
suggested_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
# Parse dates, check for ISO 8601
for attr in ['date_modified', 'date_issued', 'date_metadata_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
suggested_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
suggested_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
names = contributor_name.split(',')
roles = contributor_role.split(',')
suggested_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
suggested_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
return suggested_ctx.to_result() | python | def check_base_suggested_attributes(self, dataset):
'''
Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF)
'''
suggested_ctx = TestCtx(BaseCheck.LOW, 'Suggested global attributes')
# Do any of the variables define platform ?
platform_name = getattr(dataset, 'platform', '')
suggested_ctx.assert_true(platform_name != '', 'platform should exist and point to a term in :platform_vocabulary.')
cdm_data_type = getattr(dataset, 'cdm_data_type', '')
suggested_ctx.assert_true(cdm_data_type.lower() in ['grid', 'image', 'point', 'radial', 'station', 'swath', 'trajectory'],
'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'.format(cdm_data_type))
# Parse dates, check for ISO 8601
for attr in ['date_modified', 'date_issued', 'date_metadata_modified']:
attr_value = getattr(dataset, attr, '')
try:
parse_datetime(attr_value)
suggested_ctx.assert_true(True, '') # Score it True!
except ISO8601Error:
suggested_ctx.assert_true(False, '{} should exist and be ISO-8601 format (example: 2001-01-01T12:00:00Z), currently: {}'.format(attr, attr_value))
units = getattr(dataset, 'geospatial_lat_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_north', 'geospatial_lat_units attribute should be degrees_north: {}'.format(units))
units = getattr(dataset, 'geospatial_lon_units', '').lower()
suggested_ctx.assert_true(units == 'degrees_east', 'geospatial_lon_units attribute should be degrees_east: {}'.format(units))
contributor_name = getattr(dataset, 'contributor_name', '')
contributor_role = getattr(dataset, 'contributor_role', '')
names = contributor_name.split(',')
roles = contributor_role.split(',')
suggested_ctx.assert_true(contributor_name != '', 'contributor_name should exist and not be empty.')
suggested_ctx.assert_true(len(names) == len(roles), 'length of contributor names matches length of roles')
suggested_ctx.assert_true(contributor_role != '', 'contributor_role should exist and not be empty.')
return suggested_ctx.to_result() | [
"def",
"check_base_suggested_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"suggested_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"LOW",
",",
"'Suggested global attributes'",
")",
"# Do any of the variables define platform ?",
"platform_name",
"=",
"getattr",
"(",
"dataset",
",",
"'platform'",
",",
"''",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"platform_name",
"!=",
"''",
",",
"'platform should exist and point to a term in :platform_vocabulary.'",
")",
"cdm_data_type",
"=",
"getattr",
"(",
"dataset",
",",
"'cdm_data_type'",
",",
"''",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"cdm_data_type",
".",
"lower",
"(",
")",
"in",
"[",
"'grid'",
",",
"'image'",
",",
"'point'",
",",
"'radial'",
",",
"'station'",
",",
"'swath'",
",",
"'trajectory'",
"]",
",",
"'cdm_data_type must be one of Grid, Image, Point, Radial, Station, Swath, Trajectory: {}'",
".",
"format",
"(",
"cdm_data_type",
")",
")",
"# Parse dates, check for ISO 8601",
"for",
"attr",
"in",
"[",
"'date_modified'",
",",
"'date_issued'",
",",
"'date_metadata_modified'",
"]",
":",
"attr_value",
"=",
"getattr",
"(",
"dataset",
",",
"attr",
",",
"''",
")",
"try",
":",
"parse_datetime",
"(",
"attr_value",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"True",
",",
"''",
")",
"# Score it True!",
"except",
"ISO8601Error",
":",
"suggested_ctx",
".",
"assert_true",
"(",
"False",
",",
"'{} should exist and be ISO-8601 format (example: PT1M30S), currently: {}'",
".",
"format",
"(",
"attr",
",",
"attr_value",
")",
")",
"units",
"=",
"getattr",
"(",
"dataset",
",",
"'geospatial_lat_units'",
",",
"''",
")",
".",
"lower",
"(",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"units",
"==",
"'degrees_north'",
",",
"'geospatial_lat_units attribute should be degrees_north: {}'",
".",
"format",
"(",
"units",
")",
")",
"units",
"=",
"getattr",
"(",
"dataset",
",",
"'geospatial_lon_units'",
",",
"''",
")",
".",
"lower",
"(",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"units",
"==",
"'degrees_east'",
",",
"'geospatial_lon_units attribute should be degrees_east: {}'",
".",
"format",
"(",
"units",
")",
")",
"contributor_name",
"=",
"getattr",
"(",
"dataset",
",",
"'contributor_name'",
",",
"''",
")",
"contributor_role",
"=",
"getattr",
"(",
"dataset",
",",
"'contributor_role'",
",",
"''",
")",
"names",
"=",
"contributor_role",
".",
"split",
"(",
"','",
")",
"roles",
"=",
"contributor_role",
".",
"split",
"(",
"','",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"contributor_name",
"!=",
"''",
",",
"'contributor_name should exist and not be empty.'",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"len",
"(",
"names",
")",
"==",
"len",
"(",
"roles",
")",
",",
"'length of contributor names matches length of roles'",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"contributor_role",
"!=",
"''",
",",
"'contributor_role should exist and not be empty.'",
")",
"suggested_ctx",
".",
"assert_true",
"(",
"len",
"(",
"names",
")",
"==",
"len",
"(",
"roles",
")",
",",
"'length of contributor names matches length of roles'",
")",
"return",
"suggested_ctx",
".",
"to_result",
"(",
")"
] | Check the global suggested attributes for 2.0 templates. These go an extra step besides
just checking that they exist.
:param netCDF4.Dataset dataset: An open netCDF dataset
:creator_type = "" ; //........................................ SUGGESTED - Specifies type of creator with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:creator_institution = "" ; //................................. SUGGESTED - The institution of the creator; should uniquely identify the creator's institution. (ACDD)
:publisher_type = "" ; //...................................... SUGGESTED - Specifies type of publisher with one of the following: 'person', 'group', 'institution', or 'position'. (ACDD)
:publisher_institution = "" ; //............................... SUGGESTED - The institution that presented the data file or equivalent product to users; should uniquely identify the institution. (ACDD)
:program = "" ; //............................................. SUGGESTED - The overarching program(s) of which the dataset is a part. (ACDD)
:contributor_name = "" ; //.................................... SUGGESTED - The name of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:contributor_role = "" ; //.................................... SUGGESTED - The role of any individuals, projects, or institutions that contributed to the creation of this data. (ACDD)
:geospatial_lat_units = "degrees_north" ; //.................. SUGGESTED - Units for the latitude axis described in "geospatial_lat_min" and "geospatial_lat_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_lon_units = "degrees_east"; //..................... SUGGESTED - Units for the longitude axis described in "geospatial_lon_min" and "geospatial_lon_max" attributes. Use UDUNITS compatible units. (ACDD)
:geospatial_vertical_units = "" ; //........................... SUGGESTED - Units for the vertical axis described in "geospatial_vertical_min" and "geospatial_vertical_max" attributes. The default is EPSG:4979. (ACDD)
:date_modified = "" ; //....................................... SUGGESTED - The date on which the data was last modified. Note that this applies just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_issued = "" ; //......................................... SUGGESTED - The date on which this data (including all modifications) was formally issued (i.e., made available to a wider audience). Note that these apply just to the data, not the metadata. Use ISO 8601:2004 for date and time. (ACDD)
:date_metadata_modified = "" ; //.............................. SUGGESTED - The date on which the metadata was last modified. Use ISO 8601:2004 for date and time. (ACDD)
:product_version = "" ; //..................................... SUGGESTED - Version identifier of the data file or product as assigned by the data creator. (ACDD)
:keywords_vocabulary = "" ; //................................. SUGGESTED - Identifies the controlled keyword vocabulary used to specify the values within the attribute "keywords". Example: 'GCMD:GCMD Keywords' ACDD)
:platform = "" ; //............................................ SUGGESTED - Name of the platform(s) that supported the sensor data used to create this data set or product. Platforms can be of any type, including satellite, ship, station, aircraft or other. (ACDD)
:platform_vocabulary = "" ; //................................. SUGGESTED - Controlled vocabulary for the names used in the "platform" attribute . Example: ‘NASA/GCMD Platform Keywords Version 8.1’ (ACDD)
:instrument = "" ; //.......................................... SUGGESTED - Name of the contributing instrument(s) or sensor(s) used to create this data set or product. (ACDD)
:instrument_vocabulary = "" ; //............................... SUGGESTED - Controlled vocabulary for the names used in the "instrument" attribute. Example: ‘NASA/GCMD Instrument Keywords Version 8.1’ (ACDD)
:cdm_data_type = "Point" ; //.................................. SUGGESTED - The data type, as derived from Unidata's Common Data Model Scientific Data types and understood by THREDDS. (ACDD)
:metadata_link = "" ; //....................................... SUGGESTED - A URL that gives the location of more complete metadata. A persistent URL is recommended for this attribute. (ACDD)
:references = "" ; //.......................................... SUGGESTED - Published or web-based references that describe the data or methods used to produce it. Recommend URIs (such as a URL or DOI) for papers or other references. (CF) | [
"Check",
"the",
"global",
"suggested",
"attributes",
"for",
"2",
".",
"0",
"templates",
".",
"These",
"go",
"an",
"extra",
"step",
"besides",
"just",
"checking",
"that",
"they",
"exist",
"."
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_base.py#L855-L919 | train |
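A minimal driver for the check above, as a sketch only: `NCEIBaseCheck` is an assumed name for a checker class that exposes check_base_suggested_attributes, and the Result layout follows compliance-checker conventions.
from netCDF4 import Dataset
checker = NCEIBaseCheck()  # hypothetical class name; substitute the real checker
with Dataset('profile.nc') as nc:  # any open netCDF dataset
    result = checker.check_base_suggested_attributes(nc)
    # TestCtx.to_result() typically yields a Result whose value is the
    # (passed, total) tally of the assert_true calls made above.
    print(result.value, result.msgs)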
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._configure | def _configure(self):
"""
Configure the core of sirbot
Merge the config with the default core config and configure logging.
The default logging level is `INFO`
"""
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'config.yml'
)
with open(path) as file:
defaultconfig = yaml.load(file)
self.config = merge_dict(self.config, defaultconfig)
if 'logging' in self.config:
logging.config.dictConfig(self.config['logging'])
else:
logging.getLogger('sirbot').setLevel('INFO') | python | def _configure(self):
"""
Configure the core of sirbot
Merge the config with the default core config and configure logging.
The default logging level is `INFO`
"""
path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'config.yml'
)
with open(path) as file:
defaultconfig = yaml.load(file)
self.config = merge_dict(self.config, defaultconfig)
if 'logging' in self.config:
logging.config.dictConfig(self.config['logging'])
else:
logging.getLogger('sirbot').setLevel('INFO') | [
"def",
"_configure",
"(",
"self",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'config.yml'",
")",
"with",
"open",
"(",
"path",
")",
"as",
"file",
":",
"defaultconfig",
"=",
"yaml",
".",
"load",
"(",
"file",
")",
"self",
".",
"config",
"=",
"merge_dict",
"(",
"self",
".",
"config",
",",
"defaultconfig",
")",
"if",
"'logging'",
"in",
"self",
".",
"config",
":",
"logging",
".",
"config",
".",
"dictConfig",
"(",
"self",
".",
"config",
"[",
"'logging'",
"]",
")",
"else",
":",
"logging",
".",
"getLogger",
"(",
"'sirbot'",
")",
".",
"setLevel",
"(",
"'INFO'",
")"
] | Configure the core of sirbot
Merge the config with the default core config and configure logging.
The default logging level is `INFO` | [
"Configure",
"the",
"core",
"of",
"sirbot"
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L64-L83 | train |
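`merge_dict` itself is not shown in this snippet; a plausible recursive merge with the same call shape (an assumption, not sirbot's actual implementation) would be:
def merge_dict(config, default):
    # Assumed semantics: keys present in `config` win, missing keys are
    # filled from `default`, and nested dicts are merged recursively.
    merged = dict(default)
    for key, value in (config or {}).items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_dict(value, merged[key])
        else:
            merged[key] = value
    return merged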
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._import_plugins | def _import_plugins(self) -> None:
"""
Import and register plugins in the plugin manager.
The pluggy library is used as the plugin manager.
"""
logger.debug('Importing plugins')
self._pm = pluggy.PluginManager('sirbot')
self._pm.add_hookspecs(hookspecs)
for plugin in self.config['sirbot']['plugins']:
try:
p = importlib.import_module(plugin)
except (ModuleNotFoundError, ):
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
p = importlib.import_module(plugin)
else:
raise
self._pm.register(p) | python | def _import_plugins(self) -> None:
"""
Import and register plugins in the plugin manager.
The pluggy library is used as the plugin manager.
"""
logger.debug('Importing plugins')
self._pm = pluggy.PluginManager('sirbot')
self._pm.add_hookspecs(hookspecs)
for plugin in self.config['sirbot']['plugins']:
try:
p = importlib.import_module(plugin)
except (ModuleNotFoundError, ):
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
p = importlib.import_module(plugin)
else:
raise
self._pm.register(p) | [
"def",
"_import_plugins",
"(",
"self",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Importing plugins'",
")",
"self",
".",
"_pm",
"=",
"pluggy",
".",
"PluginManager",
"(",
"'sirbot'",
")",
"self",
".",
"_pm",
".",
"add_hookspecs",
"(",
"hookspecs",
")",
"for",
"plugin",
"in",
"self",
".",
"config",
"[",
"'sirbot'",
"]",
"[",
"'plugins'",
"]",
":",
"try",
":",
"p",
"=",
"importlib",
".",
"import_module",
"(",
"plugin",
")",
"except",
"(",
"ModuleNotFoundError",
",",
")",
":",
"if",
"os",
".",
"getcwd",
"(",
")",
"not",
"in",
"sys",
".",
"path",
":",
"sys",
".",
"path",
".",
"append",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
"p",
"=",
"importlib",
".",
"import_module",
"(",
"plugin",
")",
"else",
":",
"raise",
"self",
".",
"_pm",
".",
"register",
"(",
"p",
")"
] | Import and register plugins in the plugin manager.
The pluggy library is used as the plugin manager. | [
"Import",
"and",
"register",
"plugin",
"in",
"the",
"plugin",
"manager",
"."
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L107-L126 | train |
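The try/except in _import_plugins implements a current-working-directory fallback; the same pattern in isolation (the helper name is hypothetical):
import importlib
import os
import sys
def import_with_cwd_fallback(dotted_path):
    # First attempt a normal import; on failure, retry once with the
    # current working directory appended to sys.path, as above.
    try:
        return importlib.import_module(dotted_path)
    except ModuleNotFoundError:
        if os.getcwd() not in sys.path:
            sys.path.append(os.getcwd())
            return importlib.import_module(dotted_path)
        raise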
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._initialize_plugins | def _initialize_plugins(self):
"""
Initialize the plugins
Query the configuration and the plugins for info
(name, registry name, start priority, etc)
"""
logger.debug('Initializing plugins')
plugins = self._pm.hook.plugins(loop=self._loop)
if plugins:
for plugin in plugins:
name = plugin.__name__
registry_name = plugin.__registry__ or plugin.__name__
config = self.config.get(name, {})
priority = config.get('priority', 50)
if priority:
self._plugins[name] = {
'plugin': plugin,
'config': config,
'priority': priority,
'factory': registry_name
}
self._start_priority[priority].append(name)
else:
logger.error('No plugins found') | python | def _initialize_plugins(self):
"""
Initialize the plugins
Query the configuration and the plugins for info
(name, registry name, start priority, etc)
"""
logger.debug('Initializing plugins')
plugins = self._pm.hook.plugins(loop=self._loop)
if plugins:
for plugin in plugins:
name = plugin.__name__
registry_name = plugin.__registry__ or plugin.__name__
config = self.config.get(name, {})
priority = config.get('priority', 50)
if priority:
self._plugins[name] = {
'plugin': plugin,
'config': config,
'priority': priority,
'factory': registry_name
}
self._start_priority[priority].append(name)
else:
logger.error('No plugins found') | [
"def",
"_initialize_plugins",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Initializing plugins'",
")",
"plugins",
"=",
"self",
".",
"_pm",
".",
"hook",
".",
"plugins",
"(",
"loop",
"=",
"self",
".",
"_loop",
")",
"if",
"plugins",
":",
"for",
"plugin",
"in",
"plugins",
":",
"name",
"=",
"plugin",
".",
"__name__",
"registry_name",
"=",
"plugin",
".",
"__registry__",
"or",
"plugin",
".",
"__name__",
"config",
"=",
"self",
".",
"config",
".",
"get",
"(",
"name",
",",
"{",
"}",
")",
"priority",
"=",
"config",
".",
"get",
"(",
"'priority'",
",",
"50",
")",
"if",
"priority",
":",
"self",
".",
"_plugins",
"[",
"name",
"]",
"=",
"{",
"'plugin'",
":",
"plugin",
",",
"'config'",
":",
"config",
",",
"'priority'",
":",
"priority",
",",
"'factory'",
":",
"registry_name",
"}",
"self",
".",
"_start_priority",
"[",
"priority",
"]",
".",
"append",
"(",
"name",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'No plugins found'",
")"
] | Initialize the plugins
Query the configuration and the plugins for info
(name, registry name, start priority, etc) | [
"Initialize",
"the",
"plugins"
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L128-L155 | train |
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._register_factory | def _register_factory(self):
"""
Index the available factories
Query the plugins for a usable factory and register it
"""
for name, info in self._plugins.items():
if info['priority']:
factory = getattr(info['plugin'], 'factory', None)
if callable(factory):
registry[info['factory']] = info['plugin'].factory
registry.freeze() | python | def _register_factory(self):
"""
Index the available factories
Query the plugins for a usable factory and register it
"""
for name, info in self._plugins.items():
if info['priority']:
factory = getattr(info['plugin'], 'factory', None)
if callable(factory):
registry[info['factory']] = info['plugin'].factory
registry.freeze() | [
"def",
"_register_factory",
"(",
"self",
")",
":",
"for",
"name",
",",
"info",
"in",
"self",
".",
"_plugins",
".",
"items",
"(",
")",
":",
"if",
"info",
"[",
"'priority'",
"]",
":",
"factory",
"=",
"getattr",
"(",
"info",
"[",
"'plugin'",
"]",
",",
"'factory'",
",",
"None",
")",
"if",
"callable",
"(",
"factory",
")",
":",
"registry",
"[",
"info",
"[",
"'factory'",
"]",
"]",
"=",
"info",
"[",
"'plugin'",
"]",
".",
"factory",
"registry",
".",
"freeze",
"(",
")"
] | Index the available factories
Query the plugins for a usable factory and register it | [
"Index",
"the",
"available",
"factories"
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L157-L168 | train |
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._configure_plugins | async def _configure_plugins(self) -> None:
"""
Configure the plugins
Asynchronously configure the plugins. Pass them their configuration,
the aiohttp session, the registry and the aiohttp router
"""
logger.debug('Configuring plugins')
funcs = [
info['plugin'].configure(
config=info['config'],
session=self._session,
router=self.app.router
)
for info in self._plugins.values()
]
if funcs:
await asyncio.gather(*funcs, loop=self._loop)
logger.debug('Plugins configured') | python | async def _configure_plugins(self) -> None:
"""
Configure the plugins
Asynchronously configure the plugins. Pass them their configuration,
the aiohttp session and the aiohttp router
"""
logger.debug('Configuring plugins')
funcs = [
info['plugin'].configure(
config=info['config'],
session=self._session,
router=self.app.router
)
for info in self._plugins.values()
]
if funcs:
await asyncio.gather(*funcs, loop=self._loop)
logger.debug('Plugins configured') | [
"async",
"def",
"_configure_plugins",
"(",
"self",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Configuring plugins'",
")",
"funcs",
"=",
"[",
"info",
"[",
"'plugin'",
"]",
".",
"configure",
"(",
"config",
"=",
"info",
"[",
"'config'",
"]",
",",
"session",
"=",
"self",
".",
"_session",
",",
"router",
"=",
"self",
".",
"app",
".",
"router",
")",
"for",
"info",
"in",
"self",
".",
"_plugins",
".",
"values",
"(",
")",
"]",
"if",
"funcs",
":",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"funcs",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"logger",
".",
"debug",
"(",
"'Plugins configured'",
")"
] | Configure the plugins
Asynchronously configure the plugins. Pass them their configuration,
the aiohttp session and the aiohttp router | [
"Configure",
"the",
"plugins"
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L170-L189 | train |
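The gather pattern used above, reduced to a standalone sketch (the plugin objects and their configure signature are stand-ins):
import asyncio
async def configure_all(plugins, config, session, router):
    # One configure() coroutine per plugin, awaited concurrently.
    # gather() propagates the first exception raised by any of them.
    coros = [p.configure(config=config, session=session, router=router)
             for p in plugins]
    if coros:
        await asyncio.gather(*coros)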
pyslackers/sir-bot-a-lot | sirbot/core/core.py | SirBot._start_plugins | async def _start_plugins(self) -> None:
"""
Start the plugins by priority
Start the plugins based on the priority and wait for them to be fully
started before starting the next one. This ensure plugins can use
a previously started one during startup.
"""
logger.debug('Starting plugins')
for priority in sorted(self._start_priority, reverse=True):
logger.debug(
'Starting plugins %s',
', '.join(self._start_priority[priority])
)
for name in self._start_priority[priority]:
plugin = self._plugins[name]
self._tasks[name] = self._loop.create_task(
plugin['plugin'].start()
)
while not all(self._plugins[name]['plugin'].started
for name in self._tasks):
for task in self._tasks.values():
if task.done():
task.result()
await asyncio.sleep(0.2, loop=self._loop)
else:
logger.debug('Plugins %s started',
', '.join(self._start_priority[priority])) | python | async def _start_plugins(self) -> None:
"""
Start the plugins by priority
Start the plugins based on the priority and wait for them to be fully
started before starting the next one. This ensures plugins can use
a previously started one during startup.
"""
logger.debug('Starting plugins')
for priority in sorted(self._start_priority, reverse=True):
logger.debug(
'Starting plugins %s',
', '.join(self._start_priority[priority])
)
for name in self._start_priority[priority]:
plugin = self._plugins[name]
self._tasks[name] = self._loop.create_task(
plugin['plugin'].start()
)
while not all(self._plugins[name]['plugin'].started
for name in self._tasks):
for task in self._tasks.values():
if task.done():
task.result()
await asyncio.sleep(0.2, loop=self._loop)
else:
logger.debug('Plugins %s started',
', '.join(self._start_priority[priority])) | [
"async",
"def",
"_start_plugins",
"(",
"self",
")",
"->",
"None",
":",
"logger",
".",
"debug",
"(",
"'Starting plugins'",
")",
"for",
"priority",
"in",
"sorted",
"(",
"self",
".",
"_start_priority",
",",
"reverse",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"'Starting plugins %s'",
",",
"', '",
".",
"join",
"(",
"self",
".",
"_start_priority",
"[",
"priority",
"]",
")",
")",
"for",
"name",
"in",
"self",
".",
"_start_priority",
"[",
"priority",
"]",
":",
"plugin",
"=",
"self",
".",
"_plugins",
"[",
"name",
"]",
"self",
".",
"_tasks",
"[",
"name",
"]",
"=",
"self",
".",
"_loop",
".",
"create_task",
"(",
"plugin",
"[",
"'plugin'",
"]",
".",
"start",
"(",
")",
")",
"while",
"not",
"all",
"(",
"self",
".",
"_plugins",
"[",
"name",
"]",
"[",
"'plugin'",
"]",
".",
"started",
"for",
"name",
"in",
"self",
".",
"_tasks",
")",
":",
"for",
"task",
"in",
"self",
".",
"_tasks",
".",
"values",
"(",
")",
":",
"if",
"task",
".",
"done",
"(",
")",
":",
"task",
".",
"result",
"(",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"0.2",
",",
"loop",
"=",
"self",
".",
"_loop",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Plugins %s started'",
",",
"', '",
".",
"join",
"(",
"self",
".",
"_start_priority",
"[",
"priority",
"]",
")",
")"
] | Start the plugins by priority
Start the plugins based on the priority and wait for them to be fully
started before starting the next one. This ensures plugins can use
a previously started one during startup. | [
"Start",
"the",
"plugins",
"by",
"priority"
] | 22dfdd6a14d61dbe29423fd131b7a23e618b68d7 | https://github.com/pyslackers/sir-bot-a-lot/blob/22dfdd6a14d61dbe29423fd131b7a23e618b68d7/sirbot/core/core.py#L191-L222 | train |
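The startup loop above polls a `started` flag while surfacing failures from finished tasks; the core of that pattern as a standalone sketch (the name-to-task and name-to-plugin mappings are stand-ins):
import asyncio
async def wait_until_started(tasks, plugins, interval=0.2):
    # tasks: name -> asyncio.Task, plugins: name -> object with a
    # boolean `started` attribute, mirroring the loop above.
    while not all(plugins[name].started for name in tasks):
        for task in tasks.values():
            if task.done():
                task.result()  # re-raise any startup exception
        await asyncio.sleep(interval)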
samghelms/mathviz | mathviz_hopper/src/table.py | Table._create_settings | def _create_settings(self):
"""
Creates the settings object that will be sent
to the frontend visualization
"""
self.settings = {
"columns": [{"Header": s, "accessor": s} for s in self.settings],
"port": self.port,
"docs": construct_trie(self.docs)
} | python | def _create_settings(self):
"""
Creates the settings object that will be sent
to the frontend visualization
"""
self.settings = {
"columns": [{"Header": s, "accessor": s} for s in self.settings],
"port": self.port,
"docs": construct_trie(self.docs)
} | [
"def",
"_create_settings",
"(",
"self",
")",
":",
"self",
".",
"settings",
"=",
"{",
"\"columns\"",
":",
"[",
"{",
"\"Header\"",
":",
"s",
",",
"\"accessor\"",
":",
"s",
"}",
"for",
"s",
"in",
"self",
".",
"settings",
"]",
",",
"\"port\"",
":",
"self",
".",
"port",
",",
"\"docs\"",
":",
"construct_trie",
"(",
"self",
".",
"docs",
")",
"}"
] | Creates the settings object that will be sent
to the frontend visualization | [
"Creates",
"the",
"settings",
"object",
"that",
"will",
"be",
"sent",
"to",
"the",
"frontend",
"vizualization"
] | 30fe89537379faea4de8c8b568ac6e52e4d15353 | https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/table.py#L75-L86 | train |
samghelms/mathviz | mathviz_hopper/src/table.py | Table.run_server | def run_server(self):
"""
Runs a server to handle queries to the index without creating the
javascript table.
"""
app = build_app()
run(app, host='localhost', port=self.port) | python | def run_server(self):
"""
Runs a server to handle queries to the index without creating the
javascript table.
"""
app = build_app()
run(app, host='localhost', port=self.port) | [
"def",
"run_server",
"(",
"self",
")",
":",
"app",
"=",
"build_app",
"(",
")",
"run",
"(",
"app",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"self",
".",
"port",
")"
] | Runs a server to handle queries to the index without creating the
javascript table. | [
"Runs",
"a",
"server",
"to",
"handle",
"queries",
"to",
"the",
"index",
"without",
"creating",
"the",
"javascript",
"table",
"."
] | 30fe89537379faea4de8c8b568ac6e52e4d15353 | https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/table.py#L133-L140 | train |
polyaxon/hestia | hestia/string_utils.py | strip_spaces | def strip_spaces(value, sep=None, join=True):
"""Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
value = value.strip()
value = [v.strip() for v in value.split(sep)]
join_sep = sep or ' '
return join_sep.join(value) if join else value | python | def strip_spaces(value, sep=None, join=True):
"""Cleans trailing whitespaces and replaces also multiple whitespaces with a single space."""
value = value.strip()
value = [v.strip() for v in value.split(sep)]
join_sep = sep or ' '
return join_sep.join(value) if join else value | [
"def",
"strip_spaces",
"(",
"value",
",",
"sep",
"=",
"None",
",",
"join",
"=",
"True",
")",
":",
"value",
"=",
"value",
".",
"strip",
"(",
")",
"value",
"=",
"[",
"v",
".",
"strip",
"(",
")",
"for",
"v",
"in",
"value",
".",
"split",
"(",
"sep",
")",
"]",
"join_sep",
"=",
"sep",
"or",
"' '",
"return",
"join_sep",
".",
"join",
"(",
"value",
")",
"if",
"join",
"else",
"value"
] | Cleans trailing whitespaces and also replaces multiple whitespaces with a single space. | [
"Cleans",
"trailing",
"whitespaces",
"and",
"replaces",
"also",
"multiple",
"whitespaces",
"with",
"a",
"single",
"space",
"."
] | 382ed139cff8bf35c987cfc30a31b72c0d6b808e | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/string_utils.py#L5-L10 | train |
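Usage follows directly from the definition above:
>>> strip_spaces('  hello    world  ')
'hello world'
>>> strip_spaces(' a , b ,c ', sep=',')
'a,b,c'
>>> strip_spaces(' a , b ', sep=',', join=False)
['a', 'b']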
BernardFW/bernard | src/bernard/engine/transition.py | Transition.rank | async def rank(self, request, origin: Optional[Text]) \
-> Tuple[
float,
Optional[BaseTrigger],
Optional[type],
Optional[bool],
]:
"""
Computes the rank of this transition for a given request.
It returns (in order):
- The score (from 0 to 1)
- The trigger instance (if it matched)
- The class of the destination state (if matched)
- The do_not_register flag (if matched)
"""
if self.origin_name == origin:
score = 1.0
elif self.origin_name is None:
score = settings.JUMPING_TRIGGER_PENALTY
else:
return 0.0, None, None, None
trigger = self.factory(request)
rank = await run_or_return(trigger.rank())
score *= self.weight * (rank or 0.0)
return score, trigger, self.dest, self.do_not_register | python | async def rank(self, request, origin: Optional[Text]) \
-> Tuple[
float,
Optional[BaseTrigger],
Optional[type],
Optional[bool],
]:
"""
Computes the rank of this transition for a given request.
It returns (in order):
- The score (from 0 to 1)
- The trigger instance (if it matched)
- The class of the destination state (if matched)
- The do_not_register flag (if matched)
"""
if self.origin_name == origin:
score = 1.0
elif self.origin_name is None:
score = settings.JUMPING_TRIGGER_PENALTY
else:
return 0.0, None, None, None
trigger = self.factory(request)
rank = await run_or_return(trigger.rank())
score *= self.weight * (rank or 0.0)
return score, trigger, self.dest, self.do_not_register | [
"async",
"def",
"rank",
"(",
"self",
",",
"request",
",",
"origin",
":",
"Optional",
"[",
"Text",
"]",
")",
"->",
"Tuple",
"[",
"float",
",",
"Optional",
"[",
"BaseTrigger",
"]",
",",
"Optional",
"[",
"type",
"]",
",",
"Optional",
"[",
"bool",
"]",
",",
"]",
":",
"if",
"self",
".",
"origin_name",
"==",
"origin",
":",
"score",
"=",
"1.0",
"elif",
"self",
".",
"origin_name",
"is",
"None",
":",
"score",
"=",
"settings",
".",
"JUMPING_TRIGGER_PENALTY",
"else",
":",
"return",
"0.0",
",",
"None",
",",
"None",
",",
"None",
"trigger",
"=",
"self",
".",
"factory",
"(",
"request",
")",
"rank",
"=",
"await",
"run_or_return",
"(",
"trigger",
".",
"rank",
"(",
")",
")",
"score",
"*=",
"self",
".",
"weight",
"*",
"(",
"rank",
"or",
"0.0",
")",
"return",
"score",
",",
"trigger",
",",
"self",
".",
"dest",
",",
"self",
".",
"do_not_register"
] | Computes the rank of this transition for a given request.
It returns (in order):
- The score (from 0 to 1)
- The trigger instance (if it matched)
- The class of the destination state (if matched)
- The do_not_register flag (if matched) | [
"Computes",
"the",
"rank",
"of",
"this",
"transition",
"for",
"a",
"given",
"request",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/transition.py#L72-L100 | train |
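For a concrete reading of the scoring above (values chosen for illustration): a transition whose origin matches starts at 1.0, so with weight 1.0 and a trigger rank of 0.8 it scores 1.0 * 1.0 * 0.8 = 0.8; a jumping transition (origin_name is None) starts at settings.JUMPING_TRIGGER_PENALTY instead, and a trigger rank of None contributes 0.0.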
ioos/cc-plugin-ncei | cc_plugin_ncei/ncei_point.py | NCEIPointBase.check_dimensions | def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a point dataset
'''
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
t = util.get_time_variable(dataset)
# Exit prematurely
if not t:
required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
return required_ctx.to_result()
t_dims = dataset.variables[t].dimensions
o = None or (t_dims and t_dims[0])
message = '{} must be a valid point feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_point(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable, o, o)
)
return required_ctx.to_result() | python | def check_dimensions(self, dataset):
'''
Checks that the feature types of this dataset are consistent with a point dataset
'''
required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
t = util.get_time_variable(dataset)
# Exit prematurely
if not t:
required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
return required_ctx.to_result()
t_dims = dataset.variables[t].dimensions
o = None or (t_dims and t_dims[0])
message = '{} must be a valid point feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'
for variable in util.get_geophysical_variables(dataset):
is_valid = util.is_point(dataset, variable)
required_ctx.assert_true(
is_valid,
message.format(variable, o, o)
)
return required_ctx.to_result() | [
"def",
"check_dimensions",
"(",
"self",
",",
"dataset",
")",
":",
"required_ctx",
"=",
"TestCtx",
"(",
"BaseCheck",
".",
"HIGH",
",",
"'All geophysical variables are point feature types'",
")",
"t",
"=",
"util",
".",
"get_time_variable",
"(",
"dataset",
")",
"# Exit prematurely",
"if",
"not",
"t",
":",
"required_ctx",
".",
"assert_true",
"(",
"False",
",",
"'A dimension representing time is required for point feature types'",
")",
"return",
"required_ctx",
".",
"to_result",
"(",
")",
"t_dims",
"=",
"dataset",
".",
"variables",
"[",
"t",
"]",
".",
"dimensions",
"o",
"=",
"None",
"or",
"(",
"t_dims",
"and",
"t_dims",
"[",
"0",
"]",
")",
"message",
"=",
"'{} must be a valid timeseries feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'",
"for",
"variable",
"in",
"util",
".",
"get_geophysical_variables",
"(",
"dataset",
")",
":",
"is_valid",
"=",
"util",
".",
"is_point",
"(",
"dataset",
",",
"variable",
")",
"required_ctx",
".",
"assert_true",
"(",
"is_valid",
",",
"message",
".",
"format",
"(",
"variable",
",",
"o",
",",
"o",
")",
")",
"return",
"required_ctx",
".",
"to_result",
"(",
")"
] | Checks that the feature types of this dataset are consistent with a point dataset | [
"Checks",
"that",
"the",
"feature",
"types",
"of",
"this",
"dataset",
"are",
"consitent",
"with",
"a",
"point",
"dataset"
] | 963fefd7fa43afd32657ac4c36aad4ddb4c25acf | https://github.com/ioos/cc-plugin-ncei/blob/963fefd7fa43afd32657ac4c36aad4ddb4c25acf/cc_plugin_ncei/ncei_point.py#L19-L40 | train |
BernardFW/bernard | src/bernard/engine/platform.py | Platform.settings | def settings(cls):
"""
Find the settings for the current class inside the platforms
configuration.
"""
from bernard.platforms.management import get_platform_settings
for platform in get_platform_settings():
candidate = import_class(platform['class'])
if candidate == cls:
return platform.get('settings', {}) | python | def settings(cls):
"""
Find the settings for the current class inside the platforms
configuration.
"""
from bernard.platforms.management import get_platform_settings
for platform in get_platform_settings():
candidate = import_class(platform['class'])
if candidate == cls:
return platform.get('settings', {}) | [
"def",
"settings",
"(",
"cls",
")",
":",
"from",
"bernard",
".",
"platforms",
".",
"management",
"import",
"get_platform_settings",
"for",
"platform",
"in",
"get_platform_settings",
"(",
")",
":",
"candidate",
"=",
"import_class",
"(",
"platform",
"[",
"'class'",
"]",
")",
"if",
"candidate",
"==",
"cls",
":",
"return",
"platform",
".",
"get",
"(",
"'settings'",
",",
"{",
"}",
")"
] | Find the settings for the current class inside the platforms
configuration. | [
"Find",
"the",
"settings",
"for",
"the",
"current",
"class",
"inside",
"the",
"platforms",
"configuration",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/platform.py#L72-L83 | train |
BernardFW/bernard | src/bernard/engine/platform.py | Platform._notify | async def _notify(self, message: BaseMessage, responder: Responder):
"""
Notify all callbacks that a message was received.
"""
for cb in self._listeners:
coro = cb(message, responder, self.fsm_creates_task)
if not self.fsm_creates_task:
self._register = await coro | python | async def _notify(self, message: BaseMessage, responder: Responder):
"""
Notify all callbacks that a message was received.
"""
for cb in self._listeners:
coro = cb(message, responder, self.fsm_creates_task)
if not self.fsm_creates_task:
self._register = await coro | [
"async",
"def",
"_notify",
"(",
"self",
",",
"message",
":",
"BaseMessage",
",",
"responder",
":",
"Responder",
")",
":",
"for",
"cb",
"in",
"self",
".",
"_listeners",
":",
"coro",
"=",
"cb",
"(",
"message",
",",
"responder",
",",
"self",
".",
"fsm_creates_task",
")",
"if",
"not",
"self",
".",
"fsm_creates_task",
":",
"self",
".",
"_register",
"=",
"await",
"coro"
] | Notify all callbacks that a message was received. | [
"Notify",
"all",
"callbacks",
"that",
"a",
"message",
"was",
"received",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/platform.py#L104-L112 | train |
BernardFW/bernard | src/bernard/engine/platform.py | SimplePlatform.async_init | async def async_init(self):
"""
During async init we just need to create an HTTP session so we can keep
outgoing connections to the platform alive.
"""
self.session = aiohttp.ClientSession()
asyncio.get_event_loop().create_task(self._deferred_init()) | python | async def async_init(self):
"""
During async init we just need to create an HTTP session so we can keep
outgoing connections to the platform alive.
"""
self.session = aiohttp.ClientSession()
asyncio.get_event_loop().create_task(self._deferred_init()) | [
"async",
"def",
"async_init",
"(",
"self",
")",
":",
"self",
".",
"session",
"=",
"aiohttp",
".",
"ClientSession",
"(",
")",
"asyncio",
".",
"get_event_loop",
"(",
")",
".",
"create_task",
"(",
"self",
".",
"_deferred_init",
"(",
")",
")"
] | During async init we just need to create an HTTP session so we can keep
outgoing connections to the platform alive. | [
"During",
"async",
"init",
"we",
"just",
"need",
"to",
"create",
"a",
"HTTP",
"session",
"so",
"we",
"can",
"keep",
"outgoing",
"connexions",
"to",
"the",
"platform",
"alive",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/platform.py#L177-L183 | train |
BernardFW/bernard | src/bernard/engine/platform.py | SimplePlatform.accept | def accept(self, stack: Stack):
"""
Checks that the stack can be accepted according to the `PATTERNS`.
If the pattern is found, then its name is stored in the `annotation`
attribute of the stack.
"""
for name, pattern in self.PATTERNS.items():
if stack.match_exp(pattern):
stack.annotation = name
return True
return False | python | def accept(self, stack: Stack):
"""
Checks that the stack can be accepted according to the `PATTERNS`.
If the pattern is found, then its name is stored in the `annotation`
attribute of the stack.
"""
for name, pattern in self.PATTERNS.items():
if stack.match_exp(pattern):
stack.annotation = name
return True
return False | [
"def",
"accept",
"(",
"self",
",",
"stack",
":",
"Stack",
")",
":",
"for",
"name",
",",
"pattern",
"in",
"self",
".",
"PATTERNS",
".",
"items",
"(",
")",
":",
"if",
"stack",
".",
"match_exp",
"(",
"pattern",
")",
":",
"stack",
".",
"annotation",
"=",
"name",
"return",
"True",
"return",
"False"
] | Checks that the stack can be accepted according to the `PATTERNS`.
If the pattern is found, then its name is stored in the `annotation`
attribute of the stack. | [
"Checks",
"that",
"the",
"stack",
"can",
"be",
"accepted",
"according",
"to",
"the",
"PATTERNS",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/platform.py#L192-L204 | train |
BernardFW/bernard | src/bernard/engine/platform.py | SimplePlatform.send | def send(self, request: Request, stack: Stack) -> Coroutine:
"""
Send a stack to the platform.
Actually this will delegate to one of the `_send_*` functions depending
on what the stack looks like.
"""
if stack.annotation not in self.PATTERNS:
if not self.accept(stack):
raise UnacceptableStack('Cannot accept stack {}'.format(stack))
func = getattr(self, '_send_' + stack.annotation)
return func(request, stack) | python | def send(self, request: Request, stack: Stack) -> Coroutine:
"""
Send a stack to the platform.
Actually this will delegate to one of the `_send_*` functions depending
on what the stack looks like.
"""
if stack.annotation not in self.PATTERNS:
if not self.accept(stack):
raise UnacceptableStack('Cannot accept stack {}'.format(stack))
func = getattr(self, '_send_' + stack.annotation)
return func(request, stack) | [
"def",
"send",
"(",
"self",
",",
"request",
":",
"Request",
",",
"stack",
":",
"Stack",
")",
"->",
"Coroutine",
":",
"if",
"stack",
".",
"annotation",
"not",
"in",
"self",
".",
"PATTERNS",
":",
"if",
"not",
"self",
".",
"accept",
"(",
"stack",
")",
":",
"raise",
"UnacceptableStack",
"(",
"'Cannot accept stack {}'",
".",
"format",
"(",
"stack",
")",
")",
"func",
"=",
"getattr",
"(",
"self",
",",
"'_send_'",
"+",
"stack",
".",
"annotation",
")",
"return",
"func",
"(",
"request",
",",
"stack",
")"
] | Send a stack to the platform.
Actually this will delegate to one of the `_send_*` functions depending
on what the stack looks like. | [
"Send",
"a",
"stack",
"to",
"the",
"platform",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/platform.py#L206-L219 | train |
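The name-based dispatch in send() — an annotation such as 'text' routed to _send_text — is easy to exercise in isolation; a toy sketch of the same convention (class and handler are illustrative):
class Dispatcher:
    # Mirrors send(): the stack annotation selects a _send_* method.
    def send(self, annotation, payload):
        func = getattr(self, '_send_' + annotation)
        return func(payload)
    def _send_text(self, payload):  # illustrative handler
        return ('text', payload)
Here Dispatcher().send('text', 'hi') returns ('text', 'hi').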
polyaxon/hestia | hestia/units.py | to_unit_memory | def to_unit_memory(number):
"""Creates a string representation of memory size given `number`."""
kb = 1024
number /= kb
if number < 100:
return '{} Kb'.format(round(number, 2))
number /= kb
if number < 300:
return '{} Mb'.format(round(number, 2))
number /= kb
return '{} Gb'.format(round(number, 2)) | python | def to_unit_memory(number):
"""Creates a string representation of memory size given `number`."""
kb = 1024
number /= kb
if number < 100:
return '{} Kb'.format(round(number, 2))
number /= kb
if number < 300:
return '{} Mb'.format(round(number, 2))
number /= kb
return '{} Gb'.format(round(number, 2)) | [
"def",
"to_unit_memory",
"(",
"number",
")",
":",
"kb",
"=",
"1024",
"number",
"/=",
"kb",
"if",
"number",
"<",
"100",
":",
"return",
"'{} Kb'",
".",
"format",
"(",
"round",
"(",
"number",
",",
"2",
")",
")",
"number",
"/=",
"kb",
"if",
"number",
"<",
"300",
":",
"return",
"'{} Mb'",
".",
"format",
"(",
"round",
"(",
"number",
",",
"2",
")",
")",
"number",
"/=",
"kb",
"return",
"'{} Gb'",
".",
"format",
"(",
"round",
"(",
"number",
",",
"2",
")",
")"
] | Creates a string representation of memory size given `number`. | [
"Creates",
"a",
"string",
"representation",
"of",
"memory",
"size",
"given",
"number",
"."
] | 382ed139cff8bf35c987cfc30a31b72c0d6b808e | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/units.py#L5-L20 | train |
polyaxon/hestia | hestia/units.py | to_percentage | def to_percentage(number, rounding=2):
"""Creates a percentage string representation from the given `number`. The
number is multiplied by 100 before adding a '%' character.
Raises `ValueError` if `number` cannot be converted to a number.
"""
number = float(number) * 100
number_as_int = int(number)
rounded = round(number, rounding)
return '{}%'.format(number_as_int if number_as_int == rounded else rounded) | python | def to_percentage(number, rounding=2):
"""Creates a percentage string representation from the given `number`. The
number is multiplied by 100 before adding a '%' character.
Raises `ValueError` if `number` cannot be converted to a number.
"""
number = float(number) * 100
number_as_int = int(number)
rounded = round(number, rounding)
return '{}%'.format(number_as_int if number_as_int == rounded else rounded) | [
"def",
"to_percentage",
"(",
"number",
",",
"rounding",
"=",
"2",
")",
":",
"number",
"=",
"float",
"(",
"number",
")",
"*",
"100",
"number_as_int",
"=",
"int",
"(",
"number",
")",
"rounded",
"=",
"round",
"(",
"number",
",",
"rounding",
")",
"return",
"'{}%'",
".",
"format",
"(",
"number_as_int",
"if",
"number_as_int",
"==",
"rounded",
"else",
"rounded",
")"
] | Creates a percentage string representation from the given `number`. The
number is multiplied by 100 before adding a '%' character.
Raises `ValueError` if `number` cannot be converted to a number. | [
"Creates",
"a",
"percentage",
"string",
"representation",
"from",
"the",
"given",
"number",
".",
"The",
"number",
"is",
"multiplied",
"by",
"100",
"before",
"adding",
"a",
"%",
"character",
"."
] | 382ed139cff8bf35c987cfc30a31b72c0d6b808e | https://github.com/polyaxon/hestia/blob/382ed139cff8bf35c987cfc30a31b72c0d6b808e/hestia/units.py#L23-L33 | train |
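Worked values for the function above:
>>> to_percentage(0.25)
'25%'
>>> to_percentage(1 / 3)
'33.33%'
>>> to_percentage('0.4999', rounding=1)
'50.0%'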
pyQode/pyqode.cobol | pyqode/cobol/widgets/pic_offsets.py | PicOffsetsTable.set_editor | def set_editor(self, editor):
"""
Sets the associated editor, when the editor's offset calculator mode
emits the signal pic_infos_available, the table is automatically
refreshed.
You can also refresh manually by calling :meth:`update_pic_infos`.
"""
if self._editor is not None:
try:
self._editor.offset_calculator.pic_infos_available.disconnect(
self._update)
except (AttributeError, RuntimeError, ReferenceError):
# see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/89
pass
self._editor = weakref.proxy(editor) if editor else editor
try:
self._editor.offset_calculator.pic_infos_available.connect(
self._update)
except AttributeError:
pass | python | def set_editor(self, editor):
"""
Sets the associated editor, when the editor's offset calculator mode
emits the signal pic_infos_available, the table is automatically
refreshed.
You can also refresh manually by calling :meth:`update_pic_infos`.
"""
if self._editor is not None:
try:
self._editor.offset_calculator.pic_infos_available.disconnect(
self._update)
except (AttributeError, RuntimeError, ReferenceError):
# see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/89
pass
self._editor = weakref.proxy(editor) if editor else editor
try:
self._editor.offset_calculator.pic_infos_available.connect(
self._update)
except AttributeError:
pass | [
"def",
"set_editor",
"(",
"self",
",",
"editor",
")",
":",
"if",
"self",
".",
"_editor",
"is",
"not",
"None",
":",
"try",
":",
"self",
".",
"_editor",
".",
"offset_calculator",
".",
"pic_infos_available",
".",
"disconnect",
"(",
"self",
".",
"_update",
")",
"except",
"(",
"AttributeError",
",",
"RuntimeError",
",",
"ReferenceError",
")",
":",
"# see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/89",
"pass",
"self",
".",
"_editor",
"=",
"weakref",
".",
"proxy",
"(",
"editor",
")",
"if",
"editor",
"else",
"editor",
"try",
":",
"self",
".",
"_editor",
".",
"offset_calculator",
".",
"pic_infos_available",
".",
"connect",
"(",
"self",
".",
"_update",
")",
"except",
"AttributeError",
":",
"pass"
] | Sets the associated editor, when the editor's offset calculator mode
emits the signal pic_infos_available, the table is automatically
refreshed.
You can also refresh manually by calling :meth:`update_pic_infos`. | [
"Sets",
"the",
"associated",
"editor",
"when",
"the",
"editor",
"s",
"offset",
"calculator",
"mode",
"emit",
"the",
"signal",
"pic_infos_available",
"the",
"table",
"is",
"automatically",
"refreshed",
"."
] | eedae4e320a4b2d0c44abb2c3061091321648fb7 | https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/widgets/pic_offsets.py#L25-L45 | train |
BernardFW/bernard | src/bernard/conf/utils.py | patch_conf | def patch_conf(settings_patch=None, settings_file=None):
"""
Reload the configuration from scratch. Only the default config is loaded,
not the environment-specified config.
Then the specified patch is applied.
This is for unit tests only!
:param settings_patch: Custom configuration values to insert
:param settings_file: Custom settings file to read
"""
if settings_patch is None:
settings_patch = {}
reload_config()
os.environ[ENVIRONMENT_VARIABLE] = settings_file if settings_file else ''
from bernard.conf import settings as l_settings
# noinspection PyProtectedMember
r_settings = l_settings._settings
r_settings.update(settings_patch)
if 'bernard.i18n' in modules:
from bernard.i18n import translate, intents
translate._regenerate_word_dict()
intents._refresh_intents_db()
yield | python | def patch_conf(settings_patch=None, settings_file=None):
"""
Reload the configuration from scratch. Only the default config is loaded,
not the environment-specified config.
Then the specified patch is applied.
This is for unit tests only!
:param settings_patch: Custom configuration values to insert
:param settings_file: Custom settings file to read
"""
if settings_patch is None:
settings_patch = {}
reload_config()
os.environ[ENVIRONMENT_VARIABLE] = settings_file if settings_file else ''
from bernard.conf import settings as l_settings
# noinspection PyProtectedMember
r_settings = l_settings._settings
r_settings.update(settings_patch)
if 'bernard.i18n' in modules:
from bernard.i18n import translate, intents
translate._regenerate_word_dict()
intents._refresh_intents_db()
yield | [
"def",
"patch_conf",
"(",
"settings_patch",
"=",
"None",
",",
"settings_file",
"=",
"None",
")",
":",
"if",
"settings_patch",
"is",
"None",
":",
"settings_patch",
"=",
"{",
"}",
"reload_config",
"(",
")",
"os",
".",
"environ",
"[",
"ENVIRONMENT_VARIABLE",
"]",
"=",
"settings_file",
"if",
"settings_file",
"else",
"''",
"from",
"bernard",
".",
"conf",
"import",
"settings",
"as",
"l_settings",
"# noinspection PyProtectedMember",
"r_settings",
"=",
"l_settings",
".",
"_settings",
"r_settings",
".",
"update",
"(",
"settings_patch",
")",
"if",
"'bernard.i18n'",
"in",
"modules",
":",
"from",
"bernard",
".",
"i18n",
"import",
"translate",
",",
"intents",
"translate",
".",
"_regenerate_word_dict",
"(",
")",
"intents",
".",
"_refresh_intents_db",
"(",
")",
"yield"
] | Reload the configuration from scratch. Only the default config is loaded,
not the environment-specified config.
Then the specified patch is applied.
This is for unit tests only!
:param settings_patch: Custom configuration values to insert
:param settings_file: Custom settings file to read | [
"Reload",
"the",
"configuration",
"form",
"scratch",
".",
"Only",
"the",
"default",
"config",
"is",
"loaded",
"not",
"the",
"environment",
"-",
"specified",
"config",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/conf/utils.py#L30-L59 | train |
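Since patch_conf yields, it is presumably wrapped as a context manager (the @contextlib.contextmanager decorator is not visible in this snippet); typical test usage, with an illustrative setting key:
def test_uses_patched_settings():
    # 'MY_FLAG' is a made-up key for illustration only.
    with patch_conf(settings_patch={'MY_FLAG': True}):
        from bernard.conf import settings
        assert settings.MY_FLAG is True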
giancosta86/Iris | info/gianlucacosta/iris/ioc.py | Container.resolve | def resolve(self, key):
"""
Resolves the requested key to an object instance, raising a KeyError if the key is missing
"""
registration = self._registrations.get(key)
if registration is None:
raise KeyError("Unknown key: '{0}'".format(key))
return registration.resolve(self, key) | python | def resolve(self, key):
"""
Resolves the requested key to an object instance, raising a KeyError if the key is missing
"""
registration = self._registrations.get(key)
if registration is None:
raise KeyError("Unknown key: '{0}'".format(key))
return registration.resolve(self, key) | [
"def",
"resolve",
"(",
"self",
",",
"key",
")",
":",
"registration",
"=",
"self",
".",
"_registrations",
".",
"get",
"(",
"key",
")",
"if",
"registration",
"is",
"None",
":",
"raise",
"KeyError",
"(",
"\"Unknown key: '{0}'\"",
".",
"format",
"(",
"key",
")",
")",
"return",
"registration",
".",
"resolve",
"(",
"self",
",",
"key",
")"
] | Resolves the requested key to an object instance, raising a KeyError if the key is missing | [
"Resolves",
"the",
"requested",
"key",
"to",
"an",
"object",
"instance",
"raising",
"a",
"KeyError",
"if",
"the",
"key",
"is",
"missing"
] | b3d92cca5cce3653519bd032346b211c46a57d05 | https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/ioc.py#L139-L148 | train |
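Because a missing key raises KeyError, lookups can be guarded the usual way (the key name is illustrative):
try:
    service = container.resolve('mailService')  # illustrative key
except KeyError:
    service = None  # nothing registered under that key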
giancosta86/Iris | info/gianlucacosta/iris/ioc.py | Container.dispose | def dispose(self):
"""
Disposes every performed registration; the container can then be used again
"""
for registration in self._registrations.values():
registration.dispose()
self._registrations = {} | python | def dispose(self):
"""
Disposes every performed registration; the container can then be used again
"""
for registration in self._registrations.values():
registration.dispose()
self._registrations = {} | [
"def",
"dispose",
"(",
"self",
")",
":",
"for",
"registration",
"in",
"self",
".",
"_registrations",
".",
"values",
"(",
")",
":",
"registration",
".",
"dispose",
"(",
")",
"self",
".",
"_registrations",
"=",
"{",
"}"
] | Disposes every performed registration; the container can then be used again | [
"Disposes",
"every",
"performed",
"registration",
";",
"the",
"container",
"can",
"then",
"be",
"used",
"again"
] | b3d92cca5cce3653519bd032346b211c46a57d05 | https://github.com/giancosta86/Iris/blob/b3d92cca5cce3653519bd032346b211c46a57d05/info/gianlucacosta/iris/ioc.py#L151-L158 | train |
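A hedged usage sketch for the two `Container` methods above; construction and the registration call are assumptions, since only `resolve` and `dispose` appear in these records:

    container = Container()
    container.register_instance('db', object())  # assumed registration API, not shown here
    db = container.resolve('db')                  # returns the registered object
    # container.resolve('missing')                # would raise KeyError("Unknown key: 'missing'")
    container.dispose()                           # clears all registrations; container is reusable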
reanahub/reana-db | reana_db/utils.py | build_workspace_path | def build_workspace_path(user_id, workflow_id=None):
"""Build user's workspace relative path.
:param user_id: Owner of the workspace.
:param workflow_id: Optional parameter, if provided gives the path to the
workflow workspace instead of just the path to the user workspace.
:return: String that represents the workspace relative path.
i.e. users/0000/workflows/0034
"""
workspace_path = os.path.join('users', str(user_id), 'workflows')
if workflow_id:
workspace_path = os.path.join(workspace_path, str(workflow_id))
return workspace_path | python | def build_workspace_path(user_id, workflow_id=None):
"""Build user's workspace relative path.
:param user_id: Owner of the workspace.
:param workflow_id: Optional parameter, if provided gives the path to the
workflow workspace instead of just the path to the user workspace.
:return: String that represents the workspace relative path.
i.e. users/0000/workflows/0034
"""
workspace_path = os.path.join('users', str(user_id), 'workflows')
if workflow_id:
workspace_path = os.path.join(workspace_path, str(workflow_id))
return workspace_path | [
"def",
"build_workspace_path",
"(",
"user_id",
",",
"workflow_id",
"=",
"None",
")",
":",
"workspace_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'users'",
",",
"str",
"(",
"user_id",
")",
",",
"'workflows'",
")",
"if",
"workflow_id",
":",
"workspace_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"workspace_path",
",",
"str",
"(",
"workflow_id",
")",
")",
"return",
"workspace_path"
] | Build user's workspace relative path.
:param user_id: Owner of the workspace.
:param workflow_id: Optional parameter, if provided gives the path to the
workflow workspace instead of just the path to the user workspace.
:return: String that represents the workspace relative path.
i.e. users/0000/workflows/0034 | [
"Build",
"user",
"s",
"workspace",
"relative",
"path",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/utils.py#L14-L27 | train |
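Since the function is pure string joining via os.path.join, its behaviour is easy to check; note that str() does not zero-pad, so the 'users/0000/workflows/0034' form in the docstring presumes already-padded identifiers are passed in:

    build_workspace_path(7)                  # -> 'users/7/workflows'
    build_workspace_path(7, workflow_id=34)  # -> 'users/7/workflows/34'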
reanahub/reana-db | reana_db/utils.py | _get_workflow_with_uuid_or_name | def _get_workflow_with_uuid_or_name(uuid_or_name, user_uuid):
"""Get Workflow from database with uuid or name.
:param uuid_or_name: String representing a valid UUIDv4 or valid
Workflow name. Valid name contains only ASCII alphanumerics.
Name might be in format 'reana.workflow.123' with arbitrary
number of dot-delimited substrings, where last substring specifies
the run number of the workflow this workflow name refers to.
If name does not contain a valid run number, but it is a valid name,
workflow with latest run number of all the workflows with this name
is returned.
:type uuid_or_name: String
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
# Check existence
if not uuid_or_name:
raise ValueError('No Workflow was specified.')
# Check validity
try:
uuid_or_name.encode('ascii')
except UnicodeEncodeError:
# `workflow_name` contains something else than just ASCII.
raise ValueError('Workflow name {} is not valid.'.format(uuid_or_name))
# Check if UUIDv4
try:
# is_uuid = UUID(uuid_or_name, version=4)
is_uuid = UUID('{' + uuid_or_name + '}', version=4)
except (TypeError, ValueError):
is_uuid = None
if is_uuid:
# `uuid_or_name` is an UUIDv4.
# Search with it since it is expected to be unique.
return _get_workflow_by_uuid(uuid_or_name)
else:
        # `uuid_or_name` is not an UUIDv4. Expect it is a name.
        # Expect name might be in format 'reana.workflow.123' with arbitrary
        # number of dot-delimited substrings, where last substring specifies
# the run_number of the workflow this workflow name refers to.
# Possible candidates for names are e.g. :
# 'workflow_name' -> ValueError
# 'workflow.name' -> True, True
# 'workflow.name.123' -> True, True
# '123.' -> True, False
# '' -> ValueError
# '.123' -> False, True
# '..' -> False, False
# '123.12' -> True, True
# '123.12.' -> True, False
# Try to split the dot-separated string.
try:
workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
except ValueError:
# Couldn't split. Probably not a dot-separated string.
# -> Search with `uuid_or_name`
return _get_workflow_by_name(uuid_or_name, user_uuid)
# Check if `run_number` was specified
if not run_number:
# No `run_number` specified.
# -> Search by `workflow_name`
return _get_workflow_by_name(workflow_name, user_uuid)
# `run_number` was specified.
# Check `run_number` is valid.
if not run_number.isdigit():
# `uuid_or_name` was split, so it is a dot-separated string
# but it didn't contain a valid `run_number`.
# Assume that this dot-separated string is the name of
# the workflow and search with it.
return _get_workflow_by_name(uuid_or_name, user_uuid)
# `run_number` is valid.
# Search by `run_number` since it is a primary key.
workflow = Workflow.query.filter(
Workflow.name == workflow_name,
Workflow.run_number == run_number,
Workflow.owner_id == user_uuid).\
one_or_none()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_name, run_number))
return workflow | python | def _get_workflow_with_uuid_or_name(uuid_or_name, user_uuid):
"""Get Workflow from database with uuid or name.
:param uuid_or_name: String representing a valid UUIDv4 or valid
Workflow name. Valid name contains only ASCII alphanumerics.
Name might be in format 'reana.workflow.123' with arbitrary
number of dot-delimited substrings, where last substring specifies
the run number of the workflow this workflow name refers to.
If name does not contain a valid run number, but it is a valid name,
workflow with latest run number of all the workflows with this name
is returned.
:type uuid_or_name: String
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
# Check existence
if not uuid_or_name:
raise ValueError('No Workflow was specified.')
# Check validity
try:
uuid_or_name.encode('ascii')
except UnicodeEncodeError:
# `workflow_name` contains something else than just ASCII.
raise ValueError('Workflow name {} is not valid.'.format(uuid_or_name))
# Check if UUIDv4
try:
# is_uuid = UUID(uuid_or_name, version=4)
is_uuid = UUID('{' + uuid_or_name + '}', version=4)
except (TypeError, ValueError):
is_uuid = None
if is_uuid:
# `uuid_or_name` is an UUIDv4.
# Search with it since it is expected to be unique.
return _get_workflow_by_uuid(uuid_or_name)
else:
        # `uuid_or_name` is not an UUIDv4. Expect it is a name.
        # Expect name might be in format 'reana.workflow.123' with arbitrary
        # number of dot-delimited substrings, where last substring specifies
# the run_number of the workflow this workflow name refers to.
# Possible candidates for names are e.g. :
# 'workflow_name' -> ValueError
# 'workflow.name' -> True, True
# 'workflow.name.123' -> True, True
# '123.' -> True, False
# '' -> ValueError
# '.123' -> False, True
# '..' -> False, False
# '123.12' -> True, True
# '123.12.' -> True, False
# Try to split the dot-separated string.
try:
workflow_name, run_number = uuid_or_name.rsplit('.', maxsplit=1)
except ValueError:
# Couldn't split. Probably not a dot-separated string.
# -> Search with `uuid_or_name`
return _get_workflow_by_name(uuid_or_name, user_uuid)
# Check if `run_number` was specified
if not run_number:
# No `run_number` specified.
# -> Search by `workflow_name`
return _get_workflow_by_name(workflow_name, user_uuid)
# `run_number` was specified.
# Check `run_number` is valid.
if not run_number.isdigit():
# `uuid_or_name` was split, so it is a dot-separated string
# but it didn't contain a valid `run_number`.
# Assume that this dot-separated string is the name of
# the workflow and search with it.
return _get_workflow_by_name(uuid_or_name, user_uuid)
# `run_number` is valid.
# Search by `run_number` since it is a primary key.
workflow = Workflow.query.filter(
Workflow.name == workflow_name,
Workflow.run_number == run_number,
Workflow.owner_id == user_uuid).\
one_or_none()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_name, run_number))
return workflow | [
"def",
"_get_workflow_with_uuid_or_name",
"(",
"uuid_or_name",
",",
"user_uuid",
")",
":",
"from",
"reana_db",
".",
"models",
"import",
"Workflow",
"# Check existence",
"if",
"not",
"uuid_or_name",
":",
"raise",
"ValueError",
"(",
"'No Workflow was specified.'",
")",
"# Check validity",
"try",
":",
"uuid_or_name",
".",
"encode",
"(",
"'ascii'",
")",
"except",
"UnicodeEncodeError",
":",
"# `workflow_name` contains something else than just ASCII.",
"raise",
"ValueError",
"(",
"'Workflow name {} is not valid.'",
".",
"format",
"(",
"uuid_or_name",
")",
")",
"# Check if UUIDv4",
"try",
":",
"# is_uuid = UUID(uuid_or_name, version=4)",
"is_uuid",
"=",
"UUID",
"(",
"'{'",
"+",
"uuid_or_name",
"+",
"'}'",
",",
"version",
"=",
"4",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"is_uuid",
"=",
"None",
"if",
"is_uuid",
":",
"# `uuid_or_name` is an UUIDv4.",
"# Search with it since it is expected to be unique.",
"return",
"_get_workflow_by_uuid",
"(",
"uuid_or_name",
")",
"else",
":",
"# `uuid_or_name` is not and UUIDv4. Expect it is a name.",
"# Expect name might be in format 'reana.workflow.123' with arbitrary",
"# number of dot-delimited substring, where last substring specifies",
"# the run_number of the workflow this workflow name refers to.",
"# Possible candidates for names are e.g. :",
"# 'workflow_name' -> ValueError",
"# 'workflow.name' -> True, True",
"# 'workflow.name.123' -> True, True",
"# '123.' -> True, False",
"# '' -> ValueError",
"# '.123' -> False, True",
"# '..' -> False, False",
"# '123.12' -> True, True",
"# '123.12.' -> True, False",
"# Try to split the dot-separated string.",
"try",
":",
"workflow_name",
",",
"run_number",
"=",
"uuid_or_name",
".",
"rsplit",
"(",
"'.'",
",",
"maxsplit",
"=",
"1",
")",
"except",
"ValueError",
":",
"# Couldn't split. Probably not a dot-separated string.",
"# -> Search with `uuid_or_name`",
"return",
"_get_workflow_by_name",
"(",
"uuid_or_name",
",",
"user_uuid",
")",
"# Check if `run_number` was specified",
"if",
"not",
"run_number",
":",
"# No `run_number` specified.",
"# -> Search by `workflow_name`",
"return",
"_get_workflow_by_name",
"(",
"workflow_name",
",",
"user_uuid",
")",
"# `run_number` was specified.",
"# Check `run_number` is valid.",
"if",
"not",
"run_number",
".",
"isdigit",
"(",
")",
":",
"# `uuid_or_name` was split, so it is a dot-separated string",
"# but it didn't contain a valid `run_number`.",
"# Assume that this dot-separated string is the name of",
"# the workflow and search with it.",
"return",
"_get_workflow_by_name",
"(",
"uuid_or_name",
",",
"user_uuid",
")",
"# `run_number` is valid.",
"# Search by `run_number` since it is a primary key.",
"workflow",
"=",
"Workflow",
".",
"query",
".",
"filter",
"(",
"Workflow",
".",
"name",
"==",
"workflow_name",
",",
"Workflow",
".",
"run_number",
"==",
"run_number",
",",
"Workflow",
".",
"owner_id",
"==",
"user_uuid",
")",
".",
"one_or_none",
"(",
")",
"if",
"not",
"workflow",
":",
"raise",
"ValueError",
"(",
"'REANA_WORKON is set to {0}, but '",
"'that workflow does not exist. '",
"'Please set your REANA_WORKON environment '",
"'variable appropriately.'",
".",
"format",
"(",
"workflow_name",
",",
"run_number",
")",
")",
"return",
"workflow"
] | Get Workflow from database with uuid or name.
:param uuid_or_name: String representing a valid UUIDv4 or valid
Workflow name. Valid name contains only ASCII alphanumerics.
Name might be in format 'reana.workflow.123' with arbitrary
number of dot-delimited substrings, where last substring specifies
the run number of the workflow this workflow name refers to.
If name does not contain a valid run number, but it is a valid name,
workflow with latest run number of all the workflows with this name
is returned.
:type uuid_or_name: String
:rtype: reana-db.models.Workflow | [
"Get",
"Workflow",
"from",
"database",
"with",
"uuid",
"or",
"name",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/utils.py#L30-L127 | train |
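The subtle part of the function above is the dispatch driven by rsplit('.', maxsplit=1); a sketch of the three outcomes, matching the comment table in the code:

    'workflow.name.123'.rsplit('.', maxsplit=1)  # ['workflow.name', '123'] -> query by name + run number
    '123.'.rsplit('.', maxsplit=1)               # ['123', ''] -> empty run number, search by name '123'
    'plainname'.rsplit('.', maxsplit=1)          # ['plainname'] -> two-target unpacking raises
                                                 #    ValueError, so lookup falls back to the full name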
reanahub/reana-db | reana_db/utils.py | _get_workflow_by_name | def _get_workflow_by_name(workflow_name, user_uuid):
"""From Workflows named as `workflow_name` the latest run_number.
Only use when you are sure that workflow_name is not UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(
Workflow.name == workflow_name,
Workflow.owner_id == user_uuid). \
order_by(Workflow.run_number.desc()).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_name))
return workflow | python | def _get_workflow_by_name(workflow_name, user_uuid):
"""From Workflows named as `workflow_name` the latest run_number.
Only use when you are sure that workflow_name is not UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(
Workflow.name == workflow_name,
Workflow.owner_id == user_uuid). \
order_by(Workflow.run_number.desc()).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_name))
return workflow | [
"def",
"_get_workflow_by_name",
"(",
"workflow_name",
",",
"user_uuid",
")",
":",
"from",
"reana_db",
".",
"models",
"import",
"Workflow",
"workflow",
"=",
"Workflow",
".",
"query",
".",
"filter",
"(",
"Workflow",
".",
"name",
"==",
"workflow_name",
",",
"Workflow",
".",
"owner_id",
"==",
"user_uuid",
")",
".",
"order_by",
"(",
"Workflow",
".",
"run_number",
".",
"desc",
"(",
")",
")",
".",
"first",
"(",
")",
"if",
"not",
"workflow",
":",
"raise",
"ValueError",
"(",
"'REANA_WORKON is set to {0}, but '",
"'that workflow does not exist. '",
"'Please set your REANA_WORKON environment '",
"'variable appropriately.'",
".",
"format",
"(",
"workflow_name",
")",
")",
"return",
"workflow"
] | From Workflows named as `workflow_name`, return the latest run_number.
Only use when you are sure that workflow_name is not UUIDv4.
:rtype: reana-db.models.Workflow | [
"From",
"Workflows",
"named",
"as",
"workflow_name",
"the",
"latest",
"run_number",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/utils.py#L130-L149 | train |
reanahub/reana-db | reana_db/utils.py | _get_workflow_by_uuid | def _get_workflow_by_uuid(workflow_uuid):
"""Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(Workflow.id_ ==
workflow_uuid).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_uuid))
return workflow | python | def _get_workflow_by_uuid(workflow_uuid):
"""Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow
"""
from reana_db.models import Workflow
workflow = Workflow.query.filter(Workflow.id_ ==
workflow_uuid).first()
if not workflow:
raise ValueError(
'REANA_WORKON is set to {0}, but '
'that workflow does not exist. '
'Please set your REANA_WORKON environment '
'variable appropriately.'.
format(workflow_uuid))
return workflow | [
"def",
"_get_workflow_by_uuid",
"(",
"workflow_uuid",
")",
":",
"from",
"reana_db",
".",
"models",
"import",
"Workflow",
"workflow",
"=",
"Workflow",
".",
"query",
".",
"filter",
"(",
"Workflow",
".",
"id_",
"==",
"workflow_uuid",
")",
".",
"first",
"(",
")",
"if",
"not",
"workflow",
":",
"raise",
"ValueError",
"(",
"'REANA_WORKON is set to {0}, but '",
"'that workflow does not exist. '",
"'Please set your REANA_WORKON environment '",
"'variable appropriately.'",
".",
"format",
"(",
"workflow_uuid",
")",
")",
"return",
"workflow"
] | Get Workflow with UUIDv4.
:param workflow_uuid: UUIDv4 of a Workflow.
:type workflow_uuid: String representing a valid UUIDv4.
:rtype: reana-db.models.Workflow | [
"Get",
"Workflow",
"with",
"UUIDv4",
"."
] | 4efcb46d23af035689964d8c25a804c5a8f1dfc3 | https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/utils.py#L152-L170 | train |
BernardFW/bernard | src/bernard/i18n/loaders.py | LiveFileLoaderMixin._watch | async def _watch(self):
"""
Start the watching loop.
"""
file_name = os.path.basename(self._file_path)
logger.info(
'Watching %s "%s"',
self.THING,
self._file_path,
)
while self._running:
evt = await self._watcher.get_event()
if evt.name == file_name:
await self._load()
logger.info(
'Reloading changed %s from "%s"',
self.THING,
self._file_path
) | python | async def _watch(self):
"""
Start the watching loop.
"""
file_name = os.path.basename(self._file_path)
logger.info(
'Watching %s "%s"',
self.THING,
self._file_path,
)
while self._running:
evt = await self._watcher.get_event()
if evt.name == file_name:
await self._load()
logger.info(
'Reloading changed %s from "%s"',
self.THING,
self._file_path
) | [
"async",
"def",
"_watch",
"(",
"self",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"self",
".",
"_file_path",
")",
"logger",
".",
"info",
"(",
"'Watching %s \"%s\"'",
",",
"self",
".",
"THING",
",",
"self",
".",
"_file_path",
",",
")",
"while",
"self",
".",
"_running",
":",
"evt",
"=",
"await",
"self",
".",
"_watcher",
".",
"get_event",
"(",
")",
"if",
"evt",
".",
"name",
"==",
"file_name",
":",
"await",
"self",
".",
"_load",
"(",
")",
"logger",
".",
"info",
"(",
"'Reloading changed %s from \"%s\"'",
",",
"self",
".",
"THING",
",",
"self",
".",
"_file_path",
")"
] | Start the watching loop. | [
"Start",
"the",
"watching",
"loop",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/loaders.py#L54-L75 | train |
BernardFW/bernard | src/bernard/i18n/loaders.py | LiveFileLoaderMixin.start | async def start(self, file_path, locale=None, kwargs=None):
"""
Setup the watching utilities, start the loop and load data a first
time.
"""
self._file_path = os.path.realpath(file_path)
self._locale = locale
if kwargs:
self._kwargs = kwargs
if settings.I18N_LIVE_RELOAD:
loop = asyncio.get_event_loop()
self._running = True
self._watcher = aionotify.Watcher()
self._watcher.watch(
path=os.path.dirname(self._file_path),
flags=aionotify.Flags.MOVED_TO | aionotify.Flags.MODIFY,
)
await self._watcher.setup(loop)
await self._load()
loop.create_task(self._watch())
else:
await self._load() | python | async def start(self, file_path, locale=None, kwargs=None):
"""
Setup the watching utilities, start the loop and load data a first
time.
"""
self._file_path = os.path.realpath(file_path)
self._locale = locale
if kwargs:
self._kwargs = kwargs
if settings.I18N_LIVE_RELOAD:
loop = asyncio.get_event_loop()
self._running = True
self._watcher = aionotify.Watcher()
self._watcher.watch(
path=os.path.dirname(self._file_path),
flags=aionotify.Flags.MOVED_TO | aionotify.Flags.MODIFY,
)
await self._watcher.setup(loop)
await self._load()
loop.create_task(self._watch())
else:
await self._load() | [
"async",
"def",
"start",
"(",
"self",
",",
"file_path",
",",
"locale",
"=",
"None",
",",
"kwargs",
"=",
"None",
")",
":",
"self",
".",
"_file_path",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"file_path",
")",
"self",
".",
"_locale",
"=",
"locale",
"if",
"kwargs",
":",
"self",
".",
"_kwargs",
"=",
"kwargs",
"if",
"settings",
".",
"I18N_LIVE_RELOAD",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"self",
".",
"_running",
"=",
"True",
"self",
".",
"_watcher",
"=",
"aionotify",
".",
"Watcher",
"(",
")",
"self",
".",
"_watcher",
".",
"watch",
"(",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"self",
".",
"_file_path",
")",
",",
"flags",
"=",
"aionotify",
".",
"Flags",
".",
"MOVED_TO",
"|",
"aionotify",
".",
"Flags",
".",
"MODIFY",
",",
")",
"await",
"self",
".",
"_watcher",
".",
"setup",
"(",
"loop",
")",
"await",
"self",
".",
"_load",
"(",
")",
"loop",
".",
"create_task",
"(",
"self",
".",
"_watch",
"(",
")",
")",
"else",
":",
"await",
"self",
".",
"_load",
"(",
")"
] | Setup the watching utilities, start the loop and load data a first
time. | [
"Setup",
"the",
"watching",
"utilities",
"start",
"the",
"loop",
"and",
"load",
"data",
"a",
"first",
"time",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/loaders.py#L77-L103 | train |
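A sketch of how a concrete loader might drive `start`; the subclass and its `_load` body are hypothetical, and the watcher branch only engages when settings.I18N_LIVE_RELOAD is true:

    class CsvTranslationLoader(LiveFileLoaderMixin):  # hypothetical subclass
        THING = 'translation'

        async def _load(self):
            ...  # read self._file_path and publish the parsed data

    loader = CsvTranslationLoader()
    await loader.start('i18n/en.csv', locale='en')  # inside a coroutine: loads once, then watches the directory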
BernardFW/bernard | src/bernard/i18n/loaders.py | BaseTranslationLoader._update | def _update(self, data: TransDict, *args, **kwargs):
"""
Propagate updates to listeners
:param data: Data to propagate
"""
for l in self.listeners:
l(data, *args, **kwargs) | python | def _update(self, data: TransDict, *args, **kwargs):
"""
Propagate updates to listeners
:param data: Data to propagate
"""
for l in self.listeners:
l(data, *args, **kwargs) | [
"def",
"_update",
"(",
"self",
",",
"data",
":",
"TransDict",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"for",
"l",
"in",
"self",
".",
"listeners",
":",
"l",
"(",
"data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Propagate updates to listeners
:param data: Data to propagate | [
"Propagate",
"updates",
"to",
"listeners"
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/loaders.py#L126-L134 | train |
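`_update` is a plain observer-pattern fan-out; a sketch, assuming `self.listeners` is a list populated by whoever subscribes to the loader:

    loader.listeners.append(lambda data, *a, **kw: print('got', len(data), 'entries'))
    loader._update({'HELLO': 'Hello!'})  # every registered listener receives the new dict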
cloudmesh-cmd3/cmd3 | cmd3/plugins/info.py | info.print_info | def print_info(self):
"""prints some info that the user may find useful"""
d = dir(self)
self.plugins = []
for key in d:
if key.startswith("info_"):
self.plugins.append(key)
for key in self.plugins:
if self.echo:
Console.ok("> {0}".format(key.replace("_", " ", 1)))
exec("self.%s()" % key) | python | def print_info(self):
"""prints some info that the user may find useful"""
d = dir(self)
self.plugins = []
for key in d:
if key.startswith("info_"):
self.plugins.append(key)
for key in self.plugins:
if self.echo:
Console.ok("> {0}".format(key.replace("_", " ", 1)))
exec("self.%s()" % key) | [
"def",
"print_info",
"(",
"self",
")",
":",
"d",
"=",
"dir",
"(",
"self",
")",
"self",
".",
"plugins",
"=",
"[",
"]",
"for",
"key",
"in",
"d",
":",
"if",
"key",
".",
"startswith",
"(",
"\"info_\"",
")",
":",
"self",
".",
"plugins",
".",
"append",
"(",
"key",
")",
"for",
"key",
"in",
"self",
".",
"plugins",
":",
"if",
"self",
".",
"echo",
":",
"Console",
".",
"ok",
"(",
"\"> {0}\"",
".",
"format",
"(",
"key",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
",",
"1",
")",
")",
")",
"exec",
"(",
"\"self.%s()\"",
"%",
"key",
")"
] | prints some info that the user may find useful | [
"prints",
"some",
"info",
"that",
"the",
"user",
"may",
"find",
"useful"
] | 92e33c96032fd3921f159198a0e57917c4dc34ed | https://github.com/cloudmesh-cmd3/cmd3/blob/92e33c96032fd3921f159198a0e57917c4dc34ed/cmd3/plugins/info.py#L7-L19 | train |
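The loop above discovers `info_*` methods via dir() and invokes them with exec; an equivalent formulation using getattr avoids string evaluation (a design note, not what the source does):

    for key in (k for k in dir(self) if k.startswith('info_')):
        if self.echo:
            Console.ok('> {0}'.format(key.replace('_', ' ', 1)))
        getattr(self, key)()  # same effect as exec("self.%s()" % key)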
openvax/varlens | varlens/variants_util.py | load_from_args_as_dataframe | def load_from_args_as_dataframe(args):
'''
Given parsed variant-loading arguments, return a pandas DataFrame.
If no variant loading arguments are specified, return None.
'''
if not args.variants and not args.single_variant:
return None
if args.variant_source_name:
variant_source_names = util.expand(
args.variant_source_name,
'variant_source_name',
'variant source',
len(args.variants))
else:
variant_source_names = util.drop_prefix(args.variants)
variant_to_sources = collections.defaultdict(list)
dfs = []
for i in range(len(args.variants)):
name = variant_source_names[i]
prefix = (
'metadata:' if len(args.variants) == 1 else "metadata:%s:" % name)
df = load_as_dataframe(
args.variants[i],
name=name,
genome=args.genome,
max_variants=args.max_variants_per_source,
only_passing=not args.include_failing_variants,
metadata_column_prefix=prefix)
if df.shape[0] == 0:
logging.warn("No variants loaded from: %s" % args.variants[i])
else:
for variant in df.variant:
variant_to_sources[variant].append(name)
dfs.append(df)
if args.single_variant:
variants = []
extra_args = {}
if args.genome:
extra_args = {
'ensembl': varcode.reference.infer_genome(args.genome)
}
for (locus_str, ref, alt) in args.single_variant:
locus = Locus.parse(locus_str)
variant = varcode.Variant(
locus.contig,
locus.inclusive_start,
ref,
alt,
**extra_args)
variants.append(variant)
variant_to_sources[variant].append("commandline")
dfs.append(variants_to_dataframe(variants))
df = dfs.pop(0)
for other_df in dfs:
df = pandas.merge(
df,
other_df,
how='outer',
on=["variant"] + STANDARD_DATAFRAME_COLUMNS)
genomes = df["genome"].unique()
if len(genomes) > 1:
raise ValueError(
"Mixing references is not supported. "
"Reference genomes: %s" % (", ".join(genomes)))
df["sources"] = [" ".join(variant_to_sources[v]) for v in df.variant]
# Apply filters:
if args.ref:
df = df.ix[df.ref.isin(args.ref)]
if args.alt:
df = df.ix[df.alt.isin(args.alt)]
loci = loci_util.load_from_args(
util.remove_prefix_from_parsed_args(args, "variant"))
if loci is not None:
df = df.ix[[
loci.intersects(pileup_collection.to_locus(v))
for v in df.variant
]]
return df | python | def load_from_args_as_dataframe(args):
'''
Given parsed variant-loading arguments, return a pandas DataFrame.
If no variant loading arguments are specified, return None.
'''
if not args.variants and not args.single_variant:
return None
if args.variant_source_name:
variant_source_names = util.expand(
args.variant_source_name,
'variant_source_name',
'variant source',
len(args.variants))
else:
variant_source_names = util.drop_prefix(args.variants)
variant_to_sources = collections.defaultdict(list)
dfs = []
for i in range(len(args.variants)):
name = variant_source_names[i]
prefix = (
'metadata:' if len(args.variants) == 1 else "metadata:%s:" % name)
df = load_as_dataframe(
args.variants[i],
name=name,
genome=args.genome,
max_variants=args.max_variants_per_source,
only_passing=not args.include_failing_variants,
metadata_column_prefix=prefix)
if df.shape[0] == 0:
logging.warn("No variants loaded from: %s" % args.variants[i])
else:
for variant in df.variant:
variant_to_sources[variant].append(name)
dfs.append(df)
if args.single_variant:
variants = []
extra_args = {}
if args.genome:
extra_args = {
'ensembl': varcode.reference.infer_genome(args.genome)
}
for (locus_str, ref, alt) in args.single_variant:
locus = Locus.parse(locus_str)
variant = varcode.Variant(
locus.contig,
locus.inclusive_start,
ref,
alt,
**extra_args)
variants.append(variant)
variant_to_sources[variant].append("commandline")
dfs.append(variants_to_dataframe(variants))
df = dfs.pop(0)
for other_df in dfs:
df = pandas.merge(
df,
other_df,
how='outer',
on=["variant"] + STANDARD_DATAFRAME_COLUMNS)
genomes = df["genome"].unique()
if len(genomes) > 1:
raise ValueError(
"Mixing references is not supported. "
"Reference genomes: %s" % (", ".join(genomes)))
df["sources"] = [" ".join(variant_to_sources[v]) for v in df.variant]
# Apply filters:
if args.ref:
df = df.ix[df.ref.isin(args.ref)]
if args.alt:
df = df.ix[df.alt.isin(args.alt)]
loci = loci_util.load_from_args(
util.remove_prefix_from_parsed_args(args, "variant"))
if loci is not None:
df = df.ix[[
loci.intersects(pileup_collection.to_locus(v))
for v in df.variant
]]
return df | [
"def",
"load_from_args_as_dataframe",
"(",
"args",
")",
":",
"if",
"not",
"args",
".",
"variants",
"and",
"not",
"args",
".",
"single_variant",
":",
"return",
"None",
"if",
"args",
".",
"variant_source_name",
":",
"variant_source_names",
"=",
"util",
".",
"expand",
"(",
"args",
".",
"variant_source_name",
",",
"'variant_source_name'",
",",
"'variant source'",
",",
"len",
"(",
"args",
".",
"variants",
")",
")",
"else",
":",
"variant_source_names",
"=",
"util",
".",
"drop_prefix",
"(",
"args",
".",
"variants",
")",
"variant_to_sources",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"dfs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
".",
"variants",
")",
")",
":",
"name",
"=",
"variant_source_names",
"[",
"i",
"]",
"prefix",
"=",
"(",
"'metadata:'",
"if",
"len",
"(",
"args",
".",
"variants",
")",
"==",
"1",
"else",
"\"metadata:%s:\"",
"%",
"name",
")",
"df",
"=",
"load_as_dataframe",
"(",
"args",
".",
"variants",
"[",
"i",
"]",
",",
"name",
"=",
"name",
",",
"genome",
"=",
"args",
".",
"genome",
",",
"max_variants",
"=",
"args",
".",
"max_variants_per_source",
",",
"only_passing",
"=",
"not",
"args",
".",
"include_failing_variants",
",",
"metadata_column_prefix",
"=",
"prefix",
")",
"if",
"df",
".",
"shape",
"[",
"0",
"]",
"==",
"0",
":",
"logging",
".",
"warn",
"(",
"\"No variants loaded from: %s\"",
"%",
"args",
".",
"variants",
"[",
"i",
"]",
")",
"else",
":",
"for",
"variant",
"in",
"df",
".",
"variant",
":",
"variant_to_sources",
"[",
"variant",
"]",
".",
"append",
"(",
"name",
")",
"dfs",
".",
"append",
"(",
"df",
")",
"if",
"args",
".",
"single_variant",
":",
"variants",
"=",
"[",
"]",
"extra_args",
"=",
"{",
"}",
"if",
"args",
".",
"genome",
":",
"extra_args",
"=",
"{",
"'ensembl'",
":",
"varcode",
".",
"reference",
".",
"infer_genome",
"(",
"args",
".",
"genome",
")",
"}",
"for",
"(",
"locus_str",
",",
"ref",
",",
"alt",
")",
"in",
"args",
".",
"single_variant",
":",
"locus",
"=",
"Locus",
".",
"parse",
"(",
"locus_str",
")",
"variant",
"=",
"varcode",
".",
"Variant",
"(",
"locus",
".",
"contig",
",",
"locus",
".",
"inclusive_start",
",",
"ref",
",",
"alt",
",",
"*",
"*",
"extra_args",
")",
"variants",
".",
"append",
"(",
"variant",
")",
"variant_to_sources",
"[",
"variant",
"]",
".",
"append",
"(",
"\"commandline\"",
")",
"dfs",
".",
"append",
"(",
"variants_to_dataframe",
"(",
"variants",
")",
")",
"df",
"=",
"dfs",
".",
"pop",
"(",
"0",
")",
"for",
"other_df",
"in",
"dfs",
":",
"df",
"=",
"pandas",
".",
"merge",
"(",
"df",
",",
"other_df",
",",
"how",
"=",
"'outer'",
",",
"on",
"=",
"[",
"\"variant\"",
"]",
"+",
"STANDARD_DATAFRAME_COLUMNS",
")",
"genomes",
"=",
"df",
"[",
"\"genome\"",
"]",
".",
"unique",
"(",
")",
"if",
"len",
"(",
"genomes",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Mixing references is not supported. \"",
"\"Reference genomes: %s\"",
"%",
"(",
"\", \"",
".",
"join",
"(",
"genomes",
")",
")",
")",
"df",
"[",
"\"sources\"",
"]",
"=",
"[",
"\" \"",
".",
"join",
"(",
"variant_to_sources",
"[",
"v",
"]",
")",
"for",
"v",
"in",
"df",
".",
"variant",
"]",
"# Apply filters:",
"if",
"args",
".",
"ref",
":",
"df",
"=",
"df",
".",
"ix",
"[",
"df",
".",
"ref",
".",
"isin",
"(",
"args",
".",
"ref",
")",
"]",
"if",
"args",
".",
"alt",
":",
"df",
"=",
"df",
".",
"ix",
"[",
"df",
".",
"alt",
".",
"isin",
"(",
"args",
".",
"alt",
")",
"]",
"loci",
"=",
"loci_util",
".",
"load_from_args",
"(",
"util",
".",
"remove_prefix_from_parsed_args",
"(",
"args",
",",
"\"variant\"",
")",
")",
"if",
"loci",
"is",
"not",
"None",
":",
"df",
"=",
"df",
".",
"ix",
"[",
"[",
"loci",
".",
"intersects",
"(",
"pileup_collection",
".",
"to_locus",
"(",
"v",
")",
")",
"for",
"v",
"in",
"df",
".",
"variant",
"]",
"]",
"return",
"df"
] | Given parsed variant-loading arguments, return a pandas DataFrame.
If no variant loading arguments are specified, return None. | [
"Given",
"parsed",
"variant",
"-",
"loading",
"arguments",
"return",
"a",
"pandas",
"DataFrame",
"."
] | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/variants_util.py#L72-L159 | train |
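The heart of the function is the repeated outer merge: each variant source becomes a DataFrame and all of them are joined on 'variant' plus the standard columns, so rows survive even when a variant is absent from some sources. A runnable sketch with illustrative column names:

    import pandas
    a = pandas.DataFrame({'variant': ['v1', 'v2'], 'metadata:a:af': [0.1, 0.2]})
    b = pandas.DataFrame({'variant': ['v2', 'v3'], 'metadata:b:af': [0.3, 0.4]})
    merged = pandas.merge(a, b, how='outer', on=['variant'])
    # v1 and v3 are kept, with NaN in the columns that come from the other source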
rsgalloway/grit | grit/repo/proxy.py | Proxy.request | def request(self, cmd, *args, **kwargs):
"""
    Request data from the server.
:param cmd: repo handler command.
:returns: Result.
"""
params = {'action': cmd}
#TODO: serialize the kwargs?
params.update(kwargs)
return self.__request(self.url, params) | python | def request(self, cmd, *args, **kwargs):
"""
    Request data from the server.
:param cmd: repo handler command.
:returns: Result.
"""
params = {'action': cmd}
#TODO: serialize the kwargs?
params.update(kwargs)
return self.__request(self.url, params) | [
"def",
"request",
"(",
"self",
",",
"cmd",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"{",
"'action'",
":",
"cmd",
"}",
"#TODO: serialize the kwargs?",
"params",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"__request",
"(",
"self",
".",
"url",
",",
"params",
")"
] | Request data from the server.
:param cmd: repo handler command.
:returns: Result. | [
"Request",
"data",
"fromo",
"the",
"server",
"."
] | e6434ad8a1f4ac5d0903ebad630c81f8a5164d78 | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/proxy.py#L77-L88 | train |
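A hedged sketch of calling the wrapper above; the constructor arguments and the 'list' action are assumptions, while the special handling of the 'data' action is visible in `__request` below:

    proxy = Proxy('http://localhost:8080/repo')     # constructor signature assumed
    items = proxy.request('list', path='/shots')    # hypothetical action; returns parsed JSON
    blob = proxy.request('data', path='/shots/f1')  # 'data' responses come back unparsed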
rsgalloway/grit | grit/repo/proxy.py | Proxy.__request | def __request(self, url, params):
"""
Make an HTTP POST request to the server and return JSON data.
:param url: HTTP URL to object.
:returns: Response as dict.
"""
log.debug('request: %s %s' %(url, str(params)))
try:
response = urlopen(url, urlencode(params)).read()
if params.get('action') != 'data':
log.debug('response: %s' % response)
if params.get('action', None) == 'data':
return response
else:
return json.loads(response)
except TypeError, e:
log.exception('request error')
raise ServerError(e)
except IOError, e:
log.error('request error: %s' % str(e))
raise ServerError(e) | python | def __request(self, url, params):
"""
Make an HTTP POST request to the server and return JSON data.
:param url: HTTP URL to object.
:returns: Response as dict.
"""
log.debug('request: %s %s' %(url, str(params)))
try:
response = urlopen(url, urlencode(params)).read()
if params.get('action') != 'data':
log.debug('response: %s' % response)
if params.get('action', None) == 'data':
return response
else:
return json.loads(response)
except TypeError, e:
log.exception('request error')
raise ServerError(e)
except IOError, e:
log.error('request error: %s' % str(e))
raise ServerError(e) | [
"def",
"__request",
"(",
"self",
",",
"url",
",",
"params",
")",
":",
"log",
".",
"debug",
"(",
"'request: %s %s'",
"%",
"(",
"url",
",",
"str",
"(",
"params",
")",
")",
")",
"try",
":",
"response",
"=",
"urlopen",
"(",
"url",
",",
"urlencode",
"(",
"params",
")",
")",
".",
"read",
"(",
")",
"if",
"params",
".",
"get",
"(",
"'action'",
")",
"!=",
"'data'",
":",
"log",
".",
"debug",
"(",
"'response: %s'",
"%",
"response",
")",
"if",
"params",
".",
"get",
"(",
"'action'",
",",
"None",
")",
"==",
"'data'",
":",
"return",
"response",
"else",
":",
"return",
"json",
".",
"loads",
"(",
"response",
")",
"except",
"TypeError",
",",
"e",
":",
"log",
".",
"exception",
"(",
"'request error'",
")",
"raise",
"ServerError",
"(",
"e",
")",
"except",
"IOError",
",",
"e",
":",
"log",
".",
"error",
"(",
"'request error: %s'",
"%",
"str",
"(",
"e",
")",
")",
"raise",
"ServerError",
"(",
"e",
")"
] | Make an HTTP POST request to the server and return JSON data.
:param url: HTTP URL to object.
:returns: Response as dict. | [
"Make",
"an",
"HTTP",
"POST",
"request",
"to",
"the",
"server",
"and",
"return",
"JSON",
"data",
"."
] | e6434ad8a1f4ac5d0903ebad630c81f8a5164d78 | https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/repo/proxy.py#L90-L112 | train |
openvax/varlens | varlens/locus.py | Locus.position | def position(self):
'''
If this locus spans a single base, this property gives that position.
Otherwise, raises a ValueError.
'''
if self.end != self.start + 1:
raise ValueError("Not a single base: %s" % str(self))
return self.start | python | def position(self):
'''
If this locus spans a single base, this property gives that position.
Otherwise, raises a ValueError.
'''
if self.end != self.start + 1:
raise ValueError("Not a single base: %s" % str(self))
return self.start | [
"def",
"position",
"(",
"self",
")",
":",
"if",
"self",
".",
"end",
"!=",
"self",
".",
"start",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"Not a single base: %s\"",
"%",
"str",
"(",
"self",
")",
")",
"return",
"self",
".",
"start"
] | If this locus spans a single base, this property gives that position.
Otherwise, raises a ValueError. | [
"If",
"this",
"locus",
"spans",
"a",
"single",
"base",
"this",
"property",
"gives",
"that",
"position",
".",
"Otherwise",
"raises",
"a",
"ValueError",
"."
] | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/locus.py#L45-L52 | train |
openvax/varlens | varlens/locus.py | Locus.from_interbase_coordinates | def from_interbase_coordinates(contig, start, end=None):
'''
Given coordinates in 0-based interbase coordinates, return a Locus
instance.
'''
typechecks.require_string(contig)
typechecks.require_integer(start)
if end is None:
end = start + 1
typechecks.require_integer(end)
contig = pyensembl.locus.normalize_chromosome(contig)
return Locus(contig, start, end) | python | def from_interbase_coordinates(contig, start, end=None):
'''
Given coordinates in 0-based interbase coordinates, return a Locus
instance.
'''
typechecks.require_string(contig)
typechecks.require_integer(start)
if end is None:
end = start + 1
typechecks.require_integer(end)
contig = pyensembl.locus.normalize_chromosome(contig)
return Locus(contig, start, end) | [
"def",
"from_interbase_coordinates",
"(",
"contig",
",",
"start",
",",
"end",
"=",
"None",
")",
":",
"typechecks",
".",
"require_string",
"(",
"contig",
")",
"typechecks",
".",
"require_integer",
"(",
"start",
")",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"start",
"+",
"1",
"typechecks",
".",
"require_integer",
"(",
"end",
")",
"contig",
"=",
"pyensembl",
".",
"locus",
".",
"normalize_chromosome",
"(",
"contig",
")",
"return",
"Locus",
"(",
"contig",
",",
"start",
",",
"end",
")"
] | Given coordinates in 0-based interbase coordinates, return a Locus
instance. | [
"Given",
"coordinates",
"in",
"0",
"-",
"based",
"interbase",
"coordinates",
"return",
"a",
"Locus",
"instance",
"."
] | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/locus.py#L71-L82 | train |
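A worked check of the interbase convention used by the two `Locus` records above: start is 0-based and end is exclusive, so a single base spans exactly start..start+1:

    locus = Locus.from_interbase_coordinates('chr1', 9)  # end defaults to start + 1
    locus.position                                       # -> 9, since end == start + 1
    Locus.from_interbase_coordinates('chr1', 9, 9).position  # raises ValueError ("Not a single base")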
openvax/varlens | varlens/sequence_context.py | variant_context | def variant_context(
reference_fasta,
contig,
inclusive_start,
inclusive_end,
alt,
context_length):
"""
    Retrieve the surrounding reference region from a variant.
    SNVs are canonicalized so the reference base is a pyrimidine (C/T). For
    indels the reverse complement will still be taken if the first base of
    the reference is not a pyrimidine, but since the reference will also be
    reversed, that doesn't guarantee it will start with a pyrimidine.
Parameters
----------
reference_fasta : FastaReference
reference sequence from pyfaidx package
contig : str
Chromosome of the variant
inclusive_start : int
start of the variant in 1-based inclusive coordinates
inclusive_end : int
end of the variant in 1-based inclusive coordinates
alt : string
alt sequence
context_length : int
number of bases on either side of the variant to return
Returns
---------
A tuple of (5', mutation, 3') where
5' - bases immediately 5 prime to the mutation
3' - bases immediately 3 prime to the mutation
    mutation - the ref sequence followed by a > character followed by
    the alt sequence
"""
    # Move from 1-base coordinates to 0-base coordinates
start = int(inclusive_start) - 1
end = int(inclusive_end)
full_sequence = reference_fasta[contig]
left = str(full_sequence[start - context_length:start].seq).upper()
middle = str(full_sequence[start: end].seq).upper()
right = str(full_sequence[end: end + context_length].seq).upper()
# Complement and reverse the context if necessary so the ref base is a
    # pyrimidine (C/T)
if middle[0] in ('A', 'G'):
context_5prime = pyfaidx.complement(right)[::-1]
context_3prime = pyfaidx.complement(left)[::-1]
context_mutation = "%s>%s" % (
pyfaidx.complement(middle)[::-1], pyfaidx.complement(alt)[::-1])
else:
context_5prime = left
context_3prime = right
context_mutation = "%s>%s" % (middle, alt)
return (context_5prime, context_mutation, context_3prime) | python | def variant_context(
reference_fasta,
contig,
inclusive_start,
inclusive_end,
alt,
context_length):
"""
    Retrieve the surrounding reference region from a variant.
    SNVs are canonicalized so the reference base is a pyrimidine (C/T). For
    indels the reverse complement will still be taken if the first base of
    the reference is not a pyrimidine, but since the reference will also be
    reversed, that doesn't guarantee it will start with a pyrimidine.
Parameters
----------
reference_fasta : FastaReference
reference sequence from pyfaidx package
contig : str
Chromosome of the variant
inclusive_start : int
start of the variant in 1-based inclusive coordinates
inclusive_end : int
end of the variant in 1-based inclusive coordinates
alt : string
alt sequence
context_length : int
number of bases on either side of the variant to return
Returns
---------
A tuple of (5', mutation, 3') where
5' - bases immediately 5 prime to the mutation
3' - bases immediately 3 prime to the mutation
    mutation - the ref sequence followed by a > character followed by
    the alt sequence
"""
    # Move from 1-base coordinates to 0-base coordinates
start = int(inclusive_start) - 1
end = int(inclusive_end)
full_sequence = reference_fasta[contig]
left = str(full_sequence[start - context_length:start].seq).upper()
middle = str(full_sequence[start: end].seq).upper()
right = str(full_sequence[end: end + context_length].seq).upper()
# Complement and reverse the context if necessary so the ref base is a
    # pyrimidine (C/T)
if middle[0] in ('A', 'G'):
context_5prime = pyfaidx.complement(right)[::-1]
context_3prime = pyfaidx.complement(left)[::-1]
context_mutation = "%s>%s" % (
pyfaidx.complement(middle)[::-1], pyfaidx.complement(alt)[::-1])
else:
context_5prime = left
context_3prime = right
context_mutation = "%s>%s" % (middle, alt)
return (context_5prime, context_mutation, context_3prime) | [
"def",
"variant_context",
"(",
"reference_fasta",
",",
"contig",
",",
"inclusive_start",
",",
"inclusive_end",
",",
"alt",
",",
"context_length",
")",
":",
"# Move from 1-base coorindates to 0-base coordinates",
"start",
"=",
"int",
"(",
"inclusive_start",
")",
"-",
"1",
"end",
"=",
"int",
"(",
"inclusive_end",
")",
"full_sequence",
"=",
"reference_fasta",
"[",
"contig",
"]",
"left",
"=",
"str",
"(",
"full_sequence",
"[",
"start",
"-",
"context_length",
":",
"start",
"]",
".",
"seq",
")",
".",
"upper",
"(",
")",
"middle",
"=",
"str",
"(",
"full_sequence",
"[",
"start",
":",
"end",
"]",
".",
"seq",
")",
".",
"upper",
"(",
")",
"right",
"=",
"str",
"(",
"full_sequence",
"[",
"end",
":",
"end",
"+",
"context_length",
"]",
".",
"seq",
")",
".",
"upper",
"(",
")",
"# Complement and reverse the context if necessary so the ref base is a",
"# pyrmidine (C/T)",
"if",
"middle",
"[",
"0",
"]",
"in",
"(",
"'A'",
",",
"'G'",
")",
":",
"context_5prime",
"=",
"pyfaidx",
".",
"complement",
"(",
"right",
")",
"[",
":",
":",
"-",
"1",
"]",
"context_3prime",
"=",
"pyfaidx",
".",
"complement",
"(",
"left",
")",
"[",
":",
":",
"-",
"1",
"]",
"context_mutation",
"=",
"\"%s>%s\"",
"%",
"(",
"pyfaidx",
".",
"complement",
"(",
"middle",
")",
"[",
":",
":",
"-",
"1",
"]",
",",
"pyfaidx",
".",
"complement",
"(",
"alt",
")",
"[",
":",
":",
"-",
"1",
"]",
")",
"else",
":",
"context_5prime",
"=",
"left",
"context_3prime",
"=",
"right",
"context_mutation",
"=",
"\"%s>%s\"",
"%",
"(",
"middle",
",",
"alt",
")",
"return",
"(",
"context_5prime",
",",
"context_mutation",
",",
"context_3prime",
")"
] | Retrieve the surrounding reference region from a variant.
SNVs are canonicalized so the reference base is a pyrimidine (C/T). For
indels the reverse complement will still be taken if the first base of
the reference is not a pyrimidine, but since the reference will also be
reversed, that doesn't guarantee it will start with a pyrimidine.
Parameters
----------
reference_fasta : FastaReference
reference sequence from pyfaidx package
contig : str
Chromosome of the variant
inclusive_start : int
start of the variant in 1-based inclusive coordinates
inclusive_end : int
end of the variant in 1-based inclusive coordinates
alt : string
alt sequence
context_length : int
number of bases on either side of the variant to return
Returns
---------
A tuple of (5', mutation, 3') where
5' - bases immediately 5 prime to the mutation
3' - bases immediately 3 prime to the mutation
mutation - the ref sequence followed by a > character followed by
the alt sequence | [
"Retrieve",
"the",
"surronding",
"reference",
"region",
"from",
"a",
"variant",
"."
] | 715d3ede5893757b2fcba4117515621bca7b1e5d | https://github.com/openvax/varlens/blob/715d3ede5893757b2fcba4117515621bca7b1e5d/varlens/sequence_context.py#L17-L85 | train |
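A worked example of the canonicalization rule above: when the reference base at the locus is a purine (A/G), both flanks and alleles are reverse-complemented so reporting is always pyrimidine-centric. The sequences are illustrative, not taken from a real FASTA:

    # suppose left='AAC', middle='G', right='TTG', alt='T' (a G>T SNV)
    # middle[0] is 'G', so the reverse-complement branch runs:
    #   context_5prime   = revcomp('TTG') = 'CAA'
    #   context_mutation = 'C>A'          (revcomp('G') > revcomp('T'))
    #   context_3prime   = revcomp('AAC') = 'GTT'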
BernardFW/bernard | src/bernard/trigram.py | Trigram.similarity | def similarity(self, other: 'Trigram') -> float:
"""
Compute the similarity with the provided other trigram.
"""
if not len(self._trigrams) or not len(other._trigrams):
return 0
count = float(len(self._trigrams & other._trigrams))
len1 = float(len(self._trigrams))
len2 = float(len(other._trigrams))
return count / (len1 + len2 - count) | python | def similarity(self, other: 'Trigram') -> float:
"""
Compute the similarity with the provided other trigram.
"""
if not len(self._trigrams) or not len(other._trigrams):
return 0
count = float(len(self._trigrams & other._trigrams))
len1 = float(len(self._trigrams))
len2 = float(len(other._trigrams))
return count / (len1 + len2 - count) | [
"def",
"similarity",
"(",
"self",
",",
"other",
":",
"'Trigram'",
")",
"->",
"float",
":",
"if",
"not",
"len",
"(",
"self",
".",
"_trigrams",
")",
"or",
"not",
"len",
"(",
"other",
".",
"_trigrams",
")",
":",
"return",
"0",
"count",
"=",
"float",
"(",
"len",
"(",
"self",
".",
"_trigrams",
"&",
"other",
".",
"_trigrams",
")",
")",
"len1",
"=",
"float",
"(",
"len",
"(",
"self",
".",
"_trigrams",
")",
")",
"len2",
"=",
"float",
"(",
"len",
"(",
"other",
".",
"_trigrams",
")",
")",
"return",
"count",
"/",
"(",
"len1",
"+",
"len2",
"-",
"count",
")"
] | Compute the similarity with the provided other trigram. | [
"Compute",
"the",
"similarity",
"with",
"the",
"provided",
"other",
"trigram",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/trigram.py#L89-L100 | train |
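The expression count / (len1 + len2 - count) above is the Jaccard index over the two trigram sets; a quick worked check:

    # if self has 10 trigrams, other has 8, and they share 6:
    # similarity = 6 / (10 + 8 - 6) = 6 / 12 = 0.5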
BernardFW/bernard | src/bernard/trigram.py | Matcher._match | def _match(self, local: Tuple[Trigram, ...], other: Trigram) -> float:
"""
Match a trigram with another one. If the negative matching wins,
returns an inverted matching.
"""
pos = local[0] % other
neg = max((x % other for x in local[1:]), default=0)
if neg > pos:
return 0.0
return pos | python | def _match(self, local: Tuple[Trigram, ...], other: Trigram) -> float:
"""
Match a trigram with another one. If the negative matching wins,
returns an inverted matching.
"""
pos = local[0] % other
neg = max((x % other for x in local[1:]), default=0)
if neg > pos:
return 0.0
return pos | [
"def",
"_match",
"(",
"self",
",",
"local",
":",
"Tuple",
"[",
"Trigram",
",",
"...",
"]",
",",
"other",
":",
"Trigram",
")",
"->",
"float",
":",
"pos",
"=",
"local",
"[",
"0",
"]",
"%",
"other",
"neg",
"=",
"max",
"(",
"(",
"x",
"%",
"other",
"for",
"x",
"in",
"local",
"[",
"1",
":",
"]",
")",
",",
"default",
"=",
"0",
")",
"if",
"neg",
">",
"pos",
":",
"return",
"0.0",
"return",
"pos"
] | Match a trigram with another one. If the negative matching wins,
returns an inverted matching. | [
"Match",
"a",
"trigram",
"with",
"another",
"one",
".",
"If",
"the",
"negative",
"matching",
"wins",
"returns",
"an",
"inverted",
"matching",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/trigram.py#L120-L132 | train |
BernardFW/bernard | src/bernard/trigram.py | Matcher.similarity | def similarity(self, other: Trigram) -> float:
"""
Find the best similarity within known trigrams.
"""
return max((self._match(x, other) for x in self.trigrams), default=0) | python | def similarity(self, other: Trigram) -> float:
"""
Find the best similarity within known trigrams.
"""
return max((self._match(x, other) for x in self.trigrams), default=0) | [
"def",
"similarity",
"(",
"self",
",",
"other",
":",
"Trigram",
")",
"->",
"float",
":",
"return",
"max",
"(",
"(",
"self",
".",
"_match",
"(",
"x",
",",
"other",
")",
"for",
"x",
"in",
"self",
".",
"trigrams",
")",
",",
"default",
"=",
"0",
")"
] | Find the best similarity within known trigrams. | [
"Find",
"the",
"best",
"similarity",
"within",
"known",
"trigrams",
"."
] | 9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/trigram.py#L134-L138 | train |