sentence1 | sentence2 | label
---|---|---|
def get_version(self, service_id, version_number):
"""Get the version for a particular service."""
content = self._fetch("/service/%s/version/%d" % (service_id, version_number))
return FastlyVersion(self, content) | Get the version for a particular service. | entailment |
def update_version(self, service_id, version_number, **kwargs):
"""Update a particular version for a particular service."""
body = self._formdata(kwargs, FastlyVersion.FIELDS)
content = self._fetch("/service/%s/version/%d/" % (service_id, version_number), method="PUT", body=body)
return FastlyVersion(self, content) | Update a particular version for a particular service. | entailment |
def clone_version(self, service_id, version_number):
"""Clone the current configuration into a new version."""
content = self._fetch("/service/%s/version/%d/clone" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | Clone the current configuration into a new version. | entailment |
def activate_version(self, service_id, version_number):
"""Activate the current version."""
content = self._fetch("/service/%s/version/%d/activate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | Activate the current version. | entailment |
def deactivate_version(self, service_id, version_number):
"""Deactivate the current version."""
content = self._fetch("/service/%s/version/%d/deactivate" % (service_id, version_number), method="PUT")
return FastlyVersion(self, content) | Deactivate the current version. | entailment |
def validate_version(self, service_id, version_number):
"""Validate the version for a particular service and version."""
content = self._fetch("/service/%s/version/%d/validate" % (service_id, version_number))
return self._status(content) | Validate the version for a particular service and version. | entailment |
def lock_version(self, service_id, version_number):
"""Locks the specified version."""
content = self._fetch("/service/%s/version/%d/lock" % (service_id, version_number))
return self._status(content) | Locks the specified version. | entailment |
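Taken together, these methods support Fastly's usual clone-edit-activate cycle. A hedged usage sketch, assuming an authenticated `client` instance of this API wrapper, an existing `service_id`, and the currently active version number (all names illustrative):

```python
# Hypothetical workflow: clone the active version, edit it, validate, activate.
version = client.clone_version(service_id, active_version_number)
client.update_version(service_id, version.number, comment="edited via API")
status = client.validate_version(service_id, version.number)  # server-side check
client.activate_version(service_id, version.number)
```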
def list_wordpressess(self, service_id, version_number):
"""Get all of the wordpresses for a specified service and version."""
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number))
return map(lambda x: FastlyWordpress(self, x), content) | Get all of the wordpresses for a specified service and version. | entailment |
def create_wordpress(self,
service_id,
version_number,
name,
path,
comment=None):
"""Create a wordpress for the specified service and version."""
body = self._formdata({
"name": name,
"path": path,
"comment": comment,
}, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress" % (service_id, version_number), method="POST", body=body)
return FastlyWordpress(self, content) | Create a wordpress for the specified service and version. | entailment |
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name))
return FastlyWordpress(self, content) | Get information on a specific wordpress. | entailment |
def update_wordpress(self, service_id, version_number, name_key, **kwargs):
"""Update a specified wordpress."""
body = self._formdata(kwargs, FastlyWordpress.FIELDS)
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name_key), method="PUT", body=body)
return FastlyWordpress(self, content) | Update a specified wordpress. | entailment |
def annotate(self, word):
'''Annotate 'word' for syllabification, stress, weights, and vowels.'''
info = [] # e.g., [ ('\'nak.su.`tus.ta', 'PUSU', 'HLHL', 'AUUA'), ]
for syllabification, _ in syllabify(self.normalize(word), stress=True):
stresses = ''
weights = ''
vowels = ''
for syll in syllable_split(syllabification):
try:
vowels += get_vowel(syll)
weights += get_weight(syll)
stresses += {'\'': 'P', '`': 'S'}.get(syll[0], 'U')
except AttributeError:
# if the syllable is vowel-less...
if syll[-1].isalpha():
stresses += '*'
weights += '*'
vowels += '*'
else:
stresses += ' '
weights += ' '
vowels += ' '
info.append((
syllabification,
stresses,
weights,
vowels,
))
return info | Annotate 'word' for syllabification, stress, weights, and vowels. | entailment |
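For context, a self-contained sketch of how the four parallel strings returned by annotate() line up per syllable; the tuple mirrors the example in the comment above and is illustrative only:

```python
# One (syllabification, stresses, weights, vowels) tuple, as in the comment above.
info = [("'nak.su.`tus.ta", 'PUSU', 'HLHL', 'AUUA')]
for syllabification, stresses, weights, vowels in info:
    # each character of the three annotation strings describes one syllable
    for syll, s, w, v in zip(syllabification.split('.'), stresses, weights, vowels):
        print('%-6s stress=%s weight=%s vowel=%s' % (syll, s, w, v))
```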
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
# Run the analyses
Sippr(self, self.cutoff)
self.serotype_escherichia()
self.serotype_salmonella()
# Create the reports
self.reporter()
# Print the metadata
metadataprinter.MetadataPrinter(self) | Run the necessary methods in the correct order | entailment |
def reporter(self):
"""
Creates a report of the results
"""
logging.info('Creating {} report'.format(self.analysistype))
# Create the path in which the reports are stored
make_path(self.reportpath)
header = 'Strain,Serotype\n'
data = ''
with open(os.path.join(self.reportpath, '{}.csv'.format(self.analysistype)), 'w') as report:
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
data += sample.name + ','
if sample[self.analysistype].results:
# Set the O-type as either the appropriate attribute, or O-untypeable
if ';'.join(sample.serosippr.o_set) == '-':
otype = 'O-untypeable'
else:
otype = '{oset} ({opid})'.format(oset=';'.join(sample.serosippr.o_set),
opid=sample.serosippr.best_o_pid)
# Same as above, but for the H-type
if ';'.join(sample.serosippr.h_set) == '-':
htype = 'H-untypeable'
else:
htype = '{hset} ({hpid})'.format(hset=';'.join(sample.serosippr.h_set),
hpid=sample.serosippr.best_h_pid)
serotype = '{otype}:{htype}'.format(otype=otype,
htype=htype)
# Populate the data string
data += serotype if serotype != 'O-untypeable:H-untypeable' else 'ND'
data += '\n'
else:
data += '\n'
report.write(header)
report.write(data) | Creates a report of the results | entailment |
def serotype_escherichia(self):
"""
Create attributes storing the best results for the O and H types
"""
for sample in self.runmetadata.samples:
# Initialise negative results to be overwritten when necessary
sample[self.analysistype].best_o_pid = '-'
sample[self.analysistype].o_genes = ['-']
sample[self.analysistype].o_set = ['-']
sample[self.analysistype].best_h_pid = '-'
sample[self.analysistype].h_genes = ['-']
sample[self.analysistype].h_set = ['-']
if sample.general.bestassemblyfile != 'NA':
if sample.general.closestrefseqgenus == 'Escherichia':
o = dict()
h = dict()
for result, percentid in sample[self.analysistype].results.items():
if 'O' in result.split('_')[-1]:
o.update({result: float(percentid)})
if 'H' in result.split('_')[-1]:
h.update({result: float(percentid)})
# O
try:
sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)
sample[self.analysistype].best_o_pid = str(sorted_o[0][1])
sample[self.analysistype].o_genes = [gene for gene, pid in o.items()
if str(pid) == sample[self.analysistype].best_o_pid]
sample[self.analysistype].o_set = \
list(set(gene.split('_')[-1] for gene in sample[self.analysistype].o_genes))
except (KeyError, IndexError):
pass
# H
try:
sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)
sample[self.analysistype].best_h_pid = str(sorted_h[0][1])
sample[self.analysistype].h_genes = [gene for gene, pid in h.items()
if str(pid) == sample[self.analysistype].best_h_pid]
sample[self.analysistype].h_set = \
list(set(gene.split('_')[-1] for gene in sample[self.analysistype].h_genes))
except (KeyError, IndexError):
pass | Create attributes storing the best results for the O and H types | entailment |
def _syllabify(word):
'''Syllabify the given word.'''
word = replace_umlauts(word)
word, CONTINUE_VV, CONTINUE_VVV, applied_rules = apply_T1(word)
if CONTINUE_VV:
word, T2 = apply_T2(word)
word, T4 = apply_T4(word)
applied_rules += T2 + T4
if CONTINUE_VVV:
word, T5 = apply_T5(word)
word, T6 = apply_T6(word)
word, T7 = apply_T7(word)
applied_rules += T5 + T6 + T7
word = replace_umlauts(word, put_back=True)
return word, applied_rules | Syllabify the given word. | entailment |
def apply_T1(word):
'''There is a syllable boundary in front of every CV-sequence.'''
T1 = ' T1'
WORD = _split_consonants_and_vowels(word)
CONTINUE_VV = 0
CONTINUE_VVV = 0
for i, v in enumerate(WORD):
if i == 0 and is_consonant(v[0][0]):
continue
elif is_consonant(v[0]) and i + 1 != len(WORD):
WORD[i] = v[:-1] + '.' + v[-1]
elif is_vowel(v[0]):
if len(v) > 2:
CONTINUE_VVV += 1
elif len(v) > 1:
CONTINUE_VV += 1
word = ''.join(WORD)
return word, CONTINUE_VV, CONTINUE_VVV, T1 | There is a syllable boundary in front of every CV-sequence. | entailment |
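Rule T1 can be illustrated with a small standalone regex sketch (not the module's implementation, which also tracks the VV/VVV continuation flags): after a vowel, a boundary goes before the last consonant of any cluster that precedes a vowel.

```python
import re

def t1_sketch(word, vowels='aeiouyäö'):
    # place '.' before the last consonant preceding a vowel, skipping the onset
    pat = r'(?<=[{V}])([^{V}.]*)([^{V}.][{V}])'.format(V=vowels)
    return re.sub(pat, r'\1.\2', word)

print(t1_sketch('kalastaa'))  # ka.las.taa
```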
def apply_T2(word):
'''There is a syllable boundary within a sequence VV of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
T2 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if not contains_diphthong(v):
VV = contains_VV(v)
if VV:
I = v.find(VV) + 1
WORD[i] = v[:I] + '.' + v[I:]
T2 = ' T2'
word = '.'.join(WORD)
return word, T2 | There is a syllable boundary within a sequence VV of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa]. | entailment |
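A hedged standalone sketch of rule T2, assuming the standard Finnish diphthong inventory (the module's own contains_diphthong/contains_VV helpers are not shown in this dump):

```python
DIPHTHONGS = {'ai', 'ei', 'oi', 'ui', 'yi', 'äi', 'öi', 'au', 'eu', 'iu',
              'ou', 'ey', 'iy', 'äy', 'öy', 'ie', 'uo', 'yö'}
VOWELS = 'aeiouyäö'

def t2_sketch(word):
    out = []
    for syll in word.split('.'):
        for i in range(len(syll) - 1):
            pair = syll[i:i + 2]
            # split at the first adjacent, non-identical vowel pair that is no diphthong
            if (pair[0] in VOWELS and pair[1] in VOWELS
                    and pair[0] != pair[1] and pair not in DIPHTHONGS):
                syll = syll[:i + 1] + '.' + syll[i + 1:]
                break
        out.append(syll)
    return '.'.join(out)

print(t2_sketch('tae'))       # ta.e
print(t2_sketch('koet.taa'))  # ko.et.taa
```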
def apply_T4(word):
'''An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
T4 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
# i % 2 != 0 prevents this rule from applying to first, third, etc.
# syllables, which receive stress (WSP)
if is_consonant(v[-1]) and i % 2 != 0:
if i + 1 == len(WORD) or is_consonant(WORD[i + 1][0]):
if contains_Vu_diphthong(v):
I = v.rfind('u')
WORD[i] = v[:I] + '.' + v[I:]
T4 = ' T4'
elif contains_Vy_diphthong(v):
I = v.rfind('y')
WORD[i] = v[:I] + '.' + v[I:]
T4 = ' T4'
word = '.'.join(WORD)
return word, T4 | An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa]. | entailment |
def apply_T5(word): # BROKEN
'''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
diphthong, there is a syllable boundary between it and the third vowel,
e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
[säi.e], [oi.om.me].'''
T5 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if contains_VVV(v) and any(i for i in i_DIPHTHONGS if i in v):
I = v.rfind('i') - 1 or 2
I = I + 2 if is_consonant(v[I - 1]) else I
WORD[i] = v[:I] + '.' + v[I:]
T5 = ' T5'
word = '.'.join(WORD)
return word, T5 | If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
diphthong, there is a syllable boundary between it and the third vowel,
e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
[säi.e], [oi.om.me]. | entailment |
def apply_T6(word):
'''If a VVV-sequence contains a long vowel, there is a syllable boundary
between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te].'''
T6 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if contains_VVV(v):
VV = [v.find(j) for j in LONG_VOWELS if v.find(j) > 0]
if VV:
I = VV[0]
T6 = ' T6'
if I + 2 == len(v) or is_vowel(v[I + 2]):
WORD[i] = v[:I + 2] + '.' + v[I + 2:] # TODO
else:
WORD[i] = v[:I] + '.' + v[I:]
word = '.'.join(WORD)
word = word.strip('.') # TODO
return word, T6 | If a VVV-sequence contains a long vowel, there is a syllable boundary
between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te]. | entailment |
def apply_T7(word):
'''If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as].'''
T7 = ''
WORD = word.split('.')
for i, v in enumerate(WORD):
if contains_VVV(v):
for I, V in enumerate(v[::-1]):
if is_vowel(V):
WORD[i] = v[:I] + '.' + v[I:]
T7 = ' T7'
word = '.'.join(WORD)
return word, T7 | If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as]. | entailment |
def main():
"""
Upload a vcl file to a fastly service, cloning the current version if
necessary. The uploaded vcl is set as main unless --include is given.
All existing vcl files will be deleted first if --delete is given.
"""
parser = OptionParser(description=
"Upload a vcl file (set as main) to a given fastly service. All arguments are required.")
parser.add_option("-k", "--key", dest="apikey", help="fastly api key")
parser.add_option("-u", "--user", dest="user", help="fastly user name")
parser.add_option("-p", "--password", dest="password",
help="fastly password")
parser.add_option("-f", "--file", dest="filename",
help="vcl file to upload")
parser.add_option("-s", "--service", dest="service_name",
help="service to update")
parser.add_option("-d", "--delete_vcl", action="store_true",
dest="delete_vcl", default=False,
help="delete existing vcl files from service\
before uploading")
parser.add_option("-i", "--include", action="store_true",
dest="include_vcl", default=False,
help="do not set uploaded vcl as main,\
to be included only")
(options, args) = parser.parse_args()
for val in options.__dict__.values():
if val is None:
print "Missing required options:"
parser.print_help()
sys.exit(1)
vcl_name = options.filename.split('/').pop()
service_name = options.service_name
vcl_file = open(options.filename, 'r')
vcl_content = vcl_file.read()
# Need to fully authenticate to access all features.
client = fastly.connect(options.apikey)
client.login(options.user, options.password)
service = client.get_service_by_name(service_name)
versions = client.list_versions(service.id)
latest = versions.pop()
if latest.locked is True or latest.active is True:
print "\n[ Cloning version %d ]\n"\
% (latest.number)
latest = client.clone_version(service.id, latest.number)
if options.delete_vcl:
vcls = client.list_vcls(service.id, latest.number)
for vcl in vcls:
print "\n[ Deleting vcl file %s from version %d ]\n" %\
(service_name, latest.number)
client.delete_vcl(service.id, latest.number, vcl.name)
if vcl_name in latest.vcls:
print "\n[ Updating vcl file %s on service %s version %d ]\n"\
% (vcl_name, service_name, latest.number)
client.update_vcl(service.id, latest.number, vcl_name,
content=vcl_content)
else:
print "\n[ Uploading new vcl file %s on service %s version %d ]\n"\
% (vcl_name, service_name, latest.number)
client.upload_vcl(service.id, latest.number, vcl_name, vcl_content)
if options.include_vcl is False:
print "\n[ Setting vcl %s as main ]\n" % (vcl_name)
client.set_main_vcl(service.id, latest.number, vcl_name)
client.activate_version(service.id, latest.number)
print "\n[ Activing configuration version %d ]\n" % (latest.number) | Upload a vcl file to a fastly service, cloning the current version if
necessary. The uploaded vcl is set as main unless --include is given.
All existing vcl files will be deleted first if --delete is given. | entailment |
def main(self):
"""
Run the necessary methods in the correct order
"""
self.target_validate()
self.gene_names()
Sippr(inputobject=self,
k=self.kmer_size,
allow_soft_clips=self.allow_soft_clips)
self.report() | Run the necessary methods in the correct order | entailment |
def gene_names(self):
"""
Extract the names of the user-supplied targets
"""
# Iterate through all the target names in the formatted targets file
for record in SeqIO.parse(self.targets, 'fasta'):
# Append all the gene names to the list of names
self.genes.append(record.id) | Extract the names of the user-supplied targets | entailment |
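A minimal, runnable illustration of the SeqIO.parse call above, using an in-memory FASTA instead of self.targets (requires Biopython):

```python
from io import StringIO
from Bio import SeqIO

fasta = StringIO('>geneA some description\nATGC\n>geneB\nGGTA\n')
# record.id is the first whitespace-delimited token of each header line
print([record.id for record in SeqIO.parse(fasta, 'fasta')])  # ['geneA', 'geneB']
```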
def report(self):
"""
Create the report for the user-supplied targets
"""
# Add all the genes to the header
header = 'Sample,'
data = str()
with open(os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype)), 'w') as report:
write_header = True
for sample in self.runmetadata:
data += sample.name + ','
# Iterate through all the user-supplied target names
for target in sorted(self.genes):
write_results = False
# There was an issue with 'target' not matching 'name' due to a dash being replaced by an underscore
# only in 'name'. This will hopefully address this issue
target = target.replace('-', '_')
if write_header:
header += '{target}_match_details,{target},'.format(target=target)
for name, identity in sample[self.analysistype].results.items():
# Ensure that all dashes are replaced with underscores
name = name.replace('-', '_')
# If the current target matches the target in the header, add the data to the string
if name == target:
write_results = True
gene_results = '{percent_id}% ({avgdepth} +/- {stddev}),{record},'\
.format(percent_id=identity,
avgdepth=sample[self.analysistype].avgdepth[name],
stddev=sample[self.analysistype].standarddev[name],
record=sample[self.analysistype].sequences[target])
# Populate the data string appropriately
data += gene_results
# If the target is not present, write dashes to represent the results and sequence
if not write_results:
data += '-,-,'
data += ' \n'
write_header = False
header += '\n'
# Write the strings to the report
report.write(header)
report.write(data) | Create the report for the user-supplied targets | entailment |
def on_add(self, item):
"""Convert to pseuso acces"""
super(Tels, self).on_add(list_views.PseudoAccesCategorie(item)) | Convert to pseuso acces | entailment |
def set_data(self, *args):
"""we cant to call set_data to manually update"""
db = self.begining.get_data() or formats.DATE_DEFAULT
df = self.end.get_data() or formats.DATE_DEFAULT
jours = max((df - db).days + 1, 0)
self.setText(str(jours) + (jours >= 2 and " jours" or " jour")) | we want to call set_data to manually update | entailment |
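A self-contained illustration of the inclusive day count and French pluralization used above (the dates are illustrative):

```python
import datetime

db = datetime.date(2023, 3, 1)
df = datetime.date(2023, 3, 3)
jours = max((df - db).days + 1, 0)  # 3: both endpoints count, never negative
print(str(jours) + (jours >= 2 and " jours" or " jour"))  # "3 jours"
```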
def runner(self):
"""
Run the necessary methods in the correct order
"""
logging.info('Starting {} analysis pipeline'.format(self.analysistype))
if not self.pipeline:
general = None
for sample in self.runmetadata.samples:
general = getattr(sample, 'general')
if general is None:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
# Run the analyses
Sippr(self, self.cutoff)
# Create the reports
reports = Reports(self)
Reports.reporter(reports, analysistype=self.analysistype)
# Print the metadata
MetadataPrinter(self) | Run the necessary methods in the correct order | entailment |
def same_syllabic_feature(ch1, ch2):
'''Return True if ch1 and ch2 are both vowels or both consonants.'''
if ch1 == '.' or ch2 == '.':
return False
ch1 = 'V' if ch1 in VOWELS else 'C' if ch1 in CONSONANTS else None
ch2 = 'V' if ch2 in VOWELS else 'C' if ch2 in CONSONANTS else None
return ch1 == ch2 | Return True if ch1 and ch2 are both vowels or both consonants. | entailment |
def syllabify(word):
'''Syllabify the given word.'''
word = replace_umlauts(word)
word = apply_T1(word)
word = apply_T2(word)
word = apply_T4(word)
word = apply_T5(word)
word = apply_T6(word)
word = apply_T7(word)
word = replace_umlauts(word, put_back=True)[1:] # FENCEPOST
return word | Syllabify the given word. | entailment |
def apply_T1(word):
'''There is a syllable boundary in front of every CV-sequence.'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if k == 1 and is_consonantal_onset(v):
WORD[k] = '.' + v
elif is_consonant(v[0]) and WORD.get(k + 1, 0):
WORD[k] = v[:-1] + '.' + v[-1]
word = _compile_dict_into_word(WORD)
return word | There is a syllable boundary in front of every CV-sequence. | entailment |
def apply_T2(word):
'''There is a syllable boundary within a sequence VV of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if is_diphthong(v):
continue
if len(v) == 2 and is_vowel(v[0]):
if v[0] != v[1]:
WORD[k] = v[0] + '.' + v[1]
word = _compile_dict_into_word(WORD)
return word | There is a syllable boundary within a sequence VV of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa]. | entailment |
def apply_T4(word): # OPTIMIZE
'''An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if len(v) == 2 and v.endswith(('u', 'y')):
if WORD.get(k + 2, 0):
if not WORD.get(k + 3, 0):
if len(WORD[k + 2]) == 1 and is_consonant(WORD[k + 2]):
WORD[k] = v[0] + '.' + v[1]
elif len(WORD[k + 1]) == 1 and WORD.get(k + 3, 0):
if is_consonant(WORD[k + 3][0]):
WORD[k] = v[0] + '.' + v[1]
elif len(WORD[k + 2]) == 2:
WORD[k] = v[0] + '.' + v[1]
word = _compile_dict_into_word(WORD)
return word | An agglutination diphthong that ends in /u, y/ usually contains a
syllable boundary when -C# or -CCV follow, e.g., [lau.ka.us],
[va.ka.ut.taa]. | entailment |
def apply_T5(word):
'''If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
diphthong, there is a syllable boundary between it and the third vowel,
e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
[säi.e], [oi.om.me].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if len(v) >= 3 and is_vowel(v[0]):
vv = [v.find(i) for i in i_DIPHTHONGS if v.find(i) > 0]
if any(vv):
vv = vv[0]
if vv == v[0]:
WORD[k] = v[:2] + '.' + v[2:]
else:
WORD[k] = v[:vv] + '.' + v[vv:]
word = _compile_dict_into_word(WORD)
return word | If a (V)VVV-sequence contains a VV-sequence that could be an /i/-final
diphthong, there is a syllable boundary between it and the third vowel,
e.g., [raa.ois.sa], [huo.uim.me], [la.eis.sa], [sel.vi.äi.si], [tai.an],
[säi.e], [oi.om.me]. | entailment |
def apply_T6(word):
'''If a VVV-sequence contains a long vowel, there is a syllable boundary
between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if len(v) == 3 and is_vowel(v[0]):
vv = [v.find(i) for i in LONG_VOWELS if v.find(i) > 0]
if any(vv):
vv = vv[0]
if vv == v[0]:
WORD[k] = v[:2] + '.' + v[2:]
else:
WORD[k] = v[:vv] + '.' + v[vv:]
word = _compile_dict_into_word(WORD)
return word | If a VVV-sequence contains a long vowel, there is a syllable boundary
between it and the third vowel, e.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
[mää.yt.te]. | entailment |
def apply_T7(word):
'''If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as].'''
WORD = _split_consonants_and_vowels(word)
for k, v in WORD.iteritems():
if len(v) == 3 and is_vowel(v[0]):
WORD[k] = v[:2] + '.' + v[2:]
word = _compile_dict_into_word(WORD)
return word | If a VVV-sequence does not contain a potential /i/-final diphthong,
there is a syllable boundary between the second and third vowels, e.g.
[kau.an], [leu.an], [kiu.as]. | entailment |
def syllabify(word):
'''Syllabify the given word, whether simplex or complex.'''
compound = not word.isalpha()
syllabify = _syllabify_complex if compound else _syllabify_simplex
syllabifications = list(syllabify(word))
# if variation, order variants from most preferred to least preferred
if len(syllabifications) > 1:
syllabifications = rank(syllabifications)
for word, rules in syllabifications:
yield _post_process(word, rules) | Syllabify the given word, whether simplex or complex. | entailment |
def wsp(word):
'''Return the number of unstressed superheavy syllables.'''
violations = 0
unstressed = []
for w in extract_words(word):
unstressed += w.split('.')[1::2] # even syllables
# include extrametrical odd syllables as potential WSP violations
if w.count('.') % 2 == 0:
unstressed += [w.rsplit('.', 1)[-1], ]
# SHSP
for syll in unstressed:
if re.search(r'[ieaouäöy]{2}[^$ieaouäöy]+', syll, flags=FLAGS):
violations += 1
# # WSP (CVV = heavy)
# for syll in unstressed:
# if re.search(
# ur'[ieaouäöy]{2}|[ieaouäöy]+[^ieaouäöy]+',
# syll, flags=re.I | re.U):
# violations += 1
return violations | Return the number of unstressed superheavy syllables. | entailment |
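The even-syllable slice is easiest to see on a concrete string; a small demonstration with an illustrative syllabified word:

```python
w = 'kor.ke.aa.kin.ta'            # illustrative, already syllabified
print(w.split('.')[1::2])         # even-numbered (unstressed) syllables: ['ke', 'kin']
if w.count('.') % 2 == 0:         # odd syllable count -> extrametrical final syllable
    print(w.rsplit('.', 1)[-1])   # 'ta'
```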
def modifie(self, key: str, value: Any) -> None:
"""Store the modification. `value` should be dumped in DB compatible format."""
if key in self.FIELDS_OPTIONS:
self.modifie_options(key, value)
else:
self.modifications[key] = value | Store the modification. `value` should be dumped in DB compatible format. | entailment |
def modifie_many(self, dic: dict):
"""Convenience function which calls modifie on each element of dic"""
for i, v in dic.items():
self.modifie(i, v) | Convenience function which calls modifie on each element of dic | entailment |
def save(self) -> sql.Executant:
"""Prepare a SQL request to save the current modifications.
Actually returns a LIST of requests (which may be of length one).
Note that it can include modifications on other parts of the data.
After success, the base should be updated.
"""
r = self._dict_to_SQL(self.modifications)
self.modifications.clear()
return r | Prepare a SQL request to save the current modifications.
Actually returns a LIST of requests (which may be of length one).
Note that it can include modifications on other parts of the data.
After success, the base should be updated. | entailment |
def modifie_options(self, field_option, value):
"""Set options in modifications.
All options will be stored, since they should be grouped in the DB."""
options = dict(self["options"] or {}, **{field_option: value})
self.modifications["options"] = options | Set options in modifications.
All options will be stored, since they should be grouped in the DB. | entailment |
def _from_dict_dict(cls, dic):
"""Takes a dict {id : dict_attributes} """
return cls({_convert_id(i): v for i, v in dic.items()}) | Takes a dict {id : dict_attributes} | entailment |
def _from_list_dict(cls, list_dic):
"""Takes a list of dict like objects and uses `champ_id` field as Id"""
return cls({_convert_id(dic[cls.CHAMP_ID]): dict(dic) for dic in list_dic}) | Takes a list of dict like objects and uses `champ_id` field as Id | entailment |
def base_recherche_rapide(self, base, pattern, to_string_hook=None):
"""
Return a collection of acces matching `pattern`.
`to_string_hook` is an optional callable dict -> str mapping a record to a string. Defaults to _record_to_string.
"""
Ac = self.ACCES
if pattern == "*":
return groups.Collection(Ac(base, i) for i in self)
if len(pattern) >= MIN_CHAR_SEARCH: # Needed chars.
sub_patterns = pattern.split(" ")
try:
regexps = tuple(re.compile(sub_pattern, flags=re.I)
for sub_pattern in sub_patterns)
except re.error:
return groups.Collection()
def search(string):
for regexp in regexps:
if not regexp.search(string):
return False
return True
to_string_hook = to_string_hook or self._record_to_string
return groups.Collection(Ac(base, i) for i, p in self.items() if search(to_string_hook(p)))
return groups.Collection() | Return a collection of acces matching `pattern`.
`to_string_hook` is an optional callable dict -> str mapping a record to a string. Defaults to _record_to_string. | entailment |
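The search strategy above boils down to: every whitespace-separated sub-pattern must match the record's string form, case-insensitively. A standalone sketch:

```python
import re

def matches_all(pattern, string):
    # AND over sub-patterns, each matched with re.I as in the method above
    return all(re.search(p, string, flags=re.I) for p in pattern.split())

print(matches_all('dup jean', 'Jean Dupont'))   # True
print(matches_all('dup marie', 'Jean Dupont'))  # False
```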
def select_by_field(self, base, field, value):
"""Return collection of acces whose field equal value"""
Ac = self.ACCES
return groups.Collection(Ac(base, i) for i, row in self.items() if row[field] == value) | Return a collection of acces whose field equals value | entailment |
def select_by_critere(self, base, criteria):
"""
:param base: Reference on whole base
:param criteria: Callable abstractAcces -> Bool, acting as filter
:return: Collection on acces passing the criteria
"""
Ac = self.ACCES
return groups.Collection(Ac(base, i) for i in self if criteria(Ac(base, i))) | :param base: Reference on whole base
:param criteria: Callable abstractAcces -> Bool, acting as filter
:return: Collection on acces passing the criteria | entailment |
def load_from_db(cls, callback_etat=print, out=None):
"""Launch data fetching then load data received.
The method _load_remote_db should be overridden.
If out is given, data is set in it instead of returning a new base object.
"""
dic = cls._load_remote_db(callback_etat)
callback_etat("Chargement...", 2, 3)
if out is None:
return cls(dic)
cls.__init__(out, datas=dic) | Launch data fetching then load data received.
The method _load_remote_db should be overridden.
If out is given, data is set in it instead of returning a new base object. | entailment |
def _parse_text_DB(self, s):
"""Returns a dict of table interpreted from s.
s should be Json string encoding a dict { table_name : [fields_name,...] , [rows,... ] }"""
dic = self.decode_json_str(s)
new_dic = {}
for table_name, (header, rows) in dic.items():
newl = [{c: ligne[i]
for i, c in enumerate(header)} for ligne in rows]
new_dic[table_name] = newl
return new_dic | Returns a dict of tables interpreted from s.
s should be a JSON string encoding a dict { table_name : [[field_name, ...], [row, ...]] } | entailment |
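A runnable illustration of the wire format parsed above, with illustrative table and field names:

```python
import json

s = '{"clients": [["id", "nom"], [[1, "Dupont"], [2, "Martin"]]]}'
# { table_name : [[field names], [rows]] } -> { table_name : [row dicts] }
tables = {name: [dict(zip(header, row)) for row in rows]
          for name, (header, rows) in json.loads(s).items()}
print(tables['clients'])  # [{'id': 1, 'nom': 'Dupont'}, {'id': 2, 'nom': 'Martin'}]
```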
def load_from_local(cls):
"""Load datas from local file."""
try:
with open(cls.LOCAL_DB_PATH, 'rb') as f:
b = f.read()
s = security.protege_data(b, False)
except (FileNotFoundError, KeyError):
logging.exception(cls.__name__)
raise StructureError(
"Erreur dans le chargement de la sauvegarde locale !")
else:
return cls(cls.decode_json_str(s)) | Load datas from local file. | entailment |
def dumps(self):
"""Return a dictionnary of current tables"""
return {table_name: getattr(self, table_name).dumps() for table_name in self.TABLES} | Return a dictionnary of current tables | entailment |
def save_to_local(self, callback_etat=print):
"""
Save the current in-memory base to a local file.
It's a backup, not a convenient way to update data.
:param callback_etat: state callback, taking str, int, int as args
"""
callback_etat("Aquisition...", 0, 3)
d = self.dumps()
s = json.dumps(d, indent=4, cls=formats.JsonEncoder)
callback_etat("Chiffrement...", 1, 3)
s = security.protege_data(s, True)
callback_etat("Enregistrement...", 2, 3)
try:
with open(self.LOCAL_DB_PATH, 'wb') as f:
f.write(s)
except (FileNotFoundError):
logging.exception(self.__class__.__name__)
raise StructureError("Chemin de sauvegarde introuvable !") | Saved current in memory base to local file.
It's a backup, not a convenient way to update datas
:param callback_etat: state callback, taking str,int,int as args | entailment |
def read_cell(self, x, y):
"""
reads the cell at position x and y; puts the default styles in xlwt
"""
cell = self._sheet.row(x)[y]
if self._file.xf_list[
cell.xf_index].background.pattern_colour_index == 64:
self._file.xf_list[
cell.xf_index].background.pattern_colour_index = 9
if self._file.xf_list[
cell.xf_index].background.pattern_colour_index in self.colors.keys():
style = self.colors[self._file.xf_list[
cell.xf_index].background.pattern_colour_index]
else:
style = self.xlwt.easyxf(
'pattern: pattern solid; border: top thin, right thin, bottom thin, left thin;')
style.pattern.pattern_fore_colour = self._file.xf_list[
cell.xf_index].background.pattern_colour_index
self.colors[self._file.xf_list[
cell.xf_index].background.pattern_colour_index] = style
style.font.name = self._file.font_list[
self._file.xf_list[cell.xf_index].font_index].name
style.font.bold = self._file.font_list[
self._file.xf_list[cell.xf_index].font_index].bold
if isinstance(self.header[y], tuple):
header = self.header[y][0]
else:
header = self.header[y]
if self.strip:
if is_str_or_unicode(cell.value):
cell.value = cell.value.strip()
if self.style:
return {header: (cell.value, style)}
else:
return {header: cell.value} | reads the cell at position x and y; puts the default styles in xlwt | entailment |
def write_cell(self, x, y, value, style=None):
"""
writing style and value in the cell of x and y position
"""
if isinstance(style, str):
style = self.xlwt.easyxf(style)
if style:
self._sheet.write(x, y, label=value, style=style)
else:
self._sheet.write(x, y, label=value) | writing style and value in the cell of x and y position | entailment |
def get_string(string):
""" This function checks if a path was given as string, and tries to read the
file and return the string.
"""
truestring = string
if string is not None:
if '/' in string:
if os.path.isfile(string):
try:
with open_(string,'r') as f:
truestring = ' '.join(line.strip() for line in f)
except: pass
if truestring.strip() == '': truestring = None
return truestring | This function checks if a path was given as string, and tries to read the
file and return the string. | entailment |
def get_arguments(options):
""" This function handles and validates the wrapper arguments. """
# These the next couple of lines defines the header of the Help output
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
usage=("""%(prog)s
--------------------------------------------------------------------------------
"""),
description=("""
Service Wrapper
===============
This is the service wrapper script, which is a part of the CGE services.
Read the online manual for help.
A list of all published services can be found at:
cge.cbs.dtu.dk/services
"""), epilog=("""
--------------------------------------------------------------------------------
"""))
#ADDING ARGUMENTS
setarg = parser.add_argument
#SERVICE SPECIFIC ARGUMENTS
if isinstance(options, str):
options = [[x for i,x in enumerate(line.split()) if i in [1,2]] for line in options.split('\n') if len(line)>0]
for o in options:
try:
setarg(o[1], type=str, dest=o[0], default=None, help=SUPPRESS)
except:
None
else:
for o in options:
if o[2] is True:
# Handle negative flags
setarg(o[0], action="store_false", dest=o[1], default=o[2],
help=o[3])
elif o[2] is False:
# Handle positive flags
setarg(o[0], action="store_true", dest=o[1], default=o[2],
help=o[3])
else:
help_ = o[3] if o[2] is None else "%s [%s]"%(o[3], '%(default)s')
setarg(o[0], type=str, dest=o[1], default=o[2],
help=help_)
# VALIDATION OF ARGUMENTS
args = parser.parse_args()
debug.log("ARGS: %s"%args)
return args | This function handles and validates the wrapper arguments. | entailment |
def check_file_type(files):
""" Check whether the input files are in fasta format, reads format or
other/mix formats.
"""
all_are_fasta = True
all_are_reads = True
all_are_empty = True
if sys.version_info < (3, 0):
if isinstance(files, (str, unicode)): files = [files]
else:
if isinstance(files, str): files = [files]
for file_ in files:
debug.log('Checking file type: %s'%file_)
# Check if file is empty
if os.stat(file_).st_size == 0: continue
else: all_are_empty = False
with open_(file_) as f:
fc = f.readline()[0]
if fc != "@": all_are_reads = False
if fc != ">": all_are_fasta = False
if all_are_empty: return 'empty'
elif all_are_fasta: return 'fasta'
elif all_are_reads: return 'fastq'
else: return 'other' | Check whether the input files are in fasta format, fastq (reads) format,
or other/mixed formats. | entailment |
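The classification rests on the first character of each file; a runnable illustration on in-memory contents (file names and reads are illustrative):

```python
samples = {'reads.fq': '@read1\nACGT\n+\n!!!!\n', 'genome.fa': '>chr1\nACGT\n'}
for name, text in samples.items():
    fc = text[0]
    kind = 'fastq' if fc == '@' else 'fasta' if fc == '>' else 'other'
    print(name, kind)  # reads.fq fastq / genome.fa fasta
```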
def make_file_list(upload_path):
""" This function returns list of files in the given dir """
newlist = []
for el in sorted(os.listdir(upload_path)):
if ' ' in el:
raise Exception('Error: Spaces are not allowed in file names!\n')
newlist.append(os.path.normpath(upload_path+'/'+el))
debug.log('InputFiles: %s\n'%newlist)
return newlist | This function returns list of files in the given dir | entailment |
def create_server_rackspace(connection,
distribution,
disk_name,
disk_size,
ami,
region,
key_pair,
instance_type,
instance_name,
tags={},
security_groups=None):
"""
Creates a Rackspace instance and saves its state in a local json file
"""
log_yellow("Creating Rackspace instance...")
flavor = connection.flavors.find(name=instance_type)
image = connection.images.find(name=ami)
server = connection.servers.create(name=instance_name,
flavor=flavor.id,
image=image.id,
region=region,
availability_zone=region,
key_name=key_pair)
while server.status == 'BUILD':
log_yellow("Waiting for build to finish...")
sleep(5)
server = connection.servers.get(server.id)
# check for errors
if server.status != 'ACTIVE':
log_red("Error creating rackspace instance")
exit(1)
# the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
ip_address = server.accessIPv4
if ip_address is None:
log_red('No IP address assigned')
exit(1)
wait_for_ssh(ip_address)
log_green('New server with IP address {0}.'.format(ip_address))
return server | Creates a Rackspace instance and saves its state in a local json file | entailment |
def destroy_rackspace(connection, region, instance_id):
""" terminates the instance """
server = connection.servers.get(instance_id)
log_yellow('deleting rackspace instance ...')
server.delete()
# wait for server to be deleted
try:
while True:
server = connection.servers.get(server.id)
log_yellow('waiting for deletion ...')
sleep(5)
except:
pass
log_green('The server has been deleted') | terminates the instance | entailment |
def get_rackspace_info(connection,
server_id):
""" queries Rackspace for details about a particular server id
"""
server = connection.servers.get(server_id)
data = {}
data['ip_address'] = server.accessIPv4
data['accessIPv4'] = server.accessIPv4
data['accessIPv6'] = server.accessIPv6
data['addresses'] = server.addresses
data['created'] = server.created
data['flavor'] = server.flavor
data['id'] = server.hostId
data['human_id'] = server.human_id
data['image'] = server.image['id']
data['key_name'] = server.key_name
data['state'] = server.status
data['metadata'] = server.metadata
data['name'] = server.name
data['networks'] = server.networks
data['tenant_id'] = server.tenant_id
data['user_id'] = server.user_id
data['cloud_type'] = 'rackspace'
return data | queries Rackspace for details about a particular server id | entailment |
def date_decoder(dic):
"""Add python types decoding. See JsonEncoder"""
if '__date__' in dic:
try:
d = datetime.date(**{c: v for c, v in dic.items() if not c == "__date__"})
except (TypeError, ValueError):
raise json.JSONDecodeError("Corrupted date format !", str(dic), 1)
elif '__datetime__' in dic:
try:
d = datetime.datetime(**{c: v for c, v in dic.items() if not c == "__datetime__"})
except (TypeError, ValueError):
raise json.JSONDecodeError("Corrupted datetime format !", str(dic), 1)
else:
return dic
return d | Add python types decoding. See JsonEncoder | entailment |
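A usage sketch: plug date_decoder into json.loads as an object_hook. The wire format here assumes the matching JsonEncoder emits a "__date__" marker next to year/month/day keys:

```python
import json

s = '{"d": {"__date__": true, "year": 2020, "month": 5, "day": 17}}'
# assumes date_decoder from above is in scope
print(json.loads(s, object_hook=date_decoder))  # {'d': datetime.date(2020, 5, 17)}
```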
def _type_string(label, case=None):
"""Shortcut for string like fields"""
return label, abstractSearch.in_string, lambda s: abstractRender.default(s, case=case), "" | Shortcut for string like fields | entailment |
def _type_bool(label,default=False):
"""Shortcut fot boolean like fields"""
return label, abstractSearch.nothing, abstractRender.boolen, default | Shortcut fot boolean like fields | entailment |
def in_string(objet, pattern):
""" abstractSearch dans une chaine, sans tenir compte de la casse. """
return bool(re.search(pattern, str(objet), flags=re.I)) if objet else False | abstractSearch dans une chaine, sans tenir compte de la casse. | entailment |
def in_date(objet, pattern):
""" abstractSearch dans une date datetime.date"""
if objet:
pattern = re.sub(" ", '', pattern)
objet_str = abstractRender.date(objet)
return bool(re.search(pattern, objet_str))
return False | abstractSearch in a datetime.date | entailment |
def in_dateheure(objet, pattern):
""" abstractSearch dans une date-heure datetime.datetime (cf abstractRender.dateheure) """
if objet:
pattern = re.sub(" ", '', pattern)
objet_str = abstractRender.dateheure(objet)
return bool(re.search(pattern, objet_str))
return False | abstractSearch in a datetime.datetime date-time (cf. abstractRender.dateheure) | entailment |
def in_telephones(objet, pattern):
""" abstractSearch dans une liste de téléphones."""
objet = objet or []
if pattern == '' or not objet:
return False
return max(bool(re.search(pattern, t)) for t in objet) | abstractSearch in a list of telephone numbers. | entailment |
def date(objet):
""" abstractRender d'une date datetime.date"""
if objet:
return "{}/{}/{}".format(objet.day, objet.month, objet.year)
return "" | abstractRender d'une date datetime.date | entailment |
def dateheure(objet):
""" abstractRender d'une date-heure datetime.datetime au format JJ/MM/AAAAàHH:mm """
if objet:
return "{}/{}/{} à {:02}:{:02}".format(objet.day, objet.month, objet.year, objet.hour, objet.minute)
return "" | abstractRender d'une date-heure datetime.datetime au format JJ/MM/AAAAàHH:mm | entailment |
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True) | Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale. | entailment |
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale_, alias) in six.iteritems(aliases):
if locale_ in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list) | Lists the available languages for the given translation domain.
:param domain: the domain to get languages for | entailment |
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj | Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated | entailment |
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale) | Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original | entailment |
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message | Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode | entailment |
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params | Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created | entailment |
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params | Return a dict that only has matching entries in the msgid. | entailment |
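The key-extraction regex is worth seeing on a concrete msgid; a standalone demonstration:

```python
import re

# '100%%' is not picked up because no '(' follows the percent signs
msgid = 'Instance %(name)s failed with %(error)s (100%% reproducible)'
print(re.findall(r'(?:[^%]|^)?%\((\w*)\)[a-z]', msgid))  # ['name', 'error']
```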
def registry_adapter(obj, request):
"""
Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json.
:param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered.
:rtype: :class:`dict`
"""
return {
'query_uri': obj.query_uri,
'success': obj.success,
'has_references': obj.has_references,
'count': obj.count,
'applications': [{
'title': a.title,
'uri': a.uri,
'service_url': a.service_url,
'success': a.success,
'has_references': a.has_references,
'count': a.count,
'items': [{
'uri': i.uri,
'title': i.title
} for i in a.items] if a.items is not None else None
} for a in obj.applications] if obj.applications is not None else None
} | Adapter for rendering a :class:`pyramid_urireferencer.models.RegistryResponse` to json.
:param pyramid_urireferencer.models.RegistryResponse obj: The response to be rendered.
:rtype: :class:`dict` | entailment |
def application_adapter(obj, request):
"""
Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.
:param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
:rtype: :class:`dict`
"""
return {
'title': obj.title,
'uri': obj.uri,
'service_url': obj.service_url,
'success': obj.success,
'has_references': obj.has_references,
'count': obj.count,
'items': [{
'uri': i.uri,
'title': i.title
} for i in obj.items] if obj.items is not None else None
} | Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.
:param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
:rtype: :class:`dict` | entailment |
def replace_umlauts(word, put_back=False): # use translate()
'''If put_back is True, put in umlauts; else, take them out!'''
if put_back:
word = word.replace('A', 'ä')
word = word.replace('O', 'ö')
else:
word = word.replace('ä', 'A').replace('\xc3\xa4', 'A')
word = word.replace('ö', 'O').replace('\xc3\xb6', 'O')
return word | If put_back is True, put in umlauts; else, take them out! | entailment |
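The inline "# use translate()" note suggests str.translate(); a Python 3 sketch of the same mapping (the '\xc3\xa4'/'\xc3\xb6' byte-pair handling above is a Python 2 artifact and is not reproduced):

```python
TAKE_OUT = str.maketrans({'ä': 'A', 'ö': 'O'})
PUT_BACK = str.maketrans({'A': 'ä', 'O': 'ö'})
print('hölmö'.translate(TAKE_OUT))  # hOlmO
print('hOlmO'.translate(PUT_BACK))  # hölmö
```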
def get_query_align(hit, contig):
"""
Function for extracting extra sequence data for the query
alignment if the full reference length is not covered
"""
# Getting data needed to extract sequences
query_seq = hit['query_string']
homo_seq = hit['homo_string']
sbjct_start = int(hit['sbjct_start'])
sbjct_end = int(hit['sbjct_end'])
query_start = int(hit['query_start'])
query_end = int(hit['query_end'])
length = int(hit['sbjct_length'])
# If the alignment doesn't start at the first position, data is
# added to the beginning
if sbjct_start != 1:
missing = sbjct_start - 1
if(query_start >= missing and hit['strand'] != 1
or hit['strand'] == 1 and missing <= (len(contig) - query_end)):
# Getting the query sequence.
# If the hit is on the other strand the characters
# are reversed.
if hit['strand'] == 1:
start_pos = query_end
end_pos = query_end + missing
chars = contig[start_pos:end_pos]
chars = Blaster.reversecomplement(chars)
else:
start_pos = query_start - missing - 1
end_pos = query_start - 1
chars = contig[start_pos:end_pos]
query_seq = chars + str(query_seq)
else:
# Getting the query sequence.
# If the hit is on the other strand the characters
# are reversed.
if hit['strand'] == 1:
if query_end == len(contig):
query_seq = "-" * missing + str(query_seq)
else:
start_pos = query_end
chars = contig[start_pos:]
chars = Blaster.reversecomplement(chars)
query_seq = ("-" * (missing - len(chars))
+ chars + str(query_seq))
elif query_start < 3:
query_seq = "-" * missing + str(query_seq)
else:
end_pos = query_start - 2
chars = contig[0:end_pos]
query_seq = ("-" * (missing - len(chars))
+ chars + str(query_seq))
# Adding to the homo sequence
spaces = " " * missing
homo_seq = str(spaces) + str(homo_seq)
# If the alignment doesn't end at the last position, data is
# added to the end
if sbjct_end < length:
missing = length - sbjct_end
if(missing <= (len(contig) - query_end) and hit['strand'] != 1
or hit['strand'] == 1 and query_start >= missing):
# Getting the query sequence.
# If the hit is on the other strand the characters
# are reversed.
if hit['strand'] == 1:
start_pos = query_start - missing - 1
end_pos = query_start - 1
chars = contig[start_pos:end_pos]
chars = Blaster.reversecomplement(chars)
else:
start_pos = query_end
end_pos = query_end + missing
chars = contig[start_pos:end_pos]
query_seq = query_seq + chars
else:
# If the hit is on the other strand the characters are reversed
if hit['strand'] == 1:
if query_start < 3:
query_seq = query_seq + "-" * missing
else:
end_pos = query_start - 2
chars = contig[0:end_pos]
chars = Blaster.reversecomplement(chars)
query_seq = (query_seq
+ chars + "-" * (missing - len(chars)))
elif query_end == len(contig):
query_seq = query_seq + "-" * missing
else:
start_pos = query_end
chars = contig[start_pos:]
query_seq = query_seq + chars + "-" * (missing - len(chars))
# Adding to the homo sequence
spaces = " " * int(missing)
homo_seq = str(homo_seq) + str(spaces)
return query_seq, homo_seq | Function for extracting extra sequence data for the query
alignment if the full reference length is not covered | entailment |
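Blaster.reversecomplement is called above but not shown; a minimal, hypothetical sketch of what such a helper typically does (not the library's code):

```python
def revcomp(seq):
    comp = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    # unknown bases fall back to 'N'
    return ''.join(comp.get(base, 'N') for base in reversed(seq.upper()))

print(revcomp('ATGC'))  # GCAT
```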
def get_ordering_for_column(self, column, direction):
"""
Returns a tuple of lookups to order by for the given column
and direction. Direction is an integer, either -1, 0 or 1.
"""
if direction == 0:
return ()
if column in self.orderings:
ordering = self.orderings[column]
else:
field = self.get_field(column)
if field is None:
return ()
ordering = column
if not isinstance(ordering, (tuple, list)):
ordering = [ordering]
if direction == 1:
return ordering
return [lookup[1:] if lookup[0] == '-' else '-' + lookup
for lookup in ordering] | Returns a tuple of lookups to order by for the given column
and direction. Direction is an integer, either -1, 0 or 1. | entailment |
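A small demonstration of the direction flip in the last line: direction -1 negates each lookup by toggling the leading '-':

```python
ordering = ['name', '-created']
flipped = [lookup[1:] if lookup[0] == '-' else '-' + lookup for lookup in ordering]
print(flipped)  # ['-name', 'created']
```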
def model_to_json(self, object, cleanup=True):
"""Take a model instance and return it as a json struct"""
model_name = type(object).__name__
if model_name not in self.swagger_dict['definitions']:
raise ValidationError("Swagger spec has no definition for model %s" % model_name)
model_def = self.swagger_dict['definitions'][model_name]
log.debug("Marshalling %s into json" % model_name)
m = marshal_model(self.spec, model_def, object)
if cleanup:
self.cleanup_model(m)
return m | Take a model instance and return it as a json struct | entailment |
def json_to_model(self, model_name, j):
"""Take a json strust and a model name, and return a model instance"""
if model_name not in self.swagger_dict['definitions']:
raise ValidationError("Swagger spec has no definition for model %s" % model_name)
model_def = self.swagger_dict['definitions'][model_name]
log.debug("Unmarshalling json into %s" % model_name)
return unmarshal_model(self.spec, model_def, j) | Take a json struct and a model name, and return a model instance | entailment
def validate(self, model_name, object):
"""Validate an object against its swagger model"""
if model_name not in self.swagger_dict['definitions']:
raise ValidationError("Swagger spec has no definition for model %s" % model_name)
model_def = self.swagger_dict['definitions'][model_name]
log.debug("Validating %s" % model_name)
return validate_schema_object(self.spec, model_def, object) | Validate an object against its swagger model | entailment |
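A hedged round trip through the three wrapper methods above; 'spec' and the 'Pet' model are illustrative names, not taken from the source:

pet = Pet(id=1, name='Rex')                    # hypothetical bravado-core model
payload = spec.model_to_json(pet)              # model instance -> plain dict
spec.validate('Pet', payload)                  # raises ValidationError on mismatch
same_pet = spec.json_to_model('Pet', payload)  # dict -> model instance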
def call_on_each_endpoint(self, callback):
"""Find all server endpoints defined in the swagger spec and calls 'callback' for each,
with an instance of EndpointData as argument.
"""
if 'paths' not in self.swagger_dict:
return
for path, d in list(self.swagger_dict['paths'].items()):
for method, op_spec in list(d.items()):
data = EndpointData(path, method)
# Which server method handles this endpoint?
if 'x-bind-server' not in op_spec:
if 'x-no-bind-server' in op_spec:
# That route should not be auto-generated
log.info("Skipping generation of %s %s" % (method, path))
continue
else:
raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
data.handler_server = op_spec['x-bind-server']
# Make sure that endpoint only produces 'application/json'
if 'produces' not in op_spec:
raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
if len(op_spec['produces']) != 1:
raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
if op_spec['produces'][0] == 'application/json':
data.produces_json = True
elif op_spec['produces'][0] == 'text/html':
data.produces_html = True
else:
raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))
# Which client method handles this endpoint?
if 'x-bind-client' in op_spec:
data.handler_client = op_spec['x-bind-client']
# Should we decorate the server handler?
if 'x-decorate-server' in op_spec:
data.decorate_server = op_spec['x-decorate-server']
# Should we manipulate the requests parameters?
if 'x-decorate-request' in op_spec:
data.decorate_request = op_spec['x-decorate-request']
# Generate a bravado-core operation object
data.operation = Operation.from_spec(self.spec, path, method, op_spec)
# Figure out how parameters are passed: one json in body? one or
# more values in query?
if 'parameters' in op_spec:
params = op_spec['parameters']
for p in params:
if p['in'] == 'body':
data.param_in_body = True
if p['in'] == 'query':
data.param_in_query = True
if p['in'] == 'path':
data.param_in_path = True
if data.param_in_path:
# Substitute {...} with <...> in path, to make a Flask friendly path
data.path = data.path.replace('{', '<').replace('}', '>')
if data.param_in_body and data.param_in_query:
raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
else:
data.no_params = True
callback(data) | Find all server endpoints defined in the swagger spec and call 'callback' for each,
with an instance of EndpointData as argument. | entailment |
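A sketch of a callback one might pass to call_on_each_endpoint, registering each endpoint on a Flask app; 'app', 'swagger_api' and 'resolve_handler' are assumed names, and EndpointData is assumed to keep the path and method it was constructed with:

def register_route(data):
    view = resolve_handler(data.handler_server)  # e.g. import by dotted path
    app.add_url_rule(data.path,
                     endpoint='{}_{}'.format(data.method, data.path),
                     view_func=view,
                     methods=[data.method.upper()])

swagger_api.call_on_each_endpoint(register_route)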
def main(args=None):
"""Buffer stdin and flush, and avoid incomplete files."""
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
'--binary',
dest='mode',
action='store_const',
const="wb",
default="w",
help='write in binary mode')
parser.add_argument(
'output', metavar='FILE', type=unicode, help='Output file')
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s')
args = parser.parse_args(args or sys.argv[1:])
with open(args.output, args.mode) as fd:
for line in sys.stdin:
fd.write(line) | Buffer stdin and flush, and avoid incomplete files. | entailment |
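The 'avoid incomplete files' goal is commonly achieved by writing to a temporary path and renaming it into place only after a clean finish; a sketch of that pattern under the same stdin-copy assumptions (the '.tmp' suffix is an arbitrary choice):

import os
import sys

def buffered_copy(dest, mode="w"):
    tmp = dest + '.tmp'
    with open(tmp, mode) as fd:
        for line in sys.stdin:
            fd.write(line)
    os.rename(tmp, dest)  # atomic on POSIX: readers never see a partial file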
def basic_c_defines(
layout,
keyboard_prefix="KEY_",
led_prefix="LED_",
sysctrl_prefix="SYS_",
cons_prefix="CONS_",
code_suffix=True,
all_caps=True,
space_char="_"
):
'''
Generates a list of C defines that can be used to generate a header file
@param layout: Layout object
@keyboard_prefix: Prefix used for to_hid_keyboard
@led_prefix: Prefix used for to_hid_led
@sysctrl_prefix: Prefix used for to_hid_sysctrl
@cons_prefix: Prefix used for to_hid_consumer
@code_suffix: Append _<usb code> to each name
@all_caps: Set to true if labels should be converted to all caps
@space_char: Character to replace space with
@returns: List of C tuples (<name>, <number>) that can be used to generate C-style defines. Each section has its own list.
'''
# Keyboard Codes
keyboard_defines = []
for code, name in layout.json()['to_hid_keyboard'].items():
new_name = "{}{}".format(keyboard_prefix, name.replace(' ', space_char))
if all_caps:
new_name = new_name.upper()
if code_suffix:
new_name = "{}_{}".format(new_name, int(code, 0))
define = (new_name, code)
keyboard_defines.append(define)
# LED Codes
led_defines = []
for code, name in layout.json()['to_hid_led'].items():
new_name = "{}{}".format(led_prefix, name.replace(' ', space_char))
if all_caps:
new_name = new_name.upper()
if code_suffix:
new_name = "{}_{}".format(new_name, int(code, 0))
define = (new_name, code)
led_defines.append(define)
# System Control Codes
sysctrl_defines = []
for code, name in layout.json()['to_hid_sysctrl'].items():
new_name = "{}{}".format(sysctrl_prefix, name.replace(' ', space_char))
if all_caps:
new_name = new_name.upper()
if code_suffix:
new_name = "{}_{}".format(new_name, int(code, 0))
define = (new_name, code)
sysctrl_defines.append(define)
# Consumer Codes
cons_defines = []
for code, name in layout.json()['to_hid_consumer'].items():
new_name = "{}{}".format(cons_prefix, name.replace(' ', space_char))
if all_caps:
new_name = new_name.upper()
if code_suffix:
new_name = "{}_{}".format(new_name, int(code, 0))
define = (new_name, code)
cons_defines.append(define)
# Return list of list of tuples
defines = [keyboard_defines, led_defines, sysctrl_defines, cons_defines]
return defines | Generates a list of C defines that can be used to generate a header file
@param layout: Layout object
@keyboard_prefix: Prefix used for to_hid_keyboard
@led_prefix: Prefix used for to_hid_led
@sysctrl_prefix: Prefix used for to_hid_sysctrl
@cons_prefix: Prefix used for to_hid_consumer
@code_suffix: Append _<usb code> to each name
@all_caps: Set to true if labels should be converted to all caps
@space_char: Character to replace space with
@returns: List of C tuples (<name>, <number>) that can be used to generate C-style defines. Each section has its own list. | entailment
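The (name, code) tuples are meant to be rendered into C defines as a final step; a hedged sketch of that rendering, assuming a layout object compatible with the function above:

for section in basic_c_defines(layout):   # keyboard, led, sysctrl, consumer
    for name, code in section:
        print("#define {} {}".format(name, code))
# e.g. an 'Esc' entry at 0x29 would print: #define KEY_ESC_41 0x29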
def new_email_marketing_campaign(self, name, email_content, from_email,
from_name, reply_to_email, subject,
text_content, address,
is_view_as_webpage_enabled=False,
view_as_web_page_link_text='',
view_as_web_page_text='',
is_permission_reminder_enabled=False,
permission_reminder_text=''):
"""Create a Constant Contact email marketing campaign.
Returns an EmailMarketingCampaign object.
"""
url = self.api.join(self.EMAIL_MARKETING_CAMPAIGN_URL)
inlined_email_content = self.inline_css(email_content)
minified_email_content = html_minify(inlined_email_content)
worked_around_email_content = work_around(minified_email_content)
data = {
'name': name,
'subject': subject,
'from_name': from_name,
'from_email': from_email,
'reply_to_email': reply_to_email,
'email_content': worked_around_email_content,
'email_content_format': 'HTML',
'text_content': text_content,
'message_footer': {
'organization_name': address['organization_name'],
'address_line_1': address['address_line_1'],
'address_line_2': address['address_line_2'],
'address_line_3': address['address_line_3'],
'city': address['city'],
'state': address['state'],
'international_state': address['international_state'],
'postal_code': address['postal_code'],
'country': address['country']
},
'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
'view_as_web_page_link_text': view_as_web_page_link_text,
'view_as_web_page_text': view_as_web_page_text,
'is_permission_reminder_enabled': is_permission_reminder_enabled,
'permission_reminder_text': permission_reminder_text
}
response = url.post(data=json.dumps(data),
headers={'content-type': 'application/json'})
self.handle_response_status(response)
return EmailMarketingCampaign.objects.create(data=response.json()) | Create a Constant Contact email marketing campaign.
Returns an EmailMarketingCampaign object. | entailment |
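The address argument must provide every key the message_footer block reads; a sketch of the expected shape with placeholder values:

address = {
    'organization_name': 'Example Org',
    'address_line_1': '123 Main St',
    'address_line_2': '',
    'address_line_3': '',
    'city': 'Springfield',
    'state': 'IL',
    'international_state': '',
    'postal_code': '62701',
    'country': 'US',
}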
def update_email_marketing_campaign(self, email_marketing_campaign,
name, email_content, from_email,
from_name, reply_to_email, subject,
text_content, address,
is_view_as_webpage_enabled=False,
view_as_web_page_link_text='',
view_as_web_page_text='',
is_permission_reminder_enabled=False,
permission_reminder_text=''):
"""Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object.
"""
url = self.api.join(
'/'.join([self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id)]))
inlined_email_content = self.inline_css(email_content)
minified_email_content = html_minify(inlined_email_content)
worked_around_email_content = work_around(minified_email_content)
data = {
'name': name,
'subject': subject,
'from_name': from_name,
'from_email': from_email,
'reply_to_email': reply_to_email,
'email_content': worked_around_email_content,
'email_content_format': 'HTML',
'text_content': text_content,
'message_footer': {
'organization_name': address['organization_name'],
'address_line_1': address['address_line_1'],
'address_line_2': address['address_line_2'],
'address_line_3': address['address_line_3'],
'city': address['city'],
'state': address['state'],
'international_state': address['international_state'],
'postal_code': address['postal_code'],
'country': address['country']
},
'is_view_as_webpage_enabled': is_view_as_webpage_enabled,
'view_as_web_page_link_text': view_as_web_page_link_text,
'view_as_web_page_text': view_as_web_page_text,
'is_permission_reminder_enabled': is_permission_reminder_enabled,
'permission_reminder_text': permission_reminder_text
}
response = url.put(data=json.dumps(data),
headers={'content-type': 'application/json'})
self.handle_response_status(response)
email_marketing_campaign.data = response.json()
email_marketing_campaign.save()
return email_marketing_campaign | Update a Constant Contact email marketing campaign.
Returns the updated EmailMarketingCampaign object. | entailment |
def delete_email_marketing_campaign(self, email_marketing_campaign):
"""Deletes a Constant Contact email marketing campaign.
"""
url = self.api.join('/'.join([
self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id)]))
response = url.delete()
self.handle_response_status(response)
return response | Deletes a Constant Contact email marketing campaign. | entailment |
def inline_css(self, html):
"""Inlines CSS defined in external style sheets.
"""
premailer = Premailer(html)
inlined_html = premailer.transform(pretty_print=True)
return inlined_html | Inlines CSS defined in external style sheets. | entailment |
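A small illustration of the premailer transform: rules from a <style> block are pushed into inline style attributes (exact output markup varies by premailer version):

html = '<style>h1 { color: red; }</style><h1>Title</h1>'
print(Premailer(html).transform())
# roughly: <html><head></head><body><h1 style="color:red">Title</h1></body></html>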
def preview_email_marketing_campaign(self, email_marketing_campaign):
"""Returns HTML and text previews of an EmailMarketingCampaign.
"""
url = self.api.join('/'.join([
self.EMAIL_MARKETING_CAMPAIGN_URL,
str(email_marketing_campaign.constant_contact_id),
'preview']))
response = url.get()
self.handle_response_status(response)
return (response.json()['preview_email_content'],
response.json()['preview_text_content']) | Returns HTML and text previews of an EmailMarketingCampaign. | entailment |
def pre_save(cls, sender, instance, *args, **kwargs):
"""Pull constant_contact_id out of data.
"""
instance.constant_contact_id = str(instance.data['id']) | Pull constant_contact_id out of data. | entailment |
def pre_delete(cls, sender, instance, *args, **kwargs):
"""Deletes the CC email marketing campaign associated with me.
"""
cc = ConstantContact()
response = cc.delete_email_marketing_campaign(instance)
response.raise_for_status() | Deletes the CC email marketing campaign associated with me. | entailment |
def runner(self):
"""
Run the necessary methods in the correct order
"""
printtime('Starting {} analysis pipeline'.format(self.analysistype), self.starttime)
if not self.pipeline:
# If the metadata has been passed from the method script, self.pipeline must still be false in order to
# get Sippr() to function correctly, but the metadata shouldn't be recreated
try:
_ = vars(self.runmetadata)['samples']
except KeyError:
# Create the objects to be used in the analyses
objects = Objectprep(self)
objects.objectprep()
self.runmetadata = objects.samples
# Run the analyses
# Sippr(self, self.cutoff)
ProbeSippr(self, self.cutoff)
#
# self.attributer()
# Create the reports
# self.sipprverse_reporter()
# Print the metadata
printer = MetadataPrinter(self)
printer.printmetadata()
quit() | Run the necessary methods in the correct order | entailment |
def send_email(recipients, subject, text_content=None, html_content=None, from_email=None, use_base_template=True, category=None, fail_silently=False, language=None, cc=None, bcc=None, attachments=None, headers=None, bypass_queue=False, bypass_hijacking=False, attach_files=None):
"""
Will send a multi-format email to recipients. Email may be queued through celery
"""
from django.conf import settings
if not bypass_queue and hasattr(settings, 'MAILING_USE_CELERY') and settings.MAILING_USE_CELERY:
from celery.execute import send_task
return send_task('mailing.queue_send_email',[recipients, subject, text_content, html_content, from_email, use_base_template, category, fail_silently, language if language else translation.get_language(), cc, bcc, attachments, headers, bypass_hijacking, attach_files])
else:
header_category_value = '%s%s' % (settings.MAILING_HEADER_CATEGORY_PREFIX if hasattr(settings, 'MAILING_HEADER_CATEGORY_PREFIX') else '', category)
# Check for sendgrid support and add category header
# --------------------------------
if hasattr(settings, 'MAILING_USE_SENDGRID'):
send_grid_support = settings.MAILING_USE_SENDGRID
else:
send_grid_support = False
if not headers:
headers = dict()
if send_grid_support and category:
headers['X-SMTPAPI'] = '{"category": "%s"}' % header_category_value
# Check for Mailgun support and add label header
# --------------------------------
if hasattr(settings, 'MAILING_USE_MAILGUN'):
mailgun_support = settings.MAILING_USE_MAILGUN
else:
mailgun_support = False
if not headers:
headers = dict()
if mailgun_support and category:
headers['X-Mailgun-Tag'] = header_category_value
# Ensure recipients are in a list
# --------------------------------
if isinstance(recipients, basestring):
recipients_list = [recipients]
else:
recipients_list = recipients
# Check if we need to hijack the email
# --------------------------------
if hasattr(settings, 'MAILING_MAILTO_HIJACK') and not bypass_hijacking:
headers['X-MAILER-ORIGINAL-MAILTO'] = ','.join(recipients_list)
recipients_list = [settings.MAILING_MAILTO_HIJACK]
if not subject:
raise MailerMissingSubjectError('Subject not supplied')
# Send ascii, html or multi-part email
# --------------------------------
if text_content or html_content:
if use_base_template:
prev_language = translation.get_language()
language and translation.activate(language)
text_content = render_to_string('mailing/base.txt', {'mailing_text_body': text_content, 'mailing_subject': subject, 'settings': settings}) if text_content else None
html_content = render_to_string('mailing/base.html', {'mailing_html_body': html_content, 'mailing_subject': subject, 'settings': settings}) if html_content else None
translation.activate(prev_language)
msg = EmailMultiAlternatives(subject, text_content if text_content else html_content, from_email if from_email else settings.DEFAULT_FROM_EMAIL, recipients_list, cc=cc, bcc=bcc, attachments=attachments, headers = headers)
if html_content and text_content:
msg.attach_alternative(html_content, "text/html")
elif html_content: # Only HTML
msg.content_subtype = "html"
# Attach files through attach_files helper
# --------------------------------
if attach_files:
for att in attach_files: # attachments are tuples of (filepath, mimetype, filename)
with open(att[0], 'rb') as f:
content = f.read()
msg.attach(att[2], content, att[1])
# Send email
# --------------------------------
msg.send(fail_silently=fail_silently)
else:
raise MailerInvalidBodyError('No text or html body supplied.') | Will send a multi-format email to recipients. Email may be queued through celery | entailment |
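A hedged example call matching the attach_files contract read by the loop above, where each entry is a (filepath, mimetype, filename) tuple:

send_email(
    ['user@example.com'],
    'Monthly report',
    text_content='See the attached PDF.',
    attach_files=[('/tmp/report.pdf', 'application/pdf', 'report.pdf')],
)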
def initialize_connections(self, scopefunc=None):
"""
Initialize a database connection for each connection string
defined in the configuration file
"""
for connection_name, connection_string in\
self.app.config['FLASK_PHILO_SQLALCHEMY'].items():
engine = create_engine(connection_string)
session = scoped_session(sessionmaker(), scopefunc=scopefunc)
session.configure(bind=engine)
self.connections[connection_name] = Connection(engine, session) | Initialize a database connection for each connection string
defined in the configuration file | entailment |
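A sketch of the configuration block this method iterates over; the connection names and URLs are illustrative:

FLASK_PHILO_SQLALCHEMY = {
    'DEFAULT': 'postgresql://user:password@localhost/app',
    'REPLICA': 'postgresql://user:password@replica.local/app',
}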
def sort(self, attribut, order=False):
"""
Implements a sort by attribute.
:param str attribut: Name of the field to sort on
:param bool order: Ascending or descending order
"""
value_default = formats.ASSOCIATION[attribut][3]
if type(value_default) is str: # case insensitive sort
get = lambda d : (d[attribut] or value_default).casefold()
elif type(value_default) is dict: #can't sort dicts
def get(d):
u = d[attribut] or value_default
return [str(u[i]) for i in sorted(u.keys())]
else:
get = lambda d : d[attribut] or value_default
list.sort(self, key=get, reverse=order) | Implements a sort by attribute.
:param str attribut: Name of the field to sort on
:param bool order: Ascending or descending order | entailment
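A standalone illustration of the case-insensitive key built for string attributes; formats.ASSOCIATION is external, so a plain empty-string default stands in:

rows = [{'nom': 'bernard'}, {'nom': 'Alice'}, {'nom': None}]
value_default = ''  # stand-in for formats.ASSOCIATION[attribut][3]
rows.sort(key=lambda d: (d['nom'] or value_default).casefold())
# -> [{'nom': None}, {'nom': 'Alice'}, {'nom': 'bernard'}]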