repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
Kortemme-Lab/klab | klab/bio/fasta.py | FASTA.get_sequences | def get_sequences(self, pdb_id = None):
'''Create Sequence objects for each FASTA sequence.'''
sequences = {}
if pdb_id:
for chain_id, sequence in self.get(pdb_id, {}).iteritems():
sequences[chain_id] = Sequence.from_sequence(chain_id, sequence)
else:
for pdb_id, v in self.iteritems():
sequences[pdb_id] = {}
for chain_id, sequence in v.iteritems():
sequences[pdb_id][chain_id] = Sequence.from_sequence(chain_id, sequence)
return sequences | python | def get_sequences(self, pdb_id = None):
'''Create Sequence objects for each FASTA sequence.'''
sequences = {}
if pdb_id:
for chain_id, sequence in self.get(pdb_id, {}).iteritems():
sequences[chain_id] = Sequence.from_sequence(chain_id, sequence)
else:
for pdb_id, v in self.iteritems():
sequences[pdb_id] = {}
for chain_id, sequence in v.iteritems():
sequences[pdb_id][chain_id] = Sequence.from_sequence(chain_id, sequence)
return sequences | [
"def",
"get_sequences",
"(",
"self",
",",
"pdb_id",
"=",
"None",
")",
":",
"sequences",
"=",
"{",
"}",
"if",
"pdb_id",
":",
"for",
"chain_id",
",",
"sequence",
"in",
"self",
".",
"get",
"(",
"pdb_id",
",",
"{",
"}",
")",
".",
"iteritems",
"(",
")",
":",
"sequences",
"[",
"chain_id",
"]",
"=",
"Sequence",
".",
"from_sequence",
"(",
"chain_id",
",",
"sequence",
")",
"else",
":",
"for",
"pdb_id",
",",
"v",
"in",
"self",
".",
"iteritems",
"(",
")",
":",
"sequences",
"[",
"pdb_id",
"]",
"=",
"{",
"}",
"for",
"chain_id",
",",
"sequence",
"in",
"v",
".",
"iteritems",
"(",
")",
":",
"sequences",
"[",
"pdb_id",
"]",
"[",
"chain_id",
"]",
"=",
"Sequence",
".",
"from_sequence",
"(",
"chain_id",
",",
"sequence",
")",
"return",
"sequences"
]
| Create Sequence objects for each FASTA sequence. | [
"Create",
"Sequence",
"objects",
"for",
"each",
"FASTA",
"sequence",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fasta.py#L175-L186 | train |
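A minimal sketch of the nested mapping `get_sequences` builds when no `pdb_id` filter is given — `{pdb_id: {chain_id: sequence}}` — using plain tuples as stand-ins for the klab `Sequence` objects (an assumption, since `Sequence.from_sequence` is not shown in this row):

```python
fasta_like = {
    "1ABC": {"A": "MKTAYIAK", "B": "GSSGSSGS"},
    "2XYZ": {"A": "MADEEKLP"},
}

sequences = {}
for pdb_id, chains in fasta_like.items():
    sequences[pdb_id] = {}
    for chain_id, seq in chains.items():
        # Stand-in for Sequence.from_sequence(chain_id, seq)
        sequences[pdb_id][chain_id] = (chain_id, seq)

print(sequences["1ABC"]["A"])  # ('A', 'MKTAYIAK')
```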
Kortemme-Lab/klab | klab/bio/fasta.py | FASTA.get_chain_ids | def get_chain_ids(self, pdb_id = None, safe_call = False):
'''If the FASTA file only has one PDB ID, pdb_id does not need to be specified. Otherwise, the list of chain identifiers for pdb_id is returned.'''
if pdb_id == None and len(self.keys()) == 1:
return self[self.keys()[0]].keys()
pdb_id = pdb_id.upper()
if not self.get(pdb_id):
if not safe_call:
raise Exception("FASTA object does not contain sequences for PDB %s." % pdb_id)
else:
return []
return self[pdb_id].keys() | python | def get_chain_ids(self, pdb_id = None, safe_call = False):
'''If the FASTA file only has one PDB ID, pdb_id does not need to be specified. Otherwise, the list of chain identifiers for pdb_id is returned.'''
if pdb_id == None and len(self.keys()) == 1:
return self[self.keys()[0]].keys()
pdb_id = pdb_id.upper()
if not self.get(pdb_id):
if not safe_call:
raise Exception("FASTA object does not contain sequences for PDB %s." % pdb_id)
else:
return []
return self[pdb_id].keys() | [
"def",
"get_chain_ids",
"(",
"self",
",",
"pdb_id",
"=",
"None",
",",
"safe_call",
"=",
"False",
")",
":",
"if",
"pdb_id",
"==",
"None",
"and",
"len",
"(",
"self",
".",
"keys",
"(",
")",
")",
"==",
"1",
":",
"return",
"self",
"[",
"self",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
".",
"keys",
"(",
")",
"pdbID",
"=",
"pdbID",
".",
"upper",
"(",
")",
"if",
"not",
"self",
".",
"get",
"(",
"pdbID",
")",
":",
"if",
"not",
"safe_call",
":",
"raise",
"Exception",
"(",
"\"FASTA object does not contain sequences for PDB %s.\"",
"%",
"pdbID",
")",
"else",
":",
"return",
"[",
"]",
"return",
"self",
"[",
"pdbID",
"]",
".",
"keys",
"(",
")"
]
| If the FASTA file only has one PDB ID, pdb_id does not need to be specified. Otherwise, the list of chain identifiers for pdb_id is returned. | [
"If",
"the",
"FASTA",
"file",
"only",
"has",
"one",
"PDB",
"ID",
"pdb_id",
"does",
"not",
"need",
"to",
"be",
"specified",
".",
"Otherwise",
"the",
"list",
"of",
"chains",
"identifiers",
"for",
"pdb_id",
"is",
"returned",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fasta.py#L191-L203 | train |
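The `safe_call` flag implements a raise-or-return-empty lookup idiom. A self-contained sketch of the same pattern (the real method also has the single-PDB shortcut shown above):

```python
def chain_ids_for(fasta_like, pdb_id, safe_call=False):
    """Return the chain IDs for pdb_id; raise unless safe_call is set."""
    entry = fasta_like.get(pdb_id.upper())
    if not entry:
        if not safe_call:
            raise KeyError("No sequences for PDB %s." % pdb_id)
        return []
    return list(entry.keys())

print(chain_ids_for({"1ABC": {"A": "MKT", "B": "GSS"}}, "1abc"))  # ['A', 'B']
print(chain_ids_for({}, "2XYZ", safe_call=True))                  # []
```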
Kortemme-Lab/klab | klab/bio/fasta.py | FASTA.match | def match(self, other):
''' This is a noisy terminal-printing function at present since there is no need to make it a proper API function.'''
colortext.message("FASTA Match")
for frompdbID, fromchains in sorted(self.iteritems()):
matched_pdbs = {}
matched_chains = {}
for fromchain, fromsequence in fromchains.iteritems():
for topdbID, tochains in other.iteritems():
for tochain, tosequence in tochains.iteritems():
if fromsequence == tosequence:
matched_pdbs[topdbID] = matched_pdbs.get(topdbID, set())
matched_pdbs[topdbID].add(fromchain)
matched_chains[fromchain] = matched_chains.get(fromchain, [])
matched_chains[fromchain].append((topdbID, tochain))
foundmatches = []
colortext.printf(" %s" % frompdbID, color="silver")
for mpdbID, mchains in matched_pdbs.iteritems():
if mchains == set(fromchains.keys()):
foundmatches.append(mpdbID)
colortext.printf(" PDB %s matched PDB %s on all chains" % (mpdbID, frompdbID), color="white")
if foundmatches:
for fromchain, fromsequence in fromchains.iteritems():
colortext.printf(" %s" % (fromchain), color = "silver")
colortext.printf(" %s" % (fromsequence), color = self.unique_sequences[fromsequence])
mstr = []
for mchain in matched_chains[fromchain]:
if mchain[0] in foundmatches:
mstr.append("%s chain %s" % (mchain[0], mchain[1]))
colortext.printf(" Matches: %s" % ", ".join(mstr))
else:
colortext.error(" No matches found.") | python | def match(self, other):
''' This is a noisy terminal-printing function at present since there is no need to make it a proper API function.'''
colortext.message("FASTA Match")
for frompdbID, fromchains in sorted(self.iteritems()):
matched_pdbs = {}
matched_chains = {}
for fromchain, fromsequence in fromchains.iteritems():
for topdbID, tochains in other.iteritems():
for tochain, tosequence in tochains.iteritems():
if fromsequence == tosequence:
matched_pdbs[topdbID] = matched_pdbs.get(topdbID, set())
matched_pdbs[topdbID].add(fromchain)
matched_chains[fromchain] = matched_chains.get(fromchain, [])
matched_chains[fromchain].append((topdbID, tochain))
foundmatches = []
colortext.printf(" %s" % frompdbID, color="silver")
for mpdbID, mchains in matched_pdbs.iteritems():
if mchains == set(fromchains.keys()):
foundmatches.append(mpdbID)
colortext.printf(" PDB %s matched PDB %s on all chains" % (mpdbID, frompdbID), color="white")
if foundmatches:
for fromchain, fromsequence in fromchains.iteritems():
colortext.printf(" %s" % (fromchain), color = "silver")
colortext.printf(" %s" % (fromsequence), color = self.unique_sequences[fromsequence])
mstr = []
for mchain in matched_chains[fromchain]:
if mchain[0] in foundmatches:
mstr.append("%s chain %s" % (mchain[0], mchain[1]))
colortext.printf(" Matches: %s" % ", ".join(mstr))
else:
colortext.error(" No matches found.") | [
"def",
"match",
"(",
"self",
",",
"other",
")",
":",
"colortext",
".",
"message",
"(",
"\"FASTA Match\"",
")",
"for",
"frompdbID",
",",
"fromchains",
"in",
"sorted",
"(",
"self",
".",
"iteritems",
"(",
")",
")",
":",
"matched_pdbs",
"=",
"{",
"}",
"matched_chains",
"=",
"{",
"}",
"for",
"fromchain",
",",
"fromsequence",
"in",
"fromchains",
".",
"iteritems",
"(",
")",
":",
"for",
"topdbID",
",",
"tochains",
"in",
"other",
".",
"iteritems",
"(",
")",
":",
"for",
"tochain",
",",
"tosequence",
"in",
"tochains",
".",
"iteritems",
"(",
")",
":",
"if",
"fromsequence",
"==",
"tosequence",
":",
"matched_pdbs",
"[",
"topdbID",
"]",
"=",
"matched_pdbs",
".",
"get",
"(",
"topdbID",
",",
"set",
"(",
")",
")",
"matched_pdbs",
"[",
"topdbID",
"]",
".",
"add",
"(",
"fromchain",
")",
"matched_chains",
"[",
"fromchain",
"]",
"=",
"matched_chains",
".",
"get",
"(",
"fromchain",
",",
"[",
"]",
")",
"matched_chains",
"[",
"fromchain",
"]",
".",
"append",
"(",
"(",
"topdbID",
",",
"tochain",
")",
")",
"foundmatches",
"=",
"[",
"]",
"colortext",
".",
"printf",
"(",
"\" %s\"",
"%",
"frompdbID",
",",
"color",
"=",
"\"silver\"",
")",
"for",
"mpdbID",
",",
"mchains",
"in",
"matched_pdbs",
".",
"iteritems",
"(",
")",
":",
"if",
"mchains",
"==",
"set",
"(",
"fromchains",
".",
"keys",
"(",
")",
")",
":",
"foundmatches",
".",
"append",
"(",
"mpdbID",
")",
"colortext",
".",
"printf",
"(",
"\" PDB %s matched PDB %s on all chains\"",
"%",
"(",
"mpdbID",
",",
"frompdbID",
")",
",",
"color",
"=",
"\"white\"",
")",
"if",
"foundmatches",
":",
"for",
"fromchain",
",",
"fromsequence",
"in",
"fromchains",
".",
"iteritems",
"(",
")",
":",
"colortext",
".",
"printf",
"(",
"\" %s\"",
"%",
"(",
"fromchain",
")",
",",
"color",
"=",
"\"silver\"",
")",
"colortext",
".",
"printf",
"(",
"\" %s\"",
"%",
"(",
"fromsequence",
")",
",",
"color",
"=",
"self",
".",
"unique_sequences",
"[",
"fromsequence",
"]",
")",
"mstr",
"=",
"[",
"]",
"for",
"mchain",
"in",
"matched_chains",
"[",
"fromchain",
"]",
":",
"if",
"mchain",
"[",
"0",
"]",
"in",
"foundmatches",
":",
"mstr",
".",
"append",
"(",
"\"%s chain %s\"",
"%",
"(",
"mchain",
"[",
"0",
"]",
",",
"mchain",
"[",
"1",
"]",
")",
")",
"colortext",
".",
"printf",
"(",
"\"\t Matches: %s\"",
"%",
"\", \"",
".",
"join",
"(",
"mstr",
")",
")",
"else",
":",
"colortext",
".",
"error",
"(",
"\" No matches found.\"",
")"
]
| This is a noisy terminal-printing function at present since there is no need to make it a proper API function. | [
"This",
"is",
"a",
"noisy",
"terminal",
"-",
"printing",
"function",
"at",
"present",
"since",
"there",
"is",
"no",
"need",
"to",
"make",
"it",
"a",
"proper",
"API",
"function",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fasta.py#L205-L235 | train |
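`match` compares every chain of one FASTA object against every chain of the other, which is quadratic in the number of chains. A hedged alternative sketch — not how the library does it, just the standard inverted-index trick keyed on the sequence string:

```python
from collections import defaultdict

def sequence_index(fasta_like):
    """Map each sequence string to the (pdb_id, chain_id) pairs that carry it."""
    index = defaultdict(list)
    for pdb_id, chains in fasta_like.items():
        for chain_id, seq in chains.items():
            index[seq].append((pdb_id, chain_id))
    return index

a = {"1ABC": {"A": "MKT"}}
b = {"2XYZ": {"C": "MKT", "D": "GSS"}}
idx = sequence_index(b)
for pdb_id, chains in a.items():
    for chain_id, seq in chains.items():
        print((pdb_id, chain_id), "matches", idx.get(seq, []))
```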
uw-it-aca/uw-restclients-sws | uw_sws/person.py | _process_json_data | def _process_json_data(person_data):
"""
Returns a uw_sws.models.SwsPerson object
"""
person = SwsPerson()
if person_data["BirthDate"]:
person.birth_date = parse(person_data["BirthDate"]).date()
person.directory_release = person_data["DirectoryRelease"]
person.email = person_data["Email"]
person.employee_id = person_data["EmployeeID"]
person.first_name = person_data["FirstName"]
person.gender = person_data["Gender"]
person.last_name = person_data["LastName"]
person.student_name = person_data["StudentName"]
if person_data["LastEnrolled"] is not None:
last_enrolled = LastEnrolled()
last_enrolled.href = person_data["LastEnrolled"]["Href"]
last_enrolled.quarter = person_data["LastEnrolled"]["Quarter"]
last_enrolled.year = person_data["LastEnrolled"]["Year"]
person.last_enrolled = last_enrolled
if person_data["LocalAddress"] is not None:
address_data = person_data["LocalAddress"]
local_address = StudentAddress()
local_address.city = address_data["City"]
local_address.country = address_data["Country"]
local_address.street_line1 = address_data["Line1"]
local_address.street_line2 = address_data["Line2"]
local_address.postal_code = address_data["PostalCode"]
local_address.state = address_data["State"]
local_address.zip_code = address_data["Zip"]
person.local_address = local_address
person.local_phone = person_data["LocalPhone"]
if person_data["PermanentAddress"] is not None:
perm_address_data = person_data["PermanentAddress"]
permanent_address = StudentAddress()
permanent_address.city = perm_address_data["City"]
permanent_address.country = perm_address_data["Country"]
permanent_address.street_line1 = perm_address_data["Line1"]
permanent_address.street_line2 = perm_address_data["Line2"]
permanent_address.postal_code = perm_address_data["PostalCode"]
permanent_address.state = perm_address_data["State"]
permanent_address.zip_code = perm_address_data["Zip"]
person.permanent_address = permanent_address
person.permanent_phone = person_data["PermanentPhone"]
person.uwregid = person_data["RegID"]
person.student_number = person_data["StudentNumber"]
person.student_system_key = person_data["StudentSystemKey"]
person.uwnetid = person_data["UWNetID"]
person.visa_type = person_data["VisaType"]
return person | python | def _process_json_data(person_data):
"""
Returns a uw_sws.models.SwsPerson object
"""
person = SwsPerson()
if person_data["BirthDate"]:
person.birth_date = parse(person_data["BirthDate"]).date()
person.directory_release = person_data["DirectoryRelease"]
person.email = person_data["Email"]
person.employee_id = person_data["EmployeeID"]
person.first_name = person_data["FirstName"]
person.gender = person_data["Gender"]
person.last_name = person_data["LastName"]
person.student_name = person_data["StudentName"]
if person_data["LastEnrolled"] is not None:
last_enrolled = LastEnrolled()
last_enrolled.href = person_data["LastEnrolled"]["Href"]
last_enrolled.quarter = person_data["LastEnrolled"]["Quarter"]
last_enrolled.year = person_data["LastEnrolled"]["Year"]
person.last_enrolled = last_enrolled
if person_data["LocalAddress"] is not None:
address_data = person_data["LocalAddress"]
local_address = StudentAddress()
local_address.city = address_data["City"]
local_address.country = address_data["Country"]
local_address.street_line1 = address_data["Line1"]
local_address.street_line2 = address_data["Line2"]
local_address.postal_code = address_data["PostalCode"]
local_address.state = address_data["State"]
local_address.zip_code = address_data["Zip"]
person.local_address = local_address
person.local_phone = person_data["LocalPhone"]
if person_data["PermanentAddress"] is not None:
perm_address_data = person_data["PermanentAddress"]
permanent_address = StudentAddress()
permanent_address.city = perm_address_data["City"]
permanent_address.country = perm_address_data["Country"]
permanent_address.street_line1 = perm_address_data["Line1"]
permanent_address.street_line2 = perm_address_data["Line2"]
permanent_address.postal_code = perm_address_data["PostalCode"]
permanent_address.state = perm_address_data["State"]
permanent_address.zip_code = perm_address_data["Zip"]
person.permanent_address = permanent_address
person.permanent_phone = person_data["PermanentPhone"]
person.uwregid = person_data["RegID"]
person.student_number = person_data["StudentNumber"]
person.student_system_key = person_data["StudentSystemKey"]
person.uwnetid = person_data["UWNetID"]
person.visa_type = person_data["VisaType"]
return person | [
"def",
"_process_json_data",
"(",
"person_data",
")",
":",
"person",
"=",
"SwsPerson",
"(",
")",
"if",
"person_data",
"[",
"\"BirthDate\"",
"]",
":",
"person",
".",
"birth_date",
"=",
"parse",
"(",
"person_data",
"[",
"\"BirthDate\"",
"]",
")",
".",
"date",
"(",
")",
"person",
".",
"directory_release",
"=",
"person_data",
"[",
"\"DirectoryRelease\"",
"]",
"person",
".",
"email",
"=",
"person_data",
"[",
"\"Email\"",
"]",
"person",
".",
"employee_id",
"=",
"person_data",
"[",
"\"EmployeeID\"",
"]",
"person",
".",
"first_name",
"=",
"person_data",
"[",
"\"FirstName\"",
"]",
"person",
".",
"gender",
"=",
"person_data",
"[",
"\"Gender\"",
"]",
"person",
".",
"last_name",
"=",
"person_data",
"[",
"\"LastName\"",
"]",
"person",
".",
"student_name",
"=",
"person_data",
"[",
"\"StudentName\"",
"]",
"if",
"person_data",
"[",
"\"LastEnrolled\"",
"]",
"is",
"not",
"None",
":",
"last_enrolled",
"=",
"LastEnrolled",
"(",
")",
"last_enrolled",
".",
"href",
"=",
"person_data",
"[",
"\"LastEnrolled\"",
"]",
"[",
"\"Href\"",
"]",
"last_enrolled",
".",
"quarter",
"=",
"person_data",
"[",
"\"LastEnrolled\"",
"]",
"[",
"\"Quarter\"",
"]",
"last_enrolled",
".",
"year",
"=",
"person_data",
"[",
"\"LastEnrolled\"",
"]",
"[",
"\"Year\"",
"]",
"person",
".",
"last_enrolled",
"=",
"last_enrolled",
"if",
"person_data",
"[",
"\"LocalAddress\"",
"]",
"is",
"not",
"None",
":",
"address_data",
"=",
"person_data",
"[",
"\"LocalAddress\"",
"]",
"local_address",
"=",
"StudentAddress",
"(",
")",
"local_address",
".",
"city",
"=",
"address_data",
"[",
"\"City\"",
"]",
"local_address",
".",
"country",
"=",
"address_data",
"[",
"\"Country\"",
"]",
"local_address",
".",
"street_line1",
"=",
"address_data",
"[",
"\"Line1\"",
"]",
"local_address",
".",
"street_line2",
"=",
"address_data",
"[",
"\"Line2\"",
"]",
"local_address",
".",
"postal_code",
"=",
"address_data",
"[",
"\"PostalCode\"",
"]",
"local_address",
".",
"state",
"=",
"address_data",
"[",
"\"State\"",
"]",
"local_address",
".",
"zip_code",
"=",
"address_data",
"[",
"\"Zip\"",
"]",
"person",
".",
"local_address",
"=",
"local_address",
"person",
".",
"local_phone",
"=",
"person_data",
"[",
"\"LocalPhone\"",
"]",
"if",
"person_data",
"[",
"\"PermanentAddress\"",
"]",
"is",
"not",
"None",
":",
"perm_address_data",
"=",
"person_data",
"[",
"\"PermanentAddress\"",
"]",
"permanent_address",
"=",
"StudentAddress",
"(",
")",
"permanent_address",
".",
"city",
"=",
"perm_address_data",
"[",
"\"City\"",
"]",
"permanent_address",
".",
"country",
"=",
"perm_address_data",
"[",
"\"Country\"",
"]",
"permanent_address",
".",
"street_line1",
"=",
"perm_address_data",
"[",
"\"Line1\"",
"]",
"permanent_address",
".",
"street_line2",
"=",
"perm_address_data",
"[",
"\"Line2\"",
"]",
"permanent_address",
".",
"postal_code",
"=",
"perm_address_data",
"[",
"\"PostalCode\"",
"]",
"permanent_address",
".",
"state",
"=",
"perm_address_data",
"[",
"\"State\"",
"]",
"permanent_address",
".",
"zip_code",
"=",
"perm_address_data",
"[",
"\"Zip\"",
"]",
"person",
".",
"permanent_address",
"=",
"permanent_address",
"person",
".",
"permanent_phone",
"=",
"person_data",
"[",
"\"PermanentPhone\"",
"]",
"person",
".",
"uwregid",
"=",
"person_data",
"[",
"\"RegID\"",
"]",
"person",
".",
"student_number",
"=",
"person_data",
"[",
"\"StudentNumber\"",
"]",
"person",
".",
"student_system_key",
"=",
"person_data",
"[",
"\"StudentSystemKey\"",
"]",
"person",
".",
"uwnetid",
"=",
"person_data",
"[",
"\"UWNetID\"",
"]",
"person",
".",
"visa_type",
"=",
"person_data",
"[",
"\"VisaType\"",
"]",
"return",
"person"
]
| Returns a uw_sws.models.SwsPerson object | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"SwsPerson",
"object"
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/person.py#L23-L78 | train |
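`_process_json_data` maps JSON keys onto attributes one assignment at a time. For the flat fields, the same mapping can be table-driven; a sketch with a bare class standing in for `SwsPerson` (an assumption — the real model class is not shown here), leaving the nested address and enrollment blocks to their own handlers:

```python
FLAT_FIELDS = {
    "uwnetid": "UWNetID",
    "uwregid": "RegID",
    "first_name": "FirstName",
    "last_name": "LastName",
    "email": "Email",
    "student_number": "StudentNumber",
}

class PersonSketch(object):
    pass

def person_from_json(data):
    person = PersonSketch()
    for attr, key in FLAT_FIELDS.items():
        setattr(person, attr, data.get(key))
    return person

p = person_from_json({"UWNetID": "javerage", "RegID": "ABC123"})
print(p.uwnetid, p.first_name)  # javerage None
```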
uw-it-aca/uw-restclients-sws | uw_sws/dao.py | SWS_DAO._make_notice_date | def _make_notice_date(self, response):
"""
Set the date attribute value in the notice mock data
"""
today = date.today()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
week = today + timedelta(days=2)
next_week = today + timedelta(weeks=1)
future = today + timedelta(weeks=3)
future_end = today + timedelta(weeks=5)
json_data = json.loads(response.data)
for notice in json_data["Notices"]:
if notice["NoticeAttributes"] and\
len(notice["NoticeAttributes"]) > 0:
for attr in notice["NoticeAttributes"]:
if attr["DataType"] == "date":
if attr["Value"] == "yesterday":
attr["Value"] = yesterday.strftime("%Y%m%d")
elif attr["Value"] == "today":
attr["Value"] = today.strftime("%Y%m%d")
elif attr["Value"] == "tomorrow":
attr["Value"] = tomorrow.strftime("%Y%m%d")
elif attr["Value"] == "future":
attr["Value"] = future.strftime("%Y%m%d")
elif attr["Value"] == "future_end":
attr["Value"] = future_end.strftime("%Y%m%d")
elif attr["Value"] == "next_week":
attr["Value"] = next_week.strftime("%Y%m%d")
elif attr["Value"] == "week":
attr["Value"] = week.strftime("%Y%m%d")
else:
pass # use original
response.data = json.dumps(json_data) | python | def _make_notice_date(self, response):
"""
Set the date attribute value in the notice mock data
"""
today = date.today()
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
week = today + timedelta(days=2)
next_week = today + timedelta(weeks=1)
future = today + timedelta(weeks=3)
future_end = today + timedelta(weeks=5)
json_data = json.loads(response.data)
for notice in json_data["Notices"]:
if notice["NoticeAttributes"] and\
len(notice["NoticeAttributes"]) > 0:
for attr in notice["NoticeAttributes"]:
if attr["DataType"] == "date":
if attr["Value"] == "yesterday":
attr["Value"] = yesterday.strftime("%Y%m%d")
elif attr["Value"] == "today":
attr["Value"] = today.strftime("%Y%m%d")
elif attr["Value"] == "tomorrow":
attr["Value"] = tomorrow.strftime("%Y%m%d")
elif attr["Value"] == "future":
attr["Value"] = future.strftime("%Y%m%d")
elif attr["Value"] == "future_end":
attr["Value"] = future_end.strftime("%Y%m%d")
elif attr["Value"] == "next_week":
attr["Value"] = next_week.strftime("%Y%m%d")
elif attr["Value"] == "week":
attr["Value"] = week.strftime("%Y%m%d")
else:
pass # use original
response.data = json.dumps(json_data) | [
"def",
"_make_notice_date",
"(",
"self",
",",
"response",
")",
":",
"today",
"=",
"date",
".",
"today",
"(",
")",
"yesterday",
"=",
"today",
"-",
"timedelta",
"(",
"days",
"=",
"1",
")",
"tomorrow",
"=",
"today",
"+",
"timedelta",
"(",
"days",
"=",
"1",
")",
"week",
"=",
"today",
"+",
"timedelta",
"(",
"days",
"=",
"2",
")",
"next_week",
"=",
"today",
"+",
"timedelta",
"(",
"weeks",
"=",
"1",
")",
"future",
"=",
"today",
"+",
"timedelta",
"(",
"weeks",
"=",
"3",
")",
"future_end",
"=",
"today",
"+",
"timedelta",
"(",
"weeks",
"=",
"5",
")",
"json_data",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"data",
")",
"for",
"notice",
"in",
"json_data",
"[",
"\"Notices\"",
"]",
":",
"if",
"notice",
"[",
"\"NoticeAttributes\"",
"]",
"and",
"len",
"(",
"notice",
"[",
"\"NoticeAttributes\"",
"]",
")",
">",
"0",
":",
"for",
"attr",
"in",
"notice",
"[",
"\"NoticeAttributes\"",
"]",
":",
"if",
"attr",
"[",
"\"DataType\"",
"]",
"==",
"\"date\"",
":",
"if",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"yesterday\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"yesterday",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"today\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"today",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"tomorrow\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"tomorrow",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"future\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"future",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"future_end\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"future_end",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"next_week\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"next_week",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"elif",
"attr",
"[",
"\"Value\"",
"]",
"==",
"\"week\"",
":",
"attr",
"[",
"\"Value\"",
"]",
"=",
"week",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"else",
":",
"pass",
"# use original",
"response",
".",
"data",
"=",
"json",
".",
"dumps",
"(",
"json_data",
")"
]
| Set the date attribute value in the notice mock data | [
"Set",
"the",
"date",
"attribte",
"value",
"in",
"the",
"notice",
"mock",
"data"
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/dao.py#L64-L99 | train |
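The long `elif` chain resolves symbolic date names to concrete dates. A dictionary-driven sketch of the same resolution, with the offsets copied from the function above (note that `week` really is two days there, despite the name):

```python
from datetime import date, timedelta

def symbolic_dates(today=None):
    today = today or date.today()
    return {
        "yesterday": today - timedelta(days=1),
        "today": today,
        "tomorrow": today + timedelta(days=1),
        "week": today + timedelta(days=2),
        "next_week": today + timedelta(weeks=1),
        "future": today + timedelta(weeks=3),
        "future_end": today + timedelta(weeks=5),
    }

value = "tomorrow"
resolved = symbolic_dates().get(value)
if resolved is not None:
    value = resolved.strftime("%Y%m%d")  # otherwise keep the original value
print(value)
```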
Kortemme-Lab/klab | klab/scripting.py | relative_symlink | def relative_symlink(target, link_name):
"""Make a symlink to target using the shortest possible relative path."""
link_name = os.path.abspath(link_name)
abs_target = os.path.abspath(target)
rel_target = os.path.relpath(target, os.path.dirname(link_name))
if os.path.exists(link_name):
os.remove(link_name)
os.symlink(rel_target, link_name) | python | def relative_symlink(target, link_name):
"""Make a symlink to target using the shortest possible relative path."""
link_name = os.path.abspath(link_name)
abs_target = os.path.abspath(target)
rel_target = os.path.relpath(target, os.path.dirname(link_name))
if os.path.exists(link_name):
os.remove(link_name)
os.symlink(rel_target, link_name) | [
"def",
"relative_symlink",
"(",
"target",
",",
"link_name",
")",
":",
"link_name",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"link_name",
")",
"abs_target",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"target",
")",
"rel_target",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"target",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"link_name",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"link_name",
")",
":",
"os",
".",
"remove",
"(",
"link_name",
")",
"os",
".",
"symlink",
"(",
"rel_target",
",",
"link_name",
")"
]
| Make a symlink to target using the shortest possible relative path. | [
"Make",
"a",
"symlink",
"to",
"target",
"using",
"the",
"shortest",
"possible",
"relative",
"path",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/scripting.py#L57-L65 | train |
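One quirk worth noting in `relative_symlink`: `os.path.exists` follows symlinks, so a dangling link at `link_name` is reported as absent and the final `os.symlink` then fails; `abs_target` is also computed but never used. A variant sketch using `os.path.lexists`, which tests the link itself:

```python
import os

def relative_symlink_sketch(target, link_name):
    """Like relative_symlink above, but lexists() also catches dangling links."""
    link_name = os.path.abspath(link_name)
    rel_target = os.path.relpath(target, os.path.dirname(link_name))
    if os.path.lexists(link_name):
        os.remove(link_name)
    os.symlink(rel_target, link_name)
```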
ethan92429/onshapepy | onshapepy/part.py | Part.params | def params(self, dict):
"""Set configuration variables for an OnShape part."""
self._configuration.update(dict)
self._measurements.update() | python | def params(self, dict):
"""Set configuration variables for an OnShape part."""
self._configuration.update(dict)
self._measurements.update() | [
"def",
"params",
"(",
"self",
",",
"dict",
")",
":",
"self",
".",
"_configuration",
".",
"update",
"(",
"dict",
")",
"self",
".",
"_measurements",
".",
"update",
"(",
")"
]
| Set configuration variables for an OnShape part. | [
"Set",
"configuration",
"variables",
"for",
"an",
"OnShape",
"part",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L52-L55 | train |
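The `params` setter above means configuration is pushed by plain assignment. A minimal self-contained sketch of that property protocol (names illustrative; the real setter also refreshes measurements over the API):

```python
class PartSketch(object):
    def __init__(self):
        self._configuration = {}

    @property
    def params(self):
        return dict(self._configuration)

    @params.setter
    def params(self, values):
        # The real class follows this with self._measurements.update()
        self._configuration.update(values)

p = PartSketch()
p.params = {"length": "2 in", "mirrored": True}
print(p.params)
```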
ethan92429/onshapepy | onshapepy/part.py | Configuration.update | def update(self, params=None, client=c):
"""Push params to OnShape and synchronize the local copy
"""
uri = self.parent.uri
if not params or not self.res:
self.get_params()
return
d = self.payload
for k, v in params.items():
m = d["currentConfiguration"][self.parameter_map[k]]["message"]
if isinstance(v, bool) or isinstance(v, str):
m["value"] = v
else:
try:
m["expression"] = str(v)
except KeyError:
m["value"] = str(v)
res = client.update_configuration(uri.did, uri.wvm, uri.eid, json.dumps(d))
# If it was a good request, update config to be consistent with online.
if res.status_code == 200:
self.res = res | python | def update(self, params=None, client=c):
"""Push params to OnShape and synchronize the local copy
"""
uri = self.parent.uri
if not params or not self.res:
self.get_params()
return
d = self.payload
for k, v in params.items():
m = d["currentConfiguration"][self.parameter_map[k]]["message"]
if isinstance(v, bool) or isinstance(v, str):
m["value"] = v
else:
try:
m["expression"] = str(v)
except KeyError:
m["value"] = str(v)
res = client.update_configuration(uri.did, uri.wvm, uri.eid, json.dumps(d))
# If it was a good request, update config to be consistent with online.
if res.status_code == 200:
self.res = res | [
"def",
"update",
"(",
"self",
",",
"params",
"=",
"None",
",",
"client",
"=",
"c",
")",
":",
"uri",
"=",
"self",
".",
"parent",
".",
"uri",
"if",
"not",
"params",
"or",
"not",
"self",
".",
"res",
":",
"self",
".",
"get_params",
"(",
")",
"return",
"d",
"=",
"self",
".",
"payload",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
":",
"m",
"=",
"d",
"[",
"\"currentConfiguration\"",
"]",
"[",
"self",
".",
"parameter_map",
"[",
"k",
"]",
"]",
"[",
"\"message\"",
"]",
"if",
"isinstance",
"(",
"v",
",",
"bool",
")",
"or",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"m",
"[",
"\"value\"",
"]",
"=",
"v",
"else",
":",
"try",
":",
"m",
"[",
"\"expression\"",
"]",
"=",
"str",
"(",
"v",
")",
"except",
"KeyError",
":",
"m",
"[",
"\"value\"",
"]",
"=",
"str",
"(",
"v",
")",
"res",
"=",
"client",
".",
"update_configuration",
"(",
"uri",
".",
"did",
",",
"uri",
".",
"wvm",
",",
"uri",
".",
"eid",
",",
"json",
".",
"dumps",
"(",
"d",
")",
")",
"# If it was a good request, update config to be consistent with online.",
"if",
"res",
".",
"status_code",
"==",
"200",
":",
"self",
".",
"res",
"=",
"res"
]
| Push params to OnShape and synchronize the local copy | [
"Push",
"params",
"to",
"OnShape",
"and",
"synchronize",
"the",
"local",
"copy"
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L72-L94 | train |
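The type branching in `update` decides how each value is serialized: bools and strings go into the message's `value`, everything else is stringified into `expression`. A simplified sketch of that encoding (it drops the `KeyError` fallback the real code keeps for messages without an `expression` slot):

```python
def encode_param(value):
    if isinstance(value, (bool, str)):
        return {"value": value}
    return {"expression": str(value)}

print(encode_param(True))     # {'value': True}
print(encode_param("steel"))  # {'value': 'steel'}
print(encode_param(12.5))     # {'expression': '12.5'}
```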
ethan92429/onshapepy | onshapepy/part.py | Configuration.get_params | def get_params(self):
"""Manually pull params defined in config from OnShape and return a python representation of the params.
Quantities are converted to pint quantities, Bools are converted to python bools and Enums are converted
to strings. Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI."""
self.res = c.get_configuration(self.parent.uri.as_dict()) | python | def get_params(self):
"""Manually pull params defined in config from OnShape and return a python representation of the params.
Quantities are converted to pint quantities, Bools are converted to python bools and Enums are converted
to strings. Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI."""
self.res = c.get_configuration(self.parent.uri.as_dict()) | [
"def",
"get_params",
"(",
"self",
")",
":",
"self",
".",
"res",
"=",
"c",
".",
"get_configuration",
"(",
"self",
".",
"parent",
".",
"uri",
".",
"as_dict",
"(",
")",
")"
]
| Manually pull params defined in config from OnShape and return a python representation of the params.
Quantities are converted to pint quantities, Bools are converted to python bools and Enums are converted
to strings. Note that Enum names are autogenerated by OnShape and do not match the name on the OnShape UI. | [
"Manually",
"pull",
"params",
"defined",
"in",
"config",
"from",
"OnShape",
"and",
"return",
"a",
"python",
"representation",
"of",
"the",
"params",
".",
"Quantities",
"are",
"converted",
"to",
"pint",
"quantities",
"Bools",
"are",
"converted",
"to",
"python",
"bools",
"and",
"Enums",
"are",
"converted",
"to",
"strings",
".",
"Note",
"that",
"Enum",
"names",
"are",
"autogenerated",
"by",
"OnShape",
"and",
"do",
"not",
"match",
"the",
"name",
"on",
"the",
"OnShape",
"UI",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L96-L100 | train |
ethan92429/onshapepy | onshapepy/part.py | Configuration.params | def params(self):
"""Get the params of response data from the API.
Returns:
- d (dict): Dictionary mapping of all configuration values
"""
payload = self.payload
d = {}
for i, p in enumerate(payload["currentConfiguration"]):
type_name = p["typeName"]
cp = payload["configurationParameters"][i]["message"]
name = cp["parameterName"]
if type_name == "BTMParameterQuantity":
try:
v = q(p["message"]["expression"])
except:
v = q(p["message"]["value"], p["message"]["units"])
elif type_name == "BTMParameterBoolean":
v = p["message"]["value"]
elif type_name == "BTMParameterEnum":
enum = p["message"]["value"]
enum_map = {d['message']['option']: i for i, d in enumerate(cp['options'])}
v = cp['options'][enum_map[enum]]['message']['optionName']
d[name] = v
return d | python | def params(self):
"""Get the params of response data from the API.
Returns:
- d (dict): Dictionary mapping of all configuration values
"""
payload = self.payload
d = {}
for i, p in enumerate(payload["currentConfiguration"]):
type_name = p["typeName"]
cp = payload["configurationParameters"][i]["message"]
name = cp["parameterName"]
if type_name == "BTMParameterQuantity":
try:
v = q(p["message"]["expression"])
except:
v = q(p["message"]["value"], p["message"]["units"])
elif type_name == "BTMParameterBoolean":
v = p["message"]["value"]
elif type_name == "BTMParameterEnum":
enum = p["message"]["value"]
enum_map = {d['message']['option']: i for i, d in enumerate(cp['options'])}
v = cp['options'][enum_map[enum]]['message']['optionName']
d[name] = v
return d | [
"def",
"params",
"(",
"self",
")",
":",
"payload",
"=",
"self",
".",
"payload",
"d",
"=",
"{",
"}",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"payload",
"[",
"\"currentConfiguration\"",
"]",
")",
":",
"type_name",
"=",
"p",
"[",
"\"typeName\"",
"]",
"cp",
"=",
"payload",
"[",
"\"configurationParameters\"",
"]",
"[",
"i",
"]",
"[",
"\"message\"",
"]",
"name",
"=",
"cp",
"[",
"\"parameterName\"",
"]",
"if",
"type_name",
"==",
"\"BTMParameterQuantity\"",
":",
"try",
":",
"v",
"=",
"q",
"(",
"p",
"[",
"\"message\"",
"]",
"[",
"\"expression\"",
"]",
")",
"except",
":",
"v",
"=",
"q",
"(",
"p",
"[",
"\"message\"",
"]",
"[",
"\"value\"",
"]",
",",
"p",
"[",
"\"message\"",
"]",
"[",
"\"units\"",
"]",
")",
"elif",
"type_name",
"==",
"\"BTMParameterBoolean\"",
":",
"v",
"=",
"p",
"[",
"\"message\"",
"]",
"[",
"\"value\"",
"]",
"elif",
"type_name",
"==",
"\"BTMParameterEnum\"",
":",
"enum",
"=",
"p",
"[",
"\"message\"",
"]",
"[",
"\"value\"",
"]",
"enum_map",
"=",
"{",
"d",
"[",
"'message'",
"]",
"[",
"'option'",
"]",
":",
"i",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"cp",
"[",
"'options'",
"]",
")",
"}",
"v",
"=",
"cp",
"[",
"'options'",
"]",
"[",
"enum_map",
"[",
"enum",
"]",
"]",
"[",
"'message'",
"]",
"[",
"'optionName'",
"]",
"d",
"[",
"name",
"]",
"=",
"v",
"return",
"d"
]
| Get the params of response data from the API.
Returns:
- d (dict): Dictionary mapping of all configuration values | [
"Get",
"the",
"params",
"of",
"response",
"data",
"from",
"the",
"API",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L103-L127 | train |
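Decoding runs the other way: a `BTMParameterQuantity` message prefers its `expression` and falls back to `value` plus `units`. A sketch with a fake `q` standing in for the pint-based quantity constructor; the original's bare `except:` is narrowed to `KeyError` here, though that means it no longer swallows expression parse errors the way the real code does:

```python
def q(value, units=None):
    """Stand-in for the pint quantity constructor used by onshapepy."""
    return "%s %s" % (value, units) if units else value

def decode_quantity(message):
    try:
        return q(message["expression"])
    except KeyError:
        return q(message["value"], message["units"])

print(decode_quantity({"expression": "12 mm"}))
print(decode_quantity({"value": 12, "units": "millimeter"}))
```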
ethan92429/onshapepy | onshapepy/part.py | Measurements.update | def update(self):
""" Update all local variable names to match OnShape. """
uri = self.parent.uri
script = r"""
function(context, queries) {
return getVariable(context, "measurements");
}
"""
self.res = c.evaluate_featurescript(uri.as_dict(), script) | python | def update(self):
""" Update all local variable names to match OnShape. """
uri = self.parent.uri
script = r"""
function(context, queries) {
return getVariable(context, "measurements");
}
"""
self.res = c.evaluate_featurescript(uri.as_dict(), script) | [
"def",
"update",
"(",
"self",
")",
":",
"uri",
"=",
"self",
".",
"parent",
".",
"uri",
"script",
"=",
"r\"\"\"\n function(context, queries) {\n return getVariable(context, \"measurements\");\n }\n \"\"\"",
"self",
".",
"res",
"=",
"c",
".",
"evaluate_featurescript",
"(",
"uri",
".",
"as_dict",
"(",
")",
",",
"script",
")"
]
| Update all local variable names to match OnShape. | [
"Update",
"all",
"local",
"variable",
"names",
"to",
"match",
"OnShape",
"."
]
| 61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df | https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/part.py#L146-L155 | train |
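`evaluate_featurescript` ships a FeatureScript function body as a string for server-side evaluation. A small sketch that parametrizes the variable name read by that snippet (an illustration only — the library hardcodes `measurements`):

```python
def read_variable_script(name):
    template = 'function(context, queries) {\n    return getVariable(context, "%s");\n}'
    return template % name

print(read_variable_script("measurements"))
```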
Kortemme-Lab/klab | klab/retrospect.py | LogReader.getFailedJobIDs | def getFailedJobIDs(self, extraLapse = TYPICAL_LAPSE):
'''Returns a list of IDs which identify failed jobs in the scriptsRun table.
If a time stamp for a job can be found, we return this. The time stamp can be used to index the log.
If no time stamp was found, return the name of the script instead.
'''
scriptsRun = self.scriptsRun
failedJobTimestamps = []
nodata = []
for name, details in sorted(scriptsRun.iteritems()):
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
else:
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
if details["status"] & RETROSPECT_FAIL:
failedJobTimestamps.append(details["lastRun"])
elif details["status"] & RETROSPECT_WARNING:
failedJobTimestamps.append(details["lastRun"])
return failedJobTimestamps, nodata | python | def getFailedJobIDs(self, extraLapse = TYPICAL_LAPSE):
'''Returns a list of IDs which identify failed jobs in the scriptsRun table.
If a time stamp for a job can be found, we return this. The time stamp can be used to index the log.
If no time stamp was found, return the name of the script instead.
'''
scriptsRun = self.scriptsRun
failedJobTimestamps = []
nodata = []
for name, details in sorted(scriptsRun.iteritems()):
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
else:
if details["lastRun"]:
failedJobTimestamps.append(details["lastRun"])
else:
nodata.append(name)
continue
if details["status"] & RETROSPECT_FAIL:
failedJobTimestamps.append(details["lastRun"])
elif details["status"] & RETROSPECT_WARNING:
failedJobTimestamps.append(details["lastRun"])
return failedJobTimestamps, nodata | [
"def",
"getFailedJobIDs",
"(",
"self",
",",
"extraLapse",
"=",
"TYPICAL_LAPSE",
")",
":",
"scriptsRun",
"=",
"self",
".",
"scriptsRun",
"failedJobTimestamps",
"=",
"[",
"]",
"nodata",
"=",
"[",
"]",
"for",
"name",
",",
"details",
"in",
"sorted",
"(",
"scriptsRun",
".",
"iteritems",
"(",
")",
")",
":",
"if",
"details",
"[",
"\"lastSuccess\"",
"]",
"and",
"expectedScripts",
".",
"get",
"(",
"name",
")",
":",
"if",
"not",
"expectedScripts",
".",
"check",
"(",
"name",
",",
"details",
"[",
"\"lastSuccess\"",
"]",
",",
"extraLapse",
")",
":",
"if",
"details",
"[",
"\"lastRun\"",
"]",
":",
"failedJobTimestamps",
".",
"append",
"(",
"details",
"[",
"\"lastRun\"",
"]",
")",
"else",
":",
"nodata",
".",
"append",
"(",
"name",
")",
"continue",
"else",
":",
"if",
"details",
"[",
"\"lastRun\"",
"]",
":",
"failedJobTimestamps",
".",
"append",
"(",
"details",
"[",
"\"lastRun\"",
"]",
")",
"else",
":",
"nodata",
".",
"append",
"(",
"name",
")",
"continue",
"if",
"details",
"[",
"\"status\"",
"]",
"&",
"RETROSPECT_FAIL",
":",
"failedJobTimestamps",
".",
"append",
"(",
"details",
"[",
"\"lastRun\"",
"]",
")",
"elif",
"details",
"[",
"\"status\"",
"]",
"&",
"RETROSPECT_WARNING",
":",
"failedJobTimestamps",
".",
"append",
"(",
"details",
"[",
"\"lastRun\"",
"]",
")",
"return",
"failedJobTimestamps",
",",
"nodata"
]
| Returns a list of IDs which identify failed jobs in the scriptsRun table.
If a time stamp for a job can be found, we return this. The time stamp can be used to index the log.
If no time stamp was found, return the name of the script instead. | [
"Returns",
"a",
"list",
"of",
"which",
"identify",
"failed",
"jobs",
"in",
"the",
"scriptsRun",
"table",
".",
"If",
"a",
"time",
"stamp",
"for",
"a",
"job",
"can",
"be",
"found",
"we",
"return",
"this",
".",
"The",
"time",
"stamp",
"can",
"be",
"used",
"to",
"index",
"the",
"log",
".",
"If",
"no",
"time",
"stamp",
"was",
"found",
"return",
"the",
"name",
"of",
"the",
"script",
"instead",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/retrospect.py#L302-L331 | train |
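Job status here is a bitmask: `status & RETROSPECT_FAIL` tests a flag bit, and the `if`/`elif` ordering makes failure dominate warnings. A sketch with assumed flag values (the real constants live in `klab.retrospect` and are not shown in this row):

```python
RETROSPECT_FAIL = 0x1     # assumed value, for illustration
RETROSPECT_WARNING = 0x2  # assumed value, for illustration

def classify(status):
    if status & RETROSPECT_FAIL:
        return "FAIL"
    if status & RETROSPECT_WARNING:
        return "WARNINGS"
    return "OK"

print(classify(RETROSPECT_WARNING))                    # WARNINGS
print(classify(RETROSPECT_FAIL | RETROSPECT_WARNING))  # FAIL (failure wins)
```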
Kortemme-Lab/klab | klab/retrospect.py | LogReader.generateSummaryHTMLTable | def generateSummaryHTMLTable(self, extraLapse = TYPICAL_LAPSE):
'''Generates a summary in HTML of the status of the expected scripts based on the log.
This summary is returned as a list of strings.
'''
scriptsRun = self.scriptsRun
html = []
html.append("<table style='text-align:center;border:1px solid black;margin-left: auto;margin-right: auto;'>\n") # Start summary table
html.append(' <tr><td colspan="4" style="text-align:center"></td></tr>\n')
html.append(' <tr style="font-weight:bold;background-color:#cccccc;text-align:center"><td>Script</td><td>Last status</td><td>Last run</td><td>Last success</td></tr>\n')
# Alternate shades between rows
tablestyle = ['background-color:#33dd33;', 'background-color:#33ff33;']
warningstyle = ['background-color:#EA8737;', 'background-color:#f5b767;']
failstyle = ['background-color:#dd3333;', 'background-color:#ff3333;']
count = 0
for name, details in sorted(scriptsRun.iteritems()):
status = None
rowstyle = tablestyle[count % 2]
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
status = "STOPPED"
else:
rowstyle = failstyle[count % 2]
status = "FAIL"
laststatusstyle = tablestyle[count % 2]
if details["status"] & RETROSPECT_FAIL:
laststatusstyle = failstyle[count % 2]
status = "FAIL"
elif status != "STOPPED" and details["status"] & RETROSPECT_WARNING:
laststatusstyle = warningstyle[count % 2]
status = "WARNINGS"
elif status != "FAIL" and status != "STOPPED":
status = "OK"
# Start a row
html.append('<tr style="text-align:left;%s">\n' % rowstyle)
# Script name field
if status == "STOPPED":
html.append('\t<td style="%s">%s</td>\n' % (failstyle[count % 2], name))
else:
html.append('\t<td style="%s">%s</td>' % (tablestyle[count % 2], name))
# Last status field
if details["lastRun"]:
if status == "STOPPED":
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (failstyle[count % 2], self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (laststatusstyle, self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s">%s</td>\n' % (laststatusstyle, status))
# Last run field
if details["lastRun"]:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (laststatusstyle, self.createAnchorID(name, details["lastRun"]), details["lastRun"]))
else:
html.append('\t<td style="%s">none found</td>\n' % laststatusstyle)
# Last success field
if details["lastSuccess"]:
html.append('\t<td><a href="#%s">%s</a></td>\n' % (self.createAnchorID(name, details["lastSuccess"]), details["lastSuccess"]))
else:
html.append('\t<td>none found</td>\n')
html.append('</tr>\n')
count += 1
html.append("</table>")
return html | python | def generateSummaryHTMLTable(self, extraLapse = TYPICAL_LAPSE):
'''Generates a summary in HTML of the status of the expected scripts based on the log.
This summary is returned as a list of strings.
'''
scriptsRun = self.scriptsRun
html = []
html.append("<table style='text-align:center;border:1px solid black;margin-left: auto;margin-right: auto;'>\n") # Start summary table
html.append(' <tr><td colspan="4" style="text-align:center"></td></tr>\n')
html.append(' <tr style="font-weight:bold;background-color:#cccccc;text-align:center"><td>Script</td><td>Last status</td><td>Last run</td><td>Last success</td></tr>\n')
# Alternate shades between rows
tablestyle = ['background-color:#33dd33;', 'background-color:#33ff33;']
warningstyle = ['background-color:#EA8737;', 'background-color:#f5b767;']
failstyle = ['background-color:#dd3333;', 'background-color:#ff3333;']
count = 0
for name, details in sorted(scriptsRun.iteritems()):
status = None
rowstyle = tablestyle[count % 2]
if details["lastSuccess"] and expectedScripts.get(name):
if not expectedScripts.check(name, details["lastSuccess"], extraLapse):
status = "STOPPED"
else:
rowstyle = failstyle[count % 2]
status = "FAIL"
laststatusstyle = tablestyle[count % 2]
if details["status"] & RETROSPECT_FAIL:
laststatusstyle = failstyle[count % 2]
status = "FAIL"
elif status != "STOPPED" and details["status"] & RETROSPECT_WARNING:
laststatusstyle = warningstyle[count % 2]
status = "WARNINGS"
elif status != "FAIL" and status != "STOPPED":
status = "OK"
# Start a row
html.append('<tr style="text-align:left;%s">\n' % rowstyle)
# Script name field
if status == "STOPPED":
html.append('\t<td style="%s">%s</td>\n' % (failstyle[count % 2], name))
else:
html.append('\t<td style="%s">%s</td>' % (tablestyle[count % 2], name))
# Last status field
if details["lastRun"]:
if status == "STOPPED":
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (failstyle[count % 2], self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (laststatusstyle, self.createAnchorID(name, details["lastRun"]), status))
else:
html.append('\t<td style="%s">%s</td>\n' % (laststatusstyle, status))
# Last run field
if details["lastRun"]:
html.append('\t<td style="%s"><a href="#%s">%s</a></td>\n' % (laststatusstyle, self.createAnchorID(name, details["lastRun"]), details["lastRun"]))
else:
html.append('\t<td style="%s">none found</td>\n' % laststatusstyle)
# Last success field
if details["lastSuccess"]:
html.append('\t<td><a href="#%s">%s</a></td>\n' % (self.createAnchorID(name, details["lastSuccess"]), details["lastSuccess"]))
else:
html.append('\t<td>none found</td>\n')
html.append('</tr>\n')
count += 1
html.append("</table>")
return html | [
"def",
"generateSummaryHTMLTable",
"(",
"self",
",",
"extraLapse",
"=",
"TYPICAL_LAPSE",
")",
":",
"scriptsRun",
"=",
"self",
".",
"scriptsRun",
"html",
"=",
"[",
"]",
"html",
".",
"append",
"(",
"\"<table style='text-align:center;border:1px solid black;margin-left: auto;margin-right: auto;'>\\n\"",
")",
"# Start summary table",
"html",
".",
"append",
"(",
"'\t<tr><td colspan=\"4\" style=\"text-align:center\"></td></tr>\\n'",
")",
"html",
".",
"append",
"(",
"'\t<tr style=\"font-weight:bold;background-color:#cccccc;text-align:center\"><td>Script</td><td>Last status</td><td>Last run</td><td>Last success</td></tr>\\n'",
")",
"# Alternate shades between rows",
"tablestyle",
"=",
"[",
"'background-color:#33dd33;'",
",",
"'background-color:#33ff33;'",
"]",
"warningstyle",
"=",
"[",
"'background-color:#EA8737;'",
",",
"'background-color:#f5b767;'",
"]",
"failstyle",
"=",
"[",
"'background-color:#dd3333;'",
",",
"'background-color:#ff3333;'",
"]",
"count",
"=",
"0",
"for",
"name",
",",
"details",
"in",
"sorted",
"(",
"scriptsRun",
".",
"iteritems",
"(",
")",
")",
":",
"status",
"=",
"None",
"rowstyle",
"=",
"tablestyle",
"[",
"count",
"%",
"2",
"]",
"if",
"details",
"[",
"\"lastSuccess\"",
"]",
"and",
"expectedScripts",
".",
"get",
"(",
"name",
")",
":",
"if",
"not",
"expectedScripts",
".",
"check",
"(",
"name",
",",
"details",
"[",
"\"lastSuccess\"",
"]",
",",
"extraLapse",
")",
":",
"status",
"=",
"\"STOPPED\"",
"else",
":",
"rowstyle",
"=",
"failstyle",
"[",
"count",
"%",
"2",
"]",
"status",
"=",
"\"FAIL\"",
"laststatusstyle",
"=",
"tablestyle",
"[",
"count",
"%",
"2",
"]",
"if",
"details",
"[",
"\"status\"",
"]",
"&",
"RETROSPECT_FAIL",
":",
"laststatusstyle",
"=",
"failstyle",
"[",
"count",
"%",
"2",
"]",
"status",
"=",
"\"FAIL\"",
"elif",
"status",
"!=",
"\"STOPPED\"",
"and",
"details",
"[",
"\"status\"",
"]",
"&",
"RETROSPECT_WARNING",
":",
"laststatusstyle",
"=",
"warningstyle",
"[",
"count",
"%",
"2",
"]",
"status",
"=",
"\"WARNINGS\"",
"elif",
"status",
"!=",
"\"FAIL\"",
"and",
"status",
"!=",
"\"STOPPED\"",
":",
"status",
"=",
"\"OK\"",
"# Start a row\t",
"html",
".",
"append",
"(",
"'<tr style=\"text-align:left;%s\">\\n'",
"%",
"rowstyle",
")",
"# Script name field",
"if",
"status",
"==",
"\"STOPPED\"",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\">%s</td>\\n'",
"%",
"(",
"failstyle",
"[",
"count",
"%",
"2",
"]",
",",
"name",
")",
")",
"else",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\">%s</td>'",
"%",
"(",
"tablestyle",
"[",
"count",
"%",
"2",
"]",
",",
"name",
")",
")",
"# Last status field",
"if",
"details",
"[",
"\"lastRun\"",
"]",
":",
"if",
"status",
"==",
"\"STOPPED\"",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\"><a href=\"#%s\">%s</a></td>\\n'",
"%",
"(",
"failstyle",
"[",
"count",
"%",
"2",
"]",
",",
"self",
".",
"createAnchorID",
"(",
"name",
",",
"details",
"[",
"\"lastRun\"",
"]",
")",
",",
"status",
")",
")",
"else",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\"><a href=\"#%s\">%s</a></td>\\n'",
"%",
"(",
"laststatusstyle",
",",
"self",
".",
"createAnchorID",
"(",
"name",
",",
"details",
"[",
"\"lastRun\"",
"]",
")",
",",
"status",
")",
")",
"else",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\">%s</td>\\n'",
"%",
"(",
"laststatusstyle",
",",
"status",
")",
")",
"# Last run field",
"if",
"details",
"[",
"\"lastRun\"",
"]",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\"><a href=\"#%s\">%s</a></td>\\n'",
"%",
"(",
"laststatusstyle",
",",
"self",
".",
"createAnchorID",
"(",
"name",
",",
"details",
"[",
"\"lastRun\"",
"]",
")",
",",
"details",
"[",
"\"lastRun\"",
"]",
")",
")",
"else",
":",
"html",
".",
"append",
"(",
"'\\t<td style=\"%s\">none found</td>\\n'",
"%",
"laststatusstyle",
")",
"# Last success field",
"if",
"details",
"[",
"\"lastSuccess\"",
"]",
":",
"html",
".",
"append",
"(",
"'\\t<td><a href=\"#%s\">%s</a></td>\\n'",
"%",
"(",
"self",
".",
"createAnchorID",
"(",
"name",
",",
"details",
"[",
"\"lastSuccess\"",
"]",
")",
",",
"details",
"[",
"\"lastSuccess\"",
"]",
")",
")",
"else",
":",
"html",
".",
"append",
"(",
"'\\t<td>none found</td>\\n'",
")",
"html",
".",
"append",
"(",
"'</tr>\\n'",
")",
"count",
"+=",
"1",
"html",
".",
"append",
"(",
"\"</table>\"",
")",
"return",
"html"
]
| Generates a summary in HTML of the status of the expected scripts based on the log.
This summary is returned as a list of strings. | [
"Generates",
"a",
"summary",
"in",
"HTML",
"of",
"the",
"status",
"of",
"the",
"expected",
"scripts",
"broken",
"based",
"on",
"the",
"log",
".",
"This",
"summary",
"is",
"returned",
"as",
"a",
"list",
"of",
"strings",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/retrospect.py#L411-L482 | train |
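The zebra striping in the table body comes from keeping each palette as a two-element list and indexing it with `count % 2`; the warning and failure palettes reuse the same trick. A minimal sketch with made-up script names:

```python
styles = ["background-color:#33dd33;", "background-color:#33ff33;"]

rows = []
for count, name in enumerate(["backup_db", "sync_files", "rotate_logs"]):
    rows.append('<tr style="%s"><td>%s</td></tr>' % (styles[count % 2], name))
print("\n".join(rows))
```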
TheGhouls/oct | oct/tools/results_to_csv.py | to_csv | def to_csv(args):
"""Take a sqlite filled database of results and return a csv file
:param str result_file: the path of the sqlite database
:param str output_file: the path of the csv output file
:param str delimiter: the desired delimiter for the output csv file
"""
result_file = args.result_file
output_file = args.output_file
delimiter = args.delimiter
if not os.path.isfile(result_file):
raise OSError("Results file does not exist")
headers = ['elapsed', 'epoch', 'turret_name', 'scriptrun_time', 'error']
headers_row = {}
set_database(result_file, db, {})
results = Result.select()
for item in results:
result_item = item.to_dict()
for k in result_item['custom_timers'].keys():
if k not in headers:
headers.append(k)
headers_row[k] = k
with open(output_file, "w+") as f:
writer = csv.DictWriter(f, fieldnames=headers, delimiter=delimiter)
headers_row.update({
'elapsed': 'elapsed time',
'epoch': 'epoch (in seconds)',
'turret_name': 'turret name',
'scriptrun_time': 'transaction time',
'error': 'error'
})
writer.writerow(headers_row)
for result_item in results:
line = result_item.to_dict()
for key, value in line['custom_timers'].items():
line[key] = value
del line['custom_timers']
writer.writerow(line) | python | def to_csv(args):
"""Take a sqlite filled database of results and return a csv file
:param str result_file: the path of the sqlite database
:param str output_file: the path of the csv output file
:param str delimiter: the desired delimiter for the output csv file
"""
result_file = args.result_file
output_file = args.output_file
delimiter = args.delimiter
if not os.path.isfile(result_file):
raise OSError("Results file does not exist")
headers = ['elapsed', 'epoch', 'turret_name', 'scriptrun_time', 'error']
headers_row = {}
set_database(result_file, db, {})
results = Result.select()
for item in results:
result_item = item.to_dict()
for k in result_item['custom_timers'].keys():
if k not in headers:
headers.append(k)
headers_row[k] = k
with open(output_file, "w+") as f:
writer = csv.DictWriter(f, fieldnames=headers, delimiter=delimiter)
headers_row.update({
'elapsed': 'elapsed time',
'epoch': 'epoch (in seconds)',
'turret_name': 'turret name',
'scriptrun_time': 'transaction time',
'error': 'error'
})
writer.writerow(headers_row)
for result_item in results:
line = result_item.to_dict()
for key, value in line['custom_timers'].items():
line[key] = value
del line['custom_timers']
writer.writerow(line) | [
"def",
"to_csv",
"(",
"args",
")",
":",
"result_file",
"=",
"args",
".",
"result_file",
"output_file",
"=",
"args",
".",
"output_file",
"delimiter",
"=",
"args",
".",
"delimiter",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"result_file",
")",
":",
"raise",
"OSError",
"(",
"\"Results file does not exists\"",
")",
"headers",
"=",
"[",
"'elapsed'",
",",
"'epoch'",
",",
"'turret_name'",
",",
"'scriptrun_time'",
",",
"'error'",
"]",
"headers_row",
"=",
"{",
"}",
"set_database",
"(",
"result_file",
",",
"db",
",",
"{",
"}",
")",
"results",
"=",
"Result",
".",
"select",
"(",
")",
"for",
"item",
"in",
"results",
":",
"result_item",
"=",
"item",
".",
"to_dict",
"(",
")",
"for",
"k",
"in",
"result_item",
"[",
"'custom_timers'",
"]",
".",
"keys",
"(",
")",
":",
"if",
"k",
"not",
"in",
"headers",
":",
"headers",
".",
"append",
"(",
"k",
")",
"headers_row",
"[",
"k",
"]",
"=",
"k",
"with",
"open",
"(",
"output_file",
",",
"\"w+\"",
")",
"as",
"f",
":",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"f",
",",
"fieldnames",
"=",
"headers",
",",
"delimiter",
"=",
"delimiter",
")",
"headers_row",
".",
"update",
"(",
"{",
"'elapsed'",
":",
"'elapsed time'",
",",
"'epoch'",
":",
"'epoch (in seconds)'",
",",
"'turret_name'",
":",
"'turret name'",
",",
"'scriptrun_time'",
":",
"'transaction time'",
",",
"'error'",
":",
"'error'",
"}",
")",
"writer",
".",
"writerow",
"(",
"headers_row",
")",
"for",
"result_item",
"in",
"results",
":",
"line",
"=",
"result_item",
".",
"to_dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"line",
"[",
"'custom_timers'",
"]",
".",
"items",
"(",
")",
":",
"line",
"[",
"key",
"]",
"=",
"value",
"del",
"line",
"[",
"'custom_timers'",
"]",
"writer",
".",
"writerow",
"(",
"line",
")"
]
| Take a sqlite filled database of results and return a csv file
:param str result_file: the path of the sqlite database
:param str output_file: the path of the csv output file
:param str delimiter: the desired delimiter for the output csv file | [
"Take",
"a",
"sqlite",
"filled",
"database",
"of",
"results",
"and",
"return",
"a",
"csv",
"file"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/tools/results_to_csv.py#L8-L49 | train |
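`to_csv` needs two passes because `csv.DictWriter` wants the complete header up front, while each result row can introduce new custom-timer keys. A self-contained sketch of that pattern, with plain dicts standing in for `Result` records:

```python
import csv
import io

records = [
    {"elapsed": 1.2, "custom_timers": {"login": 0.3}},
    {"elapsed": 0.9, "custom_timers": {"search": 0.5}},
]

# Pass 1: discover every timer key so the header is complete.
headers = ["elapsed"]
for r in records:
    headers += [k for k in r["custom_timers"] if k not in headers]

# Pass 2: flatten each record and write it; absent keys become ''.
buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=headers)
writer.writeheader()
for r in records:
    row = {"elapsed": r["elapsed"]}
    row.update(r["custom_timers"])
    writer.writerow(row)
print(buf.getvalue())
```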
Kortemme-Lab/klab | klab/stats/misc.py | fraction_correct_fuzzy_linear_create_vector | def fraction_correct_fuzzy_linear_create_vector(z, z_cutoff, z_fuzzy_range):
'''A helper function for fraction_correct_fuzzy_linear.'''
assert(z_fuzzy_range * 2 < z_cutoff)
if (z == None or numpy.isnan(z)): # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
return None
elif (z >= z_cutoff + z_fuzzy_range): # positive e.g. z >= 1.1
return [0, 0, 1]
elif (z <= -z_cutoff - z_fuzzy_range): # negative e.g. z <= -1.1
return [1, 0, 0]
elif (-z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range): # neutral e.g. -0.9 <= z <= 0.9
return [0, 1, 0]
elif (-z_cutoff - z_fuzzy_range < z < -z_cutoff + z_fuzzy_range): # negative/neutral e.g. -1.1 < z < 0.9
neutrality = (z + z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [1 - neutrality, neutrality, 0]
elif (z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range): # neutral/positive e.g. 0.9 < z < 1.1
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [0, 1 - positivity, positivity]
else:
raise Exception('Logical error.')
# normalize the vector
length = math.sqrt(numpy.dot(zvec, zvec))
return numpy.divide(zvec, length) | python | def fraction_correct_fuzzy_linear_create_vector(z, z_cutoff, z_fuzzy_range):
'''A helper function for fraction_correct_fuzzy_linear.'''
assert(z_fuzzy_range * 2 < z_cutoff)
if (z == None or numpy.isnan(z)): # todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values
return None
elif (z >= z_cutoff + z_fuzzy_range): # positive e.g. z >= 1.1
return [0, 0, 1]
elif (z <= -z_cutoff - z_fuzzy_range): # negative e.g. z <= -1.1
return [1, 0, 0]
elif (-z_cutoff + z_fuzzy_range <= z <= z_cutoff - z_fuzzy_range): # neutral e.g. -0.9 <= z <= 0.9
return [0, 1, 0]
elif (-z_cutoff - z_fuzzy_range < z < -z_cutoff + z_fuzzy_range): # negative/neutral e.g. -1.1 < z < 0.9
neutrality = (z + z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [1 - neutrality, neutrality, 0]
elif (z_cutoff - z_fuzzy_range < z < z_cutoff + z_fuzzy_range): # neutral/positive e.g. 0.9 < z < 1.1
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)
zvec = [0, 1 - positivity, positivity]
else:
raise Exception('Logical error.')
# normalize the vector
length = math.sqrt(numpy.dot(zvec, zvec))
return numpy.divide(zvec, length) | [
"def",
"fraction_correct_fuzzy_linear_create_vector",
"(",
"z",
",",
"z_cutoff",
",",
"z_fuzzy_range",
")",
":",
"assert",
"(",
"z_fuzzy_range",
"*",
"2",
"<",
"z_cutoff",
")",
"if",
"(",
"z",
"==",
"None",
"or",
"numpy",
".",
"isnan",
"(",
"z",
")",
")",
":",
"# todo: and ignore_null_values: # If we are missing values then we either discount the case or consider it as incorrect depending on ignore_null_values",
"return",
"None",
"elif",
"(",
"z",
">=",
"z_cutoff",
"+",
"z_fuzzy_range",
")",
":",
"# positive e.g. z >= 1.1",
"return",
"[",
"0",
",",
"0",
",",
"1",
"]",
"elif",
"(",
"z",
"<=",
"-",
"z_cutoff",
"-",
"z_fuzzy_range",
")",
":",
"# negative e.g. z <= -1.1",
"return",
"[",
"1",
",",
"0",
",",
"0",
"]",
"elif",
"(",
"-",
"z_cutoff",
"+",
"z_fuzzy_range",
"<=",
"z",
"<=",
"z_cutoff",
"-",
"z_fuzzy_range",
")",
":",
"# neutral e.g. -0.9 <= z <= 0.9",
"return",
"[",
"0",
",",
"1",
",",
"0",
"]",
"elif",
"(",
"-",
"z_cutoff",
"-",
"z_fuzzy_range",
"<",
"z",
"<",
"-",
"z_cutoff",
"+",
"z_fuzzy_range",
")",
":",
"# negative/neutral e.g. -1.1 < z < 0.9",
"neutrality",
"=",
"(",
"z",
"+",
"z_cutoff",
"+",
"z_fuzzy_range",
")",
"/",
"(",
"z_fuzzy_range",
"*",
"2",
")",
"zvec",
"=",
"[",
"1",
"-",
"neutrality",
",",
"neutrality",
",",
"0",
"]",
"elif",
"(",
"z_cutoff",
"-",
"z_fuzzy_range",
"<",
"z",
"<",
"z_cutoff",
"+",
"z_fuzzy_range",
")",
":",
"# neutral/positive e.g. 0.9 < z < 1.1",
"positivity",
"=",
"(",
"z",
"-",
"z_cutoff",
"+",
"z_fuzzy_range",
")",
"/",
"(",
"z_fuzzy_range",
"*",
"2",
")",
"zvec",
"=",
"[",
"0",
",",
"1",
"-",
"positivity",
",",
"positivity",
"]",
"else",
":",
"raise",
"Exception",
"(",
"'Logical error.'",
")",
"# normalize the vector",
"length",
"=",
"math",
".",
"sqrt",
"(",
"numpy",
".",
"dot",
"(",
"zvec",
",",
"zvec",
")",
")",
"return",
"numpy",
".",
"divide",
"(",
"zvec",
",",
"length",
")"
]
| A helper function for fraction_correct_fuzzy_linear. | [
"A",
"helper",
"function",
"for",
"fraction_correct_fuzzy_linear",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/stats/misc.py#L113-L135 | train |
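
A standalone re-computation of one fuzzy boundary case from the record above (values chosen to match the "e.g." comments in the code; no klab import is needed):

```python
import math
import numpy

# Boundary case z = 1.0 with z_cutoff = 1.0 and z_fuzzy_range = 0.1.
z, z_cutoff, z_fuzzy_range = 1.0, 1.0, 0.1

# z falls in the neutral/positive band (0.9 < z < 1.1), so blend the two classes.
positivity = (z - z_cutoff + z_fuzzy_range) / (z_fuzzy_range * 2)   # 0.5
zvec = [0, 1 - positivity, positivity]                              # [0, 0.5, 0.5]

# Normalize to unit length, exactly as the helper does before returning.
length = math.sqrt(numpy.dot(zvec, zvec))
print(numpy.divide(zvec, length))   # -> [0.  0.70710678  0.70710678]
```
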
Kortemme-Lab/klab | klab/cloning/gen9.py | apply_quality_control_checks | def apply_quality_control_checks(
seq,
check_gen9_seqs=True,
check_short_length=True,
check_local_gc_content=True,
check_global_gc_content=True):
"""
Raise a ValueError if the given sequence doesn't pass all of the Gen9
quality control design guidelines. Certain checks can be enabled or
disabled via the command line.
"""
seq = seq.upper()
failure_reasons = []
# Minimum length
if check_short_length:
if len(seq) < min_gene_length:
failure_reasons.append('minimum_length: Sequence is %d bp long and needs to be at least %d bp'%(len(seq),min_gene_length))
# Maximum length
if len(seq) > max_gene_length:
failure_reasons.append('maximum_length: Sequence is %d bp long and needs to be shorter than %d bp'%(len(seq),max_gene_length))
# Gen9 restricted sequences
if check_gen9_seqs:
for site in reserved_restriction_sites:
pattern = dna.dna_to_re(site)
reverse_site = dna.reverse_complement(site)
reverse_pattern = dna.dna_to_re(reverse_site)
if pattern.match(seq):
failure_reasons.append('gen9_restricted_sequences: Reserved sequence %s is present'%(site))
if reverse_pattern.match(seq):
failure_reasons.append('gen9_restricted_sequences: Reverse-complement of reserved sequence %s is present'%(site))
# Global GC content
if check_global_gc_content:
gc_content = dna.gc_content(seq)
if gc_content < global_gc_content_min:
failure_reasons.append('global_gc_content_min: Global GC content is %.3f%% and must be at least %.3f%%'%(gc_content,global_gc_content_min))
if gc_content > global_gc_content_max:
failure_reasons.append('global_gc_content_max: Global GC content is %.3f%% and must be less than %.3f%%'%(gc_content,global_gc_content_max))
# Local GC content (windows)
if check_local_gc_content:
windows = [seq]
if local_gc_window_size < len(seq):
windows = dna.sliding_window(seq, local_gc_window_size)
for seq_window in windows:
lgc_content = dna.gc_content(seq_window)
if lgc_content < local_gc_content_min:
failure_reasons.append('local_gc_content_min: Local GC content is %.3f%% and must be at least %.3f%%'%(lgc_content,local_gc_content_min))
break
if lgc_content > local_gc_content_max:
failure_reasons.append('local_gc_content_max: Local GC content is %.3f%% and must be less than %.3f%%'%(lgc_content,local_gc_content_max))
break
# Homopolymers
for base in dna.dna_bases:
homopolymer = base * homopolymer_max_lengths[base]
if homopolymer in seq:
failure_reasons.append('max_%s_homopolymer: %s'%(
    base.lower(), dna.case_highlight(seq, homopolymer)))
# Make sure all the checks passed.
if failure_reasons:
intro = "The given sequence fails following Gen9 design guidelines:"
raise ValueError('\n'.join([intro] + failure_reasons)) | python | def apply_quality_control_checks(
seq,
check_gen9_seqs=True,
check_short_length=True,
check_local_gc_content=True,
check_global_gc_content=True):
"""
Raise a ValueError if the given sequence doesn't pass all of the Gen9
quality control design guidelines. Certain checks can be enabled or
disabled via the command line.
"""
seq = seq.upper()
failure_reasons = []
# Minimum length
if check_short_length:
if len(seq) < min_gene_length:
failure_reasons.append('minimum_length: Sequence is %d bp long and needs to be at least %d bp'%(len(seq),min_gene_length))
# Maximum length
if len(seq) > max_gene_length:
failure_reasons.append('maximum_length: Sequence is %d bp long and needs to be shorter than %d bp'%(len(seq),max_gene_length))
# Gen9 restricted sequences
if check_gen9_seqs:
for site in reserved_restriction_sites:
pattern = dna.dna_to_re(site)
reverse_site = dna.reverse_complement(site)
reverse_pattern = dna.dna_to_re(reverse_site)
if pattern.match(seq):
failure_reasons.append('gen9_restricted_sequences: Reserved sequence %s is present'%(site))
if reverse_pattern.match(seq):
failure_reasons.append('gen9_restricted_sequences: Reverse-complement of reserved sequence %s is present'%(site))
# Global GC content
if check_global_gc_content:
gc_content = dna.gc_content(seq)
if gc_content < global_gc_content_min:
failure_reasons.append('global_gc_content_min: Global GC content is %.3f%% and must be at least %.3f%%'%(gc_content,global_gc_content_min))
if gc_content > global_gc_content_max:
failure_reasons.append('global_gc_content_max: Global GC content is %.3f%% and must be less than %.3f%%'%(gc_content,global_gc_content_max))
# Local GC content (windows)
if check_local_gc_content:
windows = [seq]
if local_gc_window_size < len(seq):
windows = dna.sliding_window(seq, local_gc_window_size)
for seq_window in windows:
lgc_content = dna.gc_content(seq_window)
if lgc_content < local_gc_content_min:
failure_reasons.append('local_gc_content_min: Local GC content is %.3f%% and must be at least %.3f%%'%(lgc_content,local_gc_content_min))
break
if lgc_content > local_gc_content_max:
failure_reasons.append('local_gc_content_max: Local GC content is %.3f%% and must be less than %.3f%%'%(lgc_content,local_gc_content_max))
break
# Homopolymers
for base in dna.dna_bases:
homopolymer = base * homopolymer_max_lengths[base]
if homopolymer in seq:
failure_reasons.append('max_%s_homopolymer: %s'%(
    base.lower(), dna.case_highlight(seq, homopolymer)))
# Make sure all the checks passed.
if failure_reasons:
intro = "The given sequence fails following Gen9 design guidelines:"
raise ValueError('\n'.join([intro] + failure_reasons)) | [
"def",
"apply_quality_control_checks",
"(",
"seq",
",",
"check_gen9_seqs",
"=",
"True",
",",
"check_short_length",
"=",
"True",
",",
"check_local_gc_content",
"=",
"True",
",",
"check_global_gc_content",
"=",
"True",
")",
":",
"seq",
"=",
"seq",
".",
"upper",
"(",
")",
"failure_reasons",
"=",
"[",
"]",
"# Minimum length",
"if",
"check_short_length",
":",
"if",
"len",
"(",
"seq",
")",
"<",
"min_gene_length",
":",
"failure_reasons",
".",
"append",
"(",
"'minimum_length: Sequence is %d bp long and needs to be at least %d bp'",
"%",
"(",
"len",
"(",
"seq",
")",
",",
"min_gene_length",
")",
")",
"# Maximum length",
"if",
"len",
"(",
"seq",
")",
">",
"max_gene_length",
":",
"failure_reasons",
".",
"append",
"(",
"'maximum_length: Sequence is %d bp long and needs to be shorter than %d bp'",
"%",
"(",
"len",
"(",
"seq",
")",
",",
"max_gene_length",
")",
")",
"# Gen9 restricted sequences",
"if",
"check_gen9_seqs",
":",
"for",
"site",
"in",
"reserved_restriction_sites",
":",
"pattern",
"=",
"dna",
".",
"dna_to_re",
"(",
"site",
")",
"reverse_site",
"=",
"dna",
".",
"reverse_complement",
"(",
"site",
")",
"reverse_pattern",
"=",
"dna",
".",
"dna_to_re",
"(",
"reverse_site",
")",
"if",
"pattern",
".",
"match",
"(",
"seq",
")",
":",
"failure_reasons",
".",
"append",
"(",
"'gen9_restricted_sequences: Reserved sequence %s is present'",
"%",
"(",
"site",
")",
")",
"if",
"reverse_pattern",
".",
"match",
"(",
"seq",
")",
":",
"failure_reasons",
".",
"append",
"(",
"'gen9_restricted_sequences: Reverse-complement of reserved sequence %s is present'",
"%",
"(",
"site",
")",
")",
"# Global GC content",
"if",
"check_global_gc_content",
":",
"gc_content",
"=",
"dna",
".",
"gc_content",
"(",
"seq",
")",
"if",
"gc_content",
"<",
"global_gc_content_min",
":",
"failure_reasons",
".",
"append",
"(",
"'global_gc_content_min: Global GC content is %.3f%% and must be at least %.3f%%'",
"%",
"(",
"gc_content",
",",
"global_gc_content_min",
")",
")",
"if",
"gc_content",
">",
"global_gc_content_max",
":",
"failure_reasons",
".",
"append",
"(",
"'global_gc_content_max: Global GC content is %.3f%% and must be less than %.3f%%'",
"%",
"(",
"gc_content",
",",
"global_gc_content_max",
")",
")",
"# Local GC content (windows)",
"if",
"check_local_gc_content",
":",
"windows",
"=",
"[",
"seq",
"]",
"if",
"local_gc_window_size",
"<",
"len",
"(",
"seq",
")",
":",
"windows",
"=",
"dna",
".",
"sliding_window",
"(",
"seq",
",",
"local_gc_window_size",
")",
"for",
"seq_window",
"in",
"windows",
":",
"lgc_content",
"=",
"dna",
".",
"gc_content",
"(",
"seq_window",
")",
"if",
"lgc_content",
"<",
"local_gc_content_min",
":",
"failure_reasons",
".",
"append",
"(",
"'local_gc_content_min: Local GC content is %.3f%% and must be at least %.3f%%'",
"%",
"(",
"lgc_content",
",",
"local_gc_content_min",
")",
")",
"break",
"if",
"lgc_content",
">",
"local_gc_content_max",
":",
"failure_reasons",
".",
"append",
"(",
"'local_gc_content_max: Local GC content is %.3f%% and must be less than %.3f%%'",
"%",
"(",
"lgc_content",
",",
"local_gc_content_max",
")",
")",
"break",
"# Homopolymers",
"for",
"base",
"in",
"dna",
".",
"dna_bases",
":",
"homopolymer",
"=",
"base",
"*",
"homopolymer_max_lengths",
"[",
"base",
"]",
"if",
"homopolymer",
"in",
"seq",
":",
"failure_reasons",
".",
"append",
"(",
"'max_%s_homopolymer: %s'",
"%",
"(",
"base",
".",
"lower",
"(",
")",
",",
"dna",
".",
"case_highlight",
"(",
"seq",
",",
"a_homopolymer",
")",
")",
")",
"# Make sure all the checks passed.",
"if",
"failure_reasons",
":",
"intro",
"=",
"\"The given sequence fails following Gen9 design guidelines:\"",
"raise",
"ValueError",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"intro",
"]",
"+",
"failure_reasons",
")",
")"
]
| Raise a ValueError if the given sequence doesn't pass all of the Gen9
quality control design guidelines. Certain checks can be enabled or
disabled via the command line. | [
"Raise",
"a",
"ValueError",
"if",
"the",
"given",
"sequence",
"doesn",
"t",
"pass",
"all",
"of",
"the",
"Gen9",
"quality",
"control",
"design",
"guidelines",
".",
"Certain",
"checks",
"can",
"be",
"enabled",
"or",
"disabled",
"via",
"the",
"command",
"line",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/gen9.py#L17-L90 | train |
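
A hedged usage sketch for the checker above; it assumes klab.cloning.gen9 and its module-level thresholds (min_gene_length, reserved_restriction_sites, and so on) are importable as in the record, and the toy sequence is not a real design:

```python
# Hypothetical call; the restricted-sequence check is disabled here because the
# reserved sites are defined in the module, not shown in this record.
from klab.cloning import gen9

candidate = "ATG" + "GCATTC" * 100 + "TAA"   # toy sequence, not a real design
try:
    gen9.apply_quality_control_checks(candidate, check_gen9_seqs=False)
except ValueError as e:
    # Each violated guideline contributes one line to the message.
    print(e)
```
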
Dapid/tmscoring | tmscoring/tmscore.py | Aligning.get_default_values | def get_default_values(self):
"""
Make a crude estimation of the alignment using the center of mass
and general C->N orientation.
"""
out = dict(dx=0, dy=0, dz=0, theta=0, phi=0, psi=0)
dx, dy, dz, _ = np.mean(self.coord1 - self.coord2, axis=1)
out['dx'] = dx
out['dy'] = dy
out['dz'] = dz
# C->N vector
vec1 = self.coord1[:-1, 1] - self.coord1[:-1, -1]
vec2 = self.coord2[:-1, 1] - self.coord2[:-1, -1]
vec1 /= np.linalg.norm(vec1)
vec2 /= np.linalg.norm(vec2)
# Find the rotation matrix that converts vec1 to vec2:
# http://math.stackexchange.com/questions/180418/#476311
v = np.cross(vec1, vec2)
s = np.linalg.norm(v) + np.finfo(DTYPE).eps
c = vec1.dot(vec2)
vx = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]], dtype=DTYPE)
rotation_matrix = np.eye(3) + vx + vx.dot(vx) * (1 - c) / (s * s)
# Recover the angles from the matrix as seen here:
# http://nghiaho.com/?page_id=846
out['theta'] = math.atan2(rotation_matrix[2, 1], rotation_matrix[2, 2])
out['phi'] = math.atan2(-rotation_matrix[2, 0],
math.hypot(rotation_matrix[2, 1],
rotation_matrix[2, 2]))
out['psi'] = math.atan2(rotation_matrix[1, 0], rotation_matrix[0, 0])
return out | python | def get_default_values(self):
"""
Make a crude estimation of the alignment using the center of mass
and general C->N orientation.
"""
out = dict(dx=0, dy=0, dz=0, theta=0, phi=0, psi=0)
dx, dy, dz, _ = np.mean(self.coord1 - self.coord2, axis=1)
out['dx'] = dx
out['dy'] = dy
out['dz'] = dz
# C->N vector
vec1 = self.coord1[:-1, 1] - self.coord1[:-1, -1]
vec2 = self.coord2[:-1, 1] - self.coord2[:-1, -1]
vec1 /= np.linalg.norm(vec1)
vec2 /= np.linalg.norm(vec2)
# Find the rotation matrix that converts vec1 to vec2:
# http://math.stackexchange.com/questions/180418/#476311
v = np.cross(vec1, vec2)
s = np.linalg.norm(v) + np.finfo(DTYPE).eps
c = vec1.dot(vec2)
vx = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]], dtype=DTYPE)
rotation_matrix = np.eye(3) + vx + vx.dot(vx) * (1 - c) / (s * s)
# Recover the angles from the matrix as seen here:
# http://nghiaho.com/?page_id=846
out['theta'] = math.atan2(rotation_matrix[2, 1], rotation_matrix[2, 2])
out['phi'] = math.atan2(-rotation_matrix[2, 0],
math.hypot(rotation_matrix[2, 1],
rotation_matrix[2, 2]))
out['psi'] = math.atan2(rotation_matrix[1, 0], rotation_matrix[0, 0])
return out | [
"def",
"get_default_values",
"(",
"self",
")",
":",
"out",
"=",
"dict",
"(",
"dx",
"=",
"0",
",",
"dy",
"=",
"0",
",",
"dz",
"=",
"0",
",",
"theta",
"=",
"0",
",",
"phi",
"=",
"0",
",",
"psi",
"=",
"0",
")",
"dx",
",",
"dy",
",",
"dz",
",",
"_",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"coord1",
"-",
"self",
".",
"coord2",
",",
"axis",
"=",
"1",
")",
"out",
"[",
"'dx'",
"]",
"=",
"dx",
"out",
"[",
"'dy'",
"]",
"=",
"dy",
"out",
"[",
"'dz'",
"]",
"=",
"dz",
"# C->N vector",
"vec1",
"=",
"self",
".",
"coord1",
"[",
":",
"-",
"1",
",",
"1",
"]",
"-",
"self",
".",
"coord1",
"[",
":",
"-",
"1",
",",
"-",
"1",
"]",
"vec2",
"=",
"self",
".",
"coord2",
"[",
":",
"-",
"1",
",",
"1",
"]",
"-",
"self",
".",
"coord2",
"[",
":",
"-",
"1",
",",
"-",
"1",
"]",
"vec1",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"vec1",
")",
"vec2",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"vec2",
")",
"# Find the rotation matrix that converts vec1 to vec2:",
"# http://math.stackexchange.com/questions/180418/#476311",
"v",
"=",
"np",
".",
"cross",
"(",
"vec1",
",",
"vec2",
")",
"s",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v",
")",
"+",
"np",
".",
"finfo",
"(",
"DTYPE",
")",
".",
"eps",
"c",
"=",
"vec1",
".",
"dot",
"(",
"vec2",
")",
"vx",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"v",
"[",
"2",
"]",
",",
"v",
"[",
"1",
"]",
"]",
",",
"[",
"v",
"[",
"2",
"]",
",",
"0",
",",
"-",
"v",
"[",
"0",
"]",
"]",
",",
"[",
"-",
"v",
"[",
"1",
"]",
",",
"v",
"[",
"0",
"]",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"DTYPE",
")",
"rotation_matrix",
"=",
"np",
".",
"eye",
"(",
"3",
")",
"+",
"vx",
"+",
"vx",
".",
"dot",
"(",
"vx",
")",
"*",
"(",
"1",
"-",
"c",
")",
"/",
"(",
"s",
"*",
"s",
")",
"# Recover the angles from the matrix as seen here:",
"# http://nghiaho.com/?page_id=846",
"out",
"[",
"'theta'",
"]",
"=",
"math",
".",
"atan2",
"(",
"rotation_matrix",
"[",
"2",
",",
"1",
"]",
",",
"rotation_matrix",
"[",
"2",
",",
"2",
"]",
")",
"out",
"[",
"'phi'",
"]",
"=",
"math",
".",
"atan2",
"(",
"-",
"rotation_matrix",
"[",
"2",
",",
"0",
"]",
",",
"math",
".",
"hypot",
"(",
"rotation_matrix",
"[",
"2",
",",
"1",
"]",
",",
"rotation_matrix",
"[",
"2",
",",
"2",
"]",
")",
")",
"out",
"[",
"'psi'",
"]",
"=",
"math",
".",
"atan2",
"(",
"rotation_matrix",
"[",
"1",
",",
"0",
"]",
",",
"rotation_matrix",
"[",
"0",
",",
"0",
"]",
")",
"return",
"out"
]
| Make a crude estimation of the alignment using the center of mass
and general C->N orientation. | [
"Make",
"a",
"crude",
"estimation",
"of",
"the",
"alignment",
"using",
"the",
"center",
"of",
"mass",
"and",
"general",
"C",
"-",
">",
"N",
"orientation",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L45-L81 | train |
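
The rotation construction used in get_default_values can be checked in isolation; this sketch applies the same cross-product formula (from the math.stackexchange answer cited in the code) to two unit vectors:

```python
import numpy as np

# Build R so that R @ vec1 == vec2 for two unit vectors.
vec1 = np.array([1.0, 0.0, 0.0])
vec2 = np.array([0.0, 1.0, 0.0])

v = np.cross(vec1, vec2)
s = np.linalg.norm(v) + np.finfo(float).eps   # guard against parallel vectors
c = vec1.dot(vec2)
vx = np.array([[0, -v[2], v[1]],
               [v[2], 0, -v[0]],
               [-v[1], v[0], 0]])
R = np.eye(3) + vx + vx.dot(vx) * (1 - c) / (s * s)

print(np.allclose(R.dot(vec1), vec2))   # True
```
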
Dapid/tmscoring | tmscoring/tmscore.py | Aligning.get_matrix | def get_matrix(theta, phi, psi, dx, dy, dz,
matrix=np.zeros((4, 4), dtype=DTYPE),
angles=np.zeros(3, dtype=DTYPE)):
"""
Build the rotation-translation matrix.
It has the form:
[       | dx ]
[   R   | dy ]
[       | dz ]
[ 0 0 0 | 1  ]
"""
# NB!: matrix and angles by default are being overwritten on each call
# thus, only created once at compile time.
angles[0] = theta
angles[1] = phi
angles[2] = psi
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
rotation = matrix[:3, :3]
rotation.flat = (cx * cz - sx * cy * sz,
cx * sz + sx * cy * cz, sx * sy,
-sx * cz - cx * cy * sz,
-sx * sz + cx * cy * cz, cx * sy,
sy * sz,
-sy * cz, cy)
# Translation component
matrix[:3, 3] = dx, dy, dz
matrix[3, 3] = 1.
return matrix | python | def get_matrix(theta, phi, psi, dx, dy, dz,
matrix=np.zeros((4, 4), dtype=DTYPE),
angles=np.zeros(3, dtype=DTYPE)):
"""
Build the rotation-translation matrix.
It has the form:
[       | dx ]
[   R   | dy ]
[       | dz ]
[ 0 0 0 | 1  ]
"""
# NB!: matrix and angles by default are being overwritten on each call
# thus, only created once at compile time.
angles[0] = theta
angles[1] = phi
angles[2] = psi
cx, cy, cz = np.cos(angles)
sx, sy, sz = np.sin(angles)
rotation = matrix[:3, :3]
rotation.flat = (cx * cz - sx * cy * sz,
cx * sz + sx * cy * cz, sx * sy,
-sx * cz - cx * cy * sz,
-sx * sz + cx * cy * cz, cx * sy,
sy * sz,
-sy * cz, cy)
# Translation component
matrix[:3, 3] = dx, dy, dz
matrix[3, 3] = 1.
return matrix | [
"def",
"get_matrix",
"(",
"theta",
",",
"phi",
",",
"psi",
",",
"dx",
",",
"dy",
",",
"dz",
",",
"matrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
",",
"dtype",
"=",
"DTYPE",
")",
",",
"angles",
"=",
"np",
".",
"zeros",
"(",
"3",
",",
"dtype",
"=",
"DTYPE",
")",
")",
":",
"# NB!: matrix and angles by default are being overwritten on each call",
"# thus, only created once at compile time.",
"angles",
"[",
"0",
"]",
"=",
"theta",
"angles",
"[",
"1",
"]",
"=",
"phi",
"angles",
"[",
"2",
"]",
"=",
"psi",
"cx",
",",
"cy",
",",
"cz",
"=",
"np",
".",
"cos",
"(",
"angles",
")",
"sx",
",",
"sy",
",",
"sz",
"=",
"np",
".",
"sin",
"(",
"angles",
")",
"rotation",
"=",
"matrix",
"[",
":",
"3",
",",
":",
"3",
"]",
"rotation",
".",
"flat",
"=",
"(",
"cx",
"*",
"cz",
"-",
"sx",
"*",
"cy",
"*",
"sz",
",",
"cx",
"*",
"sz",
"+",
"sx",
"*",
"cy",
"*",
"cz",
",",
"sx",
"*",
"sy",
",",
"-",
"sx",
"*",
"cz",
"-",
"cx",
"*",
"cy",
"*",
"sz",
",",
"-",
"sx",
"*",
"sz",
"+",
"cx",
"*",
"cy",
"*",
"cz",
",",
"cx",
"*",
"sy",
",",
"sy",
"*",
"sz",
",",
"-",
"sy",
"*",
"cz",
",",
"cy",
")",
"# Translation component",
"matrix",
"[",
":",
"3",
",",
"3",
"]",
"=",
"dx",
",",
"dy",
",",
"dz",
"matrix",
"[",
"3",
",",
"3",
"]",
"=",
"1.",
"return",
"matrix"
]
| Build the rotation-translation matrix.
It has the form:
[       | dx ]
[   R   | dy ]
[       | dz ]
[ 0 0 0 | 1  ] | [
"Build",
"the",
"rotation",
"-",
"translation",
"matrix",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L84-L118 | train |
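
For zero Euler angles the matrix above reduces to a pure translation; this minimal standalone analogue shows the homogeneous-coordinate convention the class relies on:

```python
import numpy as np

# Minimal analogue of get_matrix with zero rotation: only the translation
# column and the homogeneous row are filled in.
def translation_matrix(dx, dy, dz):
    m = np.eye(4)
    m[:3, 3] = dx, dy, dz
    return m

point = np.array([1.0, 2.0, 3.0, 1.0])           # homogeneous coordinate, as in _tm
print(translation_matrix(10, 0, -5).dot(point))  # -> [11.  2. -2.  1.]
```
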
Dapid/tmscoring | tmscoring/tmscore.py | Aligning._tm | def _tm(self, theta, phi, psi, dx, dy, dz):
"""
Compute the minimisation target, not normalised.
"""
matrix = self.get_matrix(theta, phi, psi, dx, dy, dz)
coord = matrix.dot(self.coord2)
dist = coord - self.coord1
d_i2 = (dist * dist).sum(axis=0)
tm = -(1 / (1 + (d_i2 / self.d02)))
return tm | python | def _tm(self, theta, phi, psi, dx, dy, dz):
"""
Compute the minimisation target, not normalised.
"""
matrix = self.get_matrix(theta, phi, psi, dx, dy, dz)
coord = matrix.dot(self.coord2)
dist = coord - self.coord1
d_i2 = (dist * dist).sum(axis=0)
tm = -(1 / (1 + (d_i2 / self.d02)))
return tm | [
"def",
"_tm",
"(",
"self",
",",
"theta",
",",
"phi",
",",
"psi",
",",
"dx",
",",
"dy",
",",
"dz",
")",
":",
"matrix",
"=",
"self",
".",
"get_matrix",
"(",
"theta",
",",
"phi",
",",
"psi",
",",
"dx",
",",
"dy",
",",
"dz",
")",
"coord",
"=",
"matrix",
".",
"dot",
"(",
"self",
".",
"coord2",
")",
"dist",
"=",
"coord",
"-",
"self",
".",
"coord1",
"d_i2",
"=",
"(",
"dist",
"*",
"dist",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"tm",
"=",
"-",
"(",
"1",
"/",
"(",
"1",
"+",
"(",
"d_i2",
"/",
"self",
".",
"d02",
")",
")",
")",
"return",
"tm"
]
| Compute the minimisation target, not normalised. | [
"Compute",
"the",
"minimisation",
"target",
"not",
"normalised",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L138-L150 | train |
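
The terms that _tm negates are the standard TM-score contributions; this sketch recomputes them for example distances. The length-dependent d0 formula is the usual TM-score constant and is an assumption here, since the record only shows self.d02 being consumed:

```python
import numpy as np

N = 100
d0 = 1.24 * (N - 15) ** (1.0 / 3.0) - 1.8    # assumed definition of sqrt(d02)
d_i = np.array([0.5, 1.0, 4.0, 12.0])        # example CA-CA distances in angstroms
terms = 1.0 / (1.0 + (d_i ** 2) / d0 ** 2)
print(terms)          # close residues contribute ~1, distant ones ~0
print(terms.mean())   # averaging over aligned residues gives a TM-like score
```
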
Dapid/tmscoring | tmscoring/tmscore.py | Aligning.write | def write(self, outputfile='out.pdb', appended=False):
"""
Save the second PDB file aligned to the first.
If appended is True, both are saved as different chains.
"""
# FIXME some cases don't work.
matrix = self.get_matrix(**self.get_current_values())
out = open(outputfile, 'w')
atomid = 1
if appended:
for line in open(self.pdb1):
if not line.startswith('ATOM') or (line[21] != self.chain_1 and line[21] != ' '):
continue
out.write(line[:7])
out.write('{: >4}'.format(atomid))
atomid += 1
out.write(line[11:21])
out.write('A')
out.write(line[22:])
for line in open(self.pdb2):
if not line.startswith('ATOM') or (line[21] != self.chain_2 and line[21] != ' '):
continue
x = float(line[32:38])
y = float(line[39:46])
z = float(line[48:54])
vec = np.array([x, y, z, 1])
x, y, z, _ = matrix.dot(vec)
out.write(line[:7])
out.write('{: >4}'.format(atomid))
atomid += 1
out.write(line[11:21])
out.write('B')
out.write(line[22:30])
out.write('{:>8.3f}{:>8.3f}{:>8.3f}'.format(x, y, z))
out.write(line[54:])
out.close() | python | def write(self, outputfile='out.pdb', appended=False):
"""
Save the second PDB file aligned to the first.
If appended is True, both are saved as different chains.
"""
# FIXME some cases don't work.
matrix = self.get_matrix(**self.get_current_values())
out = open(outputfile, 'w')
atomid = 1
if appended:
for line in open(self.pdb1):
if not line.startswith('ATOM') or (line[21] != self.chain_1 and line[21] != ' '):
continue
out.write(line[:7])
out.write('{: >4}'.format(atomid))
atomid += 1
out.write(line[11:21])
out.write('A')
out.write(line[22:])
for line in open(self.pdb2):
if not line.startswith('ATOM') or (line[21] != self.chain_2 and line[21] != ' '):
continue
x = float(line[32:38])
y = float(line[39:46])
z = float(line[48:54])
vec = np.array([x, y, z, 1])
x, y, z, _ = matrix.dot(vec)
out.write(line[:7])
out.write('{: >4}'.format(atomid))
atomid += 1
out.write(line[11:21])
out.write('B')
out.write(line[22:30])
out.write('{:>8.3f}{:>8.3f}{:>8.3f}'.format(x, y, z))
out.write(line[54:])
out.close() | [
"def",
"write",
"(",
"self",
",",
"outputfile",
"=",
"'out.pdb'",
",",
"appended",
"=",
"False",
")",
":",
"# FIXME some cases don't work.",
"matrix",
"=",
"self",
".",
"get_matrix",
"(",
"*",
"*",
"self",
".",
"get_current_values",
"(",
")",
")",
"out",
"=",
"open",
"(",
"outputfile",
",",
"'w'",
")",
"atomid",
"=",
"1",
"if",
"appended",
":",
"for",
"line",
"in",
"open",
"(",
"self",
".",
"pdb1",
")",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'ATOM'",
")",
"or",
"(",
"line",
"[",
"21",
"]",
"!=",
"self",
".",
"chain_1",
"and",
"line",
"[",
"21",
"]",
"!=",
"' '",
")",
":",
"continue",
"out",
".",
"write",
"(",
"line",
"[",
":",
"7",
"]",
")",
"out",
".",
"write",
"(",
"'{: >4}'",
".",
"format",
"(",
"atomid",
")",
")",
"atomid",
"+=",
"1",
"out",
".",
"write",
"(",
"line",
"[",
"11",
":",
"21",
"]",
")",
"out",
".",
"write",
"(",
"'A'",
")",
"out",
".",
"write",
"(",
"line",
"[",
"22",
":",
"]",
")",
"for",
"line",
"in",
"open",
"(",
"self",
".",
"pdb2",
")",
":",
"if",
"not",
"line",
".",
"startswith",
"(",
"'ATOM'",
")",
"or",
"(",
"line",
"[",
"21",
"]",
"!=",
"self",
".",
"chain_2",
"and",
"line",
"[",
"21",
"]",
"!=",
"' '",
")",
":",
"continue",
"x",
"=",
"float",
"(",
"line",
"[",
"32",
":",
"38",
"]",
")",
"y",
"=",
"float",
"(",
"line",
"[",
"39",
":",
"46",
"]",
")",
"z",
"=",
"float",
"(",
"line",
"[",
"48",
":",
"54",
"]",
")",
"vec",
"=",
"np",
".",
"array",
"(",
"[",
"x",
",",
"y",
",",
"z",
",",
"1",
"]",
")",
"x",
",",
"y",
",",
"z",
",",
"_",
"=",
"matrix",
".",
"dot",
"(",
"vec",
")",
"out",
".",
"write",
"(",
"line",
"[",
":",
"7",
"]",
")",
"out",
".",
"write",
"(",
"'{: >4}'",
".",
"format",
"(",
"atomid",
")",
")",
"atomid",
"+=",
"1",
"out",
".",
"write",
"(",
"line",
"[",
"11",
":",
"21",
"]",
")",
"out",
".",
"write",
"(",
"'B'",
")",
"out",
".",
"write",
"(",
"line",
"[",
"22",
":",
"30",
"]",
")",
"out",
".",
"write",
"(",
"'{:>8.3f}{:>8.3f}{:>8.3f}'",
".",
"format",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
"out",
".",
"write",
"(",
"line",
"[",
"54",
":",
"]",
")",
"out",
".",
"close",
"(",
")"
]
| Save the second PDB file aligned to the first.
If appended is True, both are saved as different chains. | [
"Save",
"the",
"second",
"PDB",
"file",
"aligned",
"to",
"the",
"first",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L187-L228 | train |
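
An end-to-end usage sketch; TMscoring and optimise() are assumptions about the package's public API (a concrete Aligning subclass and a fitting step implied by get_current_values()), not confirmed by this record:

```python
# Hypothetical end-to-end use; TMscoring and optimise() are assumed names.
import tmscoring

align = tmscoring.TMscoring('model.pdb', 'reference.pdb')
align.optimise()                                          # fit theta/phi/psi/dx/dy/dz
align.write(outputfile='superposed.pdb', appended=True)   # chains A and B in one file
```
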
Dapid/tmscoring | tmscoring/tmscore.py | Aligning._load_data_alignment | def _load_data_alignment(self, chain1, chain2):
"""
Extract the sequences from the PDB file, perform the alignment,
and load the coordinates of the CA of the common residues.
"""
parser = PDB.PDBParser(QUIET=True)
ppb = PDB.PPBuilder()
structure1 = parser.get_structure(chain1, self.pdb1)
structure2 = parser.get_structure(chain2, self.pdb2)
seq1 = str(ppb.build_peptides(structure1)[0].get_sequence())
seq2 = str(ppb.build_peptides(structure2)[0].get_sequence())
# Alignment parameters taken from PconsFold renumbering script.
align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0]
indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1]))
if s1 != '-' and s2 != '-')
coord1 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure1.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
coord2 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure2.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
self.coord1 = coord1
self.coord2 = coord2
self.N = len(seq1) | python | def _load_data_alignment(self, chain1, chain2):
"""
Extract the sequences from the PDB file, perform the alignment,
and load the coordinates of the CA of the common residues.
"""
parser = PDB.PDBParser(QUIET=True)
ppb = PDB.PPBuilder()
structure1 = parser.get_structure(chain1, self.pdb1)
structure2 = parser.get_structure(chain2, self.pdb2)
seq1 = str(ppb.build_peptides(structure1)[0].get_sequence())
seq2 = str(ppb.build_peptides(structure2)[0].get_sequence())
# Alignment parameters taken from PconsFold renumbering script.
align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0]
indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1]))
if s1 != '-' and s2 != '-')
coord1 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure1.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
coord2 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure2.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
self.coord1 = coord1
self.coord2 = coord2
self.N = len(seq1) | [
"def",
"_load_data_alignment",
"(",
"self",
",",
"chain1",
",",
"chain2",
")",
":",
"parser",
"=",
"PDB",
".",
"PDBParser",
"(",
"QUIET",
"=",
"True",
")",
"ppb",
"=",
"PDB",
".",
"PPBuilder",
"(",
")",
"structure1",
"=",
"parser",
".",
"get_structure",
"(",
"chain1",
",",
"self",
".",
"pdb1",
")",
"structure2",
"=",
"parser",
".",
"get_structure",
"(",
"chain2",
",",
"self",
".",
"pdb2",
")",
"seq1",
"=",
"str",
"(",
"ppb",
".",
"build_peptides",
"(",
"structure1",
")",
"[",
"0",
"]",
".",
"get_sequence",
"(",
")",
")",
"seq2",
"=",
"str",
"(",
"ppb",
".",
"build_peptides",
"(",
"structure2",
")",
"[",
"0",
"]",
".",
"get_sequence",
"(",
")",
")",
"# Alignment parameters taken from PconsFold renumbering script.",
"align",
"=",
"pairwise2",
".",
"align",
".",
"globalms",
"(",
"seq1",
",",
"seq2",
",",
"2",
",",
"-",
"1",
",",
"-",
"0.5",
",",
"-",
"0.1",
")",
"[",
"0",
"]",
"indexes",
"=",
"set",
"(",
"i",
"for",
"i",
",",
"(",
"s1",
",",
"s2",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"align",
"[",
"0",
"]",
",",
"align",
"[",
"1",
"]",
")",
")",
"if",
"s1",
"!=",
"'-'",
"and",
"s2",
"!=",
"'-'",
")",
"coord1",
"=",
"np",
".",
"hstack",
"(",
"[",
"np",
".",
"concatenate",
"(",
"(",
"r",
"[",
"'CA'",
"]",
".",
"get_coord",
"(",
")",
",",
"(",
"1",
",",
")",
")",
")",
"[",
":",
",",
"None",
"]",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"structure1",
".",
"get_residues",
"(",
")",
")",
"if",
"i",
"in",
"indexes",
"and",
"'CA'",
"in",
"r",
"]",
")",
".",
"astype",
"(",
"DTYPE",
",",
"copy",
"=",
"False",
")",
"coord2",
"=",
"np",
".",
"hstack",
"(",
"[",
"np",
".",
"concatenate",
"(",
"(",
"r",
"[",
"'CA'",
"]",
".",
"get_coord",
"(",
")",
",",
"(",
"1",
",",
")",
")",
")",
"[",
":",
",",
"None",
"]",
"for",
"i",
",",
"r",
"in",
"enumerate",
"(",
"structure2",
".",
"get_residues",
"(",
")",
")",
"if",
"i",
"in",
"indexes",
"and",
"'CA'",
"in",
"r",
"]",
")",
".",
"astype",
"(",
"DTYPE",
",",
"copy",
"=",
"False",
")",
"self",
".",
"coord1",
"=",
"coord1",
"self",
".",
"coord2",
"=",
"coord2",
"self",
".",
"N",
"=",
"len",
"(",
"seq1",
")"
]
| Extract the sequences from the PDB file, perform the alignment,
and load the coordinates of the CA of the common residues. | [
"Extract",
"the",
"sequences",
"from",
"the",
"PDB",
"file",
"perform",
"the",
"alignment",
"and",
"load",
"the",
"coordinates",
"of",
"the",
"CA",
"of",
"the",
"common",
"residues",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L230-L259 | train |
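
The alignment call above can be exercised on toy sequences without any PDB input; this uses the same Biopython scoring parameters and the same common-index filter:

```python
from Bio import pairwise2

# Match = 2, mismatch = -1, gap open = -0.5, gap extend = -0.1, as in the record.
seq1, seq2 = "MKTAYIAK", "MKTAYIK"
align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0]
indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1]))
              if s1 != '-' and s2 != '-')
print(align[0], align[1], sorted(indexes))
```
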
Dapid/tmscoring | tmscoring/tmscore.py | Aligning._load_data_index | def _load_data_index(self, chain1, chain2):
"""
Load the coordinates of the CA of the common residues.
"""
parser = PDB.PDBParser(QUIET=True)
structure1 = parser.get_structure(chain1, self.pdb1)
structure2 = parser.get_structure(chain2, self.pdb2)
residues1 = list(structure1.get_residues())
residues2 = list(structure2.get_residues())
indexes1 = set(r.id[1] for r in residues1)
indexes2 = set(r.id[1] for r in residues2)
indexes = indexes1.intersection(indexes2)
self.indexes = indexes.copy()
self.N = len(indexes)
coord1 = []
indexes1 = indexes.copy()
for r in residues1:
if r.id[1] in indexes1 and 'CA' in r:
coord1.append(np.concatenate((r['CA'].get_coord(), (1,)))[:, None])
# Remove from index to avoid repeated residues
indexes1.remove(r.id[1])
coord1 = np.hstack(coord1).astype(DTYPE, copy=False)
coord2 = []
for r in residues2:
if r.id[1] in indexes and 'CA' in r:
coord2.append(np.concatenate((r['CA'].get_coord(), (1,)))[:, None])
indexes.remove(r.id[1])
coord2 = np.hstack(coord2).astype(DTYPE, copy=False)
self.coord1 = coord1
self.coord2 = coord2 | python | def _load_data_index(self, chain1, chain2):
"""
Load the coordinates of the CA of the common residues.
"""
parser = PDB.PDBParser(QUIET=True)
structure1 = parser.get_structure(chain1, self.pdb1)
structure2 = parser.get_structure(chain2, self.pdb2)
residues1 = list(structure1.get_residues())
residues2 = list(structure2.get_residues())
indexes1 = set(r.id[1] for r in residues1)
indexes2 = set(r.id[1] for r in residues2)
indexes = indexes1.intersection(indexes2)
self.indexes = indexes.copy()
self.N = len(indexes)
coord1 = []
indexes1 = indexes.copy()
for r in residues1:
if r.id[1] in indexes1 and 'CA' in r:
coord1.append(np.concatenate((r['CA'].get_coord(), (1,)))[:, None])
# Remove from index to avoid repeated residues
indexes1.remove(r.id[1])
coord1 = np.hstack(coord1).astype(DTYPE, copy=False)
coord2 = []
for r in residues2:
if r.id[1] in indexes and 'CA' in r:
coord2.append(np.concatenate((r['CA'].get_coord(), (1,)))[:, None])
indexes.remove(r.id[1])
coord2 = np.hstack(coord2).astype(DTYPE, copy=False)
self.coord1 = coord1
self.coord2 = coord2 | [
"def",
"_load_data_index",
"(",
"self",
",",
"chain1",
",",
"chain2",
")",
":",
"parser",
"=",
"PDB",
".",
"PDBParser",
"(",
"QUIET",
"=",
"True",
")",
"structure1",
"=",
"parser",
".",
"get_structure",
"(",
"chain1",
",",
"self",
".",
"pdb1",
")",
"structure2",
"=",
"parser",
".",
"get_structure",
"(",
"chain2",
",",
"self",
".",
"pdb2",
")",
"residues1",
"=",
"list",
"(",
"structure1",
".",
"get_residues",
"(",
")",
")",
"residues2",
"=",
"list",
"(",
"structure2",
".",
"get_residues",
"(",
")",
")",
"indexes1",
"=",
"set",
"(",
"r",
".",
"id",
"[",
"1",
"]",
"for",
"r",
"in",
"residues1",
")",
"indexes2",
"=",
"set",
"(",
"r",
".",
"id",
"[",
"1",
"]",
"for",
"r",
"in",
"residues2",
")",
"indexes",
"=",
"indexes1",
".",
"intersection",
"(",
"indexes2",
")",
"self",
".",
"indexes",
"=",
"indexes",
".",
"copy",
"(",
")",
"self",
".",
"N",
"=",
"len",
"(",
"indexes",
")",
"coord1",
"=",
"[",
"]",
"indexes1",
"=",
"indexes",
".",
"copy",
"(",
")",
"for",
"r",
"in",
"residues1",
":",
"if",
"r",
".",
"id",
"[",
"1",
"]",
"in",
"indexes1",
"and",
"'CA'",
"in",
"r",
":",
"coord1",
".",
"append",
"(",
"np",
".",
"concatenate",
"(",
"(",
"r",
"[",
"'CA'",
"]",
".",
"get_coord",
"(",
")",
",",
"(",
"1",
",",
")",
")",
")",
"[",
":",
",",
"None",
"]",
")",
"# Remove from index to avoid repeated residues",
"indexes1",
".",
"remove",
"(",
"r",
".",
"id",
"[",
"1",
"]",
")",
"coord1",
"=",
"np",
".",
"hstack",
"(",
"coord1",
")",
".",
"astype",
"(",
"DTYPE",
",",
"copy",
"=",
"False",
")",
"coord2",
"=",
"[",
"]",
"for",
"r",
"in",
"residues2",
":",
"if",
"r",
".",
"id",
"[",
"1",
"]",
"in",
"indexes",
"and",
"'CA'",
"in",
"r",
":",
"coord2",
".",
"append",
"(",
"np",
".",
"concatenate",
"(",
"(",
"r",
"[",
"'CA'",
"]",
".",
"get_coord",
"(",
")",
",",
"(",
"1",
",",
")",
")",
")",
"[",
":",
",",
"None",
"]",
")",
"indexes",
".",
"remove",
"(",
"r",
".",
"id",
"[",
"1",
"]",
")",
"coord2",
"=",
"np",
".",
"hstack",
"(",
"coord2",
")",
".",
"astype",
"(",
"DTYPE",
",",
"copy",
"=",
"False",
")",
"self",
".",
"coord1",
"=",
"coord1",
"self",
".",
"coord2",
"=",
"coord2"
]
| Load the coordinates of the CA of the common residues. | [
"Load",
"the",
"coordinates",
"of",
"the",
"CA",
"of",
"the",
"common",
"residues",
"."
]
| 353c567e201ee9835c8209f6130b80b1cfb5b10f | https://github.com/Dapid/tmscoring/blob/353c567e201ee9835c8209f6130b80b1cfb5b10f/tmscoring/tmscore.py#L261-L297 | train |
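
The core of the residue matching above is a set intersection over author-assigned residue numbers (r.id[1]); a tiny standalone illustration:

```python
# Residue numbers present in each structure; only shared ones are kept.
ids1 = {3, 4, 5, 6, 9}
ids2 = {4, 5, 6, 7, 9}
shared = ids1 & ids2
print(sorted(shared))   # [4, 5, 6, 9] -> self.N would be 4 aligned positions
```
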
uw-it-aca/uw-restclients-sws | uw_sws/section_status.py | _json_to_sectionstatus | def _json_to_sectionstatus(section_data):
"""
Returns a uw_sws.models.SectionStatus object
created from the passed json.
"""
section_status = SectionStatus()
if section_data["AddCodeRequired"] == 'true':
section_status.add_code_required = True
else:
section_status.add_code_required = False
section_status.current_enrollment = int(section_data["CurrentEnrollment"])
current_period = int(section_data["CurrentRegistrationPeriod"])
section_status.current_registration_period = current_period
if section_data["FacultyCodeRequired"] == 'true':
section_status.faculty_code_required = True
else:
section_status.faculty_code_required = False
limit_estimate = int(section_data["LimitEstimateEnrollment"])
section_status.limit_estimated_enrollment = limit_estimate
indicator = section_data["LimitEstimateEnrollmentIndicator"]
section_status.limit_estimate_enrollment_indicator = indicator
section_status.room_capacity = int(section_data["RoomCapacity"])
section_status.sln = int(section_data["SLN"])
section_status.space_available = int(section_data["SpaceAvailable"])
if section_data["Status"] == "open":
section_status.is_open = True
else:
section_status.is_open = False
return section_status | python | def _json_to_sectionstatus(section_data):
"""
Returns a uw_sws.models.SectionStatus object
created from the passed json.
"""
section_status = SectionStatus()
if section_data["AddCodeRequired"] == 'true':
section_status.add_code_required = True
else:
section_status.add_code_required = False
section_status.current_enrollment = int(section_data["CurrentEnrollment"])
current_period = int(section_data["CurrentRegistrationPeriod"])
section_status.current_registration_period = current_period
if section_data["FacultyCodeRequired"] == 'true':
section_status.faculty_code_required = True
else:
section_status.faculty_code_required = False
limit_estimate = int(section_data["LimitEstimateEnrollment"])
section_status.limit_estimated_enrollment = limit_estimate
indicator = section_data["LimitEstimateEnrollmentIndicator"]
section_status.limit_estimate_enrollment_indicator = indicator
section_status.room_capacity = int(section_data["RoomCapacity"])
section_status.sln = int(section_data["SLN"])
section_status.space_available = int(section_data["SpaceAvailable"])
if section_data["Status"] == "open":
section_status.is_open = True
else:
section_status.is_open = False
return section_status | [
"def",
"_json_to_sectionstatus",
"(",
"section_data",
")",
":",
"section_status",
"=",
"SectionStatus",
"(",
")",
"if",
"section_data",
"[",
"\"AddCodeRequired\"",
"]",
"==",
"'true'",
":",
"section_status",
".",
"add_code_required",
"=",
"True",
"else",
":",
"section_status",
".",
"add_code_required",
"=",
"False",
"section_status",
".",
"current_enrollment",
"=",
"int",
"(",
"section_data",
"[",
"\"CurrentEnrollment\"",
"]",
")",
"current_period",
"=",
"int",
"(",
"section_data",
"[",
"\"CurrentRegistrationPeriod\"",
"]",
")",
"section_status",
".",
"current_registration_period",
"=",
"current_period",
"if",
"section_data",
"[",
"\"FacultyCodeRequired\"",
"]",
"==",
"'true'",
":",
"section_status",
".",
"faculty_code_required",
"=",
"True",
"else",
":",
"section_status",
".",
"faculty_code_required",
"=",
"False",
"limit_estimate",
"=",
"int",
"(",
"section_data",
"[",
"\"LimitEstimateEnrollment\"",
"]",
")",
"section_status",
".",
"limit_estimated_enrollment",
"=",
"limit_estimate",
"indicator",
"=",
"section_data",
"[",
"\"LimitEstimateEnrollmentIndicator\"",
"]",
"section_status",
".",
"limit_estimate_enrollment_indicator",
"=",
"indicator",
"section_status",
".",
"room_capacity",
"=",
"int",
"(",
"section_data",
"[",
"\"RoomCapacity\"",
"]",
")",
"section_status",
".",
"sln",
"=",
"int",
"(",
"section_data",
"[",
"\"SLN\"",
"]",
")",
"section_status",
".",
"space_available",
"=",
"int",
"(",
"section_data",
"[",
"\"SpaceAvailable\"",
"]",
")",
"if",
"section_data",
"[",
"\"Status\"",
"]",
"==",
"\"open\"",
":",
"section_status",
".",
"is_open",
"=",
"True",
"else",
":",
"section_status",
".",
"is_open",
"=",
"False",
"return",
"section_status"
]
| Returns a uw_sws.models.SectionStatus object
created from the passed json. | [
"Returns",
"a",
"uw_sws",
".",
"models",
".",
"SectionStatus",
"object",
"created",
"from",
"the",
"passed",
"json",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/section_status.py#L21-L52 | train |
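
A fabricated payload exercising the parser above (calling the module-private helper directly for illustration; the field values are examples, not real SWS data):

```python
# The import path matches the record; the payload below is made up.
from uw_sws.section_status import _json_to_sectionstatus

section_data = {
    "AddCodeRequired": "false",
    "CurrentEnrollment": "18",
    "CurrentRegistrationPeriod": "2",
    "FacultyCodeRequired": "true",
    "LimitEstimateEnrollment": "20",
    "LimitEstimateEnrollmentIndicator": "limit",
    "RoomCapacity": "25",
    "SLN": "12345",
    "SpaceAvailable": "2",
    "Status": "open",
}
status = _json_to_sectionstatus(section_data)
print(status.is_open, status.space_available)   # True 2
```
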
Kortemme-Lab/klab | klab/bio/pdb_util.py | renumber_atoms | def renumber_atoms(lines):
'''
Takes in a list of PDB lines and renumbers the atoms appropriately
'''
new_lines = []
current_number = 1
for line in lines:
if line.startswith('ATOM') or line.startswith('HETATM'):
new_lines.append(
line[:6] + string.rjust('%d' % current_number, 5) + line[11:]
)
current_number += 1
else:
if line.startswith('TER'):
current_number += 1
new_lines.append(line)
return new_lines | python | def renumber_atoms(lines):
'''
Takes in a list of PDB lines and renumbers the atoms appropriately
'''
new_lines = []
current_number = 1
for line in lines:
if line.startswith('ATOM') or line.startswith('HETATM'):
new_lines.append(
line[:6] + string.rjust('%d' % current_number, 5) + line[11:]
)
current_number += 1
else:
if line.startswith('TER'):
current_number += 1
new_lines.append(line)
return new_lines | [
"def",
"renumber_atoms",
"(",
"lines",
")",
":",
"new_lines",
"=",
"[",
"]",
"current_number",
"=",
"1",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
".",
"startswith",
"(",
"'ATOM'",
")",
"or",
"line",
".",
"startswith",
"(",
"'HETATM'",
")",
":",
"new_lines",
".",
"append",
"(",
"line",
"[",
":",
"6",
"]",
"+",
"string",
".",
"rjust",
"(",
"'%d'",
"%",
"current_number",
",",
"5",
")",
"+",
"line",
"[",
"11",
":",
"]",
")",
"current_number",
"+=",
"1",
"else",
":",
"if",
"line",
".",
"startswith",
"(",
"'TER'",
")",
":",
"current_number",
"+=",
"1",
"new_lines",
".",
"append",
"(",
"line",
")",
"return",
"new_lines"
]
| Takes in a list of PDB lines and renumbers the atoms appropriately | [
"Takes",
"in",
"a",
"list",
"of",
"PDB",
"lines",
"and",
"renumbers",
"the",
"atoms",
"appropriately"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb_util.py#L30-L46 | train |
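
A Python 3 analogue of the renumbering rule above (the original relies on the Python 2 string.rjust helper); note that a TER record consumes a serial number without being rewritten:

```python
lines = [
    "ATOM      9  N   ALA A   1      11.104  13.207   2.100  1.00  0.00",
    "ATOM     10  CA  ALA A   1      12.560  13.207   2.100  1.00  0.00",
    "TER",
    "HETATM   55  O   HOH B   2       8.130   9.700   1.010  1.00  0.00",
]
current = 1
out = []
for line in lines:
    if line.startswith(('ATOM', 'HETATM')):
        out.append(line[:6] + str(current).rjust(5) + line[11:])
        current += 1
    else:
        if line.startswith('TER'):
            current += 1   # TER consumes a serial number too
        out.append(line)
print('\n'.join(out))
```
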
Kortemme-Lab/klab | klab/bio/pdb_util.py | clean_alternate_location_indicators | def clean_alternate_location_indicators(lines):
'''
    Keeps only the first atom, if alternate location identifiers are being used
    Removes alternate location ID character
'''
new_lines = []
previously_seen_alt_atoms = set()
for line in lines:
if line.startswith('ATOM'):
alt_loc_id = line[16]
if alt_loc_id != ' ':
atom_name = line[12:16].strip()
res_name = line[17:20].strip()
chain = line[21]
resnum = long( line[22:26].strip() )
loc_tup = (atom_name, res_name, chain, resnum)
if loc_tup in previously_seen_alt_atoms:
# Continue main for loop
continue
else:
previously_seen_alt_atoms.add( loc_tup )
line = line[:16] + ' ' + line[17:]
new_lines.append(line)
return new_lines | python | def clean_alternate_location_indicators(lines):
'''
    Keeps only the first atom, if alternate location identifiers are being used
    Removes alternate location ID character
'''
new_lines = []
previously_seen_alt_atoms = set()
for line in lines:
if line.startswith('ATOM'):
alt_loc_id = line[16]
if alt_loc_id != ' ':
atom_name = line[12:16].strip()
res_name = line[17:20].strip()
chain = line[21]
resnum = long( line[22:26].strip() )
loc_tup = (atom_name, res_name, chain, resnum)
if loc_tup in previously_seen_alt_atoms:
# Continue main for loop
continue
else:
previously_seen_alt_atoms.add( loc_tup )
line = line[:16] + ' ' + line[17:]
new_lines.append(line)
return new_lines | [
"def",
"clean_alternate_location_indicators",
"(",
"lines",
")",
":",
"new_lines",
"=",
"[",
"]",
"previously_seen_alt_atoms",
"=",
"set",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
".",
"startswith",
"(",
"'ATOM'",
")",
":",
"alt_loc_id",
"=",
"line",
"[",
"16",
"]",
"if",
"alt_loc_id",
"!=",
"' '",
":",
"atom_name",
"=",
"line",
"[",
"12",
":",
"16",
"]",
".",
"strip",
"(",
")",
"res_name",
"=",
"line",
"[",
"17",
":",
"20",
"]",
".",
"strip",
"(",
")",
"chain",
"=",
"line",
"[",
"21",
"]",
"resnum",
"=",
"long",
"(",
"line",
"[",
"22",
":",
"26",
"]",
".",
"strip",
"(",
")",
")",
"loc_tup",
"=",
"(",
"atom_name",
",",
"res_name",
",",
"chain",
",",
"resnum",
")",
"if",
"loc_tup",
"in",
"previously_seen_alt_atoms",
":",
"# Continue main for loop",
"continue",
"else",
":",
"previously_seen_alt_atoms",
".",
"add",
"(",
"loc_tup",
")",
"line",
"=",
"line",
"[",
":",
"16",
"]",
"+",
"' '",
"+",
"line",
"[",
"17",
":",
"]",
"new_lines",
".",
"append",
"(",
"line",
")",
"return",
"new_lines"
]
| Keeps only the first atom, if alternate location identifiers are being used
Removes alternate location ID character | [
"Keeps",
"only",
"the",
"first",
"atom",
"if",
"alternated",
"location",
"identifiers",
"are",
"being",
"used",
"Removes",
"alternate",
"location",
"ID",
"charactor"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb_util.py#L48-L71 | train |
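
A Python 3 analogue of the deduplication rule above (the helper itself calls Python 2's long()); column 17 of an ATOM record is the altLoc identifier:

```python
def first_altloc_only(lines):
    seen, out = set(), []
    for line in lines:
        if line.startswith('ATOM') and line[16] != ' ':
            key = (line[12:16].strip(), line[17:20].strip(),
                   line[21], int(line[22:26]))
            if key in seen:
                continue            # later conformer of an atom already kept
            seen.add(key)
            line = line[:16] + ' ' + line[17:]   # blank the altLoc character
        out.append(line)
    return out

lines = [
    "ATOM      1  CA AVAL A  10      10.000  10.000  10.000  0.50  0.00",
    "ATOM      2  CA BVAL A  10      10.200  10.100   9.900  0.50  0.00",
]
print(first_altloc_only(lines))   # only the A conformer survives, altLoc blanked
```
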
Kortemme-Lab/klab | klab/bio/ligand.py | Ligand.parse_pdb_ligand_info | def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
if ligand_type:
self.LigandType = ligand_type.group(1) | python | def parse_pdb_ligand_info(self, pdb_ligand_info):
'''This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type.'''
mtchs = re.findall('(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL)
for m in mtchs:
if m.upper().find('CHEMICALID="{0}"'.format(self.PDBCode.upper())) != -1:
ligand_type = re.match('<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
if ligand_type:
self.LigandType = ligand_type.group(1) | [
"def",
"parse_pdb_ligand_info",
"(",
"self",
",",
"pdb_ligand_info",
")",
":",
"mtchs",
"=",
"re",
".",
"findall",
"(",
"'(<ligand.*?</ligand>)'",
",",
"pdb_ligand_info",
",",
"re",
".",
"DOTALL",
")",
"for",
"m",
"in",
"mtchs",
":",
"if",
"m",
".",
"upper",
"(",
")",
".",
"find",
"(",
"'CHEMICALID=\"{0}\"'",
".",
"format",
"(",
"self",
".",
"PDBCode",
".",
"upper",
"(",
")",
")",
")",
"!=",
"-",
"1",
":",
"ligand_type",
"=",
"re",
".",
"match",
"(",
"'<ligand.*?\\stype=\"(.*?)\".*?>'",
",",
"m",
",",
"re",
".",
"DOTALL",
")",
"if",
"ligand_type",
":",
"self",
".",
"LigandType",
"=",
"ligand_type",
".",
"group",
"(",
"1",
")"
]
| This only parses the ligand type as all the other information should be in the .cif file. The XML file has
proper capitalization whereas the .cif file uses all caps for the ligand type. | [
"This",
"only",
"parses",
"the",
"ligand",
"type",
"as",
"all",
"the",
"other",
"information",
"should",
"be",
"in",
"the",
".",
"cif",
"file",
".",
"The",
"XML",
"file",
"has",
"proper",
"capitalization",
"whereas",
"the",
".",
"cif",
"file",
"uses",
"all",
"caps",
"for",
"the",
"ligand",
"type",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/ligand.py#L318-L326 | train |
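
The two-step match above can be run standalone on a fabricated XML fragment; raw strings are used for the regex patterns here, otherwise the logic is identical:

```python
import re

pdb_ligand_info = '<ligand chemicalID="GTP" type="non-polymer">...</ligand>'
code = "GTP"
for m in re.findall(r'(<ligand.*?</ligand>)', pdb_ligand_info, re.DOTALL):
    if m.upper().find('CHEMICALID="{0}"'.format(code.upper())) != -1:
        ligand_type = re.match(r'<ligand.*?\stype="(.*?)".*?>', m, re.DOTALL)
        if ligand_type:
            print(ligand_type.group(1))   # -> non-polymer
```
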
Kortemme-Lab/klab | klab/bio/ligand.py | LigandMap.add_code_mapping | def add_code_mapping(self, from_pdb_code, to_pdb_code):
'''Add a code mapping without a given instance.'''
# Consistency check - make sure that we always map the same code e.g. 'LIG' to the same code e.g. 'GTP'
if from_pdb_code in self.code_map:
assert(self.code_map[from_pdb_code] == to_pdb_code)
else:
self.code_map[from_pdb_code] = to_pdb_code | python | def add_code_mapping(self, from_pdb_code, to_pdb_code):
'''Add a code mapping without a given instance.'''
# Consistency check - make sure that we always map the same code e.g. 'LIG' to the same code e.g. 'GTP'
if from_pdb_code in self.code_map:
assert(self.code_map[from_pdb_code] == to_pdb_code)
else:
self.code_map[from_pdb_code] = to_pdb_code | [
"def",
"add_code_mapping",
"(",
"self",
",",
"from_pdb_code",
",",
"to_pdb_code",
")",
":",
"# Consistency check - make sure that we always map the same code e.g. 'LIG' to the same code e.g. 'GTP'",
"if",
"from_pdb_code",
"in",
"self",
".",
"code_map",
":",
"assert",
"(",
"self",
".",
"code_map",
"[",
"from_pdb_code",
"]",
"==",
"to_pdb_code",
")",
"else",
":",
"self",
".",
"code_map",
"[",
"from_pdb_code",
"]",
"=",
"to_pdb_code"
]
| Add a code mapping without a given instance. | [
"Add",
"a",
"code",
"mapping",
"without",
"a",
"given",
"instance",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/ligand.py#L554-L561 | train |
mardix/Mocha | mocha/contrib/views/auth.py | Login.reset_password | def reset_password(self, action_token, signed_data):
"""Reset the user password. It was triggered by LOST-PASSWORD """
try:
action = "reset-password"
user = get_user_by_action_token(action, action_token)
if not user or not user.signed_data_match(signed_data, action):
raise mocha_exc.AppError("Verification Invalid!")
if request.method == "POST":
password = request.form.get("password", "").strip()
password_confirm = request.form.get("password_confirm",
"").strip()
if not password or password != password_confirm:
raise exceptions.AuthError(
"Password is missing or passwords don't match")
user.change_password(password)
user.set_email_verified(True)
session_set_require_password_change(False)
flash_success("Password updated successfully!")
return redirect(__options__.get("login_view") or self.login)
return {"action_token": action_token, "signed_data": signed_data}
except (mocha_exc.AppError, exceptions.AuthError) as ex:
flash_error(str(ex))
except Exception as e:
logging.exception(e)
flash_error("Unable to reset password")
return redirect(self.login) | python | def reset_password(self, action_token, signed_data):
"""Reset the user password. It was triggered by LOST-PASSWORD """
try:
action = "reset-password"
user = get_user_by_action_token(action, action_token)
if not user or not user.signed_data_match(signed_data, action):
raise mocha_exc.AppError("Verification Invalid!")
if request.method == "POST":
password = request.form.get("password", "").strip()
password_confirm = request.form.get("password_confirm",
"").strip()
if not password or password != password_confirm:
raise exceptions.AuthError(
"Password is missing or passwords don't match")
user.change_password(password)
user.set_email_verified(True)
session_set_require_password_change(False)
flash_success("Password updated successfully!")
return redirect(__options__.get("login_view") or self.login)
return {"action_token": action_token, "signed_data": signed_data}
except (mocha_exc.AppError, exceptions.AuthError) as ex:
flash_error(str(ex))
except Exception as e:
logging.exception(e)
flash_error("Unable to reset password")
return redirect(self.login) | [
"def",
"reset_password",
"(",
"self",
",",
"action_token",
",",
"signed_data",
")",
":",
"try",
":",
"action",
"=",
"\"reset-password\"",
"user",
"=",
"get_user_by_action_token",
"(",
"action",
",",
"action_token",
")",
"if",
"not",
"user",
"or",
"not",
"user",
".",
"signed_data_match",
"(",
"signed_data",
",",
"action",
")",
":",
"raise",
"mocha_exc",
".",
"AppError",
"(",
"\"Verification Invalid!\"",
")",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"password",
"=",
"request",
".",
"form",
".",
"get",
"(",
"\"password\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"password_confirm",
"=",
"request",
".",
"form",
".",
"get",
"(",
"\"password_confirm\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"if",
"not",
"password",
"or",
"password",
"!=",
"password_confirm",
":",
"raise",
"exceptions",
".",
"AuthError",
"(",
"\"Password is missing or passwords don't match\"",
")",
"user",
".",
"change_password",
"(",
"password",
")",
"user",
".",
"set_email_verified",
"(",
"True",
")",
"session_set_require_password_change",
"(",
"False",
")",
"flash_success",
"(",
"\"Password updated successfully!\"",
")",
"return",
"redirect",
"(",
"__options__",
".",
"get",
"(",
"\"login_view\"",
")",
"or",
"self",
".",
"login",
")",
"return",
"{",
"\"action_token\"",
":",
"action_token",
",",
"\"signed_data\"",
":",
"signed_data",
"}",
"except",
"(",
"mocha_exc",
".",
"AppError",
",",
"exceptions",
".",
"AuthError",
")",
"as",
"ex",
":",
"flash_error",
"(",
"str",
"(",
"ex",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"flash_error",
"(",
"\"Unable to reset password\"",
")",
"return",
"redirect",
"(",
"self",
".",
"login",
")"
]
| Reset the user password. It was triggered by LOST-PASSWORD | [
"Reset",
"the",
"user",
"password",
".",
"It",
"was",
"triggered",
"by",
"LOST",
"-",
"PASSWORD"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/views/auth.py#L337-L366 | train |
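
An illustrative analogue of the action-token pattern using itsdangerous; Mocha's get_user_by_action_token()/signed_data_match() helpers wrap similar machinery, but this is not the library's actual implementation:

```python
from itsdangerous import URLSafeTimedSerializer, BadSignature

# Sign a reset payload per action; the secret and salt are placeholders.
s = URLSafeTimedSerializer("app-secret", salt="reset-password")
token = s.dumps({"user_id": 42})

try:
    data = s.loads(token, max_age=3600)   # reject stale links after an hour
    print("reset allowed for user", data["user_id"])
except BadSignature:                      # covers tampered and expired tokens
    print("Verification Invalid!")
```
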
mardix/Mocha | mocha/contrib/views/auth.py | Login.verify_email | def verify_email(self, action_token, signed_data):
""" Verify email account, in which a link was sent to """
try:
action = "verify-email"
user = get_user_by_action_token(action, action_token)
if not user or not user.signed_data_match(signed_data, action):
raise mocha_exc.AppError("Verification Invalid!")
else:
user.set_email_verified(True)
flash_success("Account verified. You can now login")
username = user.username
if user.login_method == "email":
username = user.email
return redirect(self.login, username=username)
except Exception as e:
logging.exception(e)
flash_error("Verification Failed!")
return redirect(self.login) | python | def verify_email(self, action_token, signed_data):
""" Verify email account, in which a link was sent to """
try:
action = "verify-email"
user = get_user_by_action_token(action, action_token)
if not user or not user.signed_data_match(signed_data, action):
raise mocha_exc.AppError("Verification Invalid!")
else:
user.set_email_verified(True)
flash_success("Account verified. You can now login")
username = user.username
if user.login_method == "email":
username = user.email
return redirect(self.login, username=username)
except Exception as e:
logging.exception(e)
flash_error("Verification Failed!")
return redirect(self.login) | [
"def",
"verify_email",
"(",
"self",
",",
"action_token",
",",
"signed_data",
")",
":",
"try",
":",
"action",
"=",
"\"verify-email\"",
"user",
"=",
"get_user_by_action_token",
"(",
"action",
",",
"action_token",
")",
"if",
"not",
"user",
"or",
"not",
"user",
".",
"signed_data_match",
"(",
"signed_data",
",",
"action",
")",
":",
"raise",
"mocha_exc",
".",
"AppError",
"(",
"\"Verification Invalid!\"",
")",
"else",
":",
"user",
".",
"set_email_verified",
"(",
"True",
")",
"flash_success",
"(",
"\"Account verified. You can now login\"",
")",
"username",
"=",
"user",
".",
"username",
"if",
"user",
".",
"login_method",
"==",
"\"email\"",
":",
"username",
"=",
"user",
".",
"email",
"return",
"redirect",
"(",
"self",
".",
"login",
",",
"username",
"=",
"username",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"flash_error",
"(",
"\"Verification Failed!\"",
")",
"return",
"redirect",
"(",
"self",
".",
"login",
")"
]
| Verify the email account to which a link was sent | [
"Verify",
"email",
"account",
"in",
"which",
"a",
"link",
"was",
"sent",
"to"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/views/auth.py#L372-L390 | train |
mardix/Mocha | mocha/contrib/views/auth.py | Login.oauth_connect | def oauth_connect(self, provider, action):
"""
        This endpoint doesn't check if the user is logged in, because it has two functions
        1. If the user is not logged in, it will try to sign up the user
        - if the social info exists, it will log in
        - if not, it will create a new account and proceed
        2. If the user is logged in, it will try to create a social login entry with
        the current user
        **** This method doesn't save the user token; it only retrieves the ID
        to log in, or the ID, name, email if signing up
:param provider:
:param action: connect|authorized|
- connect: to connect to the endpoint
- authorized, when coming back
"""
valid_actions = ["connect", "authorized", "test"]
_redirect = views.auth.Account.account_settings if is_authenticated() else self.login
if action not in valid_actions \
or "oauth" not in __options__.get("registration_methods") \
or not __options__.get("allow_registration") \
or not hasattr(oauth, provider):
return redirect(_redirect)
client = getattr(oauth, provider)
params = client.__params__
me_args = params.get("me")
user_id = params.get("user_id")
oauth_user_id = None
oauth_name = None
oauth_email = None
if action == "test":
session_data = {
"provider": "ensure",
"user_id": "1234",
"name": "Mardix",
"email": "[email protected]",
}
set_oauth_session(session_data)
return redirect(url_for(self.register, oauth=1))
if action == "connect":
_next = request.args.get('next')
authorized_url = url_for(self,
provider=provider,
action="authorized",
next=_next or request.referrer or None,
_external=True)
return client.authorize(callback=authorized_url)
elif action == "authorized":
resp = client.authorized_response()
if resp is None:
pass
elif isinstance(resp, OAuthException):
flash_error("Access Denied")
else:
if not me_args:
oauth_user_id = resp.get(user_id)
else:
me = client.get(me_args)
if action == "authorized" and oauth_user_id:
if is_authenticated():
try:
# Add federated login to current_user
current_user.add_federated_login(provider=provider,
federated_id=oauth_user_id)
flash_success(
"You can now login with your %s account" % provider.upper())
except Exception as e:
logging.exception(e)
return redirect(views.auth.Account.account_settings)
# User not logged in
else:
# Existing user
user = with_federation(provider, oauth_user_id)
if user:
create_session(user)
return redirect(request.args.get("next") or __options__.get(
"login_view"))
# New User
else:
session_data = {
"provider": provider,
"user_id": oauth_user_id,
"name": oauth_name,
"email": oauth_email,
}
set_oauth_session(session_data)
else:
return redirect(_redirect)
return {
"action": action,
"provider": provider,
"authorized_url": ""
}
return redirect(_redirect) | python | def oauth_connect(self, provider, action):
"""
This endpoint doesn't check if user is logged in, because it has two functions
1. If the user is not logged in, it will try to sign up the user
- if the social info exists, it will log the user in
- if not, it will create a new account and proceed
2. If user is logged in, it will try to create a social login entry with
the current user
**** This method doesn't save the user token; it only retrieves the ID
to login or ID, name, email if signing up
:param provider:
:param action: connect|authorized|
- connect: to connect to the endpoint
- authorized, when coming back
"""
valid_actions = ["connect", "authorized", "test"]
_redirect = views.auth.Account.account_settings if is_authenticated() else self.login
if action not in valid_actions \
or "oauth" not in __options__.get("registration_methods") \
or not __options__.get("allow_registration") \
or not hasattr(oauth, provider):
return redirect(_redirect)
client = getattr(oauth, provider)
params = client.__params__
me_args = params.get("me")
user_id = params.get("user_id")
oauth_user_id = None
oauth_name = None
oauth_email = None
if action == "test":
session_data = {
"provider": "ensure",
"user_id": "1234",
"name": "Mardix",
"email": "[email protected]",
}
set_oauth_session(session_data)
return redirect(url_for(self.register, oauth=1))
if action == "connect":
_next = request.args.get('next')
authorized_url = url_for(self,
provider=provider,
action="authorized",
next=_next or request.referrer or None,
_external=True)
return client.authorize(callback=authorized_url)
elif action == "authorized":
resp = client.authorized_response()
if resp is None:
pass
elif isinstance(resp, OAuthException):
flash_error("Access Denied")
else:
if not me_args:
oauth_user_id = resp.get(user_id)
else:
me = client.get(me_args)
if action == "authorized" and oauth_user_id:
if is_authenticated():
try:
# Add federated login to current_user
current_user.add_federated_login(provider=provider,
federated_id=oauth_user_id)
flash_success(
"You can now login with your %s account" % provider.upper())
except Exception as e:
logging.exception(e)
return redirect(views.auth.Account.account_settings)
# User not logged in
else:
# Existing user
user = with_federation(provider, oauth_user_id)
if user:
create_session(user)
return redirect(request.args.get("next") or __options__.get(
"login_view"))
# New User
else:
session_data = {
"provider": provider,
"user_id": oauth_user_id,
"name": oauth_name,
"email": oauth_email,
}
set_oauth_session(session_data)
else:
return redirect(_redirect)
return {
"action": action,
"provider": provider,
"authorized_url": ""
}
return redirect(_redirect) | [
"def",
"oauth_connect",
"(",
"self",
",",
"provider",
",",
"action",
")",
":",
"valid_actions",
"=",
"[",
"\"connect\"",
",",
"\"authorized\"",
",",
"\"test\"",
"]",
"_redirect",
"=",
"views",
".",
"auth",
".",
"Account",
".",
"account_settings",
"if",
"is_authenticated",
"(",
")",
"else",
"self",
".",
"login",
"if",
"action",
"not",
"in",
"valid_actions",
"or",
"\"oauth\"",
"not",
"in",
"__options__",
".",
"get",
"(",
"\"registration_methods\"",
")",
"or",
"not",
"__options__",
".",
"get",
"(",
"\"allow_registration\"",
")",
"or",
"not",
"hasattr",
"(",
"oauth",
",",
"provider",
")",
":",
"return",
"redirect",
"(",
"_redirect",
")",
"client",
"=",
"getattr",
"(",
"oauth",
",",
"provider",
")",
"params",
"=",
"client",
".",
"__params__",
"me_args",
"=",
"params",
".",
"get",
"(",
"\"me\"",
")",
"user_id",
"=",
"params",
".",
"get",
"(",
"\"user_id\"",
")",
"oauth_user_id",
"=",
"None",
"oauth_name",
"=",
"None",
"oauth_email",
"=",
"None",
"if",
"action",
"==",
"\"test\"",
":",
"session_data",
"=",
"{",
"\"provider\"",
":",
"\"ensure\"",
",",
"\"user_id\"",
":",
"\"1234\"",
",",
"\"name\"",
":",
"\"Mardix\"",
",",
"\"email\"",
":",
"\"[email protected]\"",
",",
"}",
"set_oauth_session",
"(",
"session_data",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"self",
".",
"register",
",",
"oauth",
"=",
"1",
")",
")",
"if",
"action",
"==",
"\"connect\"",
":",
"_next",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'next'",
")",
"authorized_url",
"=",
"url_for",
"(",
"self",
",",
"provider",
"=",
"provider",
",",
"action",
"=",
"\"authorized\"",
",",
"next",
"=",
"_next",
"or",
"request",
".",
"referrer",
"or",
"None",
",",
"_external",
"=",
"True",
")",
"return",
"client",
".",
"authorize",
"(",
"callback",
"=",
"authorized_url",
")",
"elif",
"action",
"==",
"\"authorized\"",
":",
"resp",
"=",
"client",
".",
"authorized_response",
"(",
")",
"if",
"resp",
"is",
"None",
":",
"pass",
"elif",
"isinstance",
"(",
"resp",
",",
"OAuthException",
")",
":",
"flash_error",
"(",
"\"Access Denied\"",
")",
"else",
":",
"if",
"not",
"me_args",
":",
"oauth_user_id",
"=",
"resp",
".",
"get",
"(",
"user_id",
")",
"else",
":",
"me",
"=",
"client",
".",
"get",
"(",
"me_args",
")",
"if",
"action",
"==",
"\"authorized\"",
"and",
"oauth_user_id",
":",
"if",
"is_authenticated",
"(",
")",
":",
"try",
":",
"# Add federated login to current_user",
"current_user",
".",
"add_federated_login",
"(",
"provider",
"=",
"provider",
",",
"federated_id",
"=",
"oauth_user_id",
")",
"flash_success",
"(",
"\"You can now login with your %s account\"",
"%",
"provider",
".",
"upper",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"return",
"redirect",
"(",
"views",
".",
"auth",
".",
"Account",
".",
"account_settings",
")",
"# User not logged in",
"else",
":",
"# Existing user",
"user",
"=",
"with_federation",
"(",
"provider",
",",
"oauth_user_id",
")",
"if",
"user",
":",
"create_session",
"(",
"user",
")",
"return",
"redirect",
"(",
"request",
".",
"args",
".",
"get",
"(",
"\"next\"",
")",
"or",
"__options__",
".",
"get",
"(",
"\"login_view\"",
")",
")",
"# New User",
"else",
":",
"session_data",
"=",
"{",
"\"provider\"",
":",
"provider",
",",
"\"user_id\"",
":",
"oauth_user_id",
",",
"\"name\"",
":",
"oauth_name",
",",
"\"email\"",
":",
"oauth_email",
",",
"}",
"set_oauth_session",
"(",
"session_data",
")",
"else",
":",
"return",
"redirect",
"(",
"_redirect",
")",
"return",
"{",
"\"action\"",
":",
"action",
",",
"\"provider\"",
":",
"provider",
",",
"\"authorized_url\"",
":",
"\"\"",
"}",
"return",
"redirect",
"(",
"_redirect",
")"
]
| This endpoint doesn't check if user is logged in, because it has two functions
1. If the user is not logged in, it will try to sign up the user
- if the social info exists, it will log the user in
- if not, it will create a new account and proceed
2. If user is logged in, it will try to create a social login entry with
the current user
**** This method doesn't save the user token; it only retrieves the ID
to login or ID, name, email if signing up
:param provider:
:param action: connect|authorized|
- connect: to connect to the endpoint
- authorized, when coming back | [
"This",
"endpoint",
"doesn",
"t",
"check",
"if",
"user",
"is",
"logged",
"in",
"because",
"it",
"has",
"two",
"functions"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/views/auth.py#L430-L537 | train |
projectshift/shift-boiler | boiler/abstract/abstract_service.py | AbstractService.log | def log(self, message, level=None):
""" Write a message to log """
if level is None:
level = logging.INFO
current_app.logger.log(msg=message, level=level) | python | def log(self, message, level=None):
""" Write a message to log """
if level is None:
level = logging.INFO
current_app.logger.log(msg=message, level=level) | [
"def",
"log",
"(",
"self",
",",
"message",
",",
"level",
"=",
"None",
")",
":",
"if",
"level",
"is",
"None",
":",
"level",
"=",
"logging",
".",
"INFO",
"current_app",
".",
"logger",
".",
"log",
"(",
"msg",
"=",
"message",
",",
"level",
"=",
"level",
")"
]
| Write a message to log | [
"Write",
"a",
"message",
"to",
"log"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/abstract/abstract_service.py#L15-L20 | train |
projectshift/shift-boiler | boiler/abstract/abstract_service.py | AbstractService.is_instance | def is_instance(self, model):
"""
Is instance?
Checks if provided object is instance of this service's model.
:param model: object
:return: bool
"""
result = isinstance(model, self.__model__)
if result is True:
return True
err = 'Object {} is not of type {}'
raise ValueError(err.format(model, self.__model__)) | python | def is_instance(self, model):
"""
Is instance?
Checks if provided object is instance of this service's model.
:param model: object
:return: bool
"""
result = isinstance(model, self.__model__)
if result is True:
return True
err = 'Object {} is not of type {}'
raise ValueError(err.format(model, self.__model__)) | [
"def",
"is_instance",
"(",
"self",
",",
"model",
")",
":",
"result",
"=",
"isinstance",
"(",
"model",
",",
"self",
".",
"__model__",
")",
"if",
"result",
"is",
"True",
":",
"return",
"True",
"err",
"=",
"'Object {} is not of type {}'",
"raise",
"ValueError",
"(",
"err",
".",
"format",
"(",
"model",
",",
"self",
".",
"__model__",
")",
")"
]
| Is instance?
Checks if provided object is instance of this service's model.
:param model: object
:return: bool | [
"Is",
"instance?",
"Checks",
"if",
"provided",
"object",
"is",
"instance",
"of",
"this",
"service",
"s",
"model",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/abstract/abstract_service.py#L22-L35 | train |
projectshift/shift-boiler | boiler/abstract/abstract_service.py | AbstractService.create | def create(self, **kwargs):
"""
Create
Instantiates and persists new model populated from provided
arguments
:param kwargs: varargs, data to populate with
:return: object, persisted new instance of model
"""
model = self.new(**kwargs)
return self.save(model) | python | def create(self, **kwargs):
"""
Create
Instantiates and persists new model populated from provided
arguments
:param kwargs: varargs, data to populate with
:return: object, persisted new instance of model
"""
model = self.new(**kwargs)
return self.save(model) | [
"def",
"create",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"model",
"=",
"self",
".",
"new",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"save",
"(",
"model",
")"
]
| Create
Instantiates and persists new model populated from provided
arguments
:param kwargs: varargs, data to populate with
:return: object, persisted new instance of model | [
"Create",
"Instantiates",
"and",
"persists",
"new",
"model",
"populated",
"from",
"provided",
"arguments"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/abstract/abstract_service.py#L58-L68 | train |
projectshift/shift-boiler | boiler/abstract/abstract_service.py | AbstractService.save | def save(self, model, commit=True):
"""
Save
Puts model into unit of work for persistence. Can optionally
commit transaction. Returns persisted model as a result.
:param model: object, model to persist
:param commit: bool, commit transaction?
:return: object, saved model
"""
self.is_instance(model)
db.session.add(model)
if commit:
db.session.commit()
return model | python | def save(self, model, commit=True):
"""
Save
Puts model into unit of work for persistence. Can optionally
commit transaction. Returns persisted model as a result.
:param model: object, model to persist
:param commit: bool, commit transaction?
:return: object, saved model
"""
self.is_instance(model)
db.session.add(model)
if commit:
db.session.commit()
return model | [
"def",
"save",
"(",
"self",
",",
"model",
",",
"commit",
"=",
"True",
")",
":",
"self",
".",
"is_instance",
"(",
"model",
")",
"db",
".",
"session",
".",
"add",
"(",
"model",
")",
"if",
"commit",
":",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"model"
]
| Save
Puts model into unit of work for persistence. Can optionally
commit transaction. Returns persisted model as a result.
:param model: object, model to persist
:param commit: bool, commit transaction?
:return: object, saved model | [
"Save",
"Puts",
"model",
"into",
"unit",
"of",
"work",
"for",
"persistence",
".",
"Can",
"optionally",
"commit",
"transaction",
".",
"Returns",
"persisted",
"model",
"as",
"a",
"result",
"."
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/abstract/abstract_service.py#L70-L85 | train |
projectshift/shift-boiler | boiler/abstract/abstract_service.py | AbstractService.delete | def delete(self, model, commit=True):
"""
Delete
Puts model for deletion into unit of work and optionally commits
transaction
:param model: object, model to delete
:param commit: bool, commit?
:return: object, deleted model
"""
self.is_instance(model)
db.session.delete(model)
if commit:
db.session.commit()
return model | python | def delete(self, model, commit=True):
"""
Delete
Puts model for deletion into unit of work and optionally commits
transaction
:param model: object, model to delete
:param commit: bool, commit?
:return: object, deleted model
"""
self.is_instance(model)
db.session.delete(model)
if commit:
db.session.commit()
return model | [
"def",
"delete",
"(",
"self",
",",
"model",
",",
"commit",
"=",
"True",
")",
":",
"self",
".",
"is_instance",
"(",
"model",
")",
"db",
".",
"session",
".",
"delete",
"(",
"model",
")",
"if",
"commit",
":",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"model"
]
| Delete
Puts model for deletion into unit of work and optionally commits
transaction
:param model: object, model to delete
:param commit: bool, commit?
:return: object, deleted model | [
"Delete",
"Puts",
"model",
"for",
"deletion",
"into",
"unit",
"of",
"work",
"and",
"optionall",
"commits",
"transaction"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/abstract/abstract_service.py#L87-L102 | train |
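The create, save and delete methods above form a small generic CRUD layer over the shared db session. A minimal usage sketch; UserService and User are hypothetical names, the instantiation details are assumed, and a configured Flask app context with the boiler session is required:

from boiler.abstract.abstract_service import AbstractService
from myapp.models import User  # hypothetical model

class UserService(AbstractService):
    __model__ = User

service = UserService()
user = service.create(email='a@example.com')  # new() + save(), committed
user.email = 'b@example.com'
service.save(user, commit=False)              # staged in the unit of work only
service.delete(user)                          # deleted and committed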
fjwCode/cerium | cerium/utils.py | is_connectable | def is_connectable(host: str, port: Union[int, str]) -> bool:
"""Tries to connect to the device to see if it is connectable.
Args:
host: The host to connect.
port: The port to connect.
Returns:
True or False.
"""
socket_ = None
try:
socket_ = socket.create_connection((host, port), 1)
result = True
except socket.timeout:
result = False
finally:
if socket_:
socket_.close()
return result | python | def is_connectable(host: str, port: Union[int, str]) -> bool:
"""Tries to connect to the device to see if it is connectable.
Args:
host: The host to connect.
port: The port to connect.
Returns:
True or False.
"""
socket_ = None
try:
socket_ = socket.create_connection((host, port), 1)
result = True
except socket.timeout:
result = False
finally:
if socket_:
socket_.close()
return result | [
"def",
"is_connectable",
"(",
"host",
":",
"str",
",",
"port",
":",
"Union",
"[",
"int",
",",
"str",
"]",
")",
"->",
"bool",
":",
"socket_",
"=",
"None",
"try",
":",
"socket_",
"=",
"socket",
".",
"create_connection",
"(",
"(",
"host",
",",
"port",
")",
",",
"1",
")",
"result",
"=",
"True",
"except",
"socket",
".",
"timeout",
":",
"result",
"=",
"False",
"finally",
":",
"if",
"socket_",
":",
"socket_",
".",
"close",
"(",
")",
"return",
"result"
]
| Tries to connect to the device to see if it is connectable.
Args:
host: The host to connect.
port: The port to connect.
Returns:
True or False. | [
"Tries",
"to",
"connect",
"to",
"the",
"device",
"to",
"see",
"if",
"it",
"is",
"connectable",
"."
]
| f6e06e0dcf83a0bc924828e9d6cb81383ed2364f | https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/utils.py#L34-L53 | train |
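A usage sketch for is_connectable; the host and port are placeholders. Note that only socket.timeout is caught above, so a host that actively refuses the connection raises ConnectionRefusedError instead of returning False:

from cerium.utils import is_connectable

if is_connectable('127.0.0.1', 5037):  # 5037 is the default adb server port
    print('endpoint reachable')
else:
    print('connection timed out')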
truveris/py-mdstat | mdstat/utils.py | group_lines | def group_lines(lines):
"""Split a list of lines using empty lines as separators."""
groups = []
group = []
for line in lines:
if line.strip() == "":
groups.append(group[:])
group = []
continue
group.append(line)
if group:
groups.append(group[:])
return groups | python | def group_lines(lines):
"""Split a list of lines using empty lines as separators."""
groups = []
group = []
for line in lines:
if line.strip() == "":
groups.append(group[:])
group = []
continue
group.append(line)
if group:
groups.append(group[:])
return groups | [
"def",
"group_lines",
"(",
"lines",
")",
":",
"groups",
"=",
"[",
"]",
"group",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"groups",
".",
"append",
"(",
"group",
"[",
":",
"]",
")",
"group",
"=",
"[",
"]",
"continue",
"group",
".",
"append",
"(",
"line",
")",
"if",
"group",
":",
"groups",
".",
"append",
"(",
"group",
"[",
":",
"]",
")",
"return",
"groups"
]
| Split a list of lines using empty lines as separators. | [
"Split",
"a",
"list",
"of",
"lines",
"using",
"empty",
"lines",
"as",
"separators",
"."
]
| 881af99d1168694d2f38e606af377ef6cabe2297 | https://github.com/truveris/py-mdstat/blob/881af99d1168694d2f38e606af377ef6cabe2297/mdstat/utils.py#L6-L21 | train |
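group_lines treats each blank line as a group terminator and keeps a trailing unterminated group; two blank lines in a row yield an empty group. A self-contained example, assuming py-mdstat is importable:

from mdstat.utils import group_lines

lines = ['md0 : active raid1', '      1024 blocks', '', 'md1 : active raid0']
print(group_lines(lines))
# [['md0 : active raid1', '      1024 blocks'], ['md1 : active raid0']]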
assamite/creamas | creamas/examples/grid/main.py | DistributedGridEnvironment.set_neighbors | async def set_neighbors(self):
'''Set neighbors for multi-environments, their slave environments,
and agents.
'''
t = time.time()
self.logger.debug("Settings grid neighbors for the multi-environments.")
tasks = []
for i in range(len(self.grid)):
for j in range(len(self.grid[0])):
addr = self.grid[i][j]
N, E, S, W = None, None, None, None
if i != 0:
W = self.grid[i-1][j]
if i != len(self.grid) - 1:
E = self.grid[i+1][j]
if j != 0:
N = self.grid[i][j-1]
if j != len(self.grid[0]) - 1:
S = self.grid[i][j+1]
task = asyncio.ensure_future(self._set_node_neighbors(addr, N, E, S, W))
tasks.append(task)
await asyncio.gather(*tasks)
self.logger.debug("Setting grid neighbors for the slave environments "
"and their agents.")
tasks = []
for addr in self.addrs:
task = asyncio.ensure_future(self._set_neighbors(addr))
tasks.append(task)
await asyncio.gather(*tasks)
self.logger.debug("All grid neighbors set in {} seconds."
.format(time.time() - t))
x = self._ngs[0] * self._gs[0] * self._n_slaves
y = self._ngs[1] * self._gs[1]
self.logger.info("Initialized a distributed grid with overall size "
"({}, {}). Total of {} agents.".format(x, y, x*y)) | python | async def set_neighbors(self):
'''Set neighbors for multi-environments, their slave environments,
and agents.
'''
t = time.time()
self.logger.debug("Settings grid neighbors for the multi-environments.")
tasks = []
for i in range(len(self.grid)):
for j in range(len(self.grid[0])):
addr = self.grid[i][j]
N, E, S, W = None, None, None, None
if i != 0:
W = self.grid[i-1][j]
if i != len(self.grid) - 1:
E = self.grid[i+1][j]
if j != 0:
N = self.grid[i][j-1]
if j != len(self.grid[0]) - 1:
S = self.grid[i][j+1]
task = asyncio.ensure_future(self._set_node_neighbors(addr, N, E, S, W))
tasks.append(task)
await asyncio.gather(*tasks)
self.logger.debug("Setting grid neighbors for the slave environments "
"and their agents.")
tasks = []
for addr in self.addrs:
task = asyncio.ensure_future(self._set_neighbors(addr))
tasks.append(task)
await asyncio.gather(*tasks)
self.logger.debug("All grid neighbors set in {} seconds."
.format(time.time() - t))
x = self._ngs[0] * self._gs[0] * self._n_slaves
y = self._ngs[1] * self._gs[1]
self.logger.info("Initialized a distributed grid with overall size "
"({}, {}). Total of {} agents.".format(x, y, x*y)) | [
"async",
"def",
"set_neighbors",
"(",
"self",
")",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Settings grid neighbors for the multi-environments.\"",
")",
"tasks",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"grid",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"grid",
"[",
"0",
"]",
")",
")",
":",
"addr",
"=",
"self",
".",
"grid",
"[",
"i",
"]",
"[",
"j",
"]",
"N",
",",
"E",
",",
"S",
",",
"W",
"=",
"None",
",",
"None",
",",
"None",
",",
"None",
"if",
"i",
"!=",
"0",
":",
"W",
"=",
"self",
".",
"grid",
"[",
"i",
"-",
"1",
"]",
"[",
"j",
"]",
"if",
"i",
"!=",
"len",
"(",
"self",
".",
"grid",
")",
"-",
"1",
":",
"E",
"=",
"self",
".",
"grid",
"[",
"i",
"+",
"1",
"]",
"[",
"j",
"]",
"if",
"j",
"!=",
"0",
":",
"N",
"=",
"self",
".",
"grid",
"[",
"i",
"]",
"[",
"j",
"-",
"1",
"]",
"if",
"j",
"!=",
"len",
"(",
"self",
".",
"grid",
"[",
"0",
"]",
")",
"-",
"1",
":",
"S",
"=",
"self",
".",
"grid",
"[",
"i",
"]",
"[",
"j",
"+",
"1",
"]",
"task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_set_node_neighbors",
"(",
"addr",
",",
"N",
",",
"E",
",",
"S",
",",
"W",
")",
")",
"tasks",
".",
"append",
"(",
"task",
")",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"tasks",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Setting grid neighbors for the slave environments \"",
"\"and their agents.\"",
")",
"tasks",
"=",
"[",
"]",
"for",
"addr",
"in",
"self",
".",
"addrs",
":",
"task",
"=",
"asyncio",
".",
"ensure_future",
"(",
"self",
".",
"_set_neighbors",
"(",
"addr",
")",
")",
"tasks",
".",
"append",
"(",
"task",
")",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"tasks",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"All grid neighbors set in {} seconds.\"",
".",
"format",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t",
")",
")",
"x",
"=",
"self",
".",
"_ngs",
"[",
"0",
"]",
"*",
"self",
".",
"_gs",
"[",
"0",
"]",
"*",
"self",
".",
"_n_slaves",
"y",
"=",
"self",
".",
"_ngs",
"[",
"1",
"]",
"*",
"self",
".",
"_gs",
"[",
"1",
"]",
"self",
".",
"logger",
".",
"info",
"(",
"\"Initialized a distributed grid with overall size \"",
"\"({}, {}). Total of {} agents.\"",
".",
"format",
"(",
"x",
",",
"y",
",",
"x",
"*",
"y",
")",
")"
]
| Set neighbors for multi-environments, their slave environments,
and agents. | [
"Set",
"neighbors",
"for",
"multi",
"-",
"environments",
"their",
"slave",
"environments",
"and",
"agents",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/examples/grid/main.py#L151-L186 | train |
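The neighbour bookkeeping in set_neighbors reduces to a four-way lookup on grid indices. A standalone sketch of just that lookup on a toy 2x2 grid, with no environments involved:

grid = [['a0', 'a1'], ['b0', 'b1']]
i, j = 0, 0
W = grid[i - 1][j] if i != 0 else None
E = grid[i + 1][j] if i != len(grid) - 1 else None
N = grid[i][j - 1] if j != 0 else None
S = grid[i][j + 1] if j != len(grid[0]) - 1 else None
print(N, E, S, W)  # None b0 a1 None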
assamite/creamas | creamas/ds.py | ssh_exec | async def ssh_exec(server, cmd, timeout=10, **ssh_kwargs):
"""Execute a command on a given server using asynchronous SSH-connection.
The connection to the server is wrapped in :func:`asyncio.wait_for` and
given :attr:`timeout` is applied to it. If the server is not reachable
before timeout expires, :exc:`asyncio.TimeoutError` is raised.
:param str server: Address of the server
:param str cmd: Command to be executed
:param int timeout: Timeout to connect to server.
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:func:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
:returns:
The result of the executed command (the connection is closed before returning).
"""
conn = await asyncio.wait_for(asyncssh.connect(server, **ssh_kwargs),
timeout=timeout)
ret = await conn.run(cmd)
conn.close()
return ret | python | async def ssh_exec(server, cmd, timeout=10, **ssh_kwargs):
"""Execute a command on a given server using asynchronous SSH-connection.
The connection to the server is wrapped in :func:`asyncio.wait_for` and
given :attr:`timeout` is applied to it. If the server is not reachable
before timeout expires, :exc:`asyncio.TimeoutError` is raised.
:param str server: Address of the server
:param str cmd: Command to be executed
:param int timeout: Timeout to connect to server.
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:func:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
:returns:
The result of the executed command (the connection is closed before returning).
"""
conn = await asyncio.wait_for(asyncssh.connect(server, **ssh_kwargs),
timeout=timeout)
ret = await conn.run(cmd)
conn.close()
return ret | [
"async",
"def",
"ssh_exec",
"(",
"server",
",",
"cmd",
",",
"timeout",
"=",
"10",
",",
"*",
"*",
"ssh_kwargs",
")",
":",
"conn",
"=",
"await",
"asyncio",
".",
"wait_for",
"(",
"asyncssh",
".",
"connect",
"(",
"server",
",",
"*",
"*",
"ssh_kwargs",
")",
",",
"timeout",
"=",
"timeout",
")",
"ret",
"=",
"await",
"conn",
".",
"run",
"(",
"cmd",
")",
"conn",
".",
"close",
"(",
")",
"return",
"ret"
]
| Execute a command on a given server using asynchronous SSH-connection.
The connection to the server is wrapped in :func:`asyncio.wait_for` and
given :attr:`timeout` is applied to it. If the server is not reachable
before timeout expires, :exc:`asyncio.TimeoutError` is raised.
:param str server: Address of the server
:param str cmd: Command to be executed
:param int timeout: Timeout to connect to server.
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:func:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
:returns:
The result of the executed command (the connection is closed before returning). | [
"Execute",
"a",
"command",
"on",
"a",
"given",
"server",
"using",
"asynchronous",
"SSH",
"-",
"connection",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/ds.py#L28-L52 | train |
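A usage sketch for ssh_exec; the host and credentials are placeholders, and known_hosts=None disables asyncssh's host-key verification (convenient for a sketch, unsafe in production):

import asyncio
from creamas.ds import ssh_exec

async def main():
    ret = await ssh_exec('node1.example.com', 'uname -a',
                         timeout=5, username='worker', known_hosts=None)
    print(ret.exit_status, ret.stdout)

asyncio.run(main())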
assamite/creamas | creamas/ds.py | DistributedEnvironment.spawn_slaves | async def spawn_slaves(self, spawn_cmd, ports=None, **ssh_kwargs):
"""Spawn multi-environments on the nodes through SSH-connections.
:param spawn_cmd:
str or list, command(s) used to spawn the environment on each node.
If *list*, it must contain one command for each node in
:attr:`nodes`. If *str*, the same command is used for each node.
:param ports:
Optional. If not ``None``, must be a mapping from nodes
(``(server, port)``-tuples) to ports which are used for the spawned
multi-environments' master manager environments. If ``None``, then
the same port is used to derive the master manager addresses as was
used to initialize this distributed environment's managing
environment (port in :attr:`addr`).
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:meth:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
Nodes are spawned by creating a multiprocessing pool where each node
has its own subprocess. These subprocesses then use SSH-connections
to spawn the multi-environments on the nodes. The SSH-connections in
the pool are kept alive until the nodes are stopped, i.e. this
distributed environment is destroyed.
"""
pool = multiprocessing.Pool(len(self.nodes))
rets = []
for i, node in enumerate(self.nodes):
server, server_port = node
port = ports[node] if ports is not None else self.port
mgr_addr = "tcp://{}:{}/0".format(server, port)
self._manager_addrs.append(mgr_addr)
if type(spawn_cmd) in [list, tuple]:
cmd = spawn_cmd[i]
else:
cmd = spawn_cmd
args = [server, cmd]
ssh_kwargs_cp = ssh_kwargs.copy()
ssh_kwargs_cp['port'] = server_port
ret = pool.apply_async(ssh_exec_in_new_loop,
args=args,
kwds=ssh_kwargs_cp,
error_callback=logger.warning)
rets.append(ret)
self._pool = pool
self._r = rets | python | async def spawn_slaves(self, spawn_cmd, ports=None, **ssh_kwargs):
"""Spawn multi-environments on the nodes through SSH-connections.
:param spawn_cmd:
str or list, command(s) used to spawn the environment on each node.
If *list*, it must contain one command for each node in
:attr:`nodes`. If *str*, the same command is used for each node.
:param ports:
Optional. If not ``None``, must be a mapping from nodes
(``(server, port)``-tuples) to ports which are used for the spawned
multi-environments' master manager environments. If ``None``, then
the same port is used to derive the master manager addresses as was
used to initialize this distributed environment's managing
environment (port in :attr:`addr`).
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:meth:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
Nodes are spawned by creating a multiprocessing pool where each node
has its own subprocess. These subprocesses then use SSH-connections
to spawn the multi-environments on the nodes. The SSH-connections in
the pool are kept alive until the nodes are stopped, i.e. this
distributed environment is destroyed.
"""
pool = multiprocessing.Pool(len(self.nodes))
rets = []
for i, node in enumerate(self.nodes):
server, server_port = node
port = ports[node] if ports is not None else self.port
mgr_addr = "tcp://{}:{}/0".format(server, port)
self._manager_addrs.append(mgr_addr)
if type(spawn_cmd) in [list, tuple]:
cmd = spawn_cmd[i]
else:
cmd = spawn_cmd
args = [server, cmd]
ssh_kwargs_cp = ssh_kwargs.copy()
ssh_kwargs_cp['port'] = server_port
ret = pool.apply_async(ssh_exec_in_new_loop,
args=args,
kwds=ssh_kwargs_cp,
error_callback=logger.warning)
rets.append(ret)
self._pool = pool
self._r = rets | [
"async",
"def",
"spawn_slaves",
"(",
"self",
",",
"spawn_cmd",
",",
"ports",
"=",
"None",
",",
"*",
"*",
"ssh_kwargs",
")",
":",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"len",
"(",
"self",
".",
"nodes",
")",
")",
"rets",
"=",
"[",
"]",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"self",
".",
"nodes",
")",
":",
"server",
",",
"server_port",
"=",
"node",
"port",
"=",
"ports",
"[",
"node",
"]",
"if",
"ports",
"is",
"not",
"None",
"else",
"self",
".",
"port",
"mgr_addr",
"=",
"\"tcp://{}:{}/0\"",
".",
"format",
"(",
"server",
",",
"port",
")",
"self",
".",
"_manager_addrs",
".",
"append",
"(",
"mgr_addr",
")",
"if",
"type",
"(",
"spawn_cmd",
")",
"in",
"[",
"list",
",",
"tuple",
"]",
":",
"cmd",
"=",
"spawn_cmd",
"[",
"i",
"]",
"else",
":",
"cmd",
"=",
"spawn_cmd",
"args",
"=",
"[",
"server",
",",
"cmd",
"]",
"ssh_kwargs_cp",
"=",
"ssh_kwargs",
".",
"copy",
"(",
")",
"ssh_kwargs_cp",
"[",
"'port'",
"]",
"=",
"server_port",
"ret",
"=",
"pool",
".",
"apply_async",
"(",
"ssh_exec_in_new_loop",
",",
"args",
"=",
"args",
",",
"kwds",
"=",
"ssh_kwargs_cp",
",",
"error_callback",
"=",
"logger",
".",
"warning",
")",
"rets",
".",
"append",
"(",
"ret",
")",
"self",
".",
"_pool",
"=",
"pool",
"self",
".",
"_r",
"=",
"rets"
]
| Spawn multi-environments on the nodes through SSH-connections.
:param spawn_cmd:
str or list, command(s) used to spawn the environment on each node.
If *list*, it must contain one command for each node in
:attr:`nodes`. If *str*, the same command is used for each node.
:param ports:
Optional. If not ``None``, must be a mapping from nodes
(``(server, port)``-tuples) to ports which are used for the spawned
multi-environments' master manager environments. If ``None``, then
the same port is used to derive the master manager addresses as was
used to initialize this distributed environment's managing
environment (port in :attr:`addr`).
:param ssh_kwargs:
Any additional SSH-connection arguments, as specified by
:meth:`asyncssh.connect`. See `asyncssh documentation
<http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
details.
Nodes are spawned by creating a multiprocessing pool where each node
has its own subprocess. These subprocesses then use SSH-connections
to spawn the multi-environments on the nodes. The SSH-connections in
the pool are kept alive until the nodes are stopped, i.e. this
distributed environment is destroyed. | [
"Spawn",
"multi",
"-",
"environments",
"on",
"the",
"nodes",
"through",
"SSH",
"-",
"connections",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/ds.py#L203-L251 | train |
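A call sketch for spawn_slaves; denv stands in for an initialized DistributedEnvironment, and the spawn command, host names and ports are all placeholders:

async def start_nodes(denv):
    # one shared command for every node; a list would give one per node
    await denv.spawn_slaves(
        spawn_cmd='python spawn_multi_env.py --port 5555',
        ports={('node1.local', 22): 5555, ('node2.local', 22): 5555},
        username='worker', known_hosts=None)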
assamite/creamas | creamas/ds.py | DistributedEnvironment.get_slave_managers | def get_slave_managers(self, as_coro=False):
"""Return all slave environment manager addresses.
:param bool as_coro:
If ``True`` returns awaitable coroutine, otherwise runs the calls
to the slave managers asynchronously in the event loop.
This method returns the addresses of the true slave environment
managers, i.e. managers derived from :class:`~creamas.mp.EnvManager`,
not multi-environment managers. For example, if this node environment
has two nodes with four slave environments in each, then this method
returns 8 addresses.
"""
async def slave_task(addr):
r_manager = await self.env.connect(addr)
return await r_manager.get_slave_managers()
tasks = create_tasks(slave_task, self.addrs)
return run_or_coro(tasks, as_coro) | python | def get_slave_managers(self, as_coro=False):
"""Return all slave environment manager addresses.
:param bool as_coro:
If ``True`` returns awaitable coroutine, otherwise runs the calls
to the slave managers asynchronously in the event loop.
This method returns the addresses of the true slave environment
managers, i.e. managers derived from :class:`~creamas.mp.EnvManager`,
not multi-environment managers. For example, if this node environment
has two nodes with four slave environments in each, then this method
returns 8 addresses.
"""
async def slave_task(addr):
r_manager = await self.env.connect(addr)
return await r_manager.get_slave_managers()
tasks = create_tasks(slave_task, self.addrs)
return run_or_coro(tasks, as_coro) | [
"def",
"get_slave_managers",
"(",
"self",
",",
"as_coro",
"=",
"False",
")",
":",
"async",
"def",
"slave_task",
"(",
"addr",
")",
":",
"r_manager",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"addr",
")",
"return",
"await",
"r_manager",
".",
"get_slave_managers",
"(",
")",
"tasks",
"=",
"create_tasks",
"(",
"slave_task",
",",
"self",
".",
"addrs",
")",
"return",
"run_or_coro",
"(",
"tasks",
",",
"as_coro",
")"
]
| Return all slave environment manager addresses.
:param bool as_coro:
If ``True`` returns awaitable coroutine, otherwise runs the calls
to the slave managers asynchoronously in the event loop.
This method returns the addresses of the true slave environment
managers, i.e. managers derived from :class:`~creamas.mp.EnvManager`,
not multi-environment managers. For example, if this node environment
has two nodes with four slave environments in each, then this method
returns 8 addresses. | [
"Return",
"all",
"slave",
"environment",
"manager",
"addresses",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/ds.py#L262-L280 | train |
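The as_coro flag chooses between a blocking call and an awaitable one; both styles are sketched below, with denv again standing in for a live DistributedEnvironment:

def collect_blocking(denv):
    return denv.get_slave_managers()  # runs the gathered tasks in the loop now

async def collect_async(denv):
    return await denv.get_slave_managers(as_coro=True)  # caller awaits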
berkeley-cocosci/Wallace | wallace/models.py | Participant.nodes | def nodes(self, type=None, failed=False):
"""Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded; to
include only failed nodes use ``failed=True``, and for all nodes use
``failed="all"``.
"""
if type is None:
type = Node
if not issubclass(type, Node):
raise(TypeError("{} is not a valid node type.".format(type)))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid node failed".format(failed))
if failed == "all":
return type\
.query\
.filter_by(participant_id=self.id)\
.all()
else:
return type\
.query\
.filter_by(failed=failed, participant_id=self.id)\
.all() | python | def nodes(self, type=None, failed=False):
"""Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded; to
include only failed nodes use ``failed=True``, and for all nodes use
``failed="all"``.
"""
if type is None:
type = Node
if not issubclass(type, Node):
raise(TypeError("{} is not a valid node type.".format(type)))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid node failed".format(failed))
if failed == "all":
return type\
.query\
.filter_by(participant_id=self.id)\
.all()
else:
return type\
.query\
.filter_by(failed=failed, participant_id=self.id)\
.all() | [
"def",
"nodes",
"(",
"self",
",",
"type",
"=",
"None",
",",
"failed",
"=",
"False",
")",
":",
"if",
"type",
"is",
"None",
":",
"type",
"=",
"Node",
"if",
"not",
"issubclass",
"(",
"type",
",",
"Node",
")",
":",
"raise",
"(",
"TypeError",
"(",
"\"{} is not a valid node type.\"",
".",
"format",
"(",
"type",
")",
")",
")",
"if",
"failed",
"not",
"in",
"[",
"\"all\"",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid node failed\"",
".",
"format",
"(",
"failed",
")",
")",
"if",
"failed",
"==",
"\"all\"",
":",
"return",
"type",
".",
"query",
".",
"filter_by",
"(",
"participant_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"type",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"failed",
",",
"participant_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")"
]
| Get nodes associated with this participant.
Return a list of nodes associated with the participant. If specified,
``type`` filters by class. By default failed nodes are excluded; to
include only failed nodes use ``failed=True``, and for all nodes use
``failed="all"``. | [
"Get",
"nodes",
"associated",
"with",
"this",
"participant",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L152-L179 | train |
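The failed argument is tri-state rather than boolean. A query sketch, assuming a populated Wallace database; p is a persisted Participant and Agent a hypothetical Node subclass:

def participant_queries(p, Agent):
    default = p.nodes()             # non-failed nodes only
    failed = p.nodes(failed=True)   # failed nodes only
    both = p.nodes(failed='all')    # failed and non-failed
    agents = p.nodes(type=Agent)    # restrict to a Node subclass
    return default, failed, both, agents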
berkeley-cocosci/Wallace | wallace/models.py | Network.print_verbose | def print_verbose(self):
"""Print a verbose representation of a network."""
print "Nodes: "
for a in (self.nodes(failed="all")):
print a
print "\nVectors: "
for v in (self.vectors(failed="all")):
print v
print "\nInfos: "
for i in (self.infos(failed="all")):
print i
print "\nTransmissions: "
for t in (self.transmissions(failed="all")):
print t
print "\nTransformations: "
for t in (self.transformations(failed="all")):
print t | python | def print_verbose(self):
"""Print a verbose representation of a network."""
print "Nodes: "
for a in (self.nodes(failed="all")):
print a
print "\nVectors: "
for v in (self.vectors(failed="all")):
print v
print "\nInfos: "
for i in (self.infos(failed="all")):
print i
print "\nTransmissions: "
for t in (self.transmissions(failed="all")):
print t
print "\nTransformations: "
for t in (self.transformations(failed="all")):
print t | [
"def",
"print_verbose",
"(",
"self",
")",
":",
"print",
"\"Nodes: \"",
"for",
"a",
"in",
"(",
"self",
".",
"nodes",
"(",
"failed",
"=",
"\"all\"",
")",
")",
":",
"print",
"a",
"print",
"\"\\nVectors: \"",
"for",
"v",
"in",
"(",
"self",
".",
"vectors",
"(",
"failed",
"=",
"\"all\"",
")",
")",
":",
"print",
"v",
"print",
"\"\\nInfos: \"",
"for",
"i",
"in",
"(",
"self",
".",
"infos",
"(",
"failed",
"=",
"\"all\"",
")",
")",
":",
"print",
"i",
"print",
"\"\\nTransmissions: \"",
"for",
"t",
"in",
"(",
"self",
".",
"transmissions",
"(",
"failed",
"=",
"\"all\"",
")",
")",
":",
"print",
"t",
"print",
"\"\\nTransformations: \"",
"for",
"t",
"in",
"(",
"self",
".",
"transformations",
"(",
"failed",
"=",
"\"all\"",
")",
")",
":",
"print",
"t"
]
| Print a verbose representation of a network. | [
"Print",
"a",
"verbose",
"representation",
"of",
"a",
"network",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L559-L579 | train |
berkeley-cocosci/Wallace | wallace/models.py | Node.vectors | def vectors(self, direction="all", failed=False):
"""Get vectors that connect at this node.
Direction can be "incoming", "outgoing" or "all" (default).
Failed can be True, False or all
"""
# check direction
if direction not in ["all", "incoming", "outgoing"]:
raise ValueError(
"{} is not a valid vector direction. "
"Must be all, incoming or outgoing.".format(direction))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
# get the vectors
if failed == "all":
if direction == "all":
return Vector.query\
.filter(or_(Vector.destination_id == self.id,
Vector.origin_id == self.id))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id)\
.all()
else:
if direction == "all":
return Vector.query\
.filter(and_(Vector.failed == failed,
or_(Vector.destination_id == self.id,
Vector.origin_id == self.id)))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id, failed=failed)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id, failed=failed)\
.all() | python | def vectors(self, direction="all", failed=False):
"""Get vectors that connect at this node.
Direction can be "incoming", "outgoing" or "all" (default).
Failed can be True, False or all
"""
# check direction
if direction not in ["all", "incoming", "outgoing"]:
raise ValueError(
"{} is not a valid vector direction. "
"Must be all, incoming or outgoing.".format(direction))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid vector failed".format(failed))
# get the vectors
if failed == "all":
if direction == "all":
return Vector.query\
.filter(or_(Vector.destination_id == self.id,
Vector.origin_id == self.id))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id)\
.all()
else:
if direction == "all":
return Vector.query\
.filter(and_(Vector.failed == failed,
or_(Vector.destination_id == self.id,
Vector.origin_id == self.id)))\
.all()
if direction == "incoming":
return Vector.query\
.filter_by(destination_id=self.id, failed=failed)\
.all()
if direction == "outgoing":
return Vector.query\
.filter_by(origin_id=self.id, failed=failed)\
.all() | [
"def",
"vectors",
"(",
"self",
",",
"direction",
"=",
"\"all\"",
",",
"failed",
"=",
"False",
")",
":",
"# check direction",
"if",
"direction",
"not",
"in",
"[",
"\"all\"",
",",
"\"incoming\"",
",",
"\"outgoing\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid vector direction. \"",
"\"Must be all, incoming or outgoing.\"",
".",
"format",
"(",
"direction",
")",
")",
"if",
"failed",
"not",
"in",
"[",
"\"all\"",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid vector failed\"",
".",
"format",
"(",
"failed",
")",
")",
"# get the vectors",
"if",
"failed",
"==",
"\"all\"",
":",
"if",
"direction",
"==",
"\"all\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter",
"(",
"or_",
"(",
"Vector",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Vector",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"incoming\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter_by",
"(",
"destination_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"outgoing\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter_by",
"(",
"origin_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"if",
"direction",
"==",
"\"all\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Vector",
".",
"failed",
"==",
"failed",
",",
"or_",
"(",
"Vector",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Vector",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"incoming\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter_by",
"(",
"destination_id",
"=",
"self",
".",
"id",
",",
"failed",
"=",
"failed",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"outgoing\"",
":",
"return",
"Vector",
".",
"query",
".",
"filter_by",
"(",
"origin_id",
"=",
"self",
".",
"id",
",",
"failed",
"=",
"failed",
")",
".",
"all",
"(",
")"
]
| Get vectors that connect at this node.
Direction can be "incoming", "outgoing" or "all" (default).
Failed can be True, False or all | [
"Get",
"vectors",
"that",
"connect",
"at",
"this",
"node",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L655-L703 | train |
berkeley-cocosci/Wallace | wallace/models.py | Node.transmissions | def transmissions(self, direction="outgoing", status="all", failed=False):
"""Get transmissions sent to or from this node.
Direction can be "all", "incoming" or "outgoing" (default).
Status can be "all" (default), "pending", or "received".
failed can be True, False or "all"
"""
# check parameters
if direction not in ["incoming", "outgoing", "all"]:
raise(ValueError("You cannot get transmissions of direction {}."
.format(direction) +
"Type can only be incoming, outgoing or all."))
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get transmission of status {}."
.format(status) +
"Status can only be pending, received or all"))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid transmission failed"
.format(failed))
# get transmissions
if direction == "all":
if status == "all":
return Transmission.query\
.filter(and_(Transmission.failed == False,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.status == status,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
if direction == "incoming":
if status == "all":
return Transmission.query\
.filter_by(failed=False, destination_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.destination_id == self.id,
Transmission.status == status))\
.all()
if direction == "outgoing":
if status == "all":
return Transmission.query\
.filter_by(failed=False, origin_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.origin_id == self.id,
Transmission.status == status))\
.all() | python | def transmissions(self, direction="outgoing", status="all", failed=False):
"""Get transmissions sent to or from this node.
Direction can be "all", "incoming" or "outgoing" (default).
Status can be "all" (default), "pending", or "received".
failed can be True, False or "all"
"""
# check parameters
if direction not in ["incoming", "outgoing", "all"]:
raise(ValueError("You cannot get transmissions of direction {}."
.format(direction) +
"Type can only be incoming, outgoing or all."))
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get transmission of status {}."
.format(status) +
"Status can only be pending, received or all"))
if failed not in ["all", False, True]:
raise ValueError("{} is not a valid transmission failed"
.format(failed))
# get transmissions
if direction == "all":
if status == "all":
return Transmission.query\
.filter(and_(Transmission.failed == False,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.status == status,
or_(Transmission.destination_id == self.id,
Transmission.origin_id == self.id)))\
.all()
if direction == "incoming":
if status == "all":
return Transmission.query\
.filter_by(failed=False, destination_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.destination_id == self.id,
Transmission.status == status))\
.all()
if direction == "outgoing":
if status == "all":
return Transmission.query\
.filter_by(failed=False, origin_id=self.id)\
.all()
else:
return Transmission.query\
.filter(and_(Transmission.failed == False,
Transmission.origin_id == self.id,
Transmission.status == status))\
.all() | [
"def",
"transmissions",
"(",
"self",
",",
"direction",
"=",
"\"outgoing\"",
",",
"status",
"=",
"\"all\"",
",",
"failed",
"=",
"False",
")",
":",
"# check parameters",
"if",
"direction",
"not",
"in",
"[",
"\"incoming\"",
",",
"\"outgoing\"",
",",
"\"all\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get transmissions of direction {}.\"",
".",
"format",
"(",
"direction",
")",
"+",
"\"Type can only be incoming, outgoing or all.\"",
")",
")",
"if",
"status",
"not",
"in",
"[",
"\"all\"",
",",
"\"pending\"",
",",
"\"received\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get transmission of status {}.\"",
".",
"format",
"(",
"status",
")",
"+",
"\"Status can only be pending, received or all\"",
")",
")",
"if",
"failed",
"not",
"in",
"[",
"\"all\"",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ValueError",
"(",
"\"{} is not a valid transmission failed\"",
".",
"format",
"(",
"failed",
")",
")",
"# get transmissions",
"if",
"direction",
"==",
"\"all\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"or_",
"(",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"status",
"==",
"status",
",",
"or_",
"(",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
")",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"incoming\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"False",
",",
"destination_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"destination_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"status",
"==",
"status",
")",
")",
".",
"all",
"(",
")",
"if",
"direction",
"==",
"\"outgoing\"",
":",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"failed",
"=",
"False",
",",
"origin_id",
"=",
"self",
".",
"id",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter",
"(",
"and_",
"(",
"Transmission",
".",
"failed",
"==",
"False",
",",
"Transmission",
".",
"origin_id",
"==",
"self",
".",
"id",
",",
"Transmission",
".",
"status",
"==",
"status",
")",
")",
".",
"all",
"(",
")"
]
| Get transmissions sent to or from this node.
Direction can be "all", "incoming" or "outgoing" (default).
Status can be "all" (default), "pending", or "received".
failed can be True, False or "all" | [
"Get",
"transmissions",
"sent",
"to",
"or",
"from",
"this",
"node",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L915-L973 | train |
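Note the asymmetric defaults: direction defaults to "outgoing" here but to "all" in vectors(). Also, the failed argument is validated but every branch above filters on Transmission.failed == False, so passing failed=True or failed="all" does not change the result. A query sketch, with node standing in for a persisted Node:

def transmission_queries(node):
    outgoing = node.transmissions()  # outgoing, any status
    pending = node.transmissions(direction='incoming', status='pending')
    received = node.transmissions(direction='all', status='received')
    return outgoing, pending, received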
berkeley-cocosci/Wallace | wallace/models.py | Node.receive | def receive(self, what=None):
"""Receive some transmissions.
Received transmissions are marked as received, then their infos are
passed to update().
"what" can be:
1. None (the default) in which case all pending transmissions are
received.
2. a specific transmission.
Will raise an error if the node is told to receive a transmission it has
not been sent.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot receive as it has failed."
.format(self))
received_transmissions = []
if what is None:
pending_transmissions = self.transmissions(direction="incoming",
status="pending")
for transmission in pending_transmissions:
transmission.status = "received"
transmission.receive_time = timenow()
received_transmissions.append(transmission)
elif isinstance(what, Transmission):
if what in self.transmissions(direction="incoming",
status="pending"):
what.status = "received"
what.receive_time = timenow()
received_transmissions.append(what)
else:
raise(ValueError("{} cannot receive {} as it is not "
"in its pending_transmissions"
.format(self, what)))
else:
raise ValueError("Nodes cannot receive {}".format(what))
self.update([t.info for t in received_transmissions]) | python | def receive(self, what=None):
"""Receive some transmissions.
Received transmissions are marked as received, then their infos are
passed to update().
"what" can be:
1. None (the default) in which case all pending transmissions are
received.
2. a specific transmission.
Will raise an error if the node is told to receive a transmission it has
not been sent.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot receive as it has failed."
.format(self))
received_transmissions = []
if what is None:
pending_transmissions = self.transmissions(direction="incoming",
status="pending")
for transmission in pending_transmissions:
transmission.status = "received"
transmission.receive_time = timenow()
received_transmissions.append(transmission)
elif isinstance(what, Transmission):
if what in self.transmissions(direction="incoming",
status="pending"):
what.status = "received"
what.receive_time = timenow()
received_transmissions.append(what)
else:
raise(ValueError("{} cannot receive {} as it is not "
"in its pending_transmissions"
.format(self, what)))
else:
raise ValueError("Nodes cannot receive {}".format(what))
self.update([t.info for t in received_transmissions]) | [
"def",
"receive",
"(",
"self",
",",
"what",
"=",
"None",
")",
":",
"# check self is not failed",
"if",
"self",
".",
"failed",
":",
"raise",
"ValueError",
"(",
"\"{} cannot receive as it has failed.\"",
".",
"format",
"(",
"self",
")",
")",
"received_transmissions",
"=",
"[",
"]",
"if",
"what",
"is",
"None",
":",
"pending_transmissions",
"=",
"self",
".",
"transmissions",
"(",
"direction",
"=",
"\"incoming\"",
",",
"status",
"=",
"\"pending\"",
")",
"for",
"transmission",
"in",
"pending_transmissions",
":",
"transmission",
".",
"status",
"=",
"\"received\"",
"transmission",
".",
"receive_time",
"=",
"timenow",
"(",
")",
"received_transmissions",
".",
"append",
"(",
"transmission",
")",
"elif",
"isinstance",
"(",
"what",
",",
"Transmission",
")",
":",
"if",
"what",
"in",
"self",
".",
"transmissions",
"(",
"direction",
"=",
"\"incoming\"",
",",
"status",
"=",
"\"pending\"",
")",
":",
"transmission",
".",
"status",
"=",
"\"received\"",
"what",
".",
"receive_time",
"=",
"timenow",
"(",
")",
"received_transmissions",
".",
"append",
"(",
"what",
")",
"else",
":",
"raise",
"(",
"ValueError",
"(",
"\"{} cannot receive {} as it is not \"",
"\"in its pending_transmissions\"",
".",
"format",
"(",
"self",
",",
"what",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Nodes cannot receive {}\"",
".",
"format",
"(",
"what",
")",
")",
"self",
".",
"update",
"(",
"[",
"t",
".",
"info",
"for",
"t",
"in",
"received_transmissions",
"]",
")"
]
| Receive some transmissions.
Received transmissions are marked as received, then their infos are
passed to update().
"what" can be:
1. None (the default) in which case all pending transmissions are
received.
2. a specific transmission.
Will raise an error if the node is told to receive a transmission it has
not been sent. | [
"Receive",
"some",
"transmissions",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L1186-L1229 | train |
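receive() has two call shapes; a sketch, with node a persisted Node and t one of its pending incoming Transmissions:

def receive_examples(node, t):
    node.receive()   # receive every pending incoming transmission
    node.receive(t)  # receive one specific pending transmission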
berkeley-cocosci/Wallace | wallace/models.py | Node.replicate | def replicate(self, info_in):
"""Replicate an info."""
# check self is not failed
if self.failed:
raise ValueError("{} cannot replicate as it has failed."
.format(self))
from transformations import Replication
info_out = type(info_in)(origin=self, contents=info_in.contents)
Replication(info_in=info_in, info_out=info_out) | python | def replicate(self, info_in):
"""Replicate an info."""
# check self is not failed
if self.failed:
raise ValueError("{} cannot replicate as it has failed."
.format(self))
from transformations import Replication
info_out = type(info_in)(origin=self, contents=info_in.contents)
Replication(info_in=info_in, info_out=info_out) | [
"def",
"replicate",
"(",
"self",
",",
"info_in",
")",
":",
"# check self is not failed",
"if",
"self",
".",
"failed",
":",
"raise",
"ValueError",
"(",
"\"{} cannot replicate as it has failed.\"",
".",
"format",
"(",
"self",
")",
")",
"from",
"transformations",
"import",
"Replication",
"info_out",
"=",
"type",
"(",
"info_in",
")",
"(",
"origin",
"=",
"self",
",",
"contents",
"=",
"info_in",
".",
"contents",
")",
"Replication",
"(",
"info_in",
"=",
"info_in",
",",
"info_out",
"=",
"info_out",
")"
]
| Replicate an info. | [
"Replicate",
"an",
"info",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L1241-L1250 | train |
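A usage sketch for replicate(), assuming a running Wallace session; the `child` node and `parent_info` Info are hypothetical.

# Hypothetical sketch: copy an info onto another node.
child.replicate(parent_info)   # creates a same-typed Info on `child` and
                               # records a Replication transformation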
berkeley-cocosci/Wallace | wallace/models.py | Node.mutate | def mutate(self, info_in):
"""Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot mutate as it has failed.".format(self))
from transformations import Mutation
info_out = type(info_in)(origin=self,
contents=info_in._mutated_contents())
Mutation(info_in=info_in, info_out=info_out) | python | def mutate(self, info_in):
"""Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``.
"""
# check self is not failed
if self.failed:
raise ValueError("{} cannot mutate as it has failed.".format(self))
from transformations import Mutation
info_out = type(info_in)(origin=self,
contents=info_in._mutated_contents())
Mutation(info_in=info_in, info_out=info_out) | [
"def",
"mutate",
"(",
"self",
",",
"info_in",
")",
":",
"# check self is not failed",
"if",
"self",
".",
"failed",
":",
"raise",
"ValueError",
"(",
"\"{} cannot mutate as it has failed.\"",
".",
"format",
"(",
"self",
")",
")",
"from",
"transformations",
"import",
"Mutation",
"info_out",
"=",
"type",
"(",
"info_in",
")",
"(",
"origin",
"=",
"self",
",",
"contents",
"=",
"info_in",
".",
"_mutated_contents",
"(",
")",
")",
"Mutation",
"(",
"info_in",
"=",
"info_in",
",",
"info_out",
"=",
"info_out",
")"
]
| Replicate an info + mutation.
To mutate an info, that info must have a method called
``_mutated_contents``. | [
"Replicate",
"an",
"info",
"+",
"mutation",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L1252-L1266 | train |
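mutate() requires the info class to expose a _mutated_contents() method. A minimal sketch of a compatible subclass, assuming the Info base class lives in wallace.models as the records above suggest:

import random
from wallace.models import Info

class NoisyInfo(Info):
    """Hypothetical Info whose contents gain a random digit on mutation."""
    def _mutated_contents(self):
        return self.contents + str(random.randint(0, 9))

# node.mutate(noisy_info) would then store the mutated copy and record
# a Mutation transformation between the two infos.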
berkeley-cocosci/Wallace | wallace/models.py | Vector.transmissions | def transmissions(self, status="all"):
"""Get transmissions sent along this Vector.
Status can be "all" (the default), "pending", or "received".
"""
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get {} transmissions."
.format(status) +
"Status can only be pending, received or all"))
if status == "all":
return Transmission\
.query\
.filter_by(vector_id=self.id,
failed=False)\
.all()
else:
return Transmission\
.query\
.filter_by(vector_id=self.id,
status=status,
failed=False)\
.all() | python | def transmissions(self, status="all"):
"""Get transmissions sent along this Vector.
Status can be "all" (the default), "pending", or "received".
"""
if status not in ["all", "pending", "received"]:
raise(ValueError("You cannot get {} transmissions."
.format(status) +
"Status can only be pending, received or all"))
if status == "all":
return Transmission\
.query\
.filter_by(vector_id=self.id,
failed=False)\
.all()
else:
return Transmission\
.query\
.filter_by(vector_id=self.id,
status=status,
failed=False)\
.all() | [
"def",
"transmissions",
"(",
"self",
",",
"status",
"=",
"\"all\"",
")",
":",
"if",
"status",
"not",
"in",
"[",
"\"all\"",
",",
"\"pending\"",
",",
"\"received\"",
"]",
":",
"raise",
"(",
"ValueError",
"(",
"\"You cannot get {} transmissions.\"",
".",
"format",
"(",
"status",
")",
"+",
"\"Status can only be pending, received or all\"",
")",
")",
"if",
"status",
"==",
"\"all\"",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"vector_id",
"=",
"self",
".",
"id",
",",
"failed",
"=",
"False",
")",
".",
"all",
"(",
")",
"else",
":",
"return",
"Transmission",
".",
"query",
".",
"filter_by",
"(",
"vector_id",
"=",
"self",
".",
"id",
",",
"status",
"=",
"status",
",",
"failed",
"=",
"False",
")",
".",
"all",
"(",
")"
]
| Get transmissions sent along this Vector.
Status can be "all" (the default), "pending", or "received". | [
"Get",
"transmissions",
"sent",
"along",
"this",
"Vector",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/models.py#L1358-L1380 | train |
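A usage sketch for the status filter; `vector` is a hypothetical Wallace Vector instance.

pending = vector.transmissions(status="pending")
received = vector.transmissions(status="received")
everything = vector.transmissions()   # status defaults to "all"
# any other status value raises ValueError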
Ceasar/twosheds | twosheds/shell.py | Shell.serve_forever | def serve_forever(self, banner=None):
"""Interact with the user.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``.
"""
if hasattr(readline, "read_history_file"):
try:
readline.read_history_file(self.histfile)
except IOError:
pass
atexit.register(self._save_history)
super(Shell, self).serve_forever(banner) | python | def serve_forever(self, banner=None):
"""Interact with the user.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``.
"""
if hasattr(readline, "read_history_file"):
try:
readline.read_history_file(self.histfile)
except IOError:
pass
atexit.register(self._save_history)
super(Shell, self).serve_forever(banner) | [
"def",
"serve_forever",
"(",
"self",
",",
"banner",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"readline",
",",
"\"read_history_file\"",
")",
":",
"try",
":",
"readline",
".",
"read_history_file",
"(",
"self",
".",
"histfile",
")",
"except",
"IOError",
":",
"pass",
"atexit",
".",
"register",
"(",
"self",
".",
"_save_history",
")",
"super",
"(",
"Shell",
",",
"self",
")",
".",
"serve_forever",
"(",
"banner",
")"
]
| Interact with the user.
:param banner: (optional) the banner to print before the first
interaction. Defaults to ``None``. | [
"Interact",
"with",
"the",
"user",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/shell.py#L74-L86 | train |
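A self-contained sketch of the history round-trip that serve_forever() relies on: load the history file if readline supports it, then register a save hook for exit. The path is illustrative.

import atexit
import os
import readline

histfile = os.path.expanduser("~/.example_history")  # illustrative path
if hasattr(readline, "read_history_file"):
    try:
        readline.read_history_file(histfile)
    except IOError:
        pass  # no history file yet on first run
atexit.register(readline.write_history_file, histfile)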
Ceasar/twosheds | twosheds/completer.py | Completer.complete | def complete(self, word, state):
"""Return the next possible completion for ``word``.
This is called successively with ``state == 0, 1, 2, ...`` until it
returns ``None``.
The completion should begin with ``word``.
:param word: the word to complete
:param state: an int, used to iterate over the choices
"""
try:
import rl
# TODO: doing this manually right now, but may make sense to
# exploit
rl.completion.suppress_append = True
except ImportError:
pass
word = transform(word, self.transforms, word=True)
if state == 0:
self.matches = self.get_matches(word)
try:
match = self.matches[state]
except IndexError:
return None
else:
return transform(match, self.transforms, word=True, inverse=True) | python | def complete(self, word, state):
"""Return the next possible completion for ``word``.
This is called successively with ``state == 0, 1, 2, ...`` until it
returns ``None``.
The completion should begin with ``word``.
:param word: the word to complete
:param state: an int, used to iterate over the choices
"""
try:
import rl
# TODO: doing this manually right now, but may make sense to
# exploit
rl.completion.suppress_append = True
except ImportError:
pass
word = transform(word, self.transforms, word=True)
if state == 0:
self.matches = self.get_matches(word)
try:
match = self.matches[state]
except IndexError:
return None
else:
return transform(match, self.transforms, word=True, inverse=True) | [
"def",
"complete",
"(",
"self",
",",
"word",
",",
"state",
")",
":",
"try",
":",
"import",
"rl",
"# TODO: doing this manually right now, but may make sense to",
"# exploit",
"rl",
".",
"completion",
".",
"suppress_append",
"=",
"True",
"except",
"ImportError",
":",
"pass",
"word",
"=",
"transform",
"(",
"word",
",",
"self",
".",
"transforms",
",",
"word",
"=",
"True",
")",
"if",
"state",
"==",
"0",
":",
"self",
".",
"matches",
"=",
"self",
".",
"get_matches",
"(",
"word",
")",
"try",
":",
"match",
"=",
"self",
".",
"matches",
"[",
"state",
"]",
"except",
"IndexError",
":",
"return",
"None",
"else",
":",
"return",
"transform",
"(",
"match",
",",
"self",
".",
"transforms",
",",
"word",
"=",
"True",
",",
"inverse",
"=",
"True",
")"
]
| Return the next possible completion for ``word``.
This is called successively with ``state == 0, 1, 2, ...`` until it
returns ``None``.
The completion should begin with ``word``.
:param word: the word to complete
:param state: an int, used to iterate over the choices | [
"Return",
"the",
"next",
"possible",
"completion",
"for",
"word",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L97-L125 | train |
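complete(word, state) matches the callback signature readline expects, so a configured Completer could be wired in as below; the constructor arguments for Completer are assumed.

import readline

# `completer` is assumed to be a configured twosheds Completer instance.
readline.set_completer(completer.complete)
readline.parse_and_bind("tab: complete")
# readline now calls completer.complete(word, 0), then state 1, 2, ...
# until the method returns None.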
Ceasar/twosheds | twosheds/completer.py | Completer.exclude_matches | def exclude_matches(self, matches):
"""Filter any matches that match an exclude pattern.
:param matches: a list of possible completions
"""
for match in matches:
for exclude_pattern in self.exclude_patterns:
if re.match(exclude_pattern, match) is not None:
break
else:
yield match | python | def exclude_matches(self, matches):
"""Filter any matches that match an exclude pattern.
:param matches: a list of possible completions
"""
for match in matches:
for exclude_pattern in self.exclude_patterns:
if re.match(exclude_pattern, match) is not None:
break
else:
yield match | [
"def",
"exclude_matches",
"(",
"self",
",",
"matches",
")",
":",
"for",
"match",
"in",
"matches",
":",
"for",
"exclude_pattern",
"in",
"self",
".",
"exclude_patterns",
":",
"if",
"re",
".",
"match",
"(",
"exclude_pattern",
",",
"match",
")",
"is",
"not",
"None",
":",
"break",
"else",
":",
"yield",
"match"
]
| Filter any matches that match an exclude pattern.
:param matches: a list of possible completions | [
"Filter",
"any",
"matches",
"that",
"match",
"an",
"exclude",
"pattern",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L127-L137 | train |
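A standalone sketch of the same exclude-pattern idea, using re.match as the method above does:

import re

def exclude(matches, exclude_patterns):
    # Yield only the matches that hit none of the exclude patterns.
    for m in matches:
        if not any(re.match(p, m) for p in exclude_patterns):
            yield m

print(list(exclude(["a.pyc", "a.py"], [r".*\.pyc$"])))  # -> ['a.py']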
Ceasar/twosheds | twosheds/completer.py | Completer.gen_filename_completions | def gen_filename_completions(self, word, filenames):
"""Generate a sequence of filenames that match ``word``.
:param word: the word to complete
"""
if not word:
return filenames
else:
trie = pygtrie.CharTrie()
for filename in filenames:
trie[filename] = filename
return trie.iterkeys(prefix=word) | python | def gen_filename_completions(self, word, filenames):
"""Generate a sequence of filenames that match ``word``.
:param word: the word to complete
"""
if not word:
return filenames
else:
trie = pygtrie.CharTrie()
for filename in filenames:
trie[filename] = filename
return trie.iterkeys(prefix=word) | [
"def",
"gen_filename_completions",
"(",
"self",
",",
"word",
",",
"filenames",
")",
":",
"if",
"not",
"word",
":",
"return",
"filenames",
"else",
":",
"trie",
"=",
"pygtrie",
".",
"CharTrie",
"(",
")",
"for",
"filename",
"in",
"filenames",
":",
"trie",
"[",
"filename",
"]",
"=",
"filename",
"return",
"trie",
".",
"iterkeys",
"(",
"prefix",
"=",
"word",
")"
]
| Generate a sequence of filenames that match ``word``.
:param word: the word to complete | [
"Generate",
"a",
"sequence",
"of",
"filenames",
"that",
"match",
"word",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L142-L153 | train |
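The prefix lookup above can be exercised directly with pygtrie; this sketch assumes the pygtrie package is installed.

import pygtrie

trie = pygtrie.CharTrie()
for name in ["setup.py", "setup.cfg", "README"]:
    trie[name] = name
print(sorted(trie.iterkeys(prefix="setup")))  # -> ['setup.cfg', 'setup.py']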
Ceasar/twosheds | twosheds/completer.py | Completer.gen_matches | def gen_matches(self, word):
"""Generate a sequence of possible completions for ``word``.
:param word: the word to complete
"""
if word.startswith("$"):
for match in self.gen_variable_completions(word, os.environ):
yield match
else:
head, tail = os.path.split(word)
filenames = os.listdir(head or '.')
completions = self.gen_filename_completions(tail, filenames)
for match in completions:
yield os.path.join(head, match)
for extension in self.extensions:
for match in extension(word):
yield match | python | def gen_matches(self, word):
"""Generate a sequence of possible completions for ``word``.
:param word: the word to complete
"""
if word.startswith("$"):
for match in self.gen_variable_completions(word, os.environ):
yield match
else:
head, tail = os.path.split(word)
filenames = os.listdir(head or '.')
completions = self.gen_filename_completions(tail, filenames)
for match in completions:
yield os.path.join(head, match)
for extension in self.extensions:
for match in extension(word):
yield match | [
"def",
"gen_matches",
"(",
"self",
",",
"word",
")",
":",
"if",
"word",
".",
"startswith",
"(",
"\"$\"",
")",
":",
"for",
"match",
"in",
"self",
".",
"gen_variable_completions",
"(",
"word",
",",
"os",
".",
"environ",
")",
":",
"yield",
"match",
"else",
":",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"word",
")",
"filenames",
"=",
"os",
".",
"listdir",
"(",
"head",
"or",
"'.'",
")",
"completions",
"=",
"self",
".",
"gen_filename_completions",
"(",
"tail",
",",
"filenames",
")",
"for",
"match",
"in",
"completions",
":",
"yield",
"os",
".",
"path",
".",
"join",
"(",
"head",
",",
"match",
")",
"for",
"extension",
"in",
"self",
".",
"extensions",
":",
"for",
"match",
"in",
"extension",
"(",
"word",
")",
":",
"yield",
"match"
]
| Generate a sequence of possible completions for ``word``.
:param word: the word to complete | [
"Generate",
"a",
"sequence",
"of",
"possible",
"completions",
"for",
"word",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L155-L172 | train |
Ceasar/twosheds | twosheds/completer.py | Completer.gen_variable_completions | def gen_variable_completions(self, word, env):
"""Generate a sequence of possible variable completions for ``word``.
:param word: the word to complete
:param env: the environment
"""
# ignore the first character, which is a dollar sign
var = word[1:]
for k in env:
if k.startswith(var):
yield "$" + k | python | def gen_variable_completions(self, word, env):
"""Generate a sequence of possible variable completions for ``word``.
:param word: the word to complete
:param env: the environment
"""
# ignore the first character, which is a dollar sign
var = word[1:]
for k in env:
if k.startswith(var):
yield "$" + k | [
"def",
"gen_variable_completions",
"(",
"self",
",",
"word",
",",
"env",
")",
":",
"# ignore the first character, which is a dollar sign",
"var",
"=",
"word",
"[",
"1",
":",
"]",
"for",
"k",
"in",
"env",
":",
"if",
"k",
".",
"startswith",
"(",
"var",
")",
":",
"yield",
"\"$\"",
"+",
"k"
]
| Generate a sequence of possible variable completions for ``word``.
:param word: the word to complete
:param env: the environment | [
"Generate",
"a",
"sequence",
"of",
"possible",
"variable",
"completions",
"for",
"word",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L174-L184 | train |
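Variable completion is a plain prefix scan over an environment mapping; a self-contained sketch of the same logic:

def variable_completions(word, env):
    # word starts with "$"; match environment keys against the rest.
    var = word[1:]
    return ["$" + k for k in env if k.startswith(var)]

print(variable_completions("$HO", {"HOME": "/home/u", "PATH": "/bin"}))
# -> ['$HOME']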
Ceasar/twosheds | twosheds/completer.py | Completer.inflect | def inflect(self, filename):
"""Inflect a filename to indicate its type.
If the file is a directory, the suffix "/" is appended, otherwise
a space is appended.
:param filename: the name of the file to inflect
"""
suffix = ("/" if os.path.isdir(filename) else " ")
return self._escape(filename) + suffix | python | def inflect(self, filename):
"""Inflect a filename to indicate its type.
If the file is a directory, the suffix "/" is appended, otherwise
a space is appended.
:param filename: the name of the file to inflect
"""
suffix = ("/" if os.path.isdir(filename) else " ")
return self._escape(filename) + suffix | [
"def",
"inflect",
"(",
"self",
",",
"filename",
")",
":",
"suffix",
"=",
"(",
"\"/\"",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
"else",
"\" \"",
")",
"return",
"self",
".",
"_escape",
"(",
"filename",
")",
"+",
"suffix"
]
| Inflect a filename to indicate its type.
If the file is a directory, the suffix "/" is appended, otherwise
a space is appended.
:param filename: the name of the file to inflect | [
"Inflect",
"a",
"filename",
"to",
"indicate",
"its",
"type",
"."
]
| 55b0a207e3a06b85e9a9567069b3822a651501a7 | https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L202-L211 | train |
berkeley-cocosci/Wallace | wallace/nodes.py | Environment.state | def state(self, time=None):
"""The most recently-created info of type State at the specfied time.
If time is None then it returns the most recent state as of now.
"""
if time is None:
return max(self.infos(type=State), key=attrgetter('creation_time'))
else:
states = [
s for s in self.infos(type=State) if s.creation_time < time]
return max(states, key=attrgetter('creation_time')) | python | def state(self, time=None):
"""The most recently-created info of type State at the specfied time.
If time is None then it returns the most recent state as of now.
"""
if time is None:
return max(self.infos(type=State), key=attrgetter('creation_time'))
else:
states = [
s for s in self.infos(type=State) if s.creation_time < time]
return max(states, key=attrgetter('creation_time')) | [
"def",
"state",
"(",
"self",
",",
"time",
"=",
"None",
")",
":",
"if",
"time",
"is",
"None",
":",
"return",
"max",
"(",
"self",
".",
"infos",
"(",
"type",
"=",
"State",
")",
",",
"key",
"=",
"attrgetter",
"(",
"'creation_time'",
")",
")",
"else",
":",
"states",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"infos",
"(",
"type",
"=",
"State",
")",
"if",
"s",
".",
"creation_time",
"<",
"time",
"]",
"return",
"max",
"(",
"states",
",",
"key",
"=",
"attrgetter",
"(",
"'creation_time'",
")",
")"
]
| The most recently-created info of type State at the specified time.
If time is None then it returns the most recent state as of now. | [
"The",
"most",
"recently",
"-",
"created",
"info",
"of",
"type",
"State",
"at",
"the",
"specfied",
"time",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/nodes.py#L96-L106 | train |
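The max-by-creation-time pattern used by state() works on any objects that carry a creation_time attribute; a minimal self-contained sketch:

from collections import namedtuple
from operator import attrgetter

# Stand-in for the State model, for illustration only.
State = namedtuple("State", "creation_time contents")
states = [State(1, "a"), State(3, "c"), State(2, "b")]

latest = max(states, key=attrgetter("creation_time"))
before_t3 = max((s for s in states if s.creation_time < 3),
                key=attrgetter("creation_time"))
print(latest.contents, before_t3.contents)  # -> c b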
projectshift/shift-boiler | boiler/feature/sentry.py | sentry_feature | def sentry_feature(app):
"""
Sentry feature
Adds basic integration with Sentry via the raven library
"""
# get keys
sentry_public_key = app.config.get('SENTRY_PUBLIC_KEY')
sentry_project_id = app.config.get('SENTRY_PROJECT_ID')
if not sentry_public_key or not sentry_project_id:
return
# prepare dsn
dsn = 'https://{key}@sentry.io/{project_id}'
dsn = dsn.format(key=sentry_public_key, project_id=sentry_project_id)
# init sentry
sentry.init_app(app=app, dsn=dsn) | python | def sentry_feature(app):
"""
Sentry feature
Adds basic integration with Sentry via the raven library
"""
# get keys
sentry_public_key = app.config.get('SENTRY_PUBLIC_KEY')
sentry_project_id = app.config.get('SENTRY_PROJECT_ID')
if not sentry_public_key or not sentry_project_id:
return
# prepare dsn
dsn = 'https://{key}@sentry.io/{project_id}'
dsn = dsn.format(key=sentry_public_key, project_id=sentry_project_id)
# init sentry
sentry.init_app(app=app, dsn=dsn) | [
"def",
"sentry_feature",
"(",
"app",
")",
":",
"# get keys",
"sentry_public_key",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'SENTRY_PUBLIC_KEY'",
")",
"sentry_project_id",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'SENTRY_PROJECT_ID'",
")",
"if",
"not",
"sentry_public_key",
"or",
"not",
"sentry_project_id",
":",
"return",
"# prepare dsn",
"dsn",
"=",
"'https://{key}@sentry.io/{project_id}'",
"dsn",
"=",
"dsn",
".",
"format",
"(",
"key",
"=",
"sentry_public_key",
",",
"project_id",
"=",
"sentry_project_id",
")",
"# init sentry",
"sentry",
".",
"init_app",
"(",
"app",
"=",
"app",
",",
"dsn",
"=",
"dsn",
")"
]
| Sentry feature
Adds basic integration with Sentry via the raven library | [
"Sentry",
"feature",
"Adds",
"basic",
"integration",
"with",
"Sentry",
"via",
"the",
"raven",
"library"
]
| 8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b | https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/feature/sentry.py#L6-L23 | train |
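The feature only activates when both config keys are present; a sketch of the expected Flask settings (key names come from the code above, the values are made up):

# The DSN is assembled as https://<SENTRY_PUBLIC_KEY>@sentry.io/<SENTRY_PROJECT_ID>
app.config["SENTRY_PUBLIC_KEY"] = "0123456789abcdef"  # example value
app.config["SENTRY_PROJECT_ID"] = "42"                # example value
sentry_feature(app)  # silently a no-op if either key is missing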
20c/pluginmgr | pluginmgr/config.py | ConfigPluginManager.new_plugin | def new_plugin(self, config, *args, **kwargs):
"""
instantiate a plugin
creates the object, stores it in _instance
"""
typ = None
obj = None
# if type is defined, create a new instance
if 'type' in config:
typ = config['type']
# single key is overriding an existing plugin instance
elif isinstance(config, collections.Mapping) and len(config) == 1:
# get type name and shift out config to parent level
(typ, config) = list(config.items())[0]
obj = self._ctor(typ, config, *args, **kwargs)
# store if named
if 'name' in config:
self._instance[config['name']] = obj
else:
# this could dupe on .name, make name=''?
config['name'] = typ
return obj | python | def new_plugin(self, config, *args, **kwargs):
"""
instantiate a plugin
creates the object, stores it in _instance
"""
typ = None
obj = None
# if type is defined, create a new instance
if 'type' in config:
typ = config['type']
# single key is overriding an existing plugin instance
elif isinstance(config, collections.Mapping) and len(config) == 1:
# get type name and shift out config to parent level
(typ, config) = list(config.items())[0]
obj = self._ctor(typ, config, *args, **kwargs)
# store if named
if 'name' in config:
self._instance[config['name']] = obj
else:
# this could dupe on .name, make name=''?
config['name'] = typ
return obj | [
"def",
"new_plugin",
"(",
"self",
",",
"config",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"typ",
"=",
"None",
"obj",
"=",
"None",
"# if type is defined, create a new instance",
"if",
"'type'",
"in",
"config",
":",
"typ",
"=",
"config",
"[",
"'type'",
"]",
"# single key is overriding an existing plugin instance",
"elif",
"isinstance",
"(",
"config",
",",
"collections",
".",
"Mapping",
")",
"and",
"len",
"(",
"config",
")",
"==",
"1",
":",
"# get type name and shift out config to parent level",
"(",
"typ",
",",
"config",
")",
"=",
"list",
"(",
"config",
".",
"items",
"(",
")",
")",
"[",
"0",
"]",
"obj",
"=",
"self",
".",
"_ctor",
"(",
"typ",
",",
"config",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# store if named",
"if",
"'name'",
"in",
"config",
":",
"self",
".",
"_instance",
"[",
"config",
"[",
"'name'",
"]",
"]",
"=",
"obj",
"else",
":",
"# this could dupe on .name, make name=''?",
"config",
"[",
"'name'",
"]",
"=",
"typ",
"return",
"obj"
]
| instantiate a plugin
creates the object, stores it in _instance | [
"instantiate",
"a",
"plugin",
"creates",
"the",
"object",
"stores",
"it",
"in",
"_instance"
]
| ea19edab6d145f539641c304745acd4ab2c67eb7 | https://github.com/20c/pluginmgr/blob/ea19edab6d145f539641c304745acd4ab2c67eb7/pluginmgr/config.py#L47-L73 | train |
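new_plugin() accepts two config shapes; a sketch of both, with `mgr` standing in for a ConfigPluginManager instance and the plugin type names made up:

# Shape 1: explicit "type" key, optionally with a "name" for registration.
mgr.new_plugin({"type": "http", "name": "api", "timeout": 5})

# Shape 2: a single-key mapping, where the key is the plugin type and the
# value becomes the config; "name" then defaults to the type.
mgr.new_plugin({"http": {"timeout": 5}})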
adaptive-learning/proso-apps | proso_models/views.py | to_practice_counts | def to_practice_counts(request):
"""
Get number of items available to practice.
filters: -- use this or body
json as in BODY
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
}
"""
data = None
if request.method == "POST":
data = json.loads(request.body.decode("utf-8"))["filters"]
if "filters" in request.GET:
data = load_query_json(request.GET, "filters")
if data is None or len(data) == 0:
return render_json(request, {}, template='models_json.html', help_text=to_practice_counts.__doc__)
language = get_language(request)
timer('to_practice_counts')
filter_names, filter_filters = list(zip(*sorted(data.items())))
reachable_leaves = Item.objects.filter_all_reachable_leaves_many(filter_filters, language)
response = {
group_id: {
'filter': data[group_id],
'number_of_items': len(items),
}
for group_id, items in zip(filter_names, reachable_leaves)
}
LOGGER.debug("to_practice_counts - getting items in groups took %s seconds", (timer('to_practice_counts')))
return render_json(request, response, template='models_json.html', help_text=to_practice_counts.__doc__) | python | def to_practice_counts(request):
"""
Get number of items available to practice.
filters: -- use this or body
json as in BODY
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
}
"""
data = None
if request.method == "POST":
data = json.loads(request.body.decode("utf-8"))["filters"]
if "filters" in request.GET:
data = load_query_json(request.GET, "filters")
if data is None or len(data) == 0:
return render_json(request, {}, template='models_json.html', help_text=to_practice_counts.__doc__)
language = get_language(request)
timer('to_practice_counts')
filter_names, filter_filters = list(zip(*sorted(data.items())))
reachable_leaves = Item.objects.filter_all_reachable_leaves_many(filter_filters, language)
response = {
group_id: {
'filter': data[group_id],
'number_of_items': len(items),
}
for group_id, items in zip(filter_names, reachable_leaves)
}
LOGGER.debug("to_practice_counts - getting items in groups took %s seconds", (timer('to_practice_counts')))
return render_json(request, response, template='models_json.html', help_text=to_practice_counts.__doc__) | [
"def",
"to_practice_counts",
"(",
"request",
")",
":",
"data",
"=",
"None",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"[",
"\"filters\"",
"]",
"if",
"\"filters\"",
"in",
"request",
".",
"GET",
":",
"data",
"=",
"load_query_json",
"(",
"request",
".",
"GET",
",",
"\"filters\"",
")",
"if",
"data",
"is",
"None",
"or",
"len",
"(",
"data",
")",
"==",
"0",
":",
"return",
"render_json",
"(",
"request",
",",
"{",
"}",
",",
"template",
"=",
"'models_json.html'",
",",
"help_text",
"=",
"to_practice_counts",
".",
"__doc__",
")",
"language",
"=",
"get_language",
"(",
"request",
")",
"timer",
"(",
"'to_practice_counts'",
")",
"filter_names",
",",
"filter_filters",
"=",
"list",
"(",
"zip",
"(",
"*",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
")",
")",
")",
"reachable_leaves",
"=",
"Item",
".",
"objects",
".",
"filter_all_reachable_leaves_many",
"(",
"filter_filters",
",",
"language",
")",
"response",
"=",
"{",
"group_id",
":",
"{",
"'filter'",
":",
"data",
"[",
"group_id",
"]",
",",
"'number_of_items'",
":",
"len",
"(",
"items",
")",
",",
"}",
"for",
"group_id",
",",
"items",
"in",
"zip",
"(",
"filter_names",
",",
"reachable_leaves",
")",
"}",
"LOGGER",
".",
"debug",
"(",
"\"to_practice_counts - getting items in groups took %s seconds\"",
",",
"(",
"timer",
"(",
"'to_practice_counts'",
")",
")",
")",
"return",
"render_json",
"(",
"request",
",",
"response",
",",
"template",
"=",
"'models_json.html'",
",",
"help_text",
"=",
"to_practice_counts",
".",
"__doc__",
")"
]
| Get number of items available to practice.
filters: -- use this or body
json as in BODY
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
} | [
"Get",
"number",
"of",
"items",
"available",
"to",
"practice",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/views.py#L89-L124 | train |
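A sketch of the request body described in the docstring; the identifiers and filter values are made up.

import json

body = {"filters": {
    "europe": [["category/geography"], ["continent/europe"]],  # made-up filter
    "capitals": [["category/capitals"]],                       # made-up filter
}}
payload = json.dumps(body)  # POST this to the to_practice_counts endpoint
# The JSON response maps each identifier to its filter and number_of_items.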
adaptive-learning/proso-apps | proso_models/views.py | answer | def answer(request):
"""
Save the answer.
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on aswer type
(see from_json method of Django model class)
}
"""
if request.method == 'GET':
return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
elif request.method == 'POST':
practice_filter = get_filter(request)
practice_context = PracticeContext.objects.from_content(practice_filter)
saved_answers = _save_answers(request, practice_context, True)
return render_json(request, saved_answers, status=200, template='models_answer.html')
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | python | def answer(request):
"""
Save the answer.
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on answer type
(see from_json method of Django model class)
}
"""
if request.method == 'GET':
return render(request, 'models_answer.html', {}, help_text=answer.__doc__)
elif request.method == 'POST':
practice_filter = get_filter(request)
practice_context = PracticeContext.objects.from_content(practice_filter)
saved_answers = _save_answers(request, practice_context, True)
return render_json(request, saved_answers, status=200, template='models_answer.html')
else:
return HttpResponseBadRequest("method %s is not allowed".format(request.method)) | [
"def",
"answer",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"render",
"(",
"request",
",",
"'models_answer.html'",
",",
"{",
"}",
",",
"help_text",
"=",
"answer",
".",
"__doc__",
")",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"practice_filter",
"=",
"get_filter",
"(",
"request",
")",
"practice_context",
"=",
"PracticeContext",
".",
"objects",
".",
"from_content",
"(",
"practice_filter",
")",
"saved_answers",
"=",
"_save_answers",
"(",
"request",
",",
"practice_context",
",",
"True",
")",
"return",
"render_json",
"(",
"request",
",",
"saved_answers",
",",
"status",
"=",
"200",
",",
"template",
"=",
"'models_answer.html'",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Save the answer.
GET parameters:
html:
turn on the HTML version of the API
BODY
json in following format:
{
"answer": #answer, -- for one answer
"answers": [#answer, #answer, #answer ...] -- for multiple answers
}
answer = {
"answer_class": str, -- class of answer to save (e.g., flashcard_answer)
"response_time": int, -- response time in milliseconds
"meta": "str" -- optional information
"time_gap": int -- waiting time in frontend in seconds
... -- other fields depending on answer type
(see from_json method of Django model class)
} | [
"Save",
"the",
"answer",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/views.py#L245-L277 | train |
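A sketch of the POST body the answer view expects, following the schema in its docstring; the field values are illustrative.

body = {
    "answers": [
        {
            "answer_class": "flashcard_answer",  # example answer class
            "response_time": 1350,               # milliseconds
            "time_gap": 2,                       # seconds waited in frontend
            "meta": "practice-session-1",        # optional
            # ...plus answer-type specific fields (see from_json)
        },
    ]
}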
adaptive-learning/proso-apps | proso_models/views.py | user_stats | def user_stats(request):
"""
Get user statistics for selected groups of items
time:
time in format '%Y-%m-%d_%H:%M:%S' used for practicing
user:
identifier of the user (only for staff users)
username:
username of user (only for users with public profile)
filters: -- use this or body
json as in BODY
mastered:
use model to compute number of mastered items - can be slow
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
}
"""
timer('user_stats')
response = {}
data = None
if request.method == "POST":
data = json.loads(request.body.decode("utf-8"))["filters"]
if "filters" in request.GET:
data = load_query_json(request.GET, "filters")
if data is None:
return render_json(request, {}, template='models_user_stats.html', help_text=user_stats.__doc__)
environment = get_environment()
if is_time_overridden(request):
environment.shift_time(get_time(request))
user_id = get_user_id(request)
language = get_language(request)
filter_names, filter_filters = list(zip(*sorted(data.items())))
reachable_leaves = Item.objects.filter_all_reachable_leaves_many(filter_filters, language)
all_leaves = sorted(list(set(flatten(reachable_leaves))))
answers = environment.number_of_answers_more_items(all_leaves, user_id)
correct_answers = environment.number_of_correct_answers_more_items(all_leaves, user_id)
if request.GET.get("mastered"):
timer('user_stats_mastered')
mastery_threshold = get_mastery_trashold()
predictions = Item.objects.predict_for_overview(environment, user_id, all_leaves)
mastered = dict(list(zip(all_leaves, [p >= mastery_threshold for p in predictions])))
LOGGER.debug("user_stats - getting predictions for items took %s seconds", (timer('user_stats_mastered')))
for identifier, items in zip(filter_names, reachable_leaves):
if len(items) == 0:
response[identifier] = {
"filter": data[identifier],
"number_of_items": 0,
}
else:
response[identifier] = {
"filter": data[identifier],
"number_of_items": len(items),
"number_of_practiced_items": sum(answers[i] > 0 for i in items),
"number_of_answers": sum(answers[i] for i in items),
"number_of_correct_answers": sum(correct_answers[i] for i in items),
}
if request.GET.get("mastered"):
response[identifier]["number_of_mastered_items"]= sum(mastered[i] for i in items)
return render_json(request, response, template='models_user_stats.html', help_text=user_stats.__doc__) | python | def user_stats(request):
"""
Get user statistics for selected groups of items
time:
time in format '%Y-%m-%d_%H:%M:%S' used for practicing
user:
identifier of the user (only for staff users)
username:
username of user (only for users with public profile)
filters: -- use this or body
json as in BODY
mastered:
use model to compute number of mastered items - can be slow
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
}
"""
timer('user_stats')
response = {}
data = None
if request.method == "POST":
data = json.loads(request.body.decode("utf-8"))["filters"]
if "filters" in request.GET:
data = load_query_json(request.GET, "filters")
if data is None:
return render_json(request, {}, template='models_user_stats.html', help_text=user_stats.__doc__)
environment = get_environment()
if is_time_overridden(request):
environment.shift_time(get_time(request))
user_id = get_user_id(request)
language = get_language(request)
filter_names, filter_filters = list(zip(*sorted(data.items())))
reachable_leaves = Item.objects.filter_all_reachable_leaves_many(filter_filters, language)
all_leaves = sorted(list(set(flatten(reachable_leaves))))
answers = environment.number_of_answers_more_items(all_leaves, user_id)
correct_answers = environment.number_of_correct_answers_more_items(all_leaves, user_id)
if request.GET.get("mastered"):
timer('user_stats_mastered')
mastery_threshold = get_mastery_trashold()
predictions = Item.objects.predict_for_overview(environment, user_id, all_leaves)
mastered = dict(list(zip(all_leaves, [p >= mastery_threshold for p in predictions])))
LOGGER.debug("user_stats - getting predictions for items took %s seconds", (timer('user_stats_mastered')))
for identifier, items in zip(filter_names, reachable_leaves):
if len(items) == 0:
response[identifier] = {
"filter": data[identifier],
"number_of_items": 0,
}
else:
response[identifier] = {
"filter": data[identifier],
"number_of_items": len(items),
"number_of_practiced_items": sum(answers[i] > 0 for i in items),
"number_of_answers": sum(answers[i] for i in items),
"number_of_correct_answers": sum(correct_answers[i] for i in items),
}
if request.GET.get("mastered"):
response[identifier]["number_of_mastered_items"]= sum(mastered[i] for i in items)
return render_json(request, response, template='models_user_stats.html', help_text=user_stats.__doc__) | [
"def",
"user_stats",
"(",
"request",
")",
":",
"timer",
"(",
"'user_stats'",
")",
"response",
"=",
"{",
"}",
"data",
"=",
"None",
"if",
"request",
".",
"method",
"==",
"\"POST\"",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"[",
"\"filters\"",
"]",
"if",
"\"filters\"",
"in",
"request",
".",
"GET",
":",
"data",
"=",
"load_query_json",
"(",
"request",
".",
"GET",
",",
"\"filters\"",
")",
"if",
"data",
"is",
"None",
":",
"return",
"render_json",
"(",
"request",
",",
"{",
"}",
",",
"template",
"=",
"'models_user_stats.html'",
",",
"help_text",
"=",
"user_stats",
".",
"__doc__",
")",
"environment",
"=",
"get_environment",
"(",
")",
"if",
"is_time_overridden",
"(",
"request",
")",
":",
"environment",
".",
"shift_time",
"(",
"get_time",
"(",
"request",
")",
")",
"user_id",
"=",
"get_user_id",
"(",
"request",
")",
"language",
"=",
"get_language",
"(",
"request",
")",
"filter_names",
",",
"filter_filters",
"=",
"list",
"(",
"zip",
"(",
"*",
"sorted",
"(",
"data",
".",
"items",
"(",
")",
")",
")",
")",
"reachable_leaves",
"=",
"Item",
".",
"objects",
".",
"filter_all_reachable_leaves_many",
"(",
"filter_filters",
",",
"language",
")",
"all_leaves",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"flatten",
"(",
"reachable_leaves",
")",
")",
")",
")",
"answers",
"=",
"environment",
".",
"number_of_answers_more_items",
"(",
"all_leaves",
",",
"user_id",
")",
"correct_answers",
"=",
"environment",
".",
"number_of_correct_answers_more_items",
"(",
"all_leaves",
",",
"user_id",
")",
"if",
"request",
".",
"GET",
".",
"get",
"(",
"\"mastered\"",
")",
":",
"timer",
"(",
"'user_stats_mastered'",
")",
"mastery_threshold",
"=",
"get_mastery_trashold",
"(",
")",
"predictions",
"=",
"Item",
".",
"objects",
".",
"predict_for_overview",
"(",
"environment",
",",
"user_id",
",",
"all_leaves",
")",
"mastered",
"=",
"dict",
"(",
"list",
"(",
"zip",
"(",
"all_leaves",
",",
"[",
"p",
">=",
"mastery_threshold",
"for",
"p",
"in",
"predictions",
"]",
")",
")",
")",
"LOGGER",
".",
"debug",
"(",
"\"user_stats - getting predictions for items took %s seconds\"",
",",
"(",
"timer",
"(",
"'user_stats_mastered'",
")",
")",
")",
"for",
"identifier",
",",
"items",
"in",
"zip",
"(",
"filter_names",
",",
"reachable_leaves",
")",
":",
"if",
"len",
"(",
"items",
")",
"==",
"0",
":",
"response",
"[",
"identifier",
"]",
"=",
"{",
"\"filter\"",
":",
"data",
"[",
"identifier",
"]",
",",
"\"number_of_items\"",
":",
"0",
",",
"}",
"else",
":",
"response",
"[",
"identifier",
"]",
"=",
"{",
"\"filter\"",
":",
"data",
"[",
"identifier",
"]",
",",
"\"number_of_items\"",
":",
"len",
"(",
"items",
")",
",",
"\"number_of_practiced_items\"",
":",
"sum",
"(",
"answers",
"[",
"i",
"]",
">",
"0",
"for",
"i",
"in",
"items",
")",
",",
"\"number_of_answers\"",
":",
"sum",
"(",
"answers",
"[",
"i",
"]",
"for",
"i",
"in",
"items",
")",
",",
"\"number_of_correct_answers\"",
":",
"sum",
"(",
"correct_answers",
"[",
"i",
"]",
"for",
"i",
"in",
"items",
")",
",",
"}",
"if",
"request",
".",
"GET",
".",
"get",
"(",
"\"mastered\"",
")",
":",
"response",
"[",
"identifier",
"]",
"[",
"\"number_of_mastered_items\"",
"]",
"=",
"sum",
"(",
"mastered",
"[",
"i",
"]",
"for",
"i",
"in",
"items",
")",
"return",
"render_json",
"(",
"request",
",",
"response",
",",
"template",
"=",
"'models_user_stats.html'",
",",
"help_text",
"=",
"user_stats",
".",
"__doc__",
")"
]
| Get user statistics for selected groups of items
time:
time in format '%Y-%m-%d_%H:%M:%S' used for practicing
user:
identifier of the user (only for staff users)
username:
username of user (only for users with public profile)
filters: -- use this or body
json as in BODY
mastered:
use model to compute number of mastered items - can be slow
language:
language of the items
BODY
json in following format:
{
"#identifier": [] -- custom identifier (str) and filter
...
} | [
"Get",
"user",
"statistics",
"for",
"selected",
"groups",
"of",
"items"
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/views.py#L282-L347 | train |
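The mastered count reduces to a threshold test over per-item predictions; a self-contained sketch of that step with made-up numbers:

mastery_threshold = 0.9  # illustrative value for get_mastery_trashold()
predictions = {101: 0.95, 102: 0.40, 103: 0.92}  # item id -> prediction
mastered = {i: p >= mastery_threshold for i, p in predictions.items()}
print(sum(mastered[i] for i in [101, 102, 103]))  # -> 2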
Kortemme-Lab/klab | klab/rosetta/input_files.py | LoopsFile.add | def add(self, start, end, cut_point = None, skip_rate = None, extend_loop = None):
'''Add a new loop definition.'''
self.data.append(self.parse_loop_line(['LOOP', start, end, cut_point, skip_rate, extend_loop]))
assert(start <= end) | python | def add(self, start, end, cut_point = None, skip_rate = None, extend_loop = None):
'''Add a new loop definition.'''
self.data.append(self.parse_loop_line(['LOOP', start, end, cut_point, skip_rate, extend_loop]))
assert(start <= end) | [
"def",
"add",
"(",
"self",
",",
"start",
",",
"end",
",",
"cut_point",
"=",
"None",
",",
"skip_rate",
"=",
"None",
",",
"extend_loop",
"=",
"None",
")",
":",
"self",
".",
"data",
".",
"append",
"(",
"self",
".",
"parse_loop_line",
"(",
"[",
"'LOOP'",
",",
"start",
",",
"end",
",",
"cut_point",
",",
"skip_rate",
",",
"extend_loop",
"]",
")",
")",
"assert",
"(",
"start",
"<=",
"end",
")"
]
| Add a new loop definition. | [
"Add",
"a",
"new",
"loop",
"definition",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/rosetta/input_files.py#L123-L126 | train |
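A usage sketch for add(); the LoopsFile constructor arguments are assumed.

loops = LoopsFile()  # constructor arguments assumed for this sketch
# Define a loop over residues 24-36 with a cut point at 30.
loops.add(24, 36, cut_point=30, skip_rate=0.0, extend_loop=True)
# loops.add(50, 45) would fail the start <= end assertion.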
berkeley-cocosci/Wallace | wallace/experiments.py | Experiment.log | def log(self, text, key="?????", force=False):
"""Print a string to the logs."""
if force or self.verbose:
print ">>>> {} {}".format(key, text)
sys.stdout.flush() | python | def log(self, text, key="?????", force=False):
"""Print a string to the logs."""
if force or self.verbose:
print ">>>> {} {}".format(key, text)
sys.stdout.flush() | [
"def",
"log",
"(",
"self",
",",
"text",
",",
"key",
"=",
"\"?????\"",
",",
"force",
"=",
"False",
")",
":",
"if",
"force",
"or",
"self",
".",
"verbose",
":",
"print",
"\">>>> {} {}\"",
".",
"format",
"(",
"key",
",",
"text",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
]
| Print a string to the logs. | [
"Print",
"a",
"string",
"to",
"the",
"logs",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/experiments.py#L230-L234 | train |
Kortemme-Lab/klab | klab/view_commit_log.py | input_yes_no | def input_yes_no(msg=''):
"""
Simple helper function
"""
print '\n'+msg
while(True):
i=raw_input('Input yes or no: ')
i=i.lower()
if i=='y' or i=='yes':
return True
elif i=='n' or i=='no':
return False
else:
print 'ERROR: Bad input. Must enter y/n/yes/no' | python | def input_yes_no(msg=''):
"""
Simple helper function
"""
print '\n'+msg
while(True):
i=raw_input('Input yes or no: ')
i=i.lower()
if i=='y' or i=='yes':
return True
elif i=='n' or i=='no':
return False
else:
print 'ERROR: Bad input. Must enter y/n/yes/no' | [
"def",
"input_yes_no",
"(",
"msg",
"=",
"''",
")",
":",
"print",
"'\\n'",
"+",
"msg",
"while",
"(",
"True",
")",
":",
"i",
"=",
"raw_input",
"(",
"'Input yes or no: '",
")",
"i",
"=",
"i",
".",
"lower",
"(",
")",
"if",
"i",
"==",
"'y'",
"or",
"i",
"==",
"'yes'",
":",
"return",
"True",
"elif",
"i",
"==",
"'n'",
"or",
"i",
"==",
"'no'",
":",
"return",
"False",
"else",
":",
"print",
"'ERROR: Bad input. Must enter y/n/yes/no'"
]
| Simple helper function | [
"Simple",
"helper",
"function"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/view_commit_log.py#L120-L133 | train |
inveniosoftware/invenio-pidrelations | invenio_pidrelations/utils.py | resolve_relation_type_config | def resolve_relation_type_config(value):
"""Resolve the relation type to config object.
Resolve relation type from string (e.g.: serialization) or int (db value)
to the full config object.
"""
relation_types = current_app.config['PIDRELATIONS_RELATION_TYPES']
if isinstance(value, six.string_types):
try:
obj = next(rt for rt in relation_types if rt.name == value)
except StopIteration:
raise ValueError("Relation name '{0}' is not configured.".format(
value))
elif isinstance(value, int):
try:
obj = next(rt for rt in relation_types if rt.id == value)
except StopIteration:
raise ValueError("Relation ID {0} is not configured.".format(
value))
else:
raise ValueError("Type of value '{0}' is not supported for resolving.".
format(value))
api_class = obj_or_import_string(obj.api)
schema_class = obj_or_import_string(obj.schema)
return obj.__class__(obj.id, obj.name, obj.label, api_class, schema_class) | python | def resolve_relation_type_config(value):
"""Resolve the relation type to config object.
Resolve relation type from string (e.g.: serialization) or int (db value)
to the full config object.
"""
relation_types = current_app.config['PIDRELATIONS_RELATION_TYPES']
if isinstance(value, six.string_types):
try:
obj = next(rt for rt in relation_types if rt.name == value)
except StopIteration:
raise ValueError("Relation name '{0}' is not configured.".format(
value))
elif isinstance(value, int):
try:
obj = next(rt for rt in relation_types if rt.id == value)
except StopIteration:
raise ValueError("Relation ID {0} is not configured.".format(
value))
else:
raise ValueError("Type of value '{0}' is not supported for resolving.".
format(value))
api_class = obj_or_import_string(obj.api)
schema_class = obj_or_import_string(obj.schema)
return obj.__class__(obj.id, obj.name, obj.label, api_class, schema_class) | [
"def",
"resolve_relation_type_config",
"(",
"value",
")",
":",
"relation_types",
"=",
"current_app",
".",
"config",
"[",
"'PIDRELATIONS_RELATION_TYPES'",
"]",
"if",
"isinstance",
"(",
"value",
",",
"six",
".",
"string_types",
")",
":",
"try",
":",
"obj",
"=",
"next",
"(",
"rt",
"for",
"rt",
"in",
"relation_types",
"if",
"rt",
".",
"name",
"==",
"value",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"\"Relation name '{0}' is not configured.\"",
".",
"format",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"try",
":",
"obj",
"=",
"next",
"(",
"rt",
"for",
"rt",
"in",
"relation_types",
"if",
"rt",
".",
"id",
"==",
"value",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"\"Relation ID {0} is not configured.\"",
".",
"format",
"(",
"value",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Type of value '{0}' is not supported for resolving.\"",
".",
"format",
"(",
"value",
")",
")",
"api_class",
"=",
"obj_or_import_string",
"(",
"obj",
".",
"api",
")",
"schema_class",
"=",
"obj_or_import_string",
"(",
"obj",
".",
"schema",
")",
"return",
"obj",
".",
"__class__",
"(",
"obj",
".",
"id",
",",
"obj",
".",
"name",
",",
"obj",
".",
"label",
",",
"api_class",
",",
"schema_class",
")"
]
| Resolve the relation type to config object.
Resolve relation type from string (e.g.: serialization) or int (db value)
to the full config object. | [
"Resolve",
"the",
"relation",
"type",
"to",
"config",
"object",
"."
]
| a49f3725cf595b663c5b04814280b231f88bc333 | https://github.com/inveniosoftware/invenio-pidrelations/blob/a49f3725cf595b663c5b04814280b231f88bc333/invenio_pidrelations/utils.py#L48-L73 | train |
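The resolver accepts either the relation name or its numeric id; a sketch, assuming a Flask app context and that PIDRELATIONS_RELATION_TYPES holds entries with id/name/label/api/schema fields as the code implies. The relation name "version" is only an example.

cfg_by_name = resolve_relation_type_config("version")  # lookup by name
cfg_by_id = resolve_relation_type_config(0)            # lookup by db value
# Both return the config object with api/schema import strings resolved
# to classes via obj_or_import_string.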
Kortemme-Lab/klab | klab/bio/alignment.py | match_RCSB_pdb_chains | def match_RCSB_pdb_chains(pdb_id1, pdb_id2, cut_off = 60.0, allow_multiple_matches = False, multiple_match_error_margin = 3.0, use_seqres_sequences_if_possible = True, strict = True):
'''A convenience function for match_pdb_chains. The required arguments are two PDB IDs from the RCSB.'''
try:
stage = pdb_id1
pdb_1 = PDB(retrieve_pdb(pdb_id1), strict = strict)
stage = pdb_id2
pdb_2 = PDB(retrieve_pdb(pdb_id2), strict = strict)
except (PDBParsingException, NonCanonicalResidueException, PDBValidationException), e:
raise PDBParsingException("An error occurred while loading %s: '%s'" % (stage, str(e)))
return match_pdb_chains(pdb_1, pdb_id1, pdb_2, pdb_id2, cut_off = cut_off, allow_multiple_matches = allow_multiple_matches, multiple_match_error_margin = multiple_match_error_margin, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible) | python | def match_RCSB_pdb_chains(pdb_id1, pdb_id2, cut_off = 60.0, allow_multiple_matches = False, multiple_match_error_margin = 3.0, use_seqres_sequences_if_possible = True, strict = True):
'''A convenience function for match_pdb_chains. The required arguments are two PDB IDs from the RCSB.'''
try:
stage = pdb_id1
pdb_1 = PDB(retrieve_pdb(pdb_id1), strict = strict)
stage = pdb_id2
pdb_2 = PDB(retrieve_pdb(pdb_id2), strict = strict)
except (PDBParsingException, NonCanonicalResidueException, PDBValidationException), e:
raise PDBParsingException("An error occurred while loading %s: '%s'" % (stage, str(e)))
return match_pdb_chains(pdb_1, pdb_id1, pdb_2, pdb_id2, cut_off = cut_off, allow_multiple_matches = allow_multiple_matches, multiple_match_error_margin = multiple_match_error_margin, use_seqres_sequences_if_possible = use_seqres_sequences_if_possible) | [
"def",
"match_RCSB_pdb_chains",
"(",
"pdb_id1",
",",
"pdb_id2",
",",
"cut_off",
"=",
"60.0",
",",
"allow_multiple_matches",
"=",
"False",
",",
"multiple_match_error_margin",
"=",
"3.0",
",",
"use_seqres_sequences_if_possible",
"=",
"True",
",",
"strict",
"=",
"True",
")",
":",
"try",
":",
"stage",
"=",
"pdb_id1",
"pdb_1",
"=",
"PDB",
"(",
"retrieve_pdb",
"(",
"pdb_id1",
")",
",",
"strict",
"=",
"strict",
")",
"stage",
"=",
"pdb_id2",
"pdb_2",
"=",
"PDB",
"(",
"retrieve_pdb",
"(",
"pdb_id2",
")",
",",
"strict",
"=",
"strict",
")",
"except",
"(",
"PDBParsingException",
",",
"NonCanonicalResidueException",
",",
"PDBValidationException",
")",
",",
"e",
":",
"raise",
"PDBParsingException",
"(",
"\"An error occurred while loading %s: '%s'\"",
"%",
"(",
"stage",
",",
"str",
"(",
"e",
")",
")",
")",
"return",
"match_pdb_chains",
"(",
"pdb_1",
",",
"pdb_id1",
",",
"pdb_2",
",",
"pdb_id2",
",",
"cut_off",
"=",
"cut_off",
",",
"allow_multiple_matches",
"=",
"allow_multiple_matches",
",",
"multiple_match_error_margin",
"=",
"multiple_match_error_margin",
",",
"use_seqres_sequences_if_possible",
"=",
"use_seqres_sequences_if_possible",
")"
]
| A convenience function for match_pdb_chains. The required arguments are two PDB IDs from the RCSB. | [
"A",
"convenience",
"function",
"for",
"match_pdb_chains",
".",
"The",
"required",
"arguments",
"are",
"two",
"PDB",
"IDs",
"from",
"the",
"RCSB",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/alignment.py#L139-L149 | train |
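A usage sketch with two arbitrary RCSB identifiers (1A2P and 1BNI are real PDB codes, chosen here only for illustration); both structures are fetched from the RCSB at call time.

matches = match_RCSB_pdb_chains('1A2P', '1BNI', cut_off=60.0)
# Returns the per-chain match information produced by match_pdb_chains.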
uogbuji/versa | tools/py/pipeline/main.py | create_resource | def create_resource(output_model, rtype, unique, links, existing_ids=None, id_helper=None):
'''
General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created
'''
if isinstance(id_helper, str):
idg = idgen(id_helper)
elif isinstance(id_helper, GeneratorType):
idg = id_helper
elif id_helper is None:
idg = default_idgen(None)
else:
#FIXME: G11N
raise ValueError('id_helper must be string (URL), callable or None')
ctx = context(None, None, output_model, base=None, idgen=idg, existing_ids=existing_ids, extras=None)
rid = I(materialize_entity(ctx, rtype, unique=unique))
if existing_ids is not None:
if rid in existing_ids:
return (False, rid)
existing_ids.add(rid)
output_model.add(rid, VTYPE_REL, rtype)
for r, t in links:
output_model.add(rid, r, t)
return (True, rid) | python | def create_resource(output_model, rtype, unique, links, existing_ids=None, id_helper=None):
'''
General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created
'''
if isinstance(id_helper, str):
idg = idgen(id_helper)
elif isinstance(id_helper, GeneratorType):
idg = id_helper
elif id_helper is None:
idg = default_idgen(None)
else:
#FIXME: G11N
raise ValueError('id_helper must be string (URL), callable or None')
ctx = context(None, None, output_model, base=None, idgen=idg, existing_ids=existing_ids, extras=None)
rid = I(materialize_entity(ctx, rtype, unique=unique))
if existing_ids is not None:
if rid in existing_ids:
return (False, rid)
existing_ids.add(rid)
output_model.add(rid, VTYPE_REL, rtype)
for r, t in links:
output_model.add(rid, r, t)
return (True, rid) | [
"def",
"create_resource",
"(",
"output_model",
",",
"rtype",
",",
"unique",
",",
"links",
",",
"existing_ids",
"=",
"None",
",",
"id_helper",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"id_helper",
",",
"str",
")",
":",
"idg",
"=",
"idgen",
"(",
"id_helper",
")",
"elif",
"isinstance",
"(",
"id_helper",
",",
"GeneratorType",
")",
":",
"idg",
"=",
"id_helper",
"elif",
"id_helper",
"is",
"None",
":",
"idg",
"=",
"default_idgen",
"(",
"None",
")",
"else",
":",
"#FIXME: G11N",
"raise",
"ValueError",
"(",
"'id_helper must be string (URL), callable or None'",
")",
"ctx",
"=",
"context",
"(",
"None",
",",
"None",
",",
"output_model",
",",
"base",
"=",
"None",
",",
"idgen",
"=",
"idg",
",",
"existing_ids",
"=",
"existing_ids",
",",
"extras",
"=",
"None",
")",
"rid",
"=",
"I",
"(",
"materialize_entity",
"(",
"ctx",
",",
"rtype",
",",
"unique",
"=",
"unique",
")",
")",
"if",
"existing_ids",
"is",
"not",
"None",
":",
"if",
"rid",
"in",
"existing_ids",
":",
"return",
"(",
"False",
",",
"rid",
")",
"existing_ids",
".",
"add",
"(",
"rid",
")",
"output_model",
".",
"add",
"(",
"rid",
",",
"VTYPE_REL",
",",
"rtype",
")",
"for",
"r",
",",
"t",
"in",
"links",
":",
"output_model",
".",
"add",
"(",
"rid",
",",
"r",
",",
"t",
")",
"return",
"(",
"True",
",",
"rid",
")"
]
| General-purpose routine to create a new resource in the output model, based on data provided
output_model - Versa connection to model to be updated
rtype - Type IRI for the new resource, set with Versa type
unique - list of key/value pairs for determining a unique hash for the new resource
links - list of key/value pairs for setting properties on the new resource
id_helper - If a string, a base URL for the generated ID. If callable, a function used to return the entity. If None, set a default good enough for testing.
existing_ids - set of existing IDs to not recreate, or None, in which case a new resource will always be created | [
"General",
"-",
"purpose",
"routine",
"to",
"create",
"a",
"new",
"resource",
"in",
"the",
"output",
"model",
"based",
"on",
"data",
"provided"
]
| f092ffc7ed363a5b170890955168500f32de0dd5 | https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/pipeline/main.py#L104-L133 | train |
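A calling sketch for create_resource(); the IRIs, property pairs, and `output_model` (an existing Versa model connection) are made up for illustration.

created, rid = create_resource(
    output_model,                                   # Versa model connection
    rtype='http://example.org/vocab/Person',        # made-up type IRI
    unique=[('name', 'Ada Lovelace')],              # hashed into the new ID
    links=[('http://example.org/vocab/born', '1815')],
    existing_ids=set(),                             # enables de-duplication
    id_helper='http://example.org/entity/',
)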
brunato/lograptor | lograptor/core.py | LogRaptor._read_apps | def _read_apps(self):
"""
Read the configuration of applications returning a dictionary
:return: A dictionary with application names as keys and configuration \
object as values.
"""
apps = {}
for cfgfile in glob.iglob(os.path.join(self.confdir, '*.conf')):
name = os.path.basename(cfgfile)[0:-5]
try:
app = AppLogParser(name, cfgfile, self.args, self.logdir,
self.fields, self.name_cache, self.report)
except (LogRaptorOptionError, LogRaptorConfigError, LogFormatError) as err:
logger.error('cannot add app %r: %s', name, err)
else:
apps[name] = app
if not apps:
raise LogRaptorConfigError('no configured application in %r!' % self.confdir)
return apps | python | def _read_apps(self):
"""
Read the configuration of applications returning a dictionary
:return: A dictionary with application names as keys and configuration \
object as values.
"""
apps = {}
for cfgfile in glob.iglob(os.path.join(self.confdir, '*.conf')):
name = os.path.basename(cfgfile)[0:-5]
try:
app = AppLogParser(name, cfgfile, self.args, self.logdir,
self.fields, self.name_cache, self.report)
except (LogRaptorOptionError, LogRaptorConfigError, LogFormatError) as err:
logger.error('cannot add app %r: %s', name, err)
else:
apps[name] = app
if not apps:
raise LogRaptorConfigError('no configured application in %r!' % self.confdir)
return apps | [
"def",
"_read_apps",
"(",
"self",
")",
":",
"apps",
"=",
"{",
"}",
"for",
"cfgfile",
"in",
"glob",
".",
"iglob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"confdir",
",",
"'*.conf'",
")",
")",
":",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"cfgfile",
")",
"[",
"0",
":",
"-",
"5",
"]",
"try",
":",
"app",
"=",
"AppLogParser",
"(",
"name",
",",
"cfgfile",
",",
"self",
".",
"args",
",",
"self",
".",
"logdir",
",",
"self",
".",
"fields",
",",
"self",
".",
"name_cache",
",",
"self",
".",
"report",
")",
"except",
"(",
"LogRaptorOptionError",
",",
"LogRaptorConfigError",
",",
"LogFormatError",
")",
"as",
"err",
":",
"logger",
".",
"error",
"(",
"'cannot add app %r: %s'",
",",
"name",
",",
"err",
")",
"else",
":",
"apps",
"[",
"name",
"]",
"=",
"app",
"if",
"not",
"apps",
":",
"raise",
"LogRaptorConfigError",
"(",
"'no configured application in %r!'",
"%",
"self",
".",
"confdir",
")",
"return",
"apps"
]
| Read the configuration of applications returning a dictionary
:return: A dictionary with application names as keys and configuration \
object as values. | [
"Read",
"the",
"configuration",
"of",
"applications",
"returning",
"a",
"dictionary"
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L122-L142 | train |
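The _read_apps row above is a glob-and-collect idiom: scan a directory for *.conf files, strip the extension to get the app name, and keep the entries that parse. A hedged sketch of the same shape, with plain file paths standing in for the AppLogParser objects the real method builds (read_configs is an invented name):

import glob
import os

def read_configs(confdir):
    configs = {}
    for cfgfile in glob.iglob(os.path.join(confdir, '*.conf')):
        name = os.path.basename(cfgfile)[:-5]  # drop the trailing '.conf'
        configs[name] = cfgfile
    if not configs:
        raise ValueError('no configured application in %r!' % confdir)
    return configs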
brunato/lograptor | lograptor/core.py | LogRaptor.patterns | def patterns(self):
"""
A tuple with re.RegexObject objects created from regex pattern arguments.
"""
# No explicit argument for patterns ==> consider the first source argument as pattern.
if not self.args.patterns and not self.args.pattern_files:
try:
self.args.patterns.append(self.args.files.pop(0))
except IndexError:
raise LogRaptorArgumentError('PATTERN', 'no search pattern')
# Get the patterns from arguments and files
patterns = set()
if self.args.pattern_files:
patterns.update([p.rstrip('\n') for p in fileinput.input(self.args.pattern_files)])
patterns.update(self.args.patterns)
logger.debug("search patterns to be processed: %r", patterns)
# If one pattern is empty then skip the other patterns
if '' in patterns:
logger.info("an empty pattern provided: match all strings!")
return tuple()
try:
flags = re.IGNORECASE if self.args.case else 0 | re.UNICODE
return tuple([
re.compile(r'(\b%s\b)' % pat if self.args.word else '(%s)' % pat, flags=flags)
for pat in patterns if pat
])
except re.error as err:
raise LogRaptorArgumentError('wrong regex syntax for pattern: %r' % err) | python | def patterns(self):
"""
A tuple with re.RegexObject objects created from regex pattern arguments.
"""
# No explicit argument for patterns ==> consider the first source argument as pattern.
if not self.args.patterns and not self.args.pattern_files:
try:
self.args.patterns.append(self.args.files.pop(0))
except IndexError:
raise LogRaptorArgumentError('PATTERN', 'no search pattern')
# Get the patterns from arguments and files
patterns = set()
if self.args.pattern_files:
patterns.update([p.rstrip('\n') for p in fileinput.input(self.args.pattern_files)])
patterns.update(self.args.patterns)
logger.debug("search patterns to be processed: %r", patterns)
# If one pattern is empty then skip the other patterns
if '' in patterns:
logger.info("an empty pattern provided: match all strings!")
return tuple()
try:
flags = re.IGNORECASE if self.args.case else 0 | re.UNICODE
return tuple([
re.compile(r'(\b%s\b)' % pat if self.args.word else '(%s)' % pat, flags=flags)
for pat in patterns if pat
])
except re.error as err:
raise LogRaptorArgumentError('wrong regex syntax for pattern: %r' % err) | [
"def",
"patterns",
"(",
"self",
")",
":",
"# No explicit argument for patterns ==> consider the first source argument as pattern.",
"if",
"not",
"self",
".",
"args",
".",
"patterns",
"and",
"not",
"self",
".",
"args",
".",
"pattern_files",
":",
"try",
":",
"self",
".",
"args",
".",
"patterns",
".",
"append",
"(",
"self",
".",
"args",
".",
"files",
".",
"pop",
"(",
"0",
")",
")",
"except",
"IndexError",
":",
"raise",
"LogRaptorArgumentError",
"(",
"'PATTERN'",
",",
"'no search pattern'",
")",
"# Get the patterns from arguments and files",
"patterns",
"=",
"set",
"(",
")",
"if",
"self",
".",
"args",
".",
"pattern_files",
":",
"patterns",
".",
"update",
"(",
"[",
"p",
".",
"rstrip",
"(",
"'\\n'",
")",
"for",
"p",
"in",
"fileinput",
".",
"input",
"(",
"self",
".",
"args",
".",
"pattern_files",
")",
"]",
")",
"patterns",
".",
"update",
"(",
"self",
".",
"args",
".",
"patterns",
")",
"logger",
".",
"debug",
"(",
"\"search patterns to be processed: %r\"",
",",
"patterns",
")",
"# If one pattern is empty then skip the other patterns",
"if",
"''",
"in",
"patterns",
":",
"logger",
".",
"info",
"(",
"\"an empty pattern provided: match all strings!\"",
")",
"return",
"tuple",
"(",
")",
"try",
":",
"flags",
"=",
"re",
".",
"IGNORECASE",
"if",
"self",
".",
"args",
".",
"case",
"else",
"0",
"|",
"re",
".",
"UNICODE",
"return",
"tuple",
"(",
"[",
"re",
".",
"compile",
"(",
"r'(\\b%s\\b)'",
"%",
"pat",
"if",
"self",
".",
"args",
".",
"word",
"else",
"'(%s)'",
"%",
"pat",
",",
"flags",
"=",
"flags",
")",
"for",
"pat",
"in",
"patterns",
"if",
"pat",
"]",
")",
"except",
"re",
".",
"error",
"as",
"err",
":",
"raise",
"LogRaptorArgumentError",
"(",
"'wrong regex syntax for pattern: %r'",
"%",
"err",
")"
]
| A tuple with re.RegexObject objects created from regex pattern arguments. | [
"A",
"tuple",
"with",
"re",
".",
"RegexObject",
"objects",
"created",
"from",
"regex",
"pattern",
"arguments",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L233-L263 | train |
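One detail worth flagging in the patterns row above: the expression re.IGNORECASE if self.args.case else 0 | re.UNICODE parses as re.IGNORECASE if self.args.case else (0 | re.UNICODE), because the conditional expression binds last; re.UNICODE is therefore only applied when the case flag is off, which looks unintended. A standalone sketch of the word-boundary wrapping with the precedence made explicit (compile_patterns is an invented helper):

import re

def compile_patterns(patterns, word=False, ignorecase=False):
    # Parenthesized so UNICODE applies in both branches.
    flags = (re.IGNORECASE if ignorecase else 0) | re.UNICODE
    return tuple(
        re.compile(r'(\b%s\b)' % pat if word else '(%s)' % pat, flags)
        for pat in patterns if pat
    )

rx = compile_patterns(['error'], word=True, ignorecase=True)
assert rx[0].search('disk ERROR detected')
assert not rx[0].search('terrors')  # \b anchors reject matches inside words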
brunato/lograptor | lograptor/core.py | LogRaptor.files | def files(self):
"""
A list of input sources. Each item can be a file path, a glob path or URL.
"""
# If no files but a recursion option ==> use the current directory
if not self.args.files and self.recursive:
return ['.']
else:
return self.args.files | python | def files(self):
"""
A list of input sources. Each item can be a file path, a glob path or URL.
"""
# If no files but a recursion option ==> use the current directory
if not self.args.files and self.recursive:
return ['.']
else:
return self.args.files | [
"def",
"files",
"(",
"self",
")",
":",
"# If no files but a recursion option ==> use the current directory",
"if",
"not",
"self",
".",
"args",
".",
"files",
"and",
"self",
".",
"recursive",
":",
"return",
"[",
"'.'",
"]",
"else",
":",
"return",
"self",
".",
"args",
".",
"files"
]
| A list of input sources. Each item can be a file path, a glob path or URL. | [
"A",
"list",
"of",
"input",
"sources",
".",
"Each",
"item",
"can",
"be",
"a",
"file",
"path",
"a",
"glob",
"path",
"or",
"URL",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L266-L274 | train |
brunato/lograptor | lograptor/core.py | LogRaptor.apps | def apps(self):
"""
Dictionary with loaded applications.
"""
logger.debug("initialize applications ...")
enabled = None
apps = self.args.apps or self._config_apps.keys()
unknown = set(apps) - set(self._config_apps.keys())
if unknown:
raise LogRaptorArgumentError("--apps", "not found apps %r" % list(unknown))
if apps or enabled is None:
return {k: v for k, v in self._config_apps.items() if k in apps}
else:
return {k: v for k, v in self._config_apps.items() if k in apps and v.enabled == enabled} | python | def apps(self):
"""
Dictionary with loaded applications.
"""
logger.debug("initialize applications ...")
enabled = None
apps = self.args.apps or self._config_apps.keys()
unknown = set(apps) - set(self._config_apps.keys())
if unknown:
raise LogRaptorArgumentError("--apps", "not found apps %r" % list(unknown))
if apps or enabled is None:
return {k: v for k, v in self._config_apps.items() if k in apps}
else:
return {k: v for k, v in self._config_apps.items() if k in apps and v.enabled == enabled} | [
"def",
"apps",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"initialize applications ...\"",
")",
"enabled",
"=",
"None",
"apps",
"=",
"self",
".",
"args",
".",
"apps",
"or",
"self",
".",
"_config_apps",
".",
"keys",
"(",
")",
"unknown",
"=",
"set",
"(",
"apps",
")",
"-",
"set",
"(",
"self",
".",
"_config_apps",
".",
"keys",
"(",
")",
")",
"if",
"unknown",
":",
"raise",
"LogRaptorArgumentError",
"(",
"\"--apps\"",
",",
"\"not found apps %r\"",
"%",
"list",
"(",
"unknown",
")",
")",
"if",
"apps",
"or",
"enabled",
"is",
"None",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_config_apps",
".",
"items",
"(",
")",
"if",
"k",
"in",
"apps",
"}",
"else",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_config_apps",
".",
"items",
"(",
")",
"if",
"k",
"in",
"apps",
"and",
"v",
".",
"enabled",
"==",
"enabled",
"}"
]
| Dictionary with loaded applications. | [
"Dictionary",
"with",
"loaded",
"applications",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L350-L364 | train |
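The unknown-name check in the apps row above is a plain set difference between requested and configured names. In isolation, with invented app names:

configured = {'postfix': object(), 'dovecot': object()}
requested = ['postfix', 'nginx']
unknown = set(requested) - set(configured)
assert unknown == {'nginx'}  # this is what triggers the LogRaptorArgumentError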
brunato/lograptor | lograptor/core.py | LogRaptor.apptags | def apptags(self):
"""
Map from log app-name to an application.
"""
logger.debug("populate tags map ...")
apps = self._apps.keys()
unknown = set(apps)
unknown.difference_update(self._config_apps.keys())
if unknown:
raise ValueError("unknown apps: %r" % list(unknown))
apps = [v for v in self._config_apps.values() if v.name in apps]
tagmap = {}
for app in sorted(apps, key=lambda x: (x.priority, x.name)):
for tag in app.tags:
if not tag:
raise LogRaptorConfigError('found an empty tag for app %r' % app.name)
try:
tagmap[tag].append(app)
except KeyError:
tagmap[tag] = [app]
return tagmap | python | def apptags(self):
"""
Map from log app-name to an application.
"""
logger.debug("populate tags map ...")
apps = self._apps.keys()
unknown = set(apps)
unknown.difference_update(self._config_apps.keys())
if unknown:
raise ValueError("unknown apps: %r" % list(unknown))
apps = [v for v in self._config_apps.values() if v.name in apps]
tagmap = {}
for app in sorted(apps, key=lambda x: (x.priority, x.name)):
for tag in app.tags:
if not tag:
raise LogRaptorConfigError('found an empty tag for app %r' % app.name)
try:
tagmap[tag].append(app)
except KeyError:
tagmap[tag] = [app]
return tagmap | [
"def",
"apptags",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"populate tags map ...\"",
")",
"apps",
"=",
"self",
".",
"_apps",
".",
"keys",
"(",
")",
"unknown",
"=",
"set",
"(",
"apps",
")",
"unknown",
".",
"difference_update",
"(",
"self",
".",
"_config_apps",
".",
"keys",
"(",
")",
")",
"if",
"unknown",
":",
"raise",
"ValueError",
"(",
"\"unknown apps: %r\"",
"%",
"list",
"(",
"unknown",
")",
")",
"apps",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"_config_apps",
".",
"values",
"(",
")",
"if",
"v",
".",
"name",
"in",
"apps",
"]",
"tagmap",
"=",
"{",
"}",
"for",
"app",
"in",
"sorted",
"(",
"apps",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"priority",
",",
"x",
".",
"name",
")",
")",
":",
"for",
"tag",
"in",
"app",
".",
"tags",
":",
"if",
"not",
"tag",
":",
"raise",
"LogRaptorConfigError",
"(",
"'found an empty tag for app %r'",
"%",
"app",
".",
"name",
")",
"try",
":",
"tagmap",
"[",
"tag",
"]",
".",
"append",
"(",
"app",
")",
"except",
"KeyError",
":",
"tagmap",
"[",
"tag",
"]",
"=",
"[",
"app",
"]",
"return",
"tagmap"
]
| Map from log app-name to an application. | [
"Map",
"from",
"log",
"app",
"-",
"name",
"to",
"an",
"application",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L367-L388 | train |
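The try/except KeyError append in the apptags row is the classic multimap insert; dict.setdefault expresses the same thing in one line. A sketch with (name, tags) tuples standing in for the app objects (the sample data is invented):

apps = [('postfix', ('postfix', 'postfix/smtpd')), ('dovecot', ('dovecot',))]
tagmap = {}
for name, tags in sorted(apps):
    for tag in tags:
        tagmap.setdefault(tag, []).append(name)

assert tagmap['postfix'] == ['postfix']
assert tagmap['postfix/smtpd'] == ['postfix']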
brunato/lograptor | lograptor/core.py | LogRaptor.create_dispatcher | def create_dispatcher(self):
"""
Return a dispatcher for configured channels.
"""
before_context = max(self.args.before_context, self.args.context)
after_context = max(self.args.after_context, self.args.context)
if self.args.files_with_match is not None or self.args.count or self.args.only_matching or self.args.quiet:
# Sending of log lines disabled by arguments
return UnbufferedDispatcher(self._channels)
elif before_context == 0 and after_context == 0:
# Don't need line buffering
return UnbufferedDispatcher(self._channels)
elif self.args.thread:
return ThreadedDispatcher(self._channels, before_context, after_context)
else:
return LineBufferDispatcher(self._channels, before_context, after_context) | python | def create_dispatcher(self):
"""
Return a dispatcher for configured channels.
"""
before_context = max(self.args.before_context, self.args.context)
after_context = max(self.args.after_context, self.args.context)
if self.args.files_with_match is not None or self.args.count or self.args.only_matching or self.args.quiet:
# Sending of log lines disabled by arguments
return UnbufferedDispatcher(self._channels)
elif before_context == 0 and after_context == 0:
# Don't need line buffering
return UnbufferedDispatcher(self._channels)
elif self.args.thread:
return ThreadedDispatcher(self._channels, before_context, after_context)
else:
return LineBufferDispatcher(self._channels, before_context, after_context) | [
"def",
"create_dispatcher",
"(",
"self",
")",
":",
"before_context",
"=",
"max",
"(",
"self",
".",
"args",
".",
"before_context",
",",
"self",
".",
"args",
".",
"context",
")",
"after_context",
"=",
"max",
"(",
"self",
".",
"args",
".",
"after_context",
",",
"self",
".",
"args",
".",
"context",
")",
"if",
"self",
".",
"args",
".",
"files_with_match",
"is",
"not",
"None",
"or",
"self",
".",
"args",
".",
"count",
"or",
"self",
".",
"args",
".",
"only_matching",
"or",
"self",
".",
"args",
".",
"quiet",
":",
"# Sending of log lines disabled by arguments",
"return",
"UnbufferedDispatcher",
"(",
"self",
".",
"_channels",
")",
"elif",
"before_context",
"==",
"0",
"and",
"after_context",
"==",
"0",
":",
"# Don't need line buffering",
"return",
"UnbufferedDispatcher",
"(",
"self",
".",
"_channels",
")",
"elif",
"self",
".",
"args",
".",
"thread",
":",
"return",
"ThreadedDispatcher",
"(",
"self",
".",
"_channels",
",",
"before_context",
",",
"after_context",
")",
"else",
":",
"return",
"LineBufferDispatcher",
"(",
"self",
".",
"_channels",
",",
"before_context",
",",
"after_context",
")"
]
| Return a dispatcher for configured channels. | [
"Return",
"a",
"dispatcher",
"for",
"configured",
"channels",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L562-L578 | train |
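In the create_dispatcher row, the two max() calls let a generic context value act as a floor for both sides of the window; buffering is skipped only when both sides resolve to zero or line output is suppressed. The window arithmetic on its own (context_window is an invented name):

def context_window(before, after, context):
    return max(before, context), max(after, context)

assert context_window(0, 2, 3) == (3, 3)
assert context_window(5, 0, 3) == (5, 3)
assert context_window(0, 0, 0) == (0, 0)  # the unbuffered case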
brunato/lograptor | lograptor/core.py | LogRaptor.get_config | def get_config(self):
"""
Return a formatted text with main configuration parameters.
"""
# Create a dummy report object if necessary
channels = [sect.rsplit('_')[0] for sect in self.config.sections(suffix='_channel')]
channels.sort()
disabled_apps = [app for app in self._config_apps.keys() if app not in self._apps]
return u''.join([
u"\n--- %s configuration ---" % __package__,
u"\nConfiguration file: %s" % self.config.cfgfile,
u"\nConfiguration directory: %s" % self.confdir,
u"\nConfigured applications: %s" % ', '.join(self._config_apps.keys()),
u"\nDisabled applications: %s" % ', '.join(disabled_apps) if disabled_apps else '',
u"\nFilter fields: %s" % ', '.join(self.config.options('fields')),
u"\nOutput channels: %s" % ', '.join(channels) if channels else u'No channels defined',
u"\nReports: %s\n" % ', '.join(
[section[:-7] for section in self.config.sections(suffix='_report')]
),
''
]) | python | def get_config(self):
"""
Return a formatted text with main configuration parameters.
"""
# Create a dummy report object if necessary
channels = [sect.rsplit('_')[0] for sect in self.config.sections(suffix='_channel')]
channels.sort()
disabled_apps = [app for app in self._config_apps.keys() if app not in self._apps]
return u''.join([
u"\n--- %s configuration ---" % __package__,
u"\nConfiguration file: %s" % self.config.cfgfile,
u"\nConfiguration directory: %s" % self.confdir,
u"\nConfigured applications: %s" % ', '.join(self._config_apps.keys()),
u"\nDisabled applications: %s" % ', '.join(disabled_apps) if disabled_apps else '',
u"\nFilter fields: %s" % ', '.join(self.config.options('fields')),
u"\nOutput channels: %s" % ', '.join(channels) if channels else u'No channels defined',
u"\nReports: %s\n" % ', '.join(
[section[:-7] for section in self.config.sections(suffix='_report')]
),
''
]) | [
"def",
"get_config",
"(",
"self",
")",
":",
"# Create a dummy report object if necessary",
"channels",
"=",
"[",
"sect",
".",
"rsplit",
"(",
"'_'",
")",
"[",
"0",
"]",
"for",
"sect",
"in",
"self",
".",
"config",
".",
"sections",
"(",
"suffix",
"=",
"'_channel'",
")",
"]",
"channels",
".",
"sort",
"(",
")",
"disabled_apps",
"=",
"[",
"app",
"for",
"app",
"in",
"self",
".",
"_config_apps",
".",
"keys",
"(",
")",
"if",
"app",
"not",
"in",
"self",
".",
"_apps",
"]",
"return",
"u''",
".",
"join",
"(",
"[",
"u\"\\n--- %s configuration ---\"",
"%",
"__package__",
",",
"u\"\\nConfiguration file: %s\"",
"%",
"self",
".",
"config",
".",
"cfgfile",
",",
"u\"\\nConfiguration directory: %s\"",
"%",
"self",
".",
"confdir",
",",
"u\"\\nConfigured applications: %s\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"_config_apps",
".",
"keys",
"(",
")",
")",
",",
"u\"\\nDisabled applications: %s\"",
"%",
"', '",
".",
"join",
"(",
"disabled_apps",
")",
"if",
"disabled_apps",
"else",
"''",
",",
"u\"\\nFilter fields: %s\"",
"%",
"', '",
".",
"join",
"(",
"self",
".",
"config",
".",
"options",
"(",
"'fields'",
")",
")",
",",
"u\"\\nOutput channels: %s\"",
"%",
"', '",
".",
"join",
"(",
"channels",
")",
"if",
"channels",
"else",
"u'No channels defined'",
",",
"u\"\\nReports: %s\\n\"",
"%",
"', '",
".",
"join",
"(",
"[",
"section",
"[",
":",
"-",
"7",
"]",
"for",
"section",
"in",
"self",
".",
"config",
".",
"sections",
"(",
"suffix",
"=",
"'_report'",
")",
"]",
")",
",",
"''",
"]",
")"
]
| Return a formatted text with main configuration parameters. | [
"Return",
"a",
"formatted",
"text",
"with",
"main",
"configuration",
"parameters",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L600-L620 | train |
brunato/lograptor | lograptor/core.py | LogRaptor.get_run_summary | def get_run_summary(self, run_stats):
"""
Produce a text summary from run statistics.
:param run_stats: A dictionary containing run stats
:return: Formatted multiline string
"""
run_stats = run_stats.copy()
run_stats['files'] = len(run_stats['files'])
summary = [
u'\n--- %s run summary ---' % __package__,
u'Number of processed files: %(files)d',
u'Total lines read: %(lines)d',
u'Total log events matched: %(matches)d',
]
if any([app.matches or app.unparsed for app in self.apps.values()]):
if self.matcher == 'unruled':
summary.append("Applications found (application rules not used):")
for app in filter(lambda x: x.matches, self.apps.values()):
summary.append(u' %s(matches=%d)' % (app.name, app.matches))
else:
summary.append("Applications found:")
for app in filter(lambda x: x.matches or x.unparsed, self.apps.values()):
summary.append(u' %s(matches=%d, unparsed=%s)' % (app.name, app.matches, app.unparsed))
summary.append('\n')
return '\n'.join(summary) % run_stats | python | def get_run_summary(self, run_stats):
"""
Produce a text summary from run statistics.
:param run_stats: A dictionary containing run stats
:return: Formatted multiline string
"""
run_stats = run_stats.copy()
run_stats['files'] = len(run_stats['files'])
summary = [
u'\n--- %s run summary ---' % __package__,
u'Number of processed files: %(files)d',
u'Total lines read: %(lines)d',
u'Total log events matched: %(matches)d',
]
if any([app.matches or app.unparsed for app in self.apps.values()]):
if self.matcher == 'unruled':
summary.append("Applications found (application rules not used):")
for app in filter(lambda x: x.matches, self.apps.values()):
summary.append(u' %s(matches=%d)' % (app.name, app.matches))
else:
summary.append("Applications found:")
for app in filter(lambda x: x.matches or x.unparsed, self.apps.values()):
summary.append(u' %s(matches=%d, unparsed=%s)' % (app.name, app.matches, app.unparsed))
summary.append('\n')
return '\n'.join(summary) % run_stats | [
"def",
"get_run_summary",
"(",
"self",
",",
"run_stats",
")",
":",
"run_stats",
"=",
"run_stats",
".",
"copy",
"(",
")",
"run_stats",
"[",
"'files'",
"]",
"=",
"len",
"(",
"run_stats",
"[",
"'files'",
"]",
")",
"summary",
"=",
"[",
"u'\\n--- %s run summary ---'",
"%",
"__package__",
",",
"u'Number of processed files: %(files)d'",
",",
"u'Total lines read: %(lines)d'",
",",
"u'Total log events matched: %(matches)d'",
",",
"]",
"if",
"any",
"(",
"[",
"app",
".",
"matches",
"or",
"app",
".",
"unparsed",
"for",
"app",
"in",
"self",
".",
"apps",
".",
"values",
"(",
")",
"]",
")",
":",
"if",
"self",
".",
"matcher",
"==",
"'unruled'",
":",
"summary",
".",
"append",
"(",
"\"Applications found (application rules not used):\"",
")",
"for",
"app",
"in",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"matches",
",",
"self",
".",
"apps",
".",
"values",
"(",
")",
")",
":",
"summary",
".",
"append",
"(",
"u' %s(matches=%d)'",
"%",
"(",
"app",
".",
"name",
",",
"app",
".",
"matches",
")",
")",
"else",
":",
"summary",
".",
"append",
"(",
"\"Applications found:\"",
")",
"for",
"app",
"in",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"matches",
"or",
"x",
".",
"unparsed",
",",
"self",
".",
"apps",
".",
"values",
"(",
")",
")",
":",
"summary",
".",
"append",
"(",
"u' %s(matches=%d, unparsed=%s)'",
"%",
"(",
"app",
".",
"name",
",",
"app",
".",
"matches",
",",
"app",
".",
"unparsed",
")",
")",
"summary",
".",
"append",
"(",
"'\\n'",
")",
"return",
"'\\n'",
".",
"join",
"(",
"summary",
")",
"%",
"run_stats"
]
| Produce a text summary from run statistics.
:param run_stats: A dictionary containing run stats
:return: Formatted multiline string | [
"Produce",
"a",
"text",
"summary",
"from",
"run",
"statistics",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/core.py#L622-L647 | train |
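The get_run_summary row relies on %-style mapping keys, which is why run_stats is copied and its 'files' entry replaced by a count before formatting. The same substitution with made-up stats:

run_stats = {'files': 3, 'lines': 1200, 'matches': 42}
summary = '\n'.join([
    'Number of processed files: %(files)d',
    'Total lines read: %(lines)d',
    'Total log events matched: %(matches)d',
])
print(summary % run_stats)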
peergradeio/flask-mongo-profiler | flask_mongo_profiler/helpers.py | add_template_dirs | def add_template_dirs(app):
"""Add flask_mongo_profiler's template directories.
Parameters
----------
app : flask.Flask
"""
template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader(template_dir)]
) | python | def add_template_dirs(app):
"""Add flask_mongo_profiler's template directories.
Parameters
----------
app : flask.Flask
"""
template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader(template_dir)]
) | [
"def",
"add_template_dirs",
"(",
"app",
")",
":",
"template_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
",",
"'templates'",
")",
"app",
".",
"jinja_loader",
"=",
"jinja2",
".",
"ChoiceLoader",
"(",
"[",
"app",
".",
"jinja_loader",
",",
"jinja2",
".",
"FileSystemLoader",
"(",
"template_dir",
")",
"]",
")"
]
| Add flask_mongo_profiler's template directories.
Parameters
----------
app : flask.Flask | [
"Add",
"flask_mongo_profiler",
"s",
"template",
"directories",
"."
]
| a267eeb49fea07c9a24fb370bd9d7a90ed313ccf | https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/helpers.py#L7-L18 | train |
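The ChoiceLoader in the add_template_dirs row tries loaders in order, so an application's own templates win and the package's templates only fill the gaps. A sketch of the same wiring; 'extra/templates' is an invented path:

import jinja2
from flask import Flask

app = Flask(__name__)
app.jinja_loader = jinja2.ChoiceLoader([
    app.jinja_loader,                            # the app's templates first
    jinja2.FileSystemLoader('extra/templates'),  # fallback templates second
])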
berkeley-cocosci/Wallace | wallace/command_line.py | setup | def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) | python | def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) | [
"def",
"setup",
"(",
")",
":",
"# Create the Wallace config file if it does not already exist.",
"config_name",
"=",
"\".wallaceconfig\"",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"config_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"config_path",
")",
":",
"log",
"(",
"\"Wallace config file already exists.\"",
",",
"chevrons",
"=",
"False",
")",
"else",
":",
"log",
"(",
"\"Creating Wallace config file at ~/.wallaceconfig...\"",
",",
"chevrons",
"=",
"False",
")",
"wallace_module_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"src",
"=",
"os",
".",
"path",
".",
"join",
"(",
"wallace_module_path",
",",
"\"config\"",
",",
"config_name",
")",
"shutil",
".",
"copyfile",
"(",
"src",
",",
"config_path",
")"
]
| Walk the user through the Wallace setup. | [
"Walk",
"the",
"user",
"though",
"the",
"Wallace",
"setup",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L66-L80 | train |
berkeley-cocosci/Wallace | wallace/command_line.py | summary | def summary(app):
"""Print a summary of a deployed app's status."""
r = requests.get('https://{}.herokuapp.com/summary'.format(app))
summary = r.json()['summary']
click.echo("\nstatus \t| count")
click.echo("----------------")
for s in summary:
click.echo("{}\t| {}".format(s[0], s[1]))
num_101s = sum([s[1] for s in summary if s[0] == 101])
num_10xs = sum([s[1] for s in summary if s[0] >= 100])
if num_10xs > 0:
click.echo("\nYield: {:.2%}".format(1.0 * num_101s / num_10xs)) | python | def summary(app):
"""Print a summary of a deployed app's status."""
r = requests.get('https://{}.herokuapp.com/summary'.format(app))
summary = r.json()['summary']
click.echo("\nstatus \t| count")
click.echo("----------------")
for s in summary:
click.echo("{}\t| {}".format(s[0], s[1]))
num_101s = sum([s[1] for s in summary if s[0] == 101])
num_10xs = sum([s[1] for s in summary if s[0] >= 100])
if num_10xs > 0:
click.echo("\nYield: {:.2%}".format(1.0 * num_101s / num_10xs)) | [
"def",
"summary",
"(",
"app",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"'https://{}.herokuapp.com/summary'",
".",
"format",
"(",
"app",
")",
")",
"summary",
"=",
"r",
".",
"json",
"(",
")",
"[",
"'summary'",
"]",
"click",
".",
"echo",
"(",
"\"\\nstatus \\t| count\"",
")",
"click",
".",
"echo",
"(",
"\"----------------\"",
")",
"for",
"s",
"in",
"summary",
":",
"click",
".",
"echo",
"(",
"\"{}\\t| {}\"",
".",
"format",
"(",
"s",
"[",
"0",
"]",
",",
"s",
"[",
"1",
"]",
")",
")",
"num_101s",
"=",
"sum",
"(",
"[",
"s",
"[",
"1",
"]",
"for",
"s",
"in",
"summary",
"if",
"s",
"[",
"0",
"]",
"==",
"101",
"]",
")",
"num_10xs",
"=",
"sum",
"(",
"[",
"s",
"[",
"1",
"]",
"for",
"s",
"in",
"summary",
"if",
"s",
"[",
"0",
"]",
">=",
"100",
"]",
")",
"if",
"num_10xs",
">",
"0",
":",
"click",
".",
"echo",
"(",
"\"\\nYield: {:.2%}\"",
".",
"format",
"(",
"1.0",
"*",
"num_101s",
"/",
"num_10xs",
")",
")"
]
| Print a summary of a deployed app's status. | [
"Print",
"a",
"summary",
"of",
"a",
"deployed",
"app",
"s",
"status",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L224-L235 | train |
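The yield figure in the summary row appears to divide completed participants (status 101) by everyone with a terminal status (>= 100), guarding against a zero division when nobody has finished. With invented (status, count) pairs:

rows = [(1, 5), (101, 8), (103, 2)]
num_101s = sum(c for s, c in rows if s == 101)
num_10xs = sum(c for s, c in rows if s >= 100)
if num_10xs > 0:
    print('Yield: {:.2%}'.format(1.0 * num_101s / num_10xs))  # Yield: 80.00%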
berkeley-cocosci/Wallace | wallace/command_line.py | scale_up_dynos | def scale_up_dynos(id):
"""Scale up the Heroku dynos."""
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
dyno_type = config.get('Server Parameters', 'dyno_type')
num_dynos_web = config.get('Server Parameters', 'num_dynos_web')
num_dynos_worker = config.get('Server Parameters', 'num_dynos_worker')
log("Scaling up the dynos...")
subprocess.call(
"heroku ps:scale web=" + str(num_dynos_web) + ":" +
str(dyno_type) + " --app " + id, shell=True)
subprocess.call(
"heroku ps:scale worker=" + str(num_dynos_worker) + ":" +
str(dyno_type) + " --app " + id, shell=True)
clock_on = config.getboolean('Server Parameters', 'clock_on')
if clock_on:
subprocess.call(
"heroku ps:scale clock=1:" + dyno_type + " --app " + id,
shell=True) | python | def scale_up_dynos(id):
"""Scale up the Heroku dynos."""
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
dyno_type = config.get('Server Parameters', 'dyno_type')
num_dynos_web = config.get('Server Parameters', 'num_dynos_web')
num_dynos_worker = config.get('Server Parameters', 'num_dynos_worker')
log("Scaling up the dynos...")
subprocess.call(
"heroku ps:scale web=" + str(num_dynos_web) + ":" +
str(dyno_type) + " --app " + id, shell=True)
subprocess.call(
"heroku ps:scale worker=" + str(num_dynos_worker) + ":" +
str(dyno_type) + " --app " + id, shell=True)
clock_on = config.getboolean('Server Parameters', 'clock_on')
if clock_on:
subprocess.call(
"heroku ps:scale clock=1:" + dyno_type + " --app " + id,
shell=True) | [
"def",
"scale_up_dynos",
"(",
"id",
")",
":",
"# Load psiTurk configuration.",
"config",
"=",
"PsiturkConfig",
"(",
")",
"config",
".",
"load_config",
"(",
")",
"dyno_type",
"=",
"config",
".",
"get",
"(",
"'Server Parameters'",
",",
"'dyno_type'",
")",
"num_dynos_web",
"=",
"config",
".",
"get",
"(",
"'Server Parameters'",
",",
"'num_dynos_web'",
")",
"num_dynos_worker",
"=",
"config",
".",
"get",
"(",
"'Server Parameters'",
",",
"'num_dynos_worker'",
")",
"log",
"(",
"\"Scaling up the dynos...\"",
")",
"subprocess",
".",
"call",
"(",
"\"heroku ps:scale web=\"",
"+",
"str",
"(",
"num_dynos_web",
")",
"+",
"\":\"",
"+",
"str",
"(",
"dyno_type",
")",
"+",
"\" --app \"",
"+",
"id",
",",
"shell",
"=",
"True",
")",
"subprocess",
".",
"call",
"(",
"\"heroku ps:scale worker=\"",
"+",
"str",
"(",
"num_dynos_worker",
")",
"+",
"\":\"",
"+",
"str",
"(",
"dyno_type",
")",
"+",
"\" --app \"",
"+",
"id",
",",
"shell",
"=",
"True",
")",
"clock_on",
"=",
"config",
".",
"getboolean",
"(",
"'Server Parameters'",
",",
"'clock_on'",
")",
"if",
"clock_on",
":",
"subprocess",
".",
"call",
"(",
"\"heroku ps:scale clock=1:\"",
"+",
"dyno_type",
"+",
"\" --app \"",
"+",
"id",
",",
"shell",
"=",
"True",
")"
]
| Scale up the Heroku dynos. | [
"Scale",
"up",
"the",
"Heroku",
"dynos",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L321-L344 | train |
berkeley-cocosci/Wallace | wallace/command_line.py | deploy | def deploy(verbose, app):
"""Deploy app using Heroku to MTurk."""
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Set the mode.
config.set("Experiment Configuration", "mode", "deploy")
config.set("Server Parameters", "logfile", "-")
# Ensure that psiTurk is not in sandbox mode.
config.set("Shell Parameters", "launch_in_sandbox_mode", "false")
# Do shared setup.
deploy_sandbox_shared_setup(verbose=verbose, app=app) | python | def deploy(verbose, app):
"""Deploy app using Heroku to MTurk."""
# Load psiTurk configuration.
config = PsiturkConfig()
config.load_config()
# Set the mode.
config.set("Experiment Configuration", "mode", "deploy")
config.set("Server Parameters", "logfile", "-")
# Ensure that psiTurk is not in sandbox mode.
config.set("Shell Parameters", "launch_in_sandbox_mode", "false")
# Do shared setup.
deploy_sandbox_shared_setup(verbose=verbose, app=app) | [
"def",
"deploy",
"(",
"verbose",
",",
"app",
")",
":",
"# Load psiTurk configuration.",
"config",
"=",
"PsiturkConfig",
"(",
")",
"config",
".",
"load_config",
"(",
")",
"# Set the mode.",
"config",
".",
"set",
"(",
"\"Experiment Configuration\"",
",",
"\"mode\"",
",",
"\"deploy\"",
")",
"config",
".",
"set",
"(",
"\"Server Parameters\"",
",",
"\"logfile\"",
",",
"\"-\"",
")",
"# Ensure that psiTurk is not in sandbox mode.",
"config",
".",
"set",
"(",
"\"Shell Parameters\"",
",",
"\"launch_in_sandbox_mode\"",
",",
"\"false\"",
")",
"# Do shared setup.",
"deploy_sandbox_shared_setup",
"(",
"verbose",
"=",
"verbose",
",",
"app",
"=",
"app",
")"
]
| Deploy app using Heroku to MTurk. | [
"Deploy",
"app",
"using",
"Heroku",
"to",
"MTurk",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L513-L527 | train |
berkeley-cocosci/Wallace | wallace/command_line.py | qualify | def qualify(qualification, value, worker):
"""Assign a qualification to a worker."""
# create connection to AWS
from boto.mturk.connection import MTurkConnection
config = PsiturkConfig()
config.load_config()
aws_access_key_id = config.get('AWS Access', 'aws_access_key_id')
aws_secret_access_key = config.get('AWS Access', 'aws_secret_access_key')
conn = MTurkConnection(aws_access_key_id, aws_secret_access_key)
def get_workers_with_qualification(qualification):
"""Get workers with the given qualification."""
results = []
continue_flag = True
page = 1
while(continue_flag):
new_results = conn.get_qualifications_for_qualification_type(
qualification,
page_size=100,
page_number=page)
if(len(new_results) == 0):
continue_flag = False
else:
results.extend(new_results)
page = page + 1
return results
results = get_workers_with_qualification(qualification)
workers = [x.SubjectId for x in results]
# assign the qualification
click.echo(
"Assigning qualification {} with value {} to worker {}".format(
qualification,
value,
worker))
if worker in workers:
result = conn.update_qualification_score(qualification, worker, value)
else:
result = conn.assign_qualification(qualification, worker, value)
if result:
click.echo(result)
# print out the current set of workers with the qualification
results = get_workers_with_qualification(qualification)
click.echo("{} workers with qualification {}:".format(
len(results),
qualification))
values = [r.IntegerValue for r in results]
unique_values = list(set([r.IntegerValue for r in results]))
for v in unique_values:
click.echo("{} with value {}".format(
len([val for val in values if val == v]),
v)) | python | def qualify(qualification, value, worker):
"""Assign a qualification to a worker."""
# create connection to AWS
from boto.mturk.connection import MTurkConnection
config = PsiturkConfig()
config.load_config()
aws_access_key_id = config.get('AWS Access', 'aws_access_key_id')
aws_secret_access_key = config.get('AWS Access', 'aws_secret_access_key')
conn = MTurkConnection(aws_access_key_id, aws_secret_access_key)
def get_workers_with_qualification(qualification):
"""Get workers with the given qualification."""
results = []
continue_flag = True
page = 1
while(continue_flag):
new_results = conn.get_qualifications_for_qualification_type(
qualification,
page_size=100,
page_number=page)
if(len(new_results) == 0):
continue_flag = False
else:
results.extend(new_results)
page = page + 1
return results
results = get_workers_with_qualification(qualification)
workers = [x.SubjectId for x in results]
# assign the qualification
click.echo(
"Assigning qualification {} with value {} to worker {}".format(
qualification,
value,
worker))
if worker in workers:
result = conn.update_qualification_score(qualification, worker, value)
else:
result = conn.assign_qualification(qualification, worker, value)
if result:
click.echo(result)
# print out the current set of workers with the qualification
results = get_workers_with_qualification(qualification)
click.echo("{} workers with qualification {}:".format(
len(results),
qualification))
values = [r.IntegerValue for r in results]
unique_values = list(set([r.IntegerValue for r in results]))
for v in unique_values:
click.echo("{} with value {}".format(
len([val for val in values if val == v]),
v)) | [
"def",
"qualify",
"(",
"qualification",
",",
"value",
",",
"worker",
")",
":",
"# create connection to AWS",
"from",
"boto",
".",
"mturk",
".",
"connection",
"import",
"MTurkConnection",
"config",
"=",
"PsiturkConfig",
"(",
")",
"config",
".",
"load_config",
"(",
")",
"aws_access_key_id",
"=",
"config",
".",
"get",
"(",
"'AWS Access'",
",",
"'aws_access_key_id'",
")",
"aws_secret_access_key",
"=",
"config",
".",
"get",
"(",
"'AWS Access'",
",",
"'aws_secret_access_key'",
")",
"conn",
"=",
"MTurkConnection",
"(",
"aws_access_key_id",
",",
"aws_secret_access_key",
")",
"def",
"get_workers_with_qualification",
"(",
"qualification",
")",
":",
"\"\"\"Get workers with the given qualification.\"\"\"",
"results",
"=",
"[",
"]",
"continue_flag",
"=",
"True",
"page",
"=",
"1",
"while",
"(",
"continue_flag",
")",
":",
"new_results",
"=",
"conn",
".",
"get_qualifications_for_qualification_type",
"(",
"qualification",
",",
"page_size",
"=",
"100",
",",
"page_number",
"=",
"page",
")",
"if",
"(",
"len",
"(",
"new_results",
")",
"==",
"0",
")",
":",
"continue_flag",
"=",
"False",
"else",
":",
"results",
".",
"extend",
"(",
"new_results",
")",
"page",
"=",
"page",
"+",
"1",
"return",
"results",
"results",
"=",
"get_workers_with_qualification",
"(",
"qualification",
")",
"workers",
"=",
"[",
"x",
".",
"SubjectId",
"for",
"x",
"in",
"results",
"]",
"# assign the qualification",
"click",
".",
"echo",
"(",
"\"Assigning qualification {} with value {} to worker {}\"",
".",
"format",
"(",
"qualification",
",",
"value",
",",
"worker",
")",
")",
"if",
"worker",
"in",
"workers",
":",
"result",
"=",
"conn",
".",
"update_qualification_score",
"(",
"qualification",
",",
"worker",
",",
"value",
")",
"else",
":",
"result",
"=",
"conn",
".",
"assign_qualification",
"(",
"qualification",
",",
"worker",
",",
"value",
")",
"if",
"result",
":",
"click",
".",
"echo",
"(",
"result",
")",
"# print out the current set of workers with the qualification",
"results",
"=",
"get_workers_with_qualification",
"(",
"qualification",
")",
"click",
".",
"echo",
"(",
"\"{} workers with qualification {}:\"",
".",
"format",
"(",
"len",
"(",
"results",
")",
",",
"qualification",
")",
")",
"values",
"=",
"[",
"r",
".",
"IntegerValue",
"for",
"r",
"in",
"results",
"]",
"unique_values",
"=",
"list",
"(",
"set",
"(",
"[",
"r",
".",
"IntegerValue",
"for",
"r",
"in",
"results",
"]",
")",
")",
"for",
"v",
"in",
"unique_values",
":",
"click",
".",
"echo",
"(",
"\"{} with value {}\"",
".",
"format",
"(",
"len",
"(",
"[",
"val",
"for",
"val",
"in",
"values",
"if",
"val",
"==",
"v",
"]",
")",
",",
"v",
")",
")"
]
| Assign a qualification to a worker. | [
"Assign",
"a",
"qualification",
"to",
"a",
"worker",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L534-L593 | train |
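The inner helper in the qualify row pages through MTurk results until an empty page comes back -- a fetch-until-empty loop that works for any paged API. The same loop against a stubbed fetcher (fetch_all, fetch_page and the sample pages are invented):

def fetch_all(fetch_page):
    results, page = [], 1
    while True:
        batch = fetch_page(page)
        if not batch:
            return results
        results.extend(batch)
        page += 1

pages = {1: ['w1', 'w2'], 2: ['w3']}
assert fetch_all(lambda p: pages.get(p, [])) == ['w1', 'w2', 'w3']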
berkeley-cocosci/Wallace | wallace/command_line.py | dump_database | def dump_database(id):
"""Backup the Postgres database locally."""
log("Generating a backup of the database on Heroku...")
dump_filename = "data.dump"
data_directory = "data"
dump_dir = os.path.join(data_directory, id)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
subprocess.call("heroku pg:backups capture --app " + id, shell=True)
backup_url = subprocess.check_output(
"heroku pg:backups public-url --app " + id, shell=True)
backup_url = backup_url.replace('"', '').rstrip()
backup_url = re.search("https:.*", backup_url).group(0)
print(backup_url)
log("Downloading the backup...")
dump_path = os.path.join(dump_dir, dump_filename)
with open(dump_path, 'wb') as file:
subprocess.call(['curl', '-o', dump_path, backup_url], stdout=file)
return dump_path | python | def dump_database(id):
"""Backup the Postgres database locally."""
log("Generating a backup of the database on Heroku...")
dump_filename = "data.dump"
data_directory = "data"
dump_dir = os.path.join(data_directory, id)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
subprocess.call("heroku pg:backups capture --app " + id, shell=True)
backup_url = subprocess.check_output(
"heroku pg:backups public-url --app " + id, shell=True)
backup_url = backup_url.replace('"', '').rstrip()
backup_url = re.search("https:.*", backup_url).group(0)
print(backup_url)
log("Downloading the backup...")
dump_path = os.path.join(dump_dir, dump_filename)
with open(dump_path, 'wb') as file:
subprocess.call(['curl', '-o', dump_path, backup_url], stdout=file)
return dump_path | [
"def",
"dump_database",
"(",
"id",
")",
":",
"log",
"(",
"\"Generating a backup of the database on Heroku...\"",
")",
"dump_filename",
"=",
"\"data.dump\"",
"data_directory",
"=",
"\"data\"",
"dump_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_directory",
",",
"id",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dump_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"dump_dir",
")",
"subprocess",
".",
"call",
"(",
"\"heroku pg:backups capture --app \"",
"+",
"id",
",",
"shell",
"=",
"True",
")",
"backup_url",
"=",
"subprocess",
".",
"check_output",
"(",
"\"heroku pg:backups public-url --app \"",
"+",
"id",
",",
"shell",
"=",
"True",
")",
"backup_url",
"=",
"backup_url",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
".",
"rstrip",
"(",
")",
"backup_url",
"=",
"re",
".",
"search",
"(",
"\"https:.*\"",
",",
"backup_url",
")",
".",
"group",
"(",
"0",
")",
"print",
"(",
"backup_url",
")",
"log",
"(",
"\"Downloading the backup...\"",
")",
"dump_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dump_dir",
",",
"dump_filename",
")",
"with",
"open",
"(",
"dump_path",
",",
"'wb'",
")",
"as",
"file",
":",
"subprocess",
".",
"call",
"(",
"[",
"'curl'",
",",
"'-o'",
",",
"dump_path",
",",
"backup_url",
"]",
",",
"stdout",
"=",
"file",
")",
"return",
"dump_path"
]
| Backup the Postgres database locally. | [
"Backup",
"the",
"Postgres",
"database",
"locally",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L596-L619 | train |
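Heroku prints the backup URL quoted and newline-terminated, hence the replace/rstrip plus regex search in the dump_database row. The extraction step alone (the sample output string is invented):

import re

raw = '"https://example.com/backups/data.dump"\n'
url = re.search(r'https:.*', raw.replace('"', '').rstrip()).group(0)
assert url == 'https://example.com/backups/data.dump'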
berkeley-cocosci/Wallace | wallace/command_line.py | backup | def backup(app):
"""Dump the database."""
dump_path = dump_database(app)
config = PsiturkConfig()
config.load_config()
conn = boto.connect_s3(
config.get('AWS Access', 'aws_access_key_id'),
config.get('AWS Access', 'aws_secret_access_key'),
)
bucket = conn.create_bucket(
app,
location=boto.s3.connection.Location.DEFAULT
)
k = boto.s3.key.Key(bucket)
k.key = 'database.dump'
k.set_contents_from_filename(dump_path)
url = k.generate_url(expires_in=0, query_auth=False)
log("The database backup URL is...")
print(url) | python | def backup(app):
"""Dump the database."""
dump_path = dump_database(app)
config = PsiturkConfig()
config.load_config()
conn = boto.connect_s3(
config.get('AWS Access', 'aws_access_key_id'),
config.get('AWS Access', 'aws_secret_access_key'),
)
bucket = conn.create_bucket(
app,
location=boto.s3.connection.Location.DEFAULT
)
k = boto.s3.key.Key(bucket)
k.key = 'database.dump'
k.set_contents_from_filename(dump_path)
url = k.generate_url(expires_in=0, query_auth=False)
log("The database backup URL is...")
print(url) | [
"def",
"backup",
"(",
"app",
")",
":",
"dump_path",
"=",
"dump_database",
"(",
"app",
")",
"config",
"=",
"PsiturkConfig",
"(",
")",
"config",
".",
"load_config",
"(",
")",
"conn",
"=",
"boto",
".",
"connect_s3",
"(",
"config",
".",
"get",
"(",
"'AWS Access'",
",",
"'aws_access_key_id'",
")",
",",
"config",
".",
"get",
"(",
"'AWS Access'",
",",
"'aws_secret_access_key'",
")",
",",
")",
"bucket",
"=",
"conn",
".",
"create_bucket",
"(",
"app",
",",
"location",
"=",
"boto",
".",
"s3",
".",
"connection",
".",
"Location",
".",
"DEFAULT",
")",
"k",
"=",
"boto",
".",
"s3",
".",
"key",
".",
"Key",
"(",
"bucket",
")",
"k",
".",
"key",
"=",
"'database.dump'",
"k",
".",
"set_contents_from_filename",
"(",
"dump_path",
")",
"url",
"=",
"k",
".",
"generate_url",
"(",
"expires_in",
"=",
"0",
",",
"query_auth",
"=",
"False",
")",
"log",
"(",
"\"The database backup URL is...\"",
")",
"print",
"(",
"url",
")"
]
| Dump the database. | [
"Dump",
"the",
"database",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L622-L645 | train |
berkeley-cocosci/Wallace | wallace/command_line.py | create | def create(example):
"""Create a copy of the given example."""
try:
this_dir = os.path.dirname(os.path.realpath(__file__))
example_dir = os.path.join(this_dir, os.pardir, "examples", example)
shutil.copytree(example_dir, os.path.join(os.getcwd(), example))
log("Example created.", delay=0)
except TypeError:
click.echo("Example '{}' does not exist.".format(example))
except OSError:
click.echo("Example '{}' already exists here.".format(example)) | python | def create(example):
"""Create a copy of the given example."""
try:
this_dir = os.path.dirname(os.path.realpath(__file__))
example_dir = os.path.join(this_dir, os.pardir, "examples", example)
shutil.copytree(example_dir, os.path.join(os.getcwd(), example))
log("Example created.", delay=0)
except TypeError:
click.echo("Example '{}' does not exist.".format(example))
except OSError:
click.echo("Example '{}' already exists here.".format(example)) | [
"def",
"create",
"(",
"example",
")",
":",
"try",
":",
"this_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"example_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"this_dir",
",",
"os",
".",
"pardir",
",",
"\"examples\"",
",",
"example",
")",
"shutil",
".",
"copytree",
"(",
"example_dir",
",",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"example",
")",
")",
"log",
"(",
"\"Example created.\"",
",",
"delay",
"=",
"0",
")",
"except",
"TypeError",
":",
"click",
".",
"echo",
"(",
"\"Example '{}' does not exist.\"",
".",
"format",
"(",
"example",
")",
")",
"except",
"OSError",
":",
"click",
".",
"echo",
"(",
"\"Example '{}' already exists here.\"",
".",
"format",
"(",
"example",
")",
")"
]
| Create a copy of the given example. | [
"Create",
"a",
"copy",
"of",
"the",
"given",
"example",
"."
]
| 3650c0bc3b0804d0adb1d178c5eba9992babb1b0 | https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L815-L825 | train |
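The two except clauses in the create row handle a missing example name (TypeError) and an existing destination (shutil.copytree raising OSError when the target already exists). The happy path plus the second guard, with invented names:

import os
import shutil

try:
    shutil.copytree('examples/demo', os.path.join(os.getcwd(), 'demo'))
except OSError:
    print("Example 'demo' already exists here.")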
brunato/lograptor | lograptor/timedate.py | get_datetime_interval | def get_datetime_interval(timestamp, diff, offset=0):
"""
Returns datetime interval from timestamp backward in the past,
computed using the milliseconds difference passed as argument.
The final datetime is corrected with an optional offset.
"""
fin_datetime = datetime.datetime.fromtimestamp(timestamp + offset)
ini_datetime = datetime.datetime.fromtimestamp(timestamp - diff)
return ini_datetime, fin_datetime | python | def get_datetime_interval(timestamp, diff, offset=0):
"""
Returns datetime interval from timestamp backward in the past,
computed using the milliseconds difference passed as argument.
The final datetime is corrected with an optional offset.
"""
fin_datetime = datetime.datetime.fromtimestamp(timestamp + offset)
ini_datetime = datetime.datetime.fromtimestamp(timestamp - diff)
return ini_datetime, fin_datetime | [
"def",
"get_datetime_interval",
"(",
"timestamp",
",",
"diff",
",",
"offset",
"=",
"0",
")",
":",
"fin_datetime",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
"+",
"offset",
")",
"ini_datetime",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
"-",
"diff",
")",
"return",
"ini_datetime",
",",
"fin_datetime"
]
| Returns datetime interval from timestamp backward in the past,
computed using the milliseconds difference passed as argument.
The final datetime is corrected with an optional offset. | [
"Returns",
"datetime",
"interval",
"from",
"timestamp",
"backward",
"in",
"the",
"past",
"computed",
"using",
"the",
"milliseconds",
"difference",
"passed",
"as",
"argument",
".",
"The",
"final",
"datetime",
"is",
"corrected",
"with",
"an",
"optional",
"offset",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/timedate.py#L84-L92 | train |
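Note the docstring in the get_datetime_interval row speaks of a milliseconds difference, but datetime.fromtimestamp works in seconds, so diff is effectively subtracted as seconds. A worked example of a one-hour window ending now (offset left at its default of 0):

import datetime
import time

now = time.time()
start, end = (datetime.datetime.fromtimestamp(now - 3600),
              datetime.datetime.fromtimestamp(now))
assert abs((end - start).total_seconds() - 3600.0) < 1e-3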
brunato/lograptor | lograptor/timedate.py | strftimegen | def strftimegen(start_dt, end_dt):
"""
Return a generator function for datetime format strings.
    The generator produces a day-by-day sequence starting from the first datetime
to the second datetime argument.
"""
if start_dt > end_dt:
raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt))
def iterftime(string):
date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None]
if not date_subs:
yield string
else:
dt = start_dt
date_path = string
while end_dt >= dt:
for item in date_subs:
date_path = item[1].sub(dt.strftime(item[0]), date_path)
yield date_path
dt = dt + datetime.timedelta(days=1)
return iterftime | python | def strftimegen(start_dt, end_dt):
"""
Return a generator function for datetime format strings.
    The generator produces a day-by-day sequence starting from the first datetime
to the second datetime argument.
"""
if start_dt > end_dt:
raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt))
def iterftime(string):
date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None]
if not date_subs:
yield string
else:
dt = start_dt
date_path = string
while end_dt >= dt:
for item in date_subs:
date_path = item[1].sub(dt.strftime(item[0]), date_path)
yield date_path
dt = dt + datetime.timedelta(days=1)
return iterftime | [
"def",
"strftimegen",
"(",
"start_dt",
",",
"end_dt",
")",
":",
"if",
"start_dt",
">",
"end_dt",
":",
"raise",
"ValueError",
"(",
"\"the start datetime is after the end datetime: (%r,%r)\"",
"%",
"(",
"start_dt",
",",
"end_dt",
")",
")",
"def",
"iterftime",
"(",
"string",
")",
":",
"date_subs",
"=",
"[",
"i",
"for",
"i",
"in",
"DATE_FORMATS",
"if",
"i",
"[",
"1",
"]",
".",
"search",
"(",
"string",
")",
"is",
"not",
"None",
"]",
"if",
"not",
"date_subs",
":",
"yield",
"string",
"else",
":",
"dt",
"=",
"start_dt",
"date_path",
"=",
"string",
"while",
"end_dt",
">=",
"dt",
":",
"for",
"item",
"in",
"date_subs",
":",
"date_path",
"=",
"item",
"[",
"1",
"]",
".",
"sub",
"(",
"dt",
".",
"strftime",
"(",
"item",
"[",
"0",
"]",
")",
",",
"date_path",
")",
"yield",
"date_path",
"dt",
"=",
"dt",
"+",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"return",
"iterftime"
]
| Return a generator function for datetime format strings.
    The generator produces a day-by-day sequence starting from the first datetime
to the second datetime argument. | [
"Return",
"a",
"generator",
"function",
"for",
"datetime",
"format",
"strings",
".",
"The",
"generator",
"produce",
"a",
"day",
"-",
"by",
"-",
"day",
"sequence",
"starting",
"from",
"the",
"first",
"datetime",
"to",
"the",
"second",
"datetime",
"argument",
"."
]
| b1f09fe1b429ed15110610092704ef12d253f3c9 | https://github.com/brunato/lograptor/blob/b1f09fe1b429ed15110610092704ef12d253f3c9/lograptor/timedate.py#L181-L203 | train |
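A self-contained sketch of the day-by-day substitution in the strftimegen row. The one-entry DATE_FORMATS table below is an assumption standing in for the module-level table this row does not show, and the sketch resets the path from the template on every iteration; in the version above date_path is initialized once outside the while loop, so once a placeholder has been substituted, later days appear to yield the same string.

import datetime
import re

DATE_FORMATS = [('%Y%m%d', re.compile(r'%Ymd'))]  # invented placeholder rule

def iterftime(template, start_dt, end_dt):
    subs = [item for item in DATE_FORMATS if item[1].search(template)]
    dt = start_dt
    while end_dt >= dt:
        path = template  # reset each day so the placeholder matches again
        for fmt, rx in subs:
            path = rx.sub(dt.strftime(fmt), path)
        yield path
        dt += datetime.timedelta(days=1)

start = datetime.datetime(2023, 1, 1)
end = start + datetime.timedelta(days=1)
print(list(iterftime('/var/log/app.%Ymd.log', start, end)))
# ['/var/log/app.20230101.log', '/var/log/app.20230102.log']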
Kortemme-Lab/klab | klab/bio/fragments/generate_fragments.py | setup_jobs | def setup_jobs(outpath, options, input_files):
''' This function sets up the jobs by creating the necessary input files as expected.
- outpath is where the output is to be stored.
- options is the optparse options object.
- input_files is a list of paths to input files.
'''
job_inputs = None
reverse_mapping = None
fasta_file_contents = {}
# Generate FASTA files for PDB inputs
# fasta_file_contents is a mapping from a file path to a pair (FASTA contents, file type). We remember the file type
# since we offset residue IDs depending on file type i.e. for FASTA files, we treat each sequence separately and do
# not renumber the fragments in postprocessing. For PDB files, however, we need to respect the order and length of
# sequences so that we renumber the fragments appropriately in postprocessing - we assume that if a PDB file is passed in
# then all chains (protein, RNA, or DNA) will be used in a Rosetta run.
for input_file in input_files:
assert(not(fasta_file_contents.get(input_file)))
if any(fnmatch(input_file, x) for x in pdb_file_wildcards):
pdb = PDB.from_filepath(input_file, strict=True)
pdb.pdb_id = os.path.basename(input_file).split('.')[0]
if pdb.pdb_id.startswith('pdb') and len(pdb.pdb_id) >= 7:
# Hack to rename FASTA identifiers for pdb*.ent files which are present in mirrors of the PDB
pdb.pdb_id = pdb.pdb_id.replace('pdb', '')
fasta_file_contents[input_file] = (pdb.create_fasta(prefer_seqres_order = False), 'PDB')
else:
fasta_file_contents[input_file] = (read_file(input_file), 'FASTA')
# Extract sequences from the input FASTA files.
found_sequences, reverse_mapping, errors = get_sequences(options, fasta_file_contents)
if found_sequences:
reformat(found_sequences)
if errors:
return None, False, errors
# Discard sequences that are the wrong chain.
desired_sequences = {}
for key, sequence in found_sequences.iteritems():
pdb_id, chain, file_name = key
if options.chain is None or chain == options.chain:
desired_sequences[key] = sequence
# Create the input FASTA and script files.
job_inputs, errors = create_inputs(options, outpath, desired_sequences)
# Create the reverse mapping file
if reverse_mapping:
segment_mapping_file = os.path.join(outpath, "segment_map.json")
colorprinter.message("Creating a reverse mapping file %s." % segment_mapping_file)
write_file(segment_mapping_file, json.dumps(reverse_mapping))
# Create the post-processing script file
post_processing_script = read_file(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'post_processing.py'))
write_file(os.path.join(outpath, 'post_processing.py'), post_processing_script, 'w')
# Create the secondary structure filter file
if options.secondary_structure_file:
write_file(os.path.join(outpath, 'ss_filter.json'), json.dumps({'secondary_structure_filter' : SecondaryStructureDefinition.from_filepath(options.secondary_structure_file).data}), 'w')
return job_inputs, reverse_mapping != None, errors | python | def setup_jobs(outpath, options, input_files):
''' This function sets up the jobs by creating the necessary input files as expected.
- outpath is where the output is to be stored.
- options is the optparse options object.
- input_files is a list of paths to input files.
'''
job_inputs = None
reverse_mapping = None
fasta_file_contents = {}
# Generate FASTA files for PDB inputs
# fasta_file_contents is a mapping from a file path to a pair (FASTA contents, file type). We remember the file type
# since we offset residue IDs depending on file type i.e. for FASTA files, we treat each sequence separately and do
# not renumber the fragments in postprocessing. For PDB files, however, we need to respect the order and length of
# sequences so that we renumber the fragments appropriately in postprocessing - we assume that if a PDB file is passed in
# then all chains (protein, RNA, or DNA) will be used in a Rosetta run.
for input_file in input_files:
assert(not(fasta_file_contents.get(input_file)))
if any(fnmatch(input_file, x) for x in pdb_file_wildcards):
pdb = PDB.from_filepath(input_file, strict=True)
pdb.pdb_id = os.path.basename(input_file).split('.')[0]
if pdb.pdb_id.startswith('pdb') and len(pdb.pdb_id) >= 7:
# Hack to rename FASTA identifiers for pdb*.ent files which are present in mirrors of the PDB
pdb.pdb_id = pdb.pdb_id.replace('pdb', '')
fasta_file_contents[input_file] = (pdb.create_fasta(prefer_seqres_order = False), 'PDB')
else:
fasta_file_contents[input_file] = (read_file(input_file), 'FASTA')
# Extract sequences from the input FASTA files.
found_sequences, reverse_mapping, errors = get_sequences(options, fasta_file_contents)
if found_sequences:
reformat(found_sequences)
if errors:
return None, False, errors
# Discard sequences that are the wrong chain.
desired_sequences = {}
for key, sequence in found_sequences.iteritems():
pdb_id, chain, file_name = key
if options.chain is None or chain == options.chain:
desired_sequences[key] = sequence
# Create the input FASTA and script files.
job_inputs, errors = create_inputs(options, outpath, desired_sequences)
# Create the reverse mapping file
if reverse_mapping:
segment_mapping_file = os.path.join(outpath, "segment_map.json")
colorprinter.message("Creating a reverse mapping file %s." % segment_mapping_file)
write_file(segment_mapping_file, json.dumps(reverse_mapping))
# Create the post-processing script file
post_processing_script = read_file(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'post_processing.py'))
write_file(os.path.join(outpath, 'post_processing.py'), post_processing_script, 'w')
# Create the secondary structure filter file
if options.secondary_structure_file:
write_file(os.path.join(outpath, 'ss_filter.json'), json.dumps({'secondary_structure_filter' : SecondaryStructureDefinition.from_filepath(options.secondary_structure_file).data}), 'w')
return job_inputs, reverse_mapping != None, errors | [
"def",
"setup_jobs",
"(",
"outpath",
",",
"options",
",",
"input_files",
")",
":",
"job_inputs",
"=",
"None",
"reverse_mapping",
"=",
"None",
"fasta_file_contents",
"=",
"{",
"}",
"# Generate FASTA files for PDB inputs",
"# fasta_file_contents is a mapping from a file path to a pair (FASTA contents, file type). We remember the file type",
"# since we offset residue IDs depending on file type i.e. for FASTA files, we treat each sequence separately and do",
"# not renumber the fragments in postprocessing. For PDB files, however, we need to respect the order and length of",
"# sequences so that we renumber the fragments appropriately in postprocessing - we assume that if a PDB file is passed in",
"# then all chains (protein, RNA, or DNA) will be used in a Rosetta run.",
"for",
"input_file",
"in",
"input_files",
":",
"assert",
"(",
"not",
"(",
"fasta_file_contents",
".",
"get",
"(",
"input_file",
")",
")",
")",
"if",
"any",
"(",
"fnmatch",
"(",
"input_file",
",",
"x",
")",
"for",
"x",
"in",
"pdb_file_wildcards",
")",
":",
"pdb",
"=",
"PDB",
".",
"from_filepath",
"(",
"input_file",
",",
"strict",
"=",
"True",
")",
"pdb",
".",
"pdb_id",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"input_file",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"pdb",
".",
"pdb_id",
".",
"startswith",
"(",
"'pdb'",
")",
"and",
"len",
"(",
"pdb",
".",
"pdb_id",
")",
">=",
"7",
":",
"# Hack to rename FASTA identifiers for pdb*.ent files which are present in mirrors of the PDB",
"pdb",
".",
"pdb_id",
"=",
"pdb",
".",
"pdb_id",
".",
"replace",
"(",
"'pdb'",
",",
"''",
")",
"fasta_file_contents",
"[",
"input_file",
"]",
"=",
"(",
"pdb",
".",
"create_fasta",
"(",
"prefer_seqres_order",
"=",
"False",
")",
",",
"'PDB'",
")",
"else",
":",
"fasta_file_contents",
"[",
"input_file",
"]",
"=",
"(",
"read_file",
"(",
"input_file",
")",
",",
"'FASTA'",
")",
"# Extract sequences from the input FASTA files.",
"found_sequences",
",",
"reverse_mapping",
",",
"errors",
"=",
"get_sequences",
"(",
"options",
",",
"fasta_file_contents",
")",
"if",
"found_sequences",
":",
"reformat",
"(",
"found_sequences",
")",
"if",
"errors",
":",
"return",
"None",
",",
"False",
",",
"errors",
"# Discard sequences that are the wrong chain.",
"desired_sequences",
"=",
"{",
"}",
"for",
"key",
",",
"sequence",
"in",
"found_sequences",
".",
"iteritems",
"(",
")",
":",
"pdb_id",
",",
"chain",
",",
"file_name",
"=",
"key",
"if",
"options",
".",
"chain",
"is",
"None",
"or",
"chain",
"==",
"options",
".",
"chain",
":",
"desired_sequences",
"[",
"key",
"]",
"=",
"sequence",
"# Create the input FASTA and script files.",
"job_inputs",
",",
"errors",
"=",
"create_inputs",
"(",
"options",
",",
"outpath",
",",
"desired_sequences",
")",
"# Create the reverse mapping file",
"if",
"reverse_mapping",
":",
"segment_mapping_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"\"segment_map.json\"",
")",
"colorprinter",
".",
"message",
"(",
"\"Creating a reverse mapping file %s.\"",
"%",
"segment_mapping_file",
")",
"write_file",
"(",
"segment_mapping_file",
",",
"json",
".",
"dumps",
"(",
"reverse_mapping",
")",
")",
"# Create the post-processing script file",
"post_processing_script",
"=",
"read_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"[",
"0",
"]",
",",
"'post_processing.py'",
")",
")",
"write_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"'post_processing.py'",
")",
",",
"post_processing_script",
",",
"'w'",
")",
"# Create the secondary structure filter file",
"if",
"options",
".",
"secondary_structure_file",
":",
"write_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"outpath",
",",
"'ss_filter.json'",
")",
",",
"json",
".",
"dumps",
"(",
"{",
"'secondary_structure_filter'",
":",
"SecondaryStructureDefinition",
".",
"from_filepath",
"(",
"options",
".",
"secondary_structure_file",
")",
".",
"data",
"}",
")",
",",
"'w'",
")",
"return",
"job_inputs",
",",
"reverse_mapping",
"!=",
"None",
",",
"errors"
]
| This function sets up the jobs by creating the necessary input files as expected.
- outpath is where the output is to be stored.
- options is the optparse options object.
- input_files is a list of paths to input files. | [
"This",
"function",
"sets",
"up",
"the",
"jobs",
"by",
"creating",
"the",
"necessary",
"input",
"files",
"as",
"expected",
".",
"-",
"outpath",
"is",
"where",
"the",
"output",
"is",
"to",
"be",
"stored",
".",
"-",
"options",
"is",
"the",
"optparse",
"options",
"object",
".",
"-",
"input_files",
"is",
"a",
"list",
"of",
"paths",
"to",
"input",
"files",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fragments/generate_fragments.py#L460-L520 | train |
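A minimal driver sketch for setup_jobs(), covering only what the excerpt itself touches. The real script builds `options` from a much larger optparse parser, so the two options below (`chain`, `secondary_structure_file`) are just the fields this function reads, and the paths are placeholders.

import optparse
import os

parser = optparse.OptionParser()
parser.add_option('--chain', dest='chain', default=None)
parser.add_option('--secondary_structure_file', dest='secondary_structure_file', default=None)
options, _ = parser.parse_args(['--chain', 'A'])

outpath = '/tmp/fragment_job'
if not os.path.exists(outpath):
    os.makedirs(outpath)
# Returns (job inputs, whether a segment map was written, errors).
job_inputs, has_segment_map, errors = setup_jobs(outpath, options, ['input/1abc.pdb'])
if errors:
    print('\n'.join(errors))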
Kortemme-Lab/klab | klab/bio/fragments/generate_fragments.py | reformat | def reformat(found_sequences):
'''Truncate the FASTA headers so that the first field is a 4-character ID.'''
for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
header = sequence[0]
assert(header[0] == '>')
tokens = header.split('|')
tokens[0] = tokens[0][:5]
assert(len(tokens[0]) == 5)
sequence[0] = "|".join(tokens) | python | def reformat(found_sequences):
'''Truncate the FASTA headers so that the first field is a 4-character ID.'''
for (pdb_id, chain, file_name), sequence in sorted(found_sequences.iteritems()):
header = sequence[0]
assert(header[0] == '>')
tokens = header.split('|')
tokens[0] = tokens[0][:5]
assert(len(tokens[0]) == 5)
sequence[0] = "|".join(tokens) | [
"def",
"reformat",
"(",
"found_sequences",
")",
":",
"for",
"(",
"pdb_id",
",",
"chain",
",",
"file_name",
")",
",",
"sequence",
"in",
"sorted",
"(",
"found_sequences",
".",
"iteritems",
"(",
")",
")",
":",
"header",
"=",
"sequence",
"[",
"0",
"]",
"assert",
"(",
"header",
"[",
"0",
"]",
"==",
"'>'",
")",
"tokens",
"=",
"header",
".",
"split",
"(",
"'|'",
")",
"tokens",
"[",
"0",
"]",
"=",
"tokens",
"[",
"0",
"]",
"[",
":",
"5",
"]",
"assert",
"(",
"len",
"(",
"tokens",
"[",
"0",
"]",
")",
"==",
"5",
")",
"sequence",
"[",
"0",
"]",
"=",
"\"|\"",
".",
"join",
"(",
"tokens",
")"
]
| Truncate the FASTA headers so that the first field is a 4-character ID. | [
"Truncate",
"the",
"FASTA",
"headers",
"so",
"that",
"the",
"first",
"field",
"is",
"a",
"4",
"-",
"character",
"ID",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fragments/generate_fragments.py#L723-L731 | train |
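A small worked example of reformat()'s header truncation. The input shape — a dict keyed by (pdb_id, chain, file_name) tuples whose values are lists starting with a '>'-prefixed, pipe-delimited FASTA header — is inferred from the function body; the concrete header is illustrative.

found_sequences = {
    ('1ABC', 'A', '1abc.fasta'): ['>1ABC_extra|Chain A', 'MKTAYIAK'],
}
reformat(found_sequences)
# The first pipe-delimited field is cut to five characters: '>' plus a 4-character ID.
assert found_sequences[('1ABC', 'A', '1abc.fasta')][0] == '>1ABC|Chain A'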
Kortemme-Lab/klab | klab/bio/fragments/generate_fragments.py | search_configuration_files | def search_configuration_files(findstr, replacestr = None):
'''This function could be used to find and replace paths in the configuration files.
At present, it only finds phrases.'''
F = open(configurationFilesLocation, "r")
lines = F.readlines()
F.close()
allerrors = {}
alloutput = {}
for line in lines:
line = line.strip()
if line:
if line.endswith("generate_fragments.py"):
# Do not parse the Python script but check that it exists
if not(os.path.exists(line)):
allerrors[line] = "File/directory %s does not exist." % line
else:
cmd = ["grep", "-n", "-i", findstr, line]
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
errors = output[1]
output = output[0]
if errors:
errors = errors.strip()
allerrors[line] = errors
if output:
output = output.strip()
alloutput[line] = output.split("\n")
return alloutput, allerrors | python | def search_configuration_files(findstr, replacestr = None):
'''This function could be used to find and replace paths in the configuration files.
At present, it only finds phrases.'''
F = open(configurationFilesLocation, "r")
lines = F.readlines()
F.close()
allerrors = {}
alloutput = {}
for line in lines:
line = line.strip()
if line:
if line.endswith("generate_fragments.py"):
# Do not parse the Python script but check that it exists
if not(os.path.exists(line)):
allerrors[line] = "File/directory %s does not exist." % line
else:
cmd = ["grep", "-n", "-i", findstr, line]
output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
errors = output[1]
output = output[0]
if errors:
errors = errors.strip()
allerrors[line] = errors
if output:
output = output.strip()
alloutput[line] = output.split("\n")
return alloutput, allerrors | [
"def",
"search_configuration_files",
"(",
"findstr",
",",
"replacestr",
"=",
"None",
")",
":",
"F",
"=",
"open",
"(",
"configurationFilesLocation",
",",
"\"r\"",
")",
"lines",
"=",
"F",
".",
"readlines",
"(",
")",
"F",
".",
"close",
"(",
")",
"allerrors",
"=",
"{",
"}",
"alloutput",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
":",
"if",
"line",
".",
"endswith",
"(",
"\"generate_fragments.py\"",
")",
":",
"# Do not parse the Python script but check that it exists",
"if",
"not",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"line",
")",
")",
":",
"allerrors",
"[",
"line",
"]",
"=",
"\"File/directory %s does not exist.\"",
"%",
"line",
"else",
":",
"cmd",
"=",
"[",
"\"grep\"",
",",
"\"-n\"",
",",
"\"-i\"",
",",
"findstr",
",",
"line",
"]",
"output",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
".",
"communicate",
"(",
")",
"errors",
"=",
"output",
"[",
"1",
"]",
"output",
"=",
"output",
"[",
"0",
"]",
"if",
"errors",
":",
"errors",
"=",
"errors",
".",
"strip",
"(",
")",
"allerrors",
"[",
"line",
"]",
"=",
"errors",
"if",
"output",
":",
"output",
"=",
"output",
".",
"strip",
"(",
")",
"alloutput",
"[",
"line",
"]",
"=",
"output",
".",
"split",
"(",
"\"\\n\"",
")",
"return",
"alloutput",
",",
"allerrors"
]
| This function could be used to find and replace paths in the configuration files.
At present, it only finds phrases. | [
"This",
"function",
"could",
"be",
"used",
"to",
"find",
"and",
"replace",
"paths",
"in",
"the",
"configuration",
"files",
".",
"At",
"present",
"it",
"only",
"finds",
"phrases",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/fragments/generate_fragments.py#L776-L804 | train |
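A usage sketch for search_configuration_files(). It assumes the module-level configurationFilesLocation points at a text file listing one configuration-file path per line, which is how the function reads it; the search string is a placeholder.

matches, errors = search_configuration_files('/netapp/database')
for config_file, hits in matches.items():
    print(config_file)
    for hit in hits:
        # grep -n output: 'line_number:matching line'
        print('  ' + hit)
for config_file, err in errors.items():
    print('Problem with %s: %s' % (config_file, err))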
ronhanson/python-tbx | tbx/network.py | get_local_ip_address | def get_local_ip_address(target):
"""
Get the local ip address to access one specific target.
"""
ip_adr = ''
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target, 8000))
ip_adr = s.getsockname()[0]
s.close()
except:
pass
return ip_adr | python | def get_local_ip_address(target):
"""
Get the local ip address to access one specific target.
"""
ip_adr = ''
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target, 8000))
ip_adr = s.getsockname()[0]
s.close()
except:
pass
return ip_adr | [
"def",
"get_local_ip_address",
"(",
"target",
")",
":",
"ip_adr",
"=",
"''",
"try",
":",
"s",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"s",
".",
"connect",
"(",
"(",
"target",
",",
"8000",
")",
")",
"ip_adr",
"=",
"s",
".",
"getsockname",
"(",
")",
"[",
"0",
"]",
"s",
".",
"close",
"(",
")",
"except",
":",
"pass",
"return",
"ip_adr"
]
| Get the local ip address to access one specific target. | [
"Get",
"the",
"local",
"ip",
"address",
"to",
"access",
"one",
"specific",
"target",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/network.py#L24-L37 | train |
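get_local_ip_address() relies on the connected-UDP-socket trick: connecting a datagram socket sends no packets, but it forces the kernel to choose the outgoing interface, whose address getsockname() then reports. A short usage sketch (the target is arbitrary):

ip = get_local_ip_address('8.8.8.8')
# Returns '' if no route was found, since errors are swallowed.
print(ip or 'no local address found')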
ronhanson/python-tbx | tbx/network.py | SocketClient.connect | def connect(self):
"""
Connect socket to server
"""
try:
self.sock.connect((self.host, self.port))
return self.sock
except socket.error as ex:
logging.error('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
raise
except Exception as ex:
logging.exception('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
raise | python | def connect(self):
"""
Connect socket to server
"""
try:
self.sock.connect((self.host, self.port))
return self.sock
except socket.error as ex:
logging.error('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
raise
except Exception as ex:
logging.exception('Exception while connecting socket on %s:%s - Error %s' % (self.host, self.port, ex))
raise | [
"def",
"connect",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"sock",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"return",
"self",
".",
"sock",
"except",
"socket",
".",
"error",
"as",
"ex",
":",
"logging",
".",
"error",
"(",
"'Exception while connecting socket on %s:%s - Error %s'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"ex",
")",
")",
"raise",
"except",
"Exception",
"as",
"ex",
":",
"logging",
".",
"exception",
"(",
"'Exception while connecting socket on %s:%s - Error %s'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
",",
"ex",
")",
")",
"raise"
]
| Connect socket to server | [
"Connect",
"socket",
"to",
"server"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/network.py#L97-L109 | train |
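A connection sketch for SocketClient. The constructor is outside this excerpt, so this assumes it stores host and port and creates self.sock as a TCP socket; the host and port below are placeholders.

client = SocketClient('192.168.1.10', 9000)
try:
    client.connect()
except socket.error as ex:
    # connect() logs and re-raises, so the caller decides how to recover.
    print('will retry later: %s' % ex)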
ronhanson/python-tbx | tbx/network.py | SocketClient.send_by_packet | def send_by_packet(self, data):
"""
Send data by packet on socket
"""
total_sent = 0
while total_sent < PACKET_SIZE:
sent = self.sock.send(data[total_sent:])
if sent == 0:
raise RuntimeError("socket connection broken")
total_sent += sent
return total_sent | python | def send_by_packet(self, data):
"""
Send data by packet on socket
"""
total_sent = 0
while total_sent < PACKET_SIZE:
sent = self.sock.send(data[total_sent:])
if sent == 0:
raise RuntimeError("socket connection broken")
total_sent += sent
return total_sent | [
"def",
"send_by_packet",
"(",
"self",
",",
"data",
")",
":",
"total_sent",
"=",
"0",
"while",
"total_sent",
"<",
"PACKET_SIZE",
":",
"sent",
"=",
"self",
".",
"sock",
".",
"send",
"(",
"data",
"[",
"total_sent",
":",
"]",
")",
"if",
"sent",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"\"socket connection broken\"",
")",
"total_sent",
"+=",
"sent",
"return",
"total_sent"
]
| Send data by packet on socket | [
"Send",
"data",
"by",
"packet",
"on",
"socket"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/network.py#L111-L121 | train |
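Note that send_by_packet() loops until PACKET_SIZE bytes have been written, not len(data), so the caller is expected to hand it exactly one fixed-size packet. A sketch continuing the client above (PACKET_SIZE is a module-level constant whose value is not shown in this excerpt):

payload = b'HELLO'
packet = payload.ljust(PACKET_SIZE, b'\x00')  # pad to the fixed packet size
sent = client.send_by_packet(packet)
assert sent == PACKET_SIZE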
ronhanson/python-tbx | tbx/network.py | SocketClient.receive | def receive(self, siz):
"""
Receive a known length of bytes from a socket
"""
result = bytearray()
data = 'x'
while len(data) > 0:
data = self.sock.recv(siz - len(result))
result += data
if len(result) == siz:
return result
if len(result) > siz:
raise Exception('Received more bytes than expected')
raise Exception('Error receiving data. %d bytes received'%len(result)) | python | def receive(self, siz):
"""
Receive a known length of bytes from a socket
"""
result = bytearray()
data = 'x'
while len(data) > 0:
data = self.sock.recv(siz - len(result))
result += data
if len(result) == siz:
return result
if len(result) > siz:
raise Exception('Received more bytes than expected')
raise Exception('Error receiving data. %d bytes received'%len(result)) | [
"def",
"receive",
"(",
"self",
",",
"siz",
")",
":",
"result",
"=",
"bytearray",
"(",
")",
"data",
"=",
"'x'",
"while",
"len",
"(",
"data",
")",
">",
"0",
":",
"data",
"=",
"self",
".",
"sock",
".",
"recv",
"(",
"siz",
"-",
"len",
"(",
"result",
")",
")",
"result",
"+=",
"data",
"if",
"len",
"(",
"result",
")",
"==",
"siz",
":",
"return",
"result",
"if",
"len",
"(",
"result",
")",
">",
"siz",
":",
"raise",
"Exception",
"(",
"'Received more bytes than expected'",
")",
"raise",
"Exception",
"(",
"'Error receiving data. %d bytes received'",
"%",
"len",
"(",
"result",
")",
")"
]
| Receive a known length of bytes from a socket | [
"Receive",
"a",
"known",
"length",
"of",
"bytes",
"from",
"a",
"socket"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/network.py#L136-L149 | train |
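receive() is the standard fixed-length read loop: keep calling recv() until exactly `siz` bytes have arrived, and fail if the peer closes the connection early. Continuing the same sketch:

data = client.receive(PACKET_SIZE)
print('%d bytes received' % len(data))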
assamite/creamas | creamas/mp.py | spawn_container | def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) | python | def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) | [
"def",
"spawn_container",
"(",
"addr",
",",
"env_cls",
"=",
"Environment",
",",
"mgr_cls",
"=",
"EnvManager",
",",
"set_seed",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Try setting the process name to easily recognize the spawned",
"# environments with 'ps -x' or 'top'",
"try",
":",
"import",
"setproctitle",
"as",
"spt",
"title",
"=",
"'creamas: {}({})'",
".",
"format",
"(",
"env_cls",
".",
"__class__",
".",
"__name__",
",",
"_get_base_url",
"(",
"addr",
")",
")",
"spt",
".",
"setproctitle",
"(",
"title",
")",
"except",
":",
"pass",
"if",
"set_seed",
":",
"_set_random_seeds",
"(",
")",
"# kwargs['codec'] = aiomas.MsgPack",
"task",
"=",
"start",
"(",
"addr",
",",
"env_cls",
",",
"mgr_cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"loop",
"=",
"asyncio",
".",
"new_event_loop",
"(",
")",
"asyncio",
".",
"set_event_loop",
"(",
"loop",
")",
"loop",
".",
"run_until_complete",
"(",
"task",
")"
]
| Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``. | [
"Spawn",
"a",
"new",
"environment",
"in",
"a",
"given",
"address",
"as",
"a",
"coroutine",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L886-L915 | train |
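Because spawn_container() runs an event loop until the environment is stopped, it is normally used as the target of a child process. A minimal sketch with the default environment and manager classes, assuming the (host, port) tuple address form used elsewhere in the module:

import multiprocessing

addr = ('localhost', 5555)  # assumed (host, port) address form
p = multiprocessing.Process(target=spawn_container, args=(addr,))
p.start()
# ... interact with the spawned environment over RPC, then shut it down ...
p.join()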
assamite/creamas | creamas/mp.py | _set_random_seeds | def _set_random_seeds():
"""Set new random seeds for the process.
"""
try:
import numpy as np
np.random.seed()
except:
pass
try:
import scipy as sp
sp.random.seed()
except:
pass
import random
random.seed() | python | def _set_random_seeds():
"""Set new random seeds for the process.
"""
try:
import numpy as np
np.random.seed()
except:
pass
try:
import scipy as sp
sp.random.seed()
except:
pass
import random
random.seed() | [
"def",
"_set_random_seeds",
"(",
")",
":",
"try",
":",
"import",
"numpy",
"as",
"np",
"np",
".",
"random",
".",
"seed",
"(",
")",
"except",
":",
"pass",
"try",
":",
"import",
"scipy",
"as",
"sp",
"sp",
".",
"random",
".",
"seed",
"(",
")",
"except",
":",
"pass",
"import",
"random",
"random",
".",
"seed",
"(",
")"
]
| Set new random seeds for the process. | [
"Set",
"new",
"random",
"seeds",
"for",
"the",
"process",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L1007-L1023 | train |
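The reseeding matters when environments are forked: child processes otherwise inherit the parent's RNG state and draw identical "random" numbers. (scipy.random is effectively an alias of numpy.random in the scipy versions of this era, so the second block mainly covers installs where only scipy imports cleanly.) Usage is just:

_set_random_seeds()
import random
print(random.random())  # now differs across spawned processes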
assamite/creamas | creamas/mp.py | EnvManager.report | async def report(self, msg, timeout=5):
"""Report message to the host manager.
"""
try:
host_manager = await self.env.connect(self.host_manager,
timeout=timeout)
except:
raise ConnectionError("Could not reach host manager ({})."
.format(self.host_manager))
ret = await host_manager.handle(msg)
return ret | python | async def report(self, msg, timeout=5):
"""Report message to the host manager.
"""
try:
host_manager = await self.env.connect(self.host_manager,
timeout=timeout)
except:
raise ConnectionError("Could not reach host manager ({})."
.format(self.host_manager))
ret = await host_manager.handle(msg)
return ret | [
"async",
"def",
"report",
"(",
"self",
",",
"msg",
",",
"timeout",
"=",
"5",
")",
":",
"try",
":",
"host_manager",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"self",
".",
"host_manager",
",",
"timeout",
"=",
"timeout",
")",
"except",
":",
"raise",
"ConnectionError",
"(",
"\"Could not reach host manager ({}).\"",
".",
"format",
"(",
"self",
".",
"host_manager",
")",
")",
"ret",
"=",
"await",
"host_manager",
".",
"handle",
"(",
"msg",
")",
"return",
"ret"
]
| Report message to the host manager. | [
"Report",
"message",
"to",
"the",
"host",
"manager",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L77-L87 | train |
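A sketch of report() from inside a coroutine. It assumes the manager's host-manager address has already been assigned (that setter is outside this excerpt):

async def announce(manager):
    try:
        return await manager.report('ready', timeout=5)
    except ConnectionError as ex:
        print(ex)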
assamite/creamas | creamas/mp.py | EnvManager.get_agents | def get_agents(self, addr=True, agent_cls=None, as_coro=False):
"""Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
"""
return self.env.get_agents(addr=addr, agent_cls=agent_cls) | python | def get_agents(self, addr=True, agent_cls=None, as_coro=False):
"""Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design.
"""
return self.env.get_agents(addr=addr, agent_cls=agent_cls) | [
"def",
"get_agents",
"(",
"self",
",",
"addr",
"=",
"True",
",",
"agent_cls",
"=",
"None",
",",
"as_coro",
"=",
"False",
")",
":",
"return",
"self",
".",
"env",
".",
"get_agents",
"(",
"addr",
"=",
"addr",
",",
"agent_cls",
"=",
"agent_cls",
")"
]
| Get agents from the managed environment.
This is a managing function for the
:py:meth:`~creamas.environment.Environment.get_agents`. Returned
agent list excludes the environment's manager agent (this agent) by
design. | [
"Get",
"agents",
"from",
"the",
"managed",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L96-L104 | train |
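With addr=True this returns the addresses of every managed agent except the manager itself, mirroring the wrapped Environment.get_agents. For example:

agent_addresses = manager.get_agents(addr=True)
print('%d agents managed' % len(agent_addresses))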
assamite/creamas | creamas/mp.py | EnvManager.get_artifacts | async def get_artifacts(self):
"""Get all artifacts from the host environment.
:returns: All the artifacts in the environment.
"""
host_manager = await self.env.connect(self._host_manager,
timeout=TIMEOUT)
artifacts = await host_manager.get_artifacts()
return artifacts | python | async def get_artifacts(self):
"""Get all artifacts from the host environment.
:returns: All the artifacts in the environment.
"""
host_manager = await self.env.connect(self._host_manager,
timeout=TIMEOUT)
artifacts = await host_manager.get_artifacts()
return artifacts | [
"async",
"def",
"get_artifacts",
"(",
"self",
")",
":",
"host_manager",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"self",
".",
"_host_manager",
",",
"timeout",
"=",
"TIMEOUT",
")",
"artifacts",
"=",
"await",
"host_manager",
".",
"get_artifacts",
"(",
")",
"return",
"artifacts"
]
| Get all artifacts from the host environment.
:returns: All the artifacts in the environment. | [
"Get",
"all",
"artifacts",
"from",
"the",
"host",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L135-L143 | train |
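get_artifacts() is itself a coroutine that round-trips through the host manager, so it must be awaited:

async def count_artifacts(manager):
    artifacts = await manager.get_artifacts()
    return len(artifacts)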
assamite/creamas | creamas/mp.py | MultiEnvManager.spawn | async def spawn(self, agent_cls, *args, addr=None, **kwargs):
"""Spawn an agent to the environment.
This is a managing function for
:meth:`~creamas.mp.MultiEnvironment.spawn`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, only the address of the spawned agent is
returned.
"""
_, addr = await self.menv.spawn(agent_cls, *args, addr=addr, **kwargs)
return addr | python | async def spawn(self, agent_cls, *args, addr=None, **kwargs):
"""Spawn an agent to the environment.
This is a managing function for
:meth:`~creamas.mp.MultiEnvironment.spawn`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, only the address of the spawned agent is
returned.
"""
_, addr = await self.menv.spawn(agent_cls, *args, addr=addr, **kwargs)
return addr | [
"async",
"def",
"spawn",
"(",
"self",
",",
"agent_cls",
",",
"*",
"args",
",",
"addr",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"_",
",",
"addr",
"=",
"await",
"self",
".",
"menv",
".",
"spawn",
"(",
"agent_cls",
",",
"*",
"args",
",",
"addr",
"=",
"addr",
",",
"*",
"*",
"kwargs",
")",
"return",
"addr"
]
| Spawn an agent to the environment.
This is a managing function for
:meth:`~creamas.mp.MultiEnvironment.spawn`.
.. note::
Since :class:`aiomas.rpc.Proxy` objects do not seem to handle
(re)serialization, only the address of the spawned agent is
returned. | [
"Spawn",
"an",
"agent",
"to",
"the",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L218-L231 | train |
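A spawning sketch against a MultiEnvManager proxy. The agent class argument is written as a 'module:Class' string on the assumption that the RPC layer follows the usual aiomas convention; the class path is a placeholder.

async def add_agent(manager_proxy):
    # Only the new agent's address is returned, since rpc proxies
    # do not survive (re)serialization across processes.
    addr = await manager_proxy.spawn('mypackage.agents:MyAgent')
    return addr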