repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
LeKono/pyhgnc
src/pyhgnc/manager/query.py
QueryManager.get_model_queries
def get_model_queries(self, query_obj, model_queries_config):
    """Apply model-level search filters to a query.

    Use this if you are searching for a field in the same model.

    :param query_obj: the query object to extend
    :param model_queries_config: iterable of ``(search_value, model_attribute)``
        pairs; pairs whose search value is ``None`` are skipped
    :return: the (possibly extended) query object
    """
    for search4, model_attrib in model_queries_config:
        if search4 is not None:
            query_obj = self._model_query(query_obj, search4, model_attrib)
    return query_obj
python
def get_model_queries(self, query_obj, model_queries_config): """use this if your are searching for a field in the same model""" for search4, model_attrib in model_queries_config: if search4 is not None: query_obj = self._model_query(query_obj, search4, model_attrib) return query_obj
[ "def", "get_model_queries", "(", "self", ",", "query_obj", ",", "model_queries_config", ")", ":", "for", "search4", ",", "model_attrib", "in", "model_queries_config", ":", "if", "search4", "is", "not", "None", ":", "query_obj", "=", "self", ".", "_model_query", "(", "query_obj", ",", "search4", ",", "model_attrib", ")", "return", "query_obj" ]
use this if your are searching for a field in the same model
[ "use", "this", "if", "your", "are", "searching", "for", "a", "field", "in", "the", "same", "model" ]
1cae20c40874bfb51581b7c5c1481707e942b5d0
https://github.com/LeKono/pyhgnc/blob/1cae20c40874bfb51581b7c5c1481707e942b5d0/src/pyhgnc/manager/query.py#L44-L50
train
happyleavesaoc/python-upsmychoice
upsmychoice/__init__.py
_login
def _login(session):
    """Log in to UPS and persist the resulting session cookies.

    Fetches the login page to obtain a CSRF token, posts the credentials
    stored on ``session.auth``, and raises ``UPSError`` when the login is
    rejected or the response page contains an error message.
    """
    login_page = session.get(LOGIN_URL, params=_get_params(session.auth.locale))
    soup = BeautifulSoup(login_page.text, HTML_PARSER)
    csrf_token = soup.find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR)
    form_data = {
        'userID': session.auth.username,
        'password': session.auth.password,
        'loginAction': 'X',
        'CSRFToken': csrf_token,
        'loc': session.auth.locale
    }
    response = session.post(LOGIN_URL, form_data)
    if response.status_code == 403:
        raise UPSError('login failure')
    soup = BeautifulSoup(response.text, HTML_PARSER)
    error_elem = soup.find(ERROR_FIND_TAG, ERROR_FIND_ATTR)
    if error_elem and error_elem.string:
        raise UPSError(error_elem.string.strip())
    _save_cookies(session.cookies, session.auth.cookie_path)
python
def _login(session): """Login to UPS.""" resp = session.get(LOGIN_URL, params=_get_params(session.auth.locale)) parsed = BeautifulSoup(resp.text, HTML_PARSER) csrf = parsed.find(CSRF_FIND_TAG, CSRF_FIND_ATTR).get(VALUE_ATTR) resp = session.post(LOGIN_URL, { 'userID': session.auth.username, 'password': session.auth.password, 'loginAction': 'X', 'CSRFToken': csrf, 'loc': session.auth.locale }) if resp.status_code == 403: raise UPSError('login failure') parsed = BeautifulSoup(resp.text, HTML_PARSER) error = parsed.find(ERROR_FIND_TAG, ERROR_FIND_ATTR) if error and error.string: raise UPSError(error.string.strip()) _save_cookies(session.cookies, session.auth.cookie_path)
[ "def", "_login", "(", "session", ")", ":", "resp", "=", "session", ".", "get", "(", "LOGIN_URL", ",", "params", "=", "_get_params", "(", "session", ".", "auth", ".", "locale", ")", ")", "parsed", "=", "BeautifulSoup", "(", "resp", ".", "text", ",", "HTML_PARSER", ")", "csrf", "=", "parsed", ".", "find", "(", "CSRF_FIND_TAG", ",", "CSRF_FIND_ATTR", ")", ".", "get", "(", "VALUE_ATTR", ")", "resp", "=", "session", ".", "post", "(", "LOGIN_URL", ",", "{", "'userID'", ":", "session", ".", "auth", ".", "username", ",", "'password'", ":", "session", ".", "auth", ".", "password", ",", "'loginAction'", ":", "'X'", ",", "'CSRFToken'", ":", "csrf", ",", "'loc'", ":", "session", ".", "auth", ".", "locale", "}", ")", "if", "resp", ".", "status_code", "==", "403", ":", "raise", "UPSError", "(", "'login failure'", ")", "parsed", "=", "BeautifulSoup", "(", "resp", ".", "text", ",", "HTML_PARSER", ")", "error", "=", "parsed", ".", "find", "(", "ERROR_FIND_TAG", ",", "ERROR_FIND_ATTR", ")", "if", "error", "and", "error", ".", "string", ":", "raise", "UPSError", "(", "error", ".", "string", ".", "strip", "(", ")", ")", "_save_cookies", "(", "session", ".", "cookies", ",", "session", ".", "auth", ".", "cookie_path", ")" ]
Login to UPS.
[ "Login", "to", "UPS", "." ]
df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe
https://github.com/happyleavesaoc/python-upsmychoice/blob/df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe/upsmychoice/__init__.py#L64-L82
train
happyleavesaoc/python-upsmychoice
upsmychoice/__init__.py
get_packages
def get_packages(session):
    """Get deliveries in progress and completed.

    Scrapes the deliveries page for a service token and transaction id,
    posts to the shipment service endpoint, and parses the JSON payload
    into a list of package dicts.

    :param session: authenticated requests session (see ``get_session``)
    :return: list of dicts with tracking/status/delivery fields
    :raises UPSError: if the token/tid cannot be found or JSON parsing fails
    """
    resp = session.get(DELIVERIES_URL, params=_get_params(session.auth.locale))
    parsed = BeautifulSoup(resp.text, HTML_PARSER)
    # Hidden page elements carry the service token and transaction id
    # required by the shipment service call below.
    token_elem = parsed.find(TOKEN_FIND_TAG, TOKEN_FIND_ATTR)
    tid_elem = parsed.find(TID_FIND_TAG, TID_FIND_ATTR)
    if not token_elem or not tid_elem:
        raise UPSError('failed to find token or tid')
    token = token_elem.get(VALUE_ATTR)
    tid = tid_elem.get(VALUE_ATTR)
    resp = session.post(SERVICE_URL, {
        'token': token,
        'uid': session.auth.username,
        'callType': 'allShipments',
        'tid': tid,
        'loc': session.auth.locale
    })
    try:
        packages = []
        # The service prefixes the JSON body with a fixed-size non-JSON
        # preamble; strip it before decoding.
        data = json.loads(resp.text[UPS_JSON_PREAMBLE_SIZE:])
        shipments = data['shipmentContainer']['inboundShipments'] + \
            data['shipmentContainer']['historyShipments']
        for shipment in shipments:
            # NOTE(review): the abbreviated keys appear to be sender
            # city/state/country ('sfc'/'sfs'/'sfcn') and sender name
            # ('sfn') -- assumed from usage here, confirm against the
            # UPS service schema.
            from_location = '{}, {}, {}'.format(shipment['sfc'],
                                                shipment['sfs'],
                                                shipment['sfcn'])
            estimated_date = _parsed_date(shipment['sddfd'])
            actual_date = _parsed_date(shipment['dd'])
            packages.append({
                'tracking_number': shipment['tn'],
                'status': shipment['sts'],
                'from': shipment['sfn'],
                'from_location': from_location,
                'estimated_delivery_date': estimated_date,
                'estimated_delivery_timeframe': shipment['sdtfd'],
                'delivery_date': actual_date
            })
        return packages
    except JSONDecodeError:
        raise UPSError('failed to parse json')
python
def get_packages(session): """Get deliveries in progress and completed.""" resp = session.get(DELIVERIES_URL, params=_get_params(session.auth.locale)) parsed = BeautifulSoup(resp.text, HTML_PARSER) token_elem = parsed.find(TOKEN_FIND_TAG, TOKEN_FIND_ATTR) tid_elem = parsed.find(TID_FIND_TAG, TID_FIND_ATTR) if not token_elem or not tid_elem: raise UPSError('failed to find token or tid') token = token_elem.get(VALUE_ATTR) tid = tid_elem.get(VALUE_ATTR) resp = session.post(SERVICE_URL, { 'token': token, 'uid': session.auth.username, 'callType': 'allShipments', 'tid': tid, 'loc': session.auth.locale }) try: packages = [] data = json.loads(resp.text[UPS_JSON_PREAMBLE_SIZE:]) shipments = data['shipmentContainer']['inboundShipments'] + \ data['shipmentContainer']['historyShipments'] for shipment in shipments: from_location = '{}, {}, {}'.format(shipment['sfc'], shipment['sfs'], shipment['sfcn']) estimated_date = _parsed_date(shipment['sddfd']) actual_date = _parsed_date(shipment['dd']) packages.append({ 'tracking_number': shipment['tn'], 'status': shipment['sts'], 'from': shipment['sfn'], 'from_location': from_location, 'estimated_delivery_date': estimated_date, 'estimated_delivery_timeframe': shipment['sdtfd'], 'delivery_date': actual_date }) return packages except JSONDecodeError: raise UPSError('failed to parse json')
[ "def", "get_packages", "(", "session", ")", ":", "resp", "=", "session", ".", "get", "(", "DELIVERIES_URL", ",", "params", "=", "_get_params", "(", "session", ".", "auth", ".", "locale", ")", ")", "parsed", "=", "BeautifulSoup", "(", "resp", ".", "text", ",", "HTML_PARSER", ")", "token_elem", "=", "parsed", ".", "find", "(", "TOKEN_FIND_TAG", ",", "TOKEN_FIND_ATTR", ")", "tid_elem", "=", "parsed", ".", "find", "(", "TID_FIND_TAG", ",", "TID_FIND_ATTR", ")", "if", "not", "token_elem", "or", "not", "tid_elem", ":", "raise", "UPSError", "(", "'failed to find token or tid'", ")", "token", "=", "token_elem", ".", "get", "(", "VALUE_ATTR", ")", "tid", "=", "tid_elem", ".", "get", "(", "VALUE_ATTR", ")", "resp", "=", "session", ".", "post", "(", "SERVICE_URL", ",", "{", "'token'", ":", "token", ",", "'uid'", ":", "session", ".", "auth", ".", "username", ",", "'callType'", ":", "'allShipments'", ",", "'tid'", ":", "tid", ",", "'loc'", ":", "session", ".", "auth", ".", "locale", "}", ")", "try", ":", "packages", "=", "[", "]", "data", "=", "json", ".", "loads", "(", "resp", ".", "text", "[", "UPS_JSON_PREAMBLE_SIZE", ":", "]", ")", "shipments", "=", "data", "[", "'shipmentContainer'", "]", "[", "'inboundShipments'", "]", "+", "data", "[", "'shipmentContainer'", "]", "[", "'historyShipments'", "]", "for", "shipment", "in", "shipments", ":", "from_location", "=", "'{}, {}, {}'", ".", "format", "(", "shipment", "[", "'sfc'", "]", ",", "shipment", "[", "'sfs'", "]", ",", "shipment", "[", "'sfcn'", "]", ")", "estimated_date", "=", "_parsed_date", "(", "shipment", "[", "'sddfd'", "]", ")", "actual_date", "=", "_parsed_date", "(", "shipment", "[", "'dd'", "]", ")", "packages", ".", "append", "(", "{", "'tracking_number'", ":", "shipment", "[", "'tn'", "]", ",", "'status'", ":", "shipment", "[", "'sts'", "]", ",", "'from'", ":", "shipment", "[", "'sfn'", "]", ",", "'from_location'", ":", "from_location", ",", "'estimated_delivery_date'", ":", "estimated_date", ",", 
"'estimated_delivery_timeframe'", ":", "shipment", "[", "'sdtfd'", "]", ",", "'delivery_date'", ":", "actual_date", "}", ")", "return", "packages", "except", "JSONDecodeError", ":", "raise", "UPSError", "(", "'failed to parse json'", ")" ]
Get deliveries in progress and completed.
[ "Get", "deliveries", "in", "progress", "and", "completed", "." ]
df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe
https://github.com/happyleavesaoc/python-upsmychoice/blob/df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe/upsmychoice/__init__.py#L98-L137
train
happyleavesaoc/python-upsmychoice
upsmychoice/__init__.py
get_session
def get_session(username, password, locale=DEFAULT_LOCALE, cookie_path=COOKIE_PATH):
    """Get UPS HTTP session.

    Returns a ``requests`` session whose ``auth`` attribute stores the
    credentials, locale, and cookie path for later calls. Cookies cached
    at ``cookie_path`` are reused when present; otherwise a fresh login
    is performed.

    :param username: UPS account user name
    :param password: UPS account password
    :param locale: locale string sent with requests
    :param cookie_path: filesystem path where session cookies are cached
    :return: authenticated ``requests`` session
    """
    class UPSAuth(AuthBase):  # pylint: disable=too-few-public-methods
        """UPS authorization storage."""

        def __init__(self, username, password, locale, cookie_path):
            """Init."""
            self.username = username
            self.password = password
            self.locale = locale
            self.cookie_path = cookie_path

        def __call__(self, r):
            """Call is no-op."""
            return r

    session = requests.session()
    session.auth = UPSAuth(username, password, locale, cookie_path)
    # NOTE(review): an existing cookie file is trusted without checking
    # expiry; a stale file will surface as request failures until it is
    # removed -- confirm this is the intended trade-off.
    if os.path.exists(cookie_path):
        session.cookies = _load_cookies(cookie_path)
    else:
        _login(session)
    return session
python
def get_session(username, password, locale=DEFAULT_LOCALE, cookie_path=COOKIE_PATH): """Get UPS HTTP session.""" class UPSAuth(AuthBase): # pylint: disable=too-few-public-methods """UPS authorization storage.""" def __init__(self, username, password, locale, cookie_path): """Init.""" self.username = username self.password = password self.locale = locale self.cookie_path = cookie_path def __call__(self, r): """Call is no-op.""" return r session = requests.session() session.auth = UPSAuth(username, password, locale, cookie_path) if os.path.exists(cookie_path): session.cookies = _load_cookies(cookie_path) else: _login(session) return session
[ "def", "get_session", "(", "username", ",", "password", ",", "locale", "=", "DEFAULT_LOCALE", ",", "cookie_path", "=", "COOKIE_PATH", ")", ":", "class", "UPSAuth", "(", "AuthBase", ")", ":", "# pylint: disable=too-few-public-methods", "\"\"\"UPS authorization storage.\"\"\"", "def", "__init__", "(", "self", ",", "username", ",", "password", ",", "locale", ",", "cookie_path", ")", ":", "\"\"\"Init.\"\"\"", "self", ".", "username", "=", "username", "self", ".", "password", "=", "password", "self", ".", "locale", "=", "locale", "self", ".", "cookie_path", "=", "cookie_path", "def", "__call__", "(", "self", ",", "r", ")", ":", "\"\"\"Call is no-op.\"\"\"", "return", "r", "session", "=", "requests", ".", "session", "(", ")", "session", ".", "auth", "=", "UPSAuth", "(", "username", ",", "password", ",", "locale", ",", "cookie_path", ")", "if", "os", ".", "path", ".", "exists", "(", "cookie_path", ")", ":", "session", ".", "cookies", "=", "_load_cookies", "(", "cookie_path", ")", "else", ":", "_login", "(", "session", ")", "return", "session" ]
Get UPS HTTP session.
[ "Get", "UPS", "HTTP", "session", "." ]
df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe
https://github.com/happyleavesaoc/python-upsmychoice/blob/df4d7e9d92f95884c8d86f9d38b5a2291cf9edbe/upsmychoice/__init__.py#L140-L163
train
portfors-lab/sparkle
sparkle/gui/hidden_widget.py
WidgetHider.hide
def hide(self, event):
    """Toggle the visibility of the content widget."""
    if not self.content.isHidden():
        # Collapse: hide the content and pin the widget to header height.
        self.content.hide()
        self.hideBtn.setIcon(self.showIcon)
        self.setFixedHeight(30)
    else:
        # Expand: show the content and lift the height cap
        # (16777215 is Qt's QWIDGETSIZE_MAX, i.e. "no maximum").
        self.content.show()
        self.hideBtn.setIcon(self.hideIcon)
        self.setMaximumHeight(16777215)
python
def hide(self, event): """Toggles the visiblity of the content widget""" if self.content.isHidden(): self.content.show() self.hideBtn.setIcon(self.hideIcon) self.setMaximumHeight(16777215) else: self.content.hide() self.hideBtn.setIcon(self.showIcon) self.setFixedHeight(30)
[ "def", "hide", "(", "self", ",", "event", ")", ":", "if", "self", ".", "content", ".", "isHidden", "(", ")", ":", "self", ".", "content", ".", "show", "(", ")", "self", ".", "hideBtn", ".", "setIcon", "(", "self", ".", "hideIcon", ")", "self", ".", "setMaximumHeight", "(", "16777215", ")", "else", ":", "self", ".", "content", ".", "hide", "(", ")", "self", ".", "hideBtn", ".", "setIcon", "(", "self", ".", "showIcon", ")", "self", ".", "setFixedHeight", "(", "30", ")" ]
Toggles the visiblity of the content widget
[ "Toggles", "the", "visiblity", "of", "the", "content", "widget" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/hidden_widget.py#L41-L50
train
maxharp3r/archive-rotator
archive_rotator/rotator.py
_next_rotation_id
def _next_rotation_id(rotated_files): """Given the hanoi_rotator generated files in the output directory, returns the rotation_id that will be given to the current file. If there are no existing rotated files, return 0. """ if not rotated_files: return 0 else: highest_rotated_file = max(rotated_files, key=lambda x: x[1]) return highest_rotated_file[1] + 1
python
def _next_rotation_id(rotated_files): """Given the hanoi_rotator generated files in the output directory, returns the rotation_id that will be given to the current file. If there are no existing rotated files, return 0. """ if not rotated_files: return 0 else: highest_rotated_file = max(rotated_files, key=lambda x: x[1]) return highest_rotated_file[1] + 1
[ "def", "_next_rotation_id", "(", "rotated_files", ")", ":", "if", "not", "rotated_files", ":", "return", "0", "else", ":", "highest_rotated_file", "=", "max", "(", "rotated_files", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "return", "highest_rotated_file", "[", "1", "]", "+", "1" ]
Given the hanoi_rotator generated files in the output directory, returns the rotation_id that will be given to the current file. If there are no existing rotated files, return 0.
[ "Given", "the", "hanoi_rotator", "generated", "files", "in", "the", "output", "directory", "returns", "the", "rotation_id", "that", "will", "be", "given", "to", "the", "current", "file", ".", "If", "there", "are", "no", "existing", "rotated", "files", "return", "0", "." ]
40b8e571461c54717cee7daead04dbc9751062c8
https://github.com/maxharp3r/archive-rotator/blob/40b8e571461c54717cee7daead04dbc9751062c8/archive_rotator/rotator.py#L73-L82
train
maxharp3r/archive-rotator
archive_rotator/rotator.py
_locate_files_to_delete
def _locate_files_to_delete(algorithm, rotated_files, next_rotation_id): """Looks for hanoi_rotator generated files that occupy the same slot that will be given to rotation_id. """ rotation_slot = algorithm.id_to_slot(next_rotation_id) for a_path, a_rotation_id in rotated_files: if rotation_slot == algorithm.id_to_slot(a_rotation_id): yield a_path
python
def _locate_files_to_delete(algorithm, rotated_files, next_rotation_id): """Looks for hanoi_rotator generated files that occupy the same slot that will be given to rotation_id. """ rotation_slot = algorithm.id_to_slot(next_rotation_id) for a_path, a_rotation_id in rotated_files: if rotation_slot == algorithm.id_to_slot(a_rotation_id): yield a_path
[ "def", "_locate_files_to_delete", "(", "algorithm", ",", "rotated_files", ",", "next_rotation_id", ")", ":", "rotation_slot", "=", "algorithm", ".", "id_to_slot", "(", "next_rotation_id", ")", "for", "a_path", ",", "a_rotation_id", "in", "rotated_files", ":", "if", "rotation_slot", "==", "algorithm", ".", "id_to_slot", "(", "a_rotation_id", ")", ":", "yield", "a_path" ]
Looks for hanoi_rotator generated files that occupy the same slot that will be given to rotation_id.
[ "Looks", "for", "hanoi_rotator", "generated", "files", "that", "occupy", "the", "same", "slot", "that", "will", "be", "given", "to", "rotation_id", "." ]
40b8e571461c54717cee7daead04dbc9751062c8
https://github.com/maxharp3r/archive-rotator/blob/40b8e571461c54717cee7daead04dbc9751062c8/archive_rotator/rotator.py#L85-L92
train
maxharp3r/archive-rotator
archive_rotator/rotator.py
rotate
def rotate(algorithm, path, ext="", destination_dir=None, verbose=False):
    """
    Programmatic access to the archive rotator

    :param algorithm: an instance of BaseRotator from algorithms.py
    :param path: full path to input file
    :param ext: (optional) file extension to preserve
    :param destination_dir: (optional) different location for output file
    :param verbose: (optional) print more to stdout
    :return: nothing
    """
    _move_files(algorithm, Paths(path, ext, destination_dir), verbose)
python
def rotate(algorithm, path, ext="", destination_dir=None, verbose=False): """ Programmatic access to the archive rotator :param algorithm: an instance of BaseRotator from algorithms.py :param path: full path to input file :param ext: (optional) file extension to preserve :param destination_dir: (optional) different location for output file :param verbose: (optional) print more to stdout :return: nothing """ paths = Paths(path, ext, destination_dir) _move_files(algorithm, paths, verbose)
[ "def", "rotate", "(", "algorithm", ",", "path", ",", "ext", "=", "\"\"", ",", "destination_dir", "=", "None", ",", "verbose", "=", "False", ")", ":", "paths", "=", "Paths", "(", "path", ",", "ext", ",", "destination_dir", ")", "_move_files", "(", "algorithm", ",", "paths", ",", "verbose", ")" ]
Programmatic access to the archive rotator :param algorithm: an instance of BaseRotator from algorithms.py :param path: full path to input file :param ext: (optional) file extension to preserve :param destination_dir: (optional) different location for output file :param verbose: (optional) print more to stdout :return: nothing
[ "Programmatic", "access", "to", "the", "archive", "rotator" ]
40b8e571461c54717cee7daead04dbc9751062c8
https://github.com/maxharp3r/archive-rotator/blob/40b8e571461c54717cee7daead04dbc9751062c8/archive_rotator/rotator.py#L126-L138
train
The-Politico/politico-civic-election
election/models/election.py
Election.update_or_create_candidate
def update_or_create_candidate(
    self, candidate, aggregable=True, uncontested=False
):
    """Create or update the CandidateElection tying a candidate to this
    election, and return it."""
    defaults = {"aggregable": aggregable, "uncontested": uncontested}
    candidate_election, _created = CandidateElection.objects.update_or_create(
        candidate=candidate, election=self, defaults=defaults
    )
    return candidate_election
python
def update_or_create_candidate( self, candidate, aggregable=True, uncontested=False ): """Create a CandidateElection.""" candidate_election, c = CandidateElection.objects.update_or_create( candidate=candidate, election=self, defaults={"aggregable": aggregable, "uncontested": uncontested}, ) return candidate_election
[ "def", "update_or_create_candidate", "(", "self", ",", "candidate", ",", "aggregable", "=", "True", ",", "uncontested", "=", "False", ")", ":", "candidate_election", ",", "c", "=", "CandidateElection", ".", "objects", ".", "update_or_create", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ",", "defaults", "=", "{", "\"aggregable\"", ":", "aggregable", ",", "\"uncontested\"", ":", "uncontested", "}", ",", ")", "return", "candidate_election" ]
Create a CandidateElection.
[ "Create", "a", "CandidateElection", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L54-L64
train
The-Politico/politico-civic-election
election/models/election.py
Election.delete_candidate
def delete_candidate(self, candidate):
    """Delete the CandidateElection linking a candidate to this election."""
    candidate_elections = CandidateElection.objects.filter(
        candidate=candidate, election=self
    )
    candidate_elections.delete()
python
def delete_candidate(self, candidate): """Delete a CandidateElection.""" CandidateElection.objects.filter( candidate=candidate, election=self ).delete()
[ "def", "delete_candidate", "(", "self", ",", "candidate", ")", ":", "CandidateElection", ".", "objects", ".", "filter", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ")", ".", "delete", "(", ")" ]
Delete a CandidateElection.
[ "Delete", "a", "CandidateElection", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L66-L70
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidates
def get_candidates(self):
    """Return the candidate of every CandidateElection on this election."""
    return [
        candidate_election.candidate
        for candidate_election
        in CandidateElection.objects.filter(election=self)
    ]
python
def get_candidates(self): """Get all CandidateElections for this election.""" candidate_elections = CandidateElection.objects.filter(election=self) return [ce.candidate for ce in candidate_elections]
[ "def", "get_candidates", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "return", "[", "ce", ".", "candidate", "for", "ce", "in", "candidate_elections", "]" ]
Get all CandidateElections for this election.
[ "Get", "all", "CandidateElections", "for", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L72-L76
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidates_by_party
def get_candidates_by_party(self):
    """
    Return this election's candidates as a dict keyed by party slug.
    """
    candidates_by_party = {}
    for candidate_election in CandidateElection.objects.filter(election=self):
        candidate = candidate_election.candidate
        candidates_by_party[candidate.party.slug] = candidate
    return candidates_by_party
python
def get_candidates_by_party(self): """ Get CandidateElections serialized into an object with party-slug keys. """ candidate_elections = CandidateElection.objects.filter(election=self) return { ce.candidate.party.slug: ce.candidate for ce in candidate_elections }
[ "def", "get_candidates_by_party", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "return", "{", "ce", ".", "candidate", ".", "party", ".", "slug", ":", "ce", ".", "candidate", "for", "ce", "in", "candidate_elections", "}" ]
Get CandidateElections serialized into an object with party-slug keys.
[ "Get", "CandidateElections", "serialized", "into", "an", "object", "with", "party", "-", "slug", "keys", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L78-L87
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidate_election
def get_candidate_election(self, candidate):
    """Return the CandidateElection for a candidate in this election."""
    return CandidateElection.objects.get(election=self, candidate=candidate)
python
def get_candidate_election(self, candidate): """Get CandidateElection for a Candidate in this election.""" return CandidateElection.objects.get( candidate=candidate, election=self )
[ "def", "get_candidate_election", "(", "self", ",", "candidate", ")", ":", "return", "CandidateElection", ".", "objects", ".", "get", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ")" ]
Get CandidateElection for a Candidate in this election.
[ "Get", "CandidateElection", "for", "a", "Candidate", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L89-L93
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidate_votes
def get_candidate_votes(self, candidate):
    """
    Return all votes attached to the candidate's CandidateElection in
    this election.
    """
    ce = CandidateElection.objects.get(candidate=candidate, election=self)
    return ce.votes.all()
python
def get_candidate_votes(self, candidate): """ Get all votes attached to a CandidateElection for a Candidate in this election. """ candidate_election = CandidateElection.objects.get( candidate=candidate, election=self ) return candidate_election.votes.all()
[ "def", "get_candidate_votes", "(", "self", ",", "candidate", ")", ":", "candidate_election", "=", "CandidateElection", ".", "objects", ".", "get", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ")", "return", "candidate_election", ".", "votes", ".", "all", "(", ")" ]
Get all votes attached to a CandidateElection for a Candidate in this election.
[ "Get", "all", "votes", "attached", "to", "a", "CandidateElection", "for", "a", "Candidate", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L95-L104
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_votes
def get_votes(self):
    """
    Get all votes for this election.

    Combines the vote querysets of every CandidateElection attached to
    this election. Returns ``None`` when the election has no
    CandidateElections (preserving the original empty-case contract).
    """
    candidate_elections = CandidateElection.objects.filter(election=self)
    votes = None
    for ce in candidate_elections:
        # ``None | queryset`` raises TypeError, so seed the accumulator
        # with the first queryset and OR the rest onto it.
        votes = ce.votes.all() if votes is None else votes | ce.votes.all()
    return votes
python
def get_votes(self): """ Get all votes for this election. """ candidate_elections = CandidateElection.objects.filter(election=self) votes = None for ce in candidate_elections: votes = votes | ce.votes.all() return votes
[ "def", "get_votes", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "votes", "=", "None", "for", "ce", "in", "candidate_elections", ":", "votes", "=", "votes", "|", "ce", ".", "votes", ".", "all", "(", ")", "return", "votes" ]
Get all votes for this election.
[ "Get", "all", "votes", "for", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L106-L116
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidate_electoral_votes
def get_candidate_electoral_votes(self, candidate):
    """
    Return all electoral votes for a candidate in this election.
    """
    ce = CandidateElection.objects.get(candidate=candidate, election=self)
    return ce.electoral_votes.all()
python
def get_candidate_electoral_votes(self, candidate): """ Get all electoral votes for a candidate in this election. """ candidate_election = CandidateElection.objects.get( candidate=candidate, election=self ) return candidate_election.electoral_votes.all()
[ "def", "get_candidate_electoral_votes", "(", "self", ",", "candidate", ")", ":", "candidate_election", "=", "CandidateElection", ".", "objects", ".", "get", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ")", "return", "candidate_election", ".", "electoral_votes", ".", "all", "(", ")" ]
Get all electoral votes for a candidate in this election.
[ "Get", "all", "electoral", "votes", "for", "a", "candidate", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L118-L126
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_electoral_votes
def get_electoral_votes(self):
    """
    Get all electoral votes for all candidates in this election.

    Combines the electoral-vote querysets of every CandidateElection on
    this election. Returns ``None`` when there are no CandidateElections
    (preserving the original empty-case contract).
    """
    electoral_votes = None
    for ce in CandidateElection.objects.filter(election=self):
        # ``None | queryset`` raises TypeError; seed with the first
        # queryset before OR-ing the rest.
        if electoral_votes is None:
            electoral_votes = ce.electoral_votes.all()
        else:
            electoral_votes = electoral_votes | ce.electoral_votes.all()
    return electoral_votes
python
def get_electoral_votes(self): """ Get all electoral votes for all candidates in this election. """ candidate_elections = CandidateElection.objects.filter(election=self) electoral_votes = None for ce in candidate_elections: electoral_votes = electoral_votes | ce.electoral_votes.all() return electoral_votes
[ "def", "get_electoral_votes", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "electoral_votes", "=", "None", "for", "ce", "in", "candidate_elections", ":", "electoral_votes", "=", "electoral_votes", "|", "ce", ".", "electoral_votes", ".", "all", "(", ")", "return", "electoral_votes" ]
Get all electoral votes for all candidates in this election.
[ "Get", "all", "electoral", "votes", "for", "all", "candidates", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L128-L138
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_candidate_delegates
def get_candidate_delegates(self, candidate):
    """
    Return all pledged delegates for a candidate in this election.
    """
    ce = CandidateElection.objects.get(candidate=candidate, election=self)
    return ce.delegates.all()
python
def get_candidate_delegates(self, candidate): """ Get all pledged delegates for a candidate in this election. """ candidate_election = CandidateElection.objects.get( candidate=candidate, election=self ) return candidate_election.delegates.all()
[ "def", "get_candidate_delegates", "(", "self", ",", "candidate", ")", ":", "candidate_election", "=", "CandidateElection", ".", "objects", ".", "get", "(", "candidate", "=", "candidate", ",", "election", "=", "self", ")", "return", "candidate_election", ".", "delegates", ".", "all", "(", ")" ]
Get all pledged delegates for a candidate in this election.
[ "Get", "all", "pledged", "delegates", "for", "a", "candidate", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L140-L148
train
The-Politico/politico-civic-election
election/models/election.py
Election.get_delegates
def get_delegates(self):
    """
    Get all pledged delegates for any candidate in this election.

    Combines the delegate querysets of every CandidateElection on this
    election. Returns ``None`` when there are no CandidateElections
    (preserving the original empty-case contract).
    """
    delegates = None
    for ce in CandidateElection.objects.filter(election=self):
        # ``None | queryset`` raises TypeError; seed with the first
        # queryset before OR-ing the rest.
        if delegates is None:
            delegates = ce.delegates.all()
        else:
            delegates = delegates | ce.delegates.all()
    return delegates
python
def get_delegates(self): """ Get all pledged delegates for any candidate in this election. """ candidate_elections = CandidateElection.objects.filter(election=self) delegates = None for ce in candidate_elections: delegates = delegates | ce.delegates.all() return delegates
[ "def", "get_delegates", "(", "self", ")", ":", "candidate_elections", "=", "CandidateElection", ".", "objects", ".", "filter", "(", "election", "=", "self", ")", "delegates", "=", "None", "for", "ce", "in", "candidate_elections", ":", "delegates", "=", "delegates", "|", "ce", ".", "delegates", ".", "all", "(", ")", "return", "delegates" ]
Get all pledged delegates for any candidate in this election.
[ "Get", "all", "pledged", "delegates", "for", "any", "candidate", "in", "this", "election", "." ]
44c6872c419909df616e997e1990c4d295b25eda
https://github.com/The-Politico/politico-civic-election/blob/44c6872c419909df616e997e1990c4d295b25eda/election/models/election.py#L150-L160
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_packet_names
def list_packet_names(self): """ Returns the existing packet names. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/packet-names'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.GetPacketNamesResponse() message.ParseFromString(response.content) names = getattr(message, 'name') return iter(names)
python
def list_packet_names(self): """ Returns the existing packet names. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/packet-names'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.GetPacketNamesResponse() message.ParseFromString(response.content) names = getattr(message, 'name') return iter(names)
[ "def", "list_packet_names", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/packet-names'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "archive_pb2", ".", "GetPacketNamesResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "names", "=", "getattr", "(", "message", ",", "'name'", ")", "return", "iter", "(", "names", ")" ]
Returns the existing packet names. :rtype: ~collections.Iterable[str]
[ "Returns", "the", "existing", "packet", "names", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L37-L50
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_processed_parameter_groups
def list_processed_parameter_groups(self): """ Returns the existing parameter groups. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/parameter-groups'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.ParameterGroupInfo() message.ParseFromString(response.content) groups = getattr(message, 'group') return iter(groups)
python
def list_processed_parameter_groups(self): """ Returns the existing parameter groups. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/parameter-groups'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.ParameterGroupInfo() message.ParseFromString(response.content) groups = getattr(message, 'group') return iter(groups)
[ "def", "list_processed_parameter_groups", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/parameter-groups'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "archive_pb2", ".", "ParameterGroupInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "groups", "=", "getattr", "(", "message", ",", "'group'", ")", "return", "iter", "(", "groups", ")" ]
Returns the existing parameter groups. :rtype: ~collections.Iterable[str]
[ "Returns", "the", "existing", "parameter", "groups", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L81-L94
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_processed_parameter_group_histogram
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20): """ Reads index records related to processed parameter groups between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :param float merge_time: Maximum gap in seconds before two consecutive index records are merged together. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if group is not None: params['group'] = group if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if merge_time is not None: params['mergeTime'] = int(merge_time * 1000) return pagination.Iterator( client=self._client, path='/archive/{}/parameter-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
python
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20): """ Reads index records related to processed parameter groups between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :param float merge_time: Maximum gap in seconds before two consecutive index records are merged together. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if group is not None: params['group'] = group if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if merge_time is not None: params['mergeTime'] = int(merge_time * 1000) return pagination.Iterator( client=self._client, path='/archive/{}/parameter-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
[ "def", "list_processed_parameter_group_histogram", "(", "self", ",", "group", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "merge_time", "=", "20", ")", ":", "params", "=", "{", "}", "if", "group", "is", "not", "None", ":", "params", "[", "'group'", "]", "=", "group", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "merge_time", "is", "not", "None", ":", "params", "[", "'mergeTime'", "]", "=", "int", "(", "merge_time", "*", "1000", ")", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "'/archive/{}/parameter-index'", ".", "format", "(", "self", ".", "_instance", ")", ",", "params", "=", "params", ",", "response_class", "=", "archive_pb2", ".", "IndexResponse", ",", "items_key", "=", "'group'", ",", "item_mapper", "=", "IndexGroup", ",", ")" ]
Reads index records related to processed parameter groups between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :param float merge_time: Maximum gap in seconds before two consecutive index records are merged together. :rtype: ~collections.Iterable[.IndexGroup]
[ "Reads", "index", "records", "related", "to", "processed", "parameter", "groups", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L96-L123
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_event_sources
def list_event_sources(self): """ Returns the existing event sources. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/events/sources'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.EventSourceInfo() message.ParseFromString(response.content) sources = getattr(message, 'source') return iter(sources)
python
def list_event_sources(self): """ Returns the existing event sources. :rtype: ~collections.Iterable[str] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/events/sources'.format(self._instance) response = self._client.get_proto(path=path) message = archive_pb2.EventSourceInfo() message.ParseFromString(response.content) sources = getattr(message, 'source') return iter(sources)
[ "def", "list_event_sources", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/events/sources'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "archive_pb2", ".", "EventSourceInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "sources", "=", "getattr", "(", "message", ",", "'source'", ")", "return", "iter", "(", "sources", ")" ]
Returns the existing event sources. :rtype: ~collections.Iterable[str]
[ "Returns", "the", "existing", "event", "sources", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L125-L138
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_completeness_index
def list_completeness_index(self, start=None, stop=None): """ Reads completeness index records between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/completeness-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
python
def list_completeness_index(self, start=None, stop=None): """ Reads completeness index records between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/completeness-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
[ "def", "list_completeness_index", "(", "self", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "params", "=", "{", "}", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "'/archive/{}/completeness-index'", ".", "format", "(", "self", ".", "_instance", ")", ",", "params", "=", "params", ",", "response_class", "=", "archive_pb2", ".", "IndexResponse", ",", "items_key", "=", "'group'", ",", "item_mapper", "=", "IndexGroup", ",", ")" ]
Reads completeness index records between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :rtype: ~collections.Iterable[.IndexGroup]
[ "Reads", "completeness", "index", "records", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L198-L220
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_packets
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False): """ Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum genreation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet] """ params = { 'order': 'desc' if descending else 'asc', } if name is not None: params['name'] = name if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/packets'.format(self._instance), params=params, response_class=rest_pb2.ListPacketsResponse, items_key='packet', item_mapper=Packet, )
python
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False): """ Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum genreation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet] """ params = { 'order': 'desc' if descending else 'asc', } if name is not None: params['name'] = name if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/packets'.format(self._instance), params=params, response_class=rest_pb2.ListPacketsResponse, items_key='packet', item_mapper=Packet, )
[ "def", "list_packets", "(", "self", ",", "name", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "page_size", "=", "500", ",", "descending", "=", "False", ")", ":", "params", "=", "{", "'order'", ":", "'desc'", "if", "descending", "else", "'asc'", ",", "}", "if", "name", "is", "not", "None", ":", "params", "[", "'name'", "]", "=", "name", "if", "page_size", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "page_size", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "'/archive/{}/packets'", ".", "format", "(", "self", ".", "_instance", ")", ",", "params", "=", "params", ",", "response_class", "=", "rest_pb2", ".", "ListPacketsResponse", ",", "items_key", "=", "'packet'", ",", "item_mapper", "=", "Packet", ",", ")" ]
Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum genreation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet]
[ "Reads", "packet", "information", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L222-L258
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_events
def list_events(self, source=None, severity=None, text_filter=None, start=None, stop=None, page_size=500, descending=False): """ Reads events between the specified start and stop time. Events are sorted by generation time, source, then sequence number. :param str source: The source of the returned events. :param str severity: The minimum severity level of the returned events. One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. :param str text_filter: Filter the text message of the returned events :param ~datetime.datetime start: Minimum start date of the returned events (inclusive) :param ~datetime.datetime stop: Maximum start date of the returned events (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` events are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Event] """ params = { 'order': 'desc' if descending else 'asc', } if source is not None: params['source'] = source if page_size is not None: params['limit'] = page_size if severity is not None: params['severity'] = severity if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if text_filter is not None: params['q'] = text_filter return pagination.Iterator( client=self._client, path='/archive/{}/events'.format(self._instance), params=params, response_class=rest_pb2.ListEventsResponse, items_key='event', item_mapper=Event, )
python
def list_events(self, source=None, severity=None, text_filter=None, start=None, stop=None, page_size=500, descending=False): """ Reads events between the specified start and stop time. Events are sorted by generation time, source, then sequence number. :param str source: The source of the returned events. :param str severity: The minimum severity level of the returned events. One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. :param str text_filter: Filter the text message of the returned events :param ~datetime.datetime start: Minimum start date of the returned events (inclusive) :param ~datetime.datetime stop: Maximum start date of the returned events (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` events are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Event] """ params = { 'order': 'desc' if descending else 'asc', } if source is not None: params['source'] = source if page_size is not None: params['limit'] = page_size if severity is not None: params['severity'] = severity if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if text_filter is not None: params['q'] = text_filter return pagination.Iterator( client=self._client, path='/archive/{}/events'.format(self._instance), params=params, response_class=rest_pb2.ListEventsResponse, items_key='event', item_mapper=Event, )
[ "def", "list_events", "(", "self", ",", "source", "=", "None", ",", "severity", "=", "None", ",", "text_filter", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "page_size", "=", "500", ",", "descending", "=", "False", ")", ":", "params", "=", "{", "'order'", ":", "'desc'", "if", "descending", "else", "'asc'", ",", "}", "if", "source", "is", "not", "None", ":", "params", "[", "'source'", "]", "=", "source", "if", "page_size", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "page_size", "if", "severity", "is", "not", "None", ":", "params", "[", "'severity'", "]", "=", "severity", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "text_filter", "is", "not", "None", ":", "params", "[", "'q'", "]", "=", "text_filter", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "'/archive/{}/events'", ".", "format", "(", "self", ".", "_instance", ")", ",", "params", "=", "params", ",", "response_class", "=", "rest_pb2", ".", "ListEventsResponse", ",", "items_key", "=", "'event'", ",", "item_mapper", "=", "Event", ",", ")" ]
Reads events between the specified start and stop time. Events are sorted by generation time, source, then sequence number. :param str source: The source of the returned events. :param str severity: The minimum severity level of the returned events. One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. :param str text_filter: Filter the text message of the returned events :param ~datetime.datetime start: Minimum start date of the returned events (inclusive) :param ~datetime.datetime stop: Maximum start date of the returned events (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` events are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Event]
[ "Reads", "events", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L275-L318
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.sample_parameter_values
def sample_parameter_values(self, parameter, start=None, stop=None, sample_count=500, parameter_cache='realtime', source='ParameterArchive'): """ Returns parameter samples. The query range is split in sample intervals of equal length. For each interval a :class:`.Sample` is returned which describes the min, max, count and avg during that interval. Note that sample times are determined without considering the actual parameter values. Two separate queries with equal start/stop arguments will always return the same number of samples with the same timestamps. This is done to ease merging of multiple sample series. You should always be explicit about the ``start`` and ``stop`` times when relying on this property. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the sampled parameter values (inclusive). If not set this defaults to one hour ago. :param ~datetime.datetime stop: Maximum generation time of the sampled parameter values (exclusive). If not set this defaults to the current time. :param int sample_count: The number of returned samples. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. 
:rtype: .Sample[] """ path = '/archive/{}/parameters{}/samples'.format( self._instance, parameter) now = datetime.utcnow() params = { 'count': sample_count, 'source': source, 'start': to_isostring(now - timedelta(hours=1)), 'stop': to_isostring(now), } if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True response = self._client.get_proto(path=path, params=params) message = pvalue_pb2.TimeSeries() message.ParseFromString(response.content) samples = getattr(message, 'sample') return [Sample(s) for s in samples]
python
def sample_parameter_values(self, parameter, start=None, stop=None, sample_count=500, parameter_cache='realtime', source='ParameterArchive'): """ Returns parameter samples. The query range is split in sample intervals of equal length. For each interval a :class:`.Sample` is returned which describes the min, max, count and avg during that interval. Note that sample times are determined without considering the actual parameter values. Two separate queries with equal start/stop arguments will always return the same number of samples with the same timestamps. This is done to ease merging of multiple sample series. You should always be explicit about the ``start`` and ``stop`` times when relying on this property. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the sampled parameter values (inclusive). If not set this defaults to one hour ago. :param ~datetime.datetime stop: Maximum generation time of the sampled parameter values (exclusive). If not set this defaults to the current time. :param int sample_count: The number of returned samples. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. 
:rtype: .Sample[] """ path = '/archive/{}/parameters{}/samples'.format( self._instance, parameter) now = datetime.utcnow() params = { 'count': sample_count, 'source': source, 'start': to_isostring(now - timedelta(hours=1)), 'stop': to_isostring(now), } if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True response = self._client.get_proto(path=path, params=params) message = pvalue_pb2.TimeSeries() message.ParseFromString(response.content) samples = getattr(message, 'sample') return [Sample(s) for s in samples]
[ "def", "sample_parameter_values", "(", "self", ",", "parameter", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "sample_count", "=", "500", ",", "parameter_cache", "=", "'realtime'", ",", "source", "=", "'ParameterArchive'", ")", ":", "path", "=", "'/archive/{}/parameters{}/samples'", ".", "format", "(", "self", ".", "_instance", ",", "parameter", ")", "now", "=", "datetime", ".", "utcnow", "(", ")", "params", "=", "{", "'count'", ":", "sample_count", ",", "'source'", ":", "source", ",", "'start'", ":", "to_isostring", "(", "now", "-", "timedelta", "(", "hours", "=", "1", ")", ")", ",", "'stop'", ":", "to_isostring", "(", "now", ")", ",", "}", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "parameter_cache", ":", "params", "[", "'processor'", "]", "=", "parameter_cache", "else", ":", "params", "[", "'norealtime'", "]", "=", "True", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ",", "params", "=", "params", ")", "message", "=", "pvalue_pb2", ".", "TimeSeries", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "samples", "=", "getattr", "(", "message", ",", "'sample'", ")", "return", "[", "Sample", "(", "s", ")", "for", "s", "in", "samples", "]" ]
Returns parameter samples. The query range is split in sample intervals of equal length. For each interval a :class:`.Sample` is returned which describes the min, max, count and avg during that interval. Note that sample times are determined without considering the actual parameter values. Two separate queries with equal start/stop arguments will always return the same number of samples with the same timestamps. This is done to ease merging of multiple sample series. You should always be explicit about the ``start`` and ``stop`` times when relying on this property. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the sampled parameter values (inclusive). If not set this defaults to one hour ago. :param ~datetime.datetime stop: Maximum generation time of the sampled parameter values (exclusive). If not set this defaults to the current time. :param int sample_count: The number of returned samples. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. :rtype: .Sample[]
[ "Returns", "parameter", "samples", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L320-L382
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_parameter_ranges
def list_parameter_ranges(self, parameter, start=None, stop=None, min_gap=None, max_gap=None, parameter_cache='realtime'): """ Returns parameter ranges between the specified start and stop time. Each range indicates an interval during which this parameter's value was uninterrupted and unchanged. Ranges are a good fit for retrieving the value of a parameter that does not change frequently. For example an on/off indicator or some operational status. Querying ranges will then induce much less overhead than manually processing the output of :meth:`list_parameter_values` would. The maximum number of returned ranges is limited to 500. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the considered values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the considered values (exclusive) :param float min_gap: Time in seconds. Any gap (detected based on parameter expiration) smaller than this will be ignored. However if the parameter changes value, the ranges will still be split. :param float max_gap: Time in seconds. If the distance between two subsequent parameter values is bigger than this value (but smaller than the parameter expiration), then an artificial gap is created. This also applies if there is no expiration defined for the parameter. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. 
:rtype: .ParameterRange[] """ path = '/archive/{}/parameters{}/ranges'.format( self._instance, parameter) params = {} if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if min_gap is not None: params['minGap'] = int(min_gap * 1000) if max_gap is not None: params['maxGap'] = int(max_gap * 1000) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True response = self._client.get_proto(path=path, params=params) message = pvalue_pb2.Ranges() message.ParseFromString(response.content) ranges = getattr(message, 'range') return [ParameterRange(r) for r in ranges]
python
def list_parameter_ranges(self, parameter, start=None, stop=None, min_gap=None, max_gap=None, parameter_cache='realtime'): """ Returns parameter ranges between the specified start and stop time. Each range indicates an interval during which this parameter's value was uninterrupted and unchanged. Ranges are a good fit for retrieving the value of a parameter that does not change frequently. For example an on/off indicator or some operational status. Querying ranges will then induce much less overhead than manually processing the output of :meth:`list_parameter_values` would. The maximum number of returned ranges is limited to 500. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the considered values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the considered values (exclusive) :param float min_gap: Time in seconds. Any gap (detected based on parameter expiration) smaller than this will be ignored. However if the parameter changes value, the ranges will still be split. :param float max_gap: Time in seconds. If the distance between two subsequent parameter values is bigger than this value (but smaller than the parameter expiration), then an artificial gap is created. This also applies if there is no expiration defined for the parameter. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. 
:rtype: .ParameterRange[] """ path = '/archive/{}/parameters{}/ranges'.format( self._instance, parameter) params = {} if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if min_gap is not None: params['minGap'] = int(min_gap * 1000) if max_gap is not None: params['maxGap'] = int(max_gap * 1000) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True response = self._client.get_proto(path=path, params=params) message = pvalue_pb2.Ranges() message.ParseFromString(response.content) ranges = getattr(message, 'range') return [ParameterRange(r) for r in ranges]
[ "def", "list_parameter_ranges", "(", "self", ",", "parameter", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "min_gap", "=", "None", ",", "max_gap", "=", "None", ",", "parameter_cache", "=", "'realtime'", ")", ":", "path", "=", "'/archive/{}/parameters{}/ranges'", ".", "format", "(", "self", ".", "_instance", ",", "parameter", ")", "params", "=", "{", "}", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "min_gap", "is", "not", "None", ":", "params", "[", "'minGap'", "]", "=", "int", "(", "min_gap", "*", "1000", ")", "if", "max_gap", "is", "not", "None", ":", "params", "[", "'maxGap'", "]", "=", "int", "(", "max_gap", "*", "1000", ")", "if", "parameter_cache", ":", "params", "[", "'processor'", "]", "=", "parameter_cache", "else", ":", "params", "[", "'norealtime'", "]", "=", "True", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ",", "params", "=", "params", ")", "message", "=", "pvalue_pb2", ".", "Ranges", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "ranges", "=", "getattr", "(", "message", ",", "'range'", ")", "return", "[", "ParameterRange", "(", "r", ")", "for", "r", "in", "ranges", "]" ]
Returns parameter ranges between the specified start and stop time. Each range indicates an interval during which this parameter's value was uninterrupted and unchanged. Ranges are a good fit for retrieving the value of a parameter that does not change frequently. For example an on/off indicator or some operational status. Querying ranges will then induce much less overhead than manually processing the output of :meth:`list_parameter_values` would. The maximum number of returned ranges is limited to 500. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the considered values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the considered values (exclusive) :param float min_gap: Time in seconds. Any gap (detected based on parameter expiration) smaller than this will be ignored. However if the parameter changes value, the ranges will still be split. :param float max_gap: Time in seconds. If the distance between two subsequent parameter values is bigger than this value (but smaller than the parameter expiration), then an artificial gap is created. This also applies if there is no expiration defined for the parameter. :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :rtype: .ParameterRange[]
[ "Returns", "parameter", "ranges", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L384-L444
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_parameter_values
def list_parameter_values(self, parameter, start=None, stop=None, page_size=500, descending=False, parameter_cache='realtime', source='ParameterArchive'): """ Reads parameter values between the specified start and stop time. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned values (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` values are fetched in reverse order (most recent first). :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. :rtype: ~collections.Iterable[.ParameterValue] """ params = { 'source': source, 'order': 'desc' if descending else 'asc', } if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True return pagination.Iterator( client=self._client, path='/archive/{}/parameters{}'.format(self._instance, parameter), params=params, response_class=rest_pb2.ListParameterValuesResponse, items_key='parameter', item_mapper=ParameterValue, )
python
def list_parameter_values(self, parameter, start=None, stop=None, page_size=500, descending=False, parameter_cache='realtime', source='ParameterArchive'): """ Reads parameter values between the specified start and stop time. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned values (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` values are fetched in reverse order (most recent first). :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. :rtype: ~collections.Iterable[.ParameterValue] """ params = { 'source': source, 'order': 'desc' if descending else 'asc', } if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if parameter_cache: params['processor'] = parameter_cache else: params['norealtime'] = True return pagination.Iterator( client=self._client, path='/archive/{}/parameters{}'.format(self._instance, parameter), params=params, response_class=rest_pb2.ListParameterValuesResponse, items_key='parameter', item_mapper=ParameterValue, )
[ "def", "list_parameter_values", "(", "self", ",", "parameter", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "page_size", "=", "500", ",", "descending", "=", "False", ",", "parameter_cache", "=", "'realtime'", ",", "source", "=", "'ParameterArchive'", ")", ":", "params", "=", "{", "'source'", ":", "source", ",", "'order'", ":", "'desc'", "if", "descending", "else", "'asc'", ",", "}", "if", "page_size", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "page_size", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "parameter_cache", ":", "params", "[", "'processor'", "]", "=", "parameter_cache", "else", ":", "params", "[", "'norealtime'", "]", "=", "True", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "'/archive/{}/parameters{}'", ".", "format", "(", "self", ".", "_instance", ",", "parameter", ")", ",", "params", "=", "params", ",", "response_class", "=", "rest_pb2", ".", "ListParameterValuesResponse", ",", "items_key", "=", "'parameter'", ",", "item_mapper", "=", "ParameterValue", ",", ")" ]
Reads parameter values between the specified start and stop time. :param str parameter: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned values (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned values (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` values are fetched in reverse order (most recent first). :param str parameter_cache: Specify the name of the processor who's parameter cache is merged with already archived values. To disable results from the parameter cache, set this to ``None``. :param str source: Specify how to retrieve parameter values. By default this uses the ``ParameterArchive`` which is optimized for retrieval. For Yamcs instances that do not enable the ``ParameterArchive``, you can still get results by specifying ``replay`` as the source. Replay requests take longer to return because the data needs to be reprocessed. :rtype: ~collections.Iterable[.ParameterValue]
[ "Reads", "parameter", "values", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L446-L499
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_command_history
def list_command_history(self, command=None, start=None, stop=None, page_size=500, descending=False): """ Reads command history entries between the specified start and stop time. :param str command: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned command history entries (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned command history entries (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` results are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.CommandHistory] """ params = { 'order': 'desc' if descending else 'asc', } if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if command: path = '/archive/{}/commands{}'.format(self._instance, command) else: path = '/archive/{}/commands'.format(self._instance) return pagination.Iterator( client=self._client, path=path, params=params, response_class=rest_pb2.ListCommandsResponse, items_key='entry', item_mapper=CommandHistory, )
python
def list_command_history(self, command=None, start=None, stop=None, page_size=500, descending=False): """ Reads command history entries between the specified start and stop time. :param str command: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned command history entries (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned command history entries (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` results are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.CommandHistory] """ params = { 'order': 'desc' if descending else 'asc', } if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if command: path = '/archive/{}/commands{}'.format(self._instance, command) else: path = '/archive/{}/commands'.format(self._instance) return pagination.Iterator( client=self._client, path=path, params=params, response_class=rest_pb2.ListCommandsResponse, items_key='entry', item_mapper=CommandHistory, )
[ "def", "list_command_history", "(", "self", ",", "command", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "page_size", "=", "500", ",", "descending", "=", "False", ")", ":", "params", "=", "{", "'order'", ":", "'desc'", "if", "descending", "else", "'asc'", ",", "}", "if", "page_size", "is", "not", "None", ":", "params", "[", "'limit'", "]", "=", "page_size", "if", "start", "is", "not", "None", ":", "params", "[", "'start'", "]", "=", "to_isostring", "(", "start", ")", "if", "stop", "is", "not", "None", ":", "params", "[", "'stop'", "]", "=", "to_isostring", "(", "stop", ")", "if", "command", ":", "path", "=", "'/archive/{}/commands{}'", ".", "format", "(", "self", ".", "_instance", ",", "command", ")", "else", ":", "path", "=", "'/archive/{}/commands'", ".", "format", "(", "self", ".", "_instance", ")", "return", "pagination", ".", "Iterator", "(", "client", "=", "self", ".", "_client", ",", "path", "=", "path", ",", "params", "=", "params", ",", "response_class", "=", "rest_pb2", ".", "ListCommandsResponse", ",", "items_key", "=", "'entry'", ",", "item_mapper", "=", "CommandHistory", ",", ")" ]
Reads command history entries between the specified start and stop time. :param str command: Either a fully-qualified XTCE name or an alias in the format ``NAMESPACE/NAME``. :param ~datetime.datetime start: Minimum generation time of the returned command history entries (inclusive) :param ~datetime.datetime stop: Maximum generation time of the returned command history entries (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` results are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.CommandHistory]
[ "Reads", "command", "history", "entries", "between", "the", "specified", "start", "and", "stop", "time", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L501-L540
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_tables
def list_tables(self): """ Returns the existing tables. Tables are returned in lexicographical order. :rtype: ~collections.Iterable[.Table] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/tables'.format(self._instance) response = self._client.get_proto(path=path) message = rest_pb2.ListTablesResponse() message.ParseFromString(response.content) tables = getattr(message, 'table') return iter([Table(table) for table in tables])
python
def list_tables(self): """ Returns the existing tables. Tables are returned in lexicographical order. :rtype: ~collections.Iterable[.Table] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/tables'.format(self._instance) response = self._client.get_proto(path=path) message = rest_pb2.ListTablesResponse() message.ParseFromString(response.content) tables = getattr(message, 'table') return iter([Table(table) for table in tables])
[ "def", "list_tables", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/tables'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "rest_pb2", ".", "ListTablesResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "tables", "=", "getattr", "(", "message", ",", "'table'", ")", "return", "iter", "(", "[", "Table", "(", "table", ")", "for", "table", "in", "tables", "]", ")" ]
Returns the existing tables. Tables are returned in lexicographical order. :rtype: ~collections.Iterable[.Table]
[ "Returns", "the", "existing", "tables", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L542-L557
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.get_table
def get_table(self, table): """ Gets a single table. :param str table: The name of the table. :rtype: .Table """ path = '/archive/{}/tables/{}'.format(self._instance, table) response = self._client.get_proto(path=path) message = archive_pb2.TableInfo() message.ParseFromString(response.content) return Table(message)
python
def get_table(self, table): """ Gets a single table. :param str table: The name of the table. :rtype: .Table """ path = '/archive/{}/tables/{}'.format(self._instance, table) response = self._client.get_proto(path=path) message = archive_pb2.TableInfo() message.ParseFromString(response.content) return Table(message)
[ "def", "get_table", "(", "self", ",", "table", ")", ":", "path", "=", "'/archive/{}/tables/{}'", ".", "format", "(", "self", ".", "_instance", ",", "table", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "archive_pb2", ".", "TableInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "return", "Table", "(", "message", ")" ]
Gets a single table. :param str table: The name of the table. :rtype: .Table
[ "Gets", "a", "single", "table", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L559-L570
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.list_streams
def list_streams(self): """ Returns the existing streams. Streams are returned in lexicographical order. :rtype: ~collections.Iterable[.Stream] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/streams'.format(self._instance) response = self._client.get_proto(path=path) message = rest_pb2.ListStreamsResponse() message.ParseFromString(response.content) streams = getattr(message, 'stream') return iter([Stream(stream) for stream in streams])
python
def list_streams(self): """ Returns the existing streams. Streams are returned in lexicographical order. :rtype: ~collections.Iterable[.Stream] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods path = '/archive/{}/streams'.format(self._instance) response = self._client.get_proto(path=path) message = rest_pb2.ListStreamsResponse() message.ParseFromString(response.content) streams = getattr(message, 'stream') return iter([Stream(stream) for stream in streams])
[ "def", "list_streams", "(", "self", ")", ":", "# Server does not do pagination on listings of this resource.", "# Return an iterator anyway for similarity with other API methods", "path", "=", "'/archive/{}/streams'", ".", "format", "(", "self", ".", "_instance", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "rest_pb2", ".", "ListStreamsResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "streams", "=", "getattr", "(", "message", ",", "'stream'", ")", "return", "iter", "(", "[", "Stream", "(", "stream", ")", "for", "stream", "in", "streams", "]", ")" ]
Returns the existing streams. Streams are returned in lexicographical order. :rtype: ~collections.Iterable[.Stream]
[ "Returns", "the", "existing", "streams", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L585-L600
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.get_stream
def get_stream(self, stream): """ Gets a single stream. :param str stream: The name of the stream. :rtype: .Stream """ path = '/archive/{}/streams/{}'.format(self._instance, stream) response = self._client.get_proto(path=path) message = archive_pb2.StreamInfo() message.ParseFromString(response.content) return Stream(message)
python
def get_stream(self, stream): """ Gets a single stream. :param str stream: The name of the stream. :rtype: .Stream """ path = '/archive/{}/streams/{}'.format(self._instance, stream) response = self._client.get_proto(path=path) message = archive_pb2.StreamInfo() message.ParseFromString(response.content) return Stream(message)
[ "def", "get_stream", "(", "self", ",", "stream", ")", ":", "path", "=", "'/archive/{}/streams/{}'", ".", "format", "(", "self", ".", "_instance", ",", "stream", ")", "response", "=", "self", ".", "_client", ".", "get_proto", "(", "path", "=", "path", ")", "message", "=", "archive_pb2", ".", "StreamInfo", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "return", "Stream", "(", "message", ")" ]
Gets a single stream. :param str stream: The name of the stream. :rtype: .Stream
[ "Gets", "a", "single", "stream", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L602-L613
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.create_stream_subscription
def create_stream_subscription(self, stream, on_data, timeout=60): """ Create a new stream subscription. :param str stream: The name of the stream. :param on_data: Function that gets called with :class:`.StreamData` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .WebSocketSubscriptionFuture """ options = rest_pb2.StreamSubscribeRequest() options.stream = stream manager = WebSocketSubscriptionManager( self._client, resource='stream', options=options) # Represent subscription as a future subscription = WebSocketSubscriptionFuture(manager) wrapped_callback = functools.partial( _wrap_callback_parse_stream_data, subscription, on_data) manager.open(wrapped_callback, instance=self._instance) # Wait until a reply or exception is received subscription.reply(timeout=timeout) return subscription
python
def create_stream_subscription(self, stream, on_data, timeout=60): """ Create a new stream subscription. :param str stream: The name of the stream. :param on_data: Function that gets called with :class:`.StreamData` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .WebSocketSubscriptionFuture """ options = rest_pb2.StreamSubscribeRequest() options.stream = stream manager = WebSocketSubscriptionManager( self._client, resource='stream', options=options) # Represent subscription as a future subscription = WebSocketSubscriptionFuture(manager) wrapped_callback = functools.partial( _wrap_callback_parse_stream_data, subscription, on_data) manager.open(wrapped_callback, instance=self._instance) # Wait until a reply or exception is received subscription.reply(timeout=timeout) return subscription
[ "def", "create_stream_subscription", "(", "self", ",", "stream", ",", "on_data", ",", "timeout", "=", "60", ")", ":", "options", "=", "rest_pb2", ".", "StreamSubscribeRequest", "(", ")", "options", ".", "stream", "=", "stream", "manager", "=", "WebSocketSubscriptionManager", "(", "self", ".", "_client", ",", "resource", "=", "'stream'", ",", "options", "=", "options", ")", "# Represent subscription as a future", "subscription", "=", "WebSocketSubscriptionFuture", "(", "manager", ")", "wrapped_callback", "=", "functools", ".", "partial", "(", "_wrap_callback_parse_stream_data", ",", "subscription", ",", "on_data", ")", "manager", ".", "open", "(", "wrapped_callback", ",", "instance", "=", "self", ".", "_instance", ")", "# Wait until a reply or exception is received", "subscription", ".", "reply", "(", "timeout", "=", "timeout", ")", "return", "subscription" ]
Create a new stream subscription. :param str stream: The name of the stream. :param on_data: Function that gets called with :class:`.StreamData` updates. :param float timeout: The amount of seconds to wait for the request to complete. :return: Future that can be used to manage the background websocket subscription :rtype: .WebSocketSubscriptionFuture
[ "Create", "a", "new", "stream", "subscription", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L615-L645
train
yamcs/yamcs-python
yamcs-client/yamcs/archive/client.py
ArchiveClient.execute_sql
def execute_sql(self, statement): """ Executes a single SQL statement. :param statement: SQL string :return: String response :rtype: str """ path = '/archive/{}/sql'.format(self._instance) req = archive_pb2.ExecuteSqlRequest() req.statement = statement response = self._client.post_proto(path=path, data=req.SerializeToString()) message = archive_pb2.ExecuteSqlResponse() message.ParseFromString(response.content) if message.HasField('result'): return message.result return None
python
def execute_sql(self, statement): """ Executes a single SQL statement. :param statement: SQL string :return: String response :rtype: str """ path = '/archive/{}/sql'.format(self._instance) req = archive_pb2.ExecuteSqlRequest() req.statement = statement response = self._client.post_proto(path=path, data=req.SerializeToString()) message = archive_pb2.ExecuteSqlResponse() message.ParseFromString(response.content) if message.HasField('result'): return message.result return None
[ "def", "execute_sql", "(", "self", ",", "statement", ")", ":", "path", "=", "'/archive/{}/sql'", ".", "format", "(", "self", ".", "_instance", ")", "req", "=", "archive_pb2", ".", "ExecuteSqlRequest", "(", ")", "req", ".", "statement", "=", "statement", "response", "=", "self", ".", "_client", ".", "post_proto", "(", "path", "=", "path", ",", "data", "=", "req", ".", "SerializeToString", "(", ")", ")", "message", "=", "archive_pb2", ".", "ExecuteSqlResponse", "(", ")", "message", ".", "ParseFromString", "(", "response", ".", "content", ")", "if", "message", ".", "HasField", "(", "'result'", ")", ":", "return", "message", ".", "result", "return", "None" ]
Executes a single SQL statement. :param statement: SQL string :return: String response :rtype: str
[ "Executes", "a", "single", "SQL", "statement", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/archive/client.py#L647-L665
train
moin18/utilspie
utilspie/fileutils/file_utils.py
copy_file
def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False): """ Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None """ _File.copy(source, destination, unique, sort, case_sensitive, create_path)
python
def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False): """ Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None """ _File.copy(source, destination, unique, sort, case_sensitive, create_path)
[ "def", "copy_file", "(", "source", ",", "destination", ",", "unique", "=", "False", ",", "sort", "=", "False", ",", "case_sensitive", "=", "True", ",", "create_path", "=", "False", ")", ":", "_File", ".", "copy", "(", "source", ",", "destination", ",", "unique", ",", "sort", ",", "case_sensitive", ",", "create_path", ")" ]
Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None
[ "Python", "utility", "to", "create", "file" ]
ea96860b93fd058019a829847258e39323fef31f
https://github.com/moin18/utilspie/blob/ea96860b93fd058019a829847258e39323fef31f/utilspie/fileutils/file_utils.py#L34-L50
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.get_details
def get_details(self): """Finds songs details :return: Dictionary with songs details about title, artist, album and year """ title = str(self.get_title()).strip() artist = str(self.get_artist()).strip() album = str(self.get_album()).strip() year = str(self.get_year()).strip() return { "title": title, "artist": artist, "album": album, "year": year }
python
def get_details(self): """Finds songs details :return: Dictionary with songs details about title, artist, album and year """ title = str(self.get_title()).strip() artist = str(self.get_artist()).strip() album = str(self.get_album()).strip() year = str(self.get_year()).strip() return { "title": title, "artist": artist, "album": album, "year": year }
[ "def", "get_details", "(", "self", ")", ":", "title", "=", "str", "(", "self", ".", "get_title", "(", ")", ")", ".", "strip", "(", ")", "artist", "=", "str", "(", "self", ".", "get_artist", "(", ")", ")", ".", "strip", "(", ")", "album", "=", "str", "(", "self", ".", "get_album", "(", ")", ")", ".", "strip", "(", ")", "year", "=", "str", "(", "self", ".", "get_year", "(", ")", ")", ".", "strip", "(", ")", "return", "{", "\"title\"", ":", "title", ",", "\"artist\"", ":", "artist", ",", "\"album\"", ":", "album", ",", "\"year\"", ":", "year", "}" ]
Finds songs details :return: Dictionary with songs details about title, artist, album and year
[ "Finds", "songs", "details" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L51-L67
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song._set_attr
def _set_attr(self, attribute): """Sets attribute of song :param attribute: Attribute to save :return: True iff operation completed """ self.tags.add(attribute) self.song.save()
python
def _set_attr(self, attribute): """Sets attribute of song :param attribute: Attribute to save :return: True iff operation completed """ self.tags.add(attribute) self.song.save()
[ "def", "_set_attr", "(", "self", ",", "attribute", ")", ":", "self", ".", "tags", ".", "add", "(", "attribute", ")", "self", ".", "song", ".", "save", "(", ")" ]
Sets attribute of song :param attribute: Attribute to save :return: True iff operation completed
[ "Sets", "attribute", "of", "song" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L70-L78
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_title
def set_title(self, name): """Sets song's title :param name: title """ self._set_attr(TIT2(encoding=3, text=name.decode('utf-8')))
python
def set_title(self, name): """Sets song's title :param name: title """ self._set_attr(TIT2(encoding=3, text=name.decode('utf-8')))
[ "def", "set_title", "(", "self", ",", "name", ")", ":", "self", ".", "_set_attr", "(", "TIT2", "(", "encoding", "=", "3", ",", "text", "=", "name", ".", "decode", "(", "'utf-8'", ")", ")", ")" ]
Sets song's title :param name: title
[ "Sets", "song", "s", "title" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L80-L85
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_artist
def set_artist(self, artist): """Sets song's artist :param artist: artist """ self._set_attr(TPE1(encoding=3, text=artist.decode('utf-8')))
python
def set_artist(self, artist): """Sets song's artist :param artist: artist """ self._set_attr(TPE1(encoding=3, text=artist.decode('utf-8')))
[ "def", "set_artist", "(", "self", ",", "artist", ")", ":", "self", ".", "_set_attr", "(", "TPE1", "(", "encoding", "=", "3", ",", "text", "=", "artist", ".", "decode", "(", "'utf-8'", ")", ")", ")" ]
Sets song's artist :param artist: artist
[ "Sets", "song", "s", "artist" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L87-L92
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_album
def set_album(self, album): """Sets song's album :param album: album """ self._set_attr(TALB(encoding=3, text=album.decode('utf-8')))
python
def set_album(self, album): """Sets song's album :param album: album """ self._set_attr(TALB(encoding=3, text=album.decode('utf-8')))
[ "def", "set_album", "(", "self", ",", "album", ")", ":", "self", ".", "_set_attr", "(", "TALB", "(", "encoding", "=", "3", ",", "text", "=", "album", ".", "decode", "(", "'utf-8'", ")", ")", ")" ]
Sets song's album :param album: album
[ "Sets", "song", "s", "album" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L94-L99
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_nr_track
def set_nr_track(self, nr_track): """Sets song's track numb :param nr_track: of track """ self._set_attr(TRCK(encoding=3, text=str(nr_track)))
python
def set_nr_track(self, nr_track): """Sets song's track numb :param nr_track: of track """ self._set_attr(TRCK(encoding=3, text=str(nr_track)))
[ "def", "set_nr_track", "(", "self", ",", "nr_track", ")", ":", "self", ".", "_set_attr", "(", "TRCK", "(", "encoding", "=", "3", ",", "text", "=", "str", "(", "nr_track", ")", ")", ")" ]
Sets song's track numb :param nr_track: of track
[ "Sets", "song", "s", "track", "numb" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L101-L106
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_year
def set_year(self, year): """Sets song's year :param year: year """ self._set_attr(TDRC(encoding=3, text=str(year)))
python
def set_year(self, year): """Sets song's year :param year: year """ self._set_attr(TDRC(encoding=3, text=str(year)))
[ "def", "set_year", "(", "self", ",", "year", ")", ":", "self", ".", "_set_attr", "(", "TDRC", "(", "encoding", "=", "3", ",", "text", "=", "str", "(", "year", ")", ")", ")" ]
Sets song's year :param year: year
[ "Sets", "song", "s", "year" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L108-L113
train
sirfoga/pyhal
hal/files/models/audio.py
MP3Song.set_genre
def set_genre(self, genre): """Sets song's genre :param genre: genre """ self._set_attr(TCON(encoding=3, text=str(genre)))
python
def set_genre(self, genre): """Sets song's genre :param genre: genre """ self._set_attr(TCON(encoding=3, text=str(genre)))
[ "def", "set_genre", "(", "self", ",", "genre", ")", ":", "self", ".", "_set_attr", "(", "TCON", "(", "encoding", "=", "3", ",", "text", "=", "str", "(", "genre", ")", ")", ")" ]
Sets song's genre :param genre: genre
[ "Sets", "song", "s", "genre" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/files/models/audio.py#L115-L120
train
portfors-lab/sparkle
sparkle/gui/stim/stimulus_editor.py
StimulusEditor.updateTraceCount
def updateTraceCount(self): """Updates the trace count label with the data from the model""" self.ui.ntracesLbl.setNum(self.ui.trackview.model().traceCount())
python
def updateTraceCount(self): """Updates the trace count label with the data from the model""" self.ui.ntracesLbl.setNum(self.ui.trackview.model().traceCount())
[ "def", "updateTraceCount", "(", "self", ")", ":", "self", ".", "ui", ".", "ntracesLbl", ".", "setNum", "(", "self", ".", "ui", ".", "trackview", ".", "model", "(", ")", ".", "traceCount", "(", ")", ")" ]
Updates the trace count label with the data from the model
[ "Updates", "the", "trace", "count", "label", "with", "the", "data", "from", "the", "model" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulus_editor.py#L70-L72
train
portfors-lab/sparkle
sparkle/gui/stim/stimulus_editor.py
StimulusEditor.preview
def preview(self): """Assemble the current components in the QStimulusModel and generate a spectrogram plot in a separate window""" msg = self.ui.trackview.model().verify() if msg: answer = QtGui.QMessageBox.warning(self, "Bummer", 'Problem: {}.'.format(msg)) return stim_signal, atten, ovld = self.ui.trackview.model().signal() fig = SpecWidget() fig.setWindowModality(2) # application modal fig.updateData(stim_signal, self.ui.trackview.model().samplerate()) fig.setTitle('Stimulus Preview') fig.show() self.previewFig = fig
python
def preview(self): """Assemble the current components in the QStimulusModel and generate a spectrogram plot in a separate window""" msg = self.ui.trackview.model().verify() if msg: answer = QtGui.QMessageBox.warning(self, "Bummer", 'Problem: {}.'.format(msg)) return stim_signal, atten, ovld = self.ui.trackview.model().signal() fig = SpecWidget() fig.setWindowModality(2) # application modal fig.updateData(stim_signal, self.ui.trackview.model().samplerate()) fig.setTitle('Stimulus Preview') fig.show() self.previewFig = fig
[ "def", "preview", "(", "self", ")", ":", "msg", "=", "self", ".", "ui", ".", "trackview", ".", "model", "(", ")", ".", "verify", "(", ")", "if", "msg", ":", "answer", "=", "QtGui", ".", "QMessageBox", ".", "warning", "(", "self", ",", "\"Bummer\"", ",", "'Problem: {}.'", ".", "format", "(", "msg", ")", ")", "return", "stim_signal", ",", "atten", ",", "ovld", "=", "self", ".", "ui", ".", "trackview", ".", "model", "(", ")", ".", "signal", "(", ")", "fig", "=", "SpecWidget", "(", ")", "fig", ".", "setWindowModality", "(", "2", ")", "# application modal", "fig", ".", "updateData", "(", "stim_signal", ",", "self", ".", "ui", ".", "trackview", ".", "model", "(", ")", ".", "samplerate", "(", ")", ")", "fig", ".", "setTitle", "(", "'Stimulus Preview'", ")", "fig", ".", "show", "(", ")", "self", ".", "previewFig", "=", "fig" ]
Assemble the current components in the QStimulusModel and generate a spectrogram plot in a separate window
[ "Assemble", "the", "current", "components", "in", "the", "QStimulusModel", "and", "generate", "a", "spectrogram", "plot", "in", "a", "separate", "window" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulus_editor.py#L74-L87
train
lowandrew/OLCTools
spadespipeline/offhours.py
Offhours.assertpathsandfiles
def assertpathsandfiles(self): """Assertions to make sure that arguments are at least mostly valid""" # Assertion to ensure that the MiSeq path exists assert os.path.isdir(self.miseqpath), u'MiSeqPath is not a valid directory {0!r:s}'.format(self.miseqpath) # If the miseq folder name is not provided, the default of the most recent run will be used if not self.miseqfolder: # Get a list of folders miseqfolders = glob('{}*/'.format(self.miseqpath)) self.miseqfolder = sorted(miseqfolders)[-1] # Create :miseqfoldername to store the name of this folder by splitting the path and taking the second # last piece (it's not the last piece because the folder has a trailing slash) self.miseqfoldername = self.miseqfolder.split("/")[-2] # Otherwise add the folder to the miseq path to yield the destination folder else: # Set the folder name before adding the path to the miseq path self.miseqfoldername = self.miseqfolder self.miseqfolder = self.miseqpath + self.miseqfolder + "/" # Assert to ensure that the folder exists assert os.path.isdir(self.miseqfolder), u'MiSeqFolder is not a valid directory {0!r:s}'\ .format(self.miseqfolder) # Pull the data from the SampleSheet.csv if self.customsamplesheet: self.samplesheet = self.customsamplesheet assert os.path.isfile(self.customsamplesheet), u'Could not find CustomSampleSheet as entered: {0!r:s}'\ .format(self.customsamplesheet) # Otherwise use the SampleSheet.csv located in :self.miseqfolder else: self.samplesheet = self.miseqfolder + "SampleSheet.csv"
python
def assertpathsandfiles(self): """Assertions to make sure that arguments are at least mostly valid""" # Assertion to ensure that the MiSeq path exists assert os.path.isdir(self.miseqpath), u'MiSeqPath is not a valid directory {0!r:s}'.format(self.miseqpath) # If the miseq folder name is not provided, the default of the most recent run will be used if not self.miseqfolder: # Get a list of folders miseqfolders = glob('{}*/'.format(self.miseqpath)) self.miseqfolder = sorted(miseqfolders)[-1] # Create :miseqfoldername to store the name of this folder by splitting the path and taking the second # last piece (it's not the last piece because the folder has a trailing slash) self.miseqfoldername = self.miseqfolder.split("/")[-2] # Otherwise add the folder to the miseq path to yield the destination folder else: # Set the folder name before adding the path to the miseq path self.miseqfoldername = self.miseqfolder self.miseqfolder = self.miseqpath + self.miseqfolder + "/" # Assert to ensure that the folder exists assert os.path.isdir(self.miseqfolder), u'MiSeqFolder is not a valid directory {0!r:s}'\ .format(self.miseqfolder) # Pull the data from the SampleSheet.csv if self.customsamplesheet: self.samplesheet = self.customsamplesheet assert os.path.isfile(self.customsamplesheet), u'Could not find CustomSampleSheet as entered: {0!r:s}'\ .format(self.customsamplesheet) # Otherwise use the SampleSheet.csv located in :self.miseqfolder else: self.samplesheet = self.miseqfolder + "SampleSheet.csv"
[ "def", "assertpathsandfiles", "(", "self", ")", ":", "# Assertion to ensure that the MiSeq path exists", "assert", "os", ".", "path", ".", "isdir", "(", "self", ".", "miseqpath", ")", ",", "u'MiSeqPath is not a valid directory {0!r:s}'", ".", "format", "(", "self", ".", "miseqpath", ")", "# If the miseq folder name is not provided, the default of the most recent run will be used", "if", "not", "self", ".", "miseqfolder", ":", "# Get a list of folders", "miseqfolders", "=", "glob", "(", "'{}*/'", ".", "format", "(", "self", ".", "miseqpath", ")", ")", "self", ".", "miseqfolder", "=", "sorted", "(", "miseqfolders", ")", "[", "-", "1", "]", "# Create :miseqfoldername to store the name of this folder by splitting the path and taking the second", "# last piece (it's not the last piece because the folder has a trailing slash)", "self", ".", "miseqfoldername", "=", "self", ".", "miseqfolder", ".", "split", "(", "\"/\"", ")", "[", "-", "2", "]", "# Otherwise add the folder to the miseq path to yield the destination folder", "else", ":", "# Set the folder name before adding the path to the miseq path", "self", ".", "miseqfoldername", "=", "self", ".", "miseqfolder", "self", ".", "miseqfolder", "=", "self", ".", "miseqpath", "+", "self", ".", "miseqfolder", "+", "\"/\"", "# Assert to ensure that the folder exists", "assert", "os", ".", "path", ".", "isdir", "(", "self", ".", "miseqfolder", ")", ",", "u'MiSeqFolder is not a valid directory {0!r:s}'", ".", "format", "(", "self", ".", "miseqfolder", ")", "# Pull the data from the SampleSheet.csv", "if", "self", ".", "customsamplesheet", ":", "self", ".", "samplesheet", "=", "self", ".", "customsamplesheet", "assert", "os", ".", "path", ".", "isfile", "(", "self", ".", "customsamplesheet", ")", ",", "u'Could not find CustomSampleSheet as entered: {0!r:s}'", ".", "format", "(", "self", ".", "customsamplesheet", ")", "# Otherwise use the SampleSheet.csv located in :self.miseqfolder", "else", ":", "self", ".", "samplesheet", "=", 
"self", ".", "miseqfolder", "+", "\"SampleSheet.csv\"" ]
Assertions to make sure that arguments are at least mostly valid
[ "Assertions", "to", "make", "sure", "that", "arguments", "are", "at", "least", "mostly", "valid" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/offhours.py#L14-L41
train
lowandrew/OLCTools
spadespipeline/offhours.py
Offhours.numberofsamples
def numberofsamples(self): """Count the number of samples is the samplesheet""" # Initialise variables to store line data idline = 0 linenumber = 0 # Parse the sample sheet to find the number of samples with open(self.samplesheet, "rb") as ssheet: # Use enumerate to iterate through the lines in the sample sheet to retrieve the line number and the data for linenumber, entry in enumerate(ssheet): # Once Sample_ID is encountered if "Sample_ID" in entry: # Set the id line as the current line number idline = linenumber # :samplecount is the last line number in the file minus the line number of Sample_ID self.samplecount = linenumber - idline printtime('There are {} samples in this run. ' 'Running off-hours module with the following parameters:\n' 'MiSeqPath: {},\n' 'MiSeqFolder: {},\n' 'SampleSheet: {}'.format(self.samplecount, self.miseqpath, self.miseqfolder, self.samplesheet), self.start) # Run the fastqmover module now that the number of sequences is known self.fastqlinker()
python
def numberofsamples(self): """Count the number of samples is the samplesheet""" # Initialise variables to store line data idline = 0 linenumber = 0 # Parse the sample sheet to find the number of samples with open(self.samplesheet, "rb") as ssheet: # Use enumerate to iterate through the lines in the sample sheet to retrieve the line number and the data for linenumber, entry in enumerate(ssheet): # Once Sample_ID is encountered if "Sample_ID" in entry: # Set the id line as the current line number idline = linenumber # :samplecount is the last line number in the file minus the line number of Sample_ID self.samplecount = linenumber - idline printtime('There are {} samples in this run. ' 'Running off-hours module with the following parameters:\n' 'MiSeqPath: {},\n' 'MiSeqFolder: {},\n' 'SampleSheet: {}'.format(self.samplecount, self.miseqpath, self.miseqfolder, self.samplesheet), self.start) # Run the fastqmover module now that the number of sequences is known self.fastqlinker()
[ "def", "numberofsamples", "(", "self", ")", ":", "# Initialise variables to store line data", "idline", "=", "0", "linenumber", "=", "0", "# Parse the sample sheet to find the number of samples", "with", "open", "(", "self", ".", "samplesheet", ",", "\"rb\"", ")", "as", "ssheet", ":", "# Use enumerate to iterate through the lines in the sample sheet to retrieve the line number and the data", "for", "linenumber", ",", "entry", "in", "enumerate", "(", "ssheet", ")", ":", "# Once Sample_ID is encountered", "if", "\"Sample_ID\"", "in", "entry", ":", "# Set the id line as the current line number", "idline", "=", "linenumber", "# :samplecount is the last line number in the file minus the line number of Sample_ID", "self", ".", "samplecount", "=", "linenumber", "-", "idline", "printtime", "(", "'There are {} samples in this run. '", "'Running off-hours module with the following parameters:\\n'", "'MiSeqPath: {},\\n'", "'MiSeqFolder: {},\\n'", "'SampleSheet: {}'", ".", "format", "(", "self", ".", "samplecount", ",", "self", ".", "miseqpath", ",", "self", ".", "miseqfolder", ",", "self", ".", "samplesheet", ")", ",", "self", ".", "start", ")", "# Run the fastqmover module now that the number of sequences is known", "self", ".", "fastqlinker", "(", ")" ]
Count the number of samples is the samplesheet
[ "Count", "the", "number", "of", "samples", "is", "the", "samplesheet" ]
88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/spadespipeline/offhours.py#L43-L65
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
print_packet_range
def print_packet_range(): """Print the range of archived packets.""" first_packet = next(iter(archive.list_packets())) last_packet = next(iter(archive.list_packets(descending=True))) print('First packet:', first_packet) print('Last packet:', last_packet) td = last_packet.generation_time - first_packet.generation_time print('Timespan:', td)
python
def print_packet_range(): """Print the range of archived packets.""" first_packet = next(iter(archive.list_packets())) last_packet = next(iter(archive.list_packets(descending=True))) print('First packet:', first_packet) print('Last packet:', last_packet) td = last_packet.generation_time - first_packet.generation_time print('Timespan:', td)
[ "def", "print_packet_range", "(", ")", ":", "first_packet", "=", "next", "(", "iter", "(", "archive", ".", "list_packets", "(", ")", ")", ")", "last_packet", "=", "next", "(", "iter", "(", "archive", ".", "list_packets", "(", "descending", "=", "True", ")", ")", ")", "print", "(", "'First packet:'", ",", "first_packet", ")", "print", "(", "'Last packet:'", ",", "last_packet", ")", "td", "=", "last_packet", ".", "generation_time", "-", "first_packet", ".", "generation_time", "print", "(", "'Timespan:'", ",", "td", ")" ]
Print the range of archived packets.
[ "Print", "the", "range", "of", "archived", "packets", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L15-L23
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
iterate_specific_packet_range
def iterate_specific_packet_range(): """Count the number of packets in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for packet in archive.list_packets(start=start, stop=now): total += 1 # print(packet) print('Found', total, 'packets in range')
python
def iterate_specific_packet_range(): """Count the number of packets in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for packet in archive.list_packets(start=start, stop=now): total += 1 # print(packet) print('Found', total, 'packets in range')
[ "def", "iterate_specific_packet_range", "(", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "start", "=", "now", "-", "timedelta", "(", "hours", "=", "1", ")", "total", "=", "0", "for", "packet", "in", "archive", ".", "list_packets", "(", "start", "=", "start", ",", "stop", "=", "now", ")", ":", "total", "+=", "1", "# print(packet)", "print", "(", "'Found'", ",", "total", ",", "'packets in range'", ")" ]
Count the number of packets in a specific range.
[ "Count", "the", "number", "of", "packets", "in", "a", "specific", "range", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L26-L35
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
iterate_specific_event_range
def iterate_specific_event_range(): """Count the number of events in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for event in archive.list_events(start=start, stop=now): total += 1 # print(event) print('Found', total, 'events in range')
python
def iterate_specific_event_range(): """Count the number of events in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for event in archive.list_events(start=start, stop=now): total += 1 # print(event) print('Found', total, 'events in range')
[ "def", "iterate_specific_event_range", "(", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "start", "=", "now", "-", "timedelta", "(", "hours", "=", "1", ")", "total", "=", "0", "for", "event", "in", "archive", ".", "list_events", "(", "start", "=", "start", ",", "stop", "=", "now", ")", ":", "total", "+=", "1", "# print(event)", "print", "(", "'Found'", ",", "total", ",", "'events in range'", ")" ]
Count the number of events in a specific range.
[ "Count", "the", "number", "of", "events", "in", "a", "specific", "range", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L38-L47
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
print_last_values
def print_last_values(): """Print the last 10 values.""" iterable = archive.list_parameter_values('/YSS/SIMULATOR/BatteryVoltage1', descending=True) for pval in islice(iterable, 0, 10): print(pval)
python
def print_last_values(): """Print the last 10 values.""" iterable = archive.list_parameter_values('/YSS/SIMULATOR/BatteryVoltage1', descending=True) for pval in islice(iterable, 0, 10): print(pval)
[ "def", "print_last_values", "(", ")", ":", "iterable", "=", "archive", ".", "list_parameter_values", "(", "'/YSS/SIMULATOR/BatteryVoltage1'", ",", "descending", "=", "True", ")", "for", "pval", "in", "islice", "(", "iterable", ",", "0", ",", "10", ")", ":", "print", "(", "pval", ")" ]
Print the last 10 values.
[ "Print", "the", "last", "10", "values", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L50-L55
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
iterate_specific_parameter_range
def iterate_specific_parameter_range(): """Count the number of parameter values in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for pval in archive.list_parameter_values( '/YSS/SIMULATOR/BatteryVoltage1', start=start, stop=now): total += 1 # print(pval) print('Found', total, 'parameter values in range')
python
def iterate_specific_parameter_range(): """Count the number of parameter values in a specific range.""" now = datetime.utcnow() start = now - timedelta(hours=1) total = 0 for pval in archive.list_parameter_values( '/YSS/SIMULATOR/BatteryVoltage1', start=start, stop=now): total += 1 # print(pval) print('Found', total, 'parameter values in range')
[ "def", "iterate_specific_parameter_range", "(", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "start", "=", "now", "-", "timedelta", "(", "hours", "=", "1", ")", "total", "=", "0", "for", "pval", "in", "archive", ".", "list_parameter_values", "(", "'/YSS/SIMULATOR/BatteryVoltage1'", ",", "start", "=", "start", ",", "stop", "=", "now", ")", ":", "total", "+=", "1", "# print(pval)", "print", "(", "'Found'", ",", "total", ",", "'parameter values in range'", ")" ]
Count the number of parameter values in a specific range.
[ "Count", "the", "number", "of", "parameter", "values", "in", "a", "specific", "range", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L58-L68
train
yamcs/yamcs-python
yamcs-client/examples/archive_retrieval.py
print_last_commands
def print_last_commands(): """Print the last 10 commands.""" iterable = archive.list_command_history(descending=True) for entry in islice(iterable, 0, 10): print(entry)
python
def print_last_commands(): """Print the last 10 commands.""" iterable = archive.list_command_history(descending=True) for entry in islice(iterable, 0, 10): print(entry)
[ "def", "print_last_commands", "(", ")", ":", "iterable", "=", "archive", ".", "list_command_history", "(", "descending", "=", "True", ")", "for", "entry", "in", "islice", "(", "iterable", ",", "0", ",", "10", ")", ":", "print", "(", "entry", ")" ]
Print the last 10 commands.
[ "Print", "the", "last", "10", "commands", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_retrieval.py#L71-L75
train
sholsapp/py509
py509/utils.py
transmogrify
def transmogrify(l): """Fit a flat list into a treeable object.""" d = {l[0]: {}} tmp = d for c in l: tmp[c] = {} tmp = tmp[c] return d
python
def transmogrify(l): """Fit a flat list into a treeable object.""" d = {l[0]: {}} tmp = d for c in l: tmp[c] = {} tmp = tmp[c] return d
[ "def", "transmogrify", "(", "l", ")", ":", "d", "=", "{", "l", "[", "0", "]", ":", "{", "}", "}", "tmp", "=", "d", "for", "c", "in", "l", ":", "tmp", "[", "c", "]", "=", "{", "}", "tmp", "=", "tmp", "[", "c", "]", "return", "d" ]
Fit a flat list into a treeable object.
[ "Fit", "a", "flat", "list", "into", "a", "treeable", "object", "." ]
83bd6786a8ec1543b66c42ea5523e611c3e8dc5a
https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/utils.py#L8-L15
train
sholsapp/py509
py509/utils.py
tree
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1): """Print a tree. Sometimes it's useful to print datastructures as a tree. This function prints out a pretty tree with root `node`. A tree is represented as a :class:`dict`, whose keys are node names and values are :class:`dict` objects for sub-trees and :class:`None` for terminals. :param dict node: The root of the tree to print. :param callable formatter: A callable that takes a single argument, the key, that formats the key in the tree. :param callable prefix: A callable that takes a single argument, the key, that adds any additional text before the formatted key. :param callable postfix: A callable that takes a single argument, the key, that adds any additional text after the formatted key. """ current = 0 length = len(node.keys()) tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80' elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80' for key, value in node.iteritems(): current += 1 k = formatter(key) if formatter else key pre = prefix(key) if prefix else '' post = postfix(key) if postfix else '' space = elbow_joint if current == length else tee_joint yield ' {space} {prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post) if value: for e in tree(value, formatter=formatter, prefix=prefix, postfix=postfix, _depth=_depth + 1): yield (' | ' if current != length else ' ') + e
python
def tree(node, formatter=None, prefix=None, postfix=None, _depth=1): """Print a tree. Sometimes it's useful to print datastructures as a tree. This function prints out a pretty tree with root `node`. A tree is represented as a :class:`dict`, whose keys are node names and values are :class:`dict` objects for sub-trees and :class:`None` for terminals. :param dict node: The root of the tree to print. :param callable formatter: A callable that takes a single argument, the key, that formats the key in the tree. :param callable prefix: A callable that takes a single argument, the key, that adds any additional text before the formatted key. :param callable postfix: A callable that takes a single argument, the key, that adds any additional text after the formatted key. """ current = 0 length = len(node.keys()) tee_joint = '\xe2\x94\x9c\xe2\x94\x80\xe2\x94\x80' elbow_joint = '\xe2\x94\x94\xe2\x94\x80\xe2\x94\x80' for key, value in node.iteritems(): current += 1 k = formatter(key) if formatter else key pre = prefix(key) if prefix else '' post = postfix(key) if postfix else '' space = elbow_joint if current == length else tee_joint yield ' {space} {prefix}{key}{postfix}'.format(space=space, key=k, prefix=pre, postfix=post) if value: for e in tree(value, formatter=formatter, prefix=prefix, postfix=postfix, _depth=_depth + 1): yield (' | ' if current != length else ' ') + e
[ "def", "tree", "(", "node", ",", "formatter", "=", "None", ",", "prefix", "=", "None", ",", "postfix", "=", "None", ",", "_depth", "=", "1", ")", ":", "current", "=", "0", "length", "=", "len", "(", "node", ".", "keys", "(", ")", ")", "tee_joint", "=", "'\\xe2\\x94\\x9c\\xe2\\x94\\x80\\xe2\\x94\\x80'", "elbow_joint", "=", "'\\xe2\\x94\\x94\\xe2\\x94\\x80\\xe2\\x94\\x80'", "for", "key", ",", "value", "in", "node", ".", "iteritems", "(", ")", ":", "current", "+=", "1", "k", "=", "formatter", "(", "key", ")", "if", "formatter", "else", "key", "pre", "=", "prefix", "(", "key", ")", "if", "prefix", "else", "''", "post", "=", "postfix", "(", "key", ")", "if", "postfix", "else", "''", "space", "=", "elbow_joint", "if", "current", "==", "length", "else", "tee_joint", "yield", "' {space} {prefix}{key}{postfix}'", ".", "format", "(", "space", "=", "space", ",", "key", "=", "k", ",", "prefix", "=", "pre", ",", "postfix", "=", "post", ")", "if", "value", ":", "for", "e", "in", "tree", "(", "value", ",", "formatter", "=", "formatter", ",", "prefix", "=", "prefix", ",", "postfix", "=", "postfix", ",", "_depth", "=", "_depth", "+", "1", ")", ":", "yield", "(", "' | '", "if", "current", "!=", "length", "else", "' '", ")", "+", "e" ]
Print a tree. Sometimes it's useful to print datastructures as a tree. This function prints out a pretty tree with root `node`. A tree is represented as a :class:`dict`, whose keys are node names and values are :class:`dict` objects for sub-trees and :class:`None` for terminals. :param dict node: The root of the tree to print. :param callable formatter: A callable that takes a single argument, the key, that formats the key in the tree. :param callable prefix: A callable that takes a single argument, the key, that adds any additional text before the formatted key. :param callable postfix: A callable that takes a single argument, the key, that adds any additional text after the formatted key.
[ "Print", "a", "tree", "." ]
83bd6786a8ec1543b66c42ea5523e611c3e8dc5a
https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/utils.py#L18-L48
train
sholsapp/py509
py509/utils.py
assemble_chain
def assemble_chain(leaf, store): """Assemble the trust chain. This assembly method uses the certificates subject and issuer common name and should be used for informational purposes only. It does *not* cryptographically verify the chain! :param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the chain. :param list[OpenSSL.crypto.X509] store: A list of certificates to use to resolve the chain. :return: The trust chain. :rtype: list[OpenSSL.crypto.X509] """ store_dict = {} for cert in store: store_dict[cert.get_subject().CN] = cert chain = [leaf] current = leaf try: while current.get_issuer().CN != current.get_subject().CN: chain.append(store_dict[current.get_issuer().CN]) current = store_dict[current.get_issuer().CN] except KeyError: invalid = crypto.X509() patch_certificate(invalid) invalid.set_subject(current.get_issuer()) chain.append(invalid) chain.reverse() return chain
python
def assemble_chain(leaf, store): """Assemble the trust chain. This assembly method uses the certificates subject and issuer common name and should be used for informational purposes only. It does *not* cryptographically verify the chain! :param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the chain. :param list[OpenSSL.crypto.X509] store: A list of certificates to use to resolve the chain. :return: The trust chain. :rtype: list[OpenSSL.crypto.X509] """ store_dict = {} for cert in store: store_dict[cert.get_subject().CN] = cert chain = [leaf] current = leaf try: while current.get_issuer().CN != current.get_subject().CN: chain.append(store_dict[current.get_issuer().CN]) current = store_dict[current.get_issuer().CN] except KeyError: invalid = crypto.X509() patch_certificate(invalid) invalid.set_subject(current.get_issuer()) chain.append(invalid) chain.reverse() return chain
[ "def", "assemble_chain", "(", "leaf", ",", "store", ")", ":", "store_dict", "=", "{", "}", "for", "cert", "in", "store", ":", "store_dict", "[", "cert", ".", "get_subject", "(", ")", ".", "CN", "]", "=", "cert", "chain", "=", "[", "leaf", "]", "current", "=", "leaf", "try", ":", "while", "current", ".", "get_issuer", "(", ")", ".", "CN", "!=", "current", ".", "get_subject", "(", ")", ".", "CN", ":", "chain", ".", "append", "(", "store_dict", "[", "current", ".", "get_issuer", "(", ")", ".", "CN", "]", ")", "current", "=", "store_dict", "[", "current", ".", "get_issuer", "(", ")", ".", "CN", "]", "except", "KeyError", ":", "invalid", "=", "crypto", ".", "X509", "(", ")", "patch_certificate", "(", "invalid", ")", "invalid", ".", "set_subject", "(", "current", ".", "get_issuer", "(", ")", ")", "chain", ".", "append", "(", "invalid", ")", "chain", ".", "reverse", "(", ")", "return", "chain" ]
Assemble the trust chain. This assembly method uses the certificates subject and issuer common name and should be used for informational purposes only. It does *not* cryptographically verify the chain! :param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the chain. :param list[OpenSSL.crypto.X509] store: A list of certificates to use to resolve the chain. :return: The trust chain. :rtype: list[OpenSSL.crypto.X509]
[ "Assemble", "the", "trust", "chain", "." ]
83bd6786a8ec1543b66c42ea5523e611c3e8dc5a
https://github.com/sholsapp/py509/blob/83bd6786a8ec1543b66c42ea5523e611c3e8dc5a/py509/utils.py#L53-L86
train
sirfoga/pyhal
hal/internet/services/github.py
GithubRawApi._get_api_content
def _get_api_content(self): """Updates class api content by calling Github api and storing result""" if GITHUB_TOKEN is not None: self.add_params_to_url({ "access_token": GITHUB_TOKEN }) api_content_response = requests.get(self.api_url) self.api_content = json.loads( api_content_response.text )
python
def _get_api_content(self): """Updates class api content by calling Github api and storing result""" if GITHUB_TOKEN is not None: self.add_params_to_url({ "access_token": GITHUB_TOKEN }) api_content_response = requests.get(self.api_url) self.api_content = json.loads( api_content_response.text )
[ "def", "_get_api_content", "(", "self", ")", ":", "if", "GITHUB_TOKEN", "is", "not", "None", ":", "self", ".", "add_params_to_url", "(", "{", "\"access_token\"", ":", "GITHUB_TOKEN", "}", ")", "api_content_response", "=", "requests", ".", "get", "(", "self", ".", "api_url", ")", "self", ".", "api_content", "=", "json", ".", "loads", "(", "api_content_response", ".", "text", ")" ]
Updates class api content by calling Github api and storing result
[ "Updates", "class", "api", "content", "by", "calling", "Github", "api", "and", "storing", "result" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/github.py#L79-L90
train
sirfoga/pyhal
hal/internet/services/github.py
GithubApi.get_trending_daily
def get_trending_daily(lang=""): """Fetches repos in "Trending Daily" Github section :param lang: Coding language :return: List of GithubUserRepository """ url = "https://github.com/trending/" url += str(lang).lower().replace(" ", "") + "?since=daily" api_content_request = urllib.request.Request(url) api_content_response = urllib.request.urlopen( api_content_request).read().decode("utf-8") # parse response soup = BeautifulSoup(api_content_response, "lxml") # html parser raw_repo_list = soup.find( "ol", {"class": "repo-list"} ).find_all("li") repos_list = [] for repo in raw_repo_list: details = repo.find_all("div")[0].a.text.split("/") repo_owner = details[0].strip() repo_name = details[1].strip() repos_list.append(GithubUserRepository(repo_owner, repo_name)) return repos_list
python
def get_trending_daily(lang=""): """Fetches repos in "Trending Daily" Github section :param lang: Coding language :return: List of GithubUserRepository """ url = "https://github.com/trending/" url += str(lang).lower().replace(" ", "") + "?since=daily" api_content_request = urllib.request.Request(url) api_content_response = urllib.request.urlopen( api_content_request).read().decode("utf-8") # parse response soup = BeautifulSoup(api_content_response, "lxml") # html parser raw_repo_list = soup.find( "ol", {"class": "repo-list"} ).find_all("li") repos_list = [] for repo in raw_repo_list: details = repo.find_all("div")[0].a.text.split("/") repo_owner = details[0].strip() repo_name = details[1].strip() repos_list.append(GithubUserRepository(repo_owner, repo_name)) return repos_list
[ "def", "get_trending_daily", "(", "lang", "=", "\"\"", ")", ":", "url", "=", "\"https://github.com/trending/\"", "url", "+=", "str", "(", "lang", ")", ".", "lower", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "+", "\"?since=daily\"", "api_content_request", "=", "urllib", ".", "request", ".", "Request", "(", "url", ")", "api_content_response", "=", "urllib", ".", "request", ".", "urlopen", "(", "api_content_request", ")", ".", "read", "(", ")", ".", "decode", "(", "\"utf-8\"", ")", "# parse response", "soup", "=", "BeautifulSoup", "(", "api_content_response", ",", "\"lxml\"", ")", "# html parser", "raw_repo_list", "=", "soup", ".", "find", "(", "\"ol\"", ",", "{", "\"class\"", ":", "\"repo-list\"", "}", ")", ".", "find_all", "(", "\"li\"", ")", "repos_list", "=", "[", "]", "for", "repo", "in", "raw_repo_list", ":", "details", "=", "repo", ".", "find_all", "(", "\"div\"", ")", "[", "0", "]", ".", "a", ".", "text", ".", "split", "(", "\"/\"", ")", "repo_owner", "=", "details", "[", "0", "]", ".", "strip", "(", ")", "repo_name", "=", "details", "[", "1", "]", ".", "strip", "(", ")", "repos_list", ".", "append", "(", "GithubUserRepository", "(", "repo_owner", ",", "repo_name", ")", ")", "return", "repos_list" ]
Fetches repos in "Trending Daily" Github section :param lang: Coding language :return: List of GithubUserRepository
[ "Fetches", "repos", "in", "Trending", "Daily", "Github", "section" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/github.py#L114-L136
train
sirfoga/pyhal
hal/internet/services/github.py
GithubUser._get_repos
def _get_repos(url): """Gets repos in url :param url: Url :return: List of repositories in given url """ current_page = 1 there_is_something_left = True repos_list = [] while there_is_something_left: api_driver = GithubRawApi( url, url_params={"page": current_page}, get_api_content_now=True ) # driver to parse API content for repo in api_driver.api_content: # list of raw repository repo_name = repo["name"] repo_user = repo["owner"]["login"] repos_list.append( GithubUserRepository(repo_user, repo_name)) there_is_something_left = bool(api_driver.api_content) current_page += 1 return repos_list
python
def _get_repos(url): """Gets repos in url :param url: Url :return: List of repositories in given url """ current_page = 1 there_is_something_left = True repos_list = [] while there_is_something_left: api_driver = GithubRawApi( url, url_params={"page": current_page}, get_api_content_now=True ) # driver to parse API content for repo in api_driver.api_content: # list of raw repository repo_name = repo["name"] repo_user = repo["owner"]["login"] repos_list.append( GithubUserRepository(repo_user, repo_name)) there_is_something_left = bool(api_driver.api_content) current_page += 1 return repos_list
[ "def", "_get_repos", "(", "url", ")", ":", "current_page", "=", "1", "there_is_something_left", "=", "True", "repos_list", "=", "[", "]", "while", "there_is_something_left", ":", "api_driver", "=", "GithubRawApi", "(", "url", ",", "url_params", "=", "{", "\"page\"", ":", "current_page", "}", ",", "get_api_content_now", "=", "True", ")", "# driver to parse API content", "for", "repo", "in", "api_driver", ".", "api_content", ":", "# list of raw repository", "repo_name", "=", "repo", "[", "\"name\"", "]", "repo_user", "=", "repo", "[", "\"owner\"", "]", "[", "\"login\"", "]", "repos_list", ".", "append", "(", "GithubUserRepository", "(", "repo_user", ",", "repo_name", ")", ")", "there_is_something_left", "=", "bool", "(", "api_driver", ".", "api_content", ")", "current_page", "+=", "1", "return", "repos_list" ]
Gets repos in url :param url: Url :return: List of repositories in given url
[ "Gets", "repos", "in", "url" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/internet/services/github.py#L167-L193
train
iqbal-lab-org/cluster_vcf_records
cluster_vcf_records/vcf_merge.py
_dict_of_vars_to_vcf_file
def _dict_of_vars_to_vcf_file(variants, outfile): '''Input is dict made by vcf_file_read.vcf_file_to_dict_of_vars or vcf_file_read.vcf_file_to_dict_of_vars. Output is bare-bones VCF file (columns empty wherever possible''' header_lines = [ '##fileformat=VCFv4.2', '##source=cluster_vcf_records, version ' + cluster_vcf_records_version, '##fileDate=' + str(datetime.date.today()), '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']) ] with open(outfile, 'w') as f: print(*header_lines, sep='\n', file=f) for ref_name in sorted(variants): for pos in sorted(variants[ref_name]): for ref_string in sorted(variants[ref_name][pos]): alts = sorted(list(variants[ref_name][pos][ref_string])) print(ref_name, pos + 1, '.', ref_string, ','.join(alts), '.', 'PASS', 'SVTYPE=MERGED', sep='\t', file=f)
python
def _dict_of_vars_to_vcf_file(variants, outfile): '''Input is dict made by vcf_file_read.vcf_file_to_dict_of_vars or vcf_file_read.vcf_file_to_dict_of_vars. Output is bare-bones VCF file (columns empty wherever possible''' header_lines = [ '##fileformat=VCFv4.2', '##source=cluster_vcf_records, version ' + cluster_vcf_records_version, '##fileDate=' + str(datetime.date.today()), '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']) ] with open(outfile, 'w') as f: print(*header_lines, sep='\n', file=f) for ref_name in sorted(variants): for pos in sorted(variants[ref_name]): for ref_string in sorted(variants[ref_name][pos]): alts = sorted(list(variants[ref_name][pos][ref_string])) print(ref_name, pos + 1, '.', ref_string, ','.join(alts), '.', 'PASS', 'SVTYPE=MERGED', sep='\t', file=f)
[ "def", "_dict_of_vars_to_vcf_file", "(", "variants", ",", "outfile", ")", ":", "header_lines", "=", "[", "'##fileformat=VCFv4.2'", ",", "'##source=cluster_vcf_records, version '", "+", "cluster_vcf_records_version", ",", "'##fileDate='", "+", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "'\\t'", ".", "join", "(", "[", "'#CHROM'", ",", "'POS'", ",", "'ID'", ",", "'REF'", ",", "'ALT'", ",", "'QUAL'", ",", "'FILTER'", ",", "'INFO'", "]", ")", "]", "with", "open", "(", "outfile", ",", "'w'", ")", "as", "f", ":", "print", "(", "*", "header_lines", ",", "sep", "=", "'\\n'", ",", "file", "=", "f", ")", "for", "ref_name", "in", "sorted", "(", "variants", ")", ":", "for", "pos", "in", "sorted", "(", "variants", "[", "ref_name", "]", ")", ":", "for", "ref_string", "in", "sorted", "(", "variants", "[", "ref_name", "]", "[", "pos", "]", ")", ":", "alts", "=", "sorted", "(", "list", "(", "variants", "[", "ref_name", "]", "[", "pos", "]", "[", "ref_string", "]", ")", ")", "print", "(", "ref_name", ",", "pos", "+", "1", ",", "'.'", ",", "ref_string", ",", "','", ".", "join", "(", "alts", ")", ",", "'.'", ",", "'PASS'", ",", "'SVTYPE=MERGED'", ",", "sep", "=", "'\\t'", ",", "file", "=", "f", ")" ]
Input is dict made by vcf_file_read.vcf_file_to_dict_of_vars or vcf_file_read.vcf_file_to_dict_of_vars. Output is bare-bones VCF file (columns empty wherever possible
[ "Input", "is", "dict", "made", "by", "vcf_file_read", ".", "vcf_file_to_dict_of_vars", "or", "vcf_file_read", ".", "vcf_file_to_dict_of_vars", ".", "Output", "is", "bare", "-", "bones", "VCF", "file", "(", "columns", "empty", "wherever", "possible" ]
0db26af36b6da97a7361364457d2152dc756055c
https://github.com/iqbal-lab-org/cluster_vcf_records/blob/0db26af36b6da97a7361364457d2152dc756055c/cluster_vcf_records/vcf_merge.py#L8-L26
train
wylee/runcommands
runcommands/util/__init__.py
collect_commands
def collect_commands(package_name=None, in_place=False, level=1): """Collect commands from package and its subpackages. This replaces the tedium of adding and maintaining a bunch of imports like ``from .xyz import x, y, z`` in modules that are used to collect all of the commands in a package. Args: package_name (str): Package to collect from. If not passed, the package containing the module of the call site will be used. in_place (bool): If set, the call site's globals will be updated in place (using some frame magic). level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the package, ordered by name. Example usage:: # mypackage.commands __all__ = list(collect_commands(in_place=True)) Less magical usage:: # mypackage.commands commands = collect_commands() globals().update(commands) __all__ = list(commands) .. note:: If ``package_name`` is passed and refers to a namespace package, all corresponding namespace package directories will be searched for commands. 
""" commands = {} frame = inspect.stack()[level][0] f_globals = frame.f_globals if package_name is None: # Collect from package containing module of call site package_name = f_globals['__name__'].rsplit('.', 1)[0] package_paths = [os.path.dirname(f_globals['__file__'])] else: # Collect from named package package = importlib.import_module(package_name) package_name = package.__name__ package_paths = package.__path__ for package_path in package_paths: package_path = pathlib.Path(package_path) for file in package_path.rglob('*.py'): rel_path = str(file.relative_to(package_path)) rel_path = rel_path[:-3] module_name = rel_path.replace(os.sep, '.') module_name = '.'.join((package_name, module_name)) module = importlib.import_module(module_name) module_commands = get_commands_in_namespace(module) commands.update(module_commands) commands = OrderedDict((name, commands[name]) for name in sorted(commands)) if in_place: f_globals.update(commands) return commands
python
def collect_commands(package_name=None, in_place=False, level=1): """Collect commands from package and its subpackages. This replaces the tedium of adding and maintaining a bunch of imports like ``from .xyz import x, y, z`` in modules that are used to collect all of the commands in a package. Args: package_name (str): Package to collect from. If not passed, the package containing the module of the call site will be used. in_place (bool): If set, the call site's globals will be updated in place (using some frame magic). level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the package, ordered by name. Example usage:: # mypackage.commands __all__ = list(collect_commands(in_place=True)) Less magical usage:: # mypackage.commands commands = collect_commands() globals().update(commands) __all__ = list(commands) .. note:: If ``package_name`` is passed and refers to a namespace package, all corresponding namespace package directories will be searched for commands. 
""" commands = {} frame = inspect.stack()[level][0] f_globals = frame.f_globals if package_name is None: # Collect from package containing module of call site package_name = f_globals['__name__'].rsplit('.', 1)[0] package_paths = [os.path.dirname(f_globals['__file__'])] else: # Collect from named package package = importlib.import_module(package_name) package_name = package.__name__ package_paths = package.__path__ for package_path in package_paths: package_path = pathlib.Path(package_path) for file in package_path.rglob('*.py'): rel_path = str(file.relative_to(package_path)) rel_path = rel_path[:-3] module_name = rel_path.replace(os.sep, '.') module_name = '.'.join((package_name, module_name)) module = importlib.import_module(module_name) module_commands = get_commands_in_namespace(module) commands.update(module_commands) commands = OrderedDict((name, commands[name]) for name in sorted(commands)) if in_place: f_globals.update(commands) return commands
[ "def", "collect_commands", "(", "package_name", "=", "None", ",", "in_place", "=", "False", ",", "level", "=", "1", ")", ":", "commands", "=", "{", "}", "frame", "=", "inspect", ".", "stack", "(", ")", "[", "level", "]", "[", "0", "]", "f_globals", "=", "frame", ".", "f_globals", "if", "package_name", "is", "None", ":", "# Collect from package containing module of call site", "package_name", "=", "f_globals", "[", "'__name__'", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "package_paths", "=", "[", "os", ".", "path", ".", "dirname", "(", "f_globals", "[", "'__file__'", "]", ")", "]", "else", ":", "# Collect from named package", "package", "=", "importlib", ".", "import_module", "(", "package_name", ")", "package_name", "=", "package", ".", "__name__", "package_paths", "=", "package", ".", "__path__", "for", "package_path", "in", "package_paths", ":", "package_path", "=", "pathlib", ".", "Path", "(", "package_path", ")", "for", "file", "in", "package_path", ".", "rglob", "(", "'*.py'", ")", ":", "rel_path", "=", "str", "(", "file", ".", "relative_to", "(", "package_path", ")", ")", "rel_path", "=", "rel_path", "[", ":", "-", "3", "]", "module_name", "=", "rel_path", ".", "replace", "(", "os", ".", "sep", ",", "'.'", ")", "module_name", "=", "'.'", ".", "join", "(", "(", "package_name", ",", "module_name", ")", ")", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "module_commands", "=", "get_commands_in_namespace", "(", "module", ")", "commands", ".", "update", "(", "module_commands", ")", "commands", "=", "OrderedDict", "(", "(", "name", ",", "commands", "[", "name", "]", ")", "for", "name", "in", "sorted", "(", "commands", ")", ")", "if", "in_place", ":", "f_globals", ".", "update", "(", "commands", ")", "return", "commands" ]
Collect commands from package and its subpackages. This replaces the tedium of adding and maintaining a bunch of imports like ``from .xyz import x, y, z`` in modules that are used to collect all of the commands in a package. Args: package_name (str): Package to collect from. If not passed, the package containing the module of the call site will be used. in_place (bool): If set, the call site's globals will be updated in place (using some frame magic). level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the package, ordered by name. Example usage:: # mypackage.commands __all__ = list(collect_commands(in_place=True)) Less magical usage:: # mypackage.commands commands = collect_commands() globals().update(commands) __all__ = list(commands) .. note:: If ``package_name`` is passed and refers to a namespace package, all corresponding namespace package directories will be searched for commands.
[ "Collect", "commands", "from", "package", "and", "its", "subpackages", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/util/__init__.py#L28-L94
train
wylee/runcommands
runcommands/util/__init__.py
get_commands_in_namespace
def get_commands_in_namespace(namespace=None, level=1): """Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace()) """ from ..command import Command # noqa: Avoid circular import commands = {} if namespace is None: frame = inspect.stack()[level][0] namespace = frame.f_globals elif inspect.ismodule(namespace): namespace = vars(namespace) for name in namespace: obj = namespace[name] if isinstance(obj, Command): commands[name] = obj return OrderedDict((name, commands[name]) for name in sorted(commands))
python
def get_commands_in_namespace(namespace=None, level=1): """Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace()) """ from ..command import Command # noqa: Avoid circular import commands = {} if namespace is None: frame = inspect.stack()[level][0] namespace = frame.f_globals elif inspect.ismodule(namespace): namespace = vars(namespace) for name in namespace: obj = namespace[name] if isinstance(obj, Command): commands[name] = obj return OrderedDict((name, commands[name]) for name in sorted(commands))
[ "def", "get_commands_in_namespace", "(", "namespace", "=", "None", ",", "level", "=", "1", ")", ":", "from", ".", ".", "command", "import", "Command", "# noqa: Avoid circular import", "commands", "=", "{", "}", "if", "namespace", "is", "None", ":", "frame", "=", "inspect", ".", "stack", "(", ")", "[", "level", "]", "[", "0", "]", "namespace", "=", "frame", ".", "f_globals", "elif", "inspect", ".", "ismodule", "(", "namespace", ")", ":", "namespace", "=", "vars", "(", "namespace", ")", "for", "name", "in", "namespace", ":", "obj", "=", "namespace", "[", "name", "]", "if", "isinstance", "(", "obj", ",", "Command", ")", ":", "commands", "[", "name", "]", "=", "obj", "return", "OrderedDict", "(", "(", "name", ",", "commands", "[", "name", "]", ")", "for", "name", "in", "sorted", "(", "commands", ")", ")" ]
Get commands in namespace. Args: namespace (dict|module): Typically a module. If not passed, the globals from the call site will be used. level (int): If not called from the global scope, set this appropriately to account for the call stack. Returns: OrderedDict: The commands found in the namespace, ordered by name. Can be used to create ``__all__`` lists:: __all__ = list(get_commands_in_namespace())
[ "Get", "commands", "in", "namespace", "." ]
b1d7c262885b9ced7ab89b63562f5464ca9970fe
https://github.com/wylee/runcommands/blob/b1d7c262885b9ced7ab89b63562f5464ca9970fe/runcommands/util/__init__.py#L97-L126
train
portfors-lab/sparkle
sparkle/gui/stim/selectionmodel.py
ComponentSelectionModel.selectedIndexes
def selectedIndexes(self): """Returns a list of QModelIndex currently in the model""" model = self.model() indexes = [] for comp in self._selectedComponents: index = model.indexByComponent(comp) if index is None: # must have been removed from model, discard self._selectedComponents.remove(comp) else: indexes.append(index) return indexes
python
def selectedIndexes(self): """Returns a list of QModelIndex currently in the model""" model = self.model() indexes = [] for comp in self._selectedComponents: index = model.indexByComponent(comp) if index is None: # must have been removed from model, discard self._selectedComponents.remove(comp) else: indexes.append(index) return indexes
[ "def", "selectedIndexes", "(", "self", ")", ":", "model", "=", "self", ".", "model", "(", ")", "indexes", "=", "[", "]", "for", "comp", "in", "self", ".", "_selectedComponents", ":", "index", "=", "model", ".", "indexByComponent", "(", "comp", ")", "if", "index", "is", "None", ":", "# must have been removed from model, discard", "self", ".", "_selectedComponents", ".", "remove", "(", "comp", ")", "else", ":", "indexes", ".", "append", "(", "index", ")", "return", "indexes" ]
Returns a list of QModelIndex currently in the model
[ "Returns", "a", "list", "of", "QModelIndex", "currently", "in", "the", "model" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/selectionmodel.py#L35-L46
train
portfors-lab/sparkle
sparkle/gui/stim/selectionmodel.py
ComponentSelectionModel.selection
def selection(self): """Returns items in selection as a QItemSelection object""" sel = QtGui.QItemSelection() for index in self.selectedIndexes(): sel.select(index, index) return sel
python
def selection(self): """Returns items in selection as a QItemSelection object""" sel = QtGui.QItemSelection() for index in self.selectedIndexes(): sel.select(index, index) return sel
[ "def", "selection", "(", "self", ")", ":", "sel", "=", "QtGui", ".", "QItemSelection", "(", ")", "for", "index", "in", "self", ".", "selectedIndexes", "(", ")", ":", "sel", ".", "select", "(", "index", ",", "index", ")", "return", "sel" ]
Returns items in selection as a QItemSelection object
[ "Returns", "items", "in", "selection", "as", "a", "QItemSelection", "object" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/selectionmodel.py#L48-L53
train
portfors-lab/sparkle
sparkle/gui/stim/selectionmodel.py
ComponentSelectionModel.selectionComponents
def selectionComponents(self): """Returns the names of the component types in this selection""" comps = [] model = self.model() for comp in self._selectedComponents: index = model.indexByComponent(comp) if index is not None: comps.append(comp) return comps
python
def selectionComponents(self): """Returns the names of the component types in this selection""" comps = [] model = self.model() for comp in self._selectedComponents: index = model.indexByComponent(comp) if index is not None: comps.append(comp) return comps
[ "def", "selectionComponents", "(", "self", ")", ":", "comps", "=", "[", "]", "model", "=", "self", ".", "model", "(", ")", "for", "comp", "in", "self", ".", "_selectedComponents", ":", "index", "=", "model", ".", "indexByComponent", "(", "comp", ")", "if", "index", "is", "not", "None", ":", "comps", ".", "append", "(", "comp", ")", "return", "comps" ]
Returns the names of the component types in this selection
[ "Returns", "the", "names", "of", "the", "component", "types", "in", "this", "selection" ]
5fad1cf2bec58ec6b15d91da20f6236a74826110
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/selectionmodel.py#L55-L63
train
ArabellaTech/django-basic-cms
basic_cms/models.py
Page.expose_content
def expose_content(self): """Return all the current content of this page into a `string`. This is used by the haystack framework to build the search index.""" placeholders = get_placeholders(self.get_template()) exposed_content = [] for lang in self.get_languages(): for ctype in [p.name for p in placeholders]: content = self.get_content(lang, ctype, False) if content: exposed_content.append(content) return u"\r\n".join(exposed_content)
python
def expose_content(self): """Return all the current content of this page into a `string`. This is used by the haystack framework to build the search index.""" placeholders = get_placeholders(self.get_template()) exposed_content = [] for lang in self.get_languages(): for ctype in [p.name for p in placeholders]: content = self.get_content(lang, ctype, False) if content: exposed_content.append(content) return u"\r\n".join(exposed_content)
[ "def", "expose_content", "(", "self", ")", ":", "placeholders", "=", "get_placeholders", "(", "self", ".", "get_template", "(", ")", ")", "exposed_content", "=", "[", "]", "for", "lang", "in", "self", ".", "get_languages", "(", ")", ":", "for", "ctype", "in", "[", "p", ".", "name", "for", "p", "in", "placeholders", "]", ":", "content", "=", "self", ".", "get_content", "(", "lang", ",", "ctype", ",", "False", ")", "if", "content", ":", "exposed_content", ".", "append", "(", "content", ")", "return", "u\"\\r\\n\"", ".", "join", "(", "exposed_content", ")" ]
Return all the current content of this page into a `string`. This is used by the haystack framework to build the search index.
[ "Return", "all", "the", "current", "content", "of", "this", "page", "into", "a", "string", "." ]
863f3c6098606f663994930cd8e7723ad0c07caf
https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/models.py#L346-L357
train
CyberInt/dockermon
dockermon.py
read_http_header
def read_http_header(sock): """Read HTTP header from socket, return header and rest of data.""" buf = [] hdr_end = '\r\n\r\n' while True: buf.append(sock.recv(bufsize).decode('utf-8')) data = ''.join(buf) i = data.find(hdr_end) if i == -1: continue return data[:i], data[i + len(hdr_end):]
python
def read_http_header(sock): """Read HTTP header from socket, return header and rest of data.""" buf = [] hdr_end = '\r\n\r\n' while True: buf.append(sock.recv(bufsize).decode('utf-8')) data = ''.join(buf) i = data.find(hdr_end) if i == -1: continue return data[:i], data[i + len(hdr_end):]
[ "def", "read_http_header", "(", "sock", ")", ":", "buf", "=", "[", "]", "hdr_end", "=", "'\\r\\n\\r\\n'", "while", "True", ":", "buf", ".", "append", "(", "sock", ".", "recv", "(", "bufsize", ")", ".", "decode", "(", "'utf-8'", ")", ")", "data", "=", "''", ".", "join", "(", "buf", ")", "i", "=", "data", ".", "find", "(", "hdr_end", ")", "if", "i", "==", "-", "1", ":", "continue", "return", "data", "[", ":", "i", "]", ",", "data", "[", "i", "+", "len", "(", "hdr_end", ")", ":", "]" ]
Read HTTP header from socket, return header and rest of data.
[ "Read", "HTTP", "header", "from", "socket", "return", "header", "and", "rest", "of", "data", "." ]
a8733b9395cb1b551971f17c31d7f4a8268bb969
https://github.com/CyberInt/dockermon/blob/a8733b9395cb1b551971f17c31d7f4a8268bb969/dockermon.py#L28-L39
train
CyberInt/dockermon
dockermon.py
connect
def connect(url): """Connect to UNIX or TCP socket. url can be either tcp://<host>:port or ipc://<path> """ url = urlparse(url) if url.scheme == 'tcp': sock = socket() netloc = tuple(url.netloc.rsplit(':', 1)) hostname = socket.gethostname() elif url.scheme == 'ipc': sock = socket(AF_UNIX) netloc = url.path hostname = 'localhost' else: raise ValueError('unknown socket type: %s' % url.scheme) sock.connect(netloc) return sock, hostname
python
def connect(url): """Connect to UNIX or TCP socket. url can be either tcp://<host>:port or ipc://<path> """ url = urlparse(url) if url.scheme == 'tcp': sock = socket() netloc = tuple(url.netloc.rsplit(':', 1)) hostname = socket.gethostname() elif url.scheme == 'ipc': sock = socket(AF_UNIX) netloc = url.path hostname = 'localhost' else: raise ValueError('unknown socket type: %s' % url.scheme) sock.connect(netloc) return sock, hostname
[ "def", "connect", "(", "url", ")", ":", "url", "=", "urlparse", "(", "url", ")", "if", "url", ".", "scheme", "==", "'tcp'", ":", "sock", "=", "socket", "(", ")", "netloc", "=", "tuple", "(", "url", ".", "netloc", ".", "rsplit", "(", "':'", ",", "1", ")", ")", "hostname", "=", "socket", ".", "gethostname", "(", ")", "elif", "url", ".", "scheme", "==", "'ipc'", ":", "sock", "=", "socket", "(", "AF_UNIX", ")", "netloc", "=", "url", ".", "path", "hostname", "=", "'localhost'", "else", ":", "raise", "ValueError", "(", "'unknown socket type: %s'", "%", "url", ".", "scheme", ")", "sock", ".", "connect", "(", "netloc", ")", "return", "sock", ",", "hostname" ]
Connect to UNIX or TCP socket. url can be either tcp://<host>:port or ipc://<path>
[ "Connect", "to", "UNIX", "or", "TCP", "socket", "." ]
a8733b9395cb1b551971f17c31d7f4a8268bb969
https://github.com/CyberInt/dockermon/blob/a8733b9395cb1b551971f17c31d7f4a8268bb969/dockermon.py#L50-L68
train
CyberInt/dockermon
dockermon.py
print_callback
def print_callback(msg): """Print callback, prints message to stdout as JSON in one line.""" json.dump(msg, stdout) stdout.write('\n') stdout.flush()
python
def print_callback(msg): """Print callback, prints message to stdout as JSON in one line.""" json.dump(msg, stdout) stdout.write('\n') stdout.flush()
[ "def", "print_callback", "(", "msg", ")", ":", "json", ".", "dump", "(", "msg", ",", "stdout", ")", "stdout", ".", "write", "(", "'\\n'", ")", "stdout", ".", "flush", "(", ")" ]
Print callback, prints message to stdout as JSON in one line.
[ "Print", "callback", "prints", "message", "to", "stdout", "as", "JSON", "in", "one", "line", "." ]
a8733b9395cb1b551971f17c31d7f4a8268bb969
https://github.com/CyberInt/dockermon/blob/a8733b9395cb1b551971f17c31d7f4a8268bb969/dockermon.py#L109-L113
train
CyberInt/dockermon
dockermon.py
prog_callback
def prog_callback(prog, msg): """Program callback, calls prog with message in stdin""" pipe = Popen(prog, stdin=PIPE) data = json.dumps(msg) pipe.stdin.write(data.encode('utf-8')) pipe.stdin.close()
python
def prog_callback(prog, msg): """Program callback, calls prog with message in stdin""" pipe = Popen(prog, stdin=PIPE) data = json.dumps(msg) pipe.stdin.write(data.encode('utf-8')) pipe.stdin.close()
[ "def", "prog_callback", "(", "prog", ",", "msg", ")", ":", "pipe", "=", "Popen", "(", "prog", ",", "stdin", "=", "PIPE", ")", "data", "=", "json", ".", "dumps", "(", "msg", ")", "pipe", ".", "stdin", ".", "write", "(", "data", ".", "encode", "(", "'utf-8'", ")", ")", "pipe", ".", "stdin", ".", "close", "(", ")" ]
Program callback, calls prog with message in stdin
[ "Program", "callback", "calls", "prog", "with", "message", "in", "stdin" ]
a8733b9395cb1b551971f17c31d7f4a8268bb969
https://github.com/CyberInt/dockermon/blob/a8733b9395cb1b551971f17c31d7f4a8268bb969/dockermon.py#L116-L121
train
lsst-sqre/sqre-codekit
codekit/eups.py
git_tag2eups_tag
def git_tag2eups_tag(git_tag): """Convert git tag to an acceptable eups tag format I.e., eups no likey semantic versioning markup, wants underscores Parameters ---------- git_tag: str literal git tag string Returns ------- eups_tag: string A string suitable for use as an eups tag name """ eups_tag = git_tag # eups tags should not start with a numeric value -- prefix `v` if # it does if re.match(r'\d', eups_tag): eups_tag = "v{eups_tag}".format(eups_tag=eups_tag) # convert '.'s and '-'s to '_'s eups_tag = eups_tag.translate(str.maketrans('.-', '__')) return eups_tag
python
def git_tag2eups_tag(git_tag): """Convert git tag to an acceptable eups tag format I.e., eups no likey semantic versioning markup, wants underscores Parameters ---------- git_tag: str literal git tag string Returns ------- eups_tag: string A string suitable for use as an eups tag name """ eups_tag = git_tag # eups tags should not start with a numeric value -- prefix `v` if # it does if re.match(r'\d', eups_tag): eups_tag = "v{eups_tag}".format(eups_tag=eups_tag) # convert '.'s and '-'s to '_'s eups_tag = eups_tag.translate(str.maketrans('.-', '__')) return eups_tag
[ "def", "git_tag2eups_tag", "(", "git_tag", ")", ":", "eups_tag", "=", "git_tag", "# eups tags should not start with a numeric value -- prefix `v` if", "# it does", "if", "re", ".", "match", "(", "r'\\d'", ",", "eups_tag", ")", ":", "eups_tag", "=", "\"v{eups_tag}\"", ".", "format", "(", "eups_tag", "=", "eups_tag", ")", "# convert '.'s and '-'s to '_'s", "eups_tag", "=", "eups_tag", ".", "translate", "(", "str", ".", "maketrans", "(", "'.-'", ",", "'__'", ")", ")", "return", "eups_tag" ]
Convert git tag to an acceptable eups tag format I.e., eups no likey semantic versioning markup, wants underscores Parameters ---------- git_tag: str literal git tag string Returns ------- eups_tag: string A string suitable for use as an eups tag name
[ "Convert", "git", "tag", "to", "an", "acceptable", "eups", "tag", "format" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/eups.py#L160-L185
train
leovt/constructible
constructible.py
sqrt
def sqrt(n): '''return the square root of n in an exact representation''' if isinstance(n, Rational): n = Constructible(n) elif not isinstance(n, Constructible): raise ValueError('the square root is not implemented for the type %s' % type(n)) r = n._try_sqrt() # pylint: disable=protected-access if r is not None: return r return Constructible(Constructible.lift_rational_field(0, n.field), Constructible.lift_rational_field(1, n.field), (n, n.field))
python
def sqrt(n): '''return the square root of n in an exact representation''' if isinstance(n, Rational): n = Constructible(n) elif not isinstance(n, Constructible): raise ValueError('the square root is not implemented for the type %s' % type(n)) r = n._try_sqrt() # pylint: disable=protected-access if r is not None: return r return Constructible(Constructible.lift_rational_field(0, n.field), Constructible.lift_rational_field(1, n.field), (n, n.field))
[ "def", "sqrt", "(", "n", ")", ":", "if", "isinstance", "(", "n", ",", "Rational", ")", ":", "n", "=", "Constructible", "(", "n", ")", "elif", "not", "isinstance", "(", "n", ",", "Constructible", ")", ":", "raise", "ValueError", "(", "'the square root is not implemented for the type %s'", "%", "type", "(", "n", ")", ")", "r", "=", "n", ".", "_try_sqrt", "(", ")", "# pylint: disable=protected-access", "if", "r", "is", "not", "None", ":", "return", "r", "return", "Constructible", "(", "Constructible", ".", "lift_rational_field", "(", "0", ",", "n", ".", "field", ")", ",", "Constructible", ".", "lift_rational_field", "(", "1", ",", "n", ".", "field", ")", ",", "(", "n", ",", "n", ".", "field", ")", ")" ]
return the square root of n in an exact representation
[ "return", "the", "square", "root", "of", "n", "in", "an", "exact", "representation" ]
16fb627c81d15ffd8373397633224f50f047f882
https://github.com/leovt/constructible/blob/16fb627c81d15ffd8373397633224f50f047f882/constructible.py#L435-L447
train
leovt/constructible
constructible.py
Constructible._try_sqrt
def _try_sqrt(self): ''' try to compute the square root in the field itself. if there is no square root in the field return None. ''' if not self.field: assert self.b == 0 root, remainder = fsqrt(self.a) if remainder == 1: return Constructible(root) else: return None if self._sign() < 0: raise ValueError('math domain error %s' % self) nn = self.a * self.a - self.b * self.b * self.r if nn._sign() < 0: return None n = nn._try_sqrt() if n is None: return None a = ((self.a + n) * Fraction(1, 2))._try_sqrt() if a is not None: result = Constructible(a, self.b / a * Fraction(1, 2), self.field) assert result.field == self.field return result b = ((self.a + n) / self.r * Fraction(1, 2))._try_sqrt() if b is not None: result = Constructible(self.b / b * Fraction(1, 2), b, self.field) assert result.field == self.field return result return None
python
def _try_sqrt(self): ''' try to compute the square root in the field itself. if there is no square root in the field return None. ''' if not self.field: assert self.b == 0 root, remainder = fsqrt(self.a) if remainder == 1: return Constructible(root) else: return None if self._sign() < 0: raise ValueError('math domain error %s' % self) nn = self.a * self.a - self.b * self.b * self.r if nn._sign() < 0: return None n = nn._try_sqrt() if n is None: return None a = ((self.a + n) * Fraction(1, 2))._try_sqrt() if a is not None: result = Constructible(a, self.b / a * Fraction(1, 2), self.field) assert result.field == self.field return result b = ((self.a + n) / self.r * Fraction(1, 2))._try_sqrt() if b is not None: result = Constructible(self.b / b * Fraction(1, 2), b, self.field) assert result.field == self.field return result return None
[ "def", "_try_sqrt", "(", "self", ")", ":", "if", "not", "self", ".", "field", ":", "assert", "self", ".", "b", "==", "0", "root", ",", "remainder", "=", "fsqrt", "(", "self", ".", "a", ")", "if", "remainder", "==", "1", ":", "return", "Constructible", "(", "root", ")", "else", ":", "return", "None", "if", "self", ".", "_sign", "(", ")", "<", "0", ":", "raise", "ValueError", "(", "'math domain error %s'", "%", "self", ")", "nn", "=", "self", ".", "a", "*", "self", ".", "a", "-", "self", ".", "b", "*", "self", ".", "b", "*", "self", ".", "r", "if", "nn", ".", "_sign", "(", ")", "<", "0", ":", "return", "None", "n", "=", "nn", ".", "_try_sqrt", "(", ")", "if", "n", "is", "None", ":", "return", "None", "a", "=", "(", "(", "self", ".", "a", "+", "n", ")", "*", "Fraction", "(", "1", ",", "2", ")", ")", ".", "_try_sqrt", "(", ")", "if", "a", "is", "not", "None", ":", "result", "=", "Constructible", "(", "a", ",", "self", ".", "b", "/", "a", "*", "Fraction", "(", "1", ",", "2", ")", ",", "self", ".", "field", ")", "assert", "result", ".", "field", "==", "self", ".", "field", "return", "result", "b", "=", "(", "(", "self", ".", "a", "+", "n", ")", "/", "self", ".", "r", "*", "Fraction", "(", "1", ",", "2", ")", ")", ".", "_try_sqrt", "(", ")", "if", "b", "is", "not", "None", ":", "result", "=", "Constructible", "(", "self", ".", "b", "/", "b", "*", "Fraction", "(", "1", ",", "2", ")", ",", "b", ",", "self", ".", "field", ")", "assert", "result", ".", "field", "==", "self", ".", "field", "return", "result", "return", "None" ]
try to compute the square root in the field itself. if there is no square root in the field return None.
[ "try", "to", "compute", "the", "square", "root", "in", "the", "field", "itself", "." ]
16fb627c81d15ffd8373397633224f50f047f882
https://github.com/leovt/constructible/blob/16fb627c81d15ffd8373397633224f50f047f882/constructible.py#L397-L433
train
yamcs/yamcs-python
yamcs-client/examples/archive_breakdown.py
print_packet_count
def print_packet_count(): """Print the number of packets grouped by packet name.""" for name in archive.list_packet_names(): packet_count = 0 for group in archive.list_packet_histogram(name): for rec in group.records: packet_count += rec.count print(' {: <40} {: >20}'.format(name, packet_count))
python
def print_packet_count(): """Print the number of packets grouped by packet name.""" for name in archive.list_packet_names(): packet_count = 0 for group in archive.list_packet_histogram(name): for rec in group.records: packet_count += rec.count print(' {: <40} {: >20}'.format(name, packet_count))
[ "def", "print_packet_count", "(", ")", ":", "for", "name", "in", "archive", ".", "list_packet_names", "(", ")", ":", "packet_count", "=", "0", "for", "group", "in", "archive", ".", "list_packet_histogram", "(", "name", ")", ":", "for", "rec", "in", "group", ".", "records", ":", "packet_count", "+=", "rec", ".", "count", "print", "(", "' {: <40} {: >20}'", ".", "format", "(", "name", ",", "packet_count", ")", ")" ]
Print the number of packets grouped by packet name.
[ "Print", "the", "number", "of", "packets", "grouped", "by", "packet", "name", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_breakdown.py#L6-L13
train
yamcs/yamcs-python
yamcs-client/examples/archive_breakdown.py
print_pp_groups
def print_pp_groups(): """Print the number of processed parameter frames by group name.""" for group in archive.list_processed_parameter_groups(): frame_count = 0 for pp_group in archive.list_processed_parameter_group_histogram(group): for rec in pp_group.records: frame_count += rec.count print(' {: <40} {: >20}'.format(group, frame_count))
python
def print_pp_groups(): """Print the number of processed parameter frames by group name.""" for group in archive.list_processed_parameter_groups(): frame_count = 0 for pp_group in archive.list_processed_parameter_group_histogram(group): for rec in pp_group.records: frame_count += rec.count print(' {: <40} {: >20}'.format(group, frame_count))
[ "def", "print_pp_groups", "(", ")", ":", "for", "group", "in", "archive", ".", "list_processed_parameter_groups", "(", ")", ":", "frame_count", "=", "0", "for", "pp_group", "in", "archive", ".", "list_processed_parameter_group_histogram", "(", "group", ")", ":", "for", "rec", "in", "pp_group", ".", "records", ":", "frame_count", "+=", "rec", ".", "count", "print", "(", "' {: <40} {: >20}'", ".", "format", "(", "group", ",", "frame_count", ")", ")" ]
Print the number of processed parameter frames by group name.
[ "Print", "the", "number", "of", "processed", "parameter", "frames", "by", "group", "name", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_breakdown.py#L16-L23
train
yamcs/yamcs-python
yamcs-client/examples/archive_breakdown.py
print_event_count
def print_event_count(): """Print the number of events grouped by source.""" for source in archive.list_event_sources(): event_count = 0 for group in archive.list_event_histogram(source): for rec in group.records: event_count += rec.count print(' {: <40} {: >20}'.format(source, event_count))
python
def print_event_count(): """Print the number of events grouped by source.""" for source in archive.list_event_sources(): event_count = 0 for group in archive.list_event_histogram(source): for rec in group.records: event_count += rec.count print(' {: <40} {: >20}'.format(source, event_count))
[ "def", "print_event_count", "(", ")", ":", "for", "source", "in", "archive", ".", "list_event_sources", "(", ")", ":", "event_count", "=", "0", "for", "group", "in", "archive", ".", "list_event_histogram", "(", "source", ")", ":", "for", "rec", "in", "group", ".", "records", ":", "event_count", "+=", "rec", ".", "count", "print", "(", "' {: <40} {: >20}'", ".", "format", "(", "source", ",", "event_count", ")", ")" ]
Print the number of events grouped by source.
[ "Print", "the", "number", "of", "events", "grouped", "by", "source", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_breakdown.py#L26-L33
train
yamcs/yamcs-python
yamcs-client/examples/archive_breakdown.py
print_command_count
def print_command_count(): """Print the number of commands grouped by name.""" mdb = client.get_mdb(instance='simulator') for command in mdb.list_commands(): total = 0 for group in archive.list_command_histogram(command.qualified_name): for rec in group.records: total += rec.count print(' {: <40} {: >20}'.format(command, total))
python
def print_command_count(): """Print the number of commands grouped by name.""" mdb = client.get_mdb(instance='simulator') for command in mdb.list_commands(): total = 0 for group in archive.list_command_histogram(command.qualified_name): for rec in group.records: total += rec.count print(' {: <40} {: >20}'.format(command, total))
[ "def", "print_command_count", "(", ")", ":", "mdb", "=", "client", ".", "get_mdb", "(", "instance", "=", "'simulator'", ")", "for", "command", "in", "mdb", ".", "list_commands", "(", ")", ":", "total", "=", "0", "for", "group", "in", "archive", ".", "list_command_histogram", "(", "command", ".", "qualified_name", ")", ":", "for", "rec", "in", "group", ".", "records", ":", "total", "+=", "rec", ".", "count", "print", "(", "' {: <40} {: >20}'", ".", "format", "(", "command", ",", "total", ")", ")" ]
Print the number of commands grouped by name.
[ "Print", "the", "number", "of", "commands", "grouped", "by", "name", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/archive_breakdown.py#L36-L44
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_release.py
cmp_dict
def cmp_dict(d1, d2, ignore_keys=[]): """Compare dicts ignoring select keys""" # https://stackoverflow.com/questions/10480806/compare-dictionaries-ignoring-specific-keys return {k: v for k, v in d1.items() if k not in ignore_keys} \ == {k: v for k, v in d2.items() if k not in ignore_keys}
python
def cmp_dict(d1, d2, ignore_keys=[]): """Compare dicts ignoring select keys""" # https://stackoverflow.com/questions/10480806/compare-dictionaries-ignoring-specific-keys return {k: v for k, v in d1.items() if k not in ignore_keys} \ == {k: v for k, v in d2.items() if k not in ignore_keys}
[ "def", "cmp_dict", "(", "d1", ",", "d2", ",", "ignore_keys", "=", "[", "]", ")", ":", "# https://stackoverflow.com/questions/10480806/compare-dictionaries-ignoring-specific-keys", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d1", ".", "items", "(", ")", "if", "k", "not", "in", "ignore_keys", "}", "==", "{", "k", ":", "v", "for", "k", ",", "v", "in", "d2", ".", "items", "(", ")", "if", "k", "not", "in", "ignore_keys", "}" ]
Compare dicts ignoring select keys
[ "Compare", "dicts", "ignoring", "select", "keys" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_release.py#L232-L236
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_release.py
cross_reference_products
def cross_reference_products( eups_products, manifest_products, ignore_manifest_versions=False, fail_fast=False, ): """ Cross reference EupsTag and Manifest data and return a merged result Parameters ---------- eups_products: manifest: fail_fast: bool ignore_manifest_versions: bool Returns ------- products: dict Raises ------ RuntimeError Upon error if `fail_fast` is `True`. """ products = {} problems = [] for name, eups_data in eups_products.items(): try: manifest_data = manifest_products[name] except KeyError: yikes = RuntimeError(textwrap.dedent("""\ failed to find record in manifest for: {product} {eups_version}\ """).format( product=name, eups_version=eups_data['eups_version'], )) if fail_fast: raise yikes from None problems.append(yikes) error(yikes) if ignore_manifest_versions: # ignore the manifest eups_version string by simply setting it to # the eups tag value. This ensures that the eups tag value will be # passed though. manifest_data = manifest_data.copy() manifest_data['eups_version'] = eups_data['eups_version'] if eups_data['eups_version'] != manifest_data['eups_version']: yikes = RuntimeError(textwrap.dedent("""\ eups version string mismatch: eups tag: {product} {eups_eups_version} manifest: {product} {manifest_eups_version}\ """).format( product=name, eups_eups_version=eups_data['eups_version'], manifest_eups_version=manifest_data['eups_version'], )) if fail_fast: raise yikes problems.append(yikes) error(yikes) products[name] = eups_data.copy() products[name].update(manifest_data) if problems: error("{n} product(s) have error(s)".format(n=len(problems))) return products, problems
python
def cross_reference_products( eups_products, manifest_products, ignore_manifest_versions=False, fail_fast=False, ): """ Cross reference EupsTag and Manifest data and return a merged result Parameters ---------- eups_products: manifest: fail_fast: bool ignore_manifest_versions: bool Returns ------- products: dict Raises ------ RuntimeError Upon error if `fail_fast` is `True`. """ products = {} problems = [] for name, eups_data in eups_products.items(): try: manifest_data = manifest_products[name] except KeyError: yikes = RuntimeError(textwrap.dedent("""\ failed to find record in manifest for: {product} {eups_version}\ """).format( product=name, eups_version=eups_data['eups_version'], )) if fail_fast: raise yikes from None problems.append(yikes) error(yikes) if ignore_manifest_versions: # ignore the manifest eups_version string by simply setting it to # the eups tag value. This ensures that the eups tag value will be # passed though. manifest_data = manifest_data.copy() manifest_data['eups_version'] = eups_data['eups_version'] if eups_data['eups_version'] != manifest_data['eups_version']: yikes = RuntimeError(textwrap.dedent("""\ eups version string mismatch: eups tag: {product} {eups_eups_version} manifest: {product} {manifest_eups_version}\ """).format( product=name, eups_eups_version=eups_data['eups_version'], manifest_eups_version=manifest_data['eups_version'], )) if fail_fast: raise yikes problems.append(yikes) error(yikes) products[name] = eups_data.copy() products[name].update(manifest_data) if problems: error("{n} product(s) have error(s)".format(n=len(problems))) return products, problems
[ "def", "cross_reference_products", "(", "eups_products", ",", "manifest_products", ",", "ignore_manifest_versions", "=", "False", ",", "fail_fast", "=", "False", ",", ")", ":", "products", "=", "{", "}", "problems", "=", "[", "]", "for", "name", ",", "eups_data", "in", "eups_products", ".", "items", "(", ")", ":", "try", ":", "manifest_data", "=", "manifest_products", "[", "name", "]", "except", "KeyError", ":", "yikes", "=", "RuntimeError", "(", "textwrap", ".", "dedent", "(", "\"\"\"\\\n failed to find record in manifest for:\n {product} {eups_version}\\\n \"\"\"", ")", ".", "format", "(", "product", "=", "name", ",", "eups_version", "=", "eups_data", "[", "'eups_version'", "]", ",", ")", ")", "if", "fail_fast", ":", "raise", "yikes", "from", "None", "problems", ".", "append", "(", "yikes", ")", "error", "(", "yikes", ")", "if", "ignore_manifest_versions", ":", "# ignore the manifest eups_version string by simply setting it to", "# the eups tag value. This ensures that the eups tag value will be", "# passed though.", "manifest_data", "=", "manifest_data", ".", "copy", "(", ")", "manifest_data", "[", "'eups_version'", "]", "=", "eups_data", "[", "'eups_version'", "]", "if", "eups_data", "[", "'eups_version'", "]", "!=", "manifest_data", "[", "'eups_version'", "]", ":", "yikes", "=", "RuntimeError", "(", "textwrap", ".", "dedent", "(", "\"\"\"\\\n eups version string mismatch:\n eups tag: {product} {eups_eups_version}\n manifest: {product} {manifest_eups_version}\\\n \"\"\"", ")", ".", "format", "(", "product", "=", "name", ",", "eups_eups_version", "=", "eups_data", "[", "'eups_version'", "]", ",", "manifest_eups_version", "=", "manifest_data", "[", "'eups_version'", "]", ",", ")", ")", "if", "fail_fast", ":", "raise", "yikes", "problems", ".", "append", "(", "yikes", ")", "error", "(", "yikes", ")", "products", "[", "name", "]", "=", "eups_data", ".", "copy", "(", ")", "products", "[", "name", "]", ".", "update", "(", "manifest_data", ")", "if", "problems", 
":", "error", "(", "\"{n} product(s) have error(s)\"", ".", "format", "(", "n", "=", "len", "(", "problems", ")", ")", ")", "return", "products", ",", "problems" ]
Cross reference EupsTag and Manifest data and return a merged result Parameters ---------- eups_products: manifest: fail_fast: bool ignore_manifest_versions: bool Returns ------- products: dict Raises ------ RuntimeError Upon error if `fail_fast` is `True`.
[ "Cross", "reference", "EupsTag", "and", "Manifest", "data", "and", "return", "a", "merged", "result" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_release.py#L239-L311
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_release.py
check_existing_git_tag
def check_existing_git_tag(repo, t_tag, **kwargs): """ Check for a pre-existng tag in the github repo. Parameters ---------- repo : github.Repository.Repository repo to inspect for an existing tagsdf t_tag: codekit.pygithub.TargetTag dict repesenting a target git tag Returns ------- insync : `bool` True if tag exists and is in sync. False if tag does not exist. Raises ------ GitTagExistsError If tag exists but is not in sync. """ assert isinstance(repo, github.Repository.Repository), type(repo) assert isinstance(t_tag, codekit.pygithub.TargetTag), type(t_tag) debug("looking for existing tag: {tag} in repo: {repo}".format( repo=repo.full_name, tag=t_tag.name, )) # find ref/tag by name e_ref = pygithub.find_tag_by_name(repo, t_tag.name) if not e_ref: debug(" not found: {tag}".format(tag=t_tag.name)) return False # find tag object pointed to by the ref try: e_tag = repo.get_git_tag(e_ref.object.sha) except github.RateLimitExceededException: raise except github.GithubException as e: msg = "error getting tag: {tag} [{sha}]".format( tag=e_tag.tag, sha=e_tag.sha, ) raise pygithub.CaughtRepositoryError(repo, e, msg) from None debug(" found existing: {tag} [{sha}]".format( tag=e_tag.tag, sha=e_tag.sha, )) if cmp_existing_git_tag(t_tag, e_tag, **kwargs): return True yikes = GitTagExistsError(textwrap.dedent("""\ tag: {tag} already exists in repo: {repo} with conflicting values: existing: sha: {e_sha} message: {e_message} tagger: {e_tagger} target: sha: {t_sha} message: {t_message} tagger: {t_tagger}\ """).format( tag=t_tag.name, repo=repo.full_name, e_sha=e_tag.object.sha, e_message=e_tag.message, e_tagger=e_tag.tagger, t_sha=t_tag.sha, t_message=t_tag.message, t_tagger=t_tag.tagger, )) raise yikes
python
def check_existing_git_tag(repo, t_tag, **kwargs): """ Check for a pre-existng tag in the github repo. Parameters ---------- repo : github.Repository.Repository repo to inspect for an existing tagsdf t_tag: codekit.pygithub.TargetTag dict repesenting a target git tag Returns ------- insync : `bool` True if tag exists and is in sync. False if tag does not exist. Raises ------ GitTagExistsError If tag exists but is not in sync. """ assert isinstance(repo, github.Repository.Repository), type(repo) assert isinstance(t_tag, codekit.pygithub.TargetTag), type(t_tag) debug("looking for existing tag: {tag} in repo: {repo}".format( repo=repo.full_name, tag=t_tag.name, )) # find ref/tag by name e_ref = pygithub.find_tag_by_name(repo, t_tag.name) if not e_ref: debug(" not found: {tag}".format(tag=t_tag.name)) return False # find tag object pointed to by the ref try: e_tag = repo.get_git_tag(e_ref.object.sha) except github.RateLimitExceededException: raise except github.GithubException as e: msg = "error getting tag: {tag} [{sha}]".format( tag=e_tag.tag, sha=e_tag.sha, ) raise pygithub.CaughtRepositoryError(repo, e, msg) from None debug(" found existing: {tag} [{sha}]".format( tag=e_tag.tag, sha=e_tag.sha, )) if cmp_existing_git_tag(t_tag, e_tag, **kwargs): return True yikes = GitTagExistsError(textwrap.dedent("""\ tag: {tag} already exists in repo: {repo} with conflicting values: existing: sha: {e_sha} message: {e_message} tagger: {e_tagger} target: sha: {t_sha} message: {t_message} tagger: {t_tagger}\ """).format( tag=t_tag.name, repo=repo.full_name, e_sha=e_tag.object.sha, e_message=e_tag.message, e_tagger=e_tag.tagger, t_sha=t_tag.sha, t_message=t_tag.message, t_tagger=t_tag.tagger, )) raise yikes
[ "def", "check_existing_git_tag", "(", "repo", ",", "t_tag", ",", "*", "*", "kwargs", ")", ":", "assert", "isinstance", "(", "repo", ",", "github", ".", "Repository", ".", "Repository", ")", ",", "type", "(", "repo", ")", "assert", "isinstance", "(", "t_tag", ",", "codekit", ".", "pygithub", ".", "TargetTag", ")", ",", "type", "(", "t_tag", ")", "debug", "(", "\"looking for existing tag: {tag} in repo: {repo}\"", ".", "format", "(", "repo", "=", "repo", ".", "full_name", ",", "tag", "=", "t_tag", ".", "name", ",", ")", ")", "# find ref/tag by name", "e_ref", "=", "pygithub", ".", "find_tag_by_name", "(", "repo", ",", "t_tag", ".", "name", ")", "if", "not", "e_ref", ":", "debug", "(", "\" not found: {tag}\"", ".", "format", "(", "tag", "=", "t_tag", ".", "name", ")", ")", "return", "False", "# find tag object pointed to by the ref", "try", ":", "e_tag", "=", "repo", ".", "get_git_tag", "(", "e_ref", ".", "object", ".", "sha", ")", "except", "github", ".", "RateLimitExceededException", ":", "raise", "except", "github", ".", "GithubException", "as", "e", ":", "msg", "=", "\"error getting tag: {tag} [{sha}]\"", ".", "format", "(", "tag", "=", "e_tag", ".", "tag", ",", "sha", "=", "e_tag", ".", "sha", ",", ")", "raise", "pygithub", ".", "CaughtRepositoryError", "(", "repo", ",", "e", ",", "msg", ")", "from", "None", "debug", "(", "\" found existing: {tag} [{sha}]\"", ".", "format", "(", "tag", "=", "e_tag", ".", "tag", ",", "sha", "=", "e_tag", ".", "sha", ",", ")", ")", "if", "cmp_existing_git_tag", "(", "t_tag", ",", "e_tag", ",", "*", "*", "kwargs", ")", ":", "return", "True", "yikes", "=", "GitTagExistsError", "(", "textwrap", ".", "dedent", "(", "\"\"\"\\\n tag: {tag} already exists in repo: {repo}\n with conflicting values:\n existing:\n sha: {e_sha}\n message: {e_message}\n tagger: {e_tagger}\n target:\n sha: {t_sha}\n message: {t_message}\n tagger: {t_tagger}\\\n \"\"\"", ")", ".", "format", "(", "tag", "=", "t_tag", ".", "name", ",", "repo", "=", "repo", ".", 
"full_name", ",", "e_sha", "=", "e_tag", ".", "object", ".", "sha", ",", "e_message", "=", "e_tag", ".", "message", ",", "e_tagger", "=", "e_tag", ".", "tagger", ",", "t_sha", "=", "t_tag", ".", "sha", ",", "t_message", "=", "t_tag", ".", "message", ",", "t_tagger", "=", "t_tag", ".", "tagger", ",", ")", ")", "raise", "yikes" ]
Check for a pre-existng tag in the github repo. Parameters ---------- repo : github.Repository.Repository repo to inspect for an existing tagsdf t_tag: codekit.pygithub.TargetTag dict repesenting a target git tag Returns ------- insync : `bool` True if tag exists and is in sync. False if tag does not exist. Raises ------ GitTagExistsError If tag exists but is not in sync.
[ "Check", "for", "a", "pre", "-", "existng", "tag", "in", "the", "github", "repo", "." ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_release.py#L445-L523
train
lsst-sqre/sqre-codekit
codekit/cli/github_tag_release.py
run
def run(): """Create the tag""" args = parse_args() codetools.setup_logging(args.debug) git_tag = args.tag # if email not specified, try getting it from the gitconfig git_email = codetools.lookup_email(args) # ditto for the name of the git user git_user = codetools.lookup_user(args) # The default eups tag is derived from the git tag, otherwise specified # with the --eups-tag option. The reason to currently do this is that for # weeklies and other internal builds, it's okay to eups publish the weekly # and git tag post-facto. However for official releases, we don't want to # publish until the git tag goes down, because we want to eups publish the # build that has the official versions in the eups ref. if not args.manifest_only: eups_tag = args.eups_tag if not eups_tag: # generate eups-style version eups_tag = eups.git_tag2eups_tag(git_tag) debug("using eups tag: {eups_tag}".format(eups_tag=eups_tag)) # sadly we need to "just" know this # XXX this can be parsed from the eups tag file post d_2018_05_08 manifest = args.manifest debug("using manifest: {manifest}".format(manifest=manifest)) if not args.manifest_only: # release from eups tag message_template = "Version {{git_tag}}"\ " release from {eups_tag}/{manifest}".format( eups_tag=eups_tag, manifest=manifest, ) else: # release from manifest only message_template = "Version {{git_tag}}"\ " release from manifest {manifest}".format( manifest=manifest, ) debug("using tag message: {msg}".format(msg=message_template)) tagger = github.InputGitAuthor( git_user, git_email, codetools.current_timestamp(), ) debug("using taggger: {tagger}".format(tagger=tagger)) global g g = pygithub.login_github(token_path=args.token_path, token=args.token) org = g.get_organization(args.org) info("tagging repos in org: {org}".format(org=org.login)) problems = [] manifest_products = versiondb.Manifest( manifest, base_url=args.versiondb_base_url).products if not args.manifest_only: # cross-reference eups tag version strings with manifest 
eups_products = eups.EupsTag( eups_tag, base_url=args.eupstag_base_url).products # do not fail-fast on non-write operations products, err = cross_reference_products( eups_products, manifest_products, ignore_manifest_versions=args.ignore_manifest_versions, fail_fast=False, ) problems += err else: # no eups tag; use manifest products without sanity check against eups # tag version strings products = manifest_products if args.limit: products = dict(itertools.islice(products.items(), args.limit)) # do not fail-fast on non-write operations products, err = get_repo_for_products( org=org, products=products, allow_teams=args.allow_team, ext_teams=args.external_team, deny_teams=args.deny_team, fail_fast=False, ) problems += err # do not fail-fast on non-write operations products_to_tag, err = check_product_tags( products, git_tag, tag_message_template=message_template, tagger=tagger, force_tag=args.force_tag, fail_fast=False, ignore_git_message=args.ignore_git_message, ignore_git_tagger=args.ignore_git_tagger, ) problems += err if args.verify: # in verify mode, it is an error if there are products that need to be # tagged. err = identify_products_missing_tags(products_to_tag) problems += err if problems: msg = "{n} pre-flight error(s)".format(n=len(problems)) raise codetools.DogpileError(problems, msg) tag_products( products_to_tag, fail_fast=args.fail_fast, dry_run=args.dry_run, )
python
def run(): """Create the tag""" args = parse_args() codetools.setup_logging(args.debug) git_tag = args.tag # if email not specified, try getting it from the gitconfig git_email = codetools.lookup_email(args) # ditto for the name of the git user git_user = codetools.lookup_user(args) # The default eups tag is derived from the git tag, otherwise specified # with the --eups-tag option. The reason to currently do this is that for # weeklies and other internal builds, it's okay to eups publish the weekly # and git tag post-facto. However for official releases, we don't want to # publish until the git tag goes down, because we want to eups publish the # build that has the official versions in the eups ref. if not args.manifest_only: eups_tag = args.eups_tag if not eups_tag: # generate eups-style version eups_tag = eups.git_tag2eups_tag(git_tag) debug("using eups tag: {eups_tag}".format(eups_tag=eups_tag)) # sadly we need to "just" know this # XXX this can be parsed from the eups tag file post d_2018_05_08 manifest = args.manifest debug("using manifest: {manifest}".format(manifest=manifest)) if not args.manifest_only: # release from eups tag message_template = "Version {{git_tag}}"\ " release from {eups_tag}/{manifest}".format( eups_tag=eups_tag, manifest=manifest, ) else: # release from manifest only message_template = "Version {{git_tag}}"\ " release from manifest {manifest}".format( manifest=manifest, ) debug("using tag message: {msg}".format(msg=message_template)) tagger = github.InputGitAuthor( git_user, git_email, codetools.current_timestamp(), ) debug("using taggger: {tagger}".format(tagger=tagger)) global g g = pygithub.login_github(token_path=args.token_path, token=args.token) org = g.get_organization(args.org) info("tagging repos in org: {org}".format(org=org.login)) problems = [] manifest_products = versiondb.Manifest( manifest, base_url=args.versiondb_base_url).products if not args.manifest_only: # cross-reference eups tag version strings with manifest 
eups_products = eups.EupsTag( eups_tag, base_url=args.eupstag_base_url).products # do not fail-fast on non-write operations products, err = cross_reference_products( eups_products, manifest_products, ignore_manifest_versions=args.ignore_manifest_versions, fail_fast=False, ) problems += err else: # no eups tag; use manifest products without sanity check against eups # tag version strings products = manifest_products if args.limit: products = dict(itertools.islice(products.items(), args.limit)) # do not fail-fast on non-write operations products, err = get_repo_for_products( org=org, products=products, allow_teams=args.allow_team, ext_teams=args.external_team, deny_teams=args.deny_team, fail_fast=False, ) problems += err # do not fail-fast on non-write operations products_to_tag, err = check_product_tags( products, git_tag, tag_message_template=message_template, tagger=tagger, force_tag=args.force_tag, fail_fast=False, ignore_git_message=args.ignore_git_message, ignore_git_tagger=args.ignore_git_tagger, ) problems += err if args.verify: # in verify mode, it is an error if there are products that need to be # tagged. err = identify_products_missing_tags(products_to_tag) problems += err if problems: msg = "{n} pre-flight error(s)".format(n=len(problems)) raise codetools.DogpileError(problems, msg) tag_products( products_to_tag, fail_fast=args.fail_fast, dry_run=args.dry_run, )
[ "def", "run", "(", ")", ":", "args", "=", "parse_args", "(", ")", "codetools", ".", "setup_logging", "(", "args", ".", "debug", ")", "git_tag", "=", "args", ".", "tag", "# if email not specified, try getting it from the gitconfig", "git_email", "=", "codetools", ".", "lookup_email", "(", "args", ")", "# ditto for the name of the git user", "git_user", "=", "codetools", ".", "lookup_user", "(", "args", ")", "# The default eups tag is derived from the git tag, otherwise specified", "# with the --eups-tag option. The reason to currently do this is that for", "# weeklies and other internal builds, it's okay to eups publish the weekly", "# and git tag post-facto. However for official releases, we don't want to", "# publish until the git tag goes down, because we want to eups publish the", "# build that has the official versions in the eups ref.", "if", "not", "args", ".", "manifest_only", ":", "eups_tag", "=", "args", ".", "eups_tag", "if", "not", "eups_tag", ":", "# generate eups-style version", "eups_tag", "=", "eups", ".", "git_tag2eups_tag", "(", "git_tag", ")", "debug", "(", "\"using eups tag: {eups_tag}\"", ".", "format", "(", "eups_tag", "=", "eups_tag", ")", ")", "# sadly we need to \"just\" know this", "# XXX this can be parsed from the eups tag file post d_2018_05_08", "manifest", "=", "args", ".", "manifest", "debug", "(", "\"using manifest: {manifest}\"", ".", "format", "(", "manifest", "=", "manifest", ")", ")", "if", "not", "args", ".", "manifest_only", ":", "# release from eups tag", "message_template", "=", "\"Version {{git_tag}}\"", "\" release from {eups_tag}/{manifest}\"", ".", "format", "(", "eups_tag", "=", "eups_tag", ",", "manifest", "=", "manifest", ",", ")", "else", ":", "# release from manifest only", "message_template", "=", "\"Version {{git_tag}}\"", "\" release from manifest {manifest}\"", ".", "format", "(", "manifest", "=", "manifest", ",", ")", "debug", "(", "\"using tag message: {msg}\"", ".", "format", "(", "msg", "=", 
"message_template", ")", ")", "tagger", "=", "github", ".", "InputGitAuthor", "(", "git_user", ",", "git_email", ",", "codetools", ".", "current_timestamp", "(", ")", ",", ")", "debug", "(", "\"using taggger: {tagger}\"", ".", "format", "(", "tagger", "=", "tagger", ")", ")", "global", "g", "g", "=", "pygithub", ".", "login_github", "(", "token_path", "=", "args", ".", "token_path", ",", "token", "=", "args", ".", "token", ")", "org", "=", "g", ".", "get_organization", "(", "args", ".", "org", ")", "info", "(", "\"tagging repos in org: {org}\"", ".", "format", "(", "org", "=", "org", ".", "login", ")", ")", "problems", "=", "[", "]", "manifest_products", "=", "versiondb", ".", "Manifest", "(", "manifest", ",", "base_url", "=", "args", ".", "versiondb_base_url", ")", ".", "products", "if", "not", "args", ".", "manifest_only", ":", "# cross-reference eups tag version strings with manifest", "eups_products", "=", "eups", ".", "EupsTag", "(", "eups_tag", ",", "base_url", "=", "args", ".", "eupstag_base_url", ")", ".", "products", "# do not fail-fast on non-write operations", "products", ",", "err", "=", "cross_reference_products", "(", "eups_products", ",", "manifest_products", ",", "ignore_manifest_versions", "=", "args", ".", "ignore_manifest_versions", ",", "fail_fast", "=", "False", ",", ")", "problems", "+=", "err", "else", ":", "# no eups tag; use manifest products without sanity check against eups", "# tag version strings", "products", "=", "manifest_products", "if", "args", ".", "limit", ":", "products", "=", "dict", "(", "itertools", ".", "islice", "(", "products", ".", "items", "(", ")", ",", "args", ".", "limit", ")", ")", "# do not fail-fast on non-write operations", "products", ",", "err", "=", "get_repo_for_products", "(", "org", "=", "org", ",", "products", "=", "products", ",", "allow_teams", "=", "args", ".", "allow_team", ",", "ext_teams", "=", "args", ".", "external_team", ",", "deny_teams", "=", "args", ".", "deny_team", ",", "fail_fast", "=", 
"False", ",", ")", "problems", "+=", "err", "# do not fail-fast on non-write operations", "products_to_tag", ",", "err", "=", "check_product_tags", "(", "products", ",", "git_tag", ",", "tag_message_template", "=", "message_template", ",", "tagger", "=", "tagger", ",", "force_tag", "=", "args", ".", "force_tag", ",", "fail_fast", "=", "False", ",", "ignore_git_message", "=", "args", ".", "ignore_git_message", ",", "ignore_git_tagger", "=", "args", ".", "ignore_git_tagger", ",", ")", "problems", "+=", "err", "if", "args", ".", "verify", ":", "# in verify mode, it is an error if there are products that need to be", "# tagged.", "err", "=", "identify_products_missing_tags", "(", "products_to_tag", ")", "problems", "+=", "err", "if", "problems", ":", "msg", "=", "\"{n} pre-flight error(s)\"", ".", "format", "(", "n", "=", "len", "(", "problems", ")", ")", "raise", "codetools", ".", "DogpileError", "(", "problems", ",", "msg", ")", "tag_products", "(", "products_to_tag", ",", "fail_fast", "=", "args", ".", "fail_fast", ",", "dry_run", "=", "args", ".", "dry_run", ",", ")" ]
Create the tag
[ "Create", "the", "tag" ]
98122404cd9065d4d1d570867fe518042669126c
https://github.com/lsst-sqre/sqre-codekit/blob/98122404cd9065d4d1d570867fe518042669126c/codekit/cli/github_tag_release.py#L722-L847
train
sirfoga/pyhal
hal/times/cron.py
AppCronLock.can_proceed
def can_proceed(self): """Checks whether app can proceed :return: True iff app is not locked and times since last update < app update interval """ now = datetime.datetime.now() delta = datetime.timedelta(days=self.update_interval) return now >= self.last_update + delta
python
def can_proceed(self): """Checks whether app can proceed :return: True iff app is not locked and times since last update < app update interval """ now = datetime.datetime.now() delta = datetime.timedelta(days=self.update_interval) return now >= self.last_update + delta
[ "def", "can_proceed", "(", "self", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "delta", "=", "datetime", ".", "timedelta", "(", "days", "=", "self", ".", "update_interval", ")", "return", "now", ">=", "self", ".", "last_update", "+", "delta" ]
Checks whether app can proceed :return: True iff app is not locked and times since last update < app update interval
[ "Checks", "whether", "app", "can", "proceed" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/cron.py#L31-L39
train
sirfoga/pyhal
hal/times/cron.py
AppCronLock.parse_lock
def parse_lock(self): """Parses app lock file :return: Details about last update """ try: with open(self.lock_file, "r") as reader: data = json.loads(reader.read()) self.last_update = datetime.datetime.strptime( data["last_update"], AppCronLock.DATETIME_FORMAT ) except: # malformed lock file self.write_lock(last_update=datetime.datetime.fromtimestamp(0)) self.parse_lock()
python
def parse_lock(self): """Parses app lock file :return: Details about last update """ try: with open(self.lock_file, "r") as reader: data = json.loads(reader.read()) self.last_update = datetime.datetime.strptime( data["last_update"], AppCronLock.DATETIME_FORMAT ) except: # malformed lock file self.write_lock(last_update=datetime.datetime.fromtimestamp(0)) self.parse_lock()
[ "def", "parse_lock", "(", "self", ")", ":", "try", ":", "with", "open", "(", "self", ".", "lock_file", ",", "\"r\"", ")", "as", "reader", ":", "data", "=", "json", ".", "loads", "(", "reader", ".", "read", "(", ")", ")", "self", ".", "last_update", "=", "datetime", ".", "datetime", ".", "strptime", "(", "data", "[", "\"last_update\"", "]", ",", "AppCronLock", ".", "DATETIME_FORMAT", ")", "except", ":", "# malformed lock file", "self", ".", "write_lock", "(", "last_update", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "0", ")", ")", "self", ".", "parse_lock", "(", ")" ]
Parses app lock file :return: Details about last update
[ "Parses", "app", "lock", "file" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/cron.py#L41-L55
train
sirfoga/pyhal
hal/times/cron.py
AppCronLock.write_lock
def write_lock(self, last_update=datetime.datetime.now()): """Writes lock file :param last_update: last update of app """ data = { "last_update": last_update.strftime(AppCronLock.DATETIME_FORMAT) } with open(self.lock_file, "w") as writer: json.dump(data, writer)
python
def write_lock(self, last_update=datetime.datetime.now()): """Writes lock file :param last_update: last update of app """ data = { "last_update": last_update.strftime(AppCronLock.DATETIME_FORMAT) } with open(self.lock_file, "w") as writer: json.dump(data, writer)
[ "def", "write_lock", "(", "self", ",", "last_update", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ")", ":", "data", "=", "{", "\"last_update\"", ":", "last_update", ".", "strftime", "(", "AppCronLock", ".", "DATETIME_FORMAT", ")", "}", "with", "open", "(", "self", ".", "lock_file", ",", "\"w\"", ")", "as", "writer", ":", "json", ".", "dump", "(", "data", ",", "writer", ")" ]
Writes lock file :param last_update: last update of app
[ "Writes", "lock", "file" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/times/cron.py#L57-L67
train
MersenneForum/MersenneForumAliquot
scripts/drivers.py
filter_seq
def filter_seq(seq): '''Examines unreserved sequences to see if they are prone to mutation. This currently ignores solely-power-of-2 guides with b > 3''' if seq.res: return None n = nt.Factors(seq.factors) guide, s, t = aq.canonical_form(n) seq.guide = guide # The target_tau for the composite is at most the class minus extant prime factor count cls = aq.get_class(guide=guide) num_larges = seq.factors.count('P') upper_bound_tau = cls - num_larges - len(t) if cls < 2 or upper_bound_tau < 2: # Cheap tests to eliminate almost all sequences return None # Next we ignore sequences whose guide is solely a power of 2 greater than 3 v = nt.Factors({p: a for p, a in guide.items() if p != 2 and a > 0}) if int(v) == 1 and cls > 3: return None # This condition greatly reduces fdb load, but excludes a lot of sequences if not aq.is_driver(guide=guide): return None return n, guide
python
def filter_seq(seq): '''Examines unreserved sequences to see if they are prone to mutation. This currently ignores solely-power-of-2 guides with b > 3''' if seq.res: return None n = nt.Factors(seq.factors) guide, s, t = aq.canonical_form(n) seq.guide = guide # The target_tau for the composite is at most the class minus extant prime factor count cls = aq.get_class(guide=guide) num_larges = seq.factors.count('P') upper_bound_tau = cls - num_larges - len(t) if cls < 2 or upper_bound_tau < 2: # Cheap tests to eliminate almost all sequences return None # Next we ignore sequences whose guide is solely a power of 2 greater than 3 v = nt.Factors({p: a for p, a in guide.items() if p != 2 and a > 0}) if int(v) == 1 and cls > 3: return None # This condition greatly reduces fdb load, but excludes a lot of sequences if not aq.is_driver(guide=guide): return None return n, guide
[ "def", "filter_seq", "(", "seq", ")", ":", "if", "seq", ".", "res", ":", "return", "None", "n", "=", "nt", ".", "Factors", "(", "seq", ".", "factors", ")", "guide", ",", "s", ",", "t", "=", "aq", ".", "canonical_form", "(", "n", ")", "seq", ".", "guide", "=", "guide", "# The target_tau for the composite is at most the class minus extant prime factor count", "cls", "=", "aq", ".", "get_class", "(", "guide", "=", "guide", ")", "num_larges", "=", "seq", ".", "factors", ".", "count", "(", "'P'", ")", "upper_bound_tau", "=", "cls", "-", "num_larges", "-", "len", "(", "t", ")", "if", "cls", "<", "2", "or", "upper_bound_tau", "<", "2", ":", "# Cheap tests to eliminate almost all sequences", "return", "None", "# Next we ignore sequences whose guide is solely a power of 2 greater than 3", "v", "=", "nt", ".", "Factors", "(", "{", "p", ":", "a", "for", "p", ",", "a", "in", "guide", ".", "items", "(", ")", "if", "p", "!=", "2", "and", "a", ">", "0", "}", ")", "if", "int", "(", "v", ")", "==", "1", "and", "cls", ">", "3", ":", "return", "None", "# This condition greatly reduces fdb load, but excludes a lot of sequences", "if", "not", "aq", ".", "is_driver", "(", "guide", "=", "guide", ")", ":", "return", "None", "return", "n", ",", "guide" ]
Examines unreserved sequences to see if they are prone to mutation. This currently ignores solely-power-of-2 guides with b > 3
[ "Examines", "unreserved", "sequences", "to", "see", "if", "they", "are", "prone", "to", "mutation", ".", "This", "currently", "ignores", "solely", "-", "power", "-", "of", "-", "2", "guides", "with", "b", ">", "3" ]
d2605f1b3bb7c25980d11698d915ffc1c525acda
https://github.com/MersenneForum/MersenneForumAliquot/blob/d2605f1b3bb7c25980d11698d915ffc1c525acda/scripts/drivers.py#L110-L134
train
farshidce/touchworks-python
touchworks/api/http.py
TouchWorks.get_token
def get_token(self, appname, username, password): """ get the security token by connecting to TouchWorks API """ ext_exception = TouchWorksException( TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR) data = {'Username': username, 'Password': password} resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data) try: logger.debug('token : %s' % resp) if not resp.text: raise ext_exception try: uuid.UUID(resp.text, version=4) return SecurityToken(resp.text) except ValueError: logger.error('response was not valid uuid string. %s' % resp.text) raise ext_exception except Exception as ex: logger.exception(ex) raise ext_exception
python
def get_token(self, appname, username, password): """ get the security token by connecting to TouchWorks API """ ext_exception = TouchWorksException( TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR) data = {'Username': username, 'Password': password} resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data) try: logger.debug('token : %s' % resp) if not resp.text: raise ext_exception try: uuid.UUID(resp.text, version=4) return SecurityToken(resp.text) except ValueError: logger.error('response was not valid uuid string. %s' % resp.text) raise ext_exception except Exception as ex: logger.exception(ex) raise ext_exception
[ "def", "get_token", "(", "self", ",", "appname", ",", "username", ",", "password", ")", ":", "ext_exception", "=", "TouchWorksException", "(", "TouchWorksErrorMessages", ".", "GET_TOKEN_FAILED_ERROR", ")", "data", "=", "{", "'Username'", ":", "username", ",", "'Password'", ":", "password", "}", "resp", "=", "self", ".", "_http_request", "(", "TouchWorksEndPoints", ".", "GET_TOKEN", ",", "data", ")", "try", ":", "logger", ".", "debug", "(", "'token : %s'", "%", "resp", ")", "if", "not", "resp", ".", "text", ":", "raise", "ext_exception", "try", ":", "uuid", ".", "UUID", "(", "resp", ".", "text", ",", "version", "=", "4", ")", "return", "SecurityToken", "(", "resp", ".", "text", ")", "except", "ValueError", ":", "logger", ".", "error", "(", "'response was not valid uuid string. %s'", "%", "resp", ".", "text", ")", "raise", "ext_exception", "except", "Exception", "as", "ex", ":", "logger", ".", "exception", "(", "ex", ")", "raise", "ext_exception" ]
get the security token by connecting to TouchWorks API
[ "get", "the", "security", "token", "by", "connecting", "to", "TouchWorks", "API" ]
ea8f93a0f4273de1317a318e945a571f5038ba62
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L139-L161
train
farshidce/touchworks-python
touchworks/api/http.py
TouchWorks._http_request
def _http_request(self, api, data, headers=None): """ internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post() """ if not headers: headers = {'Content-Type': 'application/json'} if not self._token_valid: self._token = self.get_token(self._app_name, self._username, self._password) response = requests.post(self._base_url + '/' + api, data=json.dumps(data), headers=headers) # raise an exception if the status was not 200 logger.debug(json.dumps(data)) logger.debug(response.text) response.raise_for_status() return response
python
def _http_request(self, api, data, headers=None): """ internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post() """ if not headers: headers = {'Content-Type': 'application/json'} if not self._token_valid: self._token = self.get_token(self._app_name, self._username, self._password) response = requests.post(self._base_url + '/' + api, data=json.dumps(data), headers=headers) # raise an exception if the status was not 200 logger.debug(json.dumps(data)) logger.debug(response.text) response.raise_for_status() return response
[ "def", "_http_request", "(", "self", ",", "api", ",", "data", ",", "headers", "=", "None", ")", ":", "if", "not", "headers", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "if", "not", "self", ".", "_token_valid", ":", "self", ".", "_token", "=", "self", ".", "get_token", "(", "self", ".", "_app_name", ",", "self", ".", "_username", ",", "self", ".", "_password", ")", "response", "=", "requests", ".", "post", "(", "self", ".", "_base_url", "+", "'/'", "+", "api", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "headers", ")", "# raise an exception if the status was not 200", "logger", ".", "debug", "(", "json", ".", "dumps", "(", "data", ")", ")", "logger", ".", "debug", "(", "response", ".", "text", ")", "response", ".", "raise_for_status", "(", ")", "return", "response" ]
internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post()
[ "internal", "method", "for", "handling", "request", "and", "response", "and", "raising", "an", "exception", "is", "http", "return", "status", "code", "is", "not", "success" ]
ea8f93a0f4273de1317a318e945a571f5038ba62
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L178-L195
train
NoviceLive/intellicoder
intellicoder/database.py
Database.query_item
def query_item(self, key, abis): """Query items based on system call number or name.""" try: key = int(key) field = 'number' except ValueError: try: key = int(key, 16) field = 'number' except ValueError: field = 'name' arg = and_(getattr(Item, field) == key, or_(Item.abi == abi for abi in abis)) return self.session.query(Item).filter(arg).all()
python
def query_item(self, key, abis): """Query items based on system call number or name.""" try: key = int(key) field = 'number' except ValueError: try: key = int(key, 16) field = 'number' except ValueError: field = 'name' arg = and_(getattr(Item, field) == key, or_(Item.abi == abi for abi in abis)) return self.session.query(Item).filter(arg).all()
[ "def", "query_item", "(", "self", ",", "key", ",", "abis", ")", ":", "try", ":", "key", "=", "int", "(", "key", ")", "field", "=", "'number'", "except", "ValueError", ":", "try", ":", "key", "=", "int", "(", "key", ",", "16", ")", "field", "=", "'number'", "except", "ValueError", ":", "field", "=", "'name'", "arg", "=", "and_", "(", "getattr", "(", "Item", ",", "field", ")", "==", "key", ",", "or_", "(", "Item", ".", "abi", "==", "abi", "for", "abi", "in", "abis", ")", ")", "return", "self", ".", "session", ".", "query", "(", "Item", ")", ".", "filter", "(", "arg", ")", ".", "all", "(", ")" ]
Query items based on system call number or name.
[ "Query", "items", "based", "on", "system", "call", "number", "or", "name", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/database.py#L49-L62
train
NoviceLive/intellicoder
intellicoder/database.py
Database.query_decl
def query_decl(self, **kwargs): """Query declarations.""" return self.session.query(Decl).filter_by(**kwargs).all()
python
def query_decl(self, **kwargs): """Query declarations.""" return self.session.query(Decl).filter_by(**kwargs).all()
[ "def", "query_decl", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "session", ".", "query", "(", "Decl", ")", ".", "filter_by", "(", "*", "*", "kwargs", ")", ".", "all", "(", ")" ]
Query declarations.
[ "Query", "declarations", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/database.py#L64-L66
train
NoviceLive/intellicoder
intellicoder/database.py
Database.add_data
def add_data(self, filenames): """Add data.""" def _parse_table(table): def _parse_line(line): return line.split('\t') lines = (_parse_line(one) for one in table.splitlines() if re.match(r'^\d', one)) return (remove_false(one) for one in lines) def _parse_decl(decl): index = len('SYSCALL_DEFINE') argc = decl[index] rest = decl[index + 1:][1:-1].split(',') name = rest[0] # args = [one.strip() for one in rest[1:]] args = ','.join(rest[1:]) return name, argc, args def _parse_line(line): index = line.find(':') if index == -1: raise RuntimeError('This is unexpected: %s', line) filename = line[:index] decl = line[index + 1:] return filename, _parse_decl(decl) def _split_into_lines(grep_output): lines = grep_output.replace('\n\n', '\n') lines = lines.replace('\n\t', '').replace('\t', ' ') return lines.strip().splitlines() for one in filenames: if one.name.endswith('.tbl'): for item in _parse_table(one.read()): args = list(item) if len(args) != 5: args += [''] * (5 - len(args)) self.session.add( Item(name=args[2], abi=args[1], number=args[0], entry=args[3], compat=args[4])) else: for line in _split_into_lines(one.read()): filename, rest = (_parse_line(line)) self.session.add( Decl(name=rest[0], filename=filename, argc=rest[1], args=rest[2])) self.session.commit()
python
def add_data(self, filenames): """Add data.""" def _parse_table(table): def _parse_line(line): return line.split('\t') lines = (_parse_line(one) for one in table.splitlines() if re.match(r'^\d', one)) return (remove_false(one) for one in lines) def _parse_decl(decl): index = len('SYSCALL_DEFINE') argc = decl[index] rest = decl[index + 1:][1:-1].split(',') name = rest[0] # args = [one.strip() for one in rest[1:]] args = ','.join(rest[1:]) return name, argc, args def _parse_line(line): index = line.find(':') if index == -1: raise RuntimeError('This is unexpected: %s', line) filename = line[:index] decl = line[index + 1:] return filename, _parse_decl(decl) def _split_into_lines(grep_output): lines = grep_output.replace('\n\n', '\n') lines = lines.replace('\n\t', '').replace('\t', ' ') return lines.strip().splitlines() for one in filenames: if one.name.endswith('.tbl'): for item in _parse_table(one.read()): args = list(item) if len(args) != 5: args += [''] * (5 - len(args)) self.session.add( Item(name=args[2], abi=args[1], number=args[0], entry=args[3], compat=args[4])) else: for line in _split_into_lines(one.read()): filename, rest = (_parse_line(line)) self.session.add( Decl(name=rest[0], filename=filename, argc=rest[1], args=rest[2])) self.session.commit()
[ "def", "add_data", "(", "self", ",", "filenames", ")", ":", "def", "_parse_table", "(", "table", ")", ":", "def", "_parse_line", "(", "line", ")", ":", "return", "line", ".", "split", "(", "'\\t'", ")", "lines", "=", "(", "_parse_line", "(", "one", ")", "for", "one", "in", "table", ".", "splitlines", "(", ")", "if", "re", ".", "match", "(", "r'^\\d'", ",", "one", ")", ")", "return", "(", "remove_false", "(", "one", ")", "for", "one", "in", "lines", ")", "def", "_parse_decl", "(", "decl", ")", ":", "index", "=", "len", "(", "'SYSCALL_DEFINE'", ")", "argc", "=", "decl", "[", "index", "]", "rest", "=", "decl", "[", "index", "+", "1", ":", "]", "[", "1", ":", "-", "1", "]", ".", "split", "(", "','", ")", "name", "=", "rest", "[", "0", "]", "# args = [one.strip() for one in rest[1:]]", "args", "=", "','", ".", "join", "(", "rest", "[", "1", ":", "]", ")", "return", "name", ",", "argc", ",", "args", "def", "_parse_line", "(", "line", ")", ":", "index", "=", "line", ".", "find", "(", "':'", ")", "if", "index", "==", "-", "1", ":", "raise", "RuntimeError", "(", "'This is unexpected: %s'", ",", "line", ")", "filename", "=", "line", "[", ":", "index", "]", "decl", "=", "line", "[", "index", "+", "1", ":", "]", "return", "filename", ",", "_parse_decl", "(", "decl", ")", "def", "_split_into_lines", "(", "grep_output", ")", ":", "lines", "=", "grep_output", ".", "replace", "(", "'\\n\\n'", ",", "'\\n'", ")", "lines", "=", "lines", ".", "replace", "(", "'\\n\\t'", ",", "''", ")", ".", "replace", "(", "'\\t'", ",", "' '", ")", "return", "lines", ".", "strip", "(", ")", ".", "splitlines", "(", ")", "for", "one", "in", "filenames", ":", "if", "one", ".", "name", ".", "endswith", "(", "'.tbl'", ")", ":", "for", "item", "in", "_parse_table", "(", "one", ".", "read", "(", ")", ")", ":", "args", "=", "list", "(", "item", ")", "if", "len", "(", "args", ")", "!=", "5", ":", "args", "+=", "[", "''", "]", "*", "(", "5", "-", "len", "(", "args", ")", ")", "self", ".", "session", ".", 
"add", "(", "Item", "(", "name", "=", "args", "[", "2", "]", ",", "abi", "=", "args", "[", "1", "]", ",", "number", "=", "args", "[", "0", "]", ",", "entry", "=", "args", "[", "3", "]", ",", "compat", "=", "args", "[", "4", "]", ")", ")", "else", ":", "for", "line", "in", "_split_into_lines", "(", "one", ".", "read", "(", ")", ")", ":", "filename", ",", "rest", "=", "(", "_parse_line", "(", "line", ")", ")", "self", ".", "session", ".", "add", "(", "Decl", "(", "name", "=", "rest", "[", "0", "]", ",", "filename", "=", "filename", ",", "argc", "=", "rest", "[", "1", "]", ",", "args", "=", "rest", "[", "2", "]", ")", ")", "self", ".", "session", ".", "commit", "(", ")" ]
Add data.
[ "Add", "data", "." ]
6cac5ebfce65c370dbebe47756a1789b120ef982
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/database.py#L68-L115
train
dalloriam/engel
engel/application.py
Application.start
def start(self, on_exit_callback=None): """ Start the Engel application by initializing all registered services and starting an Autobahn IOLoop. :param on_exit_callback: Callback triggered on application exit """ # TODO: Support params for services by mapping {servicename: {class, # params}}? for service in self.services.keys(): self.services[service] = self.services[service]() self.server.start(on_exit_callback)
python
def start(self, on_exit_callback=None): """ Start the Engel application by initializing all registered services and starting an Autobahn IOLoop. :param on_exit_callback: Callback triggered on application exit """ # TODO: Support params for services by mapping {servicename: {class, # params}}? for service in self.services.keys(): self.services[service] = self.services[service]() self.server.start(on_exit_callback)
[ "def", "start", "(", "self", ",", "on_exit_callback", "=", "None", ")", ":", "# TODO: Support params for services by mapping {servicename: {class,", "# params}}?", "for", "service", "in", "self", ".", "services", ".", "keys", "(", ")", ":", "self", ".", "services", "[", "service", "]", "=", "self", ".", "services", "[", "service", "]", "(", ")", "self", ".", "server", ".", "start", "(", "on_exit_callback", ")" ]
Start the Engel application by initializing all registered services and starting an Autobahn IOLoop. :param on_exit_callback: Callback triggered on application exit
[ "Start", "the", "Engel", "application", "by", "initializing", "all", "registered", "services", "and", "starting", "an", "Autobahn", "IOLoop", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/application.py#L59-L70
train
dalloriam/engel
engel/application.py
Application.register
def register(self, event, callback, selector=None): """ Resister an event that you want to monitor. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor. """ self.processor.register(event, callback, selector)
python
def register(self, event, callback, selector=None): """ Resister an event that you want to monitor. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor. """ self.processor.register(event, callback, selector)
[ "def", "register", "(", "self", ",", "event", ",", "callback", ",", "selector", "=", "None", ")", ":", "self", ".", "processor", ".", "register", "(", "event", ",", "callback", ",", "selector", ")" ]
Resister an event that you want to monitor. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor.
[ "Resister", "an", "event", "that", "you", "want", "to", "monitor", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/application.py#L72-L80
train
dalloriam/engel
engel/application.py
Application.unregister
def unregister(self, event, callback, selector=None): """ Unregisters an event that was being monitored. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor """ self.processor.unregister(event, callback, selector)
python
def unregister(self, event, callback, selector=None): """ Unregisters an event that was being monitored. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor """ self.processor.unregister(event, callback, selector)
[ "def", "unregister", "(", "self", ",", "event", ",", "callback", ",", "selector", "=", "None", ")", ":", "self", ".", "processor", ".", "unregister", "(", "event", ",", "callback", ",", "selector", ")" ]
Unregisters an event that was being monitored. :param event: Name of the event to monitor :param callback: Callback function for when the event is received (Params: event, interface). :param selector: `(Optional)` CSS selector for the element(s) you want to monitor
[ "Unregisters", "an", "event", "that", "was", "being", "monitored", "." ]
f3477cd546e885bc53e755b3eb1452ce43ef5697
https://github.com/dalloriam/engel/blob/f3477cd546e885bc53e755b3eb1452ce43ef5697/engel/application.py#L82-L90
train
RedHatQE/Sentaku
examples/todo_example/spec.py
TodoAPI.from_api
def from_api(cls, api): """ create an application description for the todo app, that based on the api can use either tha api or the ux for interaction """ ux = TodoUX(api) from .pseudorpc import PseudoRpc rpc = PseudoRpc(api) return cls({ViaAPI: api, ViaUX: ux, ViaRPC: rpc})
python
def from_api(cls, api): """ create an application description for the todo app, that based on the api can use either tha api or the ux for interaction """ ux = TodoUX(api) from .pseudorpc import PseudoRpc rpc = PseudoRpc(api) return cls({ViaAPI: api, ViaUX: ux, ViaRPC: rpc})
[ "def", "from_api", "(", "cls", ",", "api", ")", ":", "ux", "=", "TodoUX", "(", "api", ")", "from", ".", "pseudorpc", "import", "PseudoRpc", "rpc", "=", "PseudoRpc", "(", "api", ")", "return", "cls", "(", "{", "ViaAPI", ":", "api", ",", "ViaUX", ":", "ux", ",", "ViaRPC", ":", "rpc", "}", ")" ]
create an application description for the todo app, that based on the api can use either tha api or the ux for interaction
[ "create", "an", "application", "description", "for", "the", "todo", "app", "that", "based", "on", "the", "api", "can", "use", "either", "tha", "api", "or", "the", "ux", "for", "interaction" ]
b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c
https://github.com/RedHatQE/Sentaku/blob/b336cef5b6ee2db4e8dff28dcdb2be35a1f3d01c/examples/todo_example/spec.py#L14-L24
train
yamcs/yamcs-python
yamcs-client/yamcs/mdb/model.py
Significance.consequence_level
def consequence_level(self): """ One of ``NONE``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. """ if self._proto.HasField('consequenceLevel'): return mdb_pb2.SignificanceInfo.SignificanceLevelType.Name(self._proto.consequenceLevel) return None
python
def consequence_level(self): """ One of ``NONE``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. """ if self._proto.HasField('consequenceLevel'): return mdb_pb2.SignificanceInfo.SignificanceLevelType.Name(self._proto.consequenceLevel) return None
[ "def", "consequence_level", "(", "self", ")", ":", "if", "self", ".", "_proto", ".", "HasField", "(", "'consequenceLevel'", ")", ":", "return", "mdb_pb2", ".", "SignificanceInfo", ".", "SignificanceLevelType", ".", "Name", "(", "self", ".", "_proto", ".", "consequenceLevel", ")", "return", "None" ]
One of ``NONE``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``.
[ "One", "of", "NONE", "WATCH", "WARNING", "DISTRESS", "CRITICAL", "or", "SEVERE", "." ]
1082fee8a299010cc44416bbb7518fac0ef08b48
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/mdb/model.py#L106-L113
train
sirfoga/pyhal
hal/mongodb/models.py
DbBrowser.get_documents_count
def get_documents_count(self): """Counts documents in database :return: Number of documents in db """ db_collections = [ self.database[c] for c in self.get_collection_names() ] # list of all collections in database return sum([c.count() for c in db_collections])
python
def get_documents_count(self): """Counts documents in database :return: Number of documents in db """ db_collections = [ self.database[c] for c in self.get_collection_names() ] # list of all collections in database return sum([c.count() for c in db_collections])
[ "def", "get_documents_count", "(", "self", ")", ":", "db_collections", "=", "[", "self", ".", "database", "[", "c", "]", "for", "c", "in", "self", ".", "get_collection_names", "(", ")", "]", "# list of all collections in database", "return", "sum", "(", "[", "c", ".", "count", "(", ")", "for", "c", "in", "db_collections", "]", ")" ]
Counts documents in database :return: Number of documents in db
[ "Counts", "documents", "in", "database" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/mongodb/models.py#L25-L33
train
sirfoga/pyhal
hal/mongodb/models.py
DbBrowser.get_documents_in_collection
def get_documents_in_collection(self, collection_name, with_id=True): """Gets all documents in collection :param collection_name: Name of collection :param with_id: True iff each document should also come with its id :return: List of documents in collection in self.db """ documents_iterator = self.database[collection_name].find() # anything documents = [ d for d in documents_iterator ] # list of all documents in collection in database if not with_id: for doc in documents: doc.pop("_id") # remove id key return documents
python
def get_documents_in_collection(self, collection_name, with_id=True): """Gets all documents in collection :param collection_name: Name of collection :param with_id: True iff each document should also come with its id :return: List of documents in collection in self.db """ documents_iterator = self.database[collection_name].find() # anything documents = [ d for d in documents_iterator ] # list of all documents in collection in database if not with_id: for doc in documents: doc.pop("_id") # remove id key return documents
[ "def", "get_documents_in_collection", "(", "self", ",", "collection_name", ",", "with_id", "=", "True", ")", ":", "documents_iterator", "=", "self", ".", "database", "[", "collection_name", "]", ".", "find", "(", ")", "# anything", "documents", "=", "[", "d", "for", "d", "in", "documents_iterator", "]", "# list of all documents in collection in database", "if", "not", "with_id", ":", "for", "doc", "in", "documents", ":", "doc", ".", "pop", "(", "\"_id\"", ")", "# remove id key", "return", "documents" ]
Gets all documents in collection :param collection_name: Name of collection :param with_id: True iff each document should also come with its id :return: List of documents in collection in self.db
[ "Gets", "all", "documents", "in", "collection" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/mongodb/models.py#L35-L51
train
sirfoga/pyhal
hal/mongodb/models.py
DbBrowser.get_documents_in_database
def get_documents_in_database(self, with_id=True): """Gets all documents in database :param with_id: True iff each document should also come with its id :return: List of documents in collection in database """ documents = [] for coll in self.get_collection_names(): documents += self.get_documents_in_collection( coll, with_id=with_id ) return documents
python
def get_documents_in_database(self, with_id=True): """Gets all documents in database :param with_id: True iff each document should also come with its id :return: List of documents in collection in database """ documents = [] for coll in self.get_collection_names(): documents += self.get_documents_in_collection( coll, with_id=with_id ) return documents
[ "def", "get_documents_in_database", "(", "self", ",", "with_id", "=", "True", ")", ":", "documents", "=", "[", "]", "for", "coll", "in", "self", ".", "get_collection_names", "(", ")", ":", "documents", "+=", "self", ".", "get_documents_in_collection", "(", "coll", ",", "with_id", "=", "with_id", ")", "return", "documents" ]
Gets all documents in database :param with_id: True iff each document should also come with its id :return: List of documents in collection in database
[ "Gets", "all", "documents", "in", "database" ]
4394d8a1f7e45bea28a255ec390f4962ee64d33a
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/mongodb/models.py#L61-L74
train