repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
bykof/billomapy
billomapy/billomapy.py
Billomapy.complete_delivery_note
def complete_delivery_note(self, delivery_note_id, complete_dict): """ Completes an delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response """ return self._create_put_request( resource=DELIVERY_NOTES, billomat_id=delivery_note_id, command=COMPLETE, send_data=complete_dict )
python
def complete_delivery_note(self, delivery_note_id, complete_dict):
    """
    Completes a delivery note.

    :param delivery_note_id: the delivery note id
    :param complete_dict: the complete dict with the template id
    :return: Response
    """
    return self._create_put_request(
        send_data=complete_dict,
        command=COMPLETE,
        billomat_id=delivery_note_id,
        resource=DELIVERY_NOTES,
    )
[ "def", "complete_delivery_note", "(", "self", ",", "delivery_note_id", ",", "complete_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "DELIVERY_NOTES", ",", "billomat_id", "=", "delivery_note_id", ",", "command", "=", "COMPLETE", ",", "send_data", "=", "complete_dict", ")" ]
Completes an delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response
[ "Completes", "an", "delivery", "note" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3564-L3577
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.delivery_note_pdf
def delivery_note_pdf(self, delivery_note_id): """ Opens a pdf of a delivery note :param delivery_note_id: the delivery note id :return: dict """ return self._create_get_request(resource=DELIVERY_NOTES, billomat_id=delivery_note_id, command=PDF)
python
def delivery_note_pdf(self, delivery_note_id):
    """
    Opens a pdf of a delivery note.

    :param delivery_note_id: the delivery note id
    :return: dict
    """
    return self._create_get_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        command=PDF,
    )
[ "def", "delivery_note_pdf", "(", "self", ",", "delivery_note_id", ")", ":", "return", "self", ".", "_create_get_request", "(", "resource", "=", "DELIVERY_NOTES", ",", "billomat_id", "=", "delivery_note_id", ",", "command", "=", "PDF", ")" ]
Opens a pdf of a delivery note :param delivery_note_id: the delivery note id :return: dict
[ "Opens", "a", "pdf", "of", "a", "delivery", "note" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3579-L3586
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_items_of_delivery_note_per_page
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_ITEMS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
python
def get_items_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """
    Get items of delivery note per page.

    :param delivery_note_id: the delivery note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Filter the item listing down to the requested delivery note.
    search_params = {'delivery_note_id': delivery_note_id}
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_ITEMS,
        per_page=per_page,
        page=page,
        params=search_params,
    )
[ "def", "get_items_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_ITEMS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ",", ")" ]
Get items of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "items", "of", "delivery", "note", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3612-L3626
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_items_of_delivery_note
def get_all_items_of_delivery_note(self, delivery_note_id): """ Get all items of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list """ return self._iterate_through_pages( get_function=self.get_items_of_delivery_note_per_page, resource=DELIVERY_NOTE_ITEMS, **{'delivery_note_id': delivery_note_id} )
python
def get_all_items_of_delivery_note(self, delivery_note_id):
    """
    Get all items of a delivery note.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param delivery_note_id: the delivery note id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_items_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_ITEMS,
        # idiom fix: pass the filter as a plain keyword argument instead
        # of unpacking a single-key dict with **{...}
        delivery_note_id=delivery_note_id,
    )
[ "def", "get_all_items_of_delivery_note", "(", "self", ",", "delivery_note_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_items_of_delivery_note_per_page", ",", "resource", "=", "DELIVERY_NOTE_ITEMS", ",", "*", "*", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ")" ]
Get all items of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list
[ "Get", "all", "items", "of", "delivery", "note", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3628-L3641
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_delivery_note_item
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict): """ Updates a delivery note item :param delivery_note_item_id: delivery note item id :param delivery_note_item_dict: dict :return: dict """ return self._create_put_request( resource=DELIVERY_NOTE_ITEMS, billomat_id=delivery_note_item_id, send_data=delivery_note_item_dict )
python
def update_delivery_note_item(self, delivery_note_item_id, delivery_note_item_dict):
    """
    Updates a delivery note item.

    :param delivery_note_item_id: delivery note item id
    :param delivery_note_item_dict: dict
    :return: dict
    """
    return self._create_put_request(
        send_data=delivery_note_item_dict,
        billomat_id=delivery_note_item_id,
        resource=DELIVERY_NOTE_ITEMS,
    )
[ "def", "update_delivery_note_item", "(", "self", ",", "delivery_note_item_id", ",", "delivery_note_item_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "DELIVERY_NOTE_ITEMS", ",", "billomat_id", "=", "delivery_note_item_id", ",", "send_data", "=", "delivery_note_item_dict", ")" ]
Updates a delivery note item :param delivery_note_item_id: delivery note item id :param delivery_note_item_dict: dict :return: dict
[ "Updates", "a", "delivery", "note", "item" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3661-L3673
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_comments_of_delivery_note_per_page
def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get comments of delivery note per page :param delivery_note_id: the delivery note :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_COMMENTS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
python
def get_comments_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """
    Get comments of delivery note per page.

    :param delivery_note_id: the delivery note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Restrict the comment listing to this delivery note.
    filter_params = {'delivery_note_id': delivery_note_id}
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_COMMENTS,
        per_page=per_page,
        page=page,
        params=filter_params,
    )
[ "def", "get_comments_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_COMMENTS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ",", ")" ]
Get comments of delivery note per page :param delivery_note_id: the delivery note :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "comments", "of", "delivery", "note", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3691-L3705
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_comments_of_delivery_note
def get_all_comments_of_delivery_note(self, delivery_note_id): """ Get all comments of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list """ return self._iterate_through_pages( get_function=self.get_comments_of_delivery_note_per_page, resource=DELIVERY_NOTE_COMMENTS, **{'delivery_note_id': delivery_note_id} )
python
def get_all_comments_of_delivery_note(self, delivery_note_id):
    """
    Get all comments of a delivery note.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param delivery_note_id: the delivery note id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_COMMENTS,
        # idiom fix: direct keyword argument instead of **{...} unpacking
        delivery_note_id=delivery_note_id,
    )
[ "def", "get_all_comments_of_delivery_note", "(", "self", ",", "delivery_note_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_comments_of_delivery_note_per_page", ",", "resource", "=", "DELIVERY_NOTE_COMMENTS", ",", "*", "*", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ")" ]
Get all comments of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list
[ "Get", "all", "comments", "of", "delivery", "note", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3707-L3720
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_delivery_note_comment
def update_delivery_note_comment(self, delivery_note_comment_id, delivery_note_comment_dict): """ Updates a delivery note comment :param delivery_note_comment_id: the delivery note comment id :param delivery_note_comment_dict: dict :return: dict """ return self._create_put_request( resource=DELIVERY_NOTE_COMMENTS, billomat_id=delivery_note_comment_id, send_data=delivery_note_comment_dict )
python
def update_delivery_note_comment(self, delivery_note_comment_id, delivery_note_comment_dict):
    """
    Updates a delivery note comment.

    :param delivery_note_comment_id: the delivery note comment id
    :param delivery_note_comment_dict: dict
    :return: dict
    """
    return self._create_put_request(
        send_data=delivery_note_comment_dict,
        billomat_id=delivery_note_comment_id,
        resource=DELIVERY_NOTE_COMMENTS,
    )
[ "def", "update_delivery_note_comment", "(", "self", ",", "delivery_note_comment_id", ",", "delivery_note_comment_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "DELIVERY_NOTE_COMMENTS", ",", "billomat_id", "=", "delivery_note_comment_id", ",", "send_data", "=", "delivery_note_comment_dict", ")" ]
Updates a delivery note comment :param delivery_note_comment_id: the delivery note comment id :param delivery_note_comment_dict: dict :return: dict
[ "Updates", "a", "delivery", "note", "comment" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3740-L3752
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_tags_of_delivery_note_per_page
def get_tags_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1): """ Get tags of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=DELIVERY_NOTE_TAGS, per_page=per_page, page=page, params={'delivery_note_id': delivery_note_id}, )
python
def get_tags_of_delivery_note_per_page(self, delivery_note_id, per_page=1000, page=1):
    """
    Get tags of delivery note per page.

    :param delivery_note_id: the delivery note id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Only tags attached to this delivery note are requested.
    query = {'delivery_note_id': delivery_note_id}
    return self._get_resource_per_page(
        resource=DELIVERY_NOTE_TAGS,
        per_page=per_page,
        page=page,
        params=query,
    )
[ "def", "get_tags_of_delivery_note_per_page", "(", "self", ",", "delivery_note_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "DELIVERY_NOTE_TAGS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ",", ")" ]
Get tags of delivery note per page :param delivery_note_id: the delivery note id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "tags", "of", "delivery", "note", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3769-L3783
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_tags_of_delivery_note
def get_all_tags_of_delivery_note(self, delivery_note_id): """ Get all tags of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list """ return self._iterate_through_pages( get_function=self.get_tags_of_delivery_note_per_page, resource=DELIVERY_NOTE_TAGS, **{'delivery_note_id': delivery_note_id} )
python
def get_all_tags_of_delivery_note(self, delivery_note_id):
    """
    Get all tags of a delivery note.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param delivery_note_id: the delivery note id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_delivery_note_per_page,
        resource=DELIVERY_NOTE_TAGS,
        # idiom fix: direct keyword argument instead of **{...} unpacking
        delivery_note_id=delivery_note_id,
    )
[ "def", "get_all_tags_of_delivery_note", "(", "self", ",", "delivery_note_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_tags_of_delivery_note_per_page", ",", "resource", "=", "DELIVERY_NOTE_TAGS", ",", "*", "*", "{", "'delivery_note_id'", ":", "delivery_note_id", "}", ")" ]
Get all tags of delivery note This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param delivery_note_id: the delivery note id :return: list
[ "Get", "all", "tags", "of", "delivery", "note", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3785-L3798
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_letters_per_page
def get_letters_per_page(self, per_page=1000, page=1, params=None): """ Get letters per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=LETTERS, per_page=per_page, page=page, params=params)
python
def get_letters_per_page(self, per_page=1000, page=1, params=None):
    """
    Get letters per page.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    return self._get_resource_per_page(
        resource=LETTERS,
        per_page=per_page,
        page=page,
        params=params,
    )
[ "def", "get_letters_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "LETTERS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
Get letters per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
[ "Get", "letters", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3834-L3843
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_letters
def get_all_letters(self, params=None): """ Get all letters This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_letters_per_page, resource=LETTERS, **{'params': params})
python
def get_all_letters(self, params=None):
    """
    Get all letters.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param params: search params
    :return: list
    """
    # Any falsy value (None, {}) is normalized to an empty search dict,
    # matching the behavior of the other get_all_* helpers.
    if not params:
        params = {}
    return self._iterate_through_pages(
        self.get_letters_per_page,
        resource=LETTERS,
        # idiom fix: direct keyword argument instead of **{...} unpacking
        params=params,
    )
[ "def", "get_all_letters", "(", "self", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "return", "self", ".", "_iterate_through_pages", "(", "self", ".", "get_letters_per_page", ",", "resource", "=", "LETTERS", ",", "*", "*", "{", "'params'", ":", "params", "}", ")" ]
Get all letters This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
[ "Get", "all", "letters", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3845-L3856
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_letter
def update_letter(self, letter_id, letter_dict): """ Updates a letter :param letter_id: the letter id :param letter_dict: dict :return: dict """ return self._create_put_request( resource=LETTERS, billomat_id=letter_id, send_data=letter_dict )
python
def update_letter(self, letter_id, letter_dict):
    """
    Updates a letter.

    :param letter_id: the letter id
    :param letter_dict: dict
    :return: dict
    """
    return self._create_put_request(
        send_data=letter_dict,
        billomat_id=letter_id,
        resource=LETTERS,
    )
[ "def", "update_letter", "(", "self", ",", "letter_id", ",", "letter_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "LETTERS", ",", "billomat_id", "=", "letter_id", ",", "send_data", "=", "letter_dict", ")" ]
Updates a letter :param letter_id: the letter id :param letter_dict: dict :return: dict
[ "Updates", "a", "letter" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3876-L3888
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_comments_of_letter_per_page
def get_comments_of_letter_per_page(self, letter_id, per_page=1000, page=1): """ Get comments of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=LETTER_COMMENTS, per_page=per_page, page=page, params={'letter_id': letter_id}, )
python
def get_comments_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """
    Get comments of letter per page.

    :param letter_id: the letter id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Limit the comment listing to the given letter.
    filter_params = {'letter_id': letter_id}
    return self._get_resource_per_page(
        resource=LETTER_COMMENTS,
        per_page=per_page,
        page=page,
        params=filter_params,
    )
[ "def", "get_comments_of_letter_per_page", "(", "self", ",", "letter_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "LETTER_COMMENTS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'letter_id'", ":", "letter_id", "}", ",", ")" ]
Get comments of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "comments", "of", "letter", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3906-L3920
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_comments_of_letter
def get_all_comments_of_letter(self, letter_id): """ Get all comments of letter This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param letter_id: the letter id :return: list """ return self._iterate_through_pages( get_function=self.get_comments_of_letter_per_page, resource=LETTER_COMMENTS, **{'letter_id': letter_id} )
python
def get_all_comments_of_letter(self, letter_id):
    """
    Get all comments of a letter.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param letter_id: the letter id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_comments_of_letter_per_page,
        resource=LETTER_COMMENTS,
        # idiom fix: direct keyword argument instead of **{...} unpacking
        letter_id=letter_id,
    )
[ "def", "get_all_comments_of_letter", "(", "self", ",", "letter_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_comments_of_letter_per_page", ",", "resource", "=", "LETTER_COMMENTS", ",", "*", "*", "{", "'letter_id'", ":", "letter_id", "}", ")" ]
Get all comments of letter This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param letter_id: the letter id :return: list
[ "Get", "all", "comments", "of", "letter", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3922-L3935
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_letter_comment
def update_letter_comment(self, letter_comment_id, letter_comment_dict): """ Updates a letter comment :param letter_comment_id: the letter command id :param letter_comment_dict: dict :return: dict """ return self._create_put_request( resource=LETTER_COMMENTS, billomat_id=letter_comment_id, send_data=letter_comment_dict )
python
def update_letter_comment(self, letter_comment_id, letter_comment_dict):
    """
    Updates a letter comment.

    :param letter_comment_id: the letter comment id
    :param letter_comment_dict: dict
    :return: dict
    """
    return self._create_put_request(
        send_data=letter_comment_dict,
        billomat_id=letter_comment_id,
        resource=LETTER_COMMENTS,
    )
[ "def", "update_letter_comment", "(", "self", ",", "letter_comment_id", ",", "letter_comment_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "LETTER_COMMENTS", ",", "billomat_id", "=", "letter_comment_id", ",", "send_data", "=", "letter_comment_dict", ")" ]
Updates a letter comment :param letter_comment_id: the letter command id :param letter_comment_dict: dict :return: dict
[ "Updates", "a", "letter", "comment" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3955-L3967
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_tags_of_letter_per_page
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1): """ Get tags of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list """ return self._get_resource_per_page( resource=LETTER_TAGS, per_page=per_page, page=page, params={'letter_id': letter_id}, )
python
def get_tags_of_letter_per_page(self, letter_id, per_page=1000, page=1):
    """
    Get tags of letter per page.

    :param letter_id: the letter id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Only tags attached to this letter are requested.
    query = {'letter_id': letter_id}
    return self._get_resource_per_page(
        resource=LETTER_TAGS,
        per_page=per_page,
        page=page,
        params=query,
    )
[ "def", "get_tags_of_letter_per_page", "(", "self", ",", "letter_id", ",", "per_page", "=", "1000", ",", "page", "=", "1", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "LETTER_TAGS", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "{", "'letter_id'", ":", "letter_id", "}", ",", ")" ]
Get tags of letter per page :param letter_id: the letter id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
[ "Get", "tags", "of", "letter", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L3984-L3998
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_tags_of_letter
def get_all_tags_of_letter(self, letter_id): """ Get all tags of letter This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param letter_id: the letter id :return: list """ return self._iterate_through_pages( get_function=self.get_tags_of_letter_per_page, resource=LETTER_TAGS, **{'letter_id': letter_id} )
python
def get_all_tags_of_letter(self, letter_id):
    """
    Get all tags of a letter.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and
    you will get nothing.

    :param letter_id: the letter id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_tags_of_letter_per_page,
        resource=LETTER_TAGS,
        # idiom fix: direct keyword argument instead of **{...} unpacking
        letter_id=letter_id,
    )
[ "def", "get_all_tags_of_letter", "(", "self", ",", "letter_id", ")", ":", "return", "self", ".", "_iterate_through_pages", "(", "get_function", "=", "self", ".", "get_tags_of_letter_per_page", ",", "resource", "=", "LETTER_TAGS", ",", "*", "*", "{", "'letter_id'", ":", "letter_id", "}", ")" ]
Get all tags of letter This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param letter_id: the letter id :return: list
[ "Get", "all", "tags", "of", "letter", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4000-L4013
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_email_templates_per_page
def get_email_templates_per_page(self, per_page=1000, page=1, params=None): """ Get e-mail templates per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=EMAIL_TEMPLATES, per_page=per_page, page=page, params=params)
python
def get_email_templates_per_page(self, per_page=1000, page=1, params=None):
    """
    Get e-mail templates per page.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    return self._get_resource_per_page(
        resource=EMAIL_TEMPLATES,
        per_page=per_page,
        page=page,
        params=params,
    )
[ "def", "get_email_templates_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "EMAIL_TEMPLATES", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
Get e-mail templates per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
[ "Get", "e", "-", "mail", "templates", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4050-L4059
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_email_templates
def get_email_templates(self, params=None): """ Get all e-mail templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_email_templates_per_page, resource=EMAIL_TEMPLATES, **{'params': params})
python
def get_email_templates(self, params=None): """ Get all e-mail templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_email_templates_per_page, resource=EMAIL_TEMPLATES, **{'params': params})
[ "def", "get_email_templates", "(", "self", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "return", "self", ".", "_iterate_through_pages", "(", "self", ".", "get_email_templates_per_page", ",", "resource", "=", "EMAIL_TEMPLATES", ",", "*", "*", "{", "'params'", ":", "params", "}", ")" ]
Get all e-mail templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
[ "Get", "all", "e", "-", "mail", "templates", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4061-L4073
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_email_template
def update_email_template(self, template_id, template_dict): """ Updates a emailtemplate :param template_id: the template id :param template_dict: dict :return: dict """ return self._create_put_request( resource=EMAIL_TEMPLATES, billomat_id=template_id, send_data=template_dict )
python
def update_email_template(self, template_id, template_dict): """ Updates a emailtemplate :param template_id: the template id :param template_dict: dict :return: dict """ return self._create_put_request( resource=EMAIL_TEMPLATES, billomat_id=template_id, send_data=template_dict )
[ "def", "update_email_template", "(", "self", ",", "template_id", ",", "template_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "EMAIL_TEMPLATES", ",", "billomat_id", "=", "template_id", ",", "send_data", "=", "template_dict", ")" ]
Updates a emailtemplate :param template_id: the template id :param template_dict: dict :return: dict
[ "Updates", "a", "emailtemplate" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4093-L4105
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_templates_per_page
def get_templates_per_page(self, per_page=1000, page=1, params=None): """ Get templates per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=TEMPLATES, per_page=per_page, page=page, params=params)
python
def get_templates_per_page(self, per_page=1000, page=1, params=None): """ Get templates per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list """ return self._get_resource_per_page(resource=TEMPLATES, per_page=per_page, page=page, params=params)
[ "def", "get_templates_per_page", "(", "self", ",", "per_page", "=", "1000", ",", "page", "=", "1", ",", "params", "=", "None", ")", ":", "return", "self", ".", "_get_resource_per_page", "(", "resource", "=", "TEMPLATES", ",", "per_page", "=", "per_page", ",", "page", "=", "page", ",", "params", "=", "params", ")" ]
Get templates per page :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :param params: Search parameters. Default: {} :return: list
[ "Get", "templates", "per", "page" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4123-L4132
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.get_all_templates
def get_all_templates(self, params=None): """ Get all templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_templates_per_page, resource=TEMPLATES, **{'params': params})
python
def get_all_templates(self, params=None): """ Get all templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list """ if not params: params = {} return self._iterate_through_pages(self.get_templates_per_page, resource=TEMPLATES, **{'params': params})
[ "def", "get_all_templates", "(", "self", ",", "params", "=", "None", ")", ":", "if", "not", "params", ":", "params", "=", "{", "}", "return", "self", ".", "_iterate_through_pages", "(", "self", ".", "get_templates_per_page", ",", "resource", "=", "TEMPLATES", ",", "*", "*", "{", "'params'", ":", "params", "}", ")" ]
Get all templates This will iterate over all pages until it gets all elements. So if the rate limit exceeded it will throw an Exception and you will get nothing :param params: search params :return: list
[ "Get", "all", "templates", "This", "will", "iterate", "over", "all", "pages", "until", "it", "gets", "all", "elements", ".", "So", "if", "the", "rate", "limit", "exceeded", "it", "will", "throw", "an", "Exception", "and", "you", "will", "get", "nothing" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4134-L4145
train
bykof/billomapy
billomapy/billomapy.py
Billomapy.update_template
def update_template(self, template_id, template_dict): """ Updates a template :param template_id: the template id :param template_dict: dict :return: dict """ return self._create_put_request( resource=TEMPLATES, billomat_id=template_id, send_data=template_dict )
python
def update_template(self, template_id, template_dict): """ Updates a template :param template_id: the template id :param template_dict: dict :return: dict """ return self._create_put_request( resource=TEMPLATES, billomat_id=template_id, send_data=template_dict )
[ "def", "update_template", "(", "self", ",", "template_id", ",", "template_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "TEMPLATES", ",", "billomat_id", "=", "template_id", ",", "send_data", "=", "template_dict", ")" ]
Updates a template :param template_id: the template id :param template_dict: dict :return: dict
[ "Updates", "a", "template" ]
a28ba69fd37654fa145d0411d52c200e7f8984ab
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4165-L4177
train
Kortemme-Lab/klab
klab/cloning/cloning.py
reverse_translate
def reverse_translate( protein_seq, template_dna=None, leading_seq=None, trailing_seq=None, forbidden_seqs=(), include_stop=True, manufacturer=None): """ Generate a well-behaved DNA sequence from the given protein sequence. If a template DNA sequence is specified, the returned DNA sequence will be as similar to it as possible. Any given restriction sites will not be present in the sequence. And finally, the given leading and trailing sequences will be appropriately concatenated. """ if manufacturer == 'gen9': forbidden_seqs += gen9.reserved_restriction_sites leading_seq = restriction_sites.get(leading_seq, leading_seq or '') trailing_seq = restriction_sites.get(trailing_seq, trailing_seq or '') codon_list = make_codon_list(protein_seq, template_dna, include_stop) sanitize_codon_list(codon_list, forbidden_seqs) dna_seq = leading_seq + ''.join(codon_list) + trailing_seq if manufacturer == 'gen9': gen9.apply_quality_control_checks(dna_seq) return dna_seq
python
def reverse_translate( protein_seq, template_dna=None, leading_seq=None, trailing_seq=None, forbidden_seqs=(), include_stop=True, manufacturer=None): """ Generate a well-behaved DNA sequence from the given protein sequence. If a template DNA sequence is specified, the returned DNA sequence will be as similar to it as possible. Any given restriction sites will not be present in the sequence. And finally, the given leading and trailing sequences will be appropriately concatenated. """ if manufacturer == 'gen9': forbidden_seqs += gen9.reserved_restriction_sites leading_seq = restriction_sites.get(leading_seq, leading_seq or '') trailing_seq = restriction_sites.get(trailing_seq, trailing_seq or '') codon_list = make_codon_list(protein_seq, template_dna, include_stop) sanitize_codon_list(codon_list, forbidden_seqs) dna_seq = leading_seq + ''.join(codon_list) + trailing_seq if manufacturer == 'gen9': gen9.apply_quality_control_checks(dna_seq) return dna_seq
[ "def", "reverse_translate", "(", "protein_seq", ",", "template_dna", "=", "None", ",", "leading_seq", "=", "None", ",", "trailing_seq", "=", "None", ",", "forbidden_seqs", "=", "(", ")", ",", "include_stop", "=", "True", ",", "manufacturer", "=", "None", ")", ":", "if", "manufacturer", "==", "'gen9'", ":", "forbidden_seqs", "+=", "gen9", ".", "reserved_restriction_sites", "leading_seq", "=", "restriction_sites", ".", "get", "(", "leading_seq", ",", "leading_seq", "or", "''", ")", "trailing_seq", "=", "restriction_sites", ".", "get", "(", "trailing_seq", ",", "trailing_seq", "or", "''", ")", "codon_list", "=", "make_codon_list", "(", "protein_seq", ",", "template_dna", ",", "include_stop", ")", "sanitize_codon_list", "(", "codon_list", ",", "forbidden_seqs", ")", "dna_seq", "=", "leading_seq", "+", "''", ".", "join", "(", "codon_list", ")", "+", "trailing_seq", "if", "manufacturer", "==", "'gen9'", ":", "gen9", ".", "apply_quality_control_checks", "(", "dna_seq", ")", "return", "dna_seq" ]
Generate a well-behaved DNA sequence from the given protein sequence. If a template DNA sequence is specified, the returned DNA sequence will be as similar to it as possible. Any given restriction sites will not be present in the sequence. And finally, the given leading and trailing sequences will be appropriately concatenated.
[ "Generate", "a", "well", "-", "behaved", "DNA", "sequence", "from", "the", "given", "protein", "sequence", ".", "If", "a", "template", "DNA", "sequence", "is", "specified", "the", "returned", "DNA", "sequence", "will", "be", "as", "similar", "to", "it", "as", "possible", ".", "Any", "given", "restriction", "sites", "will", "not", "be", "present", "in", "the", "sequence", ".", "And", "finally", "the", "given", "leading", "and", "trailing", "sequences", "will", "be", "appropriately", "concatenated", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L21-L46
train
Kortemme-Lab/klab
klab/cloning/cloning.py
make_codon_list
def make_codon_list(protein_seq, template_dna=None, include_stop=True): """ Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons. """ codon_list = [] if template_dna is None: template_dna = [] # Reverse translate each codon, preferring (in order): # 1. The codon with the most similarity to the template codon. # 2. The codon with the highest natural usage. for i, res in enumerate(protein_seq.upper()): try: template_codon = template_dna[3*i:3*i+3] except IndexError: template_codon = '---' # Already sorted by natural codon usage possible_codons = dna.ecoli_reverse_translate[res] # Sort by similarity. Note that this is a stable sort. possible_codons.sort( key=lambda x: dna.num_mutations(x, template_codon)) # Pick the best codon. codon_list.append(possible_codons[0]) # Make sure the sequence ends with a stop codon. last_codon = codon_list[-1] stop_codons = dna.ecoli_reverse_translate['.'] if include_stop and last_codon not in stop_codons: codon_list.append(stop_codons[0]) return codon_list
python
def make_codon_list(protein_seq, template_dna=None, include_stop=True): """ Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons. """ codon_list = [] if template_dna is None: template_dna = [] # Reverse translate each codon, preferring (in order): # 1. The codon with the most similarity to the template codon. # 2. The codon with the highest natural usage. for i, res in enumerate(protein_seq.upper()): try: template_codon = template_dna[3*i:3*i+3] except IndexError: template_codon = '---' # Already sorted by natural codon usage possible_codons = dna.ecoli_reverse_translate[res] # Sort by similarity. Note that this is a stable sort. possible_codons.sort( key=lambda x: dna.num_mutations(x, template_codon)) # Pick the best codon. codon_list.append(possible_codons[0]) # Make sure the sequence ends with a stop codon. last_codon = codon_list[-1] stop_codons = dna.ecoli_reverse_translate['.'] if include_stop and last_codon not in stop_codons: codon_list.append(stop_codons[0]) return codon_list
[ "def", "make_codon_list", "(", "protein_seq", ",", "template_dna", "=", "None", ",", "include_stop", "=", "True", ")", ":", "codon_list", "=", "[", "]", "if", "template_dna", "is", "None", ":", "template_dna", "=", "[", "]", "# Reverse translate each codon, preferring (in order):", "# 1. The codon with the most similarity to the template codon.", "# 2. The codon with the highest natural usage.", "for", "i", ",", "res", "in", "enumerate", "(", "protein_seq", ".", "upper", "(", ")", ")", ":", "try", ":", "template_codon", "=", "template_dna", "[", "3", "*", "i", ":", "3", "*", "i", "+", "3", "]", "except", "IndexError", ":", "template_codon", "=", "'---'", "# Already sorted by natural codon usage", "possible_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "res", "]", "# Sort by similarity. Note that this is a stable sort.", "possible_codons", ".", "sort", "(", "key", "=", "lambda", "x", ":", "dna", ".", "num_mutations", "(", "x", ",", "template_codon", ")", ")", "# Pick the best codon.", "codon_list", ".", "append", "(", "possible_codons", "[", "0", "]", ")", "# Make sure the sequence ends with a stop codon.", "last_codon", "=", "codon_list", "[", "-", "1", "]", "stop_codons", "=", "dna", ".", "ecoli_reverse_translate", "[", "'.'", "]", "if", "include_stop", "and", "last_codon", "not", "in", "stop_codons", ":", "codon_list", ".", "append", "(", "stop_codons", "[", "0", "]", ")", "return", "codon_list" ]
Return a list of codons that would be translated to the given protein sequence. Codons are picked first to minimize the mutations relative to a template DNA sequence and second to prefer "optimal" codons.
[ "Return", "a", "list", "of", "codons", "that", "would", "be", "translated", "to", "the", "given", "protein", "sequence", ".", "Codons", "are", "picked", "first", "to", "minimize", "the", "mutations", "relative", "to", "a", "template", "DNA", "sequence", "and", "second", "to", "prefer", "optimal", "codons", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L48-L85
train
Kortemme-Lab/klab
klab/cloning/cloning.py
sanitize_codon_list
def sanitize_codon_list(codon_list, forbidden_seqs=()): """ Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it. Undesirable sequences include restriction sites, which may be optionally specified as a second argument, and homopolymers above a pre-defined length. The return value is the number of corrections made to the codon list. """ # Unit test missing for: # Homopolymer fixing for codon in codon_list: if len(codon) != 3: raise ValueError("Codons must have exactly 3 bases: '{}'".format(codon)) # Compile a collection of all the sequences we don't want to appear in the # gene. This includes the given restriction sites and their reverse # complements, plus any homopolymers above a pre-defined length. bad_seqs = set() bad_seqs.union( restriction_sites.get(seq, seq) for seq in forbidden_seqs) bad_seqs.union( dna.reverse_complement(seq) for seq in bad_seqs) bad_seqs.union( base * (gen9.homopolymer_max_lengths[base] + 1) for base in dna.dna_bases) bad_seqs = [ dna.dna_to_re(bs) for bs in bad_seqs] # Remove every bad sequence from the gene by making silent mutations to the # codon list. num_corrections = 0 for bad_seq in bad_seqs: while remove_bad_sequence(codon_list, bad_seq, bad_seqs): num_corrections += 1 return num_corrections
python
def sanitize_codon_list(codon_list, forbidden_seqs=()): """ Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it. Undesirable sequences include restriction sites, which may be optionally specified as a second argument, and homopolymers above a pre-defined length. The return value is the number of corrections made to the codon list. """ # Unit test missing for: # Homopolymer fixing for codon in codon_list: if len(codon) != 3: raise ValueError("Codons must have exactly 3 bases: '{}'".format(codon)) # Compile a collection of all the sequences we don't want to appear in the # gene. This includes the given restriction sites and their reverse # complements, plus any homopolymers above a pre-defined length. bad_seqs = set() bad_seqs.union( restriction_sites.get(seq, seq) for seq in forbidden_seqs) bad_seqs.union( dna.reverse_complement(seq) for seq in bad_seqs) bad_seqs.union( base * (gen9.homopolymer_max_lengths[base] + 1) for base in dna.dna_bases) bad_seqs = [ dna.dna_to_re(bs) for bs in bad_seqs] # Remove every bad sequence from the gene by making silent mutations to the # codon list. num_corrections = 0 for bad_seq in bad_seqs: while remove_bad_sequence(codon_list, bad_seq, bad_seqs): num_corrections += 1 return num_corrections
[ "def", "sanitize_codon_list", "(", "codon_list", ",", "forbidden_seqs", "=", "(", ")", ")", ":", "# Unit test missing for:", "# Homopolymer fixing", "for", "codon", "in", "codon_list", ":", "if", "len", "(", "codon", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"Codons must have exactly 3 bases: '{}'\"", ".", "format", "(", "codon", ")", ")", "# Compile a collection of all the sequences we don't want to appear in the ", "# gene. This includes the given restriction sites and their reverse ", "# complements, plus any homopolymers above a pre-defined length.", "bad_seqs", "=", "set", "(", ")", "bad_seqs", ".", "union", "(", "restriction_sites", ".", "get", "(", "seq", ",", "seq", ")", "for", "seq", "in", "forbidden_seqs", ")", "bad_seqs", ".", "union", "(", "dna", ".", "reverse_complement", "(", "seq", ")", "for", "seq", "in", "bad_seqs", ")", "bad_seqs", ".", "union", "(", "base", "*", "(", "gen9", ".", "homopolymer_max_lengths", "[", "base", "]", "+", "1", ")", "for", "base", "in", "dna", ".", "dna_bases", ")", "bad_seqs", "=", "[", "dna", ".", "dna_to_re", "(", "bs", ")", "for", "bs", "in", "bad_seqs", "]", "# Remove every bad sequence from the gene by making silent mutations to the ", "# codon list.", "num_corrections", "=", "0", "for", "bad_seq", "in", "bad_seqs", ":", "while", "remove_bad_sequence", "(", "codon_list", ",", "bad_seq", ",", "bad_seqs", ")", ":", "num_corrections", "+=", "1", "return", "num_corrections" ]
Make silent mutations to the given codon lists to remove any undesirable sequences that are present within it. Undesirable sequences include restriction sites, which may be optionally specified as a second argument, and homopolymers above a pre-defined length. The return value is the number of corrections made to the codon list.
[ "Make", "silent", "mutations", "to", "the", "given", "codon", "lists", "to", "remove", "any", "undesirable", "sequences", "that", "are", "present", "within", "it", ".", "Undesirable", "sequences", "include", "restriction", "sites", "which", "may", "be", "optionally", "specified", "as", "a", "second", "argument", "and", "homopolymers", "above", "a", "pre", "-", "defined", "length", ".", "The", "return", "value", "is", "the", "number", "of", "corrections", "made", "to", "the", "codon", "list", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L87-L134
train
Kortemme-Lab/klab
klab/cloning/cloning.py
remove_bad_sequence
def remove_bad_sequence(codon_list, bad_seq, bad_seqs): """ Make a silent mutation to the given codon list to remove the first instance of the given bad sequence found in the gene sequence. If the bad sequence isn't found, nothing happens and the function returns false. Otherwise the function returns true. You can use these return values to easily write a loop totally purges the bad sequence from the codon list. Both the specific bad sequence in question and the list of all bad sequences are expected to be regular expressions. """ gene_seq = ''.join(codon_list) problem = bad_seq.search(gene_seq) if not problem: return False bs_start_codon = problem.start() // 3 bs_end_codon = problem.end() // 3 for i in range(bs_start_codon, bs_end_codon): problem_codon = codon_list[i] amino_acid = translate_dna(problem_codon) alternate_codons = [ codon for codon in dna.ecoli_reverse_translate[amino_acid] if codon != problem_codon] for alternate_codon in alternate_codons: codon_list[i] = alternate_codon if problem_with_codon(i, codon_list, bad_seqs): codon_list[i] = problem_codon else: return True raise RuntimeError("Could not remove bad sequence '{}' from gene.".format(bs))
python
def remove_bad_sequence(codon_list, bad_seq, bad_seqs): """ Make a silent mutation to the given codon list to remove the first instance of the given bad sequence found in the gene sequence. If the bad sequence isn't found, nothing happens and the function returns false. Otherwise the function returns true. You can use these return values to easily write a loop totally purges the bad sequence from the codon list. Both the specific bad sequence in question and the list of all bad sequences are expected to be regular expressions. """ gene_seq = ''.join(codon_list) problem = bad_seq.search(gene_seq) if not problem: return False bs_start_codon = problem.start() // 3 bs_end_codon = problem.end() // 3 for i in range(bs_start_codon, bs_end_codon): problem_codon = codon_list[i] amino_acid = translate_dna(problem_codon) alternate_codons = [ codon for codon in dna.ecoli_reverse_translate[amino_acid] if codon != problem_codon] for alternate_codon in alternate_codons: codon_list[i] = alternate_codon if problem_with_codon(i, codon_list, bad_seqs): codon_list[i] = problem_codon else: return True raise RuntimeError("Could not remove bad sequence '{}' from gene.".format(bs))
[ "def", "remove_bad_sequence", "(", "codon_list", ",", "bad_seq", ",", "bad_seqs", ")", ":", "gene_seq", "=", "''", ".", "join", "(", "codon_list", ")", "problem", "=", "bad_seq", ".", "search", "(", "gene_seq", ")", "if", "not", "problem", ":", "return", "False", "bs_start_codon", "=", "problem", ".", "start", "(", ")", "//", "3", "bs_end_codon", "=", "problem", ".", "end", "(", ")", "//", "3", "for", "i", "in", "range", "(", "bs_start_codon", ",", "bs_end_codon", ")", ":", "problem_codon", "=", "codon_list", "[", "i", "]", "amino_acid", "=", "translate_dna", "(", "problem_codon", ")", "alternate_codons", "=", "[", "codon", "for", "codon", "in", "dna", ".", "ecoli_reverse_translate", "[", "amino_acid", "]", "if", "codon", "!=", "problem_codon", "]", "for", "alternate_codon", "in", "alternate_codons", ":", "codon_list", "[", "i", "]", "=", "alternate_codon", "if", "problem_with_codon", "(", "i", ",", "codon_list", ",", "bad_seqs", ")", ":", "codon_list", "[", "i", "]", "=", "problem_codon", "else", ":", "return", "True", "raise", "RuntimeError", "(", "\"Could not remove bad sequence '{}' from gene.\"", ".", "format", "(", "bs", ")", ")" ]
Make a silent mutation to the given codon list to remove the first instance of the given bad sequence found in the gene sequence. If the bad sequence isn't found, nothing happens and the function returns false. Otherwise the function returns true. You can use these return values to easily write a loop totally purges the bad sequence from the codon list. Both the specific bad sequence in question and the list of all bad sequences are expected to be regular expressions.
[ "Make", "a", "silent", "mutation", "to", "the", "given", "codon", "list", "to", "remove", "the", "first", "instance", "of", "the", "given", "bad", "sequence", "found", "in", "the", "gene", "sequence", ".", "If", "the", "bad", "sequence", "isn", "t", "found", "nothing", "happens", "and", "the", "function", "returns", "false", ".", "Otherwise", "the", "function", "returns", "true", ".", "You", "can", "use", "these", "return", "values", "to", "easily", "write", "a", "loop", "totally", "purges", "the", "bad", "sequence", "from", "the", "codon", "list", ".", "Both", "the", "specific", "bad", "sequence", "in", "question", "and", "the", "list", "of", "all", "bad", "sequences", "are", "expected", "to", "be", "regular", "expressions", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L136-L173
train
Kortemme-Lab/klab
klab/cloning/cloning.py
problem_with_codon
def problem_with_codon(codon_index, codon_list, bad_seqs): """ Return true if the given codon overlaps with a bad sequence. """ base_1 = 3 * codon_index base_3 = 3 * codon_index + 2 gene_seq = ''.join(codon_list) for bad_seq in bad_seqs: problem = bad_seq.search(gene_seq) if problem and problem.start() < base_3 and problem.end() > base_1: return True return False
python
def problem_with_codon(codon_index, codon_list, bad_seqs): """ Return true if the given codon overlaps with a bad sequence. """ base_1 = 3 * codon_index base_3 = 3 * codon_index + 2 gene_seq = ''.join(codon_list) for bad_seq in bad_seqs: problem = bad_seq.search(gene_seq) if problem and problem.start() < base_3 and problem.end() > base_1: return True return False
[ "def", "problem_with_codon", "(", "codon_index", ",", "codon_list", ",", "bad_seqs", ")", ":", "base_1", "=", "3", "*", "codon_index", "base_3", "=", "3", "*", "codon_index", "+", "2", "gene_seq", "=", "''", ".", "join", "(", "codon_list", ")", "for", "bad_seq", "in", "bad_seqs", ":", "problem", "=", "bad_seq", ".", "search", "(", "gene_seq", ")", "if", "problem", "and", "problem", ".", "start", "(", ")", "<", "base_3", "and", "problem", ".", "end", "(", ")", ">", "base_1", ":", "return", "True", "return", "False" ]
Return true if the given codon overlaps with a bad sequence.
[ "Return", "true", "if", "the", "given", "codon", "overlaps", "with", "a", "bad", "sequence", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L175-L190
train
Kortemme-Lab/klab
klab/cloning/cloning.py
sequences_from_fasta
def sequences_from_fasta(path): """ Extract multiple sequences from a FASTA file. """ from Bio import SeqIO return {x.description: x.seq for x in SeqIO.parse(path, 'fasta')}
python
def sequences_from_fasta(path): """ Extract multiple sequences from a FASTA file. """ from Bio import SeqIO return {x.description: x.seq for x in SeqIO.parse(path, 'fasta')}
[ "def", "sequences_from_fasta", "(", "path", ")", ":", "from", "Bio", "import", "SeqIO", "return", "{", "x", ".", "description", ":", "x", ".", "seq", "for", "x", "in", "SeqIO", ".", "parse", "(", "path", ",", "'fasta'", ")", "}" ]
Extract multiple sequences from a FASTA file.
[ "Extract", "multiple", "sequences", "from", "a", "FASTA", "file", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L204-L209
train
Kortemme-Lab/klab
klab/cloning/cloning.py
write_sequences_to_fasta
def write_sequences_to_fasta(path, seqs): """ Create a FASTA file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA. """ from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord path = Path(path) records = [] for id, seq in seqs.items(): record = SeqRecord(Seq(seq), id=id, description='') records.append(record) SeqIO.write(records, str(path), 'fasta')
python
def write_sequences_to_fasta(path, seqs): """ Create a FASTA file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA. """ from Bio import SeqIO from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord path = Path(path) records = [] for id, seq in seqs.items(): record = SeqRecord(Seq(seq), id=id, description='') records.append(record) SeqIO.write(records, str(path), 'fasta')
[ "def", "write_sequences_to_fasta", "(", "path", ",", "seqs", ")", ":", "from", "Bio", "import", "SeqIO", "from", "Bio", ".", "Seq", "import", "Seq", "from", "Bio", ".", "SeqRecord", "import", "SeqRecord", "path", "=", "Path", "(", "path", ")", "records", "=", "[", "]", "for", "id", ",", "seq", "in", "seqs", ".", "items", "(", ")", ":", "record", "=", "SeqRecord", "(", "Seq", "(", "seq", ")", ",", "id", "=", "id", ",", "description", "=", "''", ")", "records", ".", "append", "(", "record", ")", "SeqIO", ".", "write", "(", "records", ",", "str", "(", "path", ")", ",", "'fasta'", ")" ]
Create a FASTA file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA.
[ "Create", "a", "FASTA", "file", "listing", "the", "given", "sequences", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L374-L397
train
Kortemme-Lab/klab
klab/cloning/cloning.py
write_sequences_to_xlsx
def write_sequences_to_xlsx(path, seqs): """ Create a XLSX file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA. """ from openpyxl import Workbook wb = Workbook() ws = wb.active for row, id in enumerate(seqs, 1): ws.cell(row, 1).value = id ws.cell(row, 2).value = seqs[id] wb.save(path)
python
def write_sequences_to_xlsx(path, seqs): """ Create a XLSX file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA. """ from openpyxl import Workbook wb = Workbook() ws = wb.active for row, id in enumerate(seqs, 1): ws.cell(row, 1).value = id ws.cell(row, 2).value = seqs[id] wb.save(path)
[ "def", "write_sequences_to_xlsx", "(", "path", ",", "seqs", ")", ":", "from", "openpyxl", "import", "Workbook", "wb", "=", "Workbook", "(", ")", "ws", "=", "wb", ".", "active", "for", "row", ",", "id", "in", "enumerate", "(", "seqs", ",", "1", ")", ":", "ws", ".", "cell", "(", "row", ",", "1", ")", ".", "value", "=", "id", "ws", ".", "cell", "(", "row", ",", "2", ")", ".", "value", "=", "seqs", "[", "id", "]", "wb", ".", "save", "(", "path", ")" ]
Create a XLSX file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. seqs: dict A mapping of names to sequences, which can be either protein or DNA.
[ "Create", "a", "XLSX", "file", "listing", "the", "given", "sequences", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cloning/cloning.py#L422-L443
train
berkeley-cocosci/Wallace
wallace/networks.py
DiscreteGenerational.add_node
def add_node(self, node): """Link the agent to a random member of the previous generation.""" nodes = [n for n in self.nodes() if not isinstance(n, Source)] num_agents = len(nodes) curr_generation = int((num_agents - 1) / float(self.generation_size)) node.generation = curr_generation if curr_generation == 0: if self.initial_source: source = min( self.nodes(type=Source), key=attrgetter('creation_time')) source.connect(whom=node) source.transmit(to_whom=node) else: prev_agents = Node.query\ .filter_by(failed=False, network_id=self.id, generation=(curr_generation - 1))\ .all() prev_fits = [p.fitness for p in prev_agents] prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits] rnd = random.random() temp = 0.0 for i, probability in enumerate(prev_probs): temp += probability if temp > rnd: parent = prev_agents[i] break parent.connect(whom=node) parent.transmit(to_whom=node)
python
def add_node(self, node): """Link the agent to a random member of the previous generation.""" nodes = [n for n in self.nodes() if not isinstance(n, Source)] num_agents = len(nodes) curr_generation = int((num_agents - 1) / float(self.generation_size)) node.generation = curr_generation if curr_generation == 0: if self.initial_source: source = min( self.nodes(type=Source), key=attrgetter('creation_time')) source.connect(whom=node) source.transmit(to_whom=node) else: prev_agents = Node.query\ .filter_by(failed=False, network_id=self.id, generation=(curr_generation - 1))\ .all() prev_fits = [p.fitness for p in prev_agents] prev_probs = [(f / (1.0 * sum(prev_fits))) for f in prev_fits] rnd = random.random() temp = 0.0 for i, probability in enumerate(prev_probs): temp += probability if temp > rnd: parent = prev_agents[i] break parent.connect(whom=node) parent.transmit(to_whom=node)
[ "def", "add_node", "(", "self", ",", "node", ")", ":", "nodes", "=", "[", "n", "for", "n", "in", "self", ".", "nodes", "(", ")", "if", "not", "isinstance", "(", "n", ",", "Source", ")", "]", "num_agents", "=", "len", "(", "nodes", ")", "curr_generation", "=", "int", "(", "(", "num_agents", "-", "1", ")", "/", "float", "(", "self", ".", "generation_size", ")", ")", "node", ".", "generation", "=", "curr_generation", "if", "curr_generation", "==", "0", ":", "if", "self", ".", "initial_source", ":", "source", "=", "min", "(", "self", ".", "nodes", "(", "type", "=", "Source", ")", ",", "key", "=", "attrgetter", "(", "'creation_time'", ")", ")", "source", ".", "connect", "(", "whom", "=", "node", ")", "source", ".", "transmit", "(", "to_whom", "=", "node", ")", "else", ":", "prev_agents", "=", "Node", ".", "query", ".", "filter_by", "(", "failed", "=", "False", ",", "network_id", "=", "self", ".", "id", ",", "generation", "=", "(", "curr_generation", "-", "1", ")", ")", ".", "all", "(", ")", "prev_fits", "=", "[", "p", ".", "fitness", "for", "p", "in", "prev_agents", "]", "prev_probs", "=", "[", "(", "f", "/", "(", "1.0", "*", "sum", "(", "prev_fits", ")", ")", ")", "for", "f", "in", "prev_fits", "]", "rnd", "=", "random", ".", "random", "(", ")", "temp", "=", "0.0", "for", "i", ",", "probability", "in", "enumerate", "(", "prev_probs", ")", ":", "temp", "+=", "probability", "if", "temp", ">", "rnd", ":", "parent", "=", "prev_agents", "[", "i", "]", "break", "parent", ".", "connect", "(", "whom", "=", "node", ")", "parent", ".", "transmit", "(", "to_whom", "=", "node", ")" ]
Link the agent to a random member of the previous generation.
[ "Link", "the", "agent", "to", "a", "random", "member", "of", "the", "previous", "generation", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/networks.py#L138-L170
train
berkeley-cocosci/Wallace
wallace/networks.py
ScaleFree.add_node
def add_node(self, node): """Add newcomers one by one, using linear preferential attachment.""" nodes = self.nodes() # Start with a core of m0 fully-connected agents... if len(nodes) <= self.m0: other_nodes = [n for n in nodes if n.id != node.id] for n in other_nodes: node.connect(direction="both", whom=n) # ...then add newcomers one by one with preferential attachment. else: for idx_newvector in xrange(self.m): these_nodes = [ n for n in nodes if ( n.id != node.id and not n.is_connected(direction="either", whom=node))] outdegrees = [ len(n.vectors(direction="outgoing")) for n in these_nodes] # Select a member using preferential attachment ps = [(d / (1.0 * sum(outdegrees))) for d in outdegrees] rnd = random.random() * sum(ps) cur = 0.0 for i, p in enumerate(ps): cur += p if rnd < cur: vector_to = these_nodes[i] # Create vector from newcomer to selected member and back node.connect(direction="both", whom=vector_to)
python
def add_node(self, node): """Add newcomers one by one, using linear preferential attachment.""" nodes = self.nodes() # Start with a core of m0 fully-connected agents... if len(nodes) <= self.m0: other_nodes = [n for n in nodes if n.id != node.id] for n in other_nodes: node.connect(direction="both", whom=n) # ...then add newcomers one by one with preferential attachment. else: for idx_newvector in xrange(self.m): these_nodes = [ n for n in nodes if ( n.id != node.id and not n.is_connected(direction="either", whom=node))] outdegrees = [ len(n.vectors(direction="outgoing")) for n in these_nodes] # Select a member using preferential attachment ps = [(d / (1.0 * sum(outdegrees))) for d in outdegrees] rnd = random.random() * sum(ps) cur = 0.0 for i, p in enumerate(ps): cur += p if rnd < cur: vector_to = these_nodes[i] # Create vector from newcomer to selected member and back node.connect(direction="both", whom=vector_to)
[ "def", "add_node", "(", "self", ",", "node", ")", ":", "nodes", "=", "self", ".", "nodes", "(", ")", "# Start with a core of m0 fully-connected agents...", "if", "len", "(", "nodes", ")", "<=", "self", ".", "m0", ":", "other_nodes", "=", "[", "n", "for", "n", "in", "nodes", "if", "n", ".", "id", "!=", "node", ".", "id", "]", "for", "n", "in", "other_nodes", ":", "node", ".", "connect", "(", "direction", "=", "\"both\"", ",", "whom", "=", "n", ")", "# ...then add newcomers one by one with preferential attachment.", "else", ":", "for", "idx_newvector", "in", "xrange", "(", "self", ".", "m", ")", ":", "these_nodes", "=", "[", "n", "for", "n", "in", "nodes", "if", "(", "n", ".", "id", "!=", "node", ".", "id", "and", "not", "n", ".", "is_connected", "(", "direction", "=", "\"either\"", ",", "whom", "=", "node", ")", ")", "]", "outdegrees", "=", "[", "len", "(", "n", ".", "vectors", "(", "direction", "=", "\"outgoing\"", ")", ")", "for", "n", "in", "these_nodes", "]", "# Select a member using preferential attachment", "ps", "=", "[", "(", "d", "/", "(", "1.0", "*", "sum", "(", "outdegrees", ")", ")", ")", "for", "d", "in", "outdegrees", "]", "rnd", "=", "random", ".", "random", "(", ")", "*", "sum", "(", "ps", ")", "cur", "=", "0.0", "for", "i", ",", "p", "in", "enumerate", "(", "ps", ")", ":", "cur", "+=", "p", "if", "rnd", "<", "cur", ":", "vector_to", "=", "these_nodes", "[", "i", "]", "# Create vector from newcomer to selected member and back", "node", ".", "connect", "(", "direction", "=", "\"both\"", ",", "whom", "=", "vector_to", ")" ]
Add newcomers one by one, using linear preferential attachment.
[ "Add", "newcomers", "one", "by", "one", "using", "linear", "preferential", "attachment", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/networks.py#L200-L232
train
TUNE-Archive/freight_forwarder
freight_forwarder/container/config.py
Config.docker_py_dict
def docker_py_dict(self): """Convert object to match valid docker-py properties. """ return { 'image': self.image, 'command': self.cmd, 'hostname': self.hostname, 'user': self.user, 'detach': self.detach, 'stdin_open': self.open_stdin, 'tty': self.tty, 'ports': self.exposed_ports, 'environment': self.env, 'volumes': self.volumes, 'network_disabled': self.network_disabled, 'entrypoint': self.entry_point, 'working_dir': self.working_dir, 'domainname': self.domain_name, 'labels': self.labels }
python
def docker_py_dict(self): """Convert object to match valid docker-py properties. """ return { 'image': self.image, 'command': self.cmd, 'hostname': self.hostname, 'user': self.user, 'detach': self.detach, 'stdin_open': self.open_stdin, 'tty': self.tty, 'ports': self.exposed_ports, 'environment': self.env, 'volumes': self.volumes, 'network_disabled': self.network_disabled, 'entrypoint': self.entry_point, 'working_dir': self.working_dir, 'domainname': self.domain_name, 'labels': self.labels }
[ "def", "docker_py_dict", "(", "self", ")", ":", "return", "{", "'image'", ":", "self", ".", "image", ",", "'command'", ":", "self", ".", "cmd", ",", "'hostname'", ":", "self", ".", "hostname", ",", "'user'", ":", "self", ".", "user", ",", "'detach'", ":", "self", ".", "detach", ",", "'stdin_open'", ":", "self", ".", "open_stdin", ",", "'tty'", ":", "self", ".", "tty", ",", "'ports'", ":", "self", ".", "exposed_ports", ",", "'environment'", ":", "self", ".", "env", ",", "'volumes'", ":", "self", ".", "volumes", ",", "'network_disabled'", ":", "self", ".", "network_disabled", ",", "'entrypoint'", ":", "self", ".", "entry_point", ",", "'working_dir'", ":", "self", ".", "working_dir", ",", "'domainname'", ":", "self", ".", "domain_name", ",", "'labels'", ":", "self", ".", "labels", "}" ]
Convert object to match valid docker-py properties.
[ "Convert", "object", "to", "match", "valid", "docker", "-", "py", "properties", "." ]
6ea4a49f474ec04abb8bb81b175c774a16b5312f
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/container/config.py#L75-L94
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_person_by_regid
def get_person_by_regid(self, regid): """ Returns a restclients.Person object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwregid(regid): raise InvalidRegID(regid) url = "{}/{}/full.json".format(PERSON_PREFIX, regid.upper()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._person_from_json(response.data)
python
def get_person_by_regid(self, regid): """ Returns a restclients.Person object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwregid(regid): raise InvalidRegID(regid) url = "{}/{}/full.json".format(PERSON_PREFIX, regid.upper()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._person_from_json(response.data)
[ "def", "get_person_by_regid", "(", "self", ",", "regid", ")", ":", "if", "not", "self", ".", "valid_uwregid", "(", "regid", ")", ":", "raise", "InvalidRegID", "(", "regid", ")", "url", "=", "\"{}/{}/full.json\"", ".", "format", "(", "PERSON_PREFIX", ",", "regid", ".", "upper", "(", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "return", "self", ".", "_person_from_json", "(", "response", ".", "data", ")" ]
Returns a restclients.Person object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Person", "object", "for", "the", "given", "regid", ".", "If", "the", "regid", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L38-L53
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_person_by_netid
def get_person_by_netid(self, netid): """ Returns a restclients.Person object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwnetid(netid): raise InvalidNetID(netid) url = "{}/{}/full.json".format(PERSON_PREFIX, netid.lower()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._person_from_json(response.data)
python
def get_person_by_netid(self, netid): """ Returns a restclients.Person object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwnetid(netid): raise InvalidNetID(netid) url = "{}/{}/full.json".format(PERSON_PREFIX, netid.lower()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._person_from_json(response.data)
[ "def", "get_person_by_netid", "(", "self", ",", "netid", ")", ":", "if", "not", "self", ".", "valid_uwnetid", "(", "netid", ")", ":", "raise", "InvalidNetID", "(", "netid", ")", "url", "=", "\"{}/{}/full.json\"", ".", "format", "(", "PERSON_PREFIX", ",", "netid", ".", "lower", "(", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "return", "self", ".", "_person_from_json", "(", "response", ".", "data", ")" ]
Returns a restclients.Person object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Person", "object", "for", "the", "given", "netid", ".", "If", "the", "netid", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L55-L70
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_person_by_employee_id
def get_person_by_employee_id(self, employee_id): """ Returns a restclients.Person object for the given employee id. If the employee id isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_employee_id(employee_id): raise InvalidEmployeeID(employee_id) url = "{}.json?{}".format( PERSON_PREFIX, urlencode({"employee_id": employee_id})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) # Search does not return a full person resource data = json.loads(response.data) if not len(data["Persons"]): raise DataFailureException(url, 404, "No person found") regid = data["Persons"][0]["PersonURI"]["UWRegID"] return self.get_person_by_regid(regid)
python
def get_person_by_employee_id(self, employee_id): """ Returns a restclients.Person object for the given employee id. If the employee id isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_employee_id(employee_id): raise InvalidEmployeeID(employee_id) url = "{}.json?{}".format( PERSON_PREFIX, urlencode({"employee_id": employee_id})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) # Search does not return a full person resource data = json.loads(response.data) if not len(data["Persons"]): raise DataFailureException(url, 404, "No person found") regid = data["Persons"][0]["PersonURI"]["UWRegID"] return self.get_person_by_regid(regid)
[ "def", "get_person_by_employee_id", "(", "self", ",", "employee_id", ")", ":", "if", "not", "self", ".", "valid_employee_id", "(", "employee_id", ")", ":", "raise", "InvalidEmployeeID", "(", "employee_id", ")", "url", "=", "\"{}.json?{}\"", ".", "format", "(", "PERSON_PREFIX", ",", "urlencode", "(", "{", "\"employee_id\"", ":", "employee_id", "}", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "# Search does not return a full person resource", "data", "=", "json", ".", "loads", "(", "response", ".", "data", ")", "if", "not", "len", "(", "data", "[", "\"Persons\"", "]", ")", ":", "raise", "DataFailureException", "(", "url", ",", "404", ",", "\"No person found\"", ")", "regid", "=", "data", "[", "\"Persons\"", "]", "[", "0", "]", "[", "\"PersonURI\"", "]", "[", "\"UWRegID\"", "]", "return", "self", ".", "get_person_by_regid", "(", "regid", ")" ]
Returns a restclients.Person object for the given employee id. If the employee id isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Person", "object", "for", "the", "given", "employee", "id", ".", "If", "the", "employee", "id", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L72-L94
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_person_by_student_number
def get_person_by_student_number(self, student_number): """ Returns a restclients.Person object for the given student number. If the student number isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_student_number(student_number): raise InvalidStudentNumber(student_number) url = "{}.json?{}".format( PERSON_PREFIX, urlencode({"student_number": student_number})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) # Search does not return a full person resource data = json.loads(response.data) if not len(data["Persons"]): raise DataFailureException(url, 404, "No person found") regid = data["Persons"][0]["PersonURI"]["UWRegID"] return self.get_person_by_regid(regid)
python
def get_person_by_student_number(self, student_number): """ Returns a restclients.Person object for the given student number. If the student number isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_student_number(student_number): raise InvalidStudentNumber(student_number) url = "{}.json?{}".format( PERSON_PREFIX, urlencode({"student_number": student_number})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) # Search does not return a full person resource data = json.loads(response.data) if not len(data["Persons"]): raise DataFailureException(url, 404, "No person found") regid = data["Persons"][0]["PersonURI"]["UWRegID"] return self.get_person_by_regid(regid)
[ "def", "get_person_by_student_number", "(", "self", ",", "student_number", ")", ":", "if", "not", "self", ".", "valid_student_number", "(", "student_number", ")", ":", "raise", "InvalidStudentNumber", "(", "student_number", ")", "url", "=", "\"{}.json?{}\"", ".", "format", "(", "PERSON_PREFIX", ",", "urlencode", "(", "{", "\"student_number\"", ":", "student_number", "}", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "# Search does not return a full person resource", "data", "=", "json", ".", "loads", "(", "response", ".", "data", ")", "if", "not", "len", "(", "data", "[", "\"Persons\"", "]", ")", ":", "raise", "DataFailureException", "(", "url", ",", "404", ",", "\"No person found\"", ")", "regid", "=", "data", "[", "\"Persons\"", "]", "[", "0", "]", "[", "\"PersonURI\"", "]", "[", "\"UWRegID\"", "]", "return", "self", ".", "get_person_by_regid", "(", "regid", ")" ]
Returns a restclients.Person object for the given student number. If the student number isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Person", "object", "for", "the", "given", "student", "number", ".", "If", "the", "student", "number", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L96-L118
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_person_by_prox_rfid
def get_person_by_prox_rfid(self, prox_rfid): """ Returns a restclients.Person object for the given rfid. If the rfid isn't found, or if there is an error communicating with the IdCard WS, a DataFailureException will be thrown. """ if not self.valid_prox_rfid(prox_rfid): raise InvalidProxRFID(prox_rfid) url = "{}.json?{}".format( CARD_PREFIX, urlencode({"prox_rfid": prox_rfid})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) if not len(data["Cards"]): raise DataFailureException(url, 404, "No card found") regid = data["Cards"][0]["RegID"] return self.get_person_by_regid(regid)
python
def get_person_by_prox_rfid(self, prox_rfid): """ Returns a restclients.Person object for the given rfid. If the rfid isn't found, or if there is an error communicating with the IdCard WS, a DataFailureException will be thrown. """ if not self.valid_prox_rfid(prox_rfid): raise InvalidProxRFID(prox_rfid) url = "{}.json?{}".format( CARD_PREFIX, urlencode({"prox_rfid": prox_rfid})) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) data = json.loads(response.data) if not len(data["Cards"]): raise DataFailureException(url, 404, "No card found") regid = data["Cards"][0]["RegID"] return self.get_person_by_regid(regid)
[ "def", "get_person_by_prox_rfid", "(", "self", ",", "prox_rfid", ")", ":", "if", "not", "self", ".", "valid_prox_rfid", "(", "prox_rfid", ")", ":", "raise", "InvalidProxRFID", "(", "prox_rfid", ")", "url", "=", "\"{}.json?{}\"", ".", "format", "(", "CARD_PREFIX", ",", "urlencode", "(", "{", "\"prox_rfid\"", ":", "prox_rfid", "}", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "data", ")", "if", "not", "len", "(", "data", "[", "\"Cards\"", "]", ")", ":", "raise", "DataFailureException", "(", "url", ",", "404", ",", "\"No card found\"", ")", "regid", "=", "data", "[", "\"Cards\"", "]", "[", "0", "]", "[", "\"RegID\"", "]", "return", "self", ".", "get_person_by_regid", "(", "regid", ")" ]
Returns a restclients.Person object for the given rfid. If the rfid isn't found, or if there is an error communicating with the IdCard WS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Person", "object", "for", "the", "given", "rfid", ".", "If", "the", "rfid", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "IdCard", "WS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L120-L141
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_entity_by_regid
def get_entity_by_regid(self, regid): """ Returns a restclients.Entity object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwregid(regid): raise InvalidRegID(regid) url = "{}/{}.json".format(ENTITY_PREFIX, regid.upper()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._entity_from_json(response.data)
python
def get_entity_by_regid(self, regid): """ Returns a restclients.Entity object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwregid(regid): raise InvalidRegID(regid) url = "{}/{}.json".format(ENTITY_PREFIX, regid.upper()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._entity_from_json(response.data)
[ "def", "get_entity_by_regid", "(", "self", ",", "regid", ")", ":", "if", "not", "self", ".", "valid_uwregid", "(", "regid", ")", ":", "raise", "InvalidRegID", "(", "regid", ")", "url", "=", "\"{}/{}.json\"", ".", "format", "(", "ENTITY_PREFIX", ",", "regid", ".", "upper", "(", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "return", "self", ".", "_entity_from_json", "(", "response", ".", "data", ")" ]
Returns a restclients.Entity object for the given regid. If the regid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Entity", "object", "for", "the", "given", "regid", ".", "If", "the", "regid", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L143-L158
train
uw-it-aca/uw-restclients-pws
uw_pws/__init__.py
PWS.get_entity_by_netid
def get_entity_by_netid(self, netid): """ Returns a restclients.Entity object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwnetid(netid): raise InvalidNetID(netid) url = "{}/{}.json".format(ENTITY_PREFIX, netid.lower()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._entity_from_json(response.data)
python
def get_entity_by_netid(self, netid): """ Returns a restclients.Entity object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown. """ if not self.valid_uwnetid(netid): raise InvalidNetID(netid) url = "{}/{}.json".format(ENTITY_PREFIX, netid.lower()) response = DAO.getURL(url, {"Accept": "application/json"}) if response.status != 200: raise DataFailureException(url, response.status, response.data) return self._entity_from_json(response.data)
[ "def", "get_entity_by_netid", "(", "self", ",", "netid", ")", ":", "if", "not", "self", ".", "valid_uwnetid", "(", "netid", ")", ":", "raise", "InvalidNetID", "(", "netid", ")", "url", "=", "\"{}/{}.json\"", ".", "format", "(", "ENTITY_PREFIX", ",", "netid", ".", "lower", "(", ")", ")", "response", "=", "DAO", ".", "getURL", "(", "url", ",", "{", "\"Accept\"", ":", "\"application/json\"", "}", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "DataFailureException", "(", "url", ",", "response", ".", "status", ",", "response", ".", "data", ")", "return", "self", ".", "_entity_from_json", "(", "response", ".", "data", ")" ]
Returns a restclients.Entity object for the given netid. If the netid isn't found, or if there is an error communicating with the PWS, a DataFailureException will be thrown.
[ "Returns", "a", "restclients", ".", "Entity", "object", "for", "the", "given", "netid", ".", "If", "the", "netid", "isn", "t", "found", "or", "if", "there", "is", "an", "error", "communicating", "with", "the", "PWS", "a", "DataFailureException", "will", "be", "thrown", "." ]
758d94b42a01762738140c5f984d05f389325b7a
https://github.com/uw-it-aca/uw-restclients-pws/blob/758d94b42a01762738140c5f984d05f389325b7a/uw_pws/__init__.py#L160-L175
train
adaptive-learning/proso-apps
proso_concepts/models.py
generate_identifier
def generate_identifier(sender, instance, **kwargs): """ Generate and set identifier of concept before saving object to DB Args: sender (class): should be Concept instance (Concept): saving concept """ identifier = Concept.create_identifier(instance.query) qs = Concept.objects.filter(identifier=identifier, lang=instance.lang) if instance.pk: qs = qs.exclude(pk=instance.pk) if qs.count() > 0: raise ValueError("Concept identifier conflict") instance.identifier = identifier
python
def generate_identifier(sender, instance, **kwargs): """ Generate and set identifier of concept before saving object to DB Args: sender (class): should be Concept instance (Concept): saving concept """ identifier = Concept.create_identifier(instance.query) qs = Concept.objects.filter(identifier=identifier, lang=instance.lang) if instance.pk: qs = qs.exclude(pk=instance.pk) if qs.count() > 0: raise ValueError("Concept identifier conflict") instance.identifier = identifier
[ "def", "generate_identifier", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "identifier", "=", "Concept", ".", "create_identifier", "(", "instance", ".", "query", ")", "qs", "=", "Concept", ".", "objects", ".", "filter", "(", "identifier", "=", "identifier", ",", "lang", "=", "instance", ".", "lang", ")", "if", "instance", ".", "pk", ":", "qs", "=", "qs", ".", "exclude", "(", "pk", "=", "instance", ".", "pk", ")", "if", "qs", ".", "count", "(", ")", ">", "0", ":", "raise", "ValueError", "(", "\"Concept identifier conflict\"", ")", "instance", ".", "identifier", "=", "identifier" ]
Generate and set identifier of concept before saving object to DB Args: sender (class): should be Concept instance (Concept): saving concept
[ "Generate", "and", "set", "identifier", "of", "concept", "before", "saving", "object", "to", "DB" ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L369-L383
train
adaptive-learning/proso-apps
proso_concepts/models.py
ConceptManager.get_concept_item_mapping
def get_concept_item_mapping(self, concepts=None, lang=None): """ Get mapping of concepts to items belonging to concept. Args: concepts (list of Concept): Defaults to None meaning all concepts lang (str): language of concepts, if None use language of concepts Returns: dict: concept (int) -> list of item ids (int) """ if concepts is None: concepts = self.filter(active=True) if lang is not None: concepts = concepts.filter(lang=lang) if lang is None: languages = set([concept.lang for concept in concepts]) if len(languages) > 1: raise Exception('Concepts has multiple languages') lang = list(languages)[0] item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query) for concept in concepts], lang) return dict(zip([c.pk for c in concepts], item_lists))
python
def get_concept_item_mapping(self, concepts=None, lang=None): """ Get mapping of concepts to items belonging to concept. Args: concepts (list of Concept): Defaults to None meaning all concepts lang (str): language of concepts, if None use language of concepts Returns: dict: concept (int) -> list of item ids (int) """ if concepts is None: concepts = self.filter(active=True) if lang is not None: concepts = concepts.filter(lang=lang) if lang is None: languages = set([concept.lang for concept in concepts]) if len(languages) > 1: raise Exception('Concepts has multiple languages') lang = list(languages)[0] item_lists = Item.objects.filter_all_reachable_leaves_many([json.loads(concept.query) for concept in concepts], lang) return dict(zip([c.pk for c in concepts], item_lists))
[ "def", "get_concept_item_mapping", "(", "self", ",", "concepts", "=", "None", ",", "lang", "=", "None", ")", ":", "if", "concepts", "is", "None", ":", "concepts", "=", "self", ".", "filter", "(", "active", "=", "True", ")", "if", "lang", "is", "not", "None", ":", "concepts", "=", "concepts", ".", "filter", "(", "lang", "=", "lang", ")", "if", "lang", "is", "None", ":", "languages", "=", "set", "(", "[", "concept", ".", "lang", "for", "concept", "in", "concepts", "]", ")", "if", "len", "(", "languages", ")", ">", "1", ":", "raise", "Exception", "(", "'Concepts has multiple languages'", ")", "lang", "=", "list", "(", "languages", ")", "[", "0", "]", "item_lists", "=", "Item", ".", "objects", ".", "filter_all_reachable_leaves_many", "(", "[", "json", ".", "loads", "(", "concept", ".", "query", ")", "for", "concept", "in", "concepts", "]", ",", "lang", ")", "return", "dict", "(", "zip", "(", "[", "c", ".", "pk", "for", "c", "in", "concepts", "]", ",", "item_lists", ")", ")" ]
Get mapping of concepts to items belonging to concept. Args: concepts (list of Concept): Defaults to None meaning all concepts lang (str): language of concepts, if None use language of concepts Returns: dict: concept (int) -> list of item ids (int)
[ "Get", "mapping", "of", "concepts", "to", "items", "belonging", "to", "concept", "." ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L66-L88
train
adaptive-learning/proso-apps
proso_concepts/models.py
ConceptManager.get_item_concept_mapping
def get_item_concept_mapping(self, lang): """ Get mapping of items_ids to concepts containing these items Args: lang (str): language of concepts Returns: dict: item (int) -> set of concepts (int) """ concepts = self.filter(active=True, lang=lang) return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))
python
def get_item_concept_mapping(self, lang): """ Get mapping of items_ids to concepts containing these items Args: lang (str): language of concepts Returns: dict: item (int) -> set of concepts (int) """ concepts = self.filter(active=True, lang=lang) return group_keys_by_value_lists(Concept.objects.get_concept_item_mapping(concepts, lang))
[ "def", "get_item_concept_mapping", "(", "self", ",", "lang", ")", ":", "concepts", "=", "self", ".", "filter", "(", "active", "=", "True", ",", "lang", "=", "lang", ")", "return", "group_keys_by_value_lists", "(", "Concept", ".", "objects", ".", "get_concept_item_mapping", "(", "concepts", ",", "lang", ")", ")" ]
Get mapping of items_ids to concepts containing these items Args: lang (str): language of concepts Returns: dict: item (int) -> set of concepts (int)
[ "Get", "mapping", "of", "items_ids", "to", "concepts", "containing", "these", "items" ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L91-L102
train
adaptive-learning/proso-apps
proso_concepts/models.py
ConceptManager.get_concepts_to_recalculate
def get_concepts_to_recalculate(self, users, lang, concepts=None): """ Get concept which have same changes and have to be recalculated Args: users (list of users or user): users whose user stats we are interesting in lang (str): language of used concepts concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user -> set of concepts (int) - in case of list of users list of stats (str) - in case of one user """ only_one_user = False if not isinstance(users, list): only_one_user = True users = [users] mapping = self.get_item_concept_mapping(lang) current_user_stats = defaultdict(lambda: {}) user_stats_qs = UserStat.objects.filter(user__in=users, stat="answer_count") # we need only one type if concepts is not None: user_stats_qs = user_stats_qs.filter(concept__in=concepts) for user_stat in user_stats_qs: current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat concepts_to_recalculate = defaultdict(lambda: set()) for user, item, time in Answer.objects.filter(user__in=users)\ .values_list("user_id", "item").annotate(Max("time")): if item not in mapping: # in reality this should by corner case, so it is efficient to not filter Answers continue # item is not in concept time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4) time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2) for concept in mapping[item]: if user in current_user_stats and concept in current_user_stats[user] \ and current_user_stats[user][concept].time > time: if not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor): continue # cache is up to date if concepts is None or concept in ([c.pk for c in concepts] if type(concepts[0]) == Concept else Concept): concepts_to_recalculate[user].add(concept) if only_one_user: return 
concepts_to_recalculate[users[0]] return concepts_to_recalculate
python
def get_concepts_to_recalculate(self, users, lang, concepts=None): """ Get concept which have same changes and have to be recalculated Args: users (list of users or user): users whose user stats we are interesting in lang (str): language of used concepts concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user -> set of concepts (int) - in case of list of users list of stats (str) - in case of one user """ only_one_user = False if not isinstance(users, list): only_one_user = True users = [users] mapping = self.get_item_concept_mapping(lang) current_user_stats = defaultdict(lambda: {}) user_stats_qs = UserStat.objects.filter(user__in=users, stat="answer_count") # we need only one type if concepts is not None: user_stats_qs = user_stats_qs.filter(concept__in=concepts) for user_stat in user_stats_qs: current_user_stats[user_stat.user_id][user_stat.concept_id] = user_stat concepts_to_recalculate = defaultdict(lambda: set()) for user, item, time in Answer.objects.filter(user__in=users)\ .values_list("user_id", "item").annotate(Max("time")): if item not in mapping: # in reality this should by corner case, so it is efficient to not filter Answers continue # item is not in concept time_expiration_lower_bound = get_config('proso_models', 'knowledge_overview.time_shift_hours', default=4) time_expiration_factor = get_config('proso_models', 'knowledge_overview.time_expiration_factor', default=2) for concept in mapping[item]: if user in current_user_stats and concept in current_user_stats[user] \ and current_user_stats[user][concept].time > time: if not self.has_time_expired(current_user_stats[user][concept].time, time, time_expiration_lower_bound, time_expiration_factor): continue # cache is up to date if concepts is None or concept in ([c.pk for c in concepts] if type(concepts[0]) == Concept else Concept): concepts_to_recalculate[user].add(concept) if only_one_user: return 
concepts_to_recalculate[users[0]] return concepts_to_recalculate
[ "def", "get_concepts_to_recalculate", "(", "self", ",", "users", ",", "lang", ",", "concepts", "=", "None", ")", ":", "only_one_user", "=", "False", "if", "not", "isinstance", "(", "users", ",", "list", ")", ":", "only_one_user", "=", "True", "users", "=", "[", "users", "]", "mapping", "=", "self", ".", "get_item_concept_mapping", "(", "lang", ")", "current_user_stats", "=", "defaultdict", "(", "lambda", ":", "{", "}", ")", "user_stats_qs", "=", "UserStat", ".", "objects", ".", "filter", "(", "user__in", "=", "users", ",", "stat", "=", "\"answer_count\"", ")", "# we need only one type", "if", "concepts", "is", "not", "None", ":", "user_stats_qs", "=", "user_stats_qs", ".", "filter", "(", "concept__in", "=", "concepts", ")", "for", "user_stat", "in", "user_stats_qs", ":", "current_user_stats", "[", "user_stat", ".", "user_id", "]", "[", "user_stat", ".", "concept_id", "]", "=", "user_stat", "concepts_to_recalculate", "=", "defaultdict", "(", "lambda", ":", "set", "(", ")", ")", "for", "user", ",", "item", ",", "time", "in", "Answer", ".", "objects", ".", "filter", "(", "user__in", "=", "users", ")", ".", "values_list", "(", "\"user_id\"", ",", "\"item\"", ")", ".", "annotate", "(", "Max", "(", "\"time\"", ")", ")", ":", "if", "item", "not", "in", "mapping", ":", "# in reality this should by corner case, so it is efficient to not filter Answers", "continue", "# item is not in concept", "time_expiration_lower_bound", "=", "get_config", "(", "'proso_models'", ",", "'knowledge_overview.time_shift_hours'", ",", "default", "=", "4", ")", "time_expiration_factor", "=", "get_config", "(", "'proso_models'", ",", "'knowledge_overview.time_expiration_factor'", ",", "default", "=", "2", ")", "for", "concept", "in", "mapping", "[", "item", "]", ":", "if", "user", "in", "current_user_stats", "and", "concept", "in", "current_user_stats", "[", "user", "]", "and", "current_user_stats", "[", "user", "]", "[", "concept", "]", ".", "time", ">", "time", ":", "if", "not", "self", ".", 
"has_time_expired", "(", "current_user_stats", "[", "user", "]", "[", "concept", "]", ".", "time", ",", "time", ",", "time_expiration_lower_bound", ",", "time_expiration_factor", ")", ":", "continue", "# cache is up to date", "if", "concepts", "is", "None", "or", "concept", "in", "(", "[", "c", ".", "pk", "for", "c", "in", "concepts", "]", "if", "type", "(", "concepts", "[", "0", "]", ")", "==", "Concept", "else", "Concept", ")", ":", "concepts_to_recalculate", "[", "user", "]", ".", "add", "(", "concept", ")", "if", "only_one_user", ":", "return", "concepts_to_recalculate", "[", "users", "[", "0", "]", "]", "return", "concepts_to_recalculate" ]
Get concept which have same changes and have to be recalculated Args: users (list of users or user): users whose user stats we are interesting in lang (str): language of used concepts concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user -> set of concepts (int) - in case of list of users list of stats (str) - in case of one user
[ "Get", "concept", "which", "have", "same", "changes", "and", "have", "to", "be", "recalculated" ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L104-L150
train
adaptive-learning/proso-apps
proso_concepts/models.py
UserStatManager.recalculate_concepts
def recalculate_concepts(self, concepts, lang=None): """ Recalculated given concepts for given users Args: concepts (dict): user id (int -> set of concepts to recalculate) lang(Optional[str]): language used to get items in all concepts (cached). Defaults to None, in that case are get items only in used concepts """ if len(concepts) == 0: return if lang is None: items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values())))) else: items = Concept.objects.get_concept_item_mapping(lang=lang) environment = get_environment() mastery_threshold = get_mastery_trashold() for user, concepts in concepts.items(): all_items = list(set(flatten([items[c] for c in concepts]))) answer_counts = environment.number_of_answers_more_items(all_items, user) correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user) predictions = dict(list(zip(all_items, get_predictive_model(). predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview())))) new_user_stats = [] stats_to_delete_condition = Q() for concept in concepts: answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate( time_spent=Sum("response_time"), sessions=Count("session", True), time_first=Min("time"), time_last=Max("time"), ) stats = { "answer_count": sum(answer_counts[i] for i in items[concept]), "correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]), "item_count": len(items[concept]), "practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]), "mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]), "prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]), "time_spent": answer_aggregates["time_spent"] / 1000, "session_count": answer_aggregates["sessions"], "time_first": answer_aggregates["time_first"].timestamp(), "time_last": answer_aggregates["time_last"].timestamp(), } 
stats_to_delete_condition |= Q(user=user, concept=concept) for stat_name, value in stats.items(): new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value)) self.filter(stats_to_delete_condition).delete() self.bulk_create(new_user_stats)
python
def recalculate_concepts(self, concepts, lang=None): """ Recalculated given concepts for given users Args: concepts (dict): user id (int -> set of concepts to recalculate) lang(Optional[str]): language used to get items in all concepts (cached). Defaults to None, in that case are get items only in used concepts """ if len(concepts) == 0: return if lang is None: items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values())))) else: items = Concept.objects.get_concept_item_mapping(lang=lang) environment = get_environment() mastery_threshold = get_mastery_trashold() for user, concepts in concepts.items(): all_items = list(set(flatten([items[c] for c in concepts]))) answer_counts = environment.number_of_answers_more_items(all_items, user) correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user) predictions = dict(list(zip(all_items, get_predictive_model(). predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview())))) new_user_stats = [] stats_to_delete_condition = Q() for concept in concepts: answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate( time_spent=Sum("response_time"), sessions=Count("session", True), time_first=Min("time"), time_last=Max("time"), ) stats = { "answer_count": sum(answer_counts[i] for i in items[concept]), "correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]), "item_count": len(items[concept]), "practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]), "mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]), "prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]), "time_spent": answer_aggregates["time_spent"] / 1000, "session_count": answer_aggregates["sessions"], "time_first": answer_aggregates["time_first"].timestamp(), "time_last": answer_aggregates["time_last"].timestamp(), } 
stats_to_delete_condition |= Q(user=user, concept=concept) for stat_name, value in stats.items(): new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value)) self.filter(stats_to_delete_condition).delete() self.bulk_create(new_user_stats)
[ "def", "recalculate_concepts", "(", "self", ",", "concepts", ",", "lang", "=", "None", ")", ":", "if", "len", "(", "concepts", ")", "==", "0", ":", "return", "if", "lang", "is", "None", ":", "items", "=", "Concept", ".", "objects", ".", "get_concept_item_mapping", "(", "concepts", "=", "Concept", ".", "objects", ".", "filter", "(", "pk__in", "=", "set", "(", "flatten", "(", "concepts", ".", "values", "(", ")", ")", ")", ")", ")", "else", ":", "items", "=", "Concept", ".", "objects", ".", "get_concept_item_mapping", "(", "lang", "=", "lang", ")", "environment", "=", "get_environment", "(", ")", "mastery_threshold", "=", "get_mastery_trashold", "(", ")", "for", "user", ",", "concepts", "in", "concepts", ".", "items", "(", ")", ":", "all_items", "=", "list", "(", "set", "(", "flatten", "(", "[", "items", "[", "c", "]", "for", "c", "in", "concepts", "]", ")", ")", ")", "answer_counts", "=", "environment", ".", "number_of_answers_more_items", "(", "all_items", ",", "user", ")", "correct_answer_counts", "=", "environment", ".", "number_of_correct_answers_more_items", "(", "all_items", ",", "user", ")", "predictions", "=", "dict", "(", "list", "(", "zip", "(", "all_items", ",", "get_predictive_model", "(", ")", ".", "predict_more_items", "(", "environment", ",", "user", ",", "all_items", ",", "time", "=", "get_time_for_knowledge_overview", "(", ")", ")", ")", ")", ")", "new_user_stats", "=", "[", "]", "stats_to_delete_condition", "=", "Q", "(", ")", "for", "concept", "in", "concepts", ":", "answer_aggregates", "=", "Answer", ".", "objects", ".", "filter", "(", "user", "=", "user", ",", "item__in", "=", "items", "[", "concept", "]", ")", ".", "aggregate", "(", "time_spent", "=", "Sum", "(", "\"response_time\"", ")", ",", "sessions", "=", "Count", "(", "\"session\"", ",", "True", ")", ",", "time_first", "=", "Min", "(", "\"time\"", ")", ",", "time_last", "=", "Max", "(", "\"time\"", ")", ",", ")", "stats", "=", "{", "\"answer_count\"", ":", "sum", "(", "answer_counts", 
"[", "i", "]", "for", "i", "in", "items", "[", "concept", "]", ")", ",", "\"correct_answer_count\"", ":", "sum", "(", "correct_answer_counts", "[", "i", "]", "for", "i", "in", "items", "[", "concept", "]", ")", ",", "\"item_count\"", ":", "len", "(", "items", "[", "concept", "]", ")", ",", "\"practiced_items_count\"", ":", "sum", "(", "[", "answer_counts", "[", "i", "]", ">", "0", "for", "i", "in", "items", "[", "concept", "]", "]", ")", ",", "\"mastered_items_count\"", ":", "sum", "(", "[", "predictions", "[", "i", "]", ">=", "mastery_threshold", "for", "i", "in", "items", "[", "concept", "]", "]", ")", ",", "\"prediction\"", ":", "sum", "(", "[", "predictions", "[", "i", "]", "for", "i", "in", "items", "[", "concept", "]", "]", ")", "/", "len", "(", "items", "[", "concept", "]", ")", ",", "\"time_spent\"", ":", "answer_aggregates", "[", "\"time_spent\"", "]", "/", "1000", ",", "\"session_count\"", ":", "answer_aggregates", "[", "\"sessions\"", "]", ",", "\"time_first\"", ":", "answer_aggregates", "[", "\"time_first\"", "]", ".", "timestamp", "(", ")", ",", "\"time_last\"", ":", "answer_aggregates", "[", "\"time_last\"", "]", ".", "timestamp", "(", ")", ",", "}", "stats_to_delete_condition", "|=", "Q", "(", "user", "=", "user", ",", "concept", "=", "concept", ")", "for", "stat_name", ",", "value", "in", "stats", ".", "items", "(", ")", ":", "new_user_stats", ".", "append", "(", "UserStat", "(", "user_id", "=", "user", ",", "concept_id", "=", "concept", ",", "stat", "=", "stat_name", ",", "value", "=", "value", ")", ")", "self", ".", "filter", "(", "stats_to_delete_condition", ")", ".", "delete", "(", ")", "self", ".", "bulk_create", "(", "new_user_stats", ")" ]
Recalculated given concepts for given users Args: concepts (dict): user id (int -> set of concepts to recalculate) lang(Optional[str]): language used to get items in all concepts (cached). Defaults to None, in that case are get items only in used concepts
[ "Recalculated", "given", "concepts", "for", "given", "users" ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L250-L300
train
adaptive-learning/proso-apps
proso_concepts/models.py
UserStatManager.get_user_stats
def get_user_stats(self, users, lang=None, concepts=None, since=None, recalculate=True): """ Finds all UserStats of given concepts and users. Recompute UserStats if necessary Args: users (Optional[list of users] or [user]): list of primary keys of user or users Defaults to None meaning all users. lang (string): use only concepts witch the lang. Defaults to None meaning all languages. concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user_id -> dict (concept_identifier - > (stat_name -> value)) -- for more users dict: concept_identifier - > (stat_name -> value) -- for one user """ only_one_user = False if not isinstance(users, list): users = [users] only_one_user = True if recalculate: if lang is None: raise ValueError('Recalculation without lang is not supported.') time_start = time_lib() concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(users, lang, concepts) LOGGER.debug("user_stats - getting identifying concepts to recalculate: %ss", (time_lib() - time_start)) time_start = time_lib() self.recalculate_concepts(concepts_to_recalculate, lang) LOGGER.debug("user_stats - recalculating concepts: %ss", (time_lib() - time_start)) qs = self.prepare_related().filter(user__in=users, concept__active=True) if concepts is not None: qs = qs.filter(concept__in=concepts) if lang is not None: qs = qs.filter(concept__lang=lang) if since is not None: qs = qs.filter(time__gte=since) data = defaultdict(lambda: defaultdict(lambda: {})) for user_stat in qs: data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value if only_one_user: return data[users[0].pk if type(users[0]) == User else users[0]] return data
python
def get_user_stats(self, users, lang=None, concepts=None, since=None, recalculate=True): """ Finds all UserStats of given concepts and users. Recompute UserStats if necessary Args: users (Optional[list of users] or [user]): list of primary keys of user or users Defaults to None meaning all users. lang (string): use only concepts witch the lang. Defaults to None meaning all languages. concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user_id -> dict (concept_identifier - > (stat_name -> value)) -- for more users dict: concept_identifier - > (stat_name -> value) -- for one user """ only_one_user = False if not isinstance(users, list): users = [users] only_one_user = True if recalculate: if lang is None: raise ValueError('Recalculation without lang is not supported.') time_start = time_lib() concepts_to_recalculate = Concept.objects.get_concepts_to_recalculate(users, lang, concepts) LOGGER.debug("user_stats - getting identifying concepts to recalculate: %ss", (time_lib() - time_start)) time_start = time_lib() self.recalculate_concepts(concepts_to_recalculate, lang) LOGGER.debug("user_stats - recalculating concepts: %ss", (time_lib() - time_start)) qs = self.prepare_related().filter(user__in=users, concept__active=True) if concepts is not None: qs = qs.filter(concept__in=concepts) if lang is not None: qs = qs.filter(concept__lang=lang) if since is not None: qs = qs.filter(time__gte=since) data = defaultdict(lambda: defaultdict(lambda: {})) for user_stat in qs: data[user_stat.user_id][user_stat.concept.identifier][user_stat.stat] = user_stat.value if only_one_user: return data[users[0].pk if type(users[0]) == User else users[0]] return data
[ "def", "get_user_stats", "(", "self", ",", "users", ",", "lang", "=", "None", ",", "concepts", "=", "None", ",", "since", "=", "None", ",", "recalculate", "=", "True", ")", ":", "only_one_user", "=", "False", "if", "not", "isinstance", "(", "users", ",", "list", ")", ":", "users", "=", "[", "users", "]", "only_one_user", "=", "True", "if", "recalculate", ":", "if", "lang", "is", "None", ":", "raise", "ValueError", "(", "'Recalculation without lang is not supported.'", ")", "time_start", "=", "time_lib", "(", ")", "concepts_to_recalculate", "=", "Concept", ".", "objects", ".", "get_concepts_to_recalculate", "(", "users", ",", "lang", ",", "concepts", ")", "LOGGER", ".", "debug", "(", "\"user_stats - getting identifying concepts to recalculate: %ss\"", ",", "(", "time_lib", "(", ")", "-", "time_start", ")", ")", "time_start", "=", "time_lib", "(", ")", "self", ".", "recalculate_concepts", "(", "concepts_to_recalculate", ",", "lang", ")", "LOGGER", ".", "debug", "(", "\"user_stats - recalculating concepts: %ss\"", ",", "(", "time_lib", "(", ")", "-", "time_start", ")", ")", "qs", "=", "self", ".", "prepare_related", "(", ")", ".", "filter", "(", "user__in", "=", "users", ",", "concept__active", "=", "True", ")", "if", "concepts", "is", "not", "None", ":", "qs", "=", "qs", ".", "filter", "(", "concept__in", "=", "concepts", ")", "if", "lang", "is", "not", "None", ":", "qs", "=", "qs", ".", "filter", "(", "concept__lang", "=", "lang", ")", "if", "since", "is", "not", "None", ":", "qs", "=", "qs", ".", "filter", "(", "time__gte", "=", "since", ")", "data", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "lambda", ":", "{", "}", ")", ")", "for", "user_stat", "in", "qs", ":", "data", "[", "user_stat", ".", "user_id", "]", "[", "user_stat", ".", "concept", ".", "identifier", "]", "[", "user_stat", ".", "stat", "]", "=", "user_stat", ".", "value", "if", "only_one_user", ":", "return", "data", "[", "users", "[", "0", "]", ".", "pk", "if", "type", "(", "users", 
"[", "0", "]", ")", "==", "User", "else", "users", "[", "0", "]", "]", "return", "data" ]
Finds all UserStats of given concepts and users. Recompute UserStats if necessary Args: users (Optional[list of users] or [user]): list of primary keys of user or users Defaults to None meaning all users. lang (string): use only concepts witch the lang. Defaults to None meaning all languages. concepts (Optional[list of concepts]): list of primary keys of concepts or concepts Defaults to None meaning all concepts. Returns: dict: user_id -> dict (concept_identifier - > (stat_name -> value)) -- for more users dict: concept_identifier - > (stat_name -> value) -- for one user
[ "Finds", "all", "UserStats", "of", "given", "concepts", "and", "users", ".", "Recompute", "UserStats", "if", "necessary" ]
8278c72e498d6ef8d392cc47b48473f4ec037142
https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_concepts/models.py#L302-L346
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
DatabaseInterface.locked_execute
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False): '''We are lock-happy here but SQL performance is not currently an issue daemon-side.''' return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True)
python
def locked_execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False): '''We are lock-happy here but SQL performance is not currently an issue daemon-side.''' return self.execute(sql, parameters, cursorClass, quiet = quiet, locked = True)
[ "def", "locked_execute", "(", "self", ",", "sql", ",", "parameters", "=", "None", ",", "cursorClass", "=", "DictCursor", ",", "quiet", "=", "False", ")", ":", "return", "self", ".", "execute", "(", "sql", ",", "parameters", ",", "cursorClass", ",", "quiet", "=", "quiet", ",", "locked", "=", "True", ")" ]
We are lock-happy here but SQL performance is not currently an issue daemon-side.
[ "We", "are", "lock", "-", "happy", "here", "but", "SQL", "performance", "is", "not", "currently", "an", "issue", "daemon", "-", "side", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L109-L111
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
DatabaseInterface.execute
def execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False, locked = False, do_commit = True): """Execute SQL query. This uses DictCursor by default.""" i = 0 errcode = 0 caughte = None cursor = None if sql.find(";") != -1 or sql.find("\\G") != -1: # Catches some injections raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql) while i < self.numTries: i += 1 try: assert(not(self.connection) or not(self.connection.open)) self._get_connection(cursorClass) cursor = self.connection.cursor() if locked: cursor.execute(self.lockstring) self.locked = True if parameters: errcode = cursor.execute(sql, parameters) else: errcode = cursor.execute(sql) self.lastrowid = int(cursor.lastrowid) if do_commit and self.isInnoDB: self.connection.commit() results = cursor.fetchall() if locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() return results except MySQLdb.OperationalError, e: if cursor: if self.locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() caughte = str(e) errcode = e[0] continue except Exception, e: if cursor: if self.locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() caughte = str(e) traceback.print_exc() break sleep(0.2) if not quiet: sys.stderr.write("\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
python
def execute(self, sql, parameters = None, cursorClass = DictCursor, quiet = False, locked = False, do_commit = True): """Execute SQL query. This uses DictCursor by default.""" i = 0 errcode = 0 caughte = None cursor = None if sql.find(";") != -1 or sql.find("\\G") != -1: # Catches some injections raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % sql) while i < self.numTries: i += 1 try: assert(not(self.connection) or not(self.connection.open)) self._get_connection(cursorClass) cursor = self.connection.cursor() if locked: cursor.execute(self.lockstring) self.locked = True if parameters: errcode = cursor.execute(sql, parameters) else: errcode = cursor.execute(sql) self.lastrowid = int(cursor.lastrowid) if do_commit and self.isInnoDB: self.connection.commit() results = cursor.fetchall() if locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() return results except MySQLdb.OperationalError, e: if cursor: if self.locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() caughte = str(e) errcode = e[0] continue except Exception, e: if cursor: if self.locked: cursor.execute(self.unlockstring) self.locked = False cursor.close() self._close_connection() caughte = str(e) traceback.print_exc() break sleep(0.2) if not quiet: sys.stderr.write("\nSQL execution error in query %s at %s:" % (sql, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
[ "def", "execute", "(", "self", ",", "sql", ",", "parameters", "=", "None", ",", "cursorClass", "=", "DictCursor", ",", "quiet", "=", "False", ",", "locked", "=", "False", ",", "do_commit", "=", "True", ")", ":", "i", "=", "0", "errcode", "=", "0", "caughte", "=", "None", "cursor", "=", "None", "if", "sql", ".", "find", "(", "\";\"", ")", "!=", "-", "1", "or", "sql", ".", "find", "(", "\"\\\\G\"", ")", "!=", "-", "1", ":", "# Catches some injections", "raise", "Exception", "(", "\"The SQL command '%s' contains a semi-colon or \\\\G. This is a potential SQL injection.\"", "%", "sql", ")", "while", "i", "<", "self", ".", "numTries", ":", "i", "+=", "1", "try", ":", "assert", "(", "not", "(", "self", ".", "connection", ")", "or", "not", "(", "self", ".", "connection", ".", "open", ")", ")", "self", ".", "_get_connection", "(", "cursorClass", ")", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", ")", "if", "locked", ":", "cursor", ".", "execute", "(", "self", ".", "lockstring", ")", "self", ".", "locked", "=", "True", "if", "parameters", ":", "errcode", "=", "cursor", ".", "execute", "(", "sql", ",", "parameters", ")", "else", ":", "errcode", "=", "cursor", ".", "execute", "(", "sql", ")", "self", ".", "lastrowid", "=", "int", "(", "cursor", ".", "lastrowid", ")", "if", "do_commit", "and", "self", ".", "isInnoDB", ":", "self", ".", "connection", ".", "commit", "(", ")", "results", "=", "cursor", ".", "fetchall", "(", ")", "if", "locked", ":", "cursor", ".", "execute", "(", "self", ".", "unlockstring", ")", "self", ".", "locked", "=", "False", "cursor", ".", "close", "(", ")", "self", ".", "_close_connection", "(", ")", "return", "results", "except", "MySQLdb", ".", "OperationalError", ",", "e", ":", "if", "cursor", ":", "if", "self", ".", "locked", ":", "cursor", ".", "execute", "(", "self", ".", "unlockstring", ")", "self", ".", "locked", "=", "False", "cursor", ".", "close", "(", ")", "self", ".", "_close_connection", "(", ")", "caughte", "=", "str", 
"(", "e", ")", "errcode", "=", "e", "[", "0", "]", "continue", "except", "Exception", ",", "e", ":", "if", "cursor", ":", "if", "self", ".", "locked", ":", "cursor", ".", "execute", "(", "self", ".", "unlockstring", ")", "self", ".", "locked", "=", "False", "cursor", ".", "close", "(", ")", "self", ".", "_close_connection", "(", ")", "caughte", "=", "str", "(", "e", ")", "traceback", ".", "print_exc", "(", ")", "break", "sleep", "(", "0.2", ")", "if", "not", "quiet", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nSQL execution error in query %s at %s:\"", "%", "(", "sql", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\nErrorcode/Error: %d - '%s'.\\n\"", "%", "(", "errcode", ",", "str", "(", "caughte", ")", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "raise", "MySQLdb", ".", "OperationalError", "(", "caughte", ")" ]
Execute SQL query. This uses DictCursor by default.
[ "Execute", "SQL", "query", ".", "This", "uses", "DictCursor", "by", "default", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L116-L174
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
DatabaseInterface.insertDict
def insertDict(self, tblname, d, fields = None): '''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.''' if fields == None: fields = sorted(d.keys()) values = None try: SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, join(fields, ", "), join(['%s' for x in range(len(fields))], ',')) values = tuple([d[k] for k in fields]) self.locked_execute(SQL, parameters = values) except Exception, e: if SQL and values: sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nError: '%s'.\n" % (str(e))) sys.stderr.flush() raise Exception("Error occurred during database insertion: '%s'." % str(e))
python
def insertDict(self, tblname, d, fields = None): '''Simple function for inserting a dictionary whose keys match the fieldnames of tblname.''' if fields == None: fields = sorted(d.keys()) values = None try: SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, join(fields, ", "), join(['%s' for x in range(len(fields))], ',')) values = tuple([d[k] for k in fields]) self.locked_execute(SQL, parameters = values) except Exception, e: if SQL and values: sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nError: '%s'.\n" % (str(e))) sys.stderr.flush() raise Exception("Error occurred during database insertion: '%s'." % str(e))
[ "def", "insertDict", "(", "self", ",", "tblname", ",", "d", ",", "fields", "=", "None", ")", ":", "if", "fields", "==", "None", ":", "fields", "=", "sorted", "(", "d", ".", "keys", "(", ")", ")", "values", "=", "None", "try", ":", "SQL", "=", "'INSERT INTO %s (%s) VALUES (%s)'", "%", "(", "tblname", ",", "join", "(", "fields", ",", "\", \"", ")", ",", "join", "(", "[", "'%s'", "for", "x", "in", "range", "(", "len", "(", "fields", ")", ")", "]", ",", "','", ")", ")", "values", "=", "tuple", "(", "[", "d", "[", "k", "]", "for", "k", "in", "fields", "]", ")", "self", ".", "locked_execute", "(", "SQL", ",", "parameters", "=", "values", ")", "except", "Exception", ",", "e", ":", "if", "SQL", "and", "values", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nSQL execution error in query '%s' %% %s at %s:\"", "%", "(", "SQL", ",", "values", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\nError: '%s'.\\n\"", "%", "(", "str", "(", "e", ")", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "raise", "Exception", "(", "\"Error occurred during database insertion: '%s'.\"", "%", "str", "(", "e", ")", ")" ]
Simple function for inserting a dictionary whose keys match the fieldnames of tblname.
[ "Simple", "function", "for", "inserting", "a", "dictionary", "whose", "keys", "match", "the", "fieldnames", "of", "tblname", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L176-L191
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
RosettaDB.callproc
def callproc(self, procname, parameters = (), cursorClass = DictCursor, quiet = False): """Calls a MySQL stored procedure procname. This uses DictCursor by default.""" i = 0 errcode = 0 caughte = None while i < self.numTries: i += 1 try: cursor = self.connection.cursor(cursorClass) if type(parameters) != type(()): parameters = (parameters,) errcode = cursor.callproc(procname, parameters) results = cursor.fetchall() self.lastrowid = int(cursor.lastrowid) cursor.close() return results except MySQLdb.OperationalError, e: errcode = e[0] self.connection.ping() caughte = e continue except: traceback.print_exc() break if not quiet: sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
python
def callproc(self, procname, parameters = (), cursorClass = DictCursor, quiet = False): """Calls a MySQL stored procedure procname. This uses DictCursor by default.""" i = 0 errcode = 0 caughte = None while i < self.numTries: i += 1 try: cursor = self.connection.cursor(cursorClass) if type(parameters) != type(()): parameters = (parameters,) errcode = cursor.callproc(procname, parameters) results = cursor.fetchall() self.lastrowid = int(cursor.lastrowid) cursor.close() return results except MySQLdb.OperationalError, e: errcode = e[0] self.connection.ping() caughte = e continue except: traceback.print_exc() break if not quiet: sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S"))) sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
[ "def", "callproc", "(", "self", ",", "procname", ",", "parameters", "=", "(", ")", ",", "cursorClass", "=", "DictCursor", ",", "quiet", "=", "False", ")", ":", "i", "=", "0", "errcode", "=", "0", "caughte", "=", "None", "while", "i", "<", "self", ".", "numTries", ":", "i", "+=", "1", "try", ":", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", "cursorClass", ")", "if", "type", "(", "parameters", ")", "!=", "type", "(", "(", ")", ")", ":", "parameters", "=", "(", "parameters", ",", ")", "errcode", "=", "cursor", ".", "callproc", "(", "procname", ",", "parameters", ")", "results", "=", "cursor", ".", "fetchall", "(", ")", "self", ".", "lastrowid", "=", "int", "(", "cursor", ".", "lastrowid", ")", "cursor", ".", "close", "(", ")", "return", "results", "except", "MySQLdb", ".", "OperationalError", ",", "e", ":", "errcode", "=", "e", "[", "0", "]", "self", ".", "connection", ".", "ping", "(", ")", "caughte", "=", "e", "continue", "except", ":", "traceback", ".", "print_exc", "(", ")", "break", "if", "not", "quiet", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\nSQL execution error call stored procedure %s at %s:\"", "%", "(", "procname", ",", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\nErrorcode/Error: %d - '%s'.\\n\"", "%", "(", "errcode", ",", "str", "(", "caughte", ")", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "raise", "MySQLdb", ".", "OperationalError", "(", "caughte", ")" ]
Calls a MySQL stored procedure procname. This uses DictCursor by default.
[ "Calls", "a", "MySQL", "stored", "procedure", "procname", ".", "This", "uses", "DictCursor", "by", "default", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L625-L654
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
RosettaDB.execQuery
def execQuery(self, sql, parameters = None, cursorClass = MySQLdb.cursors.Cursor, InnoDB = False): """Execute SQL query.""" i = 0 errcode = 0 caughte = None while i < self.numTries: i += 1 try: cursor = self.connection.cursor(cursorClass) if parameters: errcode = cursor.execute(sql, parameters) else: errcode = cursor.execute(sql) if InnoDB: self.connection.commit() results = cursor.fetchall() self.lastrowid = int(cursor.lastrowid) cursor.close() return results except MySQLdb.OperationalError, e: errcode = e[0] # errcodes of 2006 or 2013 usually indicate a dropped connection # errcode 1100 is an error with table locking print(e) self.connection.ping(True) caughte = e continue except: traceback.print_exc() break sys.stderr.write("\nSQL execution error in query at %s:" % datetime.now().strftime("%Y-%m-%d %H:%M:%S")) sys.stderr.write("\n %s." % sql) sys.stderr.flush() sys.stderr.write("\nErrorcode: '%s'.\n" % (str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
python
def execQuery(self, sql, parameters = None, cursorClass = MySQLdb.cursors.Cursor, InnoDB = False): """Execute SQL query.""" i = 0 errcode = 0 caughte = None while i < self.numTries: i += 1 try: cursor = self.connection.cursor(cursorClass) if parameters: errcode = cursor.execute(sql, parameters) else: errcode = cursor.execute(sql) if InnoDB: self.connection.commit() results = cursor.fetchall() self.lastrowid = int(cursor.lastrowid) cursor.close() return results except MySQLdb.OperationalError, e: errcode = e[0] # errcodes of 2006 or 2013 usually indicate a dropped connection # errcode 1100 is an error with table locking print(e) self.connection.ping(True) caughte = e continue except: traceback.print_exc() break sys.stderr.write("\nSQL execution error in query at %s:" % datetime.now().strftime("%Y-%m-%d %H:%M:%S")) sys.stderr.write("\n %s." % sql) sys.stderr.flush() sys.stderr.write("\nErrorcode: '%s'.\n" % (str(caughte))) sys.stderr.flush() raise MySQLdb.OperationalError(caughte)
[ "def", "execQuery", "(", "self", ",", "sql", ",", "parameters", "=", "None", ",", "cursorClass", "=", "MySQLdb", ".", "cursors", ".", "Cursor", ",", "InnoDB", "=", "False", ")", ":", "i", "=", "0", "errcode", "=", "0", "caughte", "=", "None", "while", "i", "<", "self", ".", "numTries", ":", "i", "+=", "1", "try", ":", "cursor", "=", "self", ".", "connection", ".", "cursor", "(", "cursorClass", ")", "if", "parameters", ":", "errcode", "=", "cursor", ".", "execute", "(", "sql", ",", "parameters", ")", "else", ":", "errcode", "=", "cursor", ".", "execute", "(", "sql", ")", "if", "InnoDB", ":", "self", ".", "connection", ".", "commit", "(", ")", "results", "=", "cursor", ".", "fetchall", "(", ")", "self", ".", "lastrowid", "=", "int", "(", "cursor", ".", "lastrowid", ")", "cursor", ".", "close", "(", ")", "return", "results", "except", "MySQLdb", ".", "OperationalError", ",", "e", ":", "errcode", "=", "e", "[", "0", "]", "# errcodes of 2006 or 2013 usually indicate a dropped connection ", "# errcode 1100 is an error with table locking", "print", "(", "e", ")", "self", ".", "connection", ".", "ping", "(", "True", ")", "caughte", "=", "e", "continue", "except", ":", "traceback", ".", "print_exc", "(", ")", "break", "sys", ".", "stderr", ".", "write", "(", "\"\\nSQL execution error in query at %s:\"", "%", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\n %s.\"", "%", "sql", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\nErrorcode: '%s'.\\n\"", "%", "(", "str", "(", "caughte", ")", ")", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "raise", "MySQLdb", ".", "OperationalError", "(", "caughte", ")" ]
Execute SQL query.
[ "Execute", "SQL", "query", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L660-L696
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
RosettaDB._getFieldsInDB
def _getFieldsInDB(self, tablename): """get all the fields from a specific table""" SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename array_data = self.execQuery(SQL) return [x[0] for x in array_data]
python
def _getFieldsInDB(self, tablename): """get all the fields from a specific table""" SQL = 'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME="%s"' % tablename array_data = self.execQuery(SQL) return [x[0] for x in array_data]
[ "def", "_getFieldsInDB", "(", "self", ",", "tablename", ")", ":", "SQL", "=", "'SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.Columns where TABLE_NAME=\"%s\"'", "%", "tablename", "array_data", "=", "self", ".", "execQuery", "(", "SQL", ")", "return", "[", "x", "[", "0", "]", "for", "x", "in", "array_data", "]" ]
get all the fields from a specific table
[ "get", "all", "the", "fields", "from", "a", "specific", "table" ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L699-L705
train
ABI-Software/MeshParser
src/meshparser/stlparser/parser.py
_is_ascii_stl
def _is_ascii_stl(first_bytes): """ Determine if this is an ASCII based data stream, simply by checking the bytes for the word 'solid'. """ is_ascii = False if 'solid' in first_bytes.decode("utf-8").lower(): is_ascii = True return is_ascii
python
def _is_ascii_stl(first_bytes): """ Determine if this is an ASCII based data stream, simply by checking the bytes for the word 'solid'. """ is_ascii = False if 'solid' in first_bytes.decode("utf-8").lower(): is_ascii = True return is_ascii
[ "def", "_is_ascii_stl", "(", "first_bytes", ")", ":", "is_ascii", "=", "False", "if", "'solid'", "in", "first_bytes", ".", "decode", "(", "\"utf-8\"", ")", ".", "lower", "(", ")", ":", "is_ascii", "=", "True", "return", "is_ascii" ]
Determine if this is an ASCII based data stream, simply by checking the bytes for the word 'solid'.
[ "Determine", "if", "this", "is", "an", "ASCII", "based", "data", "stream", "simply", "by", "checking", "the", "bytes", "for", "the", "word", "solid", "." ]
08dc0ce7c44d0149b443261ff6d3708e28a928e7
https://github.com/ABI-Software/MeshParser/blob/08dc0ce7c44d0149b443261ff6d3708e28a928e7/src/meshparser/stlparser/parser.py#L108-L116
train
ABI-Software/MeshParser
src/meshparser/stlparser/parser.py
_is_binary_stl
def _is_binary_stl(data): """ Determine if this is a binary file through unpacking the first value after the 80th character and testing whether this value is greater than zero. This indicates the number of facets in the file. Could possibly extend this to check that the remaining number of bytes is divisible by 50. """ is_bin = False start_byte = 0 end_byte = 80 _ = data[start_byte:end_byte] # header data start_byte = end_byte end_byte += 4 facet_count = struct.unpack('I', data[start_byte:end_byte])[0] if facet_count > 0: is_bin = True return is_bin
python
def _is_binary_stl(data): """ Determine if this is a binary file through unpacking the first value after the 80th character and testing whether this value is greater than zero. This indicates the number of facets in the file. Could possibly extend this to check that the remaining number of bytes is divisible by 50. """ is_bin = False start_byte = 0 end_byte = 80 _ = data[start_byte:end_byte] # header data start_byte = end_byte end_byte += 4 facet_count = struct.unpack('I', data[start_byte:end_byte])[0] if facet_count > 0: is_bin = True return is_bin
[ "def", "_is_binary_stl", "(", "data", ")", ":", "is_bin", "=", "False", "start_byte", "=", "0", "end_byte", "=", "80", "_", "=", "data", "[", "start_byte", ":", "end_byte", "]", "# header data", "start_byte", "=", "end_byte", "end_byte", "+=", "4", "facet_count", "=", "struct", ".", "unpack", "(", "'I'", ",", "data", "[", "start_byte", ":", "end_byte", "]", ")", "[", "0", "]", "if", "facet_count", ">", "0", ":", "is_bin", "=", "True", "return", "is_bin" ]
Determine if this is a binary file through unpacking the first value after the 80th character and testing whether this value is greater than zero. This indicates the number of facets in the file. Could possibly extend this to check that the remaining number of bytes is divisible by 50.
[ "Determine", "if", "this", "is", "a", "binary", "file", "through", "unpacking", "the", "first", "value", "after", "the", "80th", "character", "and", "testing", "whether", "this", "value", "is", "greater", "than", "zero", ".", "This", "indicates", "the", "number", "of", "facets", "in", "the", "file", ".", "Could", "possibly", "extend", "this", "to", "check", "that", "the", "remaining", "number", "of", "bytes", "is", "divisible", "by", "50", "." ]
08dc0ce7c44d0149b443261ff6d3708e28a928e7
https://github.com/ABI-Software/MeshParser/blob/08dc0ce7c44d0149b443261ff6d3708e28a928e7/src/meshparser/stlparser/parser.py#L119-L135
train
mardix/Mocha
mocha/extras/mocha_db.py
StorageObjectType.process_bind_param
def process_bind_param(self, obj, dialect): """Get a flask_cloudy.Object and save it as a dict""" value = obj or {} if isinstance(obj, flask_cloudy.Object): value = {} for k in self.DEFAULT_KEYS: value[k] = getattr(obj, k) return super(self.__class__, self).process_bind_param(value, dialect)
python
def process_bind_param(self, obj, dialect): """Get a flask_cloudy.Object and save it as a dict""" value = obj or {} if isinstance(obj, flask_cloudy.Object): value = {} for k in self.DEFAULT_KEYS: value[k] = getattr(obj, k) return super(self.__class__, self).process_bind_param(value, dialect)
[ "def", "process_bind_param", "(", "self", ",", "obj", ",", "dialect", ")", ":", "value", "=", "obj", "or", "{", "}", "if", "isinstance", "(", "obj", ",", "flask_cloudy", ".", "Object", ")", ":", "value", "=", "{", "}", "for", "k", "in", "self", ".", "DEFAULT_KEYS", ":", "value", "[", "k", "]", "=", "getattr", "(", "obj", ",", "k", ")", "return", "super", "(", "self", ".", "__class__", ",", "self", ")", ".", "process_bind_param", "(", "value", ",", "dialect", ")" ]
Get a flask_cloudy.Object and save it as a dict
[ "Get", "a", "flask_cloudy", ".", "Object", "and", "save", "it", "as", "a", "dict" ]
bce481cb31a0972061dd99bc548701411dcb9de3
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/extras/mocha_db.py#L105-L113
train
assamite/creamas
creamas/util.py
create_tasks
def create_tasks(task_coro, addrs, *args, flatten=True, **kwargs): '''Create and schedule a set of asynchronous tasks. The function creates the tasks using a given list of agent addresses and wraps each of them in :func:`asyncio.ensure_future`. The ``*args`` and ``**kwargs`` are passed down to :func:`task_coro` when creating tasks for each address in :attr:`addrs`. Usage example for a method in a class derived from :class:`~creamas.mp.MultiEnvironment`:: async def my_method(self, *args, **kwargs): async def task(addr, *args, **kwargs): r_manager = await self.env.connect(addr) return await r_manager.my_method(*args, **kwargs) return await util.create_tasks(task, self.addrs, *args, **kwargs) :param task_coro: Coroutine which is used for each address in :attr:`addrs`. The coroutine should accept an agent address as the first parameter. :param list addrs: A list of agent addresses used as the first parameters of :func:`task_coro`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: An awaitable coroutine which returns the results of tasks as a list or as a flattened list ''' tasks = [] for agent_addr in addrs: task = asyncio.ensure_future(task_coro(agent_addr, *args, **kwargs)) tasks.append(task) return wait_tasks(tasks, flatten)
python
def create_tasks(task_coro, addrs, *args, flatten=True, **kwargs): '''Create and schedule a set of asynchronous tasks. The function creates the tasks using a given list of agent addresses and wraps each of them in :func:`asyncio.ensure_future`. The ``*args`` and ``**kwargs`` are passed down to :func:`task_coro` when creating tasks for each address in :attr:`addrs`. Usage example for a method in a class derived from :class:`~creamas.mp.MultiEnvironment`:: async def my_method(self, *args, **kwargs): async def task(addr, *args, **kwargs): r_manager = await self.env.connect(addr) return await r_manager.my_method(*args, **kwargs) return await util.create_tasks(task, self.addrs, *args, **kwargs) :param task_coro: Coroutine which is used for each address in :attr:`addrs`. The coroutine should accept an agent address as the first parameter. :param list addrs: A list of agent addresses used as the first parameters of :func:`task_coro`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: An awaitable coroutine which returns the results of tasks as a list or as a flattened list ''' tasks = [] for agent_addr in addrs: task = asyncio.ensure_future(task_coro(agent_addr, *args, **kwargs)) tasks.append(task) return wait_tasks(tasks, flatten)
[ "def", "create_tasks", "(", "task_coro", ",", "addrs", ",", "*", "args", ",", "flatten", "=", "True", ",", "*", "*", "kwargs", ")", ":", "tasks", "=", "[", "]", "for", "agent_addr", "in", "addrs", ":", "task", "=", "asyncio", ".", "ensure_future", "(", "task_coro", "(", "agent_addr", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "tasks", ".", "append", "(", "task", ")", "return", "wait_tasks", "(", "tasks", ",", "flatten", ")" ]
Create and schedule a set of asynchronous tasks. The function creates the tasks using a given list of agent addresses and wraps each of them in :func:`asyncio.ensure_future`. The ``*args`` and ``**kwargs`` are passed down to :func:`task_coro` when creating tasks for each address in :attr:`addrs`. Usage example for a method in a class derived from :class:`~creamas.mp.MultiEnvironment`:: async def my_method(self, *args, **kwargs): async def task(addr, *args, **kwargs): r_manager = await self.env.connect(addr) return await r_manager.my_method(*args, **kwargs) return await util.create_tasks(task, self.addrs, *args, **kwargs) :param task_coro: Coroutine which is used for each address in :attr:`addrs`. The coroutine should accept an agent address as the first parameter. :param list addrs: A list of agent addresses used as the first parameters of :func:`task_coro`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: An awaitable coroutine which returns the results of tasks as a list or as a flattened list
[ "Create", "and", "schedule", "a", "set", "of", "asynchronous", "tasks", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L12-L48
train
assamite/creamas
creamas/util.py
wait_tasks
async def wait_tasks(tasks, flatten=True): '''Gather a list of asynchronous tasks and wait their completion. :param list tasks: A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: The results of tasks as a list or as a flattened list ''' rets = await asyncio.gather(*tasks) if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)): rets = list(itertools.chain(*rets)) return rets
python
async def wait_tasks(tasks, flatten=True): '''Gather a list of asynchronous tasks and wait their completion. :param list tasks: A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: The results of tasks as a list or as a flattened list ''' rets = await asyncio.gather(*tasks) if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)): rets = list(itertools.chain(*rets)) return rets
[ "async", "def", "wait_tasks", "(", "tasks", ",", "flatten", "=", "True", ")", ":", "rets", "=", "await", "asyncio", ".", "gather", "(", "*", "tasks", ")", "if", "flatten", "and", "all", "(", "map", "(", "lambda", "x", ":", "hasattr", "(", "x", ",", "'__iter__'", ")", ",", "rets", ")", ")", ":", "rets", "=", "list", "(", "itertools", ".", "chain", "(", "*", "rets", ")", ")", "return", "rets" ]
Gather a list of asynchronous tasks and wait their completion. :param list tasks: A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`. :param bool flatten: If ``True`` the returned results are flattened into one list if the tasks return iterable objects. The parameter does nothing if all the results are not iterable. :returns: The results of tasks as a list or as a flattened list
[ "Gather", "a", "list", "of", "asynchronous", "tasks", "and", "wait", "their", "completion", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L51-L66
train
assamite/creamas
creamas/util.py
split_addrs
def split_addrs(addrs): '''Split addresses into dictionaries by hosts and ports. :param list addrs: A list of addresses. :returns: A dictionary of dictionaries, where ``dict[HOST][PORT]`` holds a list of all agent addresses in that environment. ''' splitted = {} for addr in addrs: host, port, _ = _addr_key(addr) if host not in splitted: splitted[host] = {} if port not in splitted[host]: splitted[host][port] = [] splitted[host][port].append(addr) return splitted
python
def split_addrs(addrs): '''Split addresses into dictionaries by hosts and ports. :param list addrs: A list of addresses. :returns: A dictionary of dictionaries, where ``dict[HOST][PORT]`` holds a list of all agent addresses in that environment. ''' splitted = {} for addr in addrs: host, port, _ = _addr_key(addr) if host not in splitted: splitted[host] = {} if port not in splitted[host]: splitted[host][port] = [] splitted[host][port].append(addr) return splitted
[ "def", "split_addrs", "(", "addrs", ")", ":", "splitted", "=", "{", "}", "for", "addr", "in", "addrs", ":", "host", ",", "port", ",", "_", "=", "_addr_key", "(", "addr", ")", "if", "host", "not", "in", "splitted", ":", "splitted", "[", "host", "]", "=", "{", "}", "if", "port", "not", "in", "splitted", "[", "host", "]", ":", "splitted", "[", "host", "]", "[", "port", "]", "=", "[", "]", "splitted", "[", "host", "]", "[", "port", "]", ".", "append", "(", "addr", ")", "return", "splitted" ]
Split addresses into dictionaries by hosts and ports. :param list addrs: A list of addresses. :returns: A dictionary of dictionaries, where ``dict[HOST][PORT]`` holds a list of all agent addresses in that environment.
[ "Split", "addresses", "into", "dictionaries", "by", "hosts", "and", "ports", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L173-L190
train
assamite/creamas
creamas/util.py
addrs2managers
def addrs2managers(addrs): '''Map agent addresses to their assumed managers. .. seealso:: :func:`creamas.util.get_manager` ''' mgrs = {} for addr in addrs: mgr_addr = get_manager(addr) if mgr_addr not in mgrs: mgrs[mgr_addr] = [] mgrs[mgr_addr].append(addr) return mgrs
python
def addrs2managers(addrs): '''Map agent addresses to their assumed managers. .. seealso:: :func:`creamas.util.get_manager` ''' mgrs = {} for addr in addrs: mgr_addr = get_manager(addr) if mgr_addr not in mgrs: mgrs[mgr_addr] = [] mgrs[mgr_addr].append(addr) return mgrs
[ "def", "addrs2managers", "(", "addrs", ")", ":", "mgrs", "=", "{", "}", "for", "addr", "in", "addrs", ":", "mgr_addr", "=", "get_manager", "(", "addr", ")", "if", "mgr_addr", "not", "in", "mgrs", ":", "mgrs", "[", "mgr_addr", "]", "=", "[", "]", "mgrs", "[", "mgr_addr", "]", ".", "append", "(", "addr", ")", "return", "mgrs" ]
Map agent addresses to their assumed managers. .. seealso:: :func:`creamas.util.get_manager`
[ "Map", "agent", "addresses", "to", "their", "assumed", "managers", "." ]
54dc3e31c97a3f938e58272f8ab80b6bcafeff58
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L199-L212
train
ronhanson/python-tbx
tbx/template.py
create_jinja_env
def create_jinja_env(template_path): """ Creates a Jinja2 environment with a specific template path. """ jinja_env = jinja2.Environment( loader=jinja2.FileSystemLoader(template_path), block_start_string='{%', block_end_string='%}', variable_start_string='${', variable_end_string='}', comment_start_string='{#', comment_end_string='#}', line_statement_prefix=None, line_comment_prefix=None, trim_blocks=True, lstrip_blocks=True, newline_sequence='\n' ) jinja_env.filters['regexreplace'] = regex_replace jinja_env.globals.update(uuidgen=uuidgen) return jinja_env
python
def create_jinja_env(template_path): """ Creates a Jinja2 environment with a specific template path. """ jinja_env = jinja2.Environment( loader=jinja2.FileSystemLoader(template_path), block_start_string='{%', block_end_string='%}', variable_start_string='${', variable_end_string='}', comment_start_string='{#', comment_end_string='#}', line_statement_prefix=None, line_comment_prefix=None, trim_blocks=True, lstrip_blocks=True, newline_sequence='\n' ) jinja_env.filters['regexreplace'] = regex_replace jinja_env.globals.update(uuidgen=uuidgen) return jinja_env
[ "def", "create_jinja_env", "(", "template_path", ")", ":", "jinja_env", "=", "jinja2", ".", "Environment", "(", "loader", "=", "jinja2", ".", "FileSystemLoader", "(", "template_path", ")", ",", "block_start_string", "=", "'{%'", ",", "block_end_string", "=", "'%}'", ",", "variable_start_string", "=", "'${'", ",", "variable_end_string", "=", "'}'", ",", "comment_start_string", "=", "'{#'", ",", "comment_end_string", "=", "'#}'", ",", "line_statement_prefix", "=", "None", ",", "line_comment_prefix", "=", "None", ",", "trim_blocks", "=", "True", ",", "lstrip_blocks", "=", "True", ",", "newline_sequence", "=", "'\\n'", ")", "jinja_env", ".", "filters", "[", "'regexreplace'", "]", "=", "regex_replace", "jinja_env", ".", "globals", ".", "update", "(", "uuidgen", "=", "uuidgen", ")", "return", "jinja_env" ]
Creates a Jinja2 environment with a specific template path.
[ "Creates", "a", "Jinja2", "environment", "with", "a", "specific", "template", "path", "." ]
87f72ae0cadecafbcd144f1e930181fba77f6b83
https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/template.py#L15-L35
train
urain39/KngetPy
knget/base.py
Knget._debug_info
def _debug_info(self): """Show a list of recently variables info. """ self._msg('DEBUG') self._msg2('WorkDir: {0}'.format(self._curdir)) self._msg2('Cookies: {0}'.format(self._session.cookies)) self._msg2('Headers: {0}'.format(self._session.headers)) self._msg2('Configs: {0}'.format(self._config)) self._msg2('Customs: {0}'.format(self._custom)) self._msg2('Account: {0}'.format(self._account))
python
def _debug_info(self): """Show a list of recently variables info. """ self._msg('DEBUG') self._msg2('WorkDir: {0}'.format(self._curdir)) self._msg2('Cookies: {0}'.format(self._session.cookies)) self._msg2('Headers: {0}'.format(self._session.headers)) self._msg2('Configs: {0}'.format(self._config)) self._msg2('Customs: {0}'.format(self._custom)) self._msg2('Account: {0}'.format(self._account))
[ "def", "_debug_info", "(", "self", ")", ":", "self", ".", "_msg", "(", "'DEBUG'", ")", "self", ".", "_msg2", "(", "'WorkDir: {0}'", ".", "format", "(", "self", ".", "_curdir", ")", ")", "self", ".", "_msg2", "(", "'Cookies: {0}'", ".", "format", "(", "self", ".", "_session", ".", "cookies", ")", ")", "self", ".", "_msg2", "(", "'Headers: {0}'", ".", "format", "(", "self", ".", "_session", ".", "headers", ")", ")", "self", ".", "_msg2", "(", "'Configs: {0}'", ".", "format", "(", "self", ".", "_config", ")", ")", "self", ".", "_msg2", "(", "'Customs: {0}'", ".", "format", "(", "self", ".", "_custom", ")", ")", "self", ".", "_msg2", "(", "'Account: {0}'", ".", "format", "(", "self", ".", "_account", ")", ")" ]
Show a list of recently variables info.
[ "Show", "a", "list", "of", "recently", "variables", "info", "." ]
00986bc16a497cee08aceb1c072f6187f152ee5d
https://github.com/urain39/KngetPy/blob/00986bc16a497cee08aceb1c072f6187f152ee5d/knget/base.py#L174-L183
train
urain39/KngetPy
knget/base.py
KngetCommand.register
def register(self, argtypes=r'M', help_msg=None): """Register a method to a command. NOTE: Method registered here is unbound method, e.g. registered `run` command -> `KngetShell.run` So we call it should add `self` at first. See also: KngetShell.execute() :param argtypes: a str of the command args type. M: Myself -> self S: String -> str I: Integer -> int H: placeHolder -> pass or anything :param help_msg: a short help string of commands. :return: a callable function or method. """ def format_args(method): def wrapped_method(*args, **kwargs): args_count = len(args) # + len(kwargs) argtypes_count = len(argtypes) placeholder_count = argtypes.count('H') + argtypes.count('h') # We check the placeholder count to select a way to # format the args. If placeholder is not equals zero # then we calculate the minimum args count at first. if placeholder_count: min_args_count = (argtypes_count - placeholder_count) # If args_count less than minimum args count or bigger # than argtypes count then we raise a Exception to exit. if args_count < min_args_count or args_count > argtypes_count: raise KngetError("args count is invalid.", reason='args count is {0}'.format(args_count)) # Otherwise, we just check if args count equals argtypes count elif args_count != argtypes_count: raise KngetError("args count is invalid", reason='args count is {0}'.format(args_count)) argv = [] # NOTE: We cannot modify the args. for i in range(args_count): if argtypes[i] in ('m', 'M'): argv.append(args[i]) elif argtypes[i] in ('i', 'I'): argv.append(int(args[i])) elif argtypes[i] in ('s', 'S'): argv.append(str(args[i])) elif argtypes[i] in ('h', 'H'): argv.append(args[i]) else: raise KngetError('argtype {0} is invalid!'.format(argtypes[i])) return method(*argv, **kwargs) # Keep the docs. wrapped_method.__doc__ = method.__doc__ self._commands[method.__name__] = ( wrapped_method, help_msg ) return wrapped_method # only format_args touched the method return format_args
python
def register(self, argtypes=r'M', help_msg=None): """Register a method to a command. NOTE: Method registered here is unbound method, e.g. registered `run` command -> `KngetShell.run` So we call it should add `self` at first. See also: KngetShell.execute() :param argtypes: a str of the command args type. M: Myself -> self S: String -> str I: Integer -> int H: placeHolder -> pass or anything :param help_msg: a short help string of commands. :return: a callable function or method. """ def format_args(method): def wrapped_method(*args, **kwargs): args_count = len(args) # + len(kwargs) argtypes_count = len(argtypes) placeholder_count = argtypes.count('H') + argtypes.count('h') # We check the placeholder count to select a way to # format the args. If placeholder is not equals zero # then we calculate the minimum args count at first. if placeholder_count: min_args_count = (argtypes_count - placeholder_count) # If args_count less than minimum args count or bigger # than argtypes count then we raise a Exception to exit. if args_count < min_args_count or args_count > argtypes_count: raise KngetError("args count is invalid.", reason='args count is {0}'.format(args_count)) # Otherwise, we just check if args count equals argtypes count elif args_count != argtypes_count: raise KngetError("args count is invalid", reason='args count is {0}'.format(args_count)) argv = [] # NOTE: We cannot modify the args. for i in range(args_count): if argtypes[i] in ('m', 'M'): argv.append(args[i]) elif argtypes[i] in ('i', 'I'): argv.append(int(args[i])) elif argtypes[i] in ('s', 'S'): argv.append(str(args[i])) elif argtypes[i] in ('h', 'H'): argv.append(args[i]) else: raise KngetError('argtype {0} is invalid!'.format(argtypes[i])) return method(*argv, **kwargs) # Keep the docs. wrapped_method.__doc__ = method.__doc__ self._commands[method.__name__] = ( wrapped_method, help_msg ) return wrapped_method # only format_args touched the method return format_args
[ "def", "register", "(", "self", ",", "argtypes", "=", "r'M'", ",", "help_msg", "=", "None", ")", ":", "def", "format_args", "(", "method", ")", ":", "def", "wrapped_method", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args_count", "=", "len", "(", "args", ")", "# + len(kwargs)", "argtypes_count", "=", "len", "(", "argtypes", ")", "placeholder_count", "=", "argtypes", ".", "count", "(", "'H'", ")", "+", "argtypes", ".", "count", "(", "'h'", ")", "# We check the placeholder count to select a way to", "# format the args. If placeholder is not equals zero", "# then we calculate the minimum args count at first.", "if", "placeholder_count", ":", "min_args_count", "=", "(", "argtypes_count", "-", "placeholder_count", ")", "# If args_count less than minimum args count or bigger", "# than argtypes count then we raise a Exception to exit.", "if", "args_count", "<", "min_args_count", "or", "args_count", ">", "argtypes_count", ":", "raise", "KngetError", "(", "\"args count is invalid.\"", ",", "reason", "=", "'args count is {0}'", ".", "format", "(", "args_count", ")", ")", "# Otherwise, we just check if args count equals argtypes count", "elif", "args_count", "!=", "argtypes_count", ":", "raise", "KngetError", "(", "\"args count is invalid\"", ",", "reason", "=", "'args count is {0}'", ".", "format", "(", "args_count", ")", ")", "argv", "=", "[", "]", "# NOTE: We cannot modify the args.", "for", "i", "in", "range", "(", "args_count", ")", ":", "if", "argtypes", "[", "i", "]", "in", "(", "'m'", ",", "'M'", ")", ":", "argv", ".", "append", "(", "args", "[", "i", "]", ")", "elif", "argtypes", "[", "i", "]", "in", "(", "'i'", ",", "'I'", ")", ":", "argv", ".", "append", "(", "int", "(", "args", "[", "i", "]", ")", ")", "elif", "argtypes", "[", "i", "]", "in", "(", "'s'", ",", "'S'", ")", ":", "argv", ".", "append", "(", "str", "(", "args", "[", "i", "]", ")", ")", "elif", "argtypes", "[", "i", "]", "in", "(", "'h'", ",", "'H'", ")", ":", "argv", ".", "append", "(", 
"args", "[", "i", "]", ")", "else", ":", "raise", "KngetError", "(", "'argtype {0} is invalid!'", ".", "format", "(", "argtypes", "[", "i", "]", ")", ")", "return", "method", "(", "*", "argv", ",", "*", "*", "kwargs", ")", "# Keep the docs.", "wrapped_method", ".", "__doc__", "=", "method", ".", "__doc__", "self", ".", "_commands", "[", "method", ".", "__name__", "]", "=", "(", "wrapped_method", ",", "help_msg", ")", "return", "wrapped_method", "# only format_args touched the method", "return", "format_args" ]
Register a method to a command. NOTE: Method registered here is unbound method, e.g. registered `run` command -> `KngetShell.run` So we call it should add `self` at first. See also: KngetShell.execute() :param argtypes: a str of the command args type. M: Myself -> self S: String -> str I: Integer -> int H: placeHolder -> pass or anything :param help_msg: a short help string of commands. :return: a callable function or method.
[ "Register", "a", "method", "to", "a", "command", "." ]
00986bc16a497cee08aceb1c072f6187f152ee5d
https://github.com/urain39/KngetPy/blob/00986bc16a497cee08aceb1c072f6187f152ee5d/knget/base.py#L497-L563
train
urain39/KngetPy
knget/base.py
KngetShell.run
def run(self, tags, begin, end=False): """Override method of class Knget """ if not end: end = begin # Type `H` doesn't cast anything, so we # manually cast the strings end to integer. super(KngetShell, self).run(tags, begin, int(end))
python
def run(self, tags, begin, end=False): """Override method of class Knget """ if not end: end = begin # Type `H` doesn't cast anything, so we # manually cast the strings end to integer. super(KngetShell, self).run(tags, begin, int(end))
[ "def", "run", "(", "self", ",", "tags", ",", "begin", ",", "end", "=", "False", ")", ":", "if", "not", "end", ":", "end", "=", "begin", "# Type `H` doesn't cast anything, so we", "# manually cast the strings end to integer.", "super", "(", "KngetShell", ",", "self", ")", ".", "run", "(", "tags", ",", "begin", ",", "int", "(", "end", ")", ")" ]
Override method of class Knget
[ "Override", "method", "of", "class", "Knget" ]
00986bc16a497cee08aceb1c072f6187f152ee5d
https://github.com/urain39/KngetPy/blob/00986bc16a497cee08aceb1c072f6187f152ee5d/knget/base.py#L584-L592
train
uogbuji/versa
tools/py/writer/md.py
write
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging): ''' models - input Versa models from which output is generated. Must be a sequence object, not an iterator ''' assert out is not None #Output stream required if not isinstance(models, list): models = [models] shorteners = shorteners or {} all_propertybase = [propertybase] if propertybase else [] all_propertybase.append(VERSA_BASEIRI) if any((base, propertybase, shorteners)): out.write('# @docheader\n\n* @iri:\n') if base: out.write(' * @base: {0}'.format(base)) #for k, v in shorteners: # out.write(' * @base: {0}'.format(base)) out.write('\n\n') origin_space = set() #base_out = models[0].base for m in models: origin_space.update(all_origins(m)) for o in origin_space: out.write('# {0}\n\n'.format(o)) for o_, r, t, a in m.match(o): abbr_r = abbreviate(r, all_propertybase) value_format(t) out.write('* {0}: {1}\n'.format(abbr_r, value_format(t))) for k, v in a.items(): abbr_k = abbreviate(k, all_propertybase) out.write(' * {0}: {1}\n'.format(k, value_format(v))) out.write('\n') return
python
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging): ''' models - input Versa models from which output is generated. Must be a sequence object, not an iterator ''' assert out is not None #Output stream required if not isinstance(models, list): models = [models] shorteners = shorteners or {} all_propertybase = [propertybase] if propertybase else [] all_propertybase.append(VERSA_BASEIRI) if any((base, propertybase, shorteners)): out.write('# @docheader\n\n* @iri:\n') if base: out.write(' * @base: {0}'.format(base)) #for k, v in shorteners: # out.write(' * @base: {0}'.format(base)) out.write('\n\n') origin_space = set() #base_out = models[0].base for m in models: origin_space.update(all_origins(m)) for o in origin_space: out.write('# {0}\n\n'.format(o)) for o_, r, t, a in m.match(o): abbr_r = abbreviate(r, all_propertybase) value_format(t) out.write('* {0}: {1}\n'.format(abbr_r, value_format(t))) for k, v in a.items(): abbr_k = abbreviate(k, all_propertybase) out.write(' * {0}: {1}\n'.format(k, value_format(v))) out.write('\n') return
[ "def", "write", "(", "models", ",", "out", "=", "None", ",", "base", "=", "None", ",", "propertybase", "=", "None", ",", "shorteners", "=", "None", ",", "logger", "=", "logging", ")", ":", "assert", "out", "is", "not", "None", "#Output stream required", "if", "not", "isinstance", "(", "models", ",", "list", ")", ":", "models", "=", "[", "models", "]", "shorteners", "=", "shorteners", "or", "{", "}", "all_propertybase", "=", "[", "propertybase", "]", "if", "propertybase", "else", "[", "]", "all_propertybase", ".", "append", "(", "VERSA_BASEIRI", ")", "if", "any", "(", "(", "base", ",", "propertybase", ",", "shorteners", ")", ")", ":", "out", ".", "write", "(", "'# @docheader\\n\\n* @iri:\\n'", ")", "if", "base", ":", "out", ".", "write", "(", "' * @base: {0}'", ".", "format", "(", "base", ")", ")", "#for k, v in shorteners:", "# out.write(' * @base: {0}'.format(base))", "out", ".", "write", "(", "'\\n\\n'", ")", "origin_space", "=", "set", "(", ")", "#base_out = models[0].base", "for", "m", "in", "models", ":", "origin_space", ".", "update", "(", "all_origins", "(", "m", ")", ")", "for", "o", "in", "origin_space", ":", "out", ".", "write", "(", "'# {0}\\n\\n'", ".", "format", "(", "o", ")", ")", "for", "o_", ",", "r", ",", "t", ",", "a", "in", "m", ".", "match", "(", "o", ")", ":", "abbr_r", "=", "abbreviate", "(", "r", ",", "all_propertybase", ")", "value_format", "(", "t", ")", "out", ".", "write", "(", "'* {0}: {1}\\n'", ".", "format", "(", "abbr_r", ",", "value_format", "(", "t", ")", ")", ")", "for", "k", ",", "v", "in", "a", ".", "items", "(", ")", ":", "abbr_k", "=", "abbreviate", "(", "k", ",", "all_propertybase", ")", "out", ".", "write", "(", "' * {0}: {1}\\n'", ".", "format", "(", "k", ",", "value_format", "(", "v", ")", ")", ")", "out", ".", "write", "(", "'\\n'", ")", "return" ]
models - input Versa models from which output is generated. Must be a sequence object, not an iterator
[ "models", "-", "input", "Versa", "models", "from", "which", "output", "is", "generated", ".", "Must", "be", "a", "sequence", "object", "not", "an", "iterator" ]
f092ffc7ed363a5b170890955168500f32de0dd5
https://github.com/uogbuji/versa/blob/f092ffc7ed363a5b170890955168500f32de0dd5/tools/py/writer/md.py#L42-L79
train
aacanakin/glim
glim/app.py
Glim.register_routes
def register_routes(self): """ Function creates instances of controllers, adds into bottle routes """ routes = self.flatten_urls(self.urls) self.controllers = {} controller_names = set() for route in routes: cname = route['endpoint'].split('.')[0] controller_names.add(cname) for cname in controller_names: attr = getattr(self.mcontrollers, cname) instance = attr(request, response) self.controllers[cname] = instance for route in routes: cname, aname = route['endpoint'].split('.') action = getattr(self.controllers[cname], aname) self.wsgi.route(route['url'], route['methods'], action)
python
def register_routes(self): """ Function creates instances of controllers, adds into bottle routes """ routes = self.flatten_urls(self.urls) self.controllers = {} controller_names = set() for route in routes: cname = route['endpoint'].split('.')[0] controller_names.add(cname) for cname in controller_names: attr = getattr(self.mcontrollers, cname) instance = attr(request, response) self.controllers[cname] = instance for route in routes: cname, aname = route['endpoint'].split('.') action = getattr(self.controllers[cname], aname) self.wsgi.route(route['url'], route['methods'], action)
[ "def", "register_routes", "(", "self", ")", ":", "routes", "=", "self", ".", "flatten_urls", "(", "self", ".", "urls", ")", "self", ".", "controllers", "=", "{", "}", "controller_names", "=", "set", "(", ")", "for", "route", "in", "routes", ":", "cname", "=", "route", "[", "'endpoint'", "]", ".", "split", "(", "'.'", ")", "[", "0", "]", "controller_names", ".", "add", "(", "cname", ")", "for", "cname", "in", "controller_names", ":", "attr", "=", "getattr", "(", "self", ".", "mcontrollers", ",", "cname", ")", "instance", "=", "attr", "(", "request", ",", "response", ")", "self", ".", "controllers", "[", "cname", "]", "=", "instance", "for", "route", "in", "routes", ":", "cname", ",", "aname", "=", "route", "[", "'endpoint'", "]", ".", "split", "(", "'.'", ")", "action", "=", "getattr", "(", "self", ".", "controllers", "[", "cname", "]", ",", "aname", ")", "self", ".", "wsgi", ".", "route", "(", "route", "[", "'url'", "]", ",", "route", "[", "'methods'", "]", ",", "action", ")" ]
Function creates instances of controllers, adds into bottle routes
[ "Function", "creates", "instances", "of", "controllers", "adds", "into", "bottle", "routes" ]
71a20ac149a1292c0d6c1dc7414985ea51854f7a
https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/app.py#L62-L82
train
aacanakin/glim
glim/app.py
Glim.register_extensions
def register_extensions(self): """ Function registers extensions given extensions list Args ---- extensions (list) : the extensions dict on app.config.<env> Raises ------ Exception: Raises exception when extension can't be loaded properly. """ try: for extension, config in self.config['extensions'].items(): extension_bstr = '' # gather package name if exists extension_pieces = extension.split('.') # if the extensions is not in glim_extensions package if len(extension_pieces) > 1: extension_bstr = '.'.join(extension_pieces) else: # if the extension is in glim_extensions package extension_bstr = 'glim_extensions.%s' % extension_pieces[0] extension_module = import_module(extension_bstr) if extension_module: extension_startstr = '%s.%s' % (extension_bstr, 'start') extension_start = import_module(extension_startstr, pass_errors=True) extension_cmdsstr = '%s.%s' % (extension_bstr, 'commands') extension_cmds = import_module(extension_cmdsstr, pass_errors=True) if extension_start is not None: before = extension_start.before before(config) if extension_cmds is not None: if self.commandadapter is not None: self.commandadapter.register_extension(extension_cmds, extension_pieces[0]) else: GlimLog.error('Extension %s could not be loaded' % extension) except Exception as e: GlimLog.error(traceback.format_exc())
python
def register_extensions(self): """ Function registers extensions given extensions list Args ---- extensions (list) : the extensions dict on app.config.<env> Raises ------ Exception: Raises exception when extension can't be loaded properly. """ try: for extension, config in self.config['extensions'].items(): extension_bstr = '' # gather package name if exists extension_pieces = extension.split('.') # if the extensions is not in glim_extensions package if len(extension_pieces) > 1: extension_bstr = '.'.join(extension_pieces) else: # if the extension is in glim_extensions package extension_bstr = 'glim_extensions.%s' % extension_pieces[0] extension_module = import_module(extension_bstr) if extension_module: extension_startstr = '%s.%s' % (extension_bstr, 'start') extension_start = import_module(extension_startstr, pass_errors=True) extension_cmdsstr = '%s.%s' % (extension_bstr, 'commands') extension_cmds = import_module(extension_cmdsstr, pass_errors=True) if extension_start is not None: before = extension_start.before before(config) if extension_cmds is not None: if self.commandadapter is not None: self.commandadapter.register_extension(extension_cmds, extension_pieces[0]) else: GlimLog.error('Extension %s could not be loaded' % extension) except Exception as e: GlimLog.error(traceback.format_exc())
[ "def", "register_extensions", "(", "self", ")", ":", "try", ":", "for", "extension", ",", "config", "in", "self", ".", "config", "[", "'extensions'", "]", ".", "items", "(", ")", ":", "extension_bstr", "=", "''", "# gather package name if exists", "extension_pieces", "=", "extension", ".", "split", "(", "'.'", ")", "# if the extensions is not in glim_extensions package", "if", "len", "(", "extension_pieces", ")", ">", "1", ":", "extension_bstr", "=", "'.'", ".", "join", "(", "extension_pieces", ")", "else", ":", "# if the extension is in glim_extensions package", "extension_bstr", "=", "'glim_extensions.%s'", "%", "extension_pieces", "[", "0", "]", "extension_module", "=", "import_module", "(", "extension_bstr", ")", "if", "extension_module", ":", "extension_startstr", "=", "'%s.%s'", "%", "(", "extension_bstr", ",", "'start'", ")", "extension_start", "=", "import_module", "(", "extension_startstr", ",", "pass_errors", "=", "True", ")", "extension_cmdsstr", "=", "'%s.%s'", "%", "(", "extension_bstr", ",", "'commands'", ")", "extension_cmds", "=", "import_module", "(", "extension_cmdsstr", ",", "pass_errors", "=", "True", ")", "if", "extension_start", "is", "not", "None", ":", "before", "=", "extension_start", ".", "before", "before", "(", "config", ")", "if", "extension_cmds", "is", "not", "None", ":", "if", "self", ".", "commandadapter", "is", "not", "None", ":", "self", ".", "commandadapter", ".", "register_extension", "(", "extension_cmds", ",", "extension_pieces", "[", "0", "]", ")", "else", ":", "GlimLog", ".", "error", "(", "'Extension %s could not be loaded'", "%", "extension", ")", "except", "Exception", "as", "e", ":", "GlimLog", ".", "error", "(", "traceback", ".", "format_exc", "(", ")", ")" ]
Function registers extensions given extensions list Args ---- extensions (list) : the extensions dict on app.config.<env> Raises ------ Exception: Raises exception when extension can't be loaded properly.
[ "Function", "registers", "extensions", "given", "extensions", "list" ]
71a20ac149a1292c0d6c1dc7414985ea51854f7a
https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/app.py#L84-L131
train
aacanakin/glim
glim/app.py
Glim.register_ssl_context
def register_ssl_context(self): """ Function detects ssl context """ if not empty('ssl', self.config['app']): self.ssl_context = self.config['app']['ssl'] else: self.ssl_context = None
python
def register_ssl_context(self): """ Function detects ssl context """ if not empty('ssl', self.config['app']): self.ssl_context = self.config['app']['ssl'] else: self.ssl_context = None
[ "def", "register_ssl_context", "(", "self", ")", ":", "if", "not", "empty", "(", "'ssl'", ",", "self", ".", "config", "[", "'app'", "]", ")", ":", "self", ".", "ssl_context", "=", "self", ".", "config", "[", "'app'", "]", "[", "'ssl'", "]", "else", ":", "self", ".", "ssl_context", "=", "None" ]
Function detects ssl context
[ "Function", "detects", "ssl", "context" ]
71a20ac149a1292c0d6c1dc7414985ea51854f7a
https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/app.py#L156-L163
train
aacanakin/glim
glim/app.py
Glim.flatten_urls
def flatten_urls(self, urls): """ Function flatten urls for route grouping feature of glim. Args ---- urls (dict): a dict of url definitions. current_key (unknown type): a dict or a string marking the current key that is used for recursive calls. ruleset (dict): the ruleset that is eventually returned to dispatcher. Returns ------- ruleset (list): a list of ruleset dict with endpoint, url and method functions """ available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY'] ruleset = [] for route, endpoint in urls.items(): route_pieces = route.split(' ') try: methods = url = None if len(route_pieces) > 1: methods = [route_pieces[0]] url = route_pieces[1] else: methods = available_methods url = route_pieces[0] endpoint_pieces = endpoint.split('.') if len(endpoint_pieces) > 1: rule = {'url': url, 'endpoint': endpoint, 'methods': methods} ruleset.append(rule) else: for method in available_methods: rule = { 'url': url, 'endpoint': '%s.%s' % (endpoint, method.lower()), 'methods': [method] } ruleset.append(rule) except Exception as e: raise InvalidRouteDefinitionError() return ruleset
python
def flatten_urls(self, urls): """ Function flatten urls for route grouping feature of glim. Args ---- urls (dict): a dict of url definitions. current_key (unknown type): a dict or a string marking the current key that is used for recursive calls. ruleset (dict): the ruleset that is eventually returned to dispatcher. Returns ------- ruleset (list): a list of ruleset dict with endpoint, url and method functions """ available_methods = ['POST', 'PUT', 'OPTIONS', 'GET', 'DELETE', 'TRACE', 'COPY'] ruleset = [] for route, endpoint in urls.items(): route_pieces = route.split(' ') try: methods = url = None if len(route_pieces) > 1: methods = [route_pieces[0]] url = route_pieces[1] else: methods = available_methods url = route_pieces[0] endpoint_pieces = endpoint.split('.') if len(endpoint_pieces) > 1: rule = {'url': url, 'endpoint': endpoint, 'methods': methods} ruleset.append(rule) else: for method in available_methods: rule = { 'url': url, 'endpoint': '%s.%s' % (endpoint, method.lower()), 'methods': [method] } ruleset.append(rule) except Exception as e: raise InvalidRouteDefinitionError() return ruleset
[ "def", "flatten_urls", "(", "self", ",", "urls", ")", ":", "available_methods", "=", "[", "'POST'", ",", "'PUT'", ",", "'OPTIONS'", ",", "'GET'", ",", "'DELETE'", ",", "'TRACE'", ",", "'COPY'", "]", "ruleset", "=", "[", "]", "for", "route", ",", "endpoint", "in", "urls", ".", "items", "(", ")", ":", "route_pieces", "=", "route", ".", "split", "(", "' '", ")", "try", ":", "methods", "=", "url", "=", "None", "if", "len", "(", "route_pieces", ")", ">", "1", ":", "methods", "=", "[", "route_pieces", "[", "0", "]", "]", "url", "=", "route_pieces", "[", "1", "]", "else", ":", "methods", "=", "available_methods", "url", "=", "route_pieces", "[", "0", "]", "endpoint_pieces", "=", "endpoint", ".", "split", "(", "'.'", ")", "if", "len", "(", "endpoint_pieces", ")", ">", "1", ":", "rule", "=", "{", "'url'", ":", "url", ",", "'endpoint'", ":", "endpoint", ",", "'methods'", ":", "methods", "}", "ruleset", ".", "append", "(", "rule", ")", "else", ":", "for", "method", "in", "available_methods", ":", "rule", "=", "{", "'url'", ":", "url", ",", "'endpoint'", ":", "'%s.%s'", "%", "(", "endpoint", ",", "method", ".", "lower", "(", ")", ")", ",", "'methods'", ":", "[", "method", "]", "}", "ruleset", ".", "append", "(", "rule", ")", "except", "Exception", "as", "e", ":", "raise", "InvalidRouteDefinitionError", "(", ")", "return", "ruleset" ]
Function flatten urls for route grouping feature of glim. Args ---- urls (dict): a dict of url definitions. current_key (unknown type): a dict or a string marking the current key that is used for recursive calls. ruleset (dict): the ruleset that is eventually returned to dispatcher. Returns ------- ruleset (list): a list of ruleset dict with endpoint, url and method functions
[ "Function", "flatten", "urls", "for", "route", "grouping", "feature", "of", "glim", "." ]
71a20ac149a1292c0d6c1dc7414985ea51854f7a
https://github.com/aacanakin/glim/blob/71a20ac149a1292c0d6c1dc7414985ea51854f7a/glim/app.py#L165-L209
train
TUNE-Archive/freight_forwarder
freight_forwarder/cli/info.py
InfoCommand._info
def _info(self, args, **extra_args): """Print freight forwarder info to the user. """ if not isinstance(args, argparse.Namespace): raise logger.error(Exception("args should of an instance of argparse.Namespace")) logger.info("Freight Forwarder: {0}".format(VERSION)) logger.info("docker-py: {0}".format(docker_py_version)) logger.info("Docker Api: {0}".format(DOCKER_API_VERSION)) logger.info("{0} version: {1}".format(platform.python_implementation(), platform.python_version()))
python
def _info(self, args, **extra_args): """Print freight forwarder info to the user. """ if not isinstance(args, argparse.Namespace): raise logger.error(Exception("args should of an instance of argparse.Namespace")) logger.info("Freight Forwarder: {0}".format(VERSION)) logger.info("docker-py: {0}".format(docker_py_version)) logger.info("Docker Api: {0}".format(DOCKER_API_VERSION)) logger.info("{0} version: {1}".format(platform.python_implementation(), platform.python_version()))
[ "def", "_info", "(", "self", ",", "args", ",", "*", "*", "extra_args", ")", ":", "if", "not", "isinstance", "(", "args", ",", "argparse", ".", "Namespace", ")", ":", "raise", "logger", ".", "error", "(", "Exception", "(", "\"args should of an instance of argparse.Namespace\"", ")", ")", "logger", ".", "info", "(", "\"Freight Forwarder: {0}\"", ".", "format", "(", "VERSION", ")", ")", "logger", ".", "info", "(", "\"docker-py: {0}\"", ".", "format", "(", "docker_py_version", ")", ")", "logger", ".", "info", "(", "\"Docker Api: {0}\"", ".", "format", "(", "DOCKER_API_VERSION", ")", ")", "logger", ".", "info", "(", "\"{0} version: {1}\"", ".", "format", "(", "platform", ".", "python_implementation", "(", ")", ",", "platform", ".", "python_version", "(", ")", ")", ")" ]
Print freight forwarder info to the user.
[ "Print", "freight", "forwarder", "info", "to", "the", "user", "." ]
6ea4a49f474ec04abb8bb81b175c774a16b5312f
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/cli/info.py#L38-L47
train
20c/pluginmgr
pluginmgr/__init__.py
PluginManager.register
def register(self, typ): """ register a plugin """ # should be able to combine class/instance namespace, and inherit from either # would need to store meta or rely on copy ctor def _func(cls): if typ in self._class: raise ValueError("duplicated type name '%s'" % typ) cls.plugin_type = typ self._class[typ] = cls return cls return _func
python
def register(self, typ): """ register a plugin """ # should be able to combine class/instance namespace, and inherit from either # would need to store meta or rely on copy ctor def _func(cls): if typ in self._class: raise ValueError("duplicated type name '%s'" % typ) cls.plugin_type = typ self._class[typ] = cls return cls return _func
[ "def", "register", "(", "self", ",", "typ", ")", ":", "# should be able to combine class/instance namespace, and inherit from either", "# would need to store meta or rely on copy ctor", "def", "_func", "(", "cls", ")", ":", "if", "typ", "in", "self", ".", "_class", ":", "raise", "ValueError", "(", "\"duplicated type name '%s'\"", "%", "typ", ")", "cls", ".", "plugin_type", "=", "typ", "self", ".", "_class", "[", "typ", "]", "=", "cls", "return", "cls", "return", "_func" ]
register a plugin
[ "register", "a", "plugin" ]
ea19edab6d145f539641c304745acd4ab2c67eb7
https://github.com/20c/pluginmgr/blob/ea19edab6d145f539641c304745acd4ab2c67eb7/pluginmgr/__init__.py#L167-L177
train
20c/pluginmgr
pluginmgr/__init__.py
PluginManager.get_plugin_class
def get_plugin_class(self, typ): """ get class by name """ if typ in self._class: return self._class[typ] # try to import by same name try: importlib.import_module("%s.%s" % (self.namespace, typ)) if typ in self._class: return self._class[typ] except ImportError as e: self.log.debug("ImportError " + str(e)) raise ValueError("unknown plugin '%s'" % typ)
python
def get_plugin_class(self, typ): """ get class by name """ if typ in self._class: return self._class[typ] # try to import by same name try: importlib.import_module("%s.%s" % (self.namespace, typ)) if typ in self._class: return self._class[typ] except ImportError as e: self.log.debug("ImportError " + str(e)) raise ValueError("unknown plugin '%s'" % typ)
[ "def", "get_plugin_class", "(", "self", ",", "typ", ")", ":", "if", "typ", "in", "self", ".", "_class", ":", "return", "self", ".", "_class", "[", "typ", "]", "# try to import by same name", "try", ":", "importlib", ".", "import_module", "(", "\"%s.%s\"", "%", "(", "self", ".", "namespace", ",", "typ", ")", ")", "if", "typ", "in", "self", ".", "_class", ":", "return", "self", ".", "_class", "[", "typ", "]", "except", "ImportError", "as", "e", ":", "self", ".", "log", ".", "debug", "(", "\"ImportError \"", "+", "str", "(", "e", ")", ")", "raise", "ValueError", "(", "\"unknown plugin '%s'\"", "%", "typ", ")" ]
get class by name
[ "get", "class", "by", "name" ]
ea19edab6d145f539641c304745acd4ab2c67eb7
https://github.com/20c/pluginmgr/blob/ea19edab6d145f539641c304745acd4ab2c67eb7/pluginmgr/__init__.py#L186-L202
train
projectshift/shift-boiler
boiler/errors.py
register_error_handler
def register_error_handler(app, handler=None): """ Register error handler Registers an exception handler on the app instance for every type of exception code werkzeug is aware about. :param app: flask.Flask - flask application instance :param handler: function - the handler :return: None """ if not handler: handler = default_error_handler for code in exceptions.default_exceptions.keys(): app.register_error_handler(code, handler)
python
def register_error_handler(app, handler=None): """ Register error handler Registers an exception handler on the app instance for every type of exception code werkzeug is aware about. :param app: flask.Flask - flask application instance :param handler: function - the handler :return: None """ if not handler: handler = default_error_handler for code in exceptions.default_exceptions.keys(): app.register_error_handler(code, handler)
[ "def", "register_error_handler", "(", "app", ",", "handler", "=", "None", ")", ":", "if", "not", "handler", ":", "handler", "=", "default_error_handler", "for", "code", "in", "exceptions", ".", "default_exceptions", ".", "keys", "(", ")", ":", "app", ".", "register_error_handler", "(", "code", ",", "handler", ")" ]
Register error handler Registers an exception handler on the app instance for every type of exception code werkzeug is aware about. :param app: flask.Flask - flask application instance :param handler: function - the handler :return: None
[ "Register", "error", "handler", "Registers", "an", "exception", "handler", "on", "the", "app", "instance", "for", "every", "type", "of", "exception", "code", "werkzeug", "is", "aware", "about", "." ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/errors.py#L6-L20
train
projectshift/shift-boiler
boiler/errors.py
default_error_handler
def default_error_handler(exception): """ Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string """ http_exception = isinstance(exception, exceptions.HTTPException) code = exception.code if http_exception else 500 # log exceptions only (app debug should be off) if code == 500: current_app.logger.error(exception) # jsonify error if json requested via accept header if has_app_context() and has_request_context(): headers = request.headers if 'Accept' in headers and headers['Accept'] == 'application/json': return json_error_handler(exception) # otherwise render template return template_error_handler(exception)
python
def default_error_handler(exception): """ Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string """ http_exception = isinstance(exception, exceptions.HTTPException) code = exception.code if http_exception else 500 # log exceptions only (app debug should be off) if code == 500: current_app.logger.error(exception) # jsonify error if json requested via accept header if has_app_context() and has_request_context(): headers = request.headers if 'Accept' in headers and headers['Accept'] == 'application/json': return json_error_handler(exception) # otherwise render template return template_error_handler(exception)
[ "def", "default_error_handler", "(", "exception", ")", ":", "http_exception", "=", "isinstance", "(", "exception", ",", "exceptions", ".", "HTTPException", ")", "code", "=", "exception", ".", "code", "if", "http_exception", "else", "500", "# log exceptions only (app debug should be off)", "if", "code", "==", "500", ":", "current_app", ".", "logger", ".", "error", "(", "exception", ")", "# jsonify error if json requested via accept header", "if", "has_app_context", "(", ")", "and", "has_request_context", "(", ")", ":", "headers", "=", "request", ".", "headers", "if", "'Accept'", "in", "headers", "and", "headers", "[", "'Accept'", "]", "==", "'application/json'", ":", "return", "json_error_handler", "(", "exception", ")", "# otherwise render template", "return", "template_error_handler", "(", "exception", ")" ]
Default error handler Will display an error page with the corresponding error code from template directory, for example, a not found will load a 404.html etc. Will first look in userland app templates and if not found, fallback to boiler templates to display a default page. :param exception: Exception :return: string
[ "Default", "error", "handler", "Will", "display", "an", "error", "page", "with", "the", "corresponding", "error", "code", "from", "template", "directory", "for", "example", "a", "not", "found", "will", "load", "a", "404", ".", "html", "etc", ".", "Will", "first", "look", "in", "userland", "app", "templates", "and", "if", "not", "found", "fallback", "to", "boiler", "templates", "to", "display", "a", "default", "page", "." ]
8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/errors.py#L23-L48
train
ethan92429/onshapepy
onshapepy/core/onshape.py
Onshape._make_nonce
def _make_nonce(self): ''' Generate a unique ID for the request, 25 chars in length Returns: - str: Cryptographic nonce ''' chars = string.digits + string.ascii_letters nonce = ''.join(random.choice(chars) for i in range(25)) if self._logging: utils.log('nonce created: %s' % nonce) return nonce
python
def _make_nonce(self): ''' Generate a unique ID for the request, 25 chars in length Returns: - str: Cryptographic nonce ''' chars = string.digits + string.ascii_letters nonce = ''.join(random.choice(chars) for i in range(25)) if self._logging: utils.log('nonce created: %s' % nonce) return nonce
[ "def", "_make_nonce", "(", "self", ")", ":", "chars", "=", "string", ".", "digits", "+", "string", ".", "ascii_letters", "nonce", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "chars", ")", "for", "i", "in", "range", "(", "25", ")", ")", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "'nonce created: %s'", "%", "nonce", ")", "return", "nonce" ]
Generate a unique ID for the request, 25 chars in length Returns: - str: Cryptographic nonce
[ "Generate", "a", "unique", "ID", "for", "the", "request", "25", "chars", "in", "length" ]
61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df
https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/onshape.py#L60-L74
train
ethan92429/onshapepy
onshapepy/core/onshape.py
Onshape._make_auth
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'): ''' Create the request signature to authenticate Args: - method (str): HTTP method - date (str): HTTP date header string - nonce (str): Cryptographic nonce - path (str): URL pathname - query (dict, default={}): URL query string in key-value pairs - ctype (str, default='application/json'): HTTP Content-Type ''' query = urlencode(query) hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path + '\n' + query + '\n').lower().encode('utf-8') signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest()) auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8') if self._logging: utils.log({ 'query': query, 'hmac_str': hmac_str, 'signature': signature, 'auth': auth }) return auth
python
def _make_auth(self, method, date, nonce, path, query={}, ctype='application/json'): ''' Create the request signature to authenticate Args: - method (str): HTTP method - date (str): HTTP date header string - nonce (str): Cryptographic nonce - path (str): URL pathname - query (dict, default={}): URL query string in key-value pairs - ctype (str, default='application/json'): HTTP Content-Type ''' query = urlencode(query) hmac_str = (method + '\n' + nonce + '\n' + date + '\n' + ctype + '\n' + path + '\n' + query + '\n').lower().encode('utf-8') signature = base64.b64encode(hmac.new(self._secret_key, hmac_str, digestmod=hashlib.sha256).digest()) auth = 'On ' + self._access_key.decode('utf-8') + ':HmacSHA256:' + signature.decode('utf-8') if self._logging: utils.log({ 'query': query, 'hmac_str': hmac_str, 'signature': signature, 'auth': auth }) return auth
[ "def", "_make_auth", "(", "self", ",", "method", ",", "date", ",", "nonce", ",", "path", ",", "query", "=", "{", "}", ",", "ctype", "=", "'application/json'", ")", ":", "query", "=", "urlencode", "(", "query", ")", "hmac_str", "=", "(", "method", "+", "'\\n'", "+", "nonce", "+", "'\\n'", "+", "date", "+", "'\\n'", "+", "ctype", "+", "'\\n'", "+", "path", "+", "'\\n'", "+", "query", "+", "'\\n'", ")", ".", "lower", "(", ")", ".", "encode", "(", "'utf-8'", ")", "signature", "=", "base64", ".", "b64encode", "(", "hmac", ".", "new", "(", "self", ".", "_secret_key", ",", "hmac_str", ",", "digestmod", "=", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", ")", "auth", "=", "'On '", "+", "self", ".", "_access_key", ".", "decode", "(", "'utf-8'", ")", "+", "':HmacSHA256:'", "+", "signature", ".", "decode", "(", "'utf-8'", ")", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "{", "'query'", ":", "query", ",", "'hmac_str'", ":", "hmac_str", ",", "'signature'", ":", "signature", ",", "'auth'", ":", "auth", "}", ")", "return", "auth" ]
Create the request signature to authenticate Args: - method (str): HTTP method - date (str): HTTP date header string - nonce (str): Cryptographic nonce - path (str): URL pathname - query (dict, default={}): URL query string in key-value pairs - ctype (str, default='application/json'): HTTP Content-Type
[ "Create", "the", "request", "signature", "to", "authenticate" ]
61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df
https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/onshape.py#L76-L105
train
ethan92429/onshapepy
onshapepy/core/onshape.py
Onshape._make_headers
def _make_headers(self, method, path, query={}, headers={}): ''' Creates a headers object to sign the request Args: - method (str): HTTP method - path (str): Request path, e.g. /api/documents. No query string - query (dict, default={}): Query string in key-value format - headers (dict, default={}): Other headers to pass in Returns: - dict: Dictionary containing all headers ''' date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') nonce = self._make_nonce() ctype = headers.get('Content-Type') if headers.get('Content-Type') else 'application/json' auth = self._make_auth(method, date, nonce, path, query=query, ctype=ctype) req_headers = { 'Content-Type': 'application/json', 'Date': date, 'On-Nonce': nonce, 'Authorization': auth, 'User-Agent': 'Onshape Python Sample App', 'Accept': 'application/json' } # add in user-defined headers for h in headers: req_headers[h] = headers[h] return req_headers
python
def _make_headers(self, method, path, query={}, headers={}): ''' Creates a headers object to sign the request Args: - method (str): HTTP method - path (str): Request path, e.g. /api/documents. No query string - query (dict, default={}): Query string in key-value format - headers (dict, default={}): Other headers to pass in Returns: - dict: Dictionary containing all headers ''' date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') nonce = self._make_nonce() ctype = headers.get('Content-Type') if headers.get('Content-Type') else 'application/json' auth = self._make_auth(method, date, nonce, path, query=query, ctype=ctype) req_headers = { 'Content-Type': 'application/json', 'Date': date, 'On-Nonce': nonce, 'Authorization': auth, 'User-Agent': 'Onshape Python Sample App', 'Accept': 'application/json' } # add in user-defined headers for h in headers: req_headers[h] = headers[h] return req_headers
[ "def", "_make_headers", "(", "self", ",", "method", ",", "path", ",", "query", "=", "{", "}", ",", "headers", "=", "{", "}", ")", ":", "date", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%a, %d %b %Y %H:%M:%S GMT'", ")", "nonce", "=", "self", ".", "_make_nonce", "(", ")", "ctype", "=", "headers", ".", "get", "(", "'Content-Type'", ")", "if", "headers", ".", "get", "(", "'Content-Type'", ")", "else", "'application/json'", "auth", "=", "self", ".", "_make_auth", "(", "method", ",", "date", ",", "nonce", ",", "path", ",", "query", "=", "query", ",", "ctype", "=", "ctype", ")", "req_headers", "=", "{", "'Content-Type'", ":", "'application/json'", ",", "'Date'", ":", "date", ",", "'On-Nonce'", ":", "nonce", ",", "'Authorization'", ":", "auth", ",", "'User-Agent'", ":", "'Onshape Python Sample App'", ",", "'Accept'", ":", "'application/json'", "}", "# add in user-defined headers", "for", "h", "in", "headers", ":", "req_headers", "[", "h", "]", "=", "headers", "[", "h", "]", "return", "req_headers" ]
Creates a headers object to sign the request Args: - method (str): HTTP method - path (str): Request path, e.g. /api/documents. No query string - query (dict, default={}): Query string in key-value format - headers (dict, default={}): Other headers to pass in Returns: - dict: Dictionary containing all headers
[ "Creates", "a", "headers", "object", "to", "sign", "the", "request" ]
61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df
https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/onshape.py#L107-L140
train
ethan92429/onshapepy
onshapepy/core/onshape.py
Onshape.request
def request(self, method, path, query={}, headers={}, body={}, base_url=None): ''' Issues a request to Onshape Args: - method (str): HTTP method - path (str): Path e.g. /api/documents/:id - query (dict, default={}): Query params in key-value pairs - headers (dict, default={}): Key-value pairs of headers - body (dict, default={}): Body for POST request - base_url (str, default=None): Host, including scheme and port (if different from creds file) Returns: - requests.Response: Object containing the response from Onshape ''' req_headers = self._make_headers(method, path, query, headers) if base_url is None: base_url = self._url url = base_url + path + '?' + urlencode(query) if self._logging: utils.log(body) utils.log(req_headers) utils.log('request url: ' + url) # only parse as json string if we have to body = json.dumps(body) if type(body) == dict else body res = requests.request(method, url, headers=req_headers, data=body, allow_redirects=False, stream=True) if res.status_code == 307: location = urlparse(res.headers["Location"]) querystring = parse_qs(location.query) if self._logging: utils.log('request redirected to: ' + location.geturl()) new_query = {} new_base_url = location.scheme + '://' + location.netloc for key in querystring: new_query[key] = querystring[key][0] # won't work for repeated query params return self.request(method, location.path, query=new_query, headers=headers, base_url=new_base_url) elif not 200 <= res.status_code <= 206: if self._logging: utils.log('request failed, details: ' + res.text, level=1) else: if self._logging: utils.log('request succeeded, details: ' + res.text) return res
python
def request(self, method, path, query={}, headers={}, body={}, base_url=None): ''' Issues a request to Onshape Args: - method (str): HTTP method - path (str): Path e.g. /api/documents/:id - query (dict, default={}): Query params in key-value pairs - headers (dict, default={}): Key-value pairs of headers - body (dict, default={}): Body for POST request - base_url (str, default=None): Host, including scheme and port (if different from creds file) Returns: - requests.Response: Object containing the response from Onshape ''' req_headers = self._make_headers(method, path, query, headers) if base_url is None: base_url = self._url url = base_url + path + '?' + urlencode(query) if self._logging: utils.log(body) utils.log(req_headers) utils.log('request url: ' + url) # only parse as json string if we have to body = json.dumps(body) if type(body) == dict else body res = requests.request(method, url, headers=req_headers, data=body, allow_redirects=False, stream=True) if res.status_code == 307: location = urlparse(res.headers["Location"]) querystring = parse_qs(location.query) if self._logging: utils.log('request redirected to: ' + location.geturl()) new_query = {} new_base_url = location.scheme + '://' + location.netloc for key in querystring: new_query[key] = querystring[key][0] # won't work for repeated query params return self.request(method, location.path, query=new_query, headers=headers, base_url=new_base_url) elif not 200 <= res.status_code <= 206: if self._logging: utils.log('request failed, details: ' + res.text, level=1) else: if self._logging: utils.log('request succeeded, details: ' + res.text) return res
[ "def", "request", "(", "self", ",", "method", ",", "path", ",", "query", "=", "{", "}", ",", "headers", "=", "{", "}", ",", "body", "=", "{", "}", ",", "base_url", "=", "None", ")", ":", "req_headers", "=", "self", ".", "_make_headers", "(", "method", ",", "path", ",", "query", ",", "headers", ")", "if", "base_url", "is", "None", ":", "base_url", "=", "self", ".", "_url", "url", "=", "base_url", "+", "path", "+", "'?'", "+", "urlencode", "(", "query", ")", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "body", ")", "utils", ".", "log", "(", "req_headers", ")", "utils", ".", "log", "(", "'request url: '", "+", "url", ")", "# only parse as json string if we have to", "body", "=", "json", ".", "dumps", "(", "body", ")", "if", "type", "(", "body", ")", "==", "dict", "else", "body", "res", "=", "requests", ".", "request", "(", "method", ",", "url", ",", "headers", "=", "req_headers", ",", "data", "=", "body", ",", "allow_redirects", "=", "False", ",", "stream", "=", "True", ")", "if", "res", ".", "status_code", "==", "307", ":", "location", "=", "urlparse", "(", "res", ".", "headers", "[", "\"Location\"", "]", ")", "querystring", "=", "parse_qs", "(", "location", ".", "query", ")", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "'request redirected to: '", "+", "location", ".", "geturl", "(", ")", ")", "new_query", "=", "{", "}", "new_base_url", "=", "location", ".", "scheme", "+", "'://'", "+", "location", ".", "netloc", "for", "key", "in", "querystring", ":", "new_query", "[", "key", "]", "=", "querystring", "[", "key", "]", "[", "0", "]", "# won't work for repeated query params", "return", "self", ".", "request", "(", "method", ",", "location", ".", "path", ",", "query", "=", "new_query", ",", "headers", "=", "headers", ",", "base_url", "=", "new_base_url", ")", "elif", "not", "200", "<=", "res", ".", "status_code", "<=", "206", ":", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "'request failed, details: '", "+", 
"res", ".", "text", ",", "level", "=", "1", ")", "else", ":", "if", "self", ".", "_logging", ":", "utils", ".", "log", "(", "'request succeeded, details: '", "+", "res", ".", "text", ")", "return", "res" ]
Issues a request to Onshape Args: - method (str): HTTP method - path (str): Path e.g. /api/documents/:id - query (dict, default={}): Query params in key-value pairs - headers (dict, default={}): Key-value pairs of headers - body (dict, default={}): Body for POST request - base_url (str, default=None): Host, including scheme and port (if different from creds file) Returns: - requests.Response: Object containing the response from Onshape
[ "Issues", "a", "request", "to", "Onshape" ]
61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df
https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/onshape.py#L142-L194
train
Ceasar/twosheds
twosheds/program.py
Program.gen_sentences
def gen_sentences(self, tokens, aliases=None): """ Generate a sequence of sentences from stream of tokens. """ if aliases is None: aliases = {} for sentence in self._gen_sentences(tokens): try: alias = aliases[str(sentence[0])] except KeyError: # do nothing if no alias is found pass except IndexError: pass else: sentence[0:1] = list(Program(alias).gen_tokens()) yield transform(Sentence(sentence), self.transforms)
python
def gen_sentences(self, tokens, aliases=None): """ Generate a sequence of sentences from stream of tokens. """ if aliases is None: aliases = {} for sentence in self._gen_sentences(tokens): try: alias = aliases[str(sentence[0])] except KeyError: # do nothing if no alias is found pass except IndexError: pass else: sentence[0:1] = list(Program(alias).gen_tokens()) yield transform(Sentence(sentence), self.transforms)
[ "def", "gen_sentences", "(", "self", ",", "tokens", ",", "aliases", "=", "None", ")", ":", "if", "aliases", "is", "None", ":", "aliases", "=", "{", "}", "for", "sentence", "in", "self", ".", "_gen_sentences", "(", "tokens", ")", ":", "try", ":", "alias", "=", "aliases", "[", "str", "(", "sentence", "[", "0", "]", ")", "]", "except", "KeyError", ":", "# do nothing if no alias is found", "pass", "except", "IndexError", ":", "pass", "else", ":", "sentence", "[", "0", ":", "1", "]", "=", "list", "(", "Program", "(", "alias", ")", ".", "gen_tokens", "(", ")", ")", "yield", "transform", "(", "Sentence", "(", "sentence", ")", ",", "self", ".", "transforms", ")" ]
Generate a sequence of sentences from stream of tokens.
[ "Generate", "a", "sequence", "of", "sentences", "from", "stream", "of", "tokens", "." ]
55b0a207e3a06b85e9a9567069b3822a651501a7
https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/program.py#L106-L122
train
ZEDGR/pychal
challonge/api.py
set_timezone
def set_timezone(new_tz=None): """Set the timezone for datetime fields. By default is your machine's time. If it's called without parameter sets the local time again. :keyword param new_tz: timezone string ex. 'Europe/Athens', 'Asia/Seoul', 'America/Los_Angeles', 'UTC' :return None """ global tz if new_tz: tz = pytz.timezone(new_tz) else: tz = tzlocal.get_localzone()
python
def set_timezone(new_tz=None): """Set the timezone for datetime fields. By default is your machine's time. If it's called without parameter sets the local time again. :keyword param new_tz: timezone string ex. 'Europe/Athens', 'Asia/Seoul', 'America/Los_Angeles', 'UTC' :return None """ global tz if new_tz: tz = pytz.timezone(new_tz) else: tz = tzlocal.get_localzone()
[ "def", "set_timezone", "(", "new_tz", "=", "None", ")", ":", "global", "tz", "if", "new_tz", ":", "tz", "=", "pytz", ".", "timezone", "(", "new_tz", ")", "else", ":", "tz", "=", "tzlocal", ".", "get_localzone", "(", ")" ]
Set the timezone for datetime fields. By default is your machine's time. If it's called without parameter sets the local time again. :keyword param new_tz: timezone string ex. 'Europe/Athens', 'Asia/Seoul', 'America/Los_Angeles', 'UTC' :return None
[ "Set", "the", "timezone", "for", "datetime", "fields", ".", "By", "default", "is", "your", "machine", "s", "time", ".", "If", "it", "s", "called", "without", "parameter", "sets", "the", "local", "time", "again", "." ]
3600fa9e0557a2a14eb1ad0c0711d28dad3693d7
https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/api.py#L32-L51
train
ZEDGR/pychal
challonge/api.py
fetch_and_parse
def fetch_and_parse(method, uri, params_prefix=None, **params): """Fetch the given uri and return python dictionary with parsed data-types.""" response = fetch(method, uri, params_prefix, **params) return _parse(json.loads(response.text))
python
def fetch_and_parse(method, uri, params_prefix=None, **params): """Fetch the given uri and return python dictionary with parsed data-types.""" response = fetch(method, uri, params_prefix, **params) return _parse(json.loads(response.text))
[ "def", "fetch_and_parse", "(", "method", ",", "uri", ",", "params_prefix", "=", "None", ",", "*", "*", "params", ")", ":", "response", "=", "fetch", "(", "method", ",", "uri", ",", "params_prefix", ",", "*", "*", "params", ")", "return", "_parse", "(", "json", ".", "loads", "(", "response", ".", "text", ")", ")" ]
Fetch the given uri and return python dictionary with parsed data-types.
[ "Fetch", "the", "given", "uri", "and", "return", "python", "dictionary", "with", "parsed", "data", "-", "types", "." ]
3600fa9e0557a2a14eb1ad0c0711d28dad3693d7
https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/api.py#L94-L97
train
ZEDGR/pychal
challonge/api.py
_parse
def _parse(data): """Recursively convert a json into python data types""" if not data: return [] elif isinstance(data, (tuple, list)): return [_parse(subdata) for subdata in data] # extract the nested dict. ex. {"tournament": {"url": "7k1safq" ...}} d = {ik: v for k in data.keys() for ik, v in data[k].items()} # convert datetime strings to datetime objects # and float number strings to float to_parse = dict(d) for k, v in to_parse.items(): if k in { "name", "display_name", "display_name_with_invitation_email_address", "username", "challonge_username"}: continue # do not test type of fields which are always strings if isinstance(v, TEXT_TYPE): try: dt = iso8601.parse_date(v) d[k] = dt.astimezone(tz) except iso8601.ParseError: try: d[k] = float(v) except ValueError: pass return d
python
def _parse(data): """Recursively convert a json into python data types""" if not data: return [] elif isinstance(data, (tuple, list)): return [_parse(subdata) for subdata in data] # extract the nested dict. ex. {"tournament": {"url": "7k1safq" ...}} d = {ik: v for k in data.keys() for ik, v in data[k].items()} # convert datetime strings to datetime objects # and float number strings to float to_parse = dict(d) for k, v in to_parse.items(): if k in { "name", "display_name", "display_name_with_invitation_email_address", "username", "challonge_username"}: continue # do not test type of fields which are always strings if isinstance(v, TEXT_TYPE): try: dt = iso8601.parse_date(v) d[k] = dt.astimezone(tz) except iso8601.ParseError: try: d[k] = float(v) except ValueError: pass return d
[ "def", "_parse", "(", "data", ")", ":", "if", "not", "data", ":", "return", "[", "]", "elif", "isinstance", "(", "data", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "[", "_parse", "(", "subdata", ")", "for", "subdata", "in", "data", "]", "# extract the nested dict. ex. {\"tournament\": {\"url\": \"7k1safq\" ...}}", "d", "=", "{", "ik", ":", "v", "for", "k", "in", "data", ".", "keys", "(", ")", "for", "ik", ",", "v", "in", "data", "[", "k", "]", ".", "items", "(", ")", "}", "# convert datetime strings to datetime objects", "# and float number strings to float", "to_parse", "=", "dict", "(", "d", ")", "for", "k", ",", "v", "in", "to_parse", ".", "items", "(", ")", ":", "if", "k", "in", "{", "\"name\"", ",", "\"display_name\"", ",", "\"display_name_with_invitation_email_address\"", ",", "\"username\"", ",", "\"challonge_username\"", "}", ":", "continue", "# do not test type of fields which are always strings", "if", "isinstance", "(", "v", ",", "TEXT_TYPE", ")", ":", "try", ":", "dt", "=", "iso8601", ".", "parse_date", "(", "v", ")", "d", "[", "k", "]", "=", "dt", ".", "astimezone", "(", "tz", ")", "except", "iso8601", ".", "ParseError", ":", "try", ":", "d", "[", "k", "]", "=", "float", "(", "v", ")", "except", "ValueError", ":", "pass", "return", "d" ]
Recursively convert a json into python data types
[ "Recursively", "convert", "a", "json", "into", "python", "data", "types" ]
3600fa9e0557a2a14eb1ad0c0711d28dad3693d7
https://github.com/ZEDGR/pychal/blob/3600fa9e0557a2a14eb1ad0c0711d28dad3693d7/challonge/api.py#L100-L132
train
uw-it-aca/uw-restclients-sws
uw_sws/department.py
get_departments_by_college
def get_departments_by_college(college): """ Returns a list of restclients.Department models, for the passed College model. """ url = "{}?{}".format( dept_search_url_prefix, urlencode({"college_abbreviation": college.label})) return _json_to_departments(get_resource(url), college)
python
def get_departments_by_college(college): """ Returns a list of restclients.Department models, for the passed College model. """ url = "{}?{}".format( dept_search_url_prefix, urlencode({"college_abbreviation": college.label})) return _json_to_departments(get_resource(url), college)
[ "def", "get_departments_by_college", "(", "college", ")", ":", "url", "=", "\"{}?{}\"", ".", "format", "(", "dept_search_url_prefix", ",", "urlencode", "(", "{", "\"college_abbreviation\"", ":", "college", ".", "label", "}", ")", ")", "return", "_json_to_departments", "(", "get_resource", "(", "url", ")", ",", "college", ")" ]
Returns a list of restclients.Department models, for the passed College model.
[ "Returns", "a", "list", "of", "restclients", ".", "Department", "models", "for", "the", "passed", "College", "model", "." ]
4d36776dcca36855fc15c1b8fe7650ae045194cf
https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/department.py#L14-L22
train
berkeley-cocosci/Wallace
wallace/db.py
scoped_session_decorator
def scoped_session_decorator(func): """Manage contexts and add debugging to psiTurk sessions.""" @wraps(func) def wrapper(*args, **kwargs): from wallace.db import session as wallace_session with sessions_scope(wallace_session) as session: from psiturk.db import db_session as psi_session with sessions_scope(psi_session) as session_psiturk: # The sessions used in func come from the funcs globals, but # they will be proxied thread locals vars from the session # registry, and will therefore be identical to those returned # by the context managers above. logger.debug('Running worker %s in scoped DB sessions', func.__name__) return func(*args, **kwargs) return wrapper
python
def scoped_session_decorator(func): """Manage contexts and add debugging to psiTurk sessions.""" @wraps(func) def wrapper(*args, **kwargs): from wallace.db import session as wallace_session with sessions_scope(wallace_session) as session: from psiturk.db import db_session as psi_session with sessions_scope(psi_session) as session_psiturk: # The sessions used in func come from the funcs globals, but # they will be proxied thread locals vars from the session # registry, and will therefore be identical to those returned # by the context managers above. logger.debug('Running worker %s in scoped DB sessions', func.__name__) return func(*args, **kwargs) return wrapper
[ "def", "scoped_session_decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "wallace", ".", "db", "import", "session", "as", "wallace_session", "with", "sessions_scope", "(", "wallace_session", ")", "as", "session", ":", "from", "psiturk", ".", "db", "import", "db_session", "as", "psi_session", "with", "sessions_scope", "(", "psi_session", ")", "as", "session_psiturk", ":", "# The sessions used in func come from the funcs globals, but", "# they will be proxied thread locals vars from the session", "# registry, and will therefore be identical to those returned", "# by the context managers above.", "logger", ".", "debug", "(", "'Running worker %s in scoped DB sessions'", ",", "func", ".", "__name__", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Manage contexts and add debugging to psiTurk sessions.
[ "Manage", "contexts", "and", "add", "debugging", "to", "psiTurk", "sessions", "." ]
3650c0bc3b0804d0adb1d178c5eba9992babb1b0
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/db.py#L47-L62
train
Ceasar/twosheds
twosheds/terminal.py
Terminal.readline
def readline(self, continuation=False): """Read a line from the terminal. A backslash followed by a <newline> is interpreted as a line continuation. The backslash and <newline>s are removed before return. For example:: $ uname \ > -m x86_64 :param continuation: True if the line is a continuation. Defaults to False. """ prompt = (self.secondary_prompt_string if continuation else self.primary_prompt_string) try: line = raw_input(prompt) while line.endswith("\\"): line = line[:-1] + raw_input(self.secondary_prompt_string) except EOFError: raise SystemExit() else: return line
python
def readline(self, continuation=False): """Read a line from the terminal. A backslash followed by a <newline> is interpreted as a line continuation. The backslash and <newline>s are removed before return. For example:: $ uname \ > -m x86_64 :param continuation: True if the line is a continuation. Defaults to False. """ prompt = (self.secondary_prompt_string if continuation else self.primary_prompt_string) try: line = raw_input(prompt) while line.endswith("\\"): line = line[:-1] + raw_input(self.secondary_prompt_string) except EOFError: raise SystemExit() else: return line
[ "def", "readline", "(", "self", ",", "continuation", "=", "False", ")", ":", "prompt", "=", "(", "self", ".", "secondary_prompt_string", "if", "continuation", "else", "self", ".", "primary_prompt_string", ")", "try", ":", "line", "=", "raw_input", "(", "prompt", ")", "while", "line", ".", "endswith", "(", "\"\\\\\"", ")", ":", "line", "=", "line", "[", ":", "-", "1", "]", "+", "raw_input", "(", "self", ".", "secondary_prompt_string", ")", "except", "EOFError", ":", "raise", "SystemExit", "(", ")", "else", ":", "return", "line" ]
Read a line from the terminal. A backslash followed by a <newline> is interpreted as a line continuation. The backslash and <newline>s are removed before return. For example:: $ uname \ > -m x86_64 :param continuation: True if the line is a continuation. Defaults to False.
[ "Read", "a", "line", "from", "the", "terminal", "." ]
55b0a207e3a06b85e9a9567069b3822a651501a7
https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/terminal.py#L24-L48
train
Ceasar/twosheds
twosheds/terminal.py
Terminal.readlines
def readlines(self): """Read a command from the terminal. Returns a list of tokens containing the user's input. """ continuation = False while True: yield self.readline(continuation) continuation = True
python
def readlines(self): """Read a command from the terminal. Returns a list of tokens containing the user's input. """ continuation = False while True: yield self.readline(continuation) continuation = True
[ "def", "readlines", "(", "self", ")", ":", "continuation", "=", "False", "while", "True", ":", "yield", "self", ".", "readline", "(", "continuation", ")", "continuation", "=", "True" ]
Read a command from the terminal. Returns a list of tokens containing the user's input.
[ "Read", "a", "command", "from", "the", "terminal", "." ]
55b0a207e3a06b85e9a9567069b3822a651501a7
https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/terminal.py#L50-L58
train
Kortemme-Lab/klab
klab/bio/pdb.py
sequence
def sequence(pdb_filepath): '''A convenience method for printing PDB sequences in command-line execution. :param pdb_filepath: A path to a PDB file. :return: A string where odd line is a chain letter and each following even line is that chain's ATOM sequence. Example use: from klab.bio import pdb print(pdb.sequence('1234.pdb')) ''' return '\n'.join(['{0}\n{1}'.format(chain_id, str(seq)) for chain_id, seq in sorted(PDB.from_filepath(pdb_filepath).atom_sequences.iteritems())])
python
def sequence(pdb_filepath): '''A convenience method for printing PDB sequences in command-line execution. :param pdb_filepath: A path to a PDB file. :return: A string where odd line is a chain letter and each following even line is that chain's ATOM sequence. Example use: from klab.bio import pdb print(pdb.sequence('1234.pdb')) ''' return '\n'.join(['{0}\n{1}'.format(chain_id, str(seq)) for chain_id, seq in sorted(PDB.from_filepath(pdb_filepath).atom_sequences.iteritems())])
[ "def", "sequence", "(", "pdb_filepath", ")", ":", "return", "'\\n'", ".", "join", "(", "[", "'{0}\\n{1}'", ".", "format", "(", "chain_id", ",", "str", "(", "seq", ")", ")", "for", "chain_id", ",", "seq", "in", "sorted", "(", "PDB", ".", "from_filepath", "(", "pdb_filepath", ")", ".", "atom_sequences", ".", "iteritems", "(", ")", ")", "]", ")" ]
A convenience method for printing PDB sequences in command-line execution. :param pdb_filepath: A path to a PDB file. :return: A string where odd line is a chain letter and each following even line is that chain's ATOM sequence. Example use: from klab.bio import pdb print(pdb.sequence('1234.pdb'))
[ "A", "convenience", "method", "for", "printing", "PDB", "sequences", "in", "command", "-", "line", "execution", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L3149-L3159
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.fix_pdb
def fix_pdb(self): '''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if self.strict is False. We may want a separate property for this since we may want to keep strict mode but still allow PDBs to be fixed. The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.''' if self.strict: return # Get the list of chains chains = set() for l in self.lines: if l.startswith('ATOM ') or l.startswith('HETATM'): chains.add(l[21]) # If there is a chain with a blank ID, change that ID to a valid unused ID if ' ' in chains: fresh_id = None allowed_chain_ids = list(string.uppercase) + list(string.lowercase) + map(str, range(10)) for c in chains: try: allowed_chain_ids.remove(c) except: pass if allowed_chain_ids: fresh_id = allowed_chain_ids[0] # Rewrite the lines new_lines = [] if fresh_id: for l in self.lines: if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ': new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:])) else: new_lines.append(l) self.lines = new_lines
python
def fix_pdb(self): '''A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if self.strict is False. We may want a separate property for this since we may want to keep strict mode but still allow PDBs to be fixed. The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.''' if self.strict: return # Get the list of chains chains = set() for l in self.lines: if l.startswith('ATOM ') or l.startswith('HETATM'): chains.add(l[21]) # If there is a chain with a blank ID, change that ID to a valid unused ID if ' ' in chains: fresh_id = None allowed_chain_ids = list(string.uppercase) + list(string.lowercase) + map(str, range(10)) for c in chains: try: allowed_chain_ids.remove(c) except: pass if allowed_chain_ids: fresh_id = allowed_chain_ids[0] # Rewrite the lines new_lines = [] if fresh_id: for l in self.lines: if (l.startswith('ATOM ') or l.startswith('HETATM')) and l[21] == ' ': new_lines.append('%s%s%s' % (l[:21], fresh_id, l[22:])) else: new_lines.append(l) self.lines = new_lines
[ "def", "fix_pdb", "(", "self", ")", ":", "if", "self", ".", "strict", ":", "return", "# Get the list of chains", "chains", "=", "set", "(", ")", "for", "l", "in", "self", ".", "lines", ":", "if", "l", ".", "startswith", "(", "'ATOM '", ")", "or", "l", ".", "startswith", "(", "'HETATM'", ")", ":", "chains", ".", "add", "(", "l", "[", "21", "]", ")", "# If there is a chain with a blank ID, change that ID to a valid unused ID", "if", "' '", "in", "chains", ":", "fresh_id", "=", "None", "allowed_chain_ids", "=", "list", "(", "string", ".", "uppercase", ")", "+", "list", "(", "string", ".", "lowercase", ")", "+", "map", "(", "str", ",", "range", "(", "10", ")", ")", "for", "c", "in", "chains", ":", "try", ":", "allowed_chain_ids", ".", "remove", "(", "c", ")", "except", ":", "pass", "if", "allowed_chain_ids", ":", "fresh_id", "=", "allowed_chain_ids", "[", "0", "]", "# Rewrite the lines", "new_lines", "=", "[", "]", "if", "fresh_id", ":", "for", "l", "in", "self", ".", "lines", ":", "if", "(", "l", ".", "startswith", "(", "'ATOM '", ")", "or", "l", ".", "startswith", "(", "'HETATM'", ")", ")", "and", "l", "[", "21", "]", "==", "' '", ":", "new_lines", ".", "append", "(", "'%s%s%s'", "%", "(", "l", "[", ":", "21", "]", ",", "fresh_id", ",", "l", "[", "22", ":", "]", ")", ")", "else", ":", "new_lines", ".", "append", "(", "l", ")", "self", ".", "lines", "=", "new_lines" ]
A function to fix fatal errors in PDB files when they can be automatically fixed. At present, this only runs if self.strict is False. We may want a separate property for this since we may want to keep strict mode but still allow PDBs to be fixed. The only fixes at the moment are for missing chain IDs which get filled in with a valid PDB ID, if possible.
[ "A", "function", "to", "fix", "fatal", "errors", "in", "PDB", "files", "when", "they", "can", "be", "automatically", "fixed", ".", "At", "present", "this", "only", "runs", "if", "self", ".", "strict", "is", "False", ".", "We", "may", "want", "a", "separate", "property", "for", "this", "since", "we", "may", "want", "to", "keep", "strict", "mode", "but", "still", "allow", "PDBs", "to", "be", "fixed", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L456-L490
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.replace_headers
def replace_headers(source_pdb_content, target_pdb_content): '''Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that target_pdb_content had. Only the content up to the first structural line are taken from source_pdb_content and only the content from the first structural line in target_pdb_content are taken. ''' s = PDB(source_pdb_content) t = PDB(target_pdb_content) source_headers = [] for l in s.lines: if l[:6].strip() in non_header_records: break else: source_headers.append(l) target_body = [] in_header = True for l in t.lines: if l[:6].strip() in non_header_records: in_header = False if not in_header: target_body.append(l) return '\n'.join(source_headers + target_body)
python
def replace_headers(source_pdb_content, target_pdb_content): '''Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that target_pdb_content had. Only the content up to the first structural line are taken from source_pdb_content and only the content from the first structural line in target_pdb_content are taken. ''' s = PDB(source_pdb_content) t = PDB(target_pdb_content) source_headers = [] for l in s.lines: if l[:6].strip() in non_header_records: break else: source_headers.append(l) target_body = [] in_header = True for l in t.lines: if l[:6].strip() in non_header_records: in_header = False if not in_header: target_body.append(l) return '\n'.join(source_headers + target_body)
[ "def", "replace_headers", "(", "source_pdb_content", ",", "target_pdb_content", ")", ":", "s", "=", "PDB", "(", "source_pdb_content", ")", "t", "=", "PDB", "(", "target_pdb_content", ")", "source_headers", "=", "[", "]", "for", "l", "in", "s", ".", "lines", ":", "if", "l", "[", ":", "6", "]", ".", "strip", "(", ")", "in", "non_header_records", ":", "break", "else", ":", "source_headers", ".", "append", "(", "l", ")", "target_body", "=", "[", "]", "in_header", "=", "True", "for", "l", "in", "t", ".", "lines", ":", "if", "l", "[", ":", "6", "]", ".", "strip", "(", ")", "in", "non_header_records", ":", "in_header", "=", "False", "if", "not", "in_header", ":", "target_body", ".", "append", "(", "l", ")", "return", "'\\n'", ".", "join", "(", "source_headers", "+", "target_body", ")" ]
Takes the headers from source_pdb_content and adds them to target_pdb_content, removing any headers that target_pdb_content had. Only the content up to the first structural line are taken from source_pdb_content and only the content from the first structural line in target_pdb_content are taken.
[ "Takes", "the", "headers", "from", "source_pdb_content", "and", "adds", "them", "to", "target_pdb_content", "removing", "any", "headers", "that", "target_pdb_content", "had", ".", "Only", "the", "content", "up", "to", "the", "first", "structural", "line", "are", "taken", "from", "source_pdb_content", "and", "only", "the", "content", "from", "the", "first", "structural", "line", "in", "target_pdb_content", "are", "taken", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L557-L581
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.from_lines
def from_lines(pdb_file_lines, strict = True, parse_ligands = False): '''A function to replace the old constructor call where a list of the file's lines was passed in.''' return PDB("\n".join(pdb_file_lines), strict = strict, parse_ligands = parse_ligands)
python
def from_lines(pdb_file_lines, strict = True, parse_ligands = False): '''A function to replace the old constructor call where a list of the file's lines was passed in.''' return PDB("\n".join(pdb_file_lines), strict = strict, parse_ligands = parse_ligands)
[ "def", "from_lines", "(", "pdb_file_lines", ",", "strict", "=", "True", ",", "parse_ligands", "=", "False", ")", ":", "return", "PDB", "(", "\"\\n\"", ".", "join", "(", "pdb_file_lines", ")", ",", "strict", "=", "strict", ",", "parse_ligands", "=", "parse_ligands", ")" ]
A function to replace the old constructor call where a list of the file's lines was passed in.
[ "A", "function", "to", "replace", "the", "old", "constructor", "call", "where", "a", "list", "of", "the", "file", "s", "lines", "was", "passed", "in", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L591-L593
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB._split_lines
def _split_lines(self): '''Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.''' parsed_lines = {} for rt in all_record_types: parsed_lines[rt] = [] parsed_lines[0] = [] for line in self.lines: linetype = line[0:6] if linetype in all_record_types: parsed_lines[linetype].append(line) else: parsed_lines[0].append(line) self.parsed_lines = parsed_lines self._update_structure_lines()
python
def _split_lines(self): '''Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.''' parsed_lines = {} for rt in all_record_types: parsed_lines[rt] = [] parsed_lines[0] = [] for line in self.lines: linetype = line[0:6] if linetype in all_record_types: parsed_lines[linetype].append(line) else: parsed_lines[0].append(line) self.parsed_lines = parsed_lines self._update_structure_lines()
[ "def", "_split_lines", "(", "self", ")", ":", "parsed_lines", "=", "{", "}", "for", "rt", "in", "all_record_types", ":", "parsed_lines", "[", "rt", "]", "=", "[", "]", "parsed_lines", "[", "0", "]", "=", "[", "]", "for", "line", "in", "self", ".", "lines", ":", "linetype", "=", "line", "[", "0", ":", "6", "]", "if", "linetype", "in", "all_record_types", ":", "parsed_lines", "[", "linetype", "]", ".", "append", "(", "line", ")", "else", ":", "parsed_lines", "[", "0", "]", ".", "append", "(", "line", ")", "self", ".", "parsed_lines", "=", "parsed_lines", "self", ".", "_update_structure_lines", "(", ")" ]
Creates the parsed_lines dict which keeps all record data in document order indexed by the record type.
[ "Creates", "the", "parsed_lines", "dict", "which", "keeps", "all", "record", "data", "in", "document", "order", "indexed", "by", "the", "record", "type", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L621-L636
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB._update_structure_lines
def _update_structure_lines(self): '''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.''' structure_lines = [] atom_chain_order = [] chain_atoms = {} for line in self.lines: linetype = line[0:6] if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ': chain_id = line[21] self.residue_types.add(line[17:20].strip()) if missing_chain_ids.get(self.pdb_id): chain_id = missing_chain_ids[self.pdb_id] structure_lines.append(line) if (chain_id not in atom_chain_order) and (chain_id != ' '): atom_chain_order.append(chain_id) if linetype == 'ATOM ': atom_type = line[12:16].strip() if atom_type: chain_atoms[chain_id] = chain_atoms.get(chain_id, set()) chain_atoms[chain_id].add(atom_type) if linetype == 'ENDMDL': colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.") break self.structure_lines = structure_lines self.atom_chain_order = atom_chain_order self.chain_atoms = chain_atoms
python
def _update_structure_lines(self): '''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.''' structure_lines = [] atom_chain_order = [] chain_atoms = {} for line in self.lines: linetype = line[0:6] if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ': chain_id = line[21] self.residue_types.add(line[17:20].strip()) if missing_chain_ids.get(self.pdb_id): chain_id = missing_chain_ids[self.pdb_id] structure_lines.append(line) if (chain_id not in atom_chain_order) and (chain_id != ' '): atom_chain_order.append(chain_id) if linetype == 'ATOM ': atom_type = line[12:16].strip() if atom_type: chain_atoms[chain_id] = chain_atoms.get(chain_id, set()) chain_atoms[chain_id].add(atom_type) if linetype == 'ENDMDL': colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.") break self.structure_lines = structure_lines self.atom_chain_order = atom_chain_order self.chain_atoms = chain_atoms
[ "def", "_update_structure_lines", "(", "self", ")", ":", "structure_lines", "=", "[", "]", "atom_chain_order", "=", "[", "]", "chain_atoms", "=", "{", "}", "for", "line", "in", "self", ".", "lines", ":", "linetype", "=", "line", "[", "0", ":", "6", "]", "if", "linetype", "==", "'ATOM '", "or", "linetype", "==", "'HETATM'", "or", "linetype", "==", "'TER '", ":", "chain_id", "=", "line", "[", "21", "]", "self", ".", "residue_types", ".", "add", "(", "line", "[", "17", ":", "20", "]", ".", "strip", "(", ")", ")", "if", "missing_chain_ids", ".", "get", "(", "self", ".", "pdb_id", ")", ":", "chain_id", "=", "missing_chain_ids", "[", "self", ".", "pdb_id", "]", "structure_lines", ".", "append", "(", "line", ")", "if", "(", "chain_id", "not", "in", "atom_chain_order", ")", "and", "(", "chain_id", "!=", "' '", ")", ":", "atom_chain_order", ".", "append", "(", "chain_id", ")", "if", "linetype", "==", "'ATOM '", ":", "atom_type", "=", "line", "[", "12", ":", "16", "]", ".", "strip", "(", ")", "if", "atom_type", ":", "chain_atoms", "[", "chain_id", "]", "=", "chain_atoms", ".", "get", "(", "chain_id", ",", "set", "(", ")", ")", "chain_atoms", "[", "chain_id", "]", ".", "add", "(", "atom_type", ")", "if", "linetype", "==", "'ENDMDL'", ":", "colortext", ".", "warning", "(", "\"ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.\"", ")", "break", "self", ".", "structure_lines", "=", "structure_lines", "self", ".", "atom_chain_order", "=", "atom_chain_order", "self", ".", "chain_atoms", "=", "chain_atoms" ]
ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.
[ "ATOM", "and", "HETATM", "lines", "may", "be", "altered", "by", "function", "calls", ".", "When", "this", "happens", "this", "function", "should", "be", "called", "to", "keep", "self", ".", "structure_lines", "up", "to", "date", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L639-L666
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.clone
def clone(self, parse_ligands = False): '''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.''' return PDB("\n".join(self.lines), pdb_id = self.pdb_id, strict = self.strict, parse_ligands = parse_ligands)
python
def clone(self, parse_ligands = False): '''A function to replace the old constructor call where a PDB object was passed in and 'cloned'.''' return PDB("\n".join(self.lines), pdb_id = self.pdb_id, strict = self.strict, parse_ligands = parse_ligands)
[ "def", "clone", "(", "self", ",", "parse_ligands", "=", "False", ")", ":", "return", "PDB", "(", "\"\\n\"", ".", "join", "(", "self", ".", "lines", ")", ",", "pdb_id", "=", "self", ".", "pdb_id", ",", "strict", "=", "self", ".", "strict", ",", "parse_ligands", "=", "parse_ligands", ")" ]
A function to replace the old constructor call where a PDB object was passed in and 'cloned'.
[ "A", "function", "to", "replace", "the", "old", "constructor", "call", "where", "a", "PDB", "object", "was", "passed", "in", "and", "cloned", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L670-L672
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.get_pdb_id
def get_pdb_id(self): '''Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to always have an ID of 'XXXX' in biological units so the constructor override is useful.''' if self.pdb_id: return self.pdb_id else: header = self.parsed_lines["HEADER"] assert(len(header) <= 1) if header: self.pdb_id = header[0][62:66] return self.pdb_id return None
python
def get_pdb_id(self): '''Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to always have an ID of 'XXXX' in biological units so the constructor override is useful.''' if self.pdb_id: return self.pdb_id else: header = self.parsed_lines["HEADER"] assert(len(header) <= 1) if header: self.pdb_id = header[0][62:66] return self.pdb_id return None
[ "def", "get_pdb_id", "(", "self", ")", ":", "if", "self", ".", "pdb_id", ":", "return", "self", ".", "pdb_id", "else", ":", "header", "=", "self", ".", "parsed_lines", "[", "\"HEADER\"", "]", "assert", "(", "len", "(", "header", ")", "<=", "1", ")", "if", "header", ":", "self", ".", "pdb_id", "=", "header", "[", "0", "]", "[", "62", ":", "66", "]", "return", "self", ".", "pdb_id", "return", "None" ]
Return the PDB ID. If one was passed in to the constructor, this takes precedence, otherwise the header is parsed to try to find an ID. The header does not always contain a PDB ID in regular PDB files and appears to always have an ID of 'XXXX' in biological units so the constructor override is useful.
[ "Return", "the", "PDB", "ID", ".", "If", "one", "was", "passed", "in", "to", "the", "constructor", "this", "takes", "precedence", "otherwise", "the", "header", "is", "parsed", "to", "try", "to", "find", "an", "ID", ".", "The", "header", "does", "not", "always", "contain", "a", "PDB", "ID", "in", "regular", "PDB", "files", "and", "appears", "to", "always", "have", "an", "ID", "of", "XXXX", "in", "biological", "units", "so", "the", "constructor", "override", "is", "useful", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L684-L696
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.get_annotated_chain_sequence_string
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True): '''A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.''' if use_seqres_sequences_if_possible and self.seqres_sequences and self.seqres_sequences.get(chain_id): return ('SEQRES', self.seqres_sequences[chain_id]) elif self.atom_sequences.get(chain_id): return ('ATOM', self.atom_sequences[chain_id]) elif raise_Exception_if_not_found: raise Exception('Error: Chain %s expected but not found.' % (str(chain_id))) else: return None
python
def get_annotated_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True): '''A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.''' if use_seqres_sequences_if_possible and self.seqres_sequences and self.seqres_sequences.get(chain_id): return ('SEQRES', self.seqres_sequences[chain_id]) elif self.atom_sequences.get(chain_id): return ('ATOM', self.atom_sequences[chain_id]) elif raise_Exception_if_not_found: raise Exception('Error: Chain %s expected but not found.' % (str(chain_id))) else: return None
[ "def", "get_annotated_chain_sequence_string", "(", "self", ",", "chain_id", ",", "use_seqres_sequences_if_possible", ",", "raise_Exception_if_not_found", "=", "True", ")", ":", "if", "use_seqres_sequences_if_possible", "and", "self", ".", "seqres_sequences", "and", "self", ".", "seqres_sequences", ".", "get", "(", "chain_id", ")", ":", "return", "(", "'SEQRES'", ",", "self", ".", "seqres_sequences", "[", "chain_id", "]", ")", "elif", "self", ".", "atom_sequences", ".", "get", "(", "chain_id", ")", ":", "return", "(", "'ATOM'", ",", "self", ".", "atom_sequences", "[", "chain_id", "]", ")", "elif", "raise_Exception_if_not_found", ":", "raise", "Exception", "(", "'Error: Chain %s expected but not found.'", "%", "(", "str", "(", "chain_id", ")", ")", ")", "else", ":", "return", "None" ]
A helper function to return the Sequence for a chain. If use_seqres_sequences_if_possible then we return the SEQRES Sequence if it exists. We return a tuple of values, the first identifying which sequence was returned.
[ "A", "helper", "function", "to", "return", "the", "Sequence", "for", "a", "chain", ".", "If", "use_seqres_sequences_if_possible", "then", "we", "return", "the", "SEQRES", "Sequence", "if", "it", "exists", ".", "We", "return", "a", "tuple", "of", "values", "the", "first", "identifying", "which", "sequence", "was", "returned", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L704-L714
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.get_chain_sequence_string
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True): '''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.''' chain_pair = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = raise_Exception_if_not_found) if chain_pair: return chain_pair[1] return None
python
def get_chain_sequence_string(self, chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = True): '''Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.''' chain_pair = self.get_annotated_chain_sequence_string(chain_id, use_seqres_sequences_if_possible, raise_Exception_if_not_found = raise_Exception_if_not_found) if chain_pair: return chain_pair[1] return None
[ "def", "get_chain_sequence_string", "(", "self", ",", "chain_id", ",", "use_seqres_sequences_if_possible", ",", "raise_Exception_if_not_found", "=", "True", ")", ":", "chain_pair", "=", "self", ".", "get_annotated_chain_sequence_string", "(", "chain_id", ",", "use_seqres_sequences_if_possible", ",", "raise_Exception_if_not_found", "=", "raise_Exception_if_not_found", ")", "if", "chain_pair", ":", "return", "chain_pair", "[", "1", "]", "return", "None" ]
Similar to get_annotated_chain_sequence_string except that we only return the Sequence and do not state which sequence it was.
[ "Similar", "to", "get_annotated_chain_sequence_string", "except", "that", "we", "only", "return", "the", "Sequence", "and", "do", "not", "state", "which", "sequence", "it", "was", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L717-L722
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.strip_HETATMs
def strip_HETATMs(self, only_strip_these_chains = []): '''Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.''' if only_strip_these_chains: self.lines = [l for l in self.lines if not(l.startswith('HETATM')) or l[21] not in only_strip_these_chains] else: self.lines = [l for l in self.lines if not(l.startswith('HETATM'))] self._update_structure_lines()
python
def strip_HETATMs(self, only_strip_these_chains = []): '''Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.''' if only_strip_these_chains: self.lines = [l for l in self.lines if not(l.startswith('HETATM')) or l[21] not in only_strip_these_chains] else: self.lines = [l for l in self.lines if not(l.startswith('HETATM'))] self._update_structure_lines()
[ "def", "strip_HETATMs", "(", "self", ",", "only_strip_these_chains", "=", "[", "]", ")", ":", "if", "only_strip_these_chains", ":", "self", ".", "lines", "=", "[", "l", "for", "l", "in", "self", ".", "lines", "if", "not", "(", "l", ".", "startswith", "(", "'HETATM'", ")", ")", "or", "l", "[", "21", "]", "not", "in", "only_strip_these_chains", "]", "else", ":", "self", ".", "lines", "=", "[", "l", "for", "l", "in", "self", ".", "lines", "if", "not", "(", "l", ".", "startswith", "(", "'HETATM'", ")", ")", "]", "self", ".", "_update_structure_lines", "(", ")" ]
Throw away all HETATM lines. If only_strip_these_chains is specified then only strip HETATMs lines for those chains.
[ "Throw", "away", "all", "HETATM", "lines", ".", "If", "only_strip_these_chains", "is", "specified", "then", "only", "strip", "HETATMs", "lines", "for", "those", "chains", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L800-L806
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB._get_pdb_format_version
def _get_pdb_format_version(self): '''Remark 4 indicates the version of the PDB File Format used to generate the file.''' if not self.format_version: version = None version_lines = None try: version_lines = [line for line in self.parsed_lines['REMARK'] if int(line[7:10]) == 4 and line[10:].strip()] except: pass if version_lines: assert(len(version_lines) == 1) version_line = version_lines[0] version_regex = re.compile('.*?FORMAT V.(.*),') mtch = version_regex.match(version_line) if mtch and mtch.groups(0): try: version = float(mtch.groups(0)[0]) except: pass self.format_version = version
python
def _get_pdb_format_version(self): '''Remark 4 indicates the version of the PDB File Format used to generate the file.''' if not self.format_version: version = None version_lines = None try: version_lines = [line for line in self.parsed_lines['REMARK'] if int(line[7:10]) == 4 and line[10:].strip()] except: pass if version_lines: assert(len(version_lines) == 1) version_line = version_lines[0] version_regex = re.compile('.*?FORMAT V.(.*),') mtch = version_regex.match(version_line) if mtch and mtch.groups(0): try: version = float(mtch.groups(0)[0]) except: pass self.format_version = version
[ "def", "_get_pdb_format_version", "(", "self", ")", ":", "if", "not", "self", ".", "format_version", ":", "version", "=", "None", "version_lines", "=", "None", "try", ":", "version_lines", "=", "[", "line", "for", "line", "in", "self", ".", "parsed_lines", "[", "'REMARK'", "]", "if", "int", "(", "line", "[", "7", ":", "10", "]", ")", "==", "4", "and", "line", "[", "10", ":", "]", ".", "strip", "(", ")", "]", "except", ":", "pass", "if", "version_lines", ":", "assert", "(", "len", "(", "version_lines", ")", "==", "1", ")", "version_line", "=", "version_lines", "[", "0", "]", "version_regex", "=", "re", ".", "compile", "(", "'.*?FORMAT V.(.*),'", ")", "mtch", "=", "version_regex", ".", "match", "(", "version_line", ")", "if", "mtch", "and", "mtch", ".", "groups", "(", "0", ")", ":", "try", ":", "version", "=", "float", "(", "mtch", ".", "groups", "(", "0", ")", "[", "0", "]", ")", "except", ":", "pass", "self", ".", "format_version", "=", "version" ]
Remark 4 indicates the version of the PDB File Format used to generate the file.
[ "Remark", "4", "indicates", "the", "version", "of", "the", "PDB", "File", "Format", "used", "to", "generate", "the", "file", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L926-L944
train
Kortemme-Lab/klab
klab/bio/pdb.py
PDB.get_atom_sequence_to_rosetta_json_map
def get_atom_sequence_to_rosetta_json_map(self): '''Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.''' import json d = {} atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map() for c, sm in atom_sequence_to_rosetta_mapping.iteritems(): for k, v in sm.map.iteritems(): d[k] = v return json.dumps(d, indent = 4, sort_keys = True)
python
def get_atom_sequence_to_rosetta_json_map(self): '''Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.''' import json d = {} atom_sequence_to_rosetta_mapping = self.get_atom_sequence_to_rosetta_map() for c, sm in atom_sequence_to_rosetta_mapping.iteritems(): for k, v in sm.map.iteritems(): d[k] = v return json.dumps(d, indent = 4, sort_keys = True)
[ "def", "get_atom_sequence_to_rosetta_json_map", "(", "self", ")", ":", "import", "json", "d", "=", "{", "}", "atom_sequence_to_rosetta_mapping", "=", "self", ".", "get_atom_sequence_to_rosetta_map", "(", ")", "for", "c", ",", "sm", "in", "atom_sequence_to_rosetta_mapping", ".", "iteritems", "(", ")", ":", "for", "k", ",", "v", "in", "sm", ".", "map", ".", "iteritems", "(", ")", ":", "d", "[", "k", "]", "=", "v", "return", "json", ".", "dumps", "(", "d", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")" ]
Returns the mapping from PDB ATOM residue IDs to Rosetta residue IDs in JSON format.
[ "Returns", "the", "mapping", "from", "PDB", "ATOM", "residue", "IDs", "to", "Rosetta", "residue", "IDs", "in", "JSON", "format", "." ]
6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L1744-L1752
train