Dataset schema (column statistics as reported by the dataset viewer):

| column | dtype | min | max |
|---|---|---|---|
| body | string (length) | 26 | 98.2k |
| body_hash | int64 | -9,222,864,604,528,158,000 | 9,221,803,474B |
| docstring | string (length) | 1 | 16.8k |
| path | string (length) | 5 | 230 |
| name | string (length) | 1 | 96 |
| repository_name | string (length) | 7 | 89 |
| lang | string (1 class: "python") | — | — |
| body_without_docstring | string (length) | 20 | 98.2k |
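For orientation, a minimal sketch of reading records with this schema via the Hugging Face `datasets` library; the dataset path below is hypothetical, since this dump does not name its source dataset.

```python
from datasets import load_dataset

# Hypothetical dataset path; the dump does not identify the source dataset.
ds = load_dataset("someuser/python-code-docstrings", split="train")

for row in ds.select(range(3)):
    # Each record pairs a function body with its extracted docstring.
    print(row["repository_name"], row["path"], row["name"])
    print(row["docstring"][:80])
```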
def prepare_auth(self, auth, url=''): 'Prepares the given HTTP auth data.' if (auth is None): url_auth = get_auth_from_url(self.url) auth = (url_auth if any(url_auth) else None) if auth: if (isinstance(auth, tuple) and (len(auth) == 2)): auth = HTTPBasicAuth(*auth) r = auth(self) self.__dict__.update(r.__dict__) self.prepare_content_length(self.body)
5,152,842,564,660,482,000
Prepares the given HTTP auth data.
src/oci/_vendor/requests/models.py
prepare_auth
LaudateCorpus1/oci-python-sdk
python
def prepare_auth(self, auth, url=''): if (auth is None): url_auth = get_auth_from_url(self.url) auth = (url_auth if any(url_auth) else None) if auth: if (isinstance(auth, tuple) and (len(auth) == 2)): auth = HTTPBasicAuth(*auth) r = auth(self) self.__dict__.update(r.__dict__) self.prepare_content_length(self.body)
def prepare_cookies(self, cookies): 'Prepares the given HTTP cookie data.\n\n This function eventually generates a ``Cookie`` header from the\n given cookies using cookielib. Due to cookielib\'s design, the header\n will not be regenerated if it already exists, meaning this function\n can only be called once for the life of the\n :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls\n to ``prepare_cookies`` will have no actual effect, unless the "Cookie"\n header is removed beforehand.\n ' if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if (cookie_header is not None): self.headers['Cookie'] = cookie_header
-4,880,843,362,105,130,000
Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand.
src/oci/_vendor/requests/models.py
prepare_cookies
LaudateCorpus1/oci-python-sdk
python
def prepare_cookies(self, cookies): 'Prepares the given HTTP cookie data.\n\n This function eventually generates a ``Cookie`` header from the\n given cookies using cookielib. Due to cookielib\'s design, the header\n will not be regenerated if it already exists, meaning this function\n can only be called once for the life of the\n :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls\n to ``prepare_cookies`` will have no actual effect, unless the "Cookie"\n header is removed beforehand.\n ' if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if (cookie_header is not None): self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks): 'Prepares the given hooks.' hooks = (hooks or []) for event in hooks: self.register_hook(event, hooks[event])
-4,515,863,383,951,718,400
Prepares the given hooks.
src/oci/_vendor/requests/models.py
prepare_hooks
LaudateCorpus1/oci-python-sdk
python
def prepare_hooks(self, hooks): hooks = (hooks or []) for event in hooks: self.register_hook(event, hooks[event])
def __bool__(self): 'Returns True if :attr:`status_code` is less than 400.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code, is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' return self.ok
-7,938,503,880,147,866,000
Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``.
src/oci/_vendor/requests/models.py
__bool__
LaudateCorpus1/oci-python-sdk
python
def __bool__(self): 'Returns True if :attr:`status_code` is less than 400.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code, is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' return self.ok
def __nonzero__(self): 'Returns True if :attr:`status_code` is less than 400.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code, is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' return self.ok
4,933,757,067,486,797,000
Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``.
src/oci/_vendor/requests/models.py
__nonzero__
LaudateCorpus1/oci-python-sdk
python
def __nonzero__(self): 'Returns True if :attr:`status_code` is less than 400.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code, is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' return self.ok
def __iter__(self): 'Allows you to use a response as an iterator.' return self.iter_content(128)
-7,277,316,857,547,251,000
Allows you to use a response as an iterator.
src/oci/_vendor/requests/models.py
__iter__
LaudateCorpus1/oci-python-sdk
python
def __iter__(self): return self.iter_content(128)
@property def ok(self): 'Returns True if :attr:`status_code` is less than 400, False if not.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' try: self.raise_for_status() except HTTPError: return False return True
-6,177,503,495,560,946,000
Returns True if :attr:`status_code` is less than 400, False if not. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``.
src/oci/_vendor/requests/models.py
ok
LaudateCorpus1/oci-python-sdk
python
@property def ok(self): 'Returns True if :attr:`status_code` is less than 400, False if not.\n\n This attribute checks if the status code of the response is between\n 400 and 600 to see if there was a client error or a server error. If\n the status code is between 200 and 400, this will return True. This\n is **not** a check to see if the response code is ``200 OK``.\n ' try: self.raise_for_status() except HTTPError: return False return True
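Since ``__bool__`` and ``__nonzero__`` above delegate to ``ok``, a ``Response`` can be used directly in a truth test. A minimal usage sketch (the URL is illustrative):

```python
import requests

r = requests.get("https://example.com")  # illustrative URL
if r:  # invokes Response.__bool__, i.e. r.ok
    print("no error:", r.status_code)
else:
    print("client or server error:", r.status_code)
```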
@property def is_redirect(self): 'True if this Response is a well-formed HTTP redirect that could have\n been processed automatically (by :meth:`Session.resolve_redirects`).\n ' return (('location' in self.headers) and (self.status_code in REDIRECT_STATI))
8,650,224,052,504,523,000
True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`).
src/oci/_vendor/requests/models.py
is_redirect
LaudateCorpus1/oci-python-sdk
python
@property def is_redirect(self): 'True if this Response is a well-formed HTTP redirect that could have\n been processed automatically (by :meth:`Session.resolve_redirects`).\n ' return (('location' in self.headers) and (self.status_code in REDIRECT_STATI))
@property def is_permanent_redirect(self): 'True if this Response one of the permanent versions of redirect.' return (('location' in self.headers) and (self.status_code in (codes.moved_permanently, codes.permanent_redirect)))
6,790,407,169,252,320,000
True if this Response is one of the permanent versions of redirect.
src/oci/_vendor/requests/models.py
is_permanent_redirect
LaudateCorpus1/oci-python-sdk
python
@property def is_permanent_redirect(self): return (('location' in self.headers) and (self.status_code in (codes.moved_permanently, codes.permanent_redirect)))
@property def next(self): 'Returns a PreparedRequest for the next request in a redirect chain, if there is one.' return self._next
6,609,836,077,647,073,000
Returns a PreparedRequest for the next request in a redirect chain, if there is one.
src/oci/_vendor/requests/models.py
next
LaudateCorpus1/oci-python-sdk
python
@property def next(self): return self._next
@property def apparent_encoding(self): 'The apparent encoding, provided by the chardet library.' return chardet.detect(self.content)['encoding']
-1,142,759,535,317,406,500
The apparent encoding, provided by the chardet library.
src/oci/_vendor/requests/models.py
apparent_encoding
LaudateCorpus1/oci-python-sdk
python
@property def apparent_encoding(self): return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False): 'Iterates over the response data. When stream=True is set on the\n request, this avoids reading the content at once into memory for\n large responses. The chunk size is the number of bytes it should\n read into memory. This is not necessarily the length of each item\n returned as decoding can take place.\n\n chunk_size must be of type int or None. A value of None will\n function differently depending on the value of `stream`.\n stream=True will read data as it arrives in whatever size the\n chunks are received. If stream=False, data is returned as\n a single chunk.\n\n If decode_unicode is True, content will be decoded using the best\n available encoding based on the response.\n ' def generate(): if hasattr(self.raw, 'stream'): try: for chunk in self.raw.stream(chunk_size, decode_content=True): (yield chunk) except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) else: while True: chunk = self.raw.read(chunk_size) if (not chunk): break (yield chunk) self._content_consumed = True if (self._content_consumed and isinstance(self._content, bool)): raise StreamConsumedError() elif ((chunk_size is not None) and (not isinstance(chunk_size, int))): raise TypeError(('chunk_size must be an int, it is instead a %s.' % type(chunk_size))) reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = (reused_chunks if self._content_consumed else stream_chunks) if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks
-1,745,230,368,267,707,000
Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response.
src/oci/_vendor/requests/models.py
iter_content
LaudateCorpus1/oci-python-sdk
python
def iter_content(self, chunk_size=1, decode_unicode=False): 'Iterates over the response data. When stream=True is set on the\n request, this avoids reading the content at once into memory for\n large responses. The chunk size is the number of bytes it should\n read into memory. This is not necessarily the length of each item\n returned as decoding can take place.\n\n chunk_size must be of type int or None. A value of None will\n function differently depending on the value of `stream`.\n stream=True will read data as it arrives in whatever size the\n chunks are received. If stream=False, data is returned as\n a single chunk.\n\n If decode_unicode is True, content will be decoded using the best\n available encoding based on the response.\n ' def generate(): if hasattr(self.raw, 'stream'): try: for chunk in self.raw.stream(chunk_size, decode_content=True): (yield chunk) except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) else: while True: chunk = self.raw.read(chunk_size) if (not chunk): break (yield chunk) self._content_consumed = True if (self._content_consumed and isinstance(self._content, bool)): raise StreamConsumedError() elif ((chunk_size is not None) and (not isinstance(chunk_size, int))): raise TypeError(('chunk_size must be an int, it is instead a %s.' % type(chunk_size))) reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = (reused_chunks if self._content_consumed else stream_chunks) if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks
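A common use of ``iter_content`` is streaming a large download to disk without holding it all in memory; a short sketch, assuming ``stream=True`` was passed so the body is fetched lazily:

```python
import requests

# Illustrative URL and filename; 8192-byte chunks are a conventional size.
with requests.get("https://example.com/big.bin", stream=True) as r:
    with open("big.bin", "wb") as fh:
        for chunk in r.iter_content(chunk_size=8192):
            fh.write(chunk)
```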
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): 'Iterates over the response data, one line at a time. When\n stream=True is set on the request, this avoids reading the\n content at once into memory for large responses.\n\n .. note:: This method is not reentrant safe.\n ' pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if (pending is not None): chunk = (pending + chunk) if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if (lines and lines[(- 1)] and chunk and (lines[(- 1)][(- 1)] == chunk[(- 1)])): pending = lines.pop() else: pending = None for line in lines: (yield line) if (pending is not None): (yield pending)
5,187,716,744,981,455,000
Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe.
src/oci/_vendor/requests/models.py
iter_lines
LaudateCorpus1/oci-python-sdk
python
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): 'Iterates over the response data, one line at a time. When\n stream=True is set on the request, this avoids reading the\n content at once into memory for large responses.\n\n .. note:: This method is not reentrant safe.\n ' pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if (pending is not None): chunk = (pending + chunk) if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if (lines and lines[(- 1)] and chunk and (lines[(- 1)][(- 1)] == chunk[(- 1)])): pending = lines.pop() else: pending = None for line in lines: (yield line) if (pending is not None): (yield pending)
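``iter_lines`` builds on ``iter_content``, buffering a partial trailing line in ``pending`` until the next chunk completes it. A usage sketch:

```python
import requests

# Illustrative URL; decode_unicode=True yields str lines instead of bytes.
with requests.get("https://example.com/stream.log", stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        if line:  # skip blank lines between chunks
            print(line)
```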
@property def content(self): 'Content of the response, in bytes.' if (self._content is False): if self._content_consumed: raise RuntimeError('The content for this response was already consumed') if ((self.status_code == 0) or (self.raw is None)): self._content = None else: self._content = (b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'') self._content_consumed = True return self._content
4,100,658,395,437,519,400
Content of the response, in bytes.
src/oci/_vendor/requests/models.py
content
LaudateCorpus1/oci-python-sdk
python
@property def content(self): if (self._content is False): if self._content_consumed: raise RuntimeError('The content for this response was already consumed') if ((self.status_code == 0) or (self.raw is None)): self._content = None else: self._content = (b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'') self._content_consumed = True return self._content
@property def text(self): 'Content of the response, in unicode.\n\n If Response.encoding is None, encoding will be guessed using\n ``chardet``.\n\n The encoding of the response content is determined based solely on HTTP\n headers, following RFC 2616 to the letter. If you can take advantage of\n non-HTTP knowledge to make a better guess at the encoding, you should\n set ``r.encoding`` appropriately before accessing this property.\n ' content = None encoding = self.encoding if (not self.content): return str('') if (self.encoding is None): encoding = self.apparent_encoding try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): content = str(self.content, errors='replace') return content
-7,794,038,974,435,198,000
Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property.
src/oci/_vendor/requests/models.py
text
LaudateCorpus1/oci-python-sdk
python
@property def text(self): 'Content of the response, in unicode.\n\n If Response.encoding is None, encoding will be guessed using\n ``chardet``.\n\n The encoding of the response content is determined based solely on HTTP\n headers, following RFC 2616 to the letter. If you can take advantage of\n non-HTTP knowledge to make a better guess at the encoding, you should\n set ``r.encoding`` appropriately before accessing this property.\n ' content = None encoding = self.encoding if (not self.content): return str('') if (self.encoding is None): encoding = self.apparent_encoding try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): content = str(self.content, errors='replace') return content
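The docstring's advice about non-HTTP knowledge translates to overriding ``encoding`` before touching ``text``; a sketch using the ``apparent_encoding`` property defined above:

```python
import requests

r = requests.get("https://example.com")  # illustrative URL
# If the headers did not declare a charset, fall back to chardet's guess
# before decoding, instead of letting .text pick a default lazily.
if r.encoding is None:
    r.encoding = r.apparent_encoding
print(r.text[:200])
```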
def json(self, **kwargs): 'Returns the json-encoded content of a response, if any.\n\n :param \\*\\*kwargs: Optional arguments that ``json.loads`` takes.\n :raises ValueError: If the response body does not contain valid json.\n ' if ((not self.encoding) and self.content and (len(self.content) > 3)): encoding = guess_json_utf(self.content) if (encoding is not None): try: return complexjson.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: pass return complexjson.loads(self.text, **kwargs)
6,506,800,846,072,340,000
Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises ValueError: If the response body does not contain valid json.
src/oci/_vendor/requests/models.py
json
LaudateCorpus1/oci-python-sdk
python
def json(self, **kwargs): 'Returns the json-encoded content of a response, if any.\n\n :param \\*\\*kwargs: Optional arguments that ``json.loads`` takes.\n :raises ValueError: If the response body does not contain valid json.\n ' if ((not self.encoding) and self.content and (len(self.content) > 3)): encoding = guess_json_utf(self.content) if (encoding is not None): try: return complexjson.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: pass return complexjson.loads(self.text, **kwargs)
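Because ``json`` raises ``ValueError`` on a non-JSON body, callers typically guard the decode; for example:

```python
import requests

r = requests.get("https://example.com/api")  # illustrative URL
try:
    payload = r.json()
except ValueError:
    payload = None  # body was empty or not valid JSON
```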
@property def links(self): 'Returns the parsed header links of the response, if any.' header = self.headers.get('link') l = {} if header: links = parse_header_links(header) for link in links: key = (link.get('rel') or link.get('url')) l[key] = link return l
7,586,059,766,961,026,000
Returns the parsed header links of the response, if any.
src/oci/_vendor/requests/models.py
links
LaudateCorpus1/oci-python-sdk
python
@property def links(self): header = self.headers.get('link') l = {} if header: links = parse_header_links(header) for link in links: key = (link.get('rel') or link.get('url')) l[key] = link return l
def raise_for_status(self): 'Raises :class:`HTTPError`, if one occurred.' http_error_msg = '' if isinstance(self.reason, bytes): try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if (400 <= self.status_code < 500): http_error_msg = (u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)) elif (500 <= self.status_code < 600): http_error_msg = (u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)) if http_error_msg: raise HTTPError(http_error_msg, response=self)
6,135,942,110,397,613,000
Raises :class:`HTTPError`, if one occurred.
src/oci/_vendor/requests/models.py
raise_for_status
LaudateCorpus1/oci-python-sdk
python
def raise_for_status(self): http_error_msg = '' if isinstance(self.reason, bytes): try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if (400 <= self.status_code < 500): http_error_msg = (u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)) elif (500 <= self.status_code < 600): http_error_msg = (u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)) if http_error_msg: raise HTTPError(http_error_msg, response=self)
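``raise_for_status`` is the usual early-exit guard after a request; the raised ``HTTPError`` carries the response via the ``response=self`` keyword above:

```python
import requests

r = requests.get("https://example.com/missing")  # illustrative URL
try:
    r.raise_for_status()
except requests.HTTPError as exc:
    print("failed:", exc)                    # e.g. "404 Client Error: ... for url: ..."
    print("status:", exc.response.status_code)
```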
def close(self): 'Releases the connection back to the pool. Once this method has been\n called the underlying ``raw`` object must not be accessed again.\n\n *Note: Should not normally need to be called explicitly.*\n ' if (not self._content_consumed): self.raw.close() release_conn = getattr(self.raw, 'release_conn', None) if (release_conn is not None): release_conn()
5,014,410,288,615,067,000
Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.*
src/oci/_vendor/requests/models.py
close
LaudateCorpus1/oci-python-sdk
python
def close(self): 'Releases the connection back to the pool. Once this method has been\n called the underlying ``raw`` object must not be accessed again.\n\n *Note: Should not normally need to be called explicitly.*\n ' if (not self._content_consumed): self.raw.close() release_conn = getattr(self.raw, 'release_conn', None) if (release_conn is not None): release_conn()
def update_init_info(module, init_info): 'Update the `_params_init_info` in the module if the value of parameters\n are changed.\n\n Args:\n module (obj:`nn.Module`): The module of PyTorch with a user-defined\n attribute `_params_init_info` which records the initialization\n information.\n init_info (str): The string that describes the initialization.\n ' assert hasattr(module, '_params_init_info'), f'Can not find `_params_init_info` in {module}' for (name, param) in module.named_parameters(): assert (param in module._params_init_info), f'Find a new :obj:`Parameter` named `{name}` during executing the `init_weights` of `{module.__class__.__name__}`. Please do not add or replace parameters during executing the `init_weights`. ' mean_value = param.data.mean() if (module._params_init_info[param]['tmp_mean_value'] != mean_value): module._params_init_info[param]['init_info'] = init_info module._params_init_info[param]['tmp_mean_value'] = mean_value
-2,051,522,352,694,961,400
Update the `_params_init_info` in the module if the values of parameters have changed. Args: module (obj:`nn.Module`): The module of PyTorch with a user-defined attribute `_params_init_info` which records the initialization information. init_info (str): The string that describes the initialization.
deep3dmap/core/utils/weight_init.py
update_init_info
achao2013/DeepRecon
python
def update_init_info(module, init_info): 'Update the `_params_init_info` in the module if the value of parameters\n are changed.\n\n Args:\n module (obj:`nn.Module`): The module of PyTorch with a user-defined\n attribute `_params_init_info` which records the initialization\n information.\n init_info (str): The string that describes the initialization.\n ' assert hasattr(module, '_params_init_info'), f'Can not find `_params_init_info` in {module}' for (name, param) in module.named_parameters(): assert (param in module._params_init_info), f'Find a new :obj:`Parameter` named `{name}` during executing the `init_weights` of `{module.__class__.__name__}`. Please do not add or replace parameters during executing the `init_weights`. ' mean_value = param.data.mean() if (module._params_init_info[param]['tmp_mean_value'] != mean_value): module._params_init_info[param]['init_info'] = init_info module._params_init_info[param]['tmp_mean_value'] = mean_value
def bias_init_with_prob(prior_prob): 'initialize conv/fc bias value according to a given probability value.' bias_init = float((- np.log(((1 - prior_prob) / prior_prob)))) return bias_init
4,357,258,245,550,052,400
Initialize conv/fc bias value according to a given probability value.
deep3dmap/core/utils/weight_init.py
bias_init_with_prob
achao2013/DeepRecon
python
def bias_init_with_prob(prior_prob): bias_init = float((- np.log(((1 - prior_prob) / prior_prob)))) return bias_init
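The formula is the inverse sigmoid (logit) of ``prior_prob``, so a layer initialized with this bias starts out predicting the prior. A quick numeric check:

```python
import numpy as np

prior_prob = 0.01
bias = float(-np.log((1 - prior_prob) / prior_prob))
print(bias)                      # ≈ -4.595, i.e. -log(99)
print(1 / (1 + np.exp(-bias)))   # sigmoid(bias) recovers 0.01
```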
def initialize(module, init_cfg): 'Initialize a module.\n\n Args:\n module (``torch.nn.Module``): the module will be initialized.\n init_cfg (dict | list[dict]): initialization configuration dict to\n define initializer. OpenMMLab has implemented 6 initializers\n including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,\n ``Kaiming``, and ``Pretrained``.\n Example:\n >>> module = nn.Linear(2, 3, bias=True)\n >>> init_cfg = dict(type=\'Constant\', layer=\'Linear\', val =1 , bias =2)\n >>> initialize(module, init_cfg)\n\n >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))\n >>> # define key ``\'layer\'`` for initializing layer with different\n >>> # configuration\n >>> init_cfg = [dict(type=\'Constant\', layer=\'Conv1d\', val=1),\n dict(type=\'Constant\', layer=\'Linear\', val=2)]\n >>> initialize(module, init_cfg)\n\n >>> # define key``\'override\'`` to initialize some specific part in\n >>> # module\n >>> class FooNet(nn.Module):\n >>> def __init__(self):\n >>> super().__init__()\n >>> self.feat = nn.Conv2d(3, 16, 3)\n >>> self.reg = nn.Conv2d(16, 10, 3)\n >>> self.cls = nn.Conv2d(16, 5, 3)\n >>> model = FooNet()\n >>> init_cfg = dict(type=\'Constant\', val=1, bias=2, layer=\'Conv2d\',\n >>> override=dict(type=\'Constant\', name=\'reg\', val=3, bias=4))\n >>> initialize(model, init_cfg)\n\n >>> model = ResNet(depth=50)\n >>> # Initialize weights with the pretrained model.\n >>> init_cfg = dict(type=\'Pretrained\',\n checkpoint=\'torchvision://resnet50\')\n >>> initialize(model, init_cfg)\n\n >>> # Initialize weights of a sub-module with the specific part of\n >>> # a pretrained model by using "prefix".\n >>> url = \'http://download.openmmlab.com/mmdetection/v2.0/retinanet/\' >>> \'retinanet_r50_fpn_1x_coco/\' >>> \'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth\'\n >>> init_cfg = dict(type=\'Pretrained\',\n checkpoint=url, prefix=\'backbone.\')\n ' if (not isinstance(init_cfg, (dict, list))): raise TypeError(f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}') if isinstance(init_cfg, dict): init_cfg = [init_cfg] for cfg in init_cfg: cp_cfg = copy.deepcopy(cfg) override = cp_cfg.pop('override', None) _initialize(module, cp_cfg) if (override is not None): cp_cfg.pop('layer', None) _initialize_override(module, override, cp_cfg) else: pass
-8,268,513,196,977,106,000
Initialize a module. Args: module (``torch.nn.Module``): the module will be initialized. init_cfg (dict | list[dict]): initialization configuration dict to define initializer. OpenMMLab has implemented 6 initializers including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``, ``Kaiming``, and ``Pretrained``. Example: >>> module = nn.Linear(2, 3, bias=True) >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2) >>> initialize(module, init_cfg) >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2)) >>> # define key ``'layer'`` for initializing layer with different >>> # configuration >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1), dict(type='Constant', layer='Linear', val=2)] >>> initialize(module, init_cfg) >>> # define key``'override'`` to initialize some specific part in >>> # module >>> class FooNet(nn.Module): >>> def __init__(self): >>> super().__init__() >>> self.feat = nn.Conv2d(3, 16, 3) >>> self.reg = nn.Conv2d(16, 10, 3) >>> self.cls = nn.Conv2d(16, 5, 3) >>> model = FooNet() >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d', >>> override=dict(type='Constant', name='reg', val=3, bias=4)) >>> initialize(model, init_cfg) >>> model = ResNet(depth=50) >>> # Initialize weights with the pretrained model. >>> init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') >>> initialize(model, init_cfg) >>> # Initialize weights of a sub-module with the specific part of >>> # a pretrained model by using "prefix". >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/' >>> 'retinanet_r50_fpn_1x_coco/' >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth' >>> init_cfg = dict(type='Pretrained', checkpoint=url, prefix='backbone.')
deep3dmap/core/utils/weight_init.py
initialize
achao2013/DeepRecon
python
def initialize(module, init_cfg): 'Initialize a module.\n\n Args:\n module (``torch.nn.Module``): the module will be initialized.\n init_cfg (dict | list[dict]): initialization configuration dict to\n define initializer. OpenMMLab has implemented 6 initializers\n including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,\n ``Kaiming``, and ``Pretrained``.\n Example:\n >>> module = nn.Linear(2, 3, bias=True)\n >>> init_cfg = dict(type=\'Constant\', layer=\'Linear\', val =1 , bias =2)\n >>> initialize(module, init_cfg)\n\n >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))\n >>> # define key ``\'layer\'`` for initializing layer with different\n >>> # configuration\n >>> init_cfg = [dict(type=\'Constant\', layer=\'Conv1d\', val=1),\n dict(type=\'Constant\', layer=\'Linear\', val=2)]\n >>> initialize(module, init_cfg)\n\n >>> # define key``\'override\'`` to initialize some specific part in\n >>> # module\n >>> class FooNet(nn.Module):\n >>> def __init__(self):\n >>> super().__init__()\n >>> self.feat = nn.Conv2d(3, 16, 3)\n >>> self.reg = nn.Conv2d(16, 10, 3)\n >>> self.cls = nn.Conv2d(16, 5, 3)\n >>> model = FooNet()\n >>> init_cfg = dict(type=\'Constant\', val=1, bias=2, layer=\'Conv2d\',\n >>> override=dict(type=\'Constant\', name=\'reg\', val=3, bias=4))\n >>> initialize(model, init_cfg)\n\n >>> model = ResNet(depth=50)\n >>> # Initialize weights with the pretrained model.\n >>> init_cfg = dict(type=\'Pretrained\',\n checkpoint=\'torchvision://resnet50\')\n >>> initialize(model, init_cfg)\n\n >>> # Initialize weights of a sub-module with the specific part of\n >>> # a pretrained model by using "prefix".\n >>> url = \'http://download.openmmlab.com/mmdetection/v2.0/retinanet/\' >>> \'retinanet_r50_fpn_1x_coco/\' >>> \'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth\'\n >>> init_cfg = dict(type=\'Pretrained\',\n checkpoint=url, prefix=\'backbone.\')\n ' if (not isinstance(init_cfg, (dict, list))): raise TypeError(f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}') if isinstance(init_cfg, dict): init_cfg = [init_cfg] for cfg in init_cfg: cp_cfg = copy.deepcopy(cfg) override = cp_cfg.pop('override', None) _initialize(module, cp_cfg) if (override is not None): cp_cfg.pop('layer', None) _initialize_override(module, override, cp_cfg) else: pass
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n Modified from\n https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py\n\n Args:\n tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.\n mean (float): the mean of the normal distribution.\n std (float): the standard deviation of the normal distribution.\n a (float): the minimum cutoff value.\n b (float): the maximum cutoff value.\n ' return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-7,831,877,177,259,050,000
Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Modified from https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py Args: tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`. mean (float): the mean of the normal distribution. std (float): the standard deviation of the normal distribution. a (float): the minimum cutoff value. b (float): the maximum cutoff value.
deep3dmap/core/utils/weight_init.py
trunc_normal_
achao2013/DeepRecon
python
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n\n Modified from\n https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py\n\n Args:\n tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.\n mean (float): the mean of the normal distribution.\n std (float): the standard deviation of the normal distribution.\n a (float): the minimum cutoff value.\n b (float): the maximum cutoff value.\n ' return _no_grad_trunc_normal_(tensor, mean, std, a, b)
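A usage sketch for ``trunc_normal_``; the tensor shape and ``std=0.02`` are illustrative choices (common in transformer-style initializations), not something this module prescribes:

```python
import torch

w = torch.empty(768, 768)
trunc_normal_(w, mean=0.0, std=0.02, a=-2.0, b=2.0)
# All samples land inside the absolute cutoffs [a, b].
assert w.min().item() >= -2.0 and w.max().item() <= 2.0
```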
def _parse_inputs(self): 'A number of the command line options expect precisely one or two files.\n ' nr_input_files = len(self.inputs.input_files) for n in self.input_spec.bool_or_const_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if isinstance(t, bool): if (nr_input_files != 2): raise ValueError(('Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files))) elif isinstance(t, float): if (nr_input_files != 1): raise ValueError(('Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files))) else: raise ValueError(('Argument should be a bool or const, but got: %s' % t)) for n in self.input_spec.single_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (nr_input_files != 1): raise ValueError(('Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files))) for n in self.input_spec.two_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (nr_input_files != 2): raise ValueError(('Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files))) for n in self.input_spec.n_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (not (nr_input_files >= 1)): raise ValueError(('Due to the %s option we expected at least one file but input_files is of length %d' % (n, nr_input_files))) return super(Math, self)._parse_inputs()
-7,755,718,070,246,585,000
A number of the command line options expect precisely one or two files.
nipype/interfaces/minc/minc.py
_parse_inputs
Inria-Visages/nipype
python
def _parse_inputs(self): '\n ' nr_input_files = len(self.inputs.input_files) for n in self.input_spec.bool_or_const_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if isinstance(t, bool): if (nr_input_files != 2): raise ValueError(('Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files))) elif isinstance(t, float): if (nr_input_files != 1): raise ValueError(('Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files))) else: raise ValueError(('Argument should be a bool or const, but got: %s' % t)) for n in self.input_spec.single_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (nr_input_files != 1): raise ValueError(('Due to the %s option we expected 1 file but input_files is of length %d' % (n, nr_input_files))) for n in self.input_spec.two_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (nr_input_files != 2): raise ValueError(('Due to the %s option we expected 2 files but input_files is of length %d' % (n, nr_input_files))) for n in self.input_spec.n_volume_traits: t = self.inputs.__getattribute__(n) if isdefined(t): if (not (nr_input_files >= 1)): raise ValueError(('Due to the %s option we expected at least one file but input_files is of length %d' % (n, nr_input_files))) return super(Math, self)._parse_inputs()
def patch_settings(): 'Merge settings with global cms settings, so all required attributes\n will exist. Never override, just append non existing settings.\n \n Also check for setting inconsistencies if settings.DEBUG\n ' if patch_settings.ALREADY_PATCHED: return patch_settings.ALREADY_PATCHED = True if getattr(settings, 'CMS_MODERATOR', False): warnings.warn('CMS_MODERATOR will be removed and replaced in django CMS 2.4!', CMSDeprecationWarning) from cms.conf import global_settings pre_patch() for attr in dir(global_settings): if ((attr == attr.upper()) and (not hasattr(settings, attr))): setattr(settings._wrapped, attr, getattr(global_settings, attr)) post_patch() if settings.DEBUG: post_patch_check()
-1,568,719,852,966,767,000
Merge settings with global cms settings, so all required attributes will exist. Never override, just append non-existing settings. Also check for setting inconsistencies if settings.DEBUG.
cms/conf/__init__.py
patch_settings
tonatos/django-cms
python
def patch_settings(): 'Merge settings with global cms settings, so all required attributes\n will exist. Never override, just append non existing settings.\n \n Also check for setting inconsistencies if settings.DEBUG\n ' if patch_settings.ALREADY_PATCHED: return patch_settings.ALREADY_PATCHED = True if getattr(settings, 'CMS_MODERATOR', False): warnings.warn('CMS_MODERATOR will be removed and replaced in django CMS 2.4!', CMSDeprecationWarning) from cms.conf import global_settings pre_patch() for attr in dir(global_settings): if ((attr == attr.upper()) and (not hasattr(settings, attr))): setattr(settings._wrapped, attr, getattr(global_settings, attr)) post_patch() if settings.DEBUG: post_patch_check()
def list_database_account_keys(account_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableListDatabaseAccountKeysResult: '\n The access keys for the given database account.\n\n\n :param str account_name: Cosmos DB database account name.\n :param str resource_group_name: Name of an Azure resource group.\n ' __args__ = dict() __args__['accountName'] = account_name __args__['resourceGroupName'] = resource_group_name if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value return AwaitableListDatabaseAccountKeysResult(primary_master_key=__ret__.primary_master_key, primary_readonly_master_key=__ret__.primary_readonly_master_key, secondary_master_key=__ret__.secondary_master_key, secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
-3,061,702,169,182,575,600
The access keys for the given database account. :param str account_name: Cosmos DB database account name. :param str resource_group_name: Name of an Azure resource group.
sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py
list_database_account_keys
polivbr/pulumi-azure-native
python
def list_database_account_keys(account_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableListDatabaseAccountKeysResult: '\n The access keys for the given database account.\n\n\n :param str account_name: Cosmos DB database account name.\n :param str resource_group_name: Name of an Azure resource group.\n ' __args__ = dict() __args__['accountName'] = account_name __args__['resourceGroupName'] = resource_group_name if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value return AwaitableListDatabaseAccountKeysResult(primary_master_key=__ret__.primary_master_key, primary_readonly_master_key=__ret__.primary_readonly_master_key, secondary_master_key=__ret__.secondary_master_key, secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
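A hedged usage sketch inside a Pulumi program; the account and resource-group names are hypothetical:

```python
import pulumi

keys = list_database_account_keys(
    account_name="my-cosmos-account",         # hypothetical name
    resource_group_name="my-resource-group",  # hypothetical name
)
# The keys are credentials; mark them secret before exporting.
pulumi.export("primaryMasterKey", pulumi.Output.secret(keys.primary_master_key))
```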
@property @pulumi.getter(name='primaryMasterKey') def primary_master_key(self) -> str: '\n Base 64 encoded value of the primary read-write key.\n ' return pulumi.get(self, 'primary_master_key')
-6,444,721,121,725,809,000
Base 64 encoded value of the primary read-write key.
sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py
primary_master_key
polivbr/pulumi-azure-native
python
@property @pulumi.getter(name='primaryMasterKey') def primary_master_key(self) -> str: '\n \n ' return pulumi.get(self, 'primary_master_key')
@property @pulumi.getter(name='primaryReadonlyMasterKey') def primary_readonly_master_key(self) -> str: '\n Base 64 encoded value of the primary read-only key.\n ' return pulumi.get(self, 'primary_readonly_master_key')
2,857,989,781,514,118,000
Base 64 encoded value of the primary read-only key.
sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py
primary_readonly_master_key
polivbr/pulumi-azure-native
python
@property @pulumi.getter(name='primaryReadonlyMasterKey') def primary_readonly_master_key(self) -> str: '\n \n ' return pulumi.get(self, 'primary_readonly_master_key')
@property @pulumi.getter(name='secondaryMasterKey') def secondary_master_key(self) -> str: '\n Base 64 encoded value of the secondary read-write key.\n ' return pulumi.get(self, 'secondary_master_key')
4,442,451,844,350,200,000
Base 64 encoded value of the secondary read-write key.
sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py
secondary_master_key
polivbr/pulumi-azure-native
python
@property @pulumi.getter(name='secondaryMasterKey') def secondary_master_key(self) -> str: '\n \n ' return pulumi.get(self, 'secondary_master_key')
@property @pulumi.getter(name='secondaryReadonlyMasterKey') def secondary_readonly_master_key(self) -> str: '\n Base 64 encoded value of the secondary read-only key.\n ' return pulumi.get(self, 'secondary_readonly_master_key')
1,901,077,849,702,143,000
Base 64 encoded value of the secondary read-only key.
sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py
secondary_readonly_master_key
polivbr/pulumi-azure-native
python
@property @pulumi.getter(name='secondaryReadonlyMasterKey') def secondary_readonly_master_key(self) -> str: '\n \n ' return pulumi.get(self, 'secondary_readonly_master_key')
def inference_data(): '\n Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF\n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) n_states = x.shape[(- 1)] edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) pw_horz = ((- 1) * np.eye(n_states)) (xx, yy) = np.indices(pw_horz.shape) pw_horz[(xx > yy)] = 1 pw_vert = ((- 1) * np.eye(n_states)) pw_vert[(xx != yy)] = 1 pw_vert *= 10 edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :], edge_list[0].shape[0], axis=0) edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :], edge_list[1].shape[0], axis=0) edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical]) res = lp_general_graph((- x.reshape((- 1), n_states)), edges, edge_weights) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), n_states)], [edges], [edge_features]) y = y.ravel() return (x, y, pw_horz, pw_vert, res, n_states)
3,998,788,590,160,578,600
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
pystruct/tests/test_models/test_node_type_edge_feature_graph_crf.py
inference_data
LemonLison/pystruct
python
def inference_data(): '\n \n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) n_states = x.shape[(- 1)] edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) pw_horz = ((- 1) * np.eye(n_states)) (xx, yy) = np.indices(pw_horz.shape) pw_horz[(xx > yy)] = 1 pw_vert = ((- 1) * np.eye(n_states)) pw_vert[(xx != yy)] = 1 pw_vert *= 10 edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :], edge_list[0].shape[0], axis=0) edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :], edge_list[1].shape[0], axis=0) edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical]) res = lp_general_graph((- x.reshape((- 1), n_states)), edges, edge_weights) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), n_states)], [edges], [edge_features]) y = y.ravel() return (x, y, pw_horz, pw_vert, res, n_states)
def test_joint_feature_discrete(): '\n Testing with a single type of nodes. Must de aw well as EdgeFeatureGraphCRF\n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), 3)], [edges], [edge_features]) y_flat = y.ravel() if True: crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]) joint_feature_y = crf.joint_feature(x, y_flat) assert_equal(joint_feature_y.shape, (crf.size_joint_feature,)) n_states = crf.l_n_states[0] n_features = crf.l_n_features[0] (pw_joint_feature_horz, pw_joint_feature_vert) = joint_feature_y[(n_states * n_features):].reshape(2, n_states, n_states) assert_array_equal(pw_joint_feature_vert, np.diag([(9 * 4), (9 * 4), (9 * 4)])) vert_joint_feature = np.diag([(10 * 3), (10 * 3), (10 * 3)]) vert_joint_feature[(0, 1)] = 10 vert_joint_feature[(1, 2)] = 10 assert_array_equal(pw_joint_feature_horz, vert_joint_feature)
-6,468,331,002,279,117,000
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
pystruct/tests/test_models/test_node_type_edge_feature_graph_crf.py
test_joint_feature_discrete
LemonLison/pystruct
python
def test_joint_feature_discrete(): '\n \n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), 3)], [edges], [edge_features]) y_flat = y.ravel() if True: crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]) joint_feature_y = crf.joint_feature(x, y_flat) assert_equal(joint_feature_y.shape, (crf.size_joint_feature,)) n_states = crf.l_n_states[0] n_features = crf.l_n_features[0] (pw_joint_feature_horz, pw_joint_feature_vert) = joint_feature_y[(n_states * n_features):].reshape(2, n_states, n_states) assert_array_equal(pw_joint_feature_vert, np.diag([(9 * 4), (9 * 4), (9 * 4)])) vert_joint_feature = np.diag([(10 * 3), (10 * 3), (10 * 3)]) vert_joint_feature[(0, 1)] = 10 vert_joint_feature[(1, 2)] = 10 assert_array_equal(pw_joint_feature_horz, vert_joint_feature)
def test_joint_feature_continuous(): '\n Testing with a single type of nodes. Must de aw well as EdgeFeatureGraphCRF\n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) n_states = x.shape[(- 1)] edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), 3)], [edges], [edge_features]) y = y.ravel() pw_horz = ((- 1) * np.eye(n_states)) (xx, yy) = np.indices(pw_horz.shape) pw_horz[(xx > yy)] = 1 pw_vert = ((- 1) * np.eye(n_states)) pw_vert[(xx != yy)] = 1 pw_vert *= 10 if True: crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]) w = np.hstack([np.eye(3).ravel(), (- pw_horz.ravel()), (- pw_vert.ravel())]) crf.initialize(x, y) y_pred = crf.inference(x, w, relaxed=True) joint_feature_y = crf.joint_feature(x, y_pred) assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
52,569,035,070,579,250
Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
pystruct/tests/test_models/test_node_type_edge_feature_graph_crf.py
test_joint_feature_continuous
LemonLison/pystruct
python
def test_joint_feature_continuous(): '\n \n ' (X, Y) = generate_blocks_multinomial(noise=2, n_samples=1, seed=1) (x, y) = (X[0], Y[0]) n_states = x.shape[(- 1)] edge_list = make_grid_edges(x, 4, return_lists=True) edges = np.vstack(edge_list) edge_features = edge_list_to_features(edge_list) x = ([x.reshape((- 1), 3)], [edges], [edge_features]) y = y.ravel() pw_horz = ((- 1) * np.eye(n_states)) (xx, yy) = np.indices(pw_horz.shape) pw_horz[(xx > yy)] = 1 pw_vert = ((- 1) * np.eye(n_states)) pw_vert[(xx != yy)] = 1 pw_vert *= 10 if True: crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]]) w = np.hstack([np.eye(3).ravel(), (- pw_horz.ravel()), (- pw_vert.ravel())]) crf.initialize(x, y) y_pred = crf.inference(x, w, relaxed=True) joint_feature_y = crf.joint_feature(x, y_pred) assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
def run_recipe(recipe, args, config): 'Given a recipe, calls the appropriate query and returns the result.\n\n The provided recipe name is used to make a call to the modules.\n\n :param str recipe: name of the recipe to be run.\n :param list args: remainder arguments that were unparsed.\n :param Configuration config: config object.\n :returns: string\n ' modname = '.recipes.{}'.format(recipe) mod = importlib.import_module(modname, package='adr') try: output = mod.run(args, config) except MissingDataError: return "ActiveData didn't return any data." if isinstance(config.fmt, string_types): fmt = all_formatters[config.fmt] log.debug('Result:') return fmt(output)
-761,608,809,952,332,300
Given a recipe, calls the appropriate query and returns the result. The provided recipe name is used to make a call to the modules. :param str recipe: name of the recipe to be run. :param list args: remainder arguments that were unparsed. :param Configuration config: config object. :returns: string
adr/recipe.py
run_recipe
gmierz/active-data-recipes
python
def run_recipe(recipe, args, config): 'Given a recipe, calls the appropriate query and returns the result.\n\n The provided recipe name is used to make a call to the modules.\n\n :param str recipe: name of the recipe to be run.\n :param list args: remainder arguments that were unparsed.\n :param Configuration config: config object.\n :returns: string\n ' modname = '.recipes.{}'.format(recipe) mod = importlib.import_module(modname, package='adr') try: output = mod.run(args, config) except MissingDataError: return "ActiveData didn't return any data." if isinstance(config.fmt, string_types): fmt = all_formatters[config.fmt] log.debug('Result:') return fmt(output)
def get_chromsizes(bwpath): '\n TODO: replace this with negspy\n\n Also, return NaNs from any missing chromosomes in bbi.fetch\n\n ' chromsizes = bbi.chromsizes(bwpath) chromosomes = natsorted(chromsizes.keys()) chrom_series = pd.Series(chromsizes)[chromosomes] return chrom_series
5,693,233,413,802,413,000
TODO: replace this with negspy. Also, return NaNs from any missing chromosomes in bbi.fetch
clodius/tiles/bigwig.py
get_chromsizes
4dn-dcic/clodius
python
def get_chromsizes(bwpath): '\n TODO: replace this with negspy\n\n Also, return NaNs from any missing chromosomes in bbi.fetch\n\n ' chromsizes = bbi.chromsizes(bwpath) chromosomes = natsorted(chromsizes.keys()) chrom_series = pd.Series(chromsizes)[chromosomes] return chrom_series
def tileset_info(bwpath, chromsizes=None): "\n Get the tileset info for a bigWig file\n\n Parameters\n ----------\n bwpath: string\n The path to the bigwig file from which to retrieve data\n chromsizes: [[chrom, size],...]\n A list of chromosome sizes associated with this tileset.\n Typically passed in to specify in what order data from\n the bigwig should be returned.\n\n Returns\n -------\n tileset_info: {'min_pos': [],\n 'max_pos': [],\n 'tile_size': 1024,\n 'max_zoom': 7\n }\n " TILE_SIZE = 1024 if (chromsizes is None): chromsizes = get_chromsizes(bwpath) chromsizes_list = [] for (chrom, size) in chromsizes.iteritems(): chromsizes_list += [[chrom, int(size)]] else: chromsizes_list = chromsizes min_tile_cover = np.ceil((sum([int(c[1]) for c in chromsizes_list]) / TILE_SIZE)) max_zoom = int(np.ceil(np.log2(min_tile_cover))) tileset_info = {'min_pos': [0], 'max_pos': [(TILE_SIZE * (2 ** max_zoom))], 'max_width': (TILE_SIZE * (2 ** max_zoom)), 'tile_size': TILE_SIZE, 'max_zoom': max_zoom, 'chromsizes': chromsizes_list, 'aggregation_modes': aggregation_modes, 'range_modes': range_modes} return tileset_info
6,060,560,380,858,904,000
Get the tileset info for a bigWig file Parameters ---------- bwpath: string The path to the bigwig file from which to retrieve data chromsizes: [[chrom, size],...] A list of chromosome sizes associated with this tileset. Typically passed in to specify in what order data from the bigwig should be returned. Returns ------- tileset_info: {'min_pos': [], 'max_pos': [], 'tile_size': 1024, 'max_zoom': 7 }
clodius/tiles/bigwig.py
tileset_info
4dn-dcic/clodius
python
def tileset_info(bwpath, chromsizes=None): "\n Get the tileset info for a bigWig file\n\n Parameters\n ----------\n bwpath: string\n The path to the bigwig file from which to retrieve data\n chromsizes: [[chrom, size],...]\n A list of chromosome sizes associated with this tileset.\n Typically passed in to specify in what order data from\n the bigwig should be returned.\n\n Returns\n -------\n tileset_info: {'min_pos': [],\n 'max_pos': [],\n 'tile_size': 1024,\n 'max_zoom': 7\n }\n " TILE_SIZE = 1024 if (chromsizes is None): chromsizes = get_chromsizes(bwpath) chromsizes_list = [] for (chrom, size) in chromsizes.iteritems(): chromsizes_list += [[chrom, int(size)]] else: chromsizes_list = chromsizes min_tile_cover = np.ceil((sum([int(c[1]) for c in chromsizes_list]) / TILE_SIZE)) max_zoom = int(np.ceil(np.log2(min_tile_cover))) tileset_info = {'min_pos': [0], 'max_pos': [(TILE_SIZE * (2 ** max_zoom))], 'max_width': (TILE_SIZE * (2 ** max_zoom)), 'tile_size': TILE_SIZE, 'max_zoom': max_zoom, 'chromsizes': chromsizes_list, 'aggregation_modes': aggregation_modes, 'range_modes': range_modes} return tileset_info
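To make the zoom arithmetic concrete: for a roughly human-sized genome (~3.1 Gb, an assumed figure; the real chromsizes come from the bigWig file), the computation above yields ``max_zoom = 22``:

```python
import numpy as np

TILE_SIZE = 1024
genome_length = 3_100_000_000                        # assumed ~3.1 Gb genome
min_tile_cover = np.ceil(genome_length / TILE_SIZE)  # 3,027,344 base-resolution tiles
max_zoom = int(np.ceil(np.log2(min_tile_cover)))     # 22
max_width = TILE_SIZE * 2 ** max_zoom                # 4,294,967,296 bp
print(max_zoom, max_width)
```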
def tiles(bwpath, tile_ids, chromsizes_map={}, chromsizes=None): '\n Generate tiles from a bigwig file.\n\n Parameters\n ----------\n tileset: tilesets.models.Tileset object\n The tileset that the tile ids should be retrieved from\n tile_ids: [str,...]\n A list of tile_ids (e.g. xyx.0.0) identifying the tiles\n to be retrieved\n chromsizes_map: {uid: []}\n A set of chromsizes listings corresponding to the parameters of the\n tile_ids. To be used if a chromsizes id is passed in with the tile id\n with the `|cos:id` tag in the tile id\n chromsizes: [[chrom, size],...]\n A 2d array containing chromosome names and sizes. Overrides the\n chromsizes in chromsizes_map\n\n Returns\n -------\n tile_list: [(tile_id, tile_data),...]\n A list of tile_id, tile_data tuples\n ' TILE_SIZE = 1024 generated_tiles = [] for tile_id in tile_ids: tile_option_parts = tile_id.split('|')[1:] tile_no_options = tile_id.split('|')[0] tile_id_parts = tile_no_options.split('.') tile_position = list(map(int, tile_id_parts[1:3])) return_value = (tile_id_parts[3] if (len(tile_id_parts) > 3) else 'mean') aggregation_mode = (return_value if (return_value in aggregation_modes) else 'mean') range_mode = (return_value if (return_value in range_modes) else None) tile_options = dict([o.split(':') for o in tile_option_parts]) if chromsizes: chromnames = [c[0] for c in chromsizes] chromlengths = [int(c[1]) for c in chromsizes] chromsizes_to_use = pd.Series(chromlengths, index=chromnames) else: chromsizes_id = None if ('cos' in tile_options): chromsizes_id = tile_options['cos'] if (chromsizes_id in chromsizes_map): chromsizes_to_use = chromsizes_map[chromsizes_id] else: chromsizes_to_use = None zoom_level = tile_position[0] tile_pos = tile_position[1] if (chromsizes_to_use is None): chromsizes_to_use = get_chromsizes(bwpath) max_depth = get_quadtree_depth(chromsizes_to_use) tile_size = (TILE_SIZE * (2 ** (max_depth - zoom_level))) start_pos = (tile_pos * tile_size) end_pos = (start_pos + tile_size) dense = get_bigwig_tile(bwpath, zoom_level, start_pos, end_pos, chromsizes_to_use, aggregation_mode=aggregation_mode, range_mode=range_mode) tile_value = hgfo.format_dense_tile(dense) generated_tiles += [(tile_id, tile_value)] return generated_tiles
-4,122,499,748,940,527,000
Generate tiles from a bigwig file. Parameters ---------- bwpath: string The path to the bigwig file from which tiles should be retrieved tile_ids: [str,...] A list of tile_ids (e.g. xyx.0.0) identifying the tiles to be retrieved chromsizes_map: {uid: []} A set of chromsizes listings corresponding to the parameters of the tile_ids. To be used if a chromsizes id is passed in with the tile id with the `|cos:id` tag in the tile id chromsizes: [[chrom, size],...] A 2d array containing chromosome names and sizes. Overrides the chromsizes in chromsizes_map Returns ------- tile_list: [(tile_id, tile_data),...] A list of tile_id, tile_data tuples
clodius/tiles/bigwig.py
tiles
4dn-dcic/clodius
python
def tiles(bwpath, tile_ids, chromsizes_map={}, chromsizes=None): '\n Generate tiles from a bigwig file.\n\n Parameters\n ----------\n bwpath: string\n The path to the bigwig file from which tiles\n should be retrieved\n tile_ids: [str,...]\n A list of tile_ids (e.g. xyx.0.0) identifying the tiles\n to be retrieved\n chromsizes_map: {uid: []}\n A set of chromsizes listings corresponding to the parameters of the\n tile_ids. To be used if a chromsizes id is passed in with the tile id\n with the `|cos:id` tag in the tile id\n chromsizes: [[chrom, size],...]\n A 2d array containing chromosome names and sizes. Overrides the\n chromsizes in chromsizes_map\n\n Returns\n -------\n tile_list: [(tile_id, tile_data),...]\n A list of tile_id, tile_data tuples\n ' TILE_SIZE = 1024 generated_tiles = [] for tile_id in tile_ids: tile_option_parts = tile_id.split('|')[1:] tile_no_options = tile_id.split('|')[0] tile_id_parts = tile_no_options.split('.') tile_position = list(map(int, tile_id_parts[1:3])) return_value = (tile_id_parts[3] if (len(tile_id_parts) > 3) else 'mean') aggregation_mode = (return_value if (return_value in aggregation_modes) else 'mean') range_mode = (return_value if (return_value in range_modes) else None) tile_options = dict([o.split(':') for o in tile_option_parts]) if chromsizes: chromnames = [c[0] for c in chromsizes] chromlengths = [int(c[1]) for c in chromsizes] chromsizes_to_use = pd.Series(chromlengths, index=chromnames) else: chromsizes_id = None if ('cos' in tile_options): chromsizes_id = tile_options['cos'] if (chromsizes_id in chromsizes_map): chromsizes_to_use = chromsizes_map[chromsizes_id] else: chromsizes_to_use = None zoom_level = tile_position[0] tile_pos = tile_position[1] if (chromsizes_to_use is None): chromsizes_to_use = get_chromsizes(bwpath) max_depth = get_quadtree_depth(chromsizes_to_use) tile_size = (TILE_SIZE * (2 ** (max_depth - zoom_level))) start_pos = (tile_pos * tile_size) end_pos = (start_pos + tile_size) dense = get_bigwig_tile(bwpath, zoom_level, start_pos, end_pos, chromsizes_to_use, aggregation_mode=aggregation_mode, range_mode=range_mode) tile_value = hgfo.format_dense_tile(dense) generated_tiles += [(tile_id, tile_value)] return generated_tiles
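A small sketch of the tile-id decoding that tiles() performs; the id 'mydb.2.5.mean|cos:hg19' and the quadtree depth of 7 are hypothetical values chosen for illustration.

tile_id = 'mydb.2.5.mean|cos:hg19'  # hypothetical tile id

tile_options = dict(o.split(':') for o in tile_id.split('|')[1:])  # {'cos': 'hg19'}
parts = tile_id.split('|')[0].split('.')
zoom_level, tile_pos = int(parts[1]), int(parts[2])
aggregation = parts[3] if len(parts) > 3 else 'mean'

TILE_SIZE = 1024
max_depth = 7  # assumed quadtree depth for this sketch
tile_size = TILE_SIZE * (2 ** (max_depth - zoom_level))
start_pos, end_pos = tile_pos * tile_size, (tile_pos + 1) * tile_size
print(tile_options, aggregation, zoom_level, (start_pos, end_pos))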
def chromsizes(filename): '\n Get a list of chromosome sizes from this [presumably] bigwig\n file.\n\n Parameters:\n -----------\n filename: string\n The filename of the bigwig file\n\n Returns\n -------\n chromsizes: [(name:string, size:int), ...]\n An ordered list of chromosome names and sizes\n ' try: chrom_series = get_chromsizes(filename) data = [] for (chrom, size) in chrom_series.iteritems(): data.append([chrom, size]) return data except Exception as ex: logger.error(ex) raise Exception('Error loading chromsizes from bigwig file: {}'.format(ex))
2,550,086,058,640,435,700
Get a list of chromosome sizes from this [presumably] bigwig file. Parameters: ----------- filename: string The filename of the bigwig file Returns ------- chromsizes: [(name:string, size:int), ...] An ordered list of chromosome names and sizes
clodius/tiles/bigwig.py
chromsizes
4dn-dcic/clodius
python
def chromsizes(filename): '\n Get a list of chromosome sizes from this [presumably] bigwig\n file.\n\n Parameters:\n -----------\n filename: string\n The filename of the bigwig file\n\n Returns\n -------\n chromsizes: [(name:string, size:int), ...]\n An ordered list of chromosome names and sizes\n ' try: chrom_series = get_chromsizes(filename) data = [] for (chrom, size) in chrom_series.iteritems(): data.append([chrom, size]) return data except Exception as ex: logger.error(ex) raise Exception('Error loading chromsizes from bigwig file: {}'.format(ex))
def read_args(): 'Reads command line arguments.\n\n Returns: Parsed arguments.' parser = argparse.ArgumentParser() parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv') parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km') return parser.parse_args()
6,337,914,842,948,013,000
Reads command line arguments. Returns: Parsed arguments.
orbitdeterminator/kep_determination/ellipse_fit.py
read_args
Alexandros23Kazantzidis/orbitdeterminator
python
def read_args(): 'Reads command line arguments.\n\n Returns: Parsed arguments.' parser = argparse.ArgumentParser() parser.add_argument('-f', '--file', type=str, help='path to .csv file', default='orbit.csv') parser.add_argument('-u', '--units', type=str, help='units of distance (m or km)', default='km') return parser.parse_args()
def plane_err(data, coeffs): 'Calculates the total squared error of the data wrt a plane.\n\n The data should be a list of points. coeffs is an array of\n 3 elements - the coefficients a,b,c in the plane equation\n ax+by+cz = 0.\n\n Arguments:\n data: A numpy array of points.\n coeffs: The coefficients of the plane ax+by+cz=0.\n\n Returns: The total squared error wrt the plane defined by ax+by+cz = 0.' (a, b, c) = coeffs return (np.sum(((((a * data[:, 0]) + (b * data[:, 1])) + (c * data[:, 2])) ** 2)) / (((a ** 2) + (b ** 2)) + (c ** 2)))
-5,432,732,931,796,351,000
Calculates the total squared error of the data wrt a plane. The data should be a list of points. coeffs is an array of 3 elements - the coefficients a,b,c in the plane equation ax+by+cz = 0. Arguments: data: A numpy array of points. coeffs: The coefficients of the plane ax+by+cz=0. Returns: The total squared error wrt the plane defined by ax+by+cz = 0.
orbitdeterminator/kep_determination/ellipse_fit.py
plane_err
Alexandros23Kazantzidis/orbitdeterminator
python
def plane_err(data, coeffs): 'Calculates the total squared error of the data wrt a plane.\n\n The data should be a list of points. coeffs is an array of\n 3 elements - the coefficients a,b,c in the plane equation\n ax+by+cz = 0.\n\n Arguments:\n data: A numpy array of points.\n coeffs: The coefficients of the plane ax+by+cz=0.\n\n Returns: The total squared error wrt the plane defined by ax+by+cz = 0.' (a, b, c) = coeffs return (np.sum(((((a * data[:, 0]) + (b * data[:, 1])) + (c * data[:, 2])) ** 2)) / (((a ** 2) + (b ** 2)) + (c ** 2)))
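Since the distance from a point (x, y, z) to the plane ax+by+cz=0 is |ax+by+cz|/sqrt(a^2+b^2+c^2), the expression above is exactly the sum of squared point-to-plane distances. A quick check with the plane z = 0:

import numpy as np

def plane_err(data, coeffs):
    (a, b, c) = coeffs
    return np.sum((a * data[:, 0] + b * data[:, 1] + c * data[:, 2]) ** 2) / (a ** 2 + b ** 2 + c ** 2)

pts = np.array([[1.0, 2.0, 0.5], [0.0, 0.0, 3.0]])
# distances to the plane z = 0 are just |z|, so the error is 0.5**2 + 3.0**2
assert np.isclose(plane_err(pts, (0.0, 0.0, 1.0)), 9.25)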
def project_to_plane(points, coeffs): 'Projects points onto a plane.\n\n Projects a list of points onto the plane ax+by+cz=0,\n where a,b,c are elements of coeffs.\n\n Arguments:\n coeffs: The coefficients of the plane ax+by+cz=0.\n points: A numpy array of points.\n\n Returns:\n A list of projected points.' (a, b, c) = coeffs proj_mat = [[((b ** 2) + (c ** 2)), ((- a) * b), ((- a) * c)], [((- a) * b), ((a ** 2) + (c ** 2)), ((- b) * c)], [((- a) * c), ((- b) * c), ((a ** 2) + (b ** 2))]] return (np.matmul(points, proj_mat) / (((a ** 2) + (b ** 2)) + (c ** 2)))
7,388,186,402,707,117,000
Projects points onto a plane. Projects a list of points onto the plane ax+by+cz=0, where a,b,c are elements of coeffs. Arguments: coeffs: The coefficients of the plane ax+by+cz=0. points: A numpy array of points. Returns: A list of projected points.
orbitdeterminator/kep_determination/ellipse_fit.py
project_to_plane
Alexandros23Kazantzidis/orbitdeterminator
python
def project_to_plane(points, coeffs): 'Projects points onto a plane.\n\n Projects a list of points onto the plane ax+by+cz=0,\n where a,b,c are elements of coeffs.\n\n Arguments:\n coeffs: The coefficients of the plane ax+by+cz=0.\n points: A numpy array of points.\n\n Returns:\n A list of projected points.' (a, b, c) = coeffs proj_mat = [[((b ** 2) + (c ** 2)), ((- a) * b), ((- a) * c)], [((- a) * b), ((a ** 2) + (c ** 2)), ((- b) * c)], [((- a) * c), ((- b) * c), ((a ** 2) + (b ** 2))]] return (np.matmul(points, proj_mat) / (((a ** 2) + (b ** 2)) + (c ** 2)))
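The 3x3 matrix above is the orthogonal-projection matrix (a^2+b^2+c^2)I - nn^T, divided by a^2+b^2+c^2, where n = (a, b, c) is the plane normal; every projected point therefore satisfies the plane equation. A quick check:

import numpy as np

def project_to_plane(points, coeffs):
    (a, b, c) = coeffs
    proj_mat = [[b**2 + c**2, -a*b, -a*c],
                [-a*b, a**2 + c**2, -b*c],
                [-a*c, -b*c, a**2 + b**2]]
    return np.matmul(points, proj_mat) / (a**2 + b**2 + c**2)

pts = np.array([[1.0, 2.0, 3.0], [-4.0, 0.5, 2.0]])
proj = project_to_plane(pts, (1.0, 1.0, 1.0))
assert np.allclose(proj @ np.array([1.0, 1.0, 1.0]), 0.0)  # x + y + z = 0 holds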
def conv_to_2D(points, x, y): 'Finds coordinates of points in a plane wrt a basis.\n\n Given a list of points in a plane, and a basis of the plane,\n this function returns the coordinates of those points\n wrt this basis.\n\n Arguments:\n points: A numpy array of points.\n x: One vector of the basis.\n y: Another vector of the basis.\n\n Returns:\n Coordinates of the points wrt the basis [x,y].' mat = [x[0:2], y[0:2]] mat_inv = np.linalg.inv(mat) coords = np.matmul(points[:, 0:2], mat_inv) return coords
-1,297,365,435,381,717,800
Finds coordinates of points in a plane wrt a basis. Given a list of points in a plane, and a basis of the plane, this function returns the coordinates of those points wrt this basis. Arguments: points: A numpy array of points. x: One vector of the basis. y: Another vector of the basis. Returns: Coordinates of the points wrt the basis [x,y].
orbitdeterminator/kep_determination/ellipse_fit.py
conv_to_2D
Alexandros23Kazantzidis/orbitdeterminator
python
def conv_to_2D(points, x, y): 'Finds coordinates of points in a plane wrt a basis.\n\n Given a list of points in a plane, and a basis of the plane,\n this function returns the coordinates of those points\n wrt this basis.\n\n Arguments:\n points: A numpy array of points.\n x: One vector of the basis.\n y: Another vector of the basis.\n\n Returns:\n Coordinates of the points wrt the basis [x,y].' mat = [x[0:2], y[0:2]] mat_inv = np.linalg.inv(mat) coords = np.matmul(points[:, 0:2], mat_inv) return coords
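conv_to_2D inverts only the 2x2 matrix built from the first two components of the basis vectors, so it assumes that the projection of the basis onto the xy-plane is invertible. A sketch with a point of known coordinates:

import numpy as np

def conv_to_2D(points, x, y):
    mat_inv = np.linalg.inv([x[0:2], y[0:2]])
    return np.matmul(points[:, 0:2], mat_inv)

x = np.array([1.0, 1.0, 0.0])
y = np.array([-1.0, 1.0, 0.0])
p = 2.0 * x + 3.0 * y                      # coordinates (2, 3) by construction
assert np.allclose(conv_to_2D(p[np.newaxis, :], x, y), [[2.0, 3.0]])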
def cart_to_pol(points): 'Converts a list of cartesian coordinates into polar ones.\n\n Arguments:\n points: The list of points in the format [x,y].\n\n Returns:\n A list of polar coordinates in the format [radius,angle].' pol = np.empty(points.shape) pol[:, 0] = np.sqrt(((points[:, 0] ** 2) + (points[:, 1] ** 2))) pol[:, 1] = np.arctan2(points[:, 1], points[:, 0]) return pol
-8,661,686,831,716,060,000
Converts a list of cartesian coordinates into polar ones. Arguments: points: The list of points in the format [x,y]. Returns: A list of polar coordinates in the format [radius,angle].
orbitdeterminator/kep_determination/ellipse_fit.py
cart_to_pol
Alexandros23Kazantzidis/orbitdeterminator
python
def cart_to_pol(points): 'Converts a list of cartesian coordinates into polar ones.\n\n Arguments:\n points: The list of points in the format [x,y].\n\n Returns:\n A list of polar coordinates in the format [radius,angle].' pol = np.empty(points.shape) pol[:, 0] = np.sqrt(((points[:, 0] ** 2) + (points[:, 1] ** 2))) pol[:, 1] = np.arctan2(points[:, 1], points[:, 0]) return pol
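A quick check of the conversion; arctan2 keeps the quadrant, so angles fall in (-pi, pi]:

import numpy as np

def cart_to_pol(points):
    pol = np.empty(points.shape)
    pol[:, 0] = np.sqrt(points[:, 0] ** 2 + points[:, 1] ** 2)
    pol[:, 1] = np.arctan2(points[:, 1], points[:, 0])
    return pol

pol = cart_to_pol(np.array([[1.0, 0.0], [0.0, 2.0], [-1.0, -1.0]]))
assert np.allclose(pol[:, 0], [1.0, 2.0, np.sqrt(2.0)])
assert np.allclose(pol[:, 1], [0.0, np.pi / 2, -3 * np.pi / 4])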
def ellipse_err(polar_coords, params): 'Calculates the total squared error of the data wrt an ellipse.\n\n params is a 3 element array used to define an ellipse.\n It contains 3 elements a,e, and t0.\n\n a is the semi-major axis\n e is the eccentricity\n t0 is the angle of the major axis wrt the x-axis.\n\n These 3 elements define an ellipse with one focus at origin.\n Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))\n\n The function calculates r for every theta in the data.\n It then takes the square of the difference and sums it.\n\n Arguments:\n polar_coords: A list of polar coordinates in the format [radius,angle].\n params: The array [a,e,t0].\n\n Returns:\n The total squared error of the data wrt the ellipse.' (a, e, t0) = params dem = (1 + (e * np.cos((polar_coords[:, 1] - t0)))) num = (a * (1 - (e ** 2))) r = np.divide(num, dem) err = np.sum(((r - polar_coords[:, 0]) ** 2)) return err
-2,029,023,196,265,276,000
Calculates the total squared error of the data wrt an ellipse. params is a 3 element array used to define an ellipse. It contains 3 elements a,e, and t0. a is the semi-major axis e is the eccentricity t0 is the angle of the major axis wrt the x-axis. These 3 elements define an ellipse with one focus at origin. Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0)) The function calculates r for every theta in the data. It then takes the square of the difference and sums it. Arguments: polar_coords: A list of polar coordinates in the format [radius,angle]. params: The array [a,e,t0]. Returns: The total squared error of the data wrt the ellipse.
orbitdeterminator/kep_determination/ellipse_fit.py
ellipse_err
Alexandros23Kazantzidis/orbitdeterminator
python
def ellipse_err(polar_coords, params): 'Calculates the total squared error of the data wrt an ellipse.\n\n params is a 3 element array used to define an ellipse.\n It contains 3 elements a,e, and t0.\n\n a is the semi-major axis\n e is the eccentricity\n t0 is the angle of the major axis wrt the x-axis.\n\n These 3 elements define an ellipse with one focus at origin.\n Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))\n\n The function calculates r for every theta in the data.\n It then takes the square of the difference and sums it.\n\n Arguments:\n polar_coords: A list of polar coordinates in the format [radius,angle].\n params: The array [a,e,t0].\n\n Returns:\n The total squared error of the data wrt the ellipse.' (a, e, t0) = params dem = (1 + (e * np.cos((polar_coords[:, 1] - t0)))) num = (a * (1 - (e ** 2))) r = np.divide(num, dem) err = np.sum(((r - polar_coords[:, 0]) ** 2)) return err
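Points sampled exactly on the focal-polar ellipse r = a(1-e^2)/(1+e cos(t-t0)) should give zero error, while other parameters should not; a quick check with arbitrary illustrative parameters:

import numpy as np

def ellipse_err(polar_coords, params):
    (a, e, t0) = params
    r = a * (1 - e ** 2) / (1 + e * np.cos(polar_coords[:, 1] - t0))
    return np.sum((r - polar_coords[:, 0]) ** 2)

a, e, t0 = 2.0, 0.5, 0.3
theta = np.linspace(0.0, 2.0 * np.pi, 50)
r = a * (1 - e ** 2) / (1 + e * np.cos(theta - t0))
polar_coords = np.column_stack([r, theta])
assert np.isclose(ellipse_err(polar_coords, (a, e, t0)), 0.0)  # exact fit
assert ellipse_err(polar_coords, (a, 0.0, t0)) > 0.0           # a circle does not fit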
def configure_app(app): 'Multiple app configurations' Compress(app)
-8,213,292,863,896,477,000
Multiple app configurations
config.py
configure_app
Rwothoromo/Flask-Okta
python
def configure_app(app): Compress(app)
def _param_check(): ' Receive parameter check ' msg = '' if (not HachiUtil.LocalAddress().is_localaddress(RecvParams.ip.get())): msg += '- The specified listening IP address does not exist on any interface.\n' if (not (0 <= RecvParams.port.get() <= 65535)): msg += '- Specify the port number in the range 0 to 65535.\n' return msg
-3,071,456,578,103,253,500
Receive parameter check
controller/RxController.py
_param_check
kinformation/hachi
python
def _param_check(): ' ' msg = if (not HachiUtil.LocalAddress().is_localaddress(RecvParams.ip.get())): msg += '- The specified listening IP address does not exist on any interface.\n' if (not (0 <= RecvParams.port.get() <= 65535)): msg += '- Specify the port number in the range 0 to 65535.\n' return msg
def recv_tcp_start(self): ' TCP packet receive thread ' self.th_recv = RecvTcpThread.RecvTcpThread(RecvParams(), self.shareObj) self.th_recv.setDaemon(True) self.th_recv.start()
-1,761,130,429,100,993,800
TCP packet receive thread
controller/RxController.py
recv_tcp_start
kinformation/hachi
python
def recv_tcp_start(self): ' ' self.th_recv = RecvTcpThread.RecvTcpThread(RecvParams(), self.shareObj) self.th_recv.setDaemon(True) self.th_recv.start()
def recv_udp_start(self): ' UDP packet receive thread ' self.th_recv = RecvUdpThread.RecvUdpThread(RecvParams(), self.shareObj) self.th_recv.setDaemon(True) self.th_recv.start()
8,602,522,311,085,771,000
UDP packet receive thread
controller/RxController.py
recv_udp_start
kinformation/hachi
python
def recv_udp_start(self): ' ' self.th_recv = RecvUdpThread.RecvUdpThread(RecvParams(), self.shareObj) self.th_recv.setDaemon(True) self.th_recv.start()
def monitor_start(self): ' Packet receive monitoring thread ' self.th_monitor = RecvMonitorThread.RecvMonitorThread(MonitorParams(), self.shareObj) self.th_monitor.setDaemon(True) self.th_monitor.start()
-5,020,833,981,410,120,000
Packet receive monitoring thread
controller/RxController.py
monitor_start
kinformation/hachi
python
def monitor_start(self): ' ' self.th_monitor = RecvMonitorThread.RecvMonitorThread(MonitorParams(), self.shareObj) self.th_monitor.setDaemon(True) self.th_monitor.start()
@pytest.fixture(scope='module', autouse=True) def create_kmc_db(): '\n Set up tests and clean up after.\n ' kmer_len = 17 memory = 2 cutoff_min = 1 sig_len = 9 reads_src = 'input.fastq' reads = ('GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG', 'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG', 'GTCGATGCATCGATGCTGATGCTGCTGTGCTAGTAGCGTCTGAGGGCTA') _save_reads_as_fastq(reads, reads_src) kmers = _cout_kmers(reads, kmer_len) absent_kmers = _generate_not_existing_kmers(kmers, kmer_len) _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src) result = {'kmers': kmers, 'kmer_len': kmer_len, 'sig_len': sig_len, 'absent_kmers': absent_kmers} (yield result) os.remove(reads_src) os.remove('kmc_db.kmc_pre') os.remove('kmc_db.kmc_suf')
1,694,867,008,165,371,600
Set up tests and clean up after.
tests/py_kmc_api/test_py_kmc_file.py
create_kmc_db
refresh-bio/KMC
python
@pytest.fixture(scope='module', autouse=True) def create_kmc_db(): '\n \n ' kmer_len = 17 memory = 2 cutoff_min = 1 sig_len = 9 reads_src = 'input.fastq' reads = ('GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG', 'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG', 'GTCGATGCATCGATGCTGATGCTGCTGTGCTAGTAGCGTCTGAGGGCTA') _save_reads_as_fastq(reads, reads_src) kmers = _cout_kmers(reads, kmer_len) absent_kmers = _generate_not_existing_kmers(kmers, kmer_len) _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src) result = {'kmers': kmers, 'kmer_len': kmer_len, 'sig_len': sig_len, 'absent_kmers': absent_kmers} (yield result) os.remove(reads_src) os.remove('kmc_db.kmc_pre') os.remove('kmc_db.kmc_suf')
def _cout_kmers(reads, kmer_len): ' Simple k-mer counting routine. ' kmers = {} for read in reads: for start in range(0, ((len(read) - kmer_len) + 1)): kmer = read[start:(start + kmer_len)] if ('N' in kmer): continue rev = kmer_utils.rev_comp(kmer) if (rev < kmer): kmer = rev if (kmer in kmers.keys()): kmers[kmer] += 1 else: kmers[kmer] = 1 return kmers
292,329,010,136,147,000
Simple k-mer counting routine.
tests/py_kmc_api/test_py_kmc_file.py
_cout_kmers
refresh-bio/KMC
python
def _cout_kmers(reads, kmer_len): ' ' kmers = {} for read in reads: for start in range(0, ((len(read) - kmer_len) + 1)): kmer = read[start:(start + kmer_len)] if ('N' in kmer): continue rev = kmer_utils.rev_comp(kmer) if (rev < kmer): kmer = rev if (kmer in kmers.keys()): kmers[kmer] += 1 else: kmers[kmer] = 1 return kmers
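The routine canonicalizes each window to the lexicographically smaller of the k-mer and its reverse complement, and skips windows containing 'N'. A self-contained sketch; rev_comp below is an assumption about what kmer_utils.rev_comp does (reverse the sequence and complement each base):

def rev_comp(kmer):
    comp = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(comp[b] for b in reversed(kmer))

def count_kmers(read, k):
    kmers = {}
    for start in range(len(read) - k + 1):
        kmer = read[start:start + k]
        if 'N' in kmer:
            continue                          # skip ambiguous bases
        kmer = min(kmer, rev_comp(kmer))      # canonical form
        kmers[kmer] = kmers.get(kmer, 0) + 1
    return kmers

print(count_kmers('ACGTNACGT', 3))  # {'ACG': 4}: ACG and CGT are reverse complements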
def _save_reads_as_fastq(reads, file_name): ' Save reads from input to file named file_name. ' file = open(file_name, 'w') for read in reads: file.write('@TEST\n') file.write((read + '\n')) file.write('+TEST\n') file.write((('I' * len(read)) + '\n')) file.close()
7,954,480,761,115,530,000
Save reads from input to file named file_name.
tests/py_kmc_api/test_py_kmc_file.py
_save_reads_as_fastq
refresh-bio/KMC
python
def _save_reads_as_fastq(reads, file_name): ' ' file = open(file_name, 'w') for read in reads: file.write('@TEST\n') file.write((read + '\n')) file.write('+TEST\n') file.write((('I' * len(read)) + '\n')) file.close()
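Each read becomes a standard four-line FASTQ record: header, sequence, separator, and a constant quality string ('I' encodes Phred 40 in the Sanger +33 convention). The resulting record for one read:

read = 'GTCGATGCAT'
record = '@TEST\n' + read + '\n+TEST\n' + 'I' * len(read) + '\n'
print(record, end='')
# @TEST
# GTCGATGCAT
# +TEST
# IIIIIIIIII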
def _generate_not_existing_kmers(kmers, kmer_len): ' Generate k-mers that are not present in the database.\n\n :kmers: existing k-mers\n :kmer_len: length of k-mers\n\n ' def increment_kmer(kmer, start): ' Increments k-mer to the next lexicographical value.\n\n Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). ' def replace_char(string, pos, new_char): ' Create new string with character at :pos: changed to :new_char:. ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):]) for i in range(start, len(kmer)): if (kmer[((- 1) - i)] == 'A'): return replace_char(kmer, ((- 1) - i), 'C') if (kmer[((- 1) - i)] == 'C'): return replace_char(kmer, ((- 1) - i), 'G') if (kmer[((- 1) - i)] == 'T'): return replace_char(kmer, ((- 1) - i), 'T') kmer = replace_char(kmer, ((- 1) - i), 'T') return kmer absent_kmers = [] for i in range(0, kmer_len): for kmer_str in kmers.keys(): inc_kmer = increment_kmer(kmer_str, i) if (not (inc_kmer in kmers.keys())): absent_kmers.append(inc_kmer) return absent_kmers
-3,914,537,680,329,714,700
Generate k-mers that are not present in the database. :kmers: existing k-mers :kmer_len: length of k-mers
tests/py_kmc_api/test_py_kmc_file.py
_generate_not_existing_kmers
refresh-bio/KMC
python
def _generate_not_existing_kmers(kmers, kmer_len): ' Generate k-mers that are not present in the database.\n\n :kmers: existing k-mers\n :kmer_len: length of k-mers\n\n ' def increment_kmer(kmer, start): ' Increments k-mer to the next lexicographical value.\n\n Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). ' def replace_char(string, pos, new_char): ' Create new string with character at :pos: changed to :new_char:. ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):]) for i in range(start, len(kmer)): if (kmer[((- 1) - i)] == 'A'): return replace_char(kmer, ((- 1) - i), 'C') if (kmer[((- 1) - i)] == 'C'): return replace_char(kmer, ((- 1) - i), 'G') if (kmer[((- 1) - i)] == 'T'): return replace_char(kmer, ((- 1) - i), 'T') kmer = replace_char(kmer, ((- 1) - i), 'T') return kmer absent_kmers = [] for i in range(0, kmer_len): for kmer_str in kmers.keys(): inc_kmer = increment_kmer(kmer_str, i) if (not (inc_kmer in kmers.keys())): absent_kmers.append(inc_kmer) return absent_kmers
def _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src): ' Runs kmc. ' if (init_sys_path.is_linux() or init_sys_path.is_mac()): kmc_path = os.path.join(os.path.dirname(__file__), '../../bin/kmc') elif init_sys_path.is_windows(): kmc_path = os.path.join(os.path.dirname(__file__), '../../x64/Release/kmer_counter.exe') if init_sys_path.is_mac(): resource.setrlimit(resource.RLIMIT_NOFILE, (2048, 2048)) subprocess.call([kmc_path, '-ci{}'.format(cutoff_min), '-k{}'.format(kmer_len), '-m{}'.format(memory), '-p{}'.format(sig_len), reads_src, 'kmc_db', '.'])
6,122,891,625,812,580,000
Runs kmc.
tests/py_kmc_api/test_py_kmc_file.py
_run_kmc
refresh-bio/KMC
python
def _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src): ' ' if (init_sys_path.is_linux() or init_sys_path.is_mac()): kmc_path = os.path.join(os.path.dirname(__file__), '../../bin/kmc') elif init_sys_path.is_windows(): kmc_path = os.path.join(os.path.dirname(__file__), '../../x64/Release/kmer_counter.exe') if init_sys_path.is_mac(): resource.setrlimit(resource.RLIMIT_NOFILE, (2048, 2048)) subprocess.call([kmc_path, '-ci{}'.format(cutoff_min), '-k{}'.format(kmer_len), '-m{}'.format(memory), '-p{}'.format(sig_len), reads_src, 'kmc_db', '.'])
def _open_for_listing(): ' Open kmc database for listing and check if opened successfully. ' kmc_file = pka.KMCFile() assert kmc_file.OpenForListing('kmc_db') return kmc_file
3,280,609,107,479,147,500
Open kmc database for listing and check if opened successfully.
tests/py_kmc_api/test_py_kmc_file.py
_open_for_listing
refresh-bio/KMC
python
def _open_for_listing(): ' ' kmc_file = pka.KMCFile() assert kmc_file.OpenForListing('kmc_db') return kmc_file
def _open_for_ra(): ' Open kmc database for random access and check if opened successfully. ' kmc_file = pka.KMCFile() assert kmc_file.OpenForRA('kmc_db') return kmc_file
7,697,411,584,319,123,000
Open kmc database for random access and check if opened successfully.
tests/py_kmc_api/test_py_kmc_file.py
_open_for_ra
refresh-bio/KMC
python
def _open_for_ra(): ' ' kmc_file = pka.KMCFile() assert kmc_file.OpenForRA('kmc_db') return kmc_file
def test_info(create_kmc_db): '\n Test if some fields in the object returned from Info are set properly.\n\n ' pattern = create_kmc_db kmc_file = _open_for_listing() info = kmc_file.Info() assert (info.kmer_length == pattern['kmer_len']) assert (info.mode == 0) assert (info.counter_size == 1) assert (info.signature_len == pattern['sig_len']) assert (info.min_count == 1) assert info.both_strands assert (info.total_kmers == len(pattern['kmers']))
-3,972,196,797,885,729,000
Test if some fields in the object returned from Info are set properly.
tests/py_kmc_api/test_py_kmc_file.py
test_info
refresh-bio/KMC
python
def test_info(create_kmc_db): '\n \n\n ' pattern = create_kmc_db kmc_file = _open_for_listing() info = kmc_file.Info() assert (info.kmer_length == pattern['kmer_len']) assert (info.mode == 0) assert (info.counter_size == 1) assert (info.signature_len == pattern['sig_len']) assert (info.min_count == 1) assert info.both_strands assert (info.total_kmers == len(pattern['kmers']))
def test_kmc_file_next_kmer(create_kmc_db): ' Test if all counted k-mers are returned by KMC API using NextKmer method. ' pattern = create_kmc_db['kmers'] kmc_file = _open_for_listing() counter = pka.Count() kmer = pka.KmerAPI(create_kmc_db['kmer_len']) res = {} while kmc_file.ReadNextKmer(kmer, counter): res[str(kmer)] = counter.value assert (res == pattern)
-5,991,943,377,933,929,000
Test if all counted k-mers are returned by KMC API using NextKmer method.
tests/py_kmc_api/test_py_kmc_file.py
test_kmc_file_next_kmer
refresh-bio/KMC
python
def test_kmc_file_next_kmer(create_kmc_db): ' ' pattern = create_kmc_db['kmers'] kmc_file = _open_for_listing() counter = pka.Count() kmer = pka.KmerAPI(create_kmc_db['kmer_len']) res = {} while kmc_file.ReadNextKmer(kmer, counter): res[str(kmer)] = counter.value assert (res == pattern)
def test_get_counters_for_read(create_kmc_db): ' Test case for GetCountersForRead method of KMCFile. ' kmers = create_kmc_db['kmers'] read = 'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCGTAAACGACGATCAGTCATGGTCGAG' pattern = [] kmer_len = create_kmc_db['kmer_len'] for i in range(0, ((len(read) - kmer_len) + 1)): kmer = read[i:(i + kmer_len)] if ('N' in kmer): pattern.append(0) continue rev = kmer_utils.rev_comp(kmer) if (rev < kmer): kmer = rev if (not (kmer in kmers.keys())): pattern.append(0) else: pattern.append(kmers[kmer]) kmc_file = _open_for_ra() res = pka.CountVec() kmc_file.GetCountersForRead(read, res) assert (res.value == pattern)
-1,974,169,297,877,745,200
Test case for GetCountersForRead method of KMCFile.
tests/py_kmc_api/test_py_kmc_file.py
test_get_counters_for_read
refresh-bio/KMC
python
def test_get_counters_for_read(create_kmc_db): ' ' kmers = create_kmc_db['kmers'] read = 'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCGTAAACGACGATCAGTCATGGTCGAG' pattern = [] kmer_len = create_kmc_db['kmer_len'] for i in range(0, ((len(read) - kmer_len) + 1)): kmer = read[i:(i + kmer_len)] if ('N' in kmer): pattern.append(0) continue rev = kmer_utils.rev_comp(kmer) if (rev < kmer): kmer = rev if (not (kmer in kmers.keys())): pattern.append(0) else: pattern.append(kmers[kmer]) kmc_file = _open_for_ra() res = pka.CountVec() kmc_file.GetCountersForRead(read, res) assert (res.value == pattern)
def test_check_kmer(create_kmc_db): '\n Test case for CheckKmer method.\n\n Check that all k-mers from the input are present in the database and\n that k-mers not present in the input are absent from the output.\n ' kmers = create_kmc_db['kmers'] kmer_len = create_kmc_db['kmer_len'] kmer = pka.KmerAPI(kmer_len) counter = pka.Count() kmc_file = _open_for_ra() for (kmer_str, count) in kmers.items(): kmer.from_string(kmer_str) assert kmc_file.CheckKmer(kmer, counter) assert (counter.value == count) absent_kmers = create_kmc_db['absent_kmers'] for kmer_str in absent_kmers: kmer.from_string(kmer_str) assert (not kmc_file.CheckKmer(kmer, counter))
-785,399,526,982,835,100
Test case for CheckKmer method. Check that all k-mers from the input are present in the database and that k-mers not present in the input are absent from the output.
tests/py_kmc_api/test_py_kmc_file.py
test_check_kmer
refresh-bio/KMC
python
def test_check_kmer(create_kmc_db): '\n Test case for CheckKmer method.\n\n Check that all k-mers from the input are present in the database and\n that k-mers not present in the input are absent from the output.\n ' kmers = create_kmc_db['kmers'] kmer_len = create_kmc_db['kmer_len'] kmer = pka.KmerAPI(kmer_len) counter = pka.Count() kmc_file = _open_for_ra() for (kmer_str, count) in kmers.items(): kmer.from_string(kmer_str) assert kmc_file.CheckKmer(kmer, counter) assert (counter.value == count) absent_kmers = create_kmc_db['absent_kmers'] for kmer_str in absent_kmers: kmer.from_string(kmer_str) assert (not kmc_file.CheckKmer(kmer, counter))
def increment_kmer(kmer, start): ' Increments k-mer to the next lexicographical value.\n\n Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). ' def replace_char(string, pos, new_char): ' Create new string with character at :pos: changed to :new_char:. ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):]) for i in range(start, len(kmer)): if (kmer[((- 1) - i)] == 'A'): return replace_char(kmer, ((- 1) - i), 'C') if (kmer[((- 1) - i)] == 'C'): return replace_char(kmer, ((- 1) - i), 'G') if (kmer[((- 1) - i)] == 'T'): return replace_char(kmer, ((- 1) - i), 'T') kmer = replace_char(kmer, ((- 1) - i), 'T') return kmer
-6,267,665,744,541,755,000
Increments k-mer to the next lexicographical value. Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol).
tests/py_kmc_api/test_py_kmc_file.py
increment_kmer
refresh-bio/KMC
python
def increment_kmer(kmer, start): ' Increments k-mer to the next lexicographical value.\n\n Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). ' def replace_char(string, pos, new_char): ' Create new string with character at :pos: changed to :new_char:. ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):]) for i in range(start, len(kmer)): if (kmer[((- 1) - i)] == 'A'): return replace_char(kmer, ((- 1) - i), 'C') if (kmer[((- 1) - i)] == 'C'): return replace_char(kmer, ((- 1) - i), 'G') if (kmer[((- 1) - i)] == 'T'): return replace_char(kmer, ((- 1) - i), 'T') kmer = replace_char(kmer, ((- 1) - i), 'T') return kmer
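For reference, a cleaned-up sketch of a next-in-lexicographic-order increment over the alphabet A < C < G < T with a proper carry; this is the idea increment_kmer is built around, though the source version's fall-through behavior differs slightly:

NEXT = {'A': 'C', 'C': 'G', 'G': 'T'}

def next_kmer(kmer):
    chars = list(kmer)
    for i in range(len(chars) - 1, -1, -1):
        if chars[i] in NEXT:
            chars[i] = NEXT[chars[i]]
            return ''.join(chars)
        chars[i] = 'A'          # 'T' wraps to 'A' and carries left
    return ''.join(chars)       # all-'T' input wraps around to all-'A'

assert next_kmer('ACA') == 'ACC'
assert next_kmer('ACT') == 'AGA'  # carry past the trailing 'T'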
def replace_char(string, pos, new_char): ' Create new string with character at :pos: changed to :new_char:. ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):])
-4,285,610,178,083,164,000
Create new string with character at :pos: changed to :new_char:.
tests/py_kmc_api/test_py_kmc_file.py
replace_char
refresh-bio/KMC
python
def replace_char(string, pos, new_char): ' ' if (pos < 0): pos = (len(string) + pos) return ((string[:pos] + new_char) + string[(pos + 1):])
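Python strings are immutable, so the helper rebuilds the string around the changed position after normalizing a negative index. Usage:

def replace_char(string, pos, new_char):
    if pos < 0:
        pos = len(string) + pos
    return string[:pos] + new_char + string[pos + 1:]

assert replace_char('ACGT', 1, 'T') == 'ATGT'
assert replace_char('ACGT', -1, 'A') == 'ACGA'  # -1 addresses the last base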
def leakyrelu(x, leak=0.01): '\n LeakyReLU activation function\n Args:\n x (Tensor): input\n leak (float): slope for x<0\n\n Returns:\n Tensor\n ' f1 = (0.5 * (1 + leak)) f2 = (0.5 * (1 - leak)) return ((f1 * x) + (f2 * tf.abs(x)))
6,906,050,861,386,286,000
LeakyReLU activation function Args: x (Tensor): input leak (float): slope for x<0 Returns: Tensor
algorithm/BST/leakyrelu.py
leakyrelu
tangxyw/RecAlgorithm
python
def leakyrelu(x, leak=0.01): '\n LeakyReLU activation function\n Args:\n x (Tensor): input\n leak (float): slope for x<0\n\n Returns:\n Tensor\n ' f1 = (0.5 * (1 + leak)) f2 = (0.5 * (1 - leak)) return ((f1 * x) + (f2 * tf.abs(x)))
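The identity behind the formula: 0.5(1+leak)x + 0.5(1-leak)|x| equals x for x >= 0 and leak*x for x < 0. A NumPy check (standing in for TensorFlow; the arithmetic is the same):

import numpy as np

def leakyrelu(x, leak=0.01):
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    return f1 * x + f2 * np.abs(x)

x = np.array([-2.0, -0.5, 0.0, 3.0])
assert np.allclose(leakyrelu(x), np.where(x < 0, 0.01 * x, x))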
def read_novel_info(self): 'Get novel title, author, cover, etc.' logger.debug('Visiting %s', self.novel_url) soup = self.get_soup((self.novel_url + '?waring=1')) self.novel_title = soup.find('div', {'class': 'manga-detail'}).find('h1').text logger.info('Novel title: %s', self.novel_title) self.novel_cover = self.absolute_url(soup.find('div', {'class': 'manga-detail'}).find('img')['src']) logger.info('Novel cover: %s', self.novel_cover) author = soup.find('div', {'class': 'detail-info'}).find('a').text.split(',') if (len(author) == 2): self.novel_author = (((author[0] + ' (') + author[1]) + ')') else: self.novel_author = ' '.join(author) logger.info('Novel author: %s', self.novel_author) chapters = soup.find('div', {'class': 'manga-detailchapter'}).findAll('a', title=True) chapters.reverse() for a in chapters: for span in a.findAll('span'): span.extract() for x in chapters: chap_id = (len(self.chapters) + 1) if ((len(self.chapters) % 100) == 0): vol_id = ((chap_id // 100) + 1) vol_title = ('Volume ' + str(vol_id)) self.volumes.append({'id': vol_id, 'title': vol_title}) self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(x['href']), 'title': (x['title'] or ('Chapter %d' % chap_id))})
-1,875,111,746,532,289,300
Get novel title, author, cover, etc.
sources/novelall.py
read_novel_info
BorgSquared/lightnovel-crawler
python
def read_novel_info(self): logger.debug('Visiting %s', self.novel_url) soup = self.get_soup((self.novel_url + '?waring=1')) self.novel_title = soup.find('div', {'class': 'manga-detail'}).find('h1').text logger.info('Novel title: %s', self.novel_title) self.novel_cover = self.absolute_url(soup.find('div', {'class': 'manga-detail'}).find('img')['src']) logger.info('Novel cover: %s', self.novel_cover) author = soup.find('div', {'class': 'detail-info'}).find('a').text.split(',') if (len(author) == 2): self.novel_author = (((author[0] + ' (') + author[1]) + ')') else: self.novel_author = ' '.join(author) logger.info('Novel author: %s', self.novel_author) chapters = soup.find('div', {'class': 'manga-detailchapter'}).findAll('a', title=True) chapters.reverse() for a in chapters: for span in a.findAll('span'): span.extract() for x in chapters: chap_id = (len(self.chapters) + 1) if ((len(self.chapters) % 100) == 0): vol_id = ((chap_id // 100) + 1) vol_title = ('Volume ' + str(vol_id)) self.volumes.append({'id': vol_id, 'title': vol_title}) self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(x['href']), 'title': (x['title'] or ('Chapter %d' % chap_id))})
def download_chapter_body(self, chapter): 'Download the body of a single chapter and return it as clean HTML.' logger.info('Downloading %s', chapter['url']) soup = self.get_soup(chapter['url']) contents = soup.find('div', {'class': 'reading-box'}) self.clean_contents(contents) return str(contents)
2,756,602,651,324,911,000
Download the body of a single chapter and return it as clean HTML.
sources/novelall.py
download_chapter_body
BorgSquared/lightnovel-crawler
python
def download_chapter_body(self, chapter): logger.info('Downloading %s', chapter['url']) soup = self.get_soup(chapter['url']) contents = soup.find('div', {'class': 'reading-box'}) self.clean_contents(contents) return str(contents)
def create_url_search(self, parameters): 'Creates the search url, combining the standard url and various\n search parameters.' url = self.standard url += parameters[0] for i in parameters[1:]: url += '&{}'.format(i) url += '&apikey={}'.format(self.key_api) return url
-7,296,441,941,738,629,000
Creates the search url, combining the standard url and various search parameters.
src/arcas/IEEE/main.py
create_url_search
ArcasProject/Arcas
python
def create_url_search(self, parameters): 'Creates the search url, combining the standard url and various\n search parameters.' url = self.standard url += parameters[0] for i in parameters[1:]: url += '&{}'.format(i) url += '&apikey={}'.format(self.key_api) return url
@staticmethod @ratelimit.rate_limited(3) def make_request(url): 'Makes a request to the API and returns the response.' response = requests.get(url, stream=True, verify=False) if (response.status_code != 200): raise APIError(response.status_code) return response
2,011,241,343,862,152,700
Makes a request to the API and returns the response.
src/arcas/IEEE/main.py
make_request
ArcasProject/Arcas
python
@staticmethod @ratelimit.rate_limited(3) def make_request(url): response = requests.get(url, stream=True, verify=False) if (response.status_code != 200): raise APIError(response.status_code) return response
def to_dataframe(self, raw_article): 'A function which takes a dictionary with the structure of the IEEE\n results and transforms it into a standardized format.\n ' raw_article['url'] = raw_article.get('html_url', None) try: raw_article['author'] = [author['full_name'] for author in raw_article['authors']['authors']] except KeyError: raw_article['author'] = ['No authors found for this document.'] raw_article['abstract'] = raw_article.get('abstract', None) if (raw_article['content_type'] == 'Conferences'): date = raw_article.get('conference_dates', None) else: date = raw_article.get('publication_date', None) if (date is not None): date = int(date.split(' ')[(- 1)]) raw_article['date'] = date category = raw_article.get('index_terms', None) if (category is not None): try: category = category['author_terms']['terms'] except KeyError: try: category = category['ieee_terms']['terms'] except KeyError: category = None raw_article['doi'] = raw_article.get('doi', None) raw_article['category'] = category raw_article['journal'] = raw_article.get('publication_title', None) raw_article['provenance'] = 'IEEE' (raw_article['key'], raw_article['unique_key']) = self.create_keys(raw_article) raw_article['open_access'] = (raw_article['access_type'] == 'OPEN_ACCESS') raw_article['score'] = 'Not available' return self.dict_to_dataframe(raw_article)
-7,890,503,207,656,345,000
A function which takes a dictionary with the structure of the IEEE results and transforms it into a standardized format.
src/arcas/IEEE/main.py
to_dataframe
ArcasProject/Arcas
python
def to_dataframe(self, raw_article): 'A function which takes a dictionary with the structure of the IEEE\n results and transforms it into a standardized format.\n ' raw_article['url'] = raw_article.get('html_url', None) try: raw_article['author'] = [author['full_name'] for author in raw_article['authors']['authors']] except KeyError: raw_article['author'] = ['No authors found for this document.'] raw_article['abstract'] = raw_article.get('abstract', None) if (raw_article['content_type'] == 'Conferences'): date = raw_article.get('conference_dates', None) else: date = raw_article.get('publication_date', None) if (date is not None): date = int(date.split(' ')[(- 1)]) raw_article['date'] = date category = raw_article.get('index_terms', None) if (category is not None): try: category = category['author_terms']['terms'] except KeyError: try: category = category['ieee_terms']['terms'] except KeyError: category = None raw_article['doi'] = raw_article.get('doi', None) raw_article['category'] = category raw_article['journal'] = raw_article.get('publication_title', None) raw_article['provenance'] = 'IEEE' (raw_article['key'], raw_article['unique_key']) = self.create_keys(raw_article) raw_article['open_access'] = (raw_article['access_type'] == 'OPEN_ACCESS') raw_article['score'] = 'Not available' return self.dict_to_dataframe(raw_article)
def parse(self, root): 'Parses the XML file.' if (root['total_records'] == 0): return False return root['articles']
7,165,902,714,162,163,000
Parses the XML file.
src/arcas/IEEE/main.py
parse
ArcasProject/Arcas
python
def parse(self, root): if (root['total_records'] == 0): return False return root['articles']
def parse_args(): '\n Parse input arguments\n ' parser = argparse.ArgumentParser(description='Generate txt result file') parser.add_argument('--dir', dest='base_dir', help='result base dir', default='/home/hezheqi/data/frame/result', type=str) parser.add_argument('--gt', dest='gt_dir', help='gt base dir', default='/data/hezheqi/frame/test/gt', type=str) parser.add_argument('--name', dest='name', help='out name', default=None, type=str) parser.add_argument('--list', dest='img_list_dir', help='image list', default='/data/hezheqi/frame/test/img_list.txt', type=str) if (len(sys.argv) == 1): parser.print_help() sys.exit(1) args = parser.parse_args() return args
-3,541,902,435,000,393,000
Parse input arguments
tools/eval_frame.py
parse_args
lz20061213/quadrilateral
python
def parse_args(): '\n \n ' parser = argparse.ArgumentParser(description='Generate txt result file') parser.add_argument('--dir', dest='base_dir', help='result base dir', default='/home/hezheqi/data/frame/result', type=str) parser.add_argument('--gt', dest='gt_dir', help='gt base dir', default='/data/hezheqi/frame/test/gt', type=str) parser.add_argument('--name', dest='name', help='out name', default=None, type=str) parser.add_argument('--list', dest='img_list_dir', help='image list', default='/data/hezheqi/frame/test/img_list.txt', type=str) if (len(sys.argv) == 1): parser.print_help() sys.exit(1) args = parser.parse_args() return args
def eval_one(results, gts, point_dis=False, rect_label=None): '\n :param results: detected quadrilaterals\n :param gts: ground-truth quadrilaterals\n :param point_dis: if True, match by point distance instead of IoU\n :param rect_label: use rectangle or not\n :return: right_num, err_num, mid_num\n ' m = len(gts) is_used = ([False] * m) right_num = 0 err_num = 0 mid_num = 0 for res in results: if (not point_dis): max_iou = (- 1) max_index = (- 1) for (j, gt) in enumerate(gts): if is_used[j]: continue iou = calculate_iou(res, gt) if (max_iou < iou): max_iou = iou max_index = j if (max_iou > th): is_used[max_index] = True if (rect_label == None): right_num += 1 elif rect_label[max_index]: right_num += 1 elif (not rect_label[max_index]): mid_num += 1 else: err_num += 1 else: flag = False for (j, gt) in enumerate(gts): if is_used[j]: continue if verify_point_distance(res, gt): is_used[j] = True right_num += 1 flag = True break if (not flag): err_num += 1 assert (right_num <= m) assert (err_num <= len(results)) return (right_num, err_num, mid_num)
1,268,701,586,528,160,300
:param results: detected quadrilaterals :param gts: ground-truth quadrilaterals :param point_dis: if True, match by point distance instead of IoU :param rect_label: use rectangle or not :return: right_num, err_num, mid_num
tools/eval_frame.py
eval_one
lz20061213/quadrilateral
python
def eval_one(results, gts, point_dis=False, rect_label=None): '\n :param results: detected quadrilaterals\n :param gts: ground-truth quadrilaterals\n :param point_dis: if True, match by point distance instead of IoU\n :param rect_label: use rectangle or not\n :return: right_num, err_num, mid_num\n ' m = len(gts) is_used = ([False] * m) right_num = 0 err_num = 0 mid_num = 0 for res in results: if (not point_dis): max_iou = (- 1) max_index = (- 1) for (j, gt) in enumerate(gts): if is_used[j]: continue iou = calculate_iou(res, gt) if (max_iou < iou): max_iou = iou max_index = j if (max_iou > th): is_used[max_index] = True if (rect_label == None): right_num += 1 elif rect_label[max_index]: right_num += 1 elif (not rect_label[max_index]): mid_num += 1 else: err_num += 1 else: flag = False for (j, gt) in enumerate(gts): if is_used[j]: continue if verify_point_distance(res, gt): is_used[j] = True right_num += 1 flag = True break if (not flag): err_num += 1 assert (right_num <= m) assert (err_num <= len(results)) return (right_num, err_num, mid_num)
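The core of eval_one is a greedy matching loop: each prediction claims the unused ground truth with the highest IoU, and a match counts only above the threshold. A self-contained sketch with axis-aligned boxes standing in for the quadrilateral IoU of the source (its calculate_iou is not shown here):

def box_iou(a, b):
    # boxes are (x1, y1, x2, y2); intersection over union
    ix = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter)

def greedy_match(results, gts, th=0.5):
    used, right = [False] * len(gts), 0
    for res in results:
        ious = [(-1.0 if used[j] else box_iou(res, gt)) for j, gt in enumerate(gts)]
        best = max(range(len(gts)), key=ious.__getitem__)
        if ious[best] > th:
            used[best] = True
            right += 1
    return right, len(results) - right

print(greedy_match([(0, 0, 10, 10), (50, 50, 60, 60)], [(1, 1, 10, 10)]))  # (1, 1)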
@property def chat(self): "\n Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object\n belongs to. It may be ``None`` if Telegram didn't send the chat.\n\n If you're using `telethon.events`, use `get_chat` instead.\n " return self._chat
-3,510,885,467,875,371,000
Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object belongs to. It may be ``None`` if Telegram didn't send the chat. If you're using `telethon.events`, use `get_chat` instead.
telethon/tl/custom/chatgetter.py
chat
bb010g/Telethon
python
@property def chat(self): "\n Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object\n belongs to. It may be ``None`` if Telegram didn't send the chat.\n\n If you're using `telethon.events`, use `get_chat` instead.\n " return self._chat
async def get_chat(self): "\n Returns `chat`, but will make an API call to find the\n chat unless it's already cached.\n " if (((self._chat is None) or getattr(self._chat, 'min', None)) and (await self.get_input_chat())): try: self._chat = (await self._client.get_entity(self._input_chat)) except ValueError: (await self._refetch_chat()) return self._chat
7,656,971,415,182,909,000
Returns `chat`, but will make an API call to find the chat unless it's already cached.
telethon/tl/custom/chatgetter.py
get_chat
bb010g/Telethon
python
async def get_chat(self): "\n Returns `chat`, but will make an API call to find the\n chat unless it's already cached.\n " if (((self._chat is None) or getattr(self._chat, 'min', None)) and (await self.get_input_chat())): try: self._chat = (await self._client.get_entity(self._input_chat)) except ValueError: (await self._refetch_chat()) return self._chat
@property def input_chat(self): "\n This :tl:`InputPeer` is the input version of the chat where the\n message was sent. Similarly to `input_sender`, this doesn't have\n things like username or similar, but still useful in some cases.\n\n Note that this might not be available if the library doesn't\n have enough information available.\n " if ((self._input_chat is None) and self._chat_peer and self._client): try: self._input_chat = self._client._entity_cache[self._chat_peer] except KeyError: pass return self._input_chat
-8,939,400,300,496,340,000
This :tl:`InputPeer` is the input version of the chat where the message was sent. Similarly to `input_sender`, this doesn't have things like username or similar, but still useful in some cases. Note that this might not be available if the library doesn't have enough information available.
telethon/tl/custom/chatgetter.py
input_chat
bb010g/Telethon
python
@property def input_chat(self): "\n This :tl:`InputPeer` is the input version of the chat where the\n message was sent. Similarly to `input_sender`, this doesn't have\n things like username or similar, but still useful in some cases.\n\n Note that this might not be available if the library doesn't\n have enough information available.\n " if ((self._input_chat is None) and self._chat_peer and self._client): try: self._input_chat = self._client._entity_cache[self._chat_peer] except KeyError: pass return self._input_chat
async def get_input_chat(self): "\n Returns `input_chat`, but will make an API call to find the\n input chat unless it's already cached.\n " if ((self.input_chat is None) and self.chat_id and self._client): try: target = self.chat_id async for d in self._client.iter_dialogs(100): if (d.id == target): self._chat = d.entity self._input_chat = d.input_entity break except errors.RPCError: pass return self._input_chat
-3,886,584,894,192,146,400
Returns `input_chat`, but will make an API call to find the input chat unless it's already cached.
telethon/tl/custom/chatgetter.py
get_input_chat
bb010g/Telethon
python
async def get_input_chat(self): "\n Returns `input_chat`, but will make an API call to find the\n input chat unless it's already cached.\n " if ((self.input_chat is None) and self.chat_id and self._client): try: target = self.chat_id async for d in self._client.iter_dialogs(100): if (d.id == target): self._chat = d.entity self._input_chat = d.input_entity break except errors.RPCError: pass return self._input_chat
@property def chat_id(self): '\n Returns the marked chat integer ID. Note that this value **will\n be different** from `to_id` for incoming private messages, since\n the chat *to* which the messages go is to your own person, but\n the *chat* itself is with the one who sent the message.\n\n TL;DR; this gets the ID that you expect.\n ' return (utils.get_peer_id(self._chat_peer) if self._chat_peer else None)
1,561,789,777,970,038,000
Returns the marked chat integer ID. Note that this value **will be different** from `to_id` for incoming private messages, since the chat *to* which the messages go is to your own person, but the *chat* itself is with the one who sent the message. TL;DR; this gets the ID that you expect.
telethon/tl/custom/chatgetter.py
chat_id
bb010g/Telethon
python
@property def chat_id(self): '\n Returns the marked chat integer ID. Note that this value **will\n be different** from `to_id` for incoming private messages, since\n the chat *to* which the messages go is to your own person, but\n the *chat* itself is with the one who sent the message.\n\n TL;DR; this gets the ID that you expect.\n ' return (utils.get_peer_id(self._chat_peer) if self._chat_peer else None)
@property def is_private(self): 'True if the message was sent as a private message.' return isinstance(self._chat_peer, types.PeerUser)
2,072,770,881,503,792,600
True if the message was sent as a private message.
telethon/tl/custom/chatgetter.py
is_private
bb010g/Telethon
python
@property def is_private(self): return isinstance(self._chat_peer, types.PeerUser)
@property def is_group(self): 'True if the message was sent on a group or megagroup.' if ((self._broadcast is None) and self.chat): self._broadcast = getattr(self.chat, 'broadcast', None) return (isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel)) and (not self._broadcast))
8,039,516,671,244,592,000
True if the message was sent on a group or megagroup.
telethon/tl/custom/chatgetter.py
is_group
bb010g/Telethon
python
@property def is_group(self): if ((self._broadcast is None) and self.chat): self._broadcast = getattr(self.chat, 'broadcast', None) return (isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel)) and (not self._broadcast))
@property def is_channel(self): 'True if the message was sent on a megagroup or channel.' return isinstance(self._chat_peer, types.PeerChannel)
2,578,293,180,495,241,700
True if the message was sent on a megagroup or channel.
telethon/tl/custom/chatgetter.py
is_channel
bb010g/Telethon
python
@property def is_channel(self): return isinstance(self._chat_peer, types.PeerChannel)
async def _refetch_chat(self): '\n Re-fetches chat information through other means.\n '
4,903,209,581,249,528,000
Re-fetches chat information through other means.
telethon/tl/custom/chatgetter.py
_refetch_chat
bb010g/Telethon
python
async def _refetch_chat(self): '\n \n '
def est_fpos_rate(token, trace=None, stats=None): "\n Estimate false positive rate of a single-token signature.\n \n Estimates using the 'tokensplit' and trace-modeling methods,\n and returns the higher (most pessimistic of the two). Note that both\n of these estimates are strictly equal to or higher than the actual \n fraction of streams that 'token' occurs in within the trace.\n " global estd_fpos_rate if (not (estd_fpos_rate.has_key(trace) and estd_fpos_rate[trace].has_key(token))): if (not estd_fpos_rate.has_key(trace)): estd_fpos_rate[trace] = {} import polygraph.sigprob.tokensplit as tokensplit import polygraph.sigprob.sigprob as sigprob if trace: split_prob = tokensplit.mpp(token, trace, minlen=3)[0] stat_prob = tokensplit.maxcontextprob(token, trace)[0] estd_fpos_rate[trace][token] = max(split_prob, stat_prob) else: estd_fpos_rate[trace][token] = sigprob.token_prob(token, 1000, stats=stats)[(- 1)] rv = estd_fpos_rate[trace][token] if (len(token) > 20): del estd_fpos_rate[trace][token] if (len(estd_fpos_rate[trace].keys()) > 200): estd_fpos_rate[trace].clear() return rv
-4,091,892,719,482,200,600
Estimate false positive rate of a single-token signature. Estimates using the 'tokensplit' and trace-modeling methods, and returns the higher (most pessimistic of the two). Note that both of these estimates are strictly equal to or higher than the actual fraction of streams that 'token' occurs in within the trace.
polygraph/sig_gen/sig_gen.py
est_fpos_rate
hadisfr/Polygraph
python
def est_fpos_rate(token, trace=None, stats=None): "\n Estimate false positive rate of a single-token signature.\n \n Estimates using the 'tokensplit' and trace-modeling methods,\n and returns the higher (most pessimistic of the two). Note that both\n of these estimates are strictly equal to or higher than the actual \n fraction of streams that 'token' occurs in within the trace.\n " global estd_fpos_rate if (not (estd_fpos_rate.has_key(trace) and estd_fpos_rate[trace].has_key(token))): if (not estd_fpos_rate.has_key(trace)): estd_fpos_rate[trace] = {} import polygraph.sigprob.tokensplit as tokensplit import polygraph.sigprob.sigprob as sigprob if trace: split_prob = tokensplit.mpp(token, trace, minlen=3)[0] stat_prob = tokensplit.maxcontextprob(token, trace)[0] estd_fpos_rate[trace][token] = max(split_prob, stat_prob) else: estd_fpos_rate[trace][token] = sigprob.token_prob(token, 1000, stats=stats)[(- 1)] rv = estd_fpos_rate[trace][token] if (len(token) > 20): del estd_fpos_rate[trace][token] if (len(estd_fpos_rate[trace].keys()) > 200): estd_fpos_rate[trace].clear() return rv
def train(self, pos_samples): '\n Generate one or more signatures from pos_samples (suspicious pool).\n Returns a sequence of Sig objects.\n ' raise NotImplementedError
4,773,307,708,365,914,000
Generate one or more signatures from pos_samples (suspicious pool). Returns a sequence of Sig objects.
polygraph/sig_gen/sig_gen.py
train
hadisfr/Polygraph
python
def train(self, pos_samples): '\n Generate one or more signatures from pos_samples (suspicious pool).\n Returns a sequence of Sig objects.\n ' raise NotImplementedError
def match(self, sample): 'Return whether current signature matches the sample' raise NotImplementedError
-4,908,549,882,155,932,000
Return whether current signature matches the sample
polygraph/sig_gen/sig_gen.py
match
hadisfr/Polygraph
python
def match(self, sample): raise NotImplementedError
def runTest(self): 'This function will fetch the added table under the schema node.' table_response = tables_utils.verify_table(self.server, self.db_name, self.table_id) if (not table_response): raise Exception('Could not find the table to update.') if self.is_partition: data = {'id': self.table_id} tables_utils.set_partition_data(self.server, self.db_name, self.schema_name, self.table_name, self.partition_type, data, self.mode) else: data = {'description': 'This is test comment for table', 'id': self.table_id} response = self.tester.put((((((((((self.url + str(utils.SERVER_GROUP)) + '/') + str(self.server_id)) + '/') + str(self.db_id)) + '/') + str(self.schema_id)) + '/') + str(self.table_id)), data=json.dumps(data), follow_redirects=True) self.assertEquals(response.status_code, 200)
-8,427,420,854,452,124,000
This function will fetch the added table under the schema node.
pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py
runTest
Anillab/One-Minute-Pitch
python
def runTest(self): table_response = tables_utils.verify_table(self.server, self.db_name, self.table_id) if (not table_response): raise Exception('Could not find the table to update.') if self.is_partition: data = {'id': self.table_id} tables_utils.set_partition_data(self.server, self.db_name, self.schema_name, self.table_name, self.partition_type, data, self.mode) else: data = {'description': 'This is test comment for table', 'id': self.table_id} response = self.tester.put((((((((((self.url + str(utils.SERVER_GROUP)) + '/') + str(self.server_id)) + '/') + str(self.db_id)) + '/') + str(self.schema_id)) + '/') + str(self.table_id)), data=json.dumps(data), follow_redirects=True) self.assertEquals(response.status_code, 200)
def dataset_parser(self, value): 'Parse an ImageNet record from a serialized string Tensor.' keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, ''), 'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'), 'image/class/label': tf.FixedLenFeature([], tf.int64, (- 1)), 'image/class/text': tf.FixedLenFeature([], tf.string, ''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) image = self.image_preprocessing_fn(image_bytes=image_bytes, is_training=self.is_training, use_bfloat16=self.use_bfloat16) label = tf.cast((tf.cast(tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1), dtype=tf.float32) return (image, label)
-2,487,874,934,583,650,000
Parse an ImageNet record from a serialized string Tensor.
models/experimental/distribution_strategy/imagenet_input_keras.py
dataset_parser
aidangomez/tpu
python
def dataset_parser(self, value): keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, ), 'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'), 'image/class/label': tf.FixedLenFeature([], tf.int64, (- 1)), 'image/class/text': tf.FixedLenFeature([], tf.string, ), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)} parsed = tf.parse_single_example(value, keys_to_features) image_bytes = tf.reshape(parsed['image/encoded'], shape=[]) image = self.image_preprocessing_fn(image_bytes=image_bytes, is_training=self.is_training, use_bfloat16=self.use_bfloat16) label = tf.cast((tf.cast(tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1), dtype=tf.float32) return (image, label)
def input_fn(self): 'Input function which provides a single batch for train or eval.\n\n Returns:\n A `tf.data.Dataset` object.\n ' if (self.data_dir is None): tf.logging.info('Using fake input.') return self.input_fn_null() file_pattern = os.path.join(self.data_dir, ('train-*' if self.is_training else 'validation-*')) dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training) if self.is_training: dataset = dataset.repeat() def fetch_dataset(filename): buffer_size = ((8 * 1024) * 1024) dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size) return dataset dataset = dataset.apply(tf.contrib.data.parallel_interleave(fetch_dataset, cycle_length=16, sloppy=True)) dataset = dataset.shuffle(1024) dataset = dataset.apply(tf.contrib.data.map_and_batch(self.dataset_parser, batch_size=self.batch_size, num_parallel_batches=2, drop_remainder=True)) dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) return dataset
-7,626,540,773,962,126,000
Input function which provides a single batch for train or eval. Returns: A `tf.data.Dataset` object.
models/experimental/distribution_strategy/imagenet_input_keras.py
input_fn
aidangomez/tpu
python
def input_fn(self): if (self.data_dir is None): tf.logging.info('Using fake input.') return self.input_fn_null() file_pattern = os.path.join(self.data_dir, ('train-*' if self.is_training else 'validation-*')) dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training) if self.is_training: dataset = dataset.repeat() def fetch_dataset(filename): buffer_size = ((8 * 1024) * 1024) dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size) return dataset dataset = dataset.apply(tf.contrib.data.parallel_interleave(fetch_dataset, cycle_length=16, sloppy=True)) dataset = dataset.shuffle(1024) dataset = dataset.apply(tf.contrib.data.map_and_batch(self.dataset_parser, batch_size=self.batch_size, num_parallel_batches=2, drop_remainder=True)) dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) return dataset
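A hedged follow-up sketch: materializing one batch from the dataset that input_fn returns, using the TF 1.x graph-mode idiom this file is written against; imagenet_input stands in for an instance of the (here unnamed) input class:

images, labels = imagenet_input.input_fn().make_one_shot_iterator().get_next()
with tf.Session() as sess:
    # batch_images: [batch_size, height, width, channels]; batch_labels: [batch_size, 1]
    batch_images, batch_labels = sess.run([images, labels])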
def input_fn_null(self): 'Input function which provides null (black) images.' dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input) dataset = dataset.prefetch(self.batch_size) dataset = dataset.batch(self.batch_size, drop_remainder=True) dataset = dataset.prefetch(32) tf.logging.info('Input dataset: %s', str(dataset)) return dataset
-9,042,925,970,187,984,000
Input function which provides null (black) images.
models/experimental/distribution_strategy/imagenet_input_keras.py
input_fn_null
aidangomez/tpu
python
def input_fn_null(self): dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input) dataset = dataset.prefetch(self.batch_size) dataset = dataset.batch(self.batch_size, drop_remainder=True) dataset = dataset.prefetch(32) tf.logging.info('Input dataset: %s', str(dataset)) return dataset
def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int]=None) -> Tensor: 'Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_ Fall-out is the fraction\n of non-relevant documents retrieved among all the non-relevant documents.\n\n ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,\n ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,\n otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer.\n\n Args:\n preds: estimated probabilities of each document to be relevant.\n target: ground truth about each document being relevant or not.\n k: consider only the top k elements (default: `None`, which considers them all)\n\n Returns:\n a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``.\n\n Raises:\n ValueError:\n If ``k`` parameter is not `None` or an integer larger than 0\n\n Example:\n >>> from torchmetrics.functional import retrieval_fall_out\n >>> preds = tensor([0.2, 0.3, 0.5])\n >>> target = tensor([True, False, True])\n >>> retrieval_fall_out(preds, target, k=2)\n tensor(1.)\n ' (preds, target) = _check_retrieval_functional_inputs(preds, target) k = (preds.shape[(- 1)] if (k is None) else k) if (not (isinstance(k, int) and (k > 0))): raise ValueError('`k` has to be a positive integer or None') target = (1 - target) if (not target.sum()): return tensor(0.0, device=preds.device) relevant = target[torch.argsort(preds, dim=(- 1), descending=True)][:k].sum().float() return (relevant / target.sum())
6,820,039,520,958,153,000
Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_ Fall-out is the fraction of non-relevant documents retrieved among all the non-relevant documents. ``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``, ``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`, otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer. Args: preds: estimated probabilities of each document to be relevant. target: ground truth about each document being relevant or not. k: consider only the top k elements (default: `None`, which considers them all) Returns: a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``. Raises: ValueError: If ``k`` parameter is not `None` or an integer larger than 0 Example: >>> from torchmetrics.functional import retrieval_fall_out >>> preds = tensor([0.2, 0.3, 0.5]) >>> target = tensor([True, False, True]) >>> retrieval_fall_out(preds, target, k=2) tensor(1.)
torchmetrics/functional/retrieval/fall_out.py
retrieval_fall_out
Abdelrhman-Hosny/metrics
python
def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int]=None) -> Tensor: (preds, target) = _check_retrieval_functional_inputs(preds, target) k = (preds.shape[(- 1)] if (k is None) else k) if (not (isinstance(k, int) and (k > 0))): raise ValueError('`k` has to be a positive integer or None') target = (1 - target) if (not target.sum()): return tensor(0.0, device=preds.device) relevant = target[torch.argsort(preds, dim=(- 1), descending=True)][:k].sum().float() return (relevant / target.sum())
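The docstring example runs as-is; this snippet only restates it with explicit imports (torch and torchmetrics assumed installed) and spells out the arithmetic:

from torch import tensor
from torchmetrics.functional import retrieval_fall_out

preds = tensor([0.2, 0.3, 0.5])
target = tensor([True, False, True])
# Exactly one document (index 1) is non-relevant, and it falls inside the
# top-2 by score, so fall-out@2 = 1 retrieved non-relevant / 1 non-relevant.
print(retrieval_fall_out(preds, target, k=2))  # tensor(1.)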
def index(request): '\n Return landing page\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Rendered home (index) page\n ' return render(request, 'webservice/index.html', {'demo_input_repo_name': DEMO_REPO_INPUT_NAME, 'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
2,300,159,280,462,162,700
Return landing page arguments: :request: GET HTTP request returns: Rendered home (index) page
webserver/pkgpkr/webservice/views.py
index
pkgpkr/Package-Picker
python
def index(request): return render(request, 'webservice/index.html', {'demo_input_repo_name': DEMO_REPO_INPUT_NAME, 'supported_languages': sorted([lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys()])})
def about(request): '\n Return about info\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Rendered about page\n ' return render(request, 'webservice/about.html')
1,604,955,779,372,363,300
Return about info arguments: :request: GET HTTP request returns: Rendered about page
webserver/pkgpkr/webservice/views.py
about
pkgpkr/Package-Picker
python
def about(request): return render(request, 'webservice/about.html')
def login(request): ' Log user in using GitHub OAuth\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Redirects to index\n ' if (not request.session.get('github_token')): request.session['github_token'] = None request.session['github_info'] = None if (os.environ.get('SELENIUM_TEST') == '1'): assert os.environ.get('GH_TOKEN'), 'GH_TOKEN not set' request.session['github_token'] = os.environ.get('GH_TOKEN') request.session['github_info'] = github_util.get_user_info(request.session['github_token']) return HttpResponseRedirect(reverse('index')) return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
1,179,897,966,924,708,600
Log user in using GitHub OAuth arguments: :request: GET HTTP request returns: Redirects to index
webserver/pkgpkr/webservice/views.py
login
pkgpkr/Package-Picker
python
def login(request): if (not request.session.get('github_token')): request.session['github_token'] = None request.session['github_info'] = None if (os.environ.get('SELENIUM_TEST') == '1'): assert os.environ.get('GH_TOKEN'), 'GH_TOKEN not set' request.session['github_token'] = os.environ.get('GH_TOKEN') request.session['github_info'] = github_util.get_user_info(request.session['github_token']) return HttpResponseRedirect(reverse('index')) return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request): '\n GitHub redirects here, then retrieves token for API\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Redirects to index\n ' code = request.GET.get('code') payload = {'client_id': GITHUB_CLIENT_ID, 'client_secret': GITHUB_CLIENT_SECRET, 'code': code} headers = {'accept': 'application/json'} res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH, data=payload, headers=headers) request.session['github_token'] = res.json()['access_token'] request.session['github_info'] = github_util.get_user_info(request.session['github_token']) return HttpResponseRedirect(reverse('index'))
-6,453,799,905,267,306,000
GitHub redirects here, then retrieves token for API arguments: :request: GET HTTP request returns: Redirects to index
webserver/pkgpkr/webservice/views.py
callback
pkgpkr/Package-Picker
python
def callback(request): code = request.GET.get('code') payload = {'client_id': GITHUB_CLIENT_ID, 'client_secret': GITHUB_CLIENT_SECRET, 'code': code} headers = {'accept': 'application/json'} res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH, data=payload, headers=headers) request.session['github_token'] = res.json()['access_token'] request.session['github_info'] = github_util.get_user_info(request.session['github_token']) return HttpResponseRedirect(reverse('index'))
def logout(request): '\n Logs user out but keeps authorization to OAuth GitHub\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Redirects to index\n ' request.session['github_token'] = None request.session['github_info'] = None return HttpResponseRedirect(reverse('index'))
-1,557,519,377,283,205,400
Logs user out but keeps authorization to OAuth GitHub arguments: :request: GET HTTP request returns: Redirects to index
webserver/pkgpkr/webservice/views.py
logout
pkgpkr/Package-Picker
python
def logout(request): request.session['github_token'] = None request.session['github_info'] = None return HttpResponseRedirect(reverse('index'))
def repositories(request): '\n Get full list (up to 100) for the current user\n\n arguments:\n :request: GET HTTP request\n\n returns:\n Rendered repositories page\n ' if (not request.session.get('github_token')): return HttpResponseRedirect(reverse('index')) repos_per_language = github_util.get_repositories(request.session['github_token']) combined_repos = dict() for (language, repos) in repos_per_language.items(): for repo in repos: if (not repo['object']): continue date_time = repo['updatedAt'] date = date_time.split('T')[0] repo['date'] = date repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner']) repo['language'] = language if parse_dependencies(repo['object']['text'], language, True): combined_repos[repo['nameWithOwner']] = repo return render(request, 'webservice/repositories.html', {'repos': combined_repos.values()})
4,976,217,205,767,315,000
Get full list (up to 100) for the current user arguments: :request: GET HTTP request returns: Rendered repositories page
webserver/pkgpkr/webservice/views.py
repositories
pkgpkr/Package-Picker
python
def repositories(request): if (not request.session.get('github_token')): return HttpResponseRedirect(reverse('index')) repos_per_language = github_util.get_repositories(request.session['github_token']) combined_repos = dict() for (language, repos) in repos_per_language.items(): for repo in repos: if (not repo['object']): continue date_time = repo['updatedAt'] date = date_time.split('T')[0] repo['date'] = date repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner']) repo['language'] = language if parse_dependencies(repo['object']['text'], language, True): combined_repos[repo['nameWithOwner']] = repo return render(request, 'webservice/repositories.html', {'repos': combined_repos.values()})
def recommendations(request, name): '\n Get recommended packages for the repo\n\n arguments:\n :request: GET/POST HTTP request\n :name: repo name\n\n returns:\n Rendered recommendation page\n ' repo_name = urllib.parse.unquote_plus(name) if (request.method == 'POST'): language = request.POST.get('language') language = language.lower() dependencies = request.POST.get('dependencies') dependencies = dependencies.strip(',') if (language not in SUPPORTED_LANGUAGES.keys()): return HttpResponse(f'Demo language {language} not supported', status=404) request.session['dependencies'] = dependencies request.session['language'] = language branch_name = None branch_names = None else: if (not request.session.get('github_token')): return HttpResponseRedirect(reverse('index')) branch_name = request.GET.get('branch', default='master') (_, branch_names, language) = github_util.get_dependencies(request.session['github_token'], repo_name, branch_name) return render(request, 'webservice/recommendations.html', {'repository_name': repo_name, 'recommendation_url': f'/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}', 'branch_names': branch_names, 'current_branch': branch_name, 'language': language})
-4,413,514,821,107,826,700
Get recommended packages for the repo arguments: :request: GET/POST HTTP request :name: repo name returns: Rendered recommendation page
webserver/pkgpkr/webservice/views.py
recommendations
pkgpkr/Package-Picker
python
def recommendations(request, name): repo_name = urllib.parse.unquote_plus(name) if (request.method == 'POST'): language = request.POST.get('language') language = language.lower() dependencies = request.POST.get('dependencies') dependencies = dependencies.strip(',') if (language not in SUPPORTED_LANGUAGES.keys()): return HttpResponse(f'Demo language {language} not supported', status=404) request.session['dependencies'] = dependencies request.session['language'] = language branch_name = None branch_names = None else: if (not request.session.get('github_token')): return HttpResponseRedirect(reverse('index')) branch_name = request.GET.get('branch', default='master') (_, branch_names, language) = github_util.get_dependencies(request.session['github_token'], repo_name, branch_name) return render(request, 'webservice/recommendations.html', {'repository_name': repo_name, 'recommendation_url': f'/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}', 'branch_names': branch_names, 'current_branch': branch_name, 'language': language})
def recommendations_json(request, name): '\n Get recommended packages for the repo in JSON format\n\n arguments:\n :request: GET HTTP request\n :name: repo name\n\n returns:\n JSON object with recommendations\n ' repo_name = urllib.parse.unquote_plus(name) if (name == DEMO_REPO_INPUT_NAME): dependencies = github_util.parse_dependencies(request.session.get('dependencies'), request.session.get('language')) branch_name = None else: if (not request.session.get('github_token')): return HttpResponse('Unauthorized', status=401) branch_name = request.GET.get('branch', default='master') (dependencies, _, _) = github_util.get_dependencies(request.session['github_token'], repo_name, branch_name) recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies) data = {'repository_name': repo_name, 'current_branch': branch_name, 'data': recommended_dependencies} return HttpResponse(json.dumps(data), content_type='application/json')
2,116,296,651,441,857,800
Get recommended packages for the repo in JSON format arguments: :request: GET HTTP request :name: repo name returns: JSON object with recommendations
webserver/pkgpkr/webservice/views.py
recommendations_json
pkgpkr/Package-Picker
python
def recommendations_json(request, name): repo_name = urllib.parse.unquote_plus(name) if (name == DEMO_REPO_INPUT_NAME): dependencies = github_util.parse_dependencies(request.session.get('dependencies'), request.session.get('language')) branch_name = None else: if (not request.session.get('github_token')): return HttpResponse('Unauthorized', status=401) branch_name = request.GET.get('branch', default='master') (dependencies, _, _) = github_util.get_dependencies(request.session['github_token'], repo_name, branch_name) recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies) data = {'repository_name': repo_name, 'current_branch': branch_name, 'data': recommended_dependencies} return HttpResponse(json.dumps(data), content_type='application/json')
@csrf_exempt def recommendations_service_api(request): '\n Returns package recommendations for API POST call without authentication\n\n arguments:\n :request: POST request of application/json type\n\n returns:\n list of package recommendations\n ' if (request.method == 'POST'): try: json_data = json.loads(request.body) except json.JSONDecodeError: return HttpResponseBadRequest('Could not parse JSON.') try: dependencies = json_data['dependencies'] language = json_data['language'].lower() except KeyError: return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`') except AttributeError as e: return HttpResponseBadRequest(f'Error casting language to lower(): {e}') if ((not isinstance(dependencies, list)) or (not dependencies)): return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).') if (language == PYTHON): dependencies = '\n'.join(dependencies) elif (language == JAVASCRIPT): formatted_dependencies_list = [(('"' + dep.replace('@', '":"')) + '"') for dep in dependencies] dependencies = ','.join(formatted_dependencies_list) else: return HttpResponseBadRequest(f'Language not supported: [{language}].') dependencies = github_util.parse_dependencies(dependencies, language) if ('max_recommendations' in json_data): recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies, json_data['max_recommendations']) else: recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies) output_recommended_dependencies = [] for recommended_dependency in recommended_dependencies: d = dict() d['forPackage'] = recommended_dependency[0] d['recommendedPackage'] = recommended_dependency[1] d['url'] = recommended_dependency[2] d['pkgpkrScore'] = recommended_dependency[3] d['absoluteTrendScore'] = recommended_dependency[4] d['relativeTrendScore'] = recommended_dependency[5] d['boundedPopularityScore'] = recommended_dependency[6] d['boundedSimilarityScore'] = recommended_dependency[7] d['categories'] = recommended_dependency[8] d['displayDate'] = recommended_dependency[9] d['monthlyDownloadsLastMonth'] = recommended_dependency[10] output_recommended_dependencies.append(d) data = {'language': language, 'recommended_dependencies': output_recommended_dependencies} return HttpResponse(json.dumps(data), content_type='application/json') return HttpResponseNotAllowed(['POST'])
6,866,179,884,337,457,000
Returns package recommendations for API POST call without authentication arguments: :request: POST request of application/json type returns: list of package recommendations
webserver/pkgpkr/webservice/views.py
recommendations_service_api
pkgpkr/Package-Picker
python
@csrf_exempt def recommendations_service_api(request): if (request.method == 'POST'): try: json_data = json.loads(request.body) except json.JSONDecodeError: return HttpResponseBadRequest('Could not parse JSON.') try: dependencies = json_data['dependencies'] language = json_data['language'].lower() except KeyError: return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`') except AttributeError as e: return HttpResponseBadRequest(f'Error casting language to lower(): {e}') if ((not isinstance(dependencies, list)) or (not dependencies)): return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).') if (language == PYTHON): dependencies = '\n'.join(dependencies) elif (language == JAVASCRIPT): formatted_dependencies_list = [(('"' + dep.replace('@', '":"')) + '"') for dep in dependencies] dependencies = ','.join(formatted_dependencies_list) else: return HttpResponseBadRequest(f'Language not supported: [{language}].') dependencies = github_util.parse_dependencies(dependencies, language) if ('max_recommendations' in json_data): recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies, json_data['max_recommendations']) else: recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies) output_recommended_dependencies = [] for recommended_dependency in recommended_dependencies: d = dict() d['forPackage'] = recommended_dependency[0] d['recommendedPackage'] = recommended_dependency[1] d['url'] = recommended_dependency[2] d['pkgpkrScore'] = recommended_dependency[3] d['absoluteTrendScore'] = recommended_dependency[4] d['relativeTrendScore'] = recommended_dependency[5] d['boundedPopularityScore'] = recommended_dependency[6] d['boundedSimilarityScore'] = recommended_dependency[7] d['categories'] = recommended_dependency[8] d['displayDate'] = recommended_dependency[9] d['monthlyDownloadsLastMonth'] = recommended_dependency[10] output_recommended_dependencies.append(d) data = {'language': language, 'recommended_dependencies': output_recommended_dependencies} return HttpResponse(json.dumps(data), content_type='application/json') return HttpResponseNotAllowed(['POST'])
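A hedged client-side sketch for the view above. The URL path is hypothetical (the actual route lives in urls.py, which is not part of this record); the JSON keys and the name@version dependency format follow from the view code, which splits each JavaScript entry on '@':

import requests

resp = requests.post(
    'http://localhost:8000/api/recommendations',  # hypothetical route
    json={
        'language': 'javascript',
        'dependencies': ['react@16.8.0', 'lodash@4.17.15'],
        'max_recommendations': 10,  # optional key, per the view code
    },
)
print(resp.json()['recommended_dependencies'])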
def _get_modifiable_indices(self, current_text): ' \n Returns the word indices in ``current_text`` which are able to be modified.\n ' non_stopword_indices = set() for (i, word) in enumerate(current_text.words): if (word not in self.stopwords): non_stopword_indices.add(i) return non_stopword_indices
-7,270,054,439,923,210,000
Returns the word indices in ``current_text`` which are able to be modified.
textattack/constraints/pre_transformation/stopword_modification.py
_get_modifiable_indices
fighting41love/TextAttack
python
def _get_modifiable_indices(self, current_text): non_stopword_indices = set() for (i, word) in enumerate(current_text.words): if (word not in self.stopwords): non_stopword_indices.add(i) return non_stopword_indices
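To make the filter concrete, here is a toy restatement of the same logic; FakeText is a stand-in for the AttackedText-like object textattack actually passes in:

class FakeText:
    def __init__(self, words):
        self.words = words

text = FakeText(['the', 'movie', 'was', 'great'])
stopwords = {'the', 'was'}
# Same rule as _get_modifiable_indices: keep the indices of non-stopwords.
modifiable = {i for i, w in enumerate(text.words) if w not in stopwords}
print(modifiable)  # {1, 3} -> only 'movie' and 'great' may be modified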
def check_compatibility(self, transformation): ' \n The stopword constraint only is concerned with word swaps since paraphrasing phrases\n containing stopwords is OK.\n\n Args:\n transformation: The ``Transformation`` to check compatibility with.\n ' return transformation_consists_of_word_swaps(transformation)
-5,874,091,402,889,851,000
The stopword constraint only is concerned with word swaps since paraphrasing phrases containing stopwords is OK. Args: transformation: The ``Transformation`` to check compatibility with.
textattack/constraints/pre_transformation/stopword_modification.py
check_compatibility
fighting41love/TextAttack
python
def check_compatibility(self, transformation): return transformation_consists_of_word_swaps(transformation)
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None, internal=False, relink=False): 'Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.\n\n The database to be linked must have uniqueness for each object for the given ``fields``.\n\n If ``kind``, limit objects in ``unlinked`` of type ``kind``.\n\n If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.\n\n If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.' if kind: kind = ({kind} if isinstance(kind, str) else kind) if relink: filter_func = (lambda x: (x.get('type') in kind)) else: filter_func = (lambda x: ((x.get('type') in kind) and (not x.get('input')))) elif relink: filter_func = (lambda x: True) else: filter_func = (lambda x: (not x.get('input'))) if internal: other = unlinked (duplicates, candidates) = ({}, {}) try: for ds in other: key = activity_hash(ds, fields) if (key in candidates): duplicates.setdefault(key, []).append(ds) else: candidates[key] = (ds['database'], ds['code']) except KeyError: raise StrategyError('Not all datasets in database to be linked have ``database`` or ``code`` attributes') for container in unlinked: for obj in filter(filter_func, container.get('exchanges', [])): key = activity_hash(obj, fields) if (key in duplicates): raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key])) elif (key in candidates): obj['input'] = candidates[key] return unlinked
2,774,126,575,220,764,000
Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``. The database to be linked must have uniqueness for each object for the given ``fields``. If ``kind``, limit objects in ``unlinked`` of type ``kind``. If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects. If ``internal``, link ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.
bw2io/strategies/generic.py
link_iterable_by_fields
pjamesjoyce/brightway2-io
python
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None, internal=False, relink=False): if kind: kind = ({kind} if isinstance(kind, str) else kind) if relink: filter_func = (lambda x: (x.get('type') in kind)) else: filter_func = (lambda x: ((x.get('type') in kind) and (not x.get('input')))) elif relink: filter_func = (lambda x: True) else: filter_func = (lambda x: (not x.get('input'))) if internal: other = unlinked (duplicates, candidates) = ({}, {}) try: for ds in other: key = activity_hash(ds, fields) if (key in candidates): duplicates.setdefault(key, []).append(ds) else: candidates[key] = (ds['database'], ds['code']) except KeyError: raise StrategyError('Not all datasets in database to be linked have ``database`` or ``code`` attributes') for container in unlinked: for obj in filter(filter_func, container.get('exchanges', [])): key = activity_hash(obj, fields) if (key in duplicates): raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key])) elif (key in candidates): obj['input'] = candidates[key] return unlinked
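A small self-contained sketch of the strategy in use; the two datasets are invented, and it assumes activity_hash keys only on the requested fields (its usual bw2io behaviour):

from bw2io.strategies.generic import link_iterable_by_fields

other = [{'database': 'db', 'code': 'a1', 'name': 'steel'}]
unlinked = [{'exchanges': [{'name': 'steel'}]}]

linked = link_iterable_by_fields(unlinked, other=other, fields=['name'])
# The lone exchange now carries an ``input`` key pointing at ('db', 'a1').
assert linked[0]['exchanges'][0]['input'] == ('db', 'a1')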