column                      dtype          range
Unnamed: 0                  int64          0 to 10k
repository_name             stringlengths  7 to 54
func_path_in_repository     stringlengths  5 to 223
func_name                   stringlengths  1 to 134
whole_func_string           stringlengths  100 to 30.3k
language                    stringclasses  1 value
func_code_string            stringlengths  100 to 30.3k
func_code_tokens            stringlengths  138 to 33.2k
func_documentation_string   stringlengths  1 to 15k
func_documentation_tokens   stringlengths  5 to 5.14k
split_name                  stringclasses  1 value
func_code_url               stringlengths  91 to 315
3,300
quantopian/zipline
zipline/lib/adjusted_array.py
AdjustedArray.update_labels
def update_labels(self, func): """ Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray. """ if not isinstance(self.data, LabelArray): raise TypeError( 'update_labels only supported if data is of type LabelArray.' ) # Map the baseline values. self._data = self._data.map(func) # Map each of the adjustments. for _, row_adjustments in iteritems(self.adjustments): for adjustment in row_adjustments: adjustment.value = func(adjustment.value)
python
def update_labels(self, func): """ Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray. """ if not isinstance(self.data, LabelArray): raise TypeError( 'update_labels only supported if data is of type LabelArray.' ) # Map the baseline values. self._data = self._data.map(func) # Map each of the adjustments. for _, row_adjustments in iteritems(self.adjustments): for adjustment in row_adjustments: adjustment.value = func(adjustment.value)
['def', 'update_labels', '(', 'self', ',', 'func', ')', ':', 'if', 'not', 'isinstance', '(', 'self', '.', 'data', ',', 'LabelArray', ')', ':', 'raise', 'TypeError', '(', "'update_labels only supported if data is of type LabelArray.'", ')', '# Map the baseline values.', 'self', '.', '_data', '=', 'self', '.', '_data', '.', 'map', '(', 'func', ')', '# Map each of the adjustments.', 'for', '_', ',', 'row_adjustments', 'in', 'iteritems', '(', 'self', '.', 'adjustments', ')', ':', 'for', 'adjustment', 'in', 'row_adjustments', ':', 'adjustment', '.', 'value', '=', 'func', '(', 'adjustment', '.', 'value', ')']
Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray.
['Map', 'a', 'function', 'over', 'baseline', 'and', 'adjustment', 'values', 'in', 'place', '.']
train
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/lib/adjusted_array.py#L311-L328
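A minimal stand-in sketch of the update_labels pattern shown above: map one function over a baseline sequence and over every adjustment value in place. The classes and data here are hypothetical placeholders, not zipline's actual LabelArray/AdjustedArray types.

class FakeAdjustment:
    """Placeholder for a zipline adjustment object carrying a .value attribute."""
    def __init__(self, value):
        self.value = value

baseline = ["aapl", "msft"]                     # stands in for the LabelArray baseline
adjustments = {0: [FakeAdjustment("goog")]}     # row index -> list of adjustments
func = str.upper

baseline = [func(v) for v in baseline]          # map over the baseline values
for _, row_adjustments in adjustments.items():  # map over each adjustment value
    for adjustment in row_adjustments:
        adjustment.value = func(adjustment.value)

print(baseline, adjustments[0][0].value)        # ['AAPL', 'MSFT'] GOOG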
3,301
by46/simplekit
simplekit/objson/dolphin2.py
dump
def dump(obj, fp, *args, **kwargs): """Serialize a object to a file object. Basic Usage: >>> import simplekit.objson >>> from cStringIO import StringIO >>> obj = {'name': 'wendy'} >>> io = StringIO() >>> simplekit.objson.dump(obj, io) >>> print io.getvalue() :param obj: a object which need to dump :param fp: a instance of file object :param args: Optional arguments that :func:`json.dump` takes. :param kwargs: Keys arguments that :func:`json.dump` takes. :return: None """ kwargs['default'] = object2dict json.dump(obj, fp, *args, **kwargs)
python
def dump(obj, fp, *args, **kwargs): """Serialize a object to a file object. Basic Usage: >>> import simplekit.objson >>> from cStringIO import StringIO >>> obj = {'name': 'wendy'} >>> io = StringIO() >>> simplekit.objson.dump(obj, io) >>> print io.getvalue() :param obj: a object which need to dump :param fp: a instance of file object :param args: Optional arguments that :func:`json.dump` takes. :param kwargs: Keys arguments that :func:`json.dump` takes. :return: None """ kwargs['default'] = object2dict json.dump(obj, fp, *args, **kwargs)
['def', 'dump', '(', 'obj', ',', 'fp', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'default'", ']', '=', 'object2dict', 'json', '.', 'dump', '(', 'obj', ',', 'fp', ',', '*', 'args', ',', '*', '*', 'kwargs', ')']
Serialize a object to a file object. Basic Usage: >>> import simplekit.objson >>> from cStringIO import StringIO >>> obj = {'name': 'wendy'} >>> io = StringIO() >>> simplekit.objson.dump(obj, io) >>> print io.getvalue() :param obj: a object which need to dump :param fp: a instance of file object :param args: Optional arguments that :func:`json.dump` takes. :param kwargs: Keys arguments that :func:`json.dump` takes. :return: None
['Serialize', 'a', 'object', 'to', 'a', 'file', 'object', '.']
train
https://github.com/by46/simplekit/blob/33f3ce6de33accc185e1057f096af41859db5976/simplekit/objson/dolphin2.py#L98-L118
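A hedged usage sketch of dump based on its docstring, updated for Python 3 (io.StringIO in place of cStringIO, print() as a function). It assumes the simplekit package is installed and exposes objson.dump exactly as shown above.

import io
import simplekit.objson

obj = {'name': 'wendy'}
buf = io.StringIO()
simplekit.objson.dump(obj, buf)  # json.dump under the hood, with default=object2dict
print(buf.getvalue())            # e.g. {"name": "wendy"}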
3,302
erigones/zabbix-api
zabbix_api.py
ZabbixAPI.timestamp_to_datetime
def timestamp_to_datetime(cls, dt, dt_format=DATETIME_FORMAT): """Convert unix timestamp to human readable date/time string""" return cls.convert_datetime(cls.get_datetime(dt), dt_format=dt_format)
python
def timestamp_to_datetime(cls, dt, dt_format=DATETIME_FORMAT): """Convert unix timestamp to human readable date/time string""" return cls.convert_datetime(cls.get_datetime(dt), dt_format=dt_format)
['def', 'timestamp_to_datetime', '(', 'cls', ',', 'dt', ',', 'dt_format', '=', 'DATETIME_FORMAT', ')', ':', 'return', 'cls', '.', 'convert_datetime', '(', 'cls', '.', 'get_datetime', '(', 'dt', ')', ',', 'dt_format', '=', 'dt_format', ')']
Convert unix timestamp to human readable date/time string
['Convert', 'unix', 'timestamp', 'to', 'human', 'readable', 'date', '/', 'time', 'string']
train
https://github.com/erigones/zabbix-api/blob/2474ab1d1ddb46c26eea70671b3a599b836d42da/zabbix_api.py#L222-L224
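A standard-library sketch of what the classmethod above does. DATETIME_FORMAT is assumed to be a strftime-style format string (the default used by zabbix_api is not shown in this row), and the helper below is self-contained rather than the library's own implementation.

from datetime import datetime

DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"  # assumed format, for illustration only

def timestamp_to_datetime(ts, dt_format=DATETIME_FORMAT):
    """Convert a unix timestamp to a human-readable date/time string."""
    return datetime.fromtimestamp(ts).strftime(dt_format)

print(timestamp_to_datetime(1600000000))  # local-time rendering of the timestamp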
3,303
ninuxorg/nodeshot
nodeshot/core/nodes/views.py
NodeList.perform_create
def perform_create(self, serializer): """ determine user when node is added """ if serializer.instance is None: serializer.save(user=self.request.user)
python
def perform_create(self, serializer): """ determine user when node is added """ if serializer.instance is None: serializer.save(user=self.request.user)
['def', 'perform_create', '(', 'self', ',', 'serializer', ')', ':', 'if', 'serializer', '.', 'instance', 'is', 'None', ':', 'serializer', '.', 'save', '(', 'user', '=', 'self', '.', 'request', '.', 'user', ')']
determine user when node is added
['determine', 'user', 'when', 'node', 'is', 'added']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/nodes/views.py#L51-L54
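The method above is the standard Django REST Framework hook for attaching request data at creation time. Below is a sketch of the same pattern in a generic view; it only runs inside a configured Django + DRF project, and the queryset/serializer wiring is assumed rather than taken from nodeshot.

from rest_framework import generics

class NodeList(generics.ListCreateAPIView):
    # queryset and serializer_class would be set for a real project
    def perform_create(self, serializer):
        # attach the requesting user when the node is first created
        if serializer.instance is None:
            serializer.save(user=self.request.user)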
3,304
projecthamster/hamster-lib
hamster_lib/helpers/config_helpers.py
get_config_path
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME): """ Return the path where the config file is stored. Args: app_name (text_type, optional): Name of the application, defaults to ``'projecthamster``. Allows you to use your own application specific namespace if you wish. file_name (text_type, optional): Name of the config file. Defaults to ``config.conf``. Returns: str: Fully qualified path (dir & filename) where we expect the config file. """ return os.path.join(appdirs.user_config_dir, file_name)
python
def get_config_path(appdirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME): """ Return the path where the config file is stored. Args: app_name (text_type, optional): Name of the application, defaults to ``'projecthamster``. Allows you to use your own application specific namespace if you wish. file_name (text_type, optional): Name of the config file. Defaults to ``config.conf``. Returns: str: Fully qualified path (dir & filename) where we expect the config file. """ return os.path.join(appdirs.user_config_dir, file_name)
['def', 'get_config_path', '(', 'appdirs', '=', 'DEFAULT_APPDIRS', ',', 'file_name', '=', 'DEFAULT_CONFIG_FILENAME', ')', ':', 'return', 'os', '.', 'path', '.', 'join', '(', 'appdirs', '.', 'user_config_dir', ',', 'file_name', ')']
Return the path where the config file is stored. Args: app_name (text_type, optional): Name of the application, defaults to ``'projecthamster``. Allows you to use your own application specific namespace if you wish. file_name (text_type, optional): Name of the config file. Defaults to ``config.conf``. Returns: str: Fully qualified path (dir & filename) where we expect the config file.
['Return', 'the', 'path', 'where', 'the', 'config', 'file', 'is', 'stored', '.']
train
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/config_helpers.py#L141-L155
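A minimal sketch of the same idea using the appdirs package directly. The 'projecthamster' application name comes from the docstring above; everything else (the AppDirs construction in particular) is an assumption for illustration.

import os
import appdirs

DEFAULT_APPDIRS = appdirs.AppDirs('projecthamster')
DEFAULT_CONFIG_FILENAME = 'config.conf'

def get_config_path(dirs=DEFAULT_APPDIRS, file_name=DEFAULT_CONFIG_FILENAME):
    """Return the fully qualified path where the config file is expected."""
    return os.path.join(dirs.user_config_dir, file_name)

print(get_config_path())  # e.g. ~/.config/projecthamster/config.conf on Linux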
3,305
reflexsc/reflex
src/rfxmon/__init__.py
Monitor.reporting
def reporting(self): """ report on consumption info """ self.thread_debug("reporting") res = resource.getrusage(resource.RUSAGE_SELF) self.NOTIFY("", type='internal-usage', maxrss=round(res.ru_maxrss/1024, 2), ixrss=round(res.ru_ixrss/1024, 2), idrss=round(res.ru_idrss/1024, 2), isrss=round(res.ru_isrss/1024, 2), threads=threading.active_count(), proctot=len(self.monitors), procwin=self.stats.procwin)
python
def reporting(self): """ report on consumption info """ self.thread_debug("reporting") res = resource.getrusage(resource.RUSAGE_SELF) self.NOTIFY("", type='internal-usage', maxrss=round(res.ru_maxrss/1024, 2), ixrss=round(res.ru_ixrss/1024, 2), idrss=round(res.ru_idrss/1024, 2), isrss=round(res.ru_isrss/1024, 2), threads=threading.active_count(), proctot=len(self.monitors), procwin=self.stats.procwin)
['def', 'reporting', '(', 'self', ')', ':', 'self', '.', 'thread_debug', '(', '"reporting"', ')', 'res', '=', 'resource', '.', 'getrusage', '(', 'resource', '.', 'RUSAGE_SELF', ')', 'self', '.', 'NOTIFY', '(', '""', ',', 'type', '=', "'internal-usage'", ',', 'maxrss', '=', 'round', '(', 'res', '.', 'ru_maxrss', '/', '1024', ',', '2', ')', ',', 'ixrss', '=', 'round', '(', 'res', '.', 'ru_ixrss', '/', '1024', ',', '2', ')', ',', 'idrss', '=', 'round', '(', 'res', '.', 'ru_idrss', '/', '1024', ',', '2', ')', ',', 'isrss', '=', 'round', '(', 'res', '.', 'ru_isrss', '/', '1024', ',', '2', ')', ',', 'threads', '=', 'threading', '.', 'active_count', '(', ')', ',', 'proctot', '=', 'len', '(', 'self', '.', 'monitors', ')', ',', 'procwin', '=', 'self', '.', 'stats', '.', 'procwin', ')']
report on consumption info
['report', 'on', 'consumption', 'info']
train
https://github.com/reflexsc/reflex/blob/cee6b0ccfef395ca5e157d644a2e3252cea9fe62/src/rfxmon/__init__.py#L342-L356
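A standard-library sketch of the resource accounting used above, with none of the reflex notification plumbing. On Linux ru_maxrss is reported in kilobytes, so dividing by 1024 gives megabytes, matching the rounding in the original method; note the resource module is Unix-only.

import resource
import threading

res = resource.getrusage(resource.RUSAGE_SELF)
usage = {
    'maxrss_mb': round(res.ru_maxrss / 1024, 2),
    'ixrss_mb': round(res.ru_ixrss / 1024, 2),
    'threads': threading.active_count(),
}
print(usage)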
3,306
tensorflow/tensor2tensor
tensor2tensor/data_generators/wikisum/wikisum.py
_wiki_articles
def _wiki_articles(shard_id, wikis_dir=None): """Generates WikipediaArticles from GCS that are part of shard shard_id.""" if not wikis_dir: wikis_dir = WIKI_CONTENT_DIR with tf.Graph().as_default(): dataset = tf.data.TFRecordDataset( cc_utils.readahead( os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)), buffer_size=16 * 1000 * 1000) def _parse_example(ex_ser): """Parse serialized Example containing Wikipedia article content.""" features = { "url": tf.VarLenFeature(tf.string), "title": tf.VarLenFeature(tf.string), "section_titles": tf.VarLenFeature(tf.string), "section_texts": tf.VarLenFeature(tf.string), } ex = tf.parse_single_example(ex_ser, features) for k in ex.keys(): ex[k] = ex[k].values ex["url"] = ex["url"][0] ex["title"] = ex["title"][0] return ex dataset = dataset.map(_parse_example, num_parallel_calls=32) dataset = dataset.prefetch(100) record_it = dataset.make_one_shot_iterator().get_next() with tf.Session() as sess: while True: try: ex = sess.run(record_it) except tf.errors.OutOfRangeError: break sections = [ WikipediaSection(title=text_encoder.to_unicode(title), text=text_encoder.to_unicode(text)) for title, text in zip(ex["section_titles"], ex["section_texts"]) ] yield WikipediaArticle( url=text_encoder.to_unicode(ex["url"]), title=text_encoder.to_unicode(ex["title"]), sections=sections)
python
def _wiki_articles(shard_id, wikis_dir=None): """Generates WikipediaArticles from GCS that are part of shard shard_id.""" if not wikis_dir: wikis_dir = WIKI_CONTENT_DIR with tf.Graph().as_default(): dataset = tf.data.TFRecordDataset( cc_utils.readahead( os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)), buffer_size=16 * 1000 * 1000) def _parse_example(ex_ser): """Parse serialized Example containing Wikipedia article content.""" features = { "url": tf.VarLenFeature(tf.string), "title": tf.VarLenFeature(tf.string), "section_titles": tf.VarLenFeature(tf.string), "section_texts": tf.VarLenFeature(tf.string), } ex = tf.parse_single_example(ex_ser, features) for k in ex.keys(): ex[k] = ex[k].values ex["url"] = ex["url"][0] ex["title"] = ex["title"][0] return ex dataset = dataset.map(_parse_example, num_parallel_calls=32) dataset = dataset.prefetch(100) record_it = dataset.make_one_shot_iterator().get_next() with tf.Session() as sess: while True: try: ex = sess.run(record_it) except tf.errors.OutOfRangeError: break sections = [ WikipediaSection(title=text_encoder.to_unicode(title), text=text_encoder.to_unicode(text)) for title, text in zip(ex["section_titles"], ex["section_texts"]) ] yield WikipediaArticle( url=text_encoder.to_unicode(ex["url"]), title=text_encoder.to_unicode(ex["title"]), sections=sections)
['def', '_wiki_articles', '(', 'shard_id', ',', 'wikis_dir', '=', 'None', ')', ':', 'if', 'not', 'wikis_dir', ':', 'wikis_dir', '=', 'WIKI_CONTENT_DIR', 'with', 'tf', '.', 'Graph', '(', ')', '.', 'as_default', '(', ')', ':', 'dataset', '=', 'tf', '.', 'data', '.', 'TFRecordDataset', '(', 'cc_utils', '.', 'readahead', '(', 'os', '.', 'path', '.', 'join', '(', 'wikis_dir', ',', 'WIKI_CONTENT_FILE', '%', 'shard_id', ')', ')', ',', 'buffer_size', '=', '16', '*', '1000', '*', '1000', ')', 'def', '_parse_example', '(', 'ex_ser', ')', ':', '"""Parse serialized Example containing Wikipedia article content."""', 'features', '=', '{', '"url"', ':', 'tf', '.', 'VarLenFeature', '(', 'tf', '.', 'string', ')', ',', '"title"', ':', 'tf', '.', 'VarLenFeature', '(', 'tf', '.', 'string', ')', ',', '"section_titles"', ':', 'tf', '.', 'VarLenFeature', '(', 'tf', '.', 'string', ')', ',', '"section_texts"', ':', 'tf', '.', 'VarLenFeature', '(', 'tf', '.', 'string', ')', ',', '}', 'ex', '=', 'tf', '.', 'parse_single_example', '(', 'ex_ser', ',', 'features', ')', 'for', 'k', 'in', 'ex', '.', 'keys', '(', ')', ':', 'ex', '[', 'k', ']', '=', 'ex', '[', 'k', ']', '.', 'values', 'ex', '[', '"url"', ']', '=', 'ex', '[', '"url"', ']', '[', '0', ']', 'ex', '[', '"title"', ']', '=', 'ex', '[', '"title"', ']', '[', '0', ']', 'return', 'ex', 'dataset', '=', 'dataset', '.', 'map', '(', '_parse_example', ',', 'num_parallel_calls', '=', '32', ')', 'dataset', '=', 'dataset', '.', 'prefetch', '(', '100', ')', 'record_it', '=', 'dataset', '.', 'make_one_shot_iterator', '(', ')', '.', 'get_next', '(', ')', 'with', 'tf', '.', 'Session', '(', ')', 'as', 'sess', ':', 'while', 'True', ':', 'try', ':', 'ex', '=', 'sess', '.', 'run', '(', 'record_it', ')', 'except', 'tf', '.', 'errors', '.', 'OutOfRangeError', ':', 'break', 'sections', '=', '[', 'WikipediaSection', '(', 'title', '=', 'text_encoder', '.', 'to_unicode', '(', 'title', ')', ',', 'text', '=', 'text_encoder', '.', 'to_unicode', '(', 'text', ')', ')', 'for', 'title', ',', 'text', 'in', 'zip', '(', 'ex', '[', '"section_titles"', ']', ',', 'ex', '[', '"section_texts"', ']', ')', ']', 'yield', 'WikipediaArticle', '(', 'url', '=', 'text_encoder', '.', 'to_unicode', '(', 'ex', '[', '"url"', ']', ')', ',', 'title', '=', 'text_encoder', '.', 'to_unicode', '(', 'ex', '[', '"title"', ']', ')', ',', 'sections', '=', 'sections', ')']
Generates WikipediaArticles from GCS that are part of shard shard_id.
['Generates', 'WikipediaArticles', 'from', 'GCS', 'that', 'are', 'part', 'of', 'shard', 'shard_id', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/wikisum.py#L279-L323
3,307
facetoe/zenpy
zenpy/lib/api_objects/help_centre_objects.py
Article.author
def author(self): """ | Comment: The id of the user who wrote the article (set to the user who made the request on create by default) """ if self.api and self.author_id: return self.api._get_user(self.author_id)
python
def author(self): """ | Comment: The id of the user who wrote the article (set to the user who made the request on create by default) """ if self.api and self.author_id: return self.api._get_user(self.author_id)
['def', 'author', '(', 'self', ')', ':', 'if', 'self', '.', 'api', 'and', 'self', '.', 'author_id', ':', 'return', 'self', '.', 'api', '.', '_get_user', '(', 'self', '.', 'author_id', ')']
| Comment: The id of the user who wrote the article (set to the user who made the request on create by default)
['|', 'Comment', ':', 'The', 'id', 'of', 'the', 'user', 'who', 'wrote', 'the', 'article', '(', 'set', 'to', 'the', 'user', 'who', 'made', 'the', 'request', 'on', 'create', 'by', 'default', ')']
train
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api_objects/help_centre_objects.py#L233-L238
3,308
python-cmd2/cmd2
cmd2/cmd2.py
Cmd.do_py
def do_py(self, args: argparse.Namespace) -> bool: """Invoke Python command or shell""" from .pyscript_bridge import PyscriptBridge if self._in_py: err = "Recursively entering interactive Python consoles is not allowed." self.perror(err, traceback_war=False) return False try: self._in_py = True # Support the run command even if called prior to invoking an interactive interpreter def py_run(filename: str): """Run a Python script file in the interactive console. :param filename: filename of *.py script file to run """ expanded_filename = os.path.expanduser(filename) # cmd_echo defaults to False for scripts. The user can always toggle this value in their script. bridge.cmd_echo = False try: with open(expanded_filename) as f: interp.runcode(f.read()) except OSError as ex: error_msg = "Error opening script file '{}': {}".format(expanded_filename, ex) self.perror(error_msg, traceback_war=False) def py_quit(): """Function callable from the interactive Python console to exit that environment""" raise EmbeddedConsoleExit # Set up Python environment bridge = PyscriptBridge(self) self.pystate[self.pyscript_name] = bridge self.pystate['run'] = py_run self.pystate['quit'] = py_quit self.pystate['exit'] = py_quit if self.locals_in_py: self.pystate['self'] = self elif 'self' in self.pystate: del self.pystate['self'] localvars = self.pystate from code import InteractiveConsole interp = InteractiveConsole(locals=localvars) interp.runcode('import sys, os;sys.path.insert(0, os.getcwd())') # Check if the user is running a Python statement on the command line if args.command: full_command = args.command if args.remainder: full_command += ' ' + ' '.join(args.remainder) # Set cmd_echo to True so PyscriptBridge statements like: py app('help') # run at the command line will print their output. 
bridge.cmd_echo = True # noinspection PyBroadException try: interp.runcode(full_command) except BaseException: # We don't care about any exception that happened in the interactive console pass # If there are no args, then we will open an interactive Python console else: # Set up readline for Python console if rl_type != RlType.NONE: # Save cmd2 history saved_cmd2_history = [] for i in range(1, readline.get_current_history_length() + 1): # noinspection PyArgumentList saved_cmd2_history.append(readline.get_history_item(i)) readline.clear_history() # Restore py's history for item in self.py_history: readline.add_history(item) if self.use_rawinput and self.completekey: # Set up tab completion for the Python console # rlcompleter relies on the default settings of the Python readline module if rl_type == RlType.GNU: saved_basic_quotes = ctypes.cast(rl_basic_quote_characters, ctypes.c_void_p).value rl_basic_quote_characters.value = orig_rl_basic_quotes if 'gnureadline' in sys.modules: # rlcompleter imports readline by name, so it won't use gnureadline # Force rlcompleter to use gnureadline instead so it has our settings and history saved_readline = None if 'readline' in sys.modules: saved_readline = sys.modules['readline'] sys.modules['readline'] = sys.modules['gnureadline'] saved_delims = readline.get_completer_delims() readline.set_completer_delims(orig_rl_delims) # rlcompleter will not need cmd2's custom display function # This will be restored by cmd2 the next time complete() is called if rl_type == RlType.GNU: readline.set_completion_display_matches_hook(None) elif rl_type == RlType.PYREADLINE: # noinspection PyUnresolvedReferences readline.rl.mode._display_completions = self._display_matches_pyreadline # Save off the current completer and set a new one in the Python console # Make sure it tab completes from its locals() dictionary saved_completer = readline.get_completer() interp.runcode("from rlcompleter import Completer") interp.runcode("import readline") interp.runcode("readline.set_completer(Completer(locals()).complete)") # Set up sys module for the Python console self._reset_py_display() saved_sys_stdout = sys.stdout sys.stdout = self.stdout saved_sys_stdin = sys.stdin sys.stdin = self.stdin cprt = 'Type "help", "copyright", "credits" or "license" for more information.' instructions = ('End with `Ctrl-D` (Unix) / `Ctrl-Z` (Windows), `quit()`, `exit()`.\n' 'Non-Python commands can be issued with: {}("your command")\n' 'Run Python code from external script files with: run("script.py")' .format(self.pyscript_name)) # noinspection PyBroadException try: interp.interact(banner="Python {} on {}\n{}\n\n{}\n". 
format(sys.version, sys.platform, cprt, instructions)) except BaseException: # We don't care about any exception that happened in the interactive console pass finally: sys.stdout = saved_sys_stdout sys.stdin = saved_sys_stdin # Set up readline for cmd2 if rl_type != RlType.NONE: # Save py's history self.py_history.clear() for i in range(1, readline.get_current_history_length() + 1): # noinspection PyArgumentList self.py_history.append(readline.get_history_item(i)) readline.clear_history() # Restore cmd2's history for item in saved_cmd2_history: readline.add_history(item) if self.use_rawinput and self.completekey: # Restore cmd2's tab completion settings readline.set_completer(saved_completer) readline.set_completer_delims(saved_delims) if rl_type == RlType.GNU: rl_basic_quote_characters.value = saved_basic_quotes if 'gnureadline' in sys.modules: # Restore what the readline module pointed to if saved_readline is None: del(sys.modules['readline']) else: sys.modules['readline'] = saved_readline except KeyboardInterrupt: pass finally: self._in_py = False return self._should_quit
python
def do_py(self, args: argparse.Namespace) -> bool: """Invoke Python command or shell""" from .pyscript_bridge import PyscriptBridge if self._in_py: err = "Recursively entering interactive Python consoles is not allowed." self.perror(err, traceback_war=False) return False try: self._in_py = True # Support the run command even if called prior to invoking an interactive interpreter def py_run(filename: str): """Run a Python script file in the interactive console. :param filename: filename of *.py script file to run """ expanded_filename = os.path.expanduser(filename) # cmd_echo defaults to False for scripts. The user can always toggle this value in their script. bridge.cmd_echo = False try: with open(expanded_filename) as f: interp.runcode(f.read()) except OSError as ex: error_msg = "Error opening script file '{}': {}".format(expanded_filename, ex) self.perror(error_msg, traceback_war=False) def py_quit(): """Function callable from the interactive Python console to exit that environment""" raise EmbeddedConsoleExit # Set up Python environment bridge = PyscriptBridge(self) self.pystate[self.pyscript_name] = bridge self.pystate['run'] = py_run self.pystate['quit'] = py_quit self.pystate['exit'] = py_quit if self.locals_in_py: self.pystate['self'] = self elif 'self' in self.pystate: del self.pystate['self'] localvars = self.pystate from code import InteractiveConsole interp = InteractiveConsole(locals=localvars) interp.runcode('import sys, os;sys.path.insert(0, os.getcwd())') # Check if the user is running a Python statement on the command line if args.command: full_command = args.command if args.remainder: full_command += ' ' + ' '.join(args.remainder) # Set cmd_echo to True so PyscriptBridge statements like: py app('help') # run at the command line will print their output. 
bridge.cmd_echo = True # noinspection PyBroadException try: interp.runcode(full_command) except BaseException: # We don't care about any exception that happened in the interactive console pass # If there are no args, then we will open an interactive Python console else: # Set up readline for Python console if rl_type != RlType.NONE: # Save cmd2 history saved_cmd2_history = [] for i in range(1, readline.get_current_history_length() + 1): # noinspection PyArgumentList saved_cmd2_history.append(readline.get_history_item(i)) readline.clear_history() # Restore py's history for item in self.py_history: readline.add_history(item) if self.use_rawinput and self.completekey: # Set up tab completion for the Python console # rlcompleter relies on the default settings of the Python readline module if rl_type == RlType.GNU: saved_basic_quotes = ctypes.cast(rl_basic_quote_characters, ctypes.c_void_p).value rl_basic_quote_characters.value = orig_rl_basic_quotes if 'gnureadline' in sys.modules: # rlcompleter imports readline by name, so it won't use gnureadline # Force rlcompleter to use gnureadline instead so it has our settings and history saved_readline = None if 'readline' in sys.modules: saved_readline = sys.modules['readline'] sys.modules['readline'] = sys.modules['gnureadline'] saved_delims = readline.get_completer_delims() readline.set_completer_delims(orig_rl_delims) # rlcompleter will not need cmd2's custom display function # This will be restored by cmd2 the next time complete() is called if rl_type == RlType.GNU: readline.set_completion_display_matches_hook(None) elif rl_type == RlType.PYREADLINE: # noinspection PyUnresolvedReferences readline.rl.mode._display_completions = self._display_matches_pyreadline # Save off the current completer and set a new one in the Python console # Make sure it tab completes from its locals() dictionary saved_completer = readline.get_completer() interp.runcode("from rlcompleter import Completer") interp.runcode("import readline") interp.runcode("readline.set_completer(Completer(locals()).complete)") # Set up sys module for the Python console self._reset_py_display() saved_sys_stdout = sys.stdout sys.stdout = self.stdout saved_sys_stdin = sys.stdin sys.stdin = self.stdin cprt = 'Type "help", "copyright", "credits" or "license" for more information.' instructions = ('End with `Ctrl-D` (Unix) / `Ctrl-Z` (Windows), `quit()`, `exit()`.\n' 'Non-Python commands can be issued with: {}("your command")\n' 'Run Python code from external script files with: run("script.py")' .format(self.pyscript_name)) # noinspection PyBroadException try: interp.interact(banner="Python {} on {}\n{}\n\n{}\n". 
format(sys.version, sys.platform, cprt, instructions)) except BaseException: # We don't care about any exception that happened in the interactive console pass finally: sys.stdout = saved_sys_stdout sys.stdin = saved_sys_stdin # Set up readline for cmd2 if rl_type != RlType.NONE: # Save py's history self.py_history.clear() for i in range(1, readline.get_current_history_length() + 1): # noinspection PyArgumentList self.py_history.append(readline.get_history_item(i)) readline.clear_history() # Restore cmd2's history for item in saved_cmd2_history: readline.add_history(item) if self.use_rawinput and self.completekey: # Restore cmd2's tab completion settings readline.set_completer(saved_completer) readline.set_completer_delims(saved_delims) if rl_type == RlType.GNU: rl_basic_quote_characters.value = saved_basic_quotes if 'gnureadline' in sys.modules: # Restore what the readline module pointed to if saved_readline is None: del(sys.modules['readline']) else: sys.modules['readline'] = saved_readline except KeyboardInterrupt: pass finally: self._in_py = False return self._should_quit
['def', 'do_py', '(', 'self', ',', 'args', ':', 'argparse', '.', 'Namespace', ')', '->', 'bool', ':', 'from', '.', 'pyscript_bridge', 'import', 'PyscriptBridge', 'if', 'self', '.', '_in_py', ':', 'err', '=', '"Recursively entering interactive Python consoles is not allowed."', 'self', '.', 'perror', '(', 'err', ',', 'traceback_war', '=', 'False', ')', 'return', 'False', 'try', ':', 'self', '.', '_in_py', '=', 'True', '# Support the run command even if called prior to invoking an interactive interpreter', 'def', 'py_run', '(', 'filename', ':', 'str', ')', ':', '"""Run a Python script file in the interactive console.\n :param filename: filename of *.py script file to run\n """', 'expanded_filename', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'filename', ')', '# cmd_echo defaults to False for scripts. The user can always toggle this value in their script.', 'bridge', '.', 'cmd_echo', '=', 'False', 'try', ':', 'with', 'open', '(', 'expanded_filename', ')', 'as', 'f', ':', 'interp', '.', 'runcode', '(', 'f', '.', 'read', '(', ')', ')', 'except', 'OSError', 'as', 'ex', ':', 'error_msg', '=', '"Error opening script file \'{}\': {}"', '.', 'format', '(', 'expanded_filename', ',', 'ex', ')', 'self', '.', 'perror', '(', 'error_msg', ',', 'traceback_war', '=', 'False', ')', 'def', 'py_quit', '(', ')', ':', '"""Function callable from the interactive Python console to exit that environment"""', 'raise', 'EmbeddedConsoleExit', '# Set up Python environment', 'bridge', '=', 'PyscriptBridge', '(', 'self', ')', 'self', '.', 'pystate', '[', 'self', '.', 'pyscript_name', ']', '=', 'bridge', 'self', '.', 'pystate', '[', "'run'", ']', '=', 'py_run', 'self', '.', 'pystate', '[', "'quit'", ']', '=', 'py_quit', 'self', '.', 'pystate', '[', "'exit'", ']', '=', 'py_quit', 'if', 'self', '.', 'locals_in_py', ':', 'self', '.', 'pystate', '[', "'self'", ']', '=', 'self', 'elif', "'self'", 'in', 'self', '.', 'pystate', ':', 'del', 'self', '.', 'pystate', '[', "'self'", ']', 'localvars', '=', 'self', '.', 'pystate', 'from', 'code', 'import', 'InteractiveConsole', 'interp', '=', 'InteractiveConsole', '(', 'locals', '=', 'localvars', ')', 'interp', '.', 'runcode', '(', "'import sys, os;sys.path.insert(0, os.getcwd())'", ')', '# Check if the user is running a Python statement on the command line', 'if', 'args', '.', 'command', ':', 'full_command', '=', 'args', '.', 'command', 'if', 'args', '.', 'remainder', ':', 'full_command', '+=', "' '", '+', "' '", '.', 'join', '(', 'args', '.', 'remainder', ')', "# Set cmd_echo to True so PyscriptBridge statements like: py app('help')", '# run at the command line will print their output.', 'bridge', '.', 'cmd_echo', '=', 'True', '# noinspection PyBroadException', 'try', ':', 'interp', '.', 'runcode', '(', 'full_command', ')', 'except', 'BaseException', ':', "# We don't care about any exception that happened in the interactive console", 'pass', '# If there are no args, then we will open an interactive Python console', 'else', ':', '# Set up readline for Python console', 'if', 'rl_type', '!=', 'RlType', '.', 'NONE', ':', '# Save cmd2 history', 'saved_cmd2_history', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '1', ',', 'readline', '.', 'get_current_history_length', '(', ')', '+', '1', ')', ':', '# noinspection PyArgumentList', 'saved_cmd2_history', '.', 'append', '(', 'readline', '.', 'get_history_item', '(', 'i', ')', ')', 'readline', '.', 'clear_history', '(', ')', "# Restore py's history", 'for', 'item', 'in', 'self', '.', 'py_history', ':', 'readline', '.', 'add_history', '(', 
'item', ')', 'if', 'self', '.', 'use_rawinput', 'and', 'self', '.', 'completekey', ':', '# Set up tab completion for the Python console', '# rlcompleter relies on the default settings of the Python readline module', 'if', 'rl_type', '==', 'RlType', '.', 'GNU', ':', 'saved_basic_quotes', '=', 'ctypes', '.', 'cast', '(', 'rl_basic_quote_characters', ',', 'ctypes', '.', 'c_void_p', ')', '.', 'value', 'rl_basic_quote_characters', '.', 'value', '=', 'orig_rl_basic_quotes', 'if', "'gnureadline'", 'in', 'sys', '.', 'modules', ':', "# rlcompleter imports readline by name, so it won't use gnureadline", '# Force rlcompleter to use gnureadline instead so it has our settings and history', 'saved_readline', '=', 'None', 'if', "'readline'", 'in', 'sys', '.', 'modules', ':', 'saved_readline', '=', 'sys', '.', 'modules', '[', "'readline'", ']', 'sys', '.', 'modules', '[', "'readline'", ']', '=', 'sys', '.', 'modules', '[', "'gnureadline'", ']', 'saved_delims', '=', 'readline', '.', 'get_completer_delims', '(', ')', 'readline', '.', 'set_completer_delims', '(', 'orig_rl_delims', ')', "# rlcompleter will not need cmd2's custom display function", '# This will be restored by cmd2 the next time complete() is called', 'if', 'rl_type', '==', 'RlType', '.', 'GNU', ':', 'readline', '.', 'set_completion_display_matches_hook', '(', 'None', ')', 'elif', 'rl_type', '==', 'RlType', '.', 'PYREADLINE', ':', '# noinspection PyUnresolvedReferences', 'readline', '.', 'rl', '.', 'mode', '.', '_display_completions', '=', 'self', '.', '_display_matches_pyreadline', '# Save off the current completer and set a new one in the Python console', '# Make sure it tab completes from its locals() dictionary', 'saved_completer', '=', 'readline', '.', 'get_completer', '(', ')', 'interp', '.', 'runcode', '(', '"from rlcompleter import Completer"', ')', 'interp', '.', 'runcode', '(', '"import readline"', ')', 'interp', '.', 'runcode', '(', '"readline.set_completer(Completer(locals()).complete)"', ')', '# Set up sys module for the Python console', 'self', '.', '_reset_py_display', '(', ')', 'saved_sys_stdout', '=', 'sys', '.', 'stdout', 'sys', '.', 'stdout', '=', 'self', '.', 'stdout', 'saved_sys_stdin', '=', 'sys', '.', 'stdin', 'sys', '.', 'stdin', '=', 'self', '.', 'stdin', 'cprt', '=', '\'Type "help", "copyright", "credits" or "license" for more information.\'', 'instructions', '=', '(', "'End with `Ctrl-D` (Unix) / `Ctrl-Z` (Windows), `quit()`, `exit()`.\\n'", '\'Non-Python commands can be issued with: {}("your command")\\n\'', '\'Run Python code from external script files with: run("script.py")\'', '.', 'format', '(', 'self', '.', 'pyscript_name', ')', ')', '# noinspection PyBroadException', 'try', ':', 'interp', '.', 'interact', '(', 'banner', '=', '"Python {} on {}\\n{}\\n\\n{}\\n"', '.', 'format', '(', 'sys', '.', 'version', ',', 'sys', '.', 'platform', ',', 'cprt', ',', 'instructions', ')', ')', 'except', 'BaseException', ':', "# We don't care about any exception that happened in the interactive console", 'pass', 'finally', ':', 'sys', '.', 'stdout', '=', 'saved_sys_stdout', 'sys', '.', 'stdin', '=', 'saved_sys_stdin', '# Set up readline for cmd2', 'if', 'rl_type', '!=', 'RlType', '.', 'NONE', ':', "# Save py's history", 'self', '.', 'py_history', '.', 'clear', '(', ')', 'for', 'i', 'in', 'range', '(', '1', ',', 'readline', '.', 'get_current_history_length', '(', ')', '+', '1', ')', ':', '# noinspection PyArgumentList', 'self', '.', 'py_history', '.', 'append', '(', 'readline', '.', 'get_history_item', '(', 'i', ')', ')', 
'readline', '.', 'clear_history', '(', ')', "# Restore cmd2's history", 'for', 'item', 'in', 'saved_cmd2_history', ':', 'readline', '.', 'add_history', '(', 'item', ')', 'if', 'self', '.', 'use_rawinput', 'and', 'self', '.', 'completekey', ':', "# Restore cmd2's tab completion settings", 'readline', '.', 'set_completer', '(', 'saved_completer', ')', 'readline', '.', 'set_completer_delims', '(', 'saved_delims', ')', 'if', 'rl_type', '==', 'RlType', '.', 'GNU', ':', 'rl_basic_quote_characters', '.', 'value', '=', 'saved_basic_quotes', 'if', "'gnureadline'", 'in', 'sys', '.', 'modules', ':', '# Restore what the readline module pointed to', 'if', 'saved_readline', 'is', 'None', ':', 'del', '(', 'sys', '.', 'modules', '[', "'readline'", ']', ')', 'else', ':', 'sys', '.', 'modules', '[', "'readline'", ']', '=', 'saved_readline', 'except', 'KeyboardInterrupt', ':', 'pass', 'finally', ':', 'self', '.', '_in_py', '=', 'False', 'return', 'self', '.', '_should_quit']
Invoke Python command or shell
['Invoke', 'Python', 'command', 'or', 'shell']
train
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L3034-L3212
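A stripped-down sketch of the core mechanism do_py builds on: embedding a code.InteractiveConsole with a custom locals dictionary, running a one-off statement (the `py <command>` path), or dropping into a full interactive session. None of the cmd2 readline, history, or stdout redirection plumbing is reproduced here.

from code import InteractiveConsole

pystate = {'answer': 42}                      # plays the role of self.pystate
interp = InteractiveConsole(locals=pystate)
interp.runcode('import sys, os; sys.path.insert(0, os.getcwd())')
interp.runcode("print('answer is', answer)")  # one-off statement, like `py <command>`
# interp.interact(banner='embedded console')  # uncomment for the interactive path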
3,309
Kami/python-yubico-client
yubico_client/yubico.py
Yubico.get_parameters_as_dictionary
def get_parameters_as_dictionary(self, query_string): """ Returns query string parameters as a dictionary. """ pairs = (x.split('=', 1) for x in query_string.split('&')) return dict((k, unquote(v)) for k, v in pairs)
python
def get_parameters_as_dictionary(self, query_string): """ Returns query string parameters as a dictionary. """ pairs = (x.split('=', 1) for x in query_string.split('&')) return dict((k, unquote(v)) for k, v in pairs)
['def', 'get_parameters_as_dictionary', '(', 'self', ',', 'query_string', ')', ':', 'pairs', '=', '(', 'x', '.', 'split', '(', "'='", ',', '1', ')', 'for', 'x', 'in', 'query_string', '.', 'split', '(', "'&'", ')', ')', 'return', 'dict', '(', '(', 'k', ',', 'unquote', '(', 'v', ')', ')', 'for', 'k', ',', 'v', 'in', 'pairs', ')']
Returns query string parameters as a dictionary.
['Returns', 'query', 'string', 'parameters', 'as', 'a', 'dictionary', '.']
train
https://github.com/Kami/python-yubico-client/blob/3334b2ee1b5b996af3ef6be57a4ea52b8e45e764/yubico_client/yubico.py#L342-L345
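The same parsing logic as a self-contained snippet, with urllib.parse.unquote standing in for the unquote that yubico_client imports:

from urllib.parse import unquote

def get_parameters_as_dictionary(query_string):
    """Return query string parameters as a dictionary."""
    pairs = (x.split('=', 1) for x in query_string.split('&'))
    return dict((k, unquote(v)) for k, v in pairs)

print(get_parameters_as_dictionary('otp=abc123&nonce=xyz%20789'))
# {'otp': 'abc123', 'nonce': 'xyz 789'}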
3,310
ldomic/lintools
lintools/molecule.py
Molecule.load_molecule_in_rdkit_smiles
def load_molecule_in_rdkit_smiles(self, molSize,kekulize=True,bonds=[],bond_color=None,atom_color = {}, size= {} ): """ Loads mol file in rdkit without the hydrogens - they do not have to appear in the final figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to draw best - since we do not care about the actual coordinates of the original molecule, it is sufficient to have just 2D information. Some molecules can be problematic to import and steps such as stopping sanitize function can be taken. This is done automatically if problems are observed. However, better solutions can also be implemented and need more research. The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is saved as an SVG file. """ mol_in_rdkit = self.topology_data.mol #need to reload without hydrogens try: mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit) self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit)) except ValueError: mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit, sanitize = False) self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit), sanitize=False) self.atom_identities = {} i=0 for atom in self.topology_data.smiles.GetAtoms(): self.atom_identities[mol_in_rdkit.GetProp('_smilesAtomOutputOrder')[1:].rsplit(",")[i]] = atom.GetIdx() i+=1 mc = Chem.Mol(self.topology_data.smiles.ToBinary()) if kekulize: try: Chem.Kekulize(mc) except: mc = Chem.Mol(self.topology_data.smiles.ToBinary()) if not mc.GetNumConformers(): rdDepictor.Compute2DCoords(mc) atoms=[] colors={} for i in range(mol_in_rdkit.GetNumAtoms()): atoms.append(i) if len(atom_color)==0: colors[i]=(1,1,1) else: colors = atom_color drawer = rdMolDraw2D.MolDraw2DSVG(int(molSize[0]),int(molSize[1])) drawer.DrawMolecule(mc,highlightAtoms=atoms,highlightBonds=bonds, highlightAtomColors=colors,highlightAtomRadii=size,highlightBondColors=bond_color) drawer.FinishDrawing() self.svg = drawer.GetDrawingText().replace('svg:','') filesvg = open("molecule.svg", "w+") filesvg.write(self.svg)
python
def load_molecule_in_rdkit_smiles(self, molSize,kekulize=True,bonds=[],bond_color=None,atom_color = {}, size= {} ): """ Loads mol file in rdkit without the hydrogens - they do not have to appear in the final figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to draw best - since we do not care about the actual coordinates of the original molecule, it is sufficient to have just 2D information. Some molecules can be problematic to import and steps such as stopping sanitize function can be taken. This is done automatically if problems are observed. However, better solutions can also be implemented and need more research. The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is saved as an SVG file. """ mol_in_rdkit = self.topology_data.mol #need to reload without hydrogens try: mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit) self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit)) except ValueError: mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit, sanitize = False) self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit), sanitize=False) self.atom_identities = {} i=0 for atom in self.topology_data.smiles.GetAtoms(): self.atom_identities[mol_in_rdkit.GetProp('_smilesAtomOutputOrder')[1:].rsplit(",")[i]] = atom.GetIdx() i+=1 mc = Chem.Mol(self.topology_data.smiles.ToBinary()) if kekulize: try: Chem.Kekulize(mc) except: mc = Chem.Mol(self.topology_data.smiles.ToBinary()) if not mc.GetNumConformers(): rdDepictor.Compute2DCoords(mc) atoms=[] colors={} for i in range(mol_in_rdkit.GetNumAtoms()): atoms.append(i) if len(atom_color)==0: colors[i]=(1,1,1) else: colors = atom_color drawer = rdMolDraw2D.MolDraw2DSVG(int(molSize[0]),int(molSize[1])) drawer.DrawMolecule(mc,highlightAtoms=atoms,highlightBonds=bonds, highlightAtomColors=colors,highlightAtomRadii=size,highlightBondColors=bond_color) drawer.FinishDrawing() self.svg = drawer.GetDrawingText().replace('svg:','') filesvg = open("molecule.svg", "w+") filesvg.write(self.svg)
['def', 'load_molecule_in_rdkit_smiles', '(', 'self', ',', 'molSize', ',', 'kekulize', '=', 'True', ',', 'bonds', '=', '[', ']', ',', 'bond_color', '=', 'None', ',', 'atom_color', '=', '{', '}', ',', 'size', '=', '{', '}', ')', ':', 'mol_in_rdkit', '=', 'self', '.', 'topology_data', '.', 'mol', '#need to reload without hydrogens', 'try', ':', 'mol_in_rdkit', '=', 'Chem', '.', 'RemoveHs', '(', 'mol_in_rdkit', ')', 'self', '.', 'topology_data', '.', 'smiles', '=', 'Chem', '.', 'MolFromSmiles', '(', 'Chem', '.', 'MolToSmiles', '(', 'mol_in_rdkit', ')', ')', 'except', 'ValueError', ':', 'mol_in_rdkit', '=', 'Chem', '.', 'RemoveHs', '(', 'mol_in_rdkit', ',', 'sanitize', '=', 'False', ')', 'self', '.', 'topology_data', '.', 'smiles', '=', 'Chem', '.', 'MolFromSmiles', '(', 'Chem', '.', 'MolToSmiles', '(', 'mol_in_rdkit', ')', ',', 'sanitize', '=', 'False', ')', 'self', '.', 'atom_identities', '=', '{', '}', 'i', '=', '0', 'for', 'atom', 'in', 'self', '.', 'topology_data', '.', 'smiles', '.', 'GetAtoms', '(', ')', ':', 'self', '.', 'atom_identities', '[', 'mol_in_rdkit', '.', 'GetProp', '(', "'_smilesAtomOutputOrder'", ')', '[', '1', ':', ']', '.', 'rsplit', '(', '","', ')', '[', 'i', ']', ']', '=', 'atom', '.', 'GetIdx', '(', ')', 'i', '+=', '1', 'mc', '=', 'Chem', '.', 'Mol', '(', 'self', '.', 'topology_data', '.', 'smiles', '.', 'ToBinary', '(', ')', ')', 'if', 'kekulize', ':', 'try', ':', 'Chem', '.', 'Kekulize', '(', 'mc', ')', 'except', ':', 'mc', '=', 'Chem', '.', 'Mol', '(', 'self', '.', 'topology_data', '.', 'smiles', '.', 'ToBinary', '(', ')', ')', 'if', 'not', 'mc', '.', 'GetNumConformers', '(', ')', ':', 'rdDepictor', '.', 'Compute2DCoords', '(', 'mc', ')', 'atoms', '=', '[', ']', 'colors', '=', '{', '}', 'for', 'i', 'in', 'range', '(', 'mol_in_rdkit', '.', 'GetNumAtoms', '(', ')', ')', ':', 'atoms', '.', 'append', '(', 'i', ')', 'if', 'len', '(', 'atom_color', ')', '==', '0', ':', 'colors', '[', 'i', ']', '=', '(', '1', ',', '1', ',', '1', ')', 'else', ':', 'colors', '=', 'atom_color', 'drawer', '=', 'rdMolDraw2D', '.', 'MolDraw2DSVG', '(', 'int', '(', 'molSize', '[', '0', ']', ')', ',', 'int', '(', 'molSize', '[', '1', ']', ')', ')', 'drawer', '.', 'DrawMolecule', '(', 'mc', ',', 'highlightAtoms', '=', 'atoms', ',', 'highlightBonds', '=', 'bonds', ',', 'highlightAtomColors', '=', 'colors', ',', 'highlightAtomRadii', '=', 'size', ',', 'highlightBondColors', '=', 'bond_color', ')', 'drawer', '.', 'FinishDrawing', '(', ')', 'self', '.', 'svg', '=', 'drawer', '.', 'GetDrawingText', '(', ')', '.', 'replace', '(', "'svg:'", ',', "''", ')', 'filesvg', '=', 'open', '(', '"molecule.svg"', ',', '"w+"', ')', 'filesvg', '.', 'write', '(', 'self', '.', 'svg', ')']
Loads mol file in rdkit without the hydrogens - they do not have to appear in the final figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to draw best - since we do not care about the actual coordinates of the original molecule, it is sufficient to have just 2D information. Some molecules can be problematic to import and steps such as stopping sanitize function can be taken. This is done automatically if problems are observed. However, better solutions can also be implemented and need more research. The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is saved as an SVG file.
['Loads', 'mol', 'file', 'in', 'rdkit', 'without', 'the', 'hydrogens', '-', 'they', 'do', 'not', 'have', 'to', 'appear', 'in', 'the', 'final', 'figure', '.', 'Once', 'loaded', 'the', 'molecule', 'is', 'converted', 'to', 'SMILES', 'format', 'which', 'RDKit', 'appears', 'to', 'draw', 'best', '-', 'since', 'we', 'do', 'not', 'care', 'about', 'the', 'actual', 'coordinates', 'of', 'the', 'original', 'molecule', 'it', 'is', 'sufficient', 'to', 'have', 'just', '2D', 'information', '.', 'Some', 'molecules', 'can', 'be', 'problematic', 'to', 'import', 'and', 'steps', 'such', 'as', 'stopping', 'sanitize', 'function', 'can', 'be', 'taken', '.', 'This', 'is', 'done', 'automatically', 'if', 'problems', 'are', 'observed', '.', 'However', 'better', 'solutions', 'can', 'also', 'be', 'implemented', 'and', 'need', 'more', 'research', '.', 'The', 'molecule', 'is', 'then', 'drawn', 'from', 'SMILES', 'in', '2D', 'representation', 'without', 'hydrogens', '.', 'The', 'drawing', 'is', 'saved', 'as', 'an', 'SVG', 'file', '.']
train
https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/molecule.py#L48-L94
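A minimal RDKit sketch of the drawing path used above: build a molecule from SMILES, compute 2D coordinates, and render highlighted atoms to SVG. It requires the rdkit package; the SMILES string and output size are arbitrary choices, and none of the lintools topology handling is reproduced.

from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D

mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')  # aspirin, for illustration
rdDepictor.Compute2DCoords(mol)

drawer = rdMolDraw2D.MolDraw2DSVG(300, 300)
atoms = list(range(mol.GetNumAtoms()))
colors = {i: (1, 1, 1) for i in atoms}             # white highlights, as in the original
drawer.DrawMolecule(mol, highlightAtoms=atoms, highlightAtomColors=colors)
drawer.FinishDrawing()

with open('molecule.svg', 'w') as f:
    f.write(drawer.GetDrawingText().replace('svg:', ''))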
3,311
suurjaak/InputScope
inputscope/webui.py
init
def init(): """Initialize configuration and web application.""" global app if app: return app conf.init(), db.init(conf.DbPath, conf.DbStatements) bottle.TEMPLATE_PATH.insert(0, conf.TemplatePath) app = bottle.default_app() bottle.BaseTemplate.defaults.update(get_url=app.get_url) return app
python
def init(): """Initialize configuration and web application.""" global app if app: return app conf.init(), db.init(conf.DbPath, conf.DbStatements) bottle.TEMPLATE_PATH.insert(0, conf.TemplatePath) app = bottle.default_app() bottle.BaseTemplate.defaults.update(get_url=app.get_url) return app
['def', 'init', '(', ')', ':', 'global', 'app', 'if', 'app', ':', 'return', 'app', 'conf', '.', 'init', '(', ')', ',', 'db', '.', 'init', '(', 'conf', '.', 'DbPath', ',', 'conf', '.', 'DbStatements', ')', 'bottle', '.', 'TEMPLATE_PATH', '.', 'insert', '(', '0', ',', 'conf', '.', 'TemplatePath', ')', 'app', '=', 'bottle', '.', 'default_app', '(', ')', 'bottle', '.', 'BaseTemplate', '.', 'defaults', '.', 'update', '(', 'get_url', '=', 'app', '.', 'get_url', ')', 'return', 'app']
Initialize configuration and web application.
['Initialize', 'configuration', 'and', 'web', 'application', '.']
train
https://github.com/suurjaak/InputScope/blob/245ff045163a1995e8cd5ac558d0a93024eb86eb/inputscope/webui.py#L210-L219
3,312
wonambi-python/wonambi
wonambi/widgets/channels.py
ChannelsGroup.get_info
def get_info(self): """Get the information about the channel groups. Returns ------- dict information about this channel group Notes ----- The items in selectedItems() are ordered based on the user's selection (which appears pretty random). It's more consistent to use the same order of the main channel list. That's why the additional for-loop is necessary. We don't care about the order of the reference channels. """ selectedItems = self.idx_l0.selectedItems() selected_chan = [x.text() for x in selectedItems] chan_to_plot = [] for chan in self.chan_name + ['_REF']: if chan in selected_chan: chan_to_plot.append(chan) selectedItems = self.idx_l1.selectedItems() ref_chan = [] for selected in selectedItems: ref_chan.append(selected.text()) hp = self.idx_hp.value() if hp == 0: low_cut = None else: low_cut = hp lp = self.idx_lp.value() if lp == 0: high_cut = None else: high_cut = lp scale = self.idx_scale.value() group_info = {'name': self.group_name, 'chan_to_plot': chan_to_plot, 'ref_chan': ref_chan, 'hp': low_cut, 'lp': high_cut, 'scale': float(scale), 'color': self.idx_color } return group_info
python
def get_info(self): """Get the information about the channel groups. Returns ------- dict information about this channel group Notes ----- The items in selectedItems() are ordered based on the user's selection (which appears pretty random). It's more consistent to use the same order of the main channel list. That's why the additional for-loop is necessary. We don't care about the order of the reference channels. """ selectedItems = self.idx_l0.selectedItems() selected_chan = [x.text() for x in selectedItems] chan_to_plot = [] for chan in self.chan_name + ['_REF']: if chan in selected_chan: chan_to_plot.append(chan) selectedItems = self.idx_l1.selectedItems() ref_chan = [] for selected in selectedItems: ref_chan.append(selected.text()) hp = self.idx_hp.value() if hp == 0: low_cut = None else: low_cut = hp lp = self.idx_lp.value() if lp == 0: high_cut = None else: high_cut = lp scale = self.idx_scale.value() group_info = {'name': self.group_name, 'chan_to_plot': chan_to_plot, 'ref_chan': ref_chan, 'hp': low_cut, 'lp': high_cut, 'scale': float(scale), 'color': self.idx_color } return group_info
['def', 'get_info', '(', 'self', ')', ':', 'selectedItems', '=', 'self', '.', 'idx_l0', '.', 'selectedItems', '(', ')', 'selected_chan', '=', '[', 'x', '.', 'text', '(', ')', 'for', 'x', 'in', 'selectedItems', ']', 'chan_to_plot', '=', '[', ']', 'for', 'chan', 'in', 'self', '.', 'chan_name', '+', '[', "'_REF'", ']', ':', 'if', 'chan', 'in', 'selected_chan', ':', 'chan_to_plot', '.', 'append', '(', 'chan', ')', 'selectedItems', '=', 'self', '.', 'idx_l1', '.', 'selectedItems', '(', ')', 'ref_chan', '=', '[', ']', 'for', 'selected', 'in', 'selectedItems', ':', 'ref_chan', '.', 'append', '(', 'selected', '.', 'text', '(', ')', ')', 'hp', '=', 'self', '.', 'idx_hp', '.', 'value', '(', ')', 'if', 'hp', '==', '0', ':', 'low_cut', '=', 'None', 'else', ':', 'low_cut', '=', 'hp', 'lp', '=', 'self', '.', 'idx_lp', '.', 'value', '(', ')', 'if', 'lp', '==', '0', ':', 'high_cut', '=', 'None', 'else', ':', 'high_cut', '=', 'lp', 'scale', '=', 'self', '.', 'idx_scale', '.', 'value', '(', ')', 'group_info', '=', '{', "'name'", ':', 'self', '.', 'group_name', ',', "'chan_to_plot'", ':', 'chan_to_plot', ',', "'ref_chan'", ':', 'ref_chan', ',', "'hp'", ':', 'low_cut', ',', "'lp'", ':', 'high_cut', ',', "'scale'", ':', 'float', '(', 'scale', ')', ',', "'color'", ':', 'self', '.', 'idx_color', '}', 'return', 'group_info']
Get the information about the channel groups. Returns ------- dict information about this channel group Notes ----- The items in selectedItems() are ordered based on the user's selection (which appears pretty random). It's more consistent to use the same order of the main channel list. That's why the additional for-loop is necessary. We don't care about the order of the reference channels.
['Get', 'the', 'information', 'about', 'the', 'channel', 'groups', '.']
train
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/channels.py#L221-L271
3,313
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_strained.py
IIIVZincBlendeStrained001.strain_in_plane
def strain_in_plane(self, **kwargs): ''' Returns the in-plane strain assuming no lattice relaxation, which is positive for tensile strain and negative for compressive strain. ''' if self._strain_out_of_plane is not None: return ((self._strain_out_of_plane / -2.) * (self.unstrained.c11(**kwargs) / self.unstrained.c12(**kwargs) ) ) else: return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs)
python
def strain_in_plane(self, **kwargs): ''' Returns the in-plane strain assuming no lattice relaxation, which is positive for tensile strain and negative for compressive strain. ''' if self._strain_out_of_plane is not None: return ((self._strain_out_of_plane / -2.) * (self.unstrained.c11(**kwargs) / self.unstrained.c12(**kwargs) ) ) else: return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs)
['def', 'strain_in_plane', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'if', 'self', '.', '_strain_out_of_plane', 'is', 'not', 'None', ':', 'return', '(', '(', 'self', '.', '_strain_out_of_plane', '/', '-', '2.', ')', '*', '(', 'self', '.', 'unstrained', '.', 'c11', '(', '*', '*', 'kwargs', ')', '/', 'self', '.', 'unstrained', '.', 'c12', '(', '*', '*', 'kwargs', ')', ')', ')', 'else', ':', 'return', '1', '-', 'self', '.', 'unstrained', '.', 'a', '(', '*', '*', 'kwargs', ')', '/', 'self', '.', 'substrate', '.', 'a', '(', '*', '*', 'kwargs', ')']
Returns the in-plane strain assuming no lattice relaxation, which is positive for tensile strain and negative for compressive strain.
['Returns', 'the', 'in', '-', 'plane', 'strain', 'assuming', 'no', 'lattice', 'relaxation', 'which', 'is', 'positive', 'for', 'tensile', 'strain', 'and', 'negative', 'for', 'compressive', 'strain', '.']
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L86-L96
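A plain-Python sketch of the two branches above: with a pinned out-of-plane strain the in-plane strain follows from the elastic constants c11 and c12; otherwise it is the lattice mismatch 1 - a_layer / a_substrate. The lattice constants in the example are the usual room-temperature values for InAs on an InP substrate, used purely for illustration.

def strain_in_plane(a_layer, a_substrate, strain_out_of_plane=None, c11=None, c12=None):
    if strain_out_of_plane is not None:
        return (strain_out_of_plane / -2.0) * (c11 / c12)
    return 1 - a_layer / a_substrate

print(strain_in_plane(a_layer=6.0583, a_substrate=5.8688))  # negative: compressive strain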
3,314
HIPS/autograd
autograd/differential_operators.py
value_and_grad
def value_and_grad(fun, x): """Returns a function that returns both value and gradient. Suitable for use in scipy.optimize""" vjp, ans = _make_vjp(fun, x) if not vspace(ans).size == 1: raise TypeError("value_and_grad only applies to real scalar-output " "functions. Try jacobian, elementwise_grad or " "holomorphic_grad.") return ans, vjp(vspace(ans).ones())
python
def value_and_grad(fun, x): """Returns a function that returns both value and gradient. Suitable for use in scipy.optimize""" vjp, ans = _make_vjp(fun, x) if not vspace(ans).size == 1: raise TypeError("value_and_grad only applies to real scalar-output " "functions. Try jacobian, elementwise_grad or " "holomorphic_grad.") return ans, vjp(vspace(ans).ones())
['def', 'value_and_grad', '(', 'fun', ',', 'x', ')', ':', 'vjp', ',', 'ans', '=', '_make_vjp', '(', 'fun', ',', 'x', ')', 'if', 'not', 'vspace', '(', 'ans', ')', '.', 'size', '==', '1', ':', 'raise', 'TypeError', '(', '"value_and_grad only applies to real scalar-output "', '"functions. Try jacobian, elementwise_grad or "', '"holomorphic_grad."', ')', 'return', 'ans', ',', 'vjp', '(', 'vspace', '(', 'ans', ')', '.', 'ones', '(', ')', ')']
Returns a function that returns both value and gradient. Suitable for use in scipy.optimize
['Returns', 'a', 'function', 'that', 'returns', 'both', 'value', 'and', 'gradient', '.', 'Suitable', 'for', 'use', 'in', 'scipy', '.', 'optimize']
train
https://github.com/HIPS/autograd/blob/e3b525302529d7490769d5c0bcfc7457e24e3b3e/autograd/differential_operators.py#L132-L140
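A hedged usage sketch: the snippet above is autograd's internal two-argument form, while the public entry point is curried as value_and_grad(fun)(x) by a decorator that is not shown in this row. Requires the autograd package.

import autograd.numpy as np
from autograd import value_and_grad

def loss(w):
    return np.sum(w ** 2)

val, grad = value_and_grad(loss)(np.array([1.0, 2.0, 3.0]))
print(val)   # 14.0
print(grad)  # [2. 4. 6.]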
3,315
opereto/pyopereto
pyopereto/client.py
OperetoClient.create_product
def create_product(self, product, version, build, name=None, description=None, attributes={}): ''' create_product(self, product, version, build, name=None, description=None, attributes={}) Create product :Parameters: * *product* (`string`) -- product * *version* (`string`) -- version * *build* (`string`) -- build * *name* (`string`) -- name * *description* (`string`) -- description * *attributes* (`object`) -- product attributes ''' request_data = {'product': product, 'version': version, 'build': build} if name: request_data['name']=name if description: request_data['description']=description if attributes: request_data['attributes']=attributes ret_data= self._call_rest_api('post', '/products', data=request_data, error='Failed to create a new product') pid = ret_data message = 'New product created [pid = %s] '%pid self.logger.info(message) return str(pid)
python
def create_product(self, product, version, build, name=None, description=None, attributes={}): ''' create_product(self, product, version, build, name=None, description=None, attributes={}) Create product :Parameters: * *product* (`string`) -- product * *version* (`string`) -- version * *build* (`string`) -- build * *name* (`string`) -- name * *description* (`string`) -- description * *attributes* (`object`) -- product attributes ''' request_data = {'product': product, 'version': version, 'build': build} if name: request_data['name']=name if description: request_data['description']=description if attributes: request_data['attributes']=attributes ret_data= self._call_rest_api('post', '/products', data=request_data, error='Failed to create a new product') pid = ret_data message = 'New product created [pid = %s] '%pid self.logger.info(message) return str(pid)
['def', 'create_product', '(', 'self', ',', 'product', ',', 'version', ',', 'build', ',', 'name', '=', 'None', ',', 'description', '=', 'None', ',', 'attributes', '=', '{', '}', ')', ':', 'request_data', '=', '{', "'product'", ':', 'product', ',', "'version'", ':', 'version', ',', "'build'", ':', 'build', '}', 'if', 'name', ':', 'request_data', '[', "'name'", ']', '=', 'name', 'if', 'description', ':', 'request_data', '[', "'description'", ']', '=', 'description', 'if', 'attributes', ':', 'request_data', '[', "'attributes'", ']', '=', 'attributes', 'ret_data', '=', 'self', '.', '_call_rest_api', '(', "'post'", ',', "'/products'", ',', 'data', '=', 'request_data', ',', 'error', '=', "'Failed to create a new product'", ')', 'pid', '=', 'ret_data', 'message', '=', "'New product created [pid = %s] '", '%', 'pid', 'self', '.', 'logger', '.', 'info', '(', 'message', ')', 'return', 'str', '(', 'pid', ')']
create_product(self, product, version, build, name=None, description=None, attributes={}) Create product :Parameters: * *product* (`string`) -- product * *version* (`string`) -- version * *build* (`string`) -- build * *name* (`string`) -- name * *description* (`string`) -- description * *attributes* (`object`) -- product attributes
['create_product', '(', 'self', 'product', 'version', 'build', 'name', '=', 'None', 'description', '=', 'None', 'attributes', '=', '{}', ')']
train
https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L1492-L1515
3,316
zetaops/zengine
zengine/tornado_server/server.py
HttpHandler.post
def post(self, view_name): """ login handler """ sess_id = None input_data = {} # try: self._handle_headers() # handle input input_data = json_decode(self.request.body) if self.request.body else {} input_data['path'] = view_name # set or get session cookie if not self.get_cookie(COOKIE_NAME) or 'username' in input_data: sess_id = uuid4().hex self.set_cookie(COOKIE_NAME, sess_id) # , domain='127.0.0.1' else: sess_id = self.get_cookie(COOKIE_NAME) # h_sess_id = "HTTP_%s" % sess_id input_data = {'data': input_data, '_zops_remote_ip': self.request.remote_ip} log.info("New Request for %s: %s" % (sess_id, input_data)) self.application.pc.register_websocket(sess_id, self) self.application.pc.redirect_incoming_message(sess_id, json_encode(input_data), self.request)
python
def post(self, view_name): """ login handler """ sess_id = None input_data = {} # try: self._handle_headers() # handle input input_data = json_decode(self.request.body) if self.request.body else {} input_data['path'] = view_name # set or get session cookie if not self.get_cookie(COOKIE_NAME) or 'username' in input_data: sess_id = uuid4().hex self.set_cookie(COOKIE_NAME, sess_id) # , domain='127.0.0.1' else: sess_id = self.get_cookie(COOKIE_NAME) # h_sess_id = "HTTP_%s" % sess_id input_data = {'data': input_data, '_zops_remote_ip': self.request.remote_ip} log.info("New Request for %s: %s" % (sess_id, input_data)) self.application.pc.register_websocket(sess_id, self) self.application.pc.redirect_incoming_message(sess_id, json_encode(input_data), self.request)
['def', 'post', '(', 'self', ',', 'view_name', ')', ':', 'sess_id', '=', 'None', 'input_data', '=', '{', '}', '# try:', 'self', '.', '_handle_headers', '(', ')', '# handle input', 'input_data', '=', 'json_decode', '(', 'self', '.', 'request', '.', 'body', ')', 'if', 'self', '.', 'request', '.', 'body', 'else', '{', '}', 'input_data', '[', "'path'", ']', '=', 'view_name', '# set or get session cookie', 'if', 'not', 'self', '.', 'get_cookie', '(', 'COOKIE_NAME', ')', 'or', "'username'", 'in', 'input_data', ':', 'sess_id', '=', 'uuid4', '(', ')', '.', 'hex', 'self', '.', 'set_cookie', '(', 'COOKIE_NAME', ',', 'sess_id', ')', "# , domain='127.0.0.1'", 'else', ':', 'sess_id', '=', 'self', '.', 'get_cookie', '(', 'COOKIE_NAME', ')', '# h_sess_id = "HTTP_%s" % sess_id', 'input_data', '=', '{', "'data'", ':', 'input_data', ',', "'_zops_remote_ip'", ':', 'self', '.', 'request', '.', 'remote_ip', '}', 'log', '.', 'info', '(', '"New Request for %s: %s"', '%', '(', 'sess_id', ',', 'input_data', ')', ')', 'self', '.', 'application', '.', 'pc', '.', 'register_websocket', '(', 'sess_id', ',', 'self', ')', 'self', '.', 'application', '.', 'pc', '.', 'redirect_incoming_message', '(', 'sess_id', ',', 'json_encode', '(', 'input_data', ')', ',', 'self', '.', 'request', ')']
login handler
['login', 'handler']
train
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/tornado_server/server.py#L110-L137
3,317
enkore/i3pystatus
i3pystatus/network.py
Network.cycle_interface
def cycle_interface(self, increment=1): """Cycle through available interfaces in `increment` steps. Sign indicates direction.""" interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces] if self.interface in interfaces: next_index = (interfaces.index(self.interface) + increment) % len(interfaces) self.interface = interfaces[next_index] elif len(interfaces) > 0: self.interface = interfaces[0] if self.network_traffic: self.network_traffic.clear_counters() self.kbs_arr = [0.0] * self.graph_width
python
def cycle_interface(self, increment=1): """Cycle through available interfaces in `increment` steps. Sign indicates direction.""" interfaces = [i for i in netifaces.interfaces() if i not in self.ignore_interfaces] if self.interface in interfaces: next_index = (interfaces.index(self.interface) + increment) % len(interfaces) self.interface = interfaces[next_index] elif len(interfaces) > 0: self.interface = interfaces[0] if self.network_traffic: self.network_traffic.clear_counters() self.kbs_arr = [0.0] * self.graph_width
['def', 'cycle_interface', '(', 'self', ',', 'increment', '=', '1', ')', ':', 'interfaces', '=', '[', 'i', 'for', 'i', 'in', 'netifaces', '.', 'interfaces', '(', ')', 'if', 'i', 'not', 'in', 'self', '.', 'ignore_interfaces', ']', 'if', 'self', '.', 'interface', 'in', 'interfaces', ':', 'next_index', '=', '(', 'interfaces', '.', 'index', '(', 'self', '.', 'interface', ')', '+', 'increment', ')', '%', 'len', '(', 'interfaces', ')', 'self', '.', 'interface', '=', 'interfaces', '[', 'next_index', ']', 'elif', 'len', '(', 'interfaces', ')', '>', '0', ':', 'self', '.', 'interface', '=', 'interfaces', '[', '0', ']', 'if', 'self', '.', 'network_traffic', ':', 'self', '.', 'network_traffic', '.', 'clear_counters', '(', ')', 'self', '.', 'kbs_arr', '=', '[', '0.0', ']', '*', 'self', '.', 'graph_width']
Cycle through available interfaces in `increment` steps. Sign indicates direction.
['Cycle', 'through', 'available', 'interfaces', 'in', 'increment', 'steps', '.', 'Sign', 'indicates', 'direction', '.']
train
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/network.py#L394-L405
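The cycling logic in the record above is a general wrap-around pattern: advance the index by a signed increment and take it modulo the list length, falling back to the first item if the current one is missing. A standalone sketch of just that pattern (names are illustrative, not part of i3pystatus):

def cycle(items, current, increment=1):
    # Return the next item, wrapping around in either direction.
    if current in items:
        return items[(items.index(current) + increment) % len(items)]
    return items[0] if items else None

interfaces = ["eth0", "wlan0", "tun0"]
print(cycle(interfaces, "tun0"))       # eth0 (wraps forward)
print(cycle(interfaces, "eth0", -1))   # tun0 (wraps backward)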
3,318
minio/minio-py
minio/compat.py
urlencode
def urlencode(resource): """ This implementation of urlencode supports all unicode characters :param: resource: Resource value to be url encoded. """ if isinstance(resource, str): return _urlencode(resource.encode('utf-8')) return _urlencode(resource)
python
def urlencode(resource): """ This implementation of urlencode supports all unicode characters :param: resource: Resource value to be url encoded. """ if isinstance(resource, str): return _urlencode(resource.encode('utf-8')) return _urlencode(resource)
['def', 'urlencode', '(', 'resource', ')', ':', 'if', 'isinstance', '(', 'resource', ',', 'str', ')', ':', 'return', '_urlencode', '(', 'resource', '.', 'encode', '(', "'utf-8'", ')', ')', 'return', '_urlencode', '(', 'resource', ')']
This implementation of urlencode supports all unicode characters :param: resource: Resource value to be url encoded.
['This', 'implementation', 'of', 'urlencode', 'supports', 'all', 'unicode', 'characters']
train
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/compat.py#L96-L105
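The compat shim above encodes str input to UTF-8 before delegating to the underlying quoting function so non-ASCII characters survive URL encoding. A rough standard-library equivalent for illustration only; urllib.parse.quote is an assumption here, not necessarily what minio's _urlencode wraps:

from urllib.parse import quote

def urlencode(resource):
    if isinstance(resource, str):
        return quote(resource.encode("utf-8"))
    return quote(resource)

print(urlencode("my bucket/ključ"))  # non-ASCII bytes are percent-encoded: my%20bucket/klju%C4%8D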
3,319
usc-isi-i2/etk
etk/knowledge_graph_schema.py
KGSchema.is_location
def is_location(v) -> (bool, str): """ Boolean function for checking if v is a location format Args: v: Returns: bool """ def convert2float(value): try: float_num = float(value) return float_num except ValueError: return False if not isinstance(v, str): return False, v split_lst = v.split(":") if len(split_lst) != 5: return False, v if convert2float(split_lst[3]): longitude = abs(convert2float(split_lst[3])) if longitude > 90: return False, v if convert2float(split_lst[4]): latitude = abs(convert2float(split_lst[3])) if latitude > 180: return False, v return True, v
python
def is_location(v) -> (bool, str): """ Boolean function for checking if v is a location format Args: v: Returns: bool """ def convert2float(value): try: float_num = float(value) return float_num except ValueError: return False if not isinstance(v, str): return False, v split_lst = v.split(":") if len(split_lst) != 5: return False, v if convert2float(split_lst[3]): longitude = abs(convert2float(split_lst[3])) if longitude > 90: return False, v if convert2float(split_lst[4]): latitude = abs(convert2float(split_lst[3])) if latitude > 180: return False, v return True, v
['def', 'is_location', '(', 'v', ')', '->', '(', 'bool', ',', 'str', ')', ':', 'def', 'convert2float', '(', 'value', ')', ':', 'try', ':', 'float_num', '=', 'float', '(', 'value', ')', 'return', 'float_num', 'except', 'ValueError', ':', 'return', 'False', 'if', 'not', 'isinstance', '(', 'v', ',', 'str', ')', ':', 'return', 'False', ',', 'v', 'split_lst', '=', 'v', '.', 'split', '(', '":"', ')', 'if', 'len', '(', 'split_lst', ')', '!=', '5', ':', 'return', 'False', ',', 'v', 'if', 'convert2float', '(', 'split_lst', '[', '3', ']', ')', ':', 'longitude', '=', 'abs', '(', 'convert2float', '(', 'split_lst', '[', '3', ']', ')', ')', 'if', 'longitude', '>', '90', ':', 'return', 'False', ',', 'v', 'if', 'convert2float', '(', 'split_lst', '[', '4', ']', ')', ':', 'latitude', '=', 'abs', '(', 'convert2float', '(', 'split_lst', '[', '3', ']', ')', ')', 'if', 'latitude', '>', '180', ':', 'return', 'False', ',', 'v', 'return', 'True', ',', 'v']
Boolean function for checking if v is a location format Args: v: Returns: bool
['Boolean', 'function', 'for', 'checking', 'if', 'v', 'is', 'a', 'location', 'format']
train
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/knowledge_graph_schema.py#L197-L227
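One detail worth flagging in the record above: the latitude branch tests split_lst[4] but then reads split_lst[3] again, and the 90/180 bounds look swapped relative to the usual convention (latitude within plus or minus 90, longitude within plus or minus 180). A corrected standalone sketch of the same check, written independently of etk; the colon-separated five-field layout and the longitude-then-latitude field order follow the original variable names and are assumptions:

def is_location(v):
    # Return (bool, v): True if v looks like 'a:b:c:<lon>:<lat>'.
    if not isinstance(v, str):
        return False, v
    parts = v.split(":")
    if len(parts) != 5:
        return False, v
    try:
        lon, lat = float(parts[3]), float(parts[4])
    except ValueError:
        return False, v
    # conventional bounds: longitude within +/-180, latitude within +/-90
    if abs(lon) > 180 or abs(lat) > 90:
        return False, v
    return True, v

print(is_location("city:country:geonames:-74.0060:40.7128"))  # (True, ...)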
3,320
ska-sa/hypercube
hypercube/base_cube.py
HyperCube.bytes_required
def bytes_required(self): """ Returns ------- int Estimated number of bytes required by arrays registered on the cube, taking their extents into account. """ return np.sum([hcu.array_bytes(a) for a in self.arrays(reify=True).itervalues()])
python
def bytes_required(self): """ Returns ------- int Estimated number of bytes required by arrays registered on the cube, taking their extents into account. """ return np.sum([hcu.array_bytes(a) for a in self.arrays(reify=True).itervalues()])
['def', 'bytes_required', '(', 'self', ')', ':', 'return', 'np', '.', 'sum', '(', '[', 'hcu', '.', 'array_bytes', '(', 'a', ')', 'for', 'a', 'in', 'self', '.', 'arrays', '(', 'reify', '=', 'True', ')', '.', 'itervalues', '(', ')', ']', ')']
Returns ------- int Estimated number of bytes required by arrays registered on the cube, taking their extents into account.
['Returns', '-------', 'int', 'Estimated', 'number', 'of', 'bytes', 'required', 'by', 'arrays', 'registered', 'on', 'the', 'cube', 'taking', 'their', 'extents', 'into', 'account', '.']
train
https://github.com/ska-sa/hypercube/blob/6564a9e65ccd9ed7e7a71bd643f183e1ec645b29/hypercube/base_cube.py#L81-L90
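The record above sums estimated byte sizes over the cube's registered arrays (note the Python 2 itervalues() call). The same bookkeeping for plain NumPy arrays, sketched independently of hypercube:

import numpy as np

def bytes_required(arrays):
    # Total memory footprint, in bytes, of a dict of NumPy arrays.
    return int(sum(a.nbytes for a in arrays.values()))

arrays = {"vis": np.zeros((64, 4), dtype=np.complex128),
          "uvw": np.zeros((64, 3), dtype=np.float64)}
print(bytes_required(arrays))  # 64*4*16 + 64*3*8 = 5632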
3,321
hannorein/rebound
rebound/simulation.py
Simulation.add_particles_ascii
def add_particles_ascii(self, s): """ Adds particles from an ASCII string. Parameters ---------- s : string One particle per line. Each line should include particle's mass, radius, position and velocity. """ for l in s.split("\n"): r = l.split() if len(r): try: r = [float(x) for x in r] p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7]) self.add(p) except: raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).")
python
def add_particles_ascii(self, s): """ Adds particles from an ASCII string. Parameters ---------- s : string One particle per line. Each line should include particle's mass, radius, position and velocity. """ for l in s.split("\n"): r = l.split() if len(r): try: r = [float(x) for x in r] p = Particle(simulation=self, m=r[0], r=r[1], x=r[2], y=r[3], z=r[4], vx=r[5], vy=r[6], vz=r[7]) self.add(p) except: raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).")
['def', 'add_particles_ascii', '(', 'self', ',', 's', ')', ':', 'for', 'l', 'in', 's', '.', 'split', '(', '"\\n"', ')', ':', 'r', '=', 'l', '.', 'split', '(', ')', 'if', 'len', '(', 'r', ')', ':', 'try', ':', 'r', '=', '[', 'float', '(', 'x', ')', 'for', 'x', 'in', 'r', ']', 'p', '=', 'Particle', '(', 'simulation', '=', 'self', ',', 'm', '=', 'r', '[', '0', ']', ',', 'r', '=', 'r', '[', '1', ']', ',', 'x', '=', 'r', '[', '2', ']', ',', 'y', '=', 'r', '[', '3', ']', ',', 'z', '=', 'r', '[', '4', ']', ',', 'vx', '=', 'r', '[', '5', ']', ',', 'vy', '=', 'r', '[', '6', ']', ',', 'vz', '=', 'r', '[', '7', ']', ')', 'self', '.', 'add', '(', 'p', ')', 'except', ':', 'raise', 'AttributeError', '(', '"Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z)."', ')']
Adds particles from an ASCII string. Parameters ---------- s : string One particle per line. Each line should include particle's mass, radius, position and velocity.
['Adds', 'particles', 'from', 'an', 'ASCII', 'string', '.']
train
https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L1206-L1223
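Per the docstring above, each line carries eight floats: mass, radius, position (x, y, z) and velocity (x, y, z). A small usage sketch; Simulation() and integrate() are standard REBOUND calls, and the specific numbers (a test mass on a circular orbit in G=1 units) are arbitrary:

import rebound

sim = rebound.Simulation()
# columns: mass  radius  x    y    z    vx   vy   vz
sim.add_particles_ascii("""1.0   0.005  0.0  0.0  0.0  0.0  0.0  0.0
1e-3  0.0001 1.0  0.0  0.0  0.0  1.0  0.0""")
print(sim.N)          # 2 particles
sim.integrate(10.0)   # advance the system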
3,322
rndusr/torf
torf/_torrent.py
Torrent.pieces
def pieces(self): """ Number of pieces the content is split into or ``None`` if :attr:`piece_size` returns ``None`` """ if self.piece_size is None: return None else: return math.ceil(self.size / self.piece_size)
python
def pieces(self): """ Number of pieces the content is split into or ``None`` if :attr:`piece_size` returns ``None`` """ if self.piece_size is None: return None else: return math.ceil(self.size / self.piece_size)
['def', 'pieces', '(', 'self', ')', ':', 'if', 'self', '.', 'piece_size', 'is', 'None', ':', 'return', 'None', 'else', ':', 'return', 'math', '.', 'ceil', '(', 'self', '.', 'size', '/', 'self', '.', 'piece_size', ')']
Number of pieces the content is split into or ``None`` if :attr:`piece_size` returns ``None``
['Number', 'of', 'pieces', 'the', 'content', 'is', 'split', 'into', 'or', 'None', 'if', ':', 'attr', ':', 'piece_size', 'returns', 'None']
train
https://github.com/rndusr/torf/blob/df0363232daacd3f8c91aafddaa0623b8c28cbd2/torf/_torrent.py#L312-L320
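The property above is a plain ceiling division, pieces = ceil(size / piece_size). For example, a payload of 3 MiB plus one byte with 1 MiB pieces needs 4 pieces; standalone, without torf:

import math

size = 3 * 1024**2 + 1      # content size in bytes
piece_size = 1024**2        # 1 MiB pieces
print(math.ceil(size / piece_size))  # 4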
3,323
fermiPy/fermipy
fermipy/diffuse/diffuse_src_manager.py
DiffuseModelManager.make_diffuse_comp_info
def make_diffuse_comp_info(self, source_name, source_ver, diffuse_dict, components=None, comp_key=None): """ Make a dictionary mapping the merged component names to list of template files Parameters ---------- source_name : str Name of the source source_ver : str Key identifying the version of the source diffuse_dict : dict Information about this component comp_key : str Used when we need to keep track of sub-components, i.e., for moving and selection dependent sources. Returns `model_component.ModelComponentInfo` or `model_component.IsoComponentInfo` """ model_type = diffuse_dict['model_type'] sourcekey = '%s_%s' % (source_name, source_ver) if comp_key is None: template_name = self.make_template_name(model_type, sourcekey) srcmdl_name = self.make_xml_name(sourcekey) else: template_name = self.make_template_name( model_type, "%s_%s" % (sourcekey, comp_key)) srcmdl_name = self.make_xml_name("%s_%s" % (sourcekey, comp_key)) template_name = self._name_factory.fullpath(localpath=template_name) srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name) kwargs = dict(source_name=source_name, source_ver=source_ver, model_type=model_type, srcmdl_name=srcmdl_name, components=components, comp_key=comp_key) kwargs.update(diffuse_dict) if model_type == 'IsoSource': kwargs['Spectral_Filename'] = template_name return IsoComponentInfo(**kwargs) elif model_type == 'MapCubeSource': kwargs['Spatial_Filename'] = template_name return MapCubeComponentInfo(**kwargs) elif model_type == 'SpatialMap': kwargs['Spatial_Filename'] = template_name return SpatialMapComponentInfo(**kwargs) else: raise ValueError("Unexpected model type %s" % model_type)
python
def make_diffuse_comp_info(self, source_name, source_ver, diffuse_dict, components=None, comp_key=None): """ Make a dictionary mapping the merged component names to list of template files Parameters ---------- source_name : str Name of the source source_ver : str Key identifying the version of the source diffuse_dict : dict Information about this component comp_key : str Used when we need to keep track of sub-components, i.e., for moving and selection dependent sources. Returns `model_component.ModelComponentInfo` or `model_component.IsoComponentInfo` """ model_type = diffuse_dict['model_type'] sourcekey = '%s_%s' % (source_name, source_ver) if comp_key is None: template_name = self.make_template_name(model_type, sourcekey) srcmdl_name = self.make_xml_name(sourcekey) else: template_name = self.make_template_name( model_type, "%s_%s" % (sourcekey, comp_key)) srcmdl_name = self.make_xml_name("%s_%s" % (sourcekey, comp_key)) template_name = self._name_factory.fullpath(localpath=template_name) srcmdl_name = self._name_factory.fullpath(localpath=srcmdl_name) kwargs = dict(source_name=source_name, source_ver=source_ver, model_type=model_type, srcmdl_name=srcmdl_name, components=components, comp_key=comp_key) kwargs.update(diffuse_dict) if model_type == 'IsoSource': kwargs['Spectral_Filename'] = template_name return IsoComponentInfo(**kwargs) elif model_type == 'MapCubeSource': kwargs['Spatial_Filename'] = template_name return MapCubeComponentInfo(**kwargs) elif model_type == 'SpatialMap': kwargs['Spatial_Filename'] = template_name return SpatialMapComponentInfo(**kwargs) else: raise ValueError("Unexpected model type %s" % model_type)
['def', 'make_diffuse_comp_info', '(', 'self', ',', 'source_name', ',', 'source_ver', ',', 'diffuse_dict', ',', 'components', '=', 'None', ',', 'comp_key', '=', 'None', ')', ':', 'model_type', '=', 'diffuse_dict', '[', "'model_type'", ']', 'sourcekey', '=', "'%s_%s'", '%', '(', 'source_name', ',', 'source_ver', ')', 'if', 'comp_key', 'is', 'None', ':', 'template_name', '=', 'self', '.', 'make_template_name', '(', 'model_type', ',', 'sourcekey', ')', 'srcmdl_name', '=', 'self', '.', 'make_xml_name', '(', 'sourcekey', ')', 'else', ':', 'template_name', '=', 'self', '.', 'make_template_name', '(', 'model_type', ',', '"%s_%s"', '%', '(', 'sourcekey', ',', 'comp_key', ')', ')', 'srcmdl_name', '=', 'self', '.', 'make_xml_name', '(', '"%s_%s"', '%', '(', 'sourcekey', ',', 'comp_key', ')', ')', 'template_name', '=', 'self', '.', '_name_factory', '.', 'fullpath', '(', 'localpath', '=', 'template_name', ')', 'srcmdl_name', '=', 'self', '.', '_name_factory', '.', 'fullpath', '(', 'localpath', '=', 'srcmdl_name', ')', 'kwargs', '=', 'dict', '(', 'source_name', '=', 'source_name', ',', 'source_ver', '=', 'source_ver', ',', 'model_type', '=', 'model_type', ',', 'srcmdl_name', '=', 'srcmdl_name', ',', 'components', '=', 'components', ',', 'comp_key', '=', 'comp_key', ')', 'kwargs', '.', 'update', '(', 'diffuse_dict', ')', 'if', 'model_type', '==', "'IsoSource'", ':', 'kwargs', '[', "'Spectral_Filename'", ']', '=', 'template_name', 'return', 'IsoComponentInfo', '(', '*', '*', 'kwargs', ')', 'elif', 'model_type', '==', "'MapCubeSource'", ':', 'kwargs', '[', "'Spatial_Filename'", ']', '=', 'template_name', 'return', 'MapCubeComponentInfo', '(', '*', '*', 'kwargs', ')', 'elif', 'model_type', '==', "'SpatialMap'", ':', 'kwargs', '[', "'Spatial_Filename'", ']', '=', 'template_name', 'return', 'SpatialMapComponentInfo', '(', '*', '*', 'kwargs', ')', 'else', ':', 'raise', 'ValueError', '(', '"Unexpected model type %s"', '%', 'model_type', ')']
Make a dictionary mapping the merged component names to list of template files Parameters ---------- source_name : str Name of the source source_ver : str Key identifying the version of the source diffuse_dict : dict Information about this component comp_key : str Used when we need to keep track of sub-components, i.e., for moving and selection dependent sources. Returns `model_component.ModelComponentInfo` or `model_component.IsoComponentInfo`
['Make', 'a', 'dictionary', 'mapping', 'the', 'merged', 'component', 'names', 'to', 'list', 'of', 'template', 'files']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/diffuse_src_manager.py#L325-L375
3,324
pennlabs/penn-sdk-python
penn/dining.py
Dining.venues
def venues(self): """Get a list of all venue objects. >>> venues = din.venues() """ response = self._request(V2_ENDPOINTS['VENUES']) # Normalize `dateHours` to array for venue in response["result_data"]["document"]["venue"]: if venue.get("id") in VENUE_NAMES: venue["name"] = VENUE_NAMES[venue.get("id")] if isinstance(venue.get("dateHours"), dict): venue["dateHours"] = [venue["dateHours"]] if "dateHours" in venue: for dh in venue["dateHours"]: if isinstance(dh.get("meal"), dict): dh["meal"] = [dh["meal"]] return response
python
def venues(self): """Get a list of all venue objects. >>> venues = din.venues() """ response = self._request(V2_ENDPOINTS['VENUES']) # Normalize `dateHours` to array for venue in response["result_data"]["document"]["venue"]: if venue.get("id") in VENUE_NAMES: venue["name"] = VENUE_NAMES[venue.get("id")] if isinstance(venue.get("dateHours"), dict): venue["dateHours"] = [venue["dateHours"]] if "dateHours" in venue: for dh in venue["dateHours"]: if isinstance(dh.get("meal"), dict): dh["meal"] = [dh["meal"]] return response
['def', 'venues', '(', 'self', ')', ':', 'response', '=', 'self', '.', '_request', '(', 'V2_ENDPOINTS', '[', "'VENUES'", ']', ')', '# Normalize `dateHours` to array', 'for', 'venue', 'in', 'response', '[', '"result_data"', ']', '[', '"document"', ']', '[', '"venue"', ']', ':', 'if', 'venue', '.', 'get', '(', '"id"', ')', 'in', 'VENUE_NAMES', ':', 'venue', '[', '"name"', ']', '=', 'VENUE_NAMES', '[', 'venue', '.', 'get', '(', '"id"', ')', ']', 'if', 'isinstance', '(', 'venue', '.', 'get', '(', '"dateHours"', ')', ',', 'dict', ')', ':', 'venue', '[', '"dateHours"', ']', '=', '[', 'venue', '[', '"dateHours"', ']', ']', 'if', '"dateHours"', 'in', 'venue', ':', 'for', 'dh', 'in', 'venue', '[', '"dateHours"', ']', ':', 'if', 'isinstance', '(', 'dh', '.', 'get', '(', '"meal"', ')', ',', 'dict', ')', ':', 'dh', '[', '"meal"', ']', '=', '[', 'dh', '[', '"meal"', ']', ']', 'return', 'response']
Get a list of all venue objects. >>> venues = din.venues()
['Get', 'a', 'list', 'of', 'all', 'venue', 'objects', '.']
train
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L157-L173
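The loop above normalizes fields that can come back either as a single dict or as a list of dicts, a common quirk of XML-to-JSON conversion. The pattern in isolation, with illustrative data:

def as_list(value):
    # Wrap a lone dict in a list so callers can always iterate.
    if isinstance(value, dict):
        return [value]
    return value or []

venue = {"dateHours": {"date": "2024-01-01", "meal": {"type": "Lunch"}}}
venue["dateHours"] = as_list(venue["dateHours"])
for dh in venue["dateHours"]:
    dh["meal"] = as_list(dh["meal"])
print(venue["dateHours"][0]["meal"])  # [{'type': 'Lunch'}]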
3,325
paramiko/paramiko
paramiko/sftp_file.py
SFTPFile.prefetch
def prefetch(self, file_size=None): """ Pre-fetch the remaining contents of this file in anticipation of future `.read` calls. If reading the entire file, pre-fetching can dramatically improve the download speed by avoiding roundtrip latency. The file's contents are incrementally buffered in a background thread. The prefetched data is stored in a buffer until read via the `.read` method. Once data has been read, it's removed from the buffer. The data may be read in a random order (using `.seek`); chunks of the buffer that haven't been read will continue to be buffered. :param int file_size: When this is ``None`` (the default), this method calls `stat` to determine the remote file size. In some situations, doing so can cause exceptions or hangs (see `#562 <https://github.com/paramiko/paramiko/pull/562>`_); as a workaround, one may call `stat` explicitly and pass its value in via this parameter. .. versionadded:: 1.5.1 .. versionchanged:: 1.16.0 The ``file_size`` parameter was added (with no default value). .. versionchanged:: 1.16.1 The ``file_size`` parameter was made optional for backwards compatibility. """ if file_size is None: file_size = self.stat().st_size # queue up async reads for the rest of the file chunks = [] n = self._realpos while n < file_size: chunk = min(self.MAX_REQUEST_SIZE, file_size - n) chunks.append((n, chunk)) n += chunk if len(chunks) > 0: self._start_prefetch(chunks)
python
def prefetch(self, file_size=None): """ Pre-fetch the remaining contents of this file in anticipation of future `.read` calls. If reading the entire file, pre-fetching can dramatically improve the download speed by avoiding roundtrip latency. The file's contents are incrementally buffered in a background thread. The prefetched data is stored in a buffer until read via the `.read` method. Once data has been read, it's removed from the buffer. The data may be read in a random order (using `.seek`); chunks of the buffer that haven't been read will continue to be buffered. :param int file_size: When this is ``None`` (the default), this method calls `stat` to determine the remote file size. In some situations, doing so can cause exceptions or hangs (see `#562 <https://github.com/paramiko/paramiko/pull/562>`_); as a workaround, one may call `stat` explicitly and pass its value in via this parameter. .. versionadded:: 1.5.1 .. versionchanged:: 1.16.0 The ``file_size`` parameter was added (with no default value). .. versionchanged:: 1.16.1 The ``file_size`` parameter was made optional for backwards compatibility. """ if file_size is None: file_size = self.stat().st_size # queue up async reads for the rest of the file chunks = [] n = self._realpos while n < file_size: chunk = min(self.MAX_REQUEST_SIZE, file_size - n) chunks.append((n, chunk)) n += chunk if len(chunks) > 0: self._start_prefetch(chunks)
['def', 'prefetch', '(', 'self', ',', 'file_size', '=', 'None', ')', ':', 'if', 'file_size', 'is', 'None', ':', 'file_size', '=', 'self', '.', 'stat', '(', ')', '.', 'st_size', '# queue up async reads for the rest of the file', 'chunks', '=', '[', ']', 'n', '=', 'self', '.', '_realpos', 'while', 'n', '<', 'file_size', ':', 'chunk', '=', 'min', '(', 'self', '.', 'MAX_REQUEST_SIZE', ',', 'file_size', '-', 'n', ')', 'chunks', '.', 'append', '(', '(', 'n', ',', 'chunk', ')', ')', 'n', '+=', 'chunk', 'if', 'len', '(', 'chunks', ')', '>', '0', ':', 'self', '.', '_start_prefetch', '(', 'chunks', ')']
Pre-fetch the remaining contents of this file in anticipation of future `.read` calls. If reading the entire file, pre-fetching can dramatically improve the download speed by avoiding roundtrip latency. The file's contents are incrementally buffered in a background thread. The prefetched data is stored in a buffer until read via the `.read` method. Once data has been read, it's removed from the buffer. The data may be read in a random order (using `.seek`); chunks of the buffer that haven't been read will continue to be buffered. :param int file_size: When this is ``None`` (the default), this method calls `stat` to determine the remote file size. In some situations, doing so can cause exceptions or hangs (see `#562 <https://github.com/paramiko/paramiko/pull/562>`_); as a workaround, one may call `stat` explicitly and pass its value in via this parameter. .. versionadded:: 1.5.1 .. versionchanged:: 1.16.0 The ``file_size`` parameter was added (with no default value). .. versionchanged:: 1.16.1 The ``file_size`` parameter was made optional for backwards compatibility.
['Pre', '-', 'fetch', 'the', 'remaining', 'contents', 'of', 'this', 'file', 'in', 'anticipation', 'of', 'future', '.', 'read', 'calls', '.', 'If', 'reading', 'the', 'entire', 'file', 'pre', '-', 'fetching', 'can', 'dramatically', 'improve', 'the', 'download', 'speed', 'by', 'avoiding', 'roundtrip', 'latency', '.', 'The', 'file', 's', 'contents', 'are', 'incrementally', 'buffered', 'in', 'a', 'background', 'thread', '.']
train
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/sftp_file.py#L438-L476
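A typical way to combine prefetch with the explicit file_size workaround mentioned in the docstring above; host, credentials and remote path are placeholders:

import paramiko

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("example.com", username="user", password="secret")

sftp = client.open_sftp()
f = sftp.open("/remote/big.bin", "rb")
size = f.stat().st_size   # stat once and pass it in (the #562 workaround)
f.prefetch(size)          # background thread buffers the rest of the file
data = f.read(size)
f.close()
client.close()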
3,326
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py
MARCGeneratorAdapter.on_complete
def on_complete(cls, req): """ Callback called when the request to REST is done. Handles the errors and if there is none, :class:`.OutputPicker` is shown. """ # handle http errors if not (req.status == 200 or req.status == 0): ViewController.log_view.add(req.text) alert(req.text) # TODO: better handling return try: resp = json.loads(req.text) except ValueError: resp = None if not resp: alert("Chyba při konverzi!") # TODO: better ViewController.log_view.add( "Error while generating MARC: %s" % resp.text ) return OutputPicker.show(resp)
python
def on_complete(cls, req): """ Callback called when the request to REST is done. Handles the errors and if there is none, :class:`.OutputPicker` is shown. """ # handle http errors if not (req.status == 200 or req.status == 0): ViewController.log_view.add(req.text) alert(req.text) # TODO: better handling return try: resp = json.loads(req.text) except ValueError: resp = None if not resp: alert("Chyba při konverzi!") # TODO: better ViewController.log_view.add( "Error while generating MARC: %s" % resp.text ) return OutputPicker.show(resp)
['def', 'on_complete', '(', 'cls', ',', 'req', ')', ':', '# handle http errors', 'if', 'not', '(', 'req', '.', 'status', '==', '200', 'or', 'req', '.', 'status', '==', '0', ')', ':', 'ViewController', '.', 'log_view', '.', 'add', '(', 'req', '.', 'text', ')', 'alert', '(', 'req', '.', 'text', ')', '# TODO: better handling', 'return', 'try', ':', 'resp', '=', 'json', '.', 'loads', '(', 'req', '.', 'text', ')', 'except', 'ValueError', ':', 'resp', '=', 'None', 'if', 'not', 'resp', ':', 'alert', '(', '"Chyba při konverzi!")', ' ', ' TODO: better', 'ViewController', '.', 'log_view', '.', 'add', '(', '"Error while generating MARC: %s"', '%', 'resp', '.', 'text', ')', 'return', 'OutputPicker', '.', 'show', '(', 'resp', ')']
Callback called when the request to REST is done. Handles the errors and if there is none, :class:`.OutputPicker` is shown.
['Callback', 'called', 'when', 'the', 'request', 'to', 'REST', 'is', 'done', '.', 'Handles', 'the', 'errors', 'and', 'if', 'there', 'is', 'none', ':', 'class', ':', '.', 'OutputPicker', 'is', 'shown', '.']
train
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py#L248-L271
3,327
mkouhei/bootstrap-py
bootstrap_py/classifiers.py
Classifiers._acronym_lic
def _acronym_lic(self, license_statement): """Convert license acronym.""" pat = re.compile(r'\(([\w+\W?\s?]+)\)') if pat.search(license_statement): lic = pat.search(license_statement).group(1) if lic.startswith('CNRI'): acronym_licence = lic[:4] else: acronym_licence = lic.replace(' ', '') else: acronym_licence = ''.join( [w[0] for w in license_statement.split(self.prefix_lic)[1].split()]) return acronym_licence
python
def _acronym_lic(self, license_statement): """Convert license acronym.""" pat = re.compile(r'\(([\w+\W?\s?]+)\)') if pat.search(license_statement): lic = pat.search(license_statement).group(1) if lic.startswith('CNRI'): acronym_licence = lic[:4] else: acronym_licence = lic.replace(' ', '') else: acronym_licence = ''.join( [w[0] for w in license_statement.split(self.prefix_lic)[1].split()]) return acronym_licence
['def', '_acronym_lic', '(', 'self', ',', 'license_statement', ')', ':', 'pat', '=', 're', '.', 'compile', '(', "r'\\(([\\w+\\W?\\s?]+)\\)'", ')', 'if', 'pat', '.', 'search', '(', 'license_statement', ')', ':', 'lic', '=', 'pat', '.', 'search', '(', 'license_statement', ')', '.', 'group', '(', '1', ')', 'if', 'lic', '.', 'startswith', '(', "'CNRI'", ')', ':', 'acronym_licence', '=', 'lic', '[', ':', '4', ']', 'else', ':', 'acronym_licence', '=', 'lic', '.', 'replace', '(', "' '", ',', "''", ')', 'else', ':', 'acronym_licence', '=', "''", '.', 'join', '(', '[', 'w', '[', '0', ']', 'for', 'w', 'in', 'license_statement', '.', 'split', '(', 'self', '.', 'prefix_lic', ')', '[', '1', ']', '.', 'split', '(', ')', ']', ')', 'return', 'acronym_licence']
Convert license acronym.
['Convert', 'license', 'acronym', '.']
train
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L54-L67
3,328
pytroll/satpy
satpy/readers/viirs_sdr.py
VIIRSSDRReader.get_right_geo_fhs
def get_right_geo_fhs(self, dsid, fhs): """Find the right geographical file handlers for given dataset ID *dsid*.""" ds_info = self.ids[dsid] req_geo, rem_geo = self._get_req_rem_geo(ds_info) desired, other = split_desired_other(fhs, req_geo, rem_geo) if desired: try: ds_info['dataset_groups'].remove(rem_geo) except ValueError: pass return desired else: return other
python
def get_right_geo_fhs(self, dsid, fhs): """Find the right geographical file handlers for given dataset ID *dsid*.""" ds_info = self.ids[dsid] req_geo, rem_geo = self._get_req_rem_geo(ds_info) desired, other = split_desired_other(fhs, req_geo, rem_geo) if desired: try: ds_info['dataset_groups'].remove(rem_geo) except ValueError: pass return desired else: return other
['def', 'get_right_geo_fhs', '(', 'self', ',', 'dsid', ',', 'fhs', ')', ':', 'ds_info', '=', 'self', '.', 'ids', '[', 'dsid', ']', 'req_geo', ',', 'rem_geo', '=', 'self', '.', '_get_req_rem_geo', '(', 'ds_info', ')', 'desired', ',', 'other', '=', 'split_desired_other', '(', 'fhs', ',', 'req_geo', ',', 'rem_geo', ')', 'if', 'desired', ':', 'try', ':', 'ds_info', '[', "'dataset_groups'", ']', '.', 'remove', '(', 'rem_geo', ')', 'except', 'ValueError', ':', 'pass', 'return', 'desired', 'else', ':', 'return', 'other']
Find the right geographical file handlers for given dataset ID *dsid*.
['Find', 'the', 'right', 'geographical', 'file', 'handlers', 'for', 'given', 'dataset', 'ID', '*', 'dsid', '*', '.']
train
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/viirs_sdr.py#L532-L544
3,329
jsfenfen/990-xml-reader
irs_reader/text_format_utils.py
debracket
def debracket(string): """ Eliminate the bracketed var names in doc, line strings """ result = re.sub(BRACKET_RE, ';', str(string)) result = result.lstrip(';') result = result.lstrip(' ') result = result.replace('; ;',';') return result
python
def debracket(string): """ Eliminate the bracketed var names in doc, line strings """ result = re.sub(BRACKET_RE, ';', str(string)) result = result.lstrip(';') result = result.lstrip(' ') result = result.replace('; ;',';') return result
['def', 'debracket', '(', 'string', ')', ':', 'result', '=', 're', '.', 'sub', '(', 'BRACKET_RE', ',', "';'", ',', 'str', '(', 'string', ')', ')', 'result', '=', 'result', '.', 'lstrip', '(', "';'", ')', 'result', '=', 'result', '.', 'lstrip', '(', "' '", ')', 'result', '=', 'result', '.', 'replace', '(', "'; ;'", ',', "';'", ')', 'return', 'result']
Eliminate the bracketed var names in doc, line strings
['Eliminate', 'the', 'bracketed', 'var', 'names', 'in', 'doc', 'line', 'strings']
train
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/text_format_utils.py#L15-L21
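BRACKET_RE is defined elsewhere in that module; assuming it matches bracketed variable names such as [OrganizationName] (an assumption, not confirmed by the record), the cleanup behaves roughly like this standalone version:

import re

BRACKET_RE = re.compile(r"\[[^\]]*\]")  # assumed pattern for [VarName] spans

def debracket(string):
    result = BRACKET_RE.sub(";", str(string))
    result = result.lstrip(";").lstrip(" ")
    return result.replace("; ;", ";")

print(debracket("Name of organization [OrganizationName] and address [AddressLine1]"))
# -> "Name of organization ; and address ;"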
3,330
ucbvislab/radiotool
radiotool/utils.py
linear
def linear(arr1, arr2): """ Create a linear blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.linspace(0, 1, num=n) f_out = N.linspace(1, 0, num=n) # f_in = N.arange(n) / float(n - 1) # f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = f_out * arr1 + f_in * arr2 return vals
python
def linear(arr1, arr2): """ Create a linear blend of arr1 (fading out) and arr2 (fading in) """ n = N.shape(arr1)[0] try: channels = N.shape(arr1)[1] except: channels = 1 f_in = N.linspace(0, 1, num=n) f_out = N.linspace(1, 0, num=n) # f_in = N.arange(n) / float(n - 1) # f_out = N.arange(n - 1, -1, -1) / float(n) if channels > 1: f_in = N.tile(f_in, (channels, 1)).T f_out = N.tile(f_out, (channels, 1)).T vals = f_out * arr1 + f_in * arr2 return vals
['def', 'linear', '(', 'arr1', ',', 'arr2', ')', ':', 'n', '=', 'N', '.', 'shape', '(', 'arr1', ')', '[', '0', ']', 'try', ':', 'channels', '=', 'N', '.', 'shape', '(', 'arr1', ')', '[', '1', ']', 'except', ':', 'channels', '=', '1', 'f_in', '=', 'N', '.', 'linspace', '(', '0', ',', '1', ',', 'num', '=', 'n', ')', 'f_out', '=', 'N', '.', 'linspace', '(', '1', ',', '0', ',', 'num', '=', 'n', ')', '# f_in = N.arange(n) / float(n - 1)', '# f_out = N.arange(n - 1, -1, -1) / float(n)', 'if', 'channels', '>', '1', ':', 'f_in', '=', 'N', '.', 'tile', '(', 'f_in', ',', '(', 'channels', ',', '1', ')', ')', '.', 'T', 'f_out', '=', 'N', '.', 'tile', '(', 'f_out', ',', '(', 'channels', ',', '1', ')', ')', '.', 'T', 'vals', '=', 'f_out', '*', 'arr1', '+', 'f_in', '*', 'arr2', 'return', 'vals']
Create a linear blend of arr1 (fading out) and arr2 (fading in)
['Create', 'a', 'linear', 'blend', 'of', 'arr1', '(', 'fading', 'out', ')', 'and', 'arr2', '(', 'fading', 'in', ')']
train
https://github.com/ucbvislab/radiotool/blob/01c9d878a811cf400b1482896d641d9c95e83ded/radiotool/utils.py#L91-L112
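The function above is a straight linear crossfade: fade-out weights run 1 to 0, fade-in weights run 0 to 1, and the output is f_out*arr1 + f_in*arr2. A standalone NumPy sketch of the same blend:

import numpy as np

def linear_blend(arr1, arr2):
    # Linearly fade arr1 out while fading arr2 in (mono or multi-channel).
    n = arr1.shape[0]
    f_in = np.linspace(0.0, 1.0, num=n)
    f_out = np.linspace(1.0, 0.0, num=n)
    if arr1.ndim > 1:                 # broadcast the ramps over channels
        f_in = f_in[:, np.newaxis]
        f_out = f_out[:, np.newaxis]
    return f_out * arr1 + f_in * arr2

a = np.ones(5)
b = np.zeros(5)
print(linear_blend(a, b))  # [1.   0.75 0.5  0.25 0.  ]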
3,331
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
pltar
def pltar(vrtces, plates): """ Compute the total area of a collection of triangular plates. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltar_c.html :param vrtces: Array of vertices. :type vrtces: Nx3-Element Array of floats :param plates: Array of plates. :type plates: Nx3-Element Array of ints :return: total area of the set of plates :rtype: float """ nv = ctypes.c_int(len(vrtces)) vrtces = stypes.toDoubleMatrix(vrtces) np = ctypes.c_int(len(plates)) plates = stypes.toIntMatrix(plates) return libspice.pltar_c(nv, vrtces, np, plates)
python
def pltar(vrtces, plates): """ Compute the total area of a collection of triangular plates. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltar_c.html :param vrtces: Array of vertices. :type vrtces: Nx3-Element Array of floats :param plates: Array of plates. :type plates: Nx3-Element Array of ints :return: total area of the set of plates :rtype: float """ nv = ctypes.c_int(len(vrtces)) vrtces = stypes.toDoubleMatrix(vrtces) np = ctypes.c_int(len(plates)) plates = stypes.toIntMatrix(plates) return libspice.pltar_c(nv, vrtces, np, plates)
['def', 'pltar', '(', 'vrtces', ',', 'plates', ')', ':', 'nv', '=', 'ctypes', '.', 'c_int', '(', 'len', '(', 'vrtces', ')', ')', 'vrtces', '=', 'stypes', '.', 'toDoubleMatrix', '(', 'vrtces', ')', 'np', '=', 'ctypes', '.', 'c_int', '(', 'len', '(', 'plates', ')', ')', 'plates', '=', 'stypes', '.', 'toIntMatrix', '(', 'plates', ')', 'return', 'libspice', '.', 'pltar_c', '(', 'nv', ',', 'vrtces', ',', 'np', ',', 'plates', ')']
Compute the total area of a collection of triangular plates. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltar_c.html :param vrtces: Array of vertices. :type vrtces: Nx3-Element Array of floats :param plates: Array of plates. :type plates: Nx3-Element Array of ints :return: total area of the set of plates :rtype: float
['Compute', 'the', 'total', 'area', 'of', 'a', 'collection', 'of', 'triangular', 'plates', '.', 'https', ':', '//', 'naif', '.', 'jpl', '.', 'nasa', '.', 'gov', '/', 'pub', '/', 'naif', '/', 'toolkit_docs', '/', 'C', '/', 'cspice', '/', 'pltar_c', '.', 'html', ':', 'param', 'vrtces', ':', 'Array', 'of', 'vertices', '.', ':', 'type', 'vrtces', ':', 'Nx3', '-', 'Element', 'Array', 'of', 'floats', ':', 'param', 'plates', ':', 'Array', 'of', 'plates', '.', ':', 'type', 'plates', ':', 'Nx3', '-', 'Element', 'Array', 'of', 'ints', ':', 'return', ':', 'total', 'area', 'of', 'the', 'set', 'of', 'plates', ':', 'rtype', ':', 'float']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9646-L9663
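A worked example for the record above: a unit square in the x-y plane split into two triangles has total area 1.0. The 1-based vertex indexing follows the usual SPICE DSK plate convention, which is my assumption here; check the pltar_c docs before relying on it:

import spiceypy

vrtces = [[0.0, 0.0, 0.0],
          [1.0, 0.0, 0.0],
          [1.0, 1.0, 0.0],
          [0.0, 1.0, 0.0]]
plates = [[1, 2, 3],   # vertex indices per triangle
          [1, 3, 4]]
print(spiceypy.pltar(vrtces, plates))  # ~1.0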
3,332
jaraco/keyrings.alt
keyrings/alt/Google.py
DocsKeyring.get_password
def get_password(self, service, username): """Get password of the username for the service """ result = self._get_entry(self._keyring, service, username) if result: result = self._decrypt(result) return result
python
def get_password(self, service, username): """Get password of the username for the service """ result = self._get_entry(self._keyring, service, username) if result: result = self._decrypt(result) return result
['def', 'get_password', '(', 'self', ',', 'service', ',', 'username', ')', ':', 'result', '=', 'self', '.', '_get_entry', '(', 'self', '.', '_keyring', ',', 'service', ',', 'username', ')', 'if', 'result', ':', 'result', '=', 'self', '.', '_decrypt', '(', 'result', ')', 'return', 'result']
Get password of the username for the service
['Get', 'password', 'of', 'the', 'username', 'for', 'the', 'service']
train
https://github.com/jaraco/keyrings.alt/blob/5b71223d12bf9ac6abd05b1b395f1efccb5ea660/keyrings/alt/Google.py#L85-L91
3,333
weld-project/weld
python/numpy/weldnumpy/__init__.py
array
def array(arr, *args, **kwargs): ''' Wrapper around weldarray - first create np.array and then convert to weldarray. ''' return weldarray(np.array(arr, *args, **kwargs))
python
def array(arr, *args, **kwargs): ''' Wrapper around weldarray - first create np.array and then convert to weldarray. ''' return weldarray(np.array(arr, *args, **kwargs))
['def', 'array', '(', 'arr', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'return', 'weldarray', '(', 'np', '.', 'array', '(', 'arr', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ')']
Wrapper around weldarray - first create np.array and then convert to weldarray.
['Wrapper', 'around', 'weldarray', '-', 'first', 'create', 'np', '.', 'array', 'and', 'then', 'convert', 'to', 'weldarray', '.']
train
https://github.com/weld-project/weld/blob/8ddd6db6b28878bef0892da44b1d2002b564389c/python/numpy/weldnumpy/__init__.py#L13-L18
3,334
google/prettytensor
prettytensor/pretty_tensor_class.py
_DeferredLayer.attach_template
def attach_template(self, _template, _key, **unbound_var_values): """Attaches the template to this with the _key is supplied with this layer. Note: names were chosen to avoid conflicts. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template. """ if _key in unbound_var_values: raise ValueError('%s specified twice.' % _key) unbound_var_values[_key] = self return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaults=self._defaults, partial_context=self._partial_context)
python
def attach_template(self, _template, _key, **unbound_var_values): """Attaches the template to this with the _key is supplied with this layer. Note: names were chosen to avoid conflicts. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template. """ if _key in unbound_var_values: raise ValueError('%s specified twice.' % _key) unbound_var_values[_key] = self return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaults=self._defaults, partial_context=self._partial_context)
['def', 'attach_template', '(', 'self', ',', '_template', ',', '_key', ',', '*', '*', 'unbound_var_values', ')', ':', 'if', '_key', 'in', 'unbound_var_values', ':', 'raise', 'ValueError', '(', "'%s specified twice.'", '%', '_key', ')', 'unbound_var_values', '[', '_key', ']', '=', 'self', 'return', '_DeferredLayer', '(', 'self', '.', 'bookkeeper', ',', '_template', '.', 'as_layer', '(', ')', '.', 'construct', ',', '[', ']', ',', 'unbound_var_values', ',', 'scope', '=', 'self', '.', '_scope', ',', 'defaults', '=', 'self', '.', '_defaults', ',', 'partial_context', '=', 'self', '.', '_partial_context', ')']
Attaches the template to this with the _key is supplied with this layer. Note: names were chosen to avoid conflicts. Args: _template: The template to construct. _key: The key that this layer should replace. **unbound_var_values: The values for the unbound_vars. Returns: A new layer with operation applied. Raises: ValueError: If _key is specified twice or there is a problem computing the template.
['Attaches', 'the', 'template', 'to', 'this', 'with', 'the', '_key', 'is', 'supplied', 'with', 'this', 'layer', '.']
train
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_class.py#L1282-L1306
3,335
seperman/deepdiff
deepdiff/diff.py
DeepDiff.__skip_this
def __skip_this(self, level): """ Check whether this comparison should be skipped because one of the objects to compare meets exclusion criteria. :rtype: bool """ skip = False if self.exclude_paths and level.path() in self.exclude_paths: skip = True elif self.exclude_regex_paths and any( [exclude_regex_path.search(level.path()) for exclude_regex_path in self.exclude_regex_paths]): skip = True else: if self.exclude_types_tuple and (isinstance(level.t1, self.exclude_types_tuple) or isinstance(level.t2, self.exclude_types_tuple)): skip = True return skip
python
def __skip_this(self, level): """ Check whether this comparison should be skipped because one of the objects to compare meets exclusion criteria. :rtype: bool """ skip = False if self.exclude_paths and level.path() in self.exclude_paths: skip = True elif self.exclude_regex_paths and any( [exclude_regex_path.search(level.path()) for exclude_regex_path in self.exclude_regex_paths]): skip = True else: if self.exclude_types_tuple and (isinstance(level.t1, self.exclude_types_tuple) or isinstance(level.t2, self.exclude_types_tuple)): skip = True return skip
['def', '__skip_this', '(', 'self', ',', 'level', ')', ':', 'skip', '=', 'False', 'if', 'self', '.', 'exclude_paths', 'and', 'level', '.', 'path', '(', ')', 'in', 'self', '.', 'exclude_paths', ':', 'skip', '=', 'True', 'elif', 'self', '.', 'exclude_regex_paths', 'and', 'any', '(', '[', 'exclude_regex_path', '.', 'search', '(', 'level', '.', 'path', '(', ')', ')', 'for', 'exclude_regex_path', 'in', 'self', '.', 'exclude_regex_paths', ']', ')', ':', 'skip', '=', 'True', 'else', ':', 'if', 'self', '.', 'exclude_types_tuple', 'and', '(', 'isinstance', '(', 'level', '.', 't1', ',', 'self', '.', 'exclude_types_tuple', ')', 'or', 'isinstance', '(', 'level', '.', 't2', ',', 'self', '.', 'exclude_types_tuple', ')', ')', ':', 'skip', '=', 'True', 'return', 'skip']
Check whether this comparison should be skipped because one of the objects to compare meets exclusion criteria. :rtype: bool
['Check', 'whether', 'this', 'comparison', 'should', 'be', 'skipped', 'because', 'one', 'of', 'the', 'objects', 'to', 'compare', 'meets', 'exclusion', 'criteria', '.', ':', 'rtype', ':', 'bool']
train
https://github.com/seperman/deepdiff/blob/a66879190fadc671632f154c1fcb82f5c3cef800/deepdiff/diff.py#L209-L225
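The exclusion logic above backs the public exclude_paths, exclude_regex_paths and exclude_types options. Typical usage with the public DeepDiff keyword arguments; the sample dicts and the exact contents of the returned diff are illustrative:

from deepdiff import DeepDiff

t1 = {"name": "Ada", "meta": {"updated_at": "2023-01-01"}, "age": 36}
t2 = {"name": "Ada", "meta": {"updated_at": "2024-01-01"}, "age": 37}

# Ignore the volatile timestamp subtree entirely.
diff = DeepDiff(t1, t2, exclude_paths=["root['meta']['updated_at']"])
print(diff)  # only the 'age' change is reported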
3,336
python-escpos/python-escpos
src/escpos/capabilities.py
BaseProfile.get_font
def get_font(self, font): """Return the escpos index for `font`. Makes sure that the requested `font` is valid. """ font = {'a': 0, 'b': 1}.get(font, font) if not six.text_type(font) in self.fonts: raise NotSupported( '"{}" is not a valid font in the current profile'.format(font)) return font
python
def get_font(self, font): """Return the escpos index for `font`. Makes sure that the requested `font` is valid. """ font = {'a': 0, 'b': 1}.get(font, font) if not six.text_type(font) in self.fonts: raise NotSupported( '"{}" is not a valid font in the current profile'.format(font)) return font
['def', 'get_font', '(', 'self', ',', 'font', ')', ':', 'font', '=', '{', "'a'", ':', '0', ',', "'b'", ':', '1', '}', '.', 'get', '(', 'font', ',', 'font', ')', 'if', 'not', 'six', '.', 'text_type', '(', 'font', ')', 'in', 'self', '.', 'fonts', ':', 'raise', 'NotSupported', '(', '\'"{}" is not a valid font in the current profile\'', '.', 'format', '(', 'font', ')', ')', 'return', 'font']
Return the escpos index for `font`. Makes sure that the requested `font` is valid.
['Return', 'the', 'escpos', 'index', 'for', 'font', '.', 'Makes', 'sure', 'that', 'the', 'requested', 'font', 'is', 'valid', '.']
train
https://github.com/python-escpos/python-escpos/blob/52719c0b7de8948fabdffd180a2d71c22cf4c02b/src/escpos/capabilities.py#L72-L80
3,337
SeabornGames/RequestClient
seaborn/request_client/api_call.py
ApiCall.save_formatted_data
def save_formatted_data(self, data): """ This will save the formatted data as a repr object (see returns.py) :param data: dict of the return data :return: None """ self.data = data self._timestamps['process'] = time.time() self._stage = STAGE_DONE_DATA_FORMATTED
python
def save_formatted_data(self, data): """ This will save the formatted data as a repr object (see returns.py) :param data: dict of the return data :return: None """ self.data = data self._timestamps['process'] = time.time() self._stage = STAGE_DONE_DATA_FORMATTED
['def', 'save_formatted_data', '(', 'self', ',', 'data', ')', ':', 'self', '.', 'data', '=', 'data', 'self', '.', '_timestamps', '[', "'process'", ']', '=', 'time', '.', 'time', '(', ')', 'self', '.', '_stage', '=', 'STAGE_DONE_DATA_FORMATTED']
This will save the formatted data as a repr object (see returns.py) :param data: dict of the return data :return: None
['This', 'will', 'save', 'the', 'formatted', 'data', 'as', 'a', 'repr', 'object', '(', 'see', 'returns', '.', 'py', ')', ':', 'param', 'data', ':', 'dict', 'of', 'the', 'return', 'data', ':', 'return', ':', 'None']
train
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/seaborn/request_client/api_call.py#L558-L566
3,338
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py
Taskmaster.next_task
def next_task(self): """ Returns the next task to be executed. This simply asks for the next Node to be evaluated, and then wraps it in the specific Task subclass with which we were initialized. """ node = self._find_next_ready_node() if node is None: return None executor = node.get_executor() if executor is None: return None tlist = executor.get_all_targets() task = self.tasker(self, tlist, node in self.original_top, node) try: task.make_ready() except Exception as e : # We had a problem just trying to get this task ready (like # a child couldn't be linked to a VariantDir when deciding # whether this node is current). Arrange to raise the # exception when the Task is "executed." self.ready_exc = sys.exc_info() if self.ready_exc: task.exception_set(self.ready_exc) self.ready_exc = None return task
python
def next_task(self): """ Returns the next task to be executed. This simply asks for the next Node to be evaluated, and then wraps it in the specific Task subclass with which we were initialized. """ node = self._find_next_ready_node() if node is None: return None executor = node.get_executor() if executor is None: return None tlist = executor.get_all_targets() task = self.tasker(self, tlist, node in self.original_top, node) try: task.make_ready() except Exception as e : # We had a problem just trying to get this task ready (like # a child couldn't be linked to a VariantDir when deciding # whether this node is current). Arrange to raise the # exception when the Task is "executed." self.ready_exc = sys.exc_info() if self.ready_exc: task.exception_set(self.ready_exc) self.ready_exc = None return task
['def', 'next_task', '(', 'self', ')', ':', 'node', '=', 'self', '.', '_find_next_ready_node', '(', ')', 'if', 'node', 'is', 'None', ':', 'return', 'None', 'executor', '=', 'node', '.', 'get_executor', '(', ')', 'if', 'executor', 'is', 'None', ':', 'return', 'None', 'tlist', '=', 'executor', '.', 'get_all_targets', '(', ')', 'task', '=', 'self', '.', 'tasker', '(', 'self', ',', 'tlist', ',', 'node', 'in', 'self', '.', 'original_top', ',', 'node', ')', 'try', ':', 'task', '.', 'make_ready', '(', ')', 'except', 'Exception', 'as', 'e', ':', '# We had a problem just trying to get this task ready (like', "# a child couldn't be linked to a VariantDir when deciding", '# whether this node is current). Arrange to raise the', '# exception when the Task is "executed."', 'self', '.', 'ready_exc', '=', 'sys', '.', 'exc_info', '(', ')', 'if', 'self', '.', 'ready_exc', ':', 'task', '.', 'exception_set', '(', 'self', '.', 'ready_exc', ')', 'self', '.', 'ready_exc', '=', 'None', 'return', 'task']
Returns the next task to be executed. This simply asks for the next Node to be evaluated, and then wraps it in the specific Task subclass with which we were initialized.
['Returns', 'the', 'next', 'task', 'to', 'be', 'executed', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Taskmaster.py#L952-L985
3,339
jssimporter/python-jss
jss/jssobject.py
JSSObject.get_url
def get_url(cls, data): """Return the URL for a get request based on data type. Args: data: Accepts multiple types. Int: Generate URL to object with data ID. None: Get basic object GET URL (list). String/Unicode: Search for <data> with default_search, usually "name". String/Unicode with "=": Other searches, for example Computers can be search by uuid with: "udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5" See the class "search_types" attribute for options. """ try: data = int(data) except (ValueError, TypeError): pass if isinstance(data, int): return "%s%s%s" % (cls._url, cls.id_url, data) elif data is None: return cls._url elif isinstance(data, basestring): if "=" in data: key, value = data.split("=") # pylint: disable=no-member if key in cls.search_types: return "%s%s%s" % (cls._url, cls.search_types[key], value) else: raise JSSUnsupportedSearchMethodError( "This object cannot be queried by %s." % key) else: return "%s%s%s" % (cls._url, cls.search_types[cls.default_search], data) else: raise ValueError
python
def get_url(cls, data): """Return the URL for a get request based on data type. Args: data: Accepts multiple types. Int: Generate URL to object with data ID. None: Get basic object GET URL (list). String/Unicode: Search for <data> with default_search, usually "name". String/Unicode with "=": Other searches, for example Computers can be search by uuid with: "udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5" See the class "search_types" attribute for options. """ try: data = int(data) except (ValueError, TypeError): pass if isinstance(data, int): return "%s%s%s" % (cls._url, cls.id_url, data) elif data is None: return cls._url elif isinstance(data, basestring): if "=" in data: key, value = data.split("=") # pylint: disable=no-member if key in cls.search_types: return "%s%s%s" % (cls._url, cls.search_types[key], value) else: raise JSSUnsupportedSearchMethodError( "This object cannot be queried by %s." % key) else: return "%s%s%s" % (cls._url, cls.search_types[cls.default_search], data) else: raise ValueError
['def', 'get_url', '(', 'cls', ',', 'data', ')', ':', 'try', ':', 'data', '=', 'int', '(', 'data', ')', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', 'pass', 'if', 'isinstance', '(', 'data', ',', 'int', ')', ':', 'return', '"%s%s%s"', '%', '(', 'cls', '.', '_url', ',', 'cls', '.', 'id_url', ',', 'data', ')', 'elif', 'data', 'is', 'None', ':', 'return', 'cls', '.', '_url', 'elif', 'isinstance', '(', 'data', ',', 'basestring', ')', ':', 'if', '"="', 'in', 'data', ':', 'key', ',', 'value', '=', 'data', '.', 'split', '(', '"="', ')', '# pylint: disable=no-member', 'if', 'key', 'in', 'cls', '.', 'search_types', ':', 'return', '"%s%s%s"', '%', '(', 'cls', '.', '_url', ',', 'cls', '.', 'search_types', '[', 'key', ']', ',', 'value', ')', 'else', ':', 'raise', 'JSSUnsupportedSearchMethodError', '(', '"This object cannot be queried by %s."', '%', 'key', ')', 'else', ':', 'return', '"%s%s%s"', '%', '(', 'cls', '.', '_url', ',', 'cls', '.', 'search_types', '[', 'cls', '.', 'default_search', ']', ',', 'data', ')', 'else', ':', 'raise', 'ValueError']
Return the URL for a get request based on data type. Args: data: Accepts multiple types. Int: Generate URL to object with data ID. None: Get basic object GET URL (list). String/Unicode: Search for <data> with default_search, usually "name". String/Unicode with "=": Other searches, for example Computers can be search by uuid with: "udid=E79E84CB-3227-5C69-A32C-6C45C2E77DF5" See the class "search_types" attribute for options.
['Return', 'the', 'URL', 'for', 'a', 'get', 'request', 'based', 'on', 'data', 'type', '.']
train
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L239-L273
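A minimal, self-contained sketch of the same URL-dispatch logic as the JSSObject.get_url record above, runnable outside python-jss. The class name and the _url/id_url/search_types values are invented for illustration, isinstance checks str rather than the Python 2 basestring of the original, and the library's JSSUnsupportedSearchMethodError is replaced by a plain ValueError so the snippet stands alone:

class DemoObject(object):
    # Hypothetical endpoint fragments; real values live on each JSSObject subclass.
    _url = "/computers"
    id_url = "/id/"
    search_types = {"name": "/name/", "udid": "/udid/"}
    default_search = "name"

    @classmethod
    def get_url(cls, data):
        try:
            data = int(data)          # IDs may arrive as strings like "42"
        except (ValueError, TypeError):
            pass
        if isinstance(data, int):
            return "%s%s%s" % (cls._url, cls.id_url, data)
        elif data is None:
            return cls._url           # plain list URL
        elif isinstance(data, str):
            if "=" in data:
                key, value = data.split("=")
                if key in cls.search_types:
                    return "%s%s%s" % (cls._url, cls.search_types[key], value)
                raise ValueError("This object cannot be queried by %s." % key)
            return "%s%s%s" % (cls._url, cls.search_types[cls.default_search], data)
        raise ValueError

print(DemoObject.get_url(42))                 # /computers/id/42
print(DemoObject.get_url(None))               # /computers
print(DemoObject.get_url("lab-mac"))          # /computers/name/lab-mac
print(DemoObject.get_url("udid=E79E84CB"))    # /computers/udid/E79E84CB
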
3,340
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/work_data.py
AttackWorkPieces.init_from_adversarial_batches
def init_from_adversarial_batches(self, adv_batches): """Initializes work pieces from adversarial batches. Args: adv_batches: dict with adversarial batches, could be obtained as AdversarialBatches.data """ for idx, (adv_batch_id, adv_batch_val) in enumerate(iteritems(adv_batches)): work_id = ATTACK_WORK_ID_PATTERN.format(idx) self.work[work_id] = { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'submission_id': adv_batch_val['submission_id'], 'shard_id': None, 'output_adversarial_batch_id': adv_batch_id, }
python
def init_from_adversarial_batches(self, adv_batches): """Initializes work pieces from adversarial batches. Args: adv_batches: dict with adversarial batches, could be obtained as AdversarialBatches.data """ for idx, (adv_batch_id, adv_batch_val) in enumerate(iteritems(adv_batches)): work_id = ATTACK_WORK_ID_PATTERN.format(idx) self.work[work_id] = { 'claimed_worker_id': None, 'claimed_worker_start_time': None, 'is_completed': False, 'error': None, 'elapsed_time': None, 'submission_id': adv_batch_val['submission_id'], 'shard_id': None, 'output_adversarial_batch_id': adv_batch_id, }
['def', 'init_from_adversarial_batches', '(', 'self', ',', 'adv_batches', ')', ':', 'for', 'idx', ',', '(', 'adv_batch_id', ',', 'adv_batch_val', ')', 'in', 'enumerate', '(', 'iteritems', '(', 'adv_batches', ')', ')', ':', 'work_id', '=', 'ATTACK_WORK_ID_PATTERN', '.', 'format', '(', 'idx', ')', 'self', '.', 'work', '[', 'work_id', ']', '=', '{', "'claimed_worker_id'", ':', 'None', ',', "'claimed_worker_start_time'", ':', 'None', ',', "'is_completed'", ':', 'False', ',', "'error'", ':', 'None', ',', "'elapsed_time'", ':', 'None', ',', "'submission_id'", ':', 'adv_batch_val', '[', "'submission_id'", ']', ',', "'shard_id'", ':', 'None', ',', "'output_adversarial_batch_id'", ':', 'adv_batch_id', ',', '}']
Initializes work pieces from adversarial batches. Args: adv_batches: dict with adversarial batches, could be obtained as AdversarialBatches.data
['Initializes', 'work', 'pieces', 'from', 'adversarial', 'batches', '.']
train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/work_data.py#L349-L367
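A standalone sketch of the same work-piece initialization, with the method rewritten as a plain function; the ID pattern string and the sample batch dictionary are assumptions made only so the snippet can run on its own:

ATTACK_WORK_ID_PATTERN = 'WORKA{0:03d}'   # hypothetical pattern, not the real constant

def init_work_from_adversarial_batches(adv_batches):
    """Build one pending work piece per adversarial batch."""
    work = {}
    for idx, (adv_batch_id, adv_batch_val) in enumerate(adv_batches.items()):
        work_id = ATTACK_WORK_ID_PATTERN.format(idx)
        work[work_id] = {
            'claimed_worker_id': None,
            'claimed_worker_start_time': None,
            'is_completed': False,
            'error': None,
            'elapsed_time': None,
            'submission_id': adv_batch_val['submission_id'],
            'shard_id': None,
            'output_adversarial_batch_id': adv_batch_id,
        }
    return work

batches = {'ADVBATCH000': {'submission_id': 'SUBA000'},
           'ADVBATCH001': {'submission_id': 'SUBA001'}}
for work_id, piece in sorted(init_work_from_adversarial_batches(batches).items()):
    print(work_id, piece['submission_id'], piece['output_adversarial_batch_id'])
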
3,341
RI-imaging/ODTbrain
odtbrain/_alg2d_int.py
integrate_2d
def integrate_2d(uSin, angles, res, nm, lD=0, coords=None, count=None, max_count=None, verbose=0): r"""(slow) 2D reconstruction with the Fourier diffraction theorem Two-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,z)` by a dielectric object with refractive index :math:`n(x,z)`. This function implements the solution by summation in real space, which is extremely slow. Parameters ---------- uSin: (A,N) ndarray Two-dimensional sinogram of line recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None or (2,M) ndarray] Computes only the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (N,N), complex if `onlyreal` is `False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- backpropagate_2d: implementation by backprojection fourier_map_2d: implementation by Fourier interpolation odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- This method is not meant for production use. The computation time is very long and the reconstruction quality is bad. This function is included in the package, because of its educational value, exemplifying the backpropagation algorithm. Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`). """ if coords is None: lx = uSin.shape[1] x = np.linspace(-lx/2, lx/2, lx, endpoint=False) xv, yv = np.meshgrid(x, x) coords = np.zeros((2, lx**2)) coords[0, :] = xv.flat coords[1, :] = yv.flat if max_count is not None: max_count.value += coords.shape[1] + 1 # Cut-Off frequency km = (2 * np.pi * nm) / res # Fourier transform of all uB's # In the script we used the unitary angular frequency (uaf) Fourier # Transform. The discrete Fourier transform is equivalent to the # unitary ordinary frequency (uof) Fourier transform. # # uof: f₁(ξ) = int f(x) exp(-2πi xξ) # # uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx) # # f₁(ω/(2π)) = (2π)^(n/2) f₃(ω) # ω = 2πξ # # We have a one-dimensional (n=1) Fourier transform and UB in the # script is equivalent to f₃(ω). Because we are working with the # uaf, we divide by sqrt(2π) after computing the fft with the uof. # # We calculate the fourier transform of uB further below. This is # necessary for memory control. # Corresponding sample frequencies fx = np.fft.fftfreq(uSin[0].shape[0]) # 1D array # kx is a 1D array. 
kx = 2 * np.pi * fx # Undersampling/oversampling? # Determine if the resolution of the image is too low by looking # at the maximum value for kx. This is no comparison between # Nyquist and Rayleigh frequency. if np.max(kx**2) <= 2 * km**2: # Detector is not set up properly. Higher resolution # can be achieved. if verbose: print("......Measurement data is undersampled.") else: if verbose: print("......Measurement data is oversampled.") raise NotImplementedError("Oversampled data not yet supported." + " Please rescale input data") # Differentials for integral dphi0 = 2 * np.pi / len(angles) dkx = kx[1] - kx[0] # We will later multiply with phi0. # Make sure we are using correct shapes kx = kx.reshape(1, kx.shape[0]) # Low-pass filter: # less-than-or-equal would give us zero division error. filter_klp = (kx**2 < km**2) # a0 will be multiplied with kx # a0 = np.atleast_1d(a0) # a0 = a0.reshape(1,-1) # Create the integrand # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ] # - double coverage factor 1/2 already included # - unitary angular frequency to unitary ordinary frequency # conversion performed in calculation of UB=FT(uB). # # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor) # * iint dϕ₀ dkx (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UBϕ₀(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r) # # (r and s₀ are vectors. In the last term we perform the dot-product) # # kₘM = sqrt( kₘ² - kx² ) # t⊥ = ( cos(ϕ₀), sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), cos(ϕ₀) ) # # # everything that is not dependent on phi0: # # Filter M so there are no nans from the root M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp) prefactor = -1j * km / ((2 * np.pi)**(3. / 2)) prefactor *= dphi0 * dkx # Also filter the prefactor, so nothing outside the required # low-pass contributes to the sum. prefactor *= np.abs(kx) * filter_klp # new in version 0.1.4: # We multiply by the factor (M-1) instead of just (M) # to take into account that we have a scattered # wave that is normalized by u0. prefactor *= np.exp(-1j * km * (M-1) * lD) # Initiate function f f = np.zeros(len(coords[0]), dtype=np.complex128) lenf = len(f) lenu0 = len(uSin[0]) # lenu0 = len(kx[0]) # Initiate vector r that corresponds to calculating a value of f. r = np.zeros((2, 1, 1)) # Everything is normal. # Get the angles ϕ₀. phi0 = angles.reshape(-1, 1) # Compute the Fourier transform of uB. # This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0]) # because axis -1 is always used. # # # Furthermore, The notation in the our optical tomography script for # a wave propagating to the right is: # # u0(x) = exp(ikx) # # However, in physics usually usethe other sign convention: # # u0(x) = exp(-ikx) # # In order to be consisten with programs like Meep or our scattering # script for a dielectric cylinder, we want to use the latter sign # convention. # This is not a big problem. We only need to multiply the imaginary # part of the scattered wave by -1. 
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) / np.sqrt(2 * np.pi) UBi = UB.reshape(len(angles), lenu0) if count is not None: count.value += 1 for j in range(lenf): # Get r (We compute f(r) in this for-loop) r[0][:] = coords[0, j] # x r[1][:] = coords[1, j] # y # Integrand changes with r, so we have to create a new # array: integrand = prefactor * UBi # We save memory by directly applying the following to # the integrand: # # Vector along which we measured # s0 = np.zeros((2, phi0.shape[0], kx.shape[0])) # s0[0] = -np.sin(phi0) # s0[1] = +np.cos(phi0) # Vector perpendicular to s0 # t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1])) # # t_perp_kx[0] = kx*np.cos(phi0) # t_perp_kx[1] = kx*np.sin(phi0) # # term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0)) # integrand* = term3 # # Reminder: # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor) # * iint dϕ₀ dkx (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UB(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r) # # (r and s₀ are vectors. In the last term we perform the dot-product) # # kₘM = sqrt( kₘ² - kx² ) # t⊥ = ( cos(ϕ₀), sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), cos(ϕ₀) ) integrand *= np.exp(1j * ( r[0] * (kx * np.cos(phi0) - km * (M - 1) * np.sin(phi0)) + r[1] * (kx * np.sin(phi0) + km * (M - 1) * np.cos(phi0)))) # Calculate the integral for the position r # integrand.sort() f[j] = np.sum(integrand) # free memory del integrand if count is not None: count.value += 1 return f.reshape(lx, lx)
python
def integrate_2d(uSin, angles, res, nm, lD=0, coords=None, count=None, max_count=None, verbose=0): r"""(slow) 2D reconstruction with the Fourier diffraction theorem Two-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,z)` by a dielectric object with refractive index :math:`n(x,z)`. This function implements the solution by summation in real space, which is extremely slow. Parameters ---------- uSin: (A,N) ndarray Two-dimensional sinogram of line recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None or (2,M) ndarray] Computes only the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (N,N), complex if `onlyreal` is `False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- backpropagate_2d: implementation by backprojection fourier_map_2d: implementation by Fourier interpolation odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- This method is not meant for production use. The computation time is very long and the reconstruction quality is bad. This function is included in the package, because of its educational value, exemplifying the backpropagation algorithm. Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`). """ if coords is None: lx = uSin.shape[1] x = np.linspace(-lx/2, lx/2, lx, endpoint=False) xv, yv = np.meshgrid(x, x) coords = np.zeros((2, lx**2)) coords[0, :] = xv.flat coords[1, :] = yv.flat if max_count is not None: max_count.value += coords.shape[1] + 1 # Cut-Off frequency km = (2 * np.pi * nm) / res # Fourier transform of all uB's # In the script we used the unitary angular frequency (uaf) Fourier # Transform. The discrete Fourier transform is equivalent to the # unitary ordinary frequency (uof) Fourier transform. # # uof: f₁(ξ) = int f(x) exp(-2πi xξ) # # uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx) # # f₁(ω/(2π)) = (2π)^(n/2) f₃(ω) # ω = 2πξ # # We have a one-dimensional (n=1) Fourier transform and UB in the # script is equivalent to f₃(ω). Because we are working with the # uaf, we divide by sqrt(2π) after computing the fft with the uof. # # We calculate the fourier transform of uB further below. This is # necessary for memory control. # Corresponding sample frequencies fx = np.fft.fftfreq(uSin[0].shape[0]) # 1D array # kx is a 1D array. 
kx = 2 * np.pi * fx # Undersampling/oversampling? # Determine if the resolution of the image is too low by looking # at the maximum value for kx. This is no comparison between # Nyquist and Rayleigh frequency. if np.max(kx**2) <= 2 * km**2: # Detector is not set up properly. Higher resolution # can be achieved. if verbose: print("......Measurement data is undersampled.") else: if verbose: print("......Measurement data is oversampled.") raise NotImplementedError("Oversampled data not yet supported." + " Please rescale input data") # Differentials for integral dphi0 = 2 * np.pi / len(angles) dkx = kx[1] - kx[0] # We will later multiply with phi0. # Make sure we are using correct shapes kx = kx.reshape(1, kx.shape[0]) # Low-pass filter: # less-than-or-equal would give us zero division error. filter_klp = (kx**2 < km**2) # a0 will be multiplied with kx # a0 = np.atleast_1d(a0) # a0 = a0.reshape(1,-1) # Create the integrand # Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ] # - double coverage factor 1/2 already included # - unitary angular frequency to unitary ordinary frequency # conversion performed in calculation of UB=FT(uB). # # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor) # * iint dϕ₀ dkx (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UBϕ₀(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r) # # (r and s₀ are vectors. In the last term we perform the dot-product) # # kₘM = sqrt( kₘ² - kx² ) # t⊥ = ( cos(ϕ₀), sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), cos(ϕ₀) ) # # # everything that is not dependent on phi0: # # Filter M so there are no nans from the root M = 1. / km * np.sqrt((km**2 - kx**2) * filter_klp) prefactor = -1j * km / ((2 * np.pi)**(3. / 2)) prefactor *= dphi0 * dkx # Also filter the prefactor, so nothing outside the required # low-pass contributes to the sum. prefactor *= np.abs(kx) * filter_klp # new in version 0.1.4: # We multiply by the factor (M-1) instead of just (M) # to take into account that we have a scattered # wave that is normalized by u0. prefactor *= np.exp(-1j * km * (M-1) * lD) # Initiate function f f = np.zeros(len(coords[0]), dtype=np.complex128) lenf = len(f) lenu0 = len(uSin[0]) # lenu0 = len(kx[0]) # Initiate vector r that corresponds to calculating a value of f. r = np.zeros((2, 1, 1)) # Everything is normal. # Get the angles ϕ₀. phi0 = angles.reshape(-1, 1) # Compute the Fourier transform of uB. # This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0]) # because axis -1 is always used. # # # Furthermore, The notation in the our optical tomography script for # a wave propagating to the right is: # # u0(x) = exp(ikx) # # However, in physics usually usethe other sign convention: # # u0(x) = exp(-ikx) # # In order to be consisten with programs like Meep or our scattering # script for a dielectric cylinder, we want to use the latter sign # convention. # This is not a big problem. We only need to multiply the imaginary # part of the scattered wave by -1. 
UB = np.fft.fft(np.fft.ifftshift(uSin, axes=-1)) / np.sqrt(2 * np.pi) UBi = UB.reshape(len(angles), lenu0) if count is not None: count.value += 1 for j in range(lenf): # Get r (We compute f(r) in this for-loop) r[0][:] = coords[0, j] # x r[1][:] = coords[1, j] # y # Integrand changes with r, so we have to create a new # array: integrand = prefactor * UBi # We save memory by directly applying the following to # the integrand: # # Vector along which we measured # s0 = np.zeros((2, phi0.shape[0], kx.shape[0])) # s0[0] = -np.sin(phi0) # s0[1] = +np.cos(phi0) # Vector perpendicular to s0 # t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1])) # # t_perp_kx[0] = kx*np.cos(phi0) # t_perp_kx[1] = kx*np.sin(phi0) # # term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0)) # integrand* = term3 # # Reminder: # f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor) # * iint dϕ₀ dkx (prefactor) # * |kx| (prefactor) # * exp(-i kₘ M lD ) (prefactor) # * UB(kx) (dependent on ϕ₀) # * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r) # # (r and s₀ are vectors. In the last term we perform the dot-product) # # kₘM = sqrt( kₘ² - kx² ) # t⊥ = ( cos(ϕ₀), sin(ϕ₀) ) # s₀ = ( -sin(ϕ₀), cos(ϕ₀) ) integrand *= np.exp(1j * ( r[0] * (kx * np.cos(phi0) - km * (M - 1) * np.sin(phi0)) + r[1] * (kx * np.sin(phi0) + km * (M - 1) * np.cos(phi0)))) # Calculate the integral for the position r # integrand.sort() f[j] = np.sum(integrand) # free memory del integrand if count is not None: count.value += 1 return f.reshape(lx, lx)
['def', 'integrate_2d', '(', 'uSin', ',', 'angles', ',', 'res', ',', 'nm', ',', 'lD', '=', '0', ',', 'coords', '=', 'None', ',', 'count', '=', 'None', ',', 'max_count', '=', 'None', ',', 'verbose', '=', '0', ')', ':', 'if', 'coords', 'is', 'None', ':', 'lx', '=', 'uSin', '.', 'shape', '[', '1', ']', 'x', '=', 'np', '.', 'linspace', '(', '-', 'lx', '/', '2', ',', 'lx', '/', '2', ',', 'lx', ',', 'endpoint', '=', 'False', ')', 'xv', ',', 'yv', '=', 'np', '.', 'meshgrid', '(', 'x', ',', 'x', ')', 'coords', '=', 'np', '.', 'zeros', '(', '(', '2', ',', 'lx', '**', '2', ')', ')', 'coords', '[', '0', ',', ':', ']', '=', 'xv', '.', 'flat', 'coords', '[', '1', ',', ':', ']', '=', 'yv', '.', 'flat', 'if', 'max_count', 'is', 'not', 'None', ':', 'max_count', '.', 'value', '+=', 'coords', '.', 'shape', '[', '1', ']', '+', '1', '# Cut-Off frequency', 'km', '=', '(', '2', '*', 'np', '.', 'pi', '*', 'nm', ')', '/', 'res', "# Fourier transform of all uB's", '# In the script we used the unitary angular frequency (uaf) Fourier', '# Transform. The discrete Fourier transform is equivalent to the', '# unitary ordinary frequency (uof) Fourier transform.', '#', '# uof: f₁(ξ) = int f(x) exp(-2πi xξ)', '#', '# uaf: f₃(ω) = (2π)^(-n/2) int f(x) exp(-i ωx)', '#', '# f₁(ω/(2π)) = (2π)^(n/2) f₃(ω)', '# ω = 2πξ', '#', '# We have a one-dimensional (n=1) Fourier transform and UB in the', '# script is equivalent to f₃(ω). Because we are working with the', '# uaf, we divide by sqrt(2π) after computing the fft with the uof.', '#', '# We calculate the fourier transform of uB further below. This is', '# necessary for memory control.', '# Corresponding sample frequencies', 'fx', '=', 'np', '.', 'fft', '.', 'fftfreq', '(', 'uSin', '[', '0', ']', '.', 'shape', '[', '0', ']', ')', '# 1D array', '# kx is a 1D array.', 'kx', '=', '2', '*', 'np', '.', 'pi', '*', 'fx', '# Undersampling/oversampling?', '# Determine if the resolution of the image is too low by looking', '# at the maximum value for kx. This is no comparison between', '# Nyquist and Rayleigh frequency.', 'if', 'np', '.', 'max', '(', 'kx', '**', '2', ')', '<=', '2', '*', 'km', '**', '2', ':', '# Detector is not set up properly. 
Higher resolution', '# can be achieved.', 'if', 'verbose', ':', 'print', '(', '"......Measurement data is undersampled."', ')', 'else', ':', 'if', 'verbose', ':', 'print', '(', '"......Measurement data is oversampled."', ')', 'raise', 'NotImplementedError', '(', '"Oversampled data not yet supported."', '+', '" Please rescale input data"', ')', '# Differentials for integral', 'dphi0', '=', '2', '*', 'np', '.', 'pi', '/', 'len', '(', 'angles', ')', 'dkx', '=', 'kx', '[', '1', ']', '-', 'kx', '[', '0', ']', '# We will later multiply with phi0.', '# Make sure we are using correct shapes', 'kx', '=', 'kx', '.', 'reshape', '(', '1', ',', 'kx', '.', 'shape', '[', '0', ']', ')', '# Low-pass filter:', '# less-than-or-equal would give us zero division error.', 'filter_klp', '=', '(', 'kx', '**', '2', '<', 'km', '**', '2', ')', '# a0 will be multiplied with kx', '# a0 = np.atleast_1d(a0)', '# a0 = a0.reshape(1,-1)', '# Create the integrand', '# Integrals over ϕ₀ [0,2π]; kx [-kₘ,kₘ]', '# - double coverage factor 1/2 already included', '# - unitary angular frequency to unitary ordinary frequency', '# conversion performed in calculation of UB=FT(uB).', '#', '# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)', '# * iint dϕ₀ dkx (prefactor)', '# * |kx| (prefactor)', '# * exp(-i kₘ M lD ) (prefactor)', '# * UBϕ₀(kx) (dependent on ϕ₀)', '# * exp( i (kx t⊥ + kₘ (M - 1) s₀) r ) (dependent on ϕ₀ and r)', '#', '# (r and s₀ are vectors. In the last term we perform the dot-product)', '#', '# kₘM = sqrt( kₘ² - kx² )', '# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )', '# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )', '#', '#', '# everything that is not dependent on phi0:', '#', '# Filter M so there are no nans from the root', 'M', '=', '1.', '/', 'km', '*', 'np', '.', 'sqrt', '(', '(', 'km', '**', '2', '-', 'kx', '**', '2', ')', '*', 'filter_klp', ')', 'prefactor', '=', '-', '1j', '*', 'km', '/', '(', '(', '2', '*', 'np', '.', 'pi', ')', '**', '(', '3.', '/', '2', ')', ')', 'prefactor', '*=', 'dphi0', '*', 'dkx', '# Also filter the prefactor, so nothing outside the required', '# low-pass contributes to the sum.', 'prefactor', '*=', 'np', '.', 'abs', '(', 'kx', ')', '*', 'filter_klp', '# new in version 0.1.4:', '# We multiply by the factor (M-1) instead of just (M)', '# to take into account that we have a scattered', '# wave that is normalized by u0.', 'prefactor', '*=', 'np', '.', 'exp', '(', '-', '1j', '*', 'km', '*', '(', 'M', '-', '1', ')', '*', 'lD', ')', '# Initiate function f', 'f', '=', 'np', '.', 'zeros', '(', 'len', '(', 'coords', '[', '0', ']', ')', ',', 'dtype', '=', 'np', '.', 'complex128', ')', 'lenf', '=', 'len', '(', 'f', ')', 'lenu0', '=', 'len', '(', 'uSin', '[', '0', ']', ')', '# lenu0 = len(kx[0])', '# Initiate vector r that corresponds to calculating a value of f.', 'r', '=', 'np', '.', 'zeros', '(', '(', '2', ',', '1', ',', '1', ')', ')', '# Everything is normal.', '# Get the angles ϕ₀.', 'phi0', '=', 'angles', '.', 'reshape', '(', '-', '1', ',', '1', ')', '# Compute the Fourier transform of uB.', '# This is true: np.fft.fft(UB)[0] == np.fft.fft(UB[0])', '# because axis -1 is always used.', '#', '#', '# Furthermore, The notation in the our optical tomography script for', '# a wave propagating to the right is:', '#', '# u0(x) = exp(ikx)', '#', '# However, in physics usually usethe other sign convention:', '#', '# u0(x) = exp(-ikx)', '#', '# In order to be consisten with programs like Meep or our scattering', '# script for a dielectric cylinder, we want to use the latter sign', '# convention.', '# This is not a big problem. 
We only need to multiply the imaginary', '# part of the scattered wave by -1.', 'UB', '=', 'np', '.', 'fft', '.', 'fft', '(', 'np', '.', 'fft', '.', 'ifftshift', '(', 'uSin', ',', 'axes', '=', '-', '1', ')', ')', '/', 'np', '.', 'sqrt', '(', '2', '*', 'np', '.', 'pi', ')', 'UBi', '=', 'UB', '.', 'reshape', '(', 'len', '(', 'angles', ')', ',', 'lenu0', ')', 'if', 'count', 'is', 'not', 'None', ':', 'count', '.', 'value', '+=', '1', 'for', 'j', 'in', 'range', '(', 'lenf', ')', ':', '# Get r (We compute f(r) in this for-loop)', 'r', '[', '0', ']', '[', ':', ']', '=', 'coords', '[', '0', ',', 'j', ']', '# x', 'r', '[', '1', ']', '[', ':', ']', '=', 'coords', '[', '1', ',', 'j', ']', '# y', '# Integrand changes with r, so we have to create a new', '# array:', 'integrand', '=', 'prefactor', '*', 'UBi', '# We save memory by directly applying the following to', '# the integrand:', '#', '# Vector along which we measured', '# s0 = np.zeros((2, phi0.shape[0], kx.shape[0]))', '# s0[0] = -np.sin(phi0)', '# s0[1] = +np.cos(phi0)', '# Vector perpendicular to s0', '# t_perp_kx = np.zeros((2, phi0.shape[0], kx.shape[1]))', '#', '# t_perp_kx[0] = kx*np.cos(phi0)', '# t_perp_kx[1] = kx*np.sin(phi0)', '#', '# term3 = np.exp(1j*np.sum(r*( t_perp_kx + (gamma-km)*s0 ), axis=0))', '# integrand* = term3', '#', '# Reminder:', '# f(r) = -i kₘ / ((2π)^(3/2) a₀) (prefactor)', '# * iint dϕ₀ dkx (prefactor)', '# * |kx| (prefactor)', '# * exp(-i kₘ M lD ) (prefactor)', '# * UB(kx) (dependent on ϕ₀)', '# * exp( i (kx t⊥ + kₘ(M - 1) s₀) r ) (dependent on ϕ₀ and r)', '#', '# (r and s₀ are vectors. In the last term we perform the dot-product)', '#', '# kₘM = sqrt( kₘ² - kx² )', '# t⊥ = ( cos(ϕ₀), sin(ϕ₀) )', '# s₀ = ( -sin(ϕ₀), cos(ϕ₀) )', 'integrand', '*=', 'np', '.', 'exp', '(', '1j', '*', '(', 'r', '[', '0', ']', '*', '(', 'kx', '*', 'np', '.', 'cos', '(', 'phi0', ')', '-', 'km', '*', '(', 'M', '-', '1', ')', '*', 'np', '.', 'sin', '(', 'phi0', ')', ')', '+', 'r', '[', '1', ']', '*', '(', 'kx', '*', 'np', '.', 'sin', '(', 'phi0', ')', '+', 'km', '*', '(', 'M', '-', '1', ')', '*', 'np', '.', 'cos', '(', 'phi0', ')', ')', ')', ')', '# Calculate the integral for the position r', '# integrand.sort()', 'f', '[', 'j', ']', '=', 'np', '.', 'sum', '(', 'integrand', ')', '# free memory', 'del', 'integrand', 'if', 'count', 'is', 'not', 'None', ':', 'count', '.', 'value', '+=', '1', 'return', 'f', '.', 'reshape', '(', 'lx', ',', 'lx', ')']
r"""(slow) 2D reconstruction with the Fourier diffraction theorem Two-dimensional diffraction tomography reconstruction algorithm for scattering of a plane wave :math:`u_0(\mathbf{r}) = u_0(x,z)` by a dielectric object with refractive index :math:`n(x,z)`. This function implements the solution by summation in real space, which is extremely slow. Parameters ---------- uSin: (A,N) ndarray Two-dimensional sinogram of line recordings :math:`u_{\mathrm{B}, \phi_j}(x_\mathrm{D})` divided by the incident plane wave :math:`u_0(l_\mathrm{D})` measured at the detector. angles: (A,) ndarray Angular positions :math:`\phi_j` of `uSin` in radians. res: float Vacuum wavelength of the light :math:`\lambda` in pixels. nm: float Refractive index of the surrounding medium :math:`n_\mathrm{m}`. lD: float Distance from center of rotation to detector plane :math:`l_\mathrm{D}` in pixels. coords: None or (2,M) ndarray] Computes only the output image at these coordinates. This keyword is reserved for future versions and is not implemented yet. count, max_count: multiprocessing.Value or `None` Can be used to monitor the progress of the algorithm. Initially, the value of `max_count.value` is incremented by the total number of steps. At each step, the value of `count.value` is incremented. verbose: int Increment to increase verbosity. Returns ------- f: ndarray of shape (N,N), complex if `onlyreal` is `False` Reconstructed object function :math:`f(\mathbf{r})` as defined by the Helmholtz equation. :math:`f(x,z) = k_m^2 \left(\left(\frac{n(x,z)}{n_m}\right)^2 -1\right)` See Also -------- backpropagate_2d: implementation by backprojection fourier_map_2d: implementation by Fourier interpolation odt_to_ri: conversion of the object function :math:`f(\mathbf{r})` to refractive index :math:`n(\mathbf{r})` Notes ----- This method is not meant for production use. The computation time is very long and the reconstruction quality is bad. This function is included in the package, because of its educational value, exemplifying the backpropagation algorithm. Do not use the parameter `lD` in combination with the Rytov approximation - the propagation is not correctly described. Instead, numerically refocus the sinogram prior to converting it to Rytov data (using e.g. :func:`odtbrain.sinogram_as_rytov`) with a numerical focusing algorithm (available in the Python package :py:mod:`nrefocus`).
['r', '(', 'slow', ')', '2D', 'reconstruction', 'with', 'the', 'Fourier', 'diffraction', 'theorem']
train
https://github.com/RI-imaging/ODTbrain/blob/abbab8b790f10c0c7aea8d858d7d60f2fdd7161e/odtbrain/_alg2d_int.py#L5-L271
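A hedged usage sketch for the integrate_2d record above. It assumes the odtbrain package is installed and that integrate_2d and odt_to_ri are importable from the top-level odtbrain namespace, as the docstring's See Also section suggests; the "sinogram" here is random noise whose only purpose is to have the right shape and dtype, so the reconstructed values are meaningless:

import numpy as np
import odtbrain                         # assumed to be installed

A, N_det = 40, 32                       # number of projections, detector pixels
angles = np.linspace(0, 2 * np.pi, A, endpoint=False)
# Complex sinogram u_B/u_0 of shape (A, N_det); random placeholder data.
sino = 1e-3 * (np.random.rand(A, N_det) + 1j * np.random.rand(A, N_det))

# res = vacuum wavelength in pixels, nm = refractive index of the medium.
f = odtbrain.integrate_2d(sino, angles, res=3.5, nm=1.333)
print(f.shape)                          # (N_det, N_det) object function f(x, z)
n = odtbrain.odt_to_ri(f, res=3.5, nm=1.333)   # object function -> refractive index
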
3,342
davenquinn/Attitude
attitude/stereonet.py
iterative_plane_errors
def iterative_plane_errors(axes,covariance_matrix, **kwargs): """ An iterative version of `pca.plane_errors`, which computes an error surface for a plane. """ sheet = kwargs.pop('sheet','upper') level = kwargs.pop('level',1) n = kwargs.pop('n',100) cov = N.sqrt(N.diagonal(covariance_matrix)) u = N.linspace(0, 2*N.pi, n) scales = dict(upper=1,lower=-1,nominal=0) c1 = scales[sheet] c1 *= -1 # We assume upper hemisphere if axes[2,2] < 0: c1 *= -1 def sdot(a,b): return sum([i*j for i,j in zip(a,b)]) def step_func(a): e = [ N.cos(a)*cov[0], N.sin(a)*cov[1], c1*cov[2]] d = [sdot(e,i) for i in axes.T] x,y,z = d[2],d[0],d[1] r = N.sqrt(x**2 + y**2 + z**2) lat = N.arcsin(z/r) lon = N.arctan2(y, x) return lon,lat # Get a bundle of vectors defining # a full rotation around the unit circle return N.array([step_func(i) for i in u])
python
def iterative_plane_errors(axes,covariance_matrix, **kwargs): """ An iterative version of `pca.plane_errors`, which computes an error surface for a plane. """ sheet = kwargs.pop('sheet','upper') level = kwargs.pop('level',1) n = kwargs.pop('n',100) cov = N.sqrt(N.diagonal(covariance_matrix)) u = N.linspace(0, 2*N.pi, n) scales = dict(upper=1,lower=-1,nominal=0) c1 = scales[sheet] c1 *= -1 # We assume upper hemisphere if axes[2,2] < 0: c1 *= -1 def sdot(a,b): return sum([i*j for i,j in zip(a,b)]) def step_func(a): e = [ N.cos(a)*cov[0], N.sin(a)*cov[1], c1*cov[2]] d = [sdot(e,i) for i in axes.T] x,y,z = d[2],d[0],d[1] r = N.sqrt(x**2 + y**2 + z**2) lat = N.arcsin(z/r) lon = N.arctan2(y, x) return lon,lat # Get a bundle of vectors defining # a full rotation around the unit circle return N.array([step_func(i) for i in u])
['def', 'iterative_plane_errors', '(', 'axes', ',', 'covariance_matrix', ',', '*', '*', 'kwargs', ')', ':', 'sheet', '=', 'kwargs', '.', 'pop', '(', "'sheet'", ',', "'upper'", ')', 'level', '=', 'kwargs', '.', 'pop', '(', "'level'", ',', '1', ')', 'n', '=', 'kwargs', '.', 'pop', '(', "'n'", ',', '100', ')', 'cov', '=', 'N', '.', 'sqrt', '(', 'N', '.', 'diagonal', '(', 'covariance_matrix', ')', ')', 'u', '=', 'N', '.', 'linspace', '(', '0', ',', '2', '*', 'N', '.', 'pi', ',', 'n', ')', 'scales', '=', 'dict', '(', 'upper', '=', '1', ',', 'lower', '=', '-', '1', ',', 'nominal', '=', '0', ')', 'c1', '=', 'scales', '[', 'sheet', ']', 'c1', '*=', '-', '1', '# We assume upper hemisphere', 'if', 'axes', '[', '2', ',', '2', ']', '<', '0', ':', 'c1', '*=', '-', '1', 'def', 'sdot', '(', 'a', ',', 'b', ')', ':', 'return', 'sum', '(', '[', 'i', '*', 'j', 'for', 'i', ',', 'j', 'in', 'zip', '(', 'a', ',', 'b', ')', ']', ')', 'def', 'step_func', '(', 'a', ')', ':', 'e', '=', '[', 'N', '.', 'cos', '(', 'a', ')', '*', 'cov', '[', '0', ']', ',', 'N', '.', 'sin', '(', 'a', ')', '*', 'cov', '[', '1', ']', ',', 'c1', '*', 'cov', '[', '2', ']', ']', 'd', '=', '[', 'sdot', '(', 'e', ',', 'i', ')', 'for', 'i', 'in', 'axes', '.', 'T', ']', 'x', ',', 'y', ',', 'z', '=', 'd', '[', '2', ']', ',', 'd', '[', '0', ']', ',', 'd', '[', '1', ']', 'r', '=', 'N', '.', 'sqrt', '(', 'x', '**', '2', '+', 'y', '**', '2', '+', 'z', '**', '2', ')', 'lat', '=', 'N', '.', 'arcsin', '(', 'z', '/', 'r', ')', 'lon', '=', 'N', '.', 'arctan2', '(', 'y', ',', 'x', ')', 'return', 'lon', ',', 'lat', '# Get a bundle of vectors defining', '# a full rotation around the unit circle', 'return', 'N', '.', 'array', '(', '[', 'step_func', '(', 'i', ')', 'for', 'i', 'in', 'u', ']', ')']
An iterative version of `pca.plane_errors`, which computes an error surface for a plane.
['An', 'iterative', 'version', 'of', 'pca', '.', 'plane_errors', 'which', 'computes', 'an', 'error', 'surface', 'for', 'a', 'plane', '.']
train
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/attitude/stereonet.py#L156-L193
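A hedged usage sketch for iterative_plane_errors; it assumes the function is importable from attitude.stereonet (the path given in the record), and the axes and covariance values are invented purely to show the expected shapes:

import numpy as N
from attitude.stereonet import iterative_plane_errors   # import path assumed

axes = N.eye(3)                         # principal axes as columns of a 3x3 matrix
cov = N.diag([0.5, 0.2, 0.01])          # per-axis variances (made-up values)

err = iterative_plane_errors(axes, cov, n=50, sheet='upper')
print(err.shape)                        # (50, 2): one (longitude, latitude) pair per step
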
3,343
SylvanasSun/FishFishJump
fish_core/utils/common_utils.py
check_validity_for_dict
def check_validity_for_dict(keys, dict): """ >>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False """ for key in keys: if key not in dict or dict[key] is '' or dict[key] is None: return False return True
python
def check_validity_for_dict(keys, dict): """ >>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False """ for key in keys: if key not in dict or dict[key] is '' or dict[key] is None: return False return True
['def', 'check_validity_for_dict', '(', 'keys', ',', 'dict', ')', ':', 'for', 'key', 'in', 'keys', ':', 'if', 'key', 'not', 'in', 'dict', 'or', 'dict', '[', 'key', ']', 'is', "''", 'or', 'dict', '[', 'key', ']', 'is', 'None', ':', 'return', 'False', 'return', 'True']
>>> dict = {'a': 0, 'b': 1, 'c': 2} >>> keys = ['a', 'd', 'e'] >>> check_validity_for_dict(keys, dict) == False True >>> keys = ['a', 'b', 'c'] >>> check_validity_for_dict(keys, dict) == False False
['>>>', 'dict', '=', '{', 'a', ':', '0', 'b', ':', '1', 'c', ':', '2', '}', '>>>', 'keys', '=', '[', 'a', 'd', 'e', ']', '>>>', 'check_validity_for_dict', '(', 'keys', 'dict', ')', '==', 'False', 'True', '>>>', 'keys', '=', '[', 'a', 'b', 'c', ']', '>>>', 'check_validity_for_dict', '(', 'keys', 'dict', ')', '==', 'False', 'False']
train
https://github.com/SylvanasSun/FishFishJump/blob/696212d242d8d572f3f1b43925f3d8ab8acc6a2d/fish_core/utils/common_utils.py#L58-L71
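A standalone sketch of the same validity check; it mirrors the record's doctests, but compares against the empty string with == instead of the identity test used in the original:

def check_validity_for_dict(keys, mapping):
    """Return False if any key is missing or maps to '' or None."""
    for key in keys:
        if key not in mapping or mapping[key] == '' or mapping[key] is None:
            return False
    return True

record = {'a': 0, 'b': 1, 'c': 2}
print(check_validity_for_dict(['a', 'd', 'e'], record))   # False: 'd' and 'e' missing
print(check_validity_for_dict(['a', 'b', 'c'], record))   # True
print(check_validity_for_dict(['a'], {'a': ''}))          # False: empty value
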
3,344
tjcsl/cslbot
cslbot/commands/distro.py
cmd
def cmd(send, *_): """Gets a random distro. Syntax: {command} """ url = get('http://distrowatch.com/random.php').url match = re.search('=(.*)', url) if match: send(match.group(1)) else: send("no distro found")
python
def cmd(send, *_): """Gets a random distro. Syntax: {command} """ url = get('http://distrowatch.com/random.php').url match = re.search('=(.*)', url) if match: send(match.group(1)) else: send("no distro found")
['def', 'cmd', '(', 'send', ',', '*', '_', ')', ':', 'url', '=', 'get', '(', "'http://distrowatch.com/random.php'", ')', '.', 'url', 'match', '=', 're', '.', 'search', '(', "'=(.*)'", ',', 'url', ')', 'if', 'match', ':', 'send', '(', 'match', '.', 'group', '(', '1', ')', ')', 'else', ':', 'send', '(', '"no distro found"', ')']
Gets a random distro. Syntax: {command}
['Gets', 'a', 'random', 'distro', '.']
train
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/distro.py#L26-L37
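A standalone sketch of the same lookup outside the bot framework: it assumes the requests package is available in place of the bot's get() helper, and that distrowatch.com still redirects random.php to a URL whose query ends in the distribution name:

import re
import requests                          # stands in for the bot's get() helper

def random_distro():
    url = requests.get('http://distrowatch.com/random.php').url
    match = re.search('=(.*)', url)
    return match.group(1) if match else "no distro found"

print(random_distro())
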
3,345
andreikop/qutepart
qutepart/syntax/__init__.py
SyntaxManager.getSyntax
def getSyntax(self, xmlFileName=None, mimeType=None, languageName=None, sourceFilePath=None, firstLine=None): """Get syntax by one of parameters: * xmlFileName * mimeType * languageName * sourceFilePath First parameter in the list has biggest priority """ syntax = None if syntax is None and xmlFileName is not None: try: syntax = self._getSyntaxByXmlFileName(xmlFileName) except KeyError: _logger.warning('No xml definition %s' % xmlFileName) if syntax is None and mimeType is not None: try: syntax = self._getSyntaxByMimeType(mimeType) except KeyError: _logger.warning('No syntax for mime type %s' % mimeType) if syntax is None and languageName is not None: try: syntax = self._getSyntaxByLanguageName(languageName) except KeyError: _logger.warning('No syntax for language %s' % languageName) if syntax is None and sourceFilePath is not None: baseName = os.path.basename(sourceFilePath) try: syntax = self._getSyntaxBySourceFileName(baseName) except KeyError: pass if syntax is None and firstLine is not None: try: syntax = self._getSyntaxByFirstLine(firstLine) except KeyError: pass return syntax
python
def getSyntax(self, xmlFileName=None, mimeType=None, languageName=None, sourceFilePath=None, firstLine=None): """Get syntax by one of parameters: * xmlFileName * mimeType * languageName * sourceFilePath First parameter in the list has biggest priority """ syntax = None if syntax is None and xmlFileName is not None: try: syntax = self._getSyntaxByXmlFileName(xmlFileName) except KeyError: _logger.warning('No xml definition %s' % xmlFileName) if syntax is None and mimeType is not None: try: syntax = self._getSyntaxByMimeType(mimeType) except KeyError: _logger.warning('No syntax for mime type %s' % mimeType) if syntax is None and languageName is not None: try: syntax = self._getSyntaxByLanguageName(languageName) except KeyError: _logger.warning('No syntax for language %s' % languageName) if syntax is None and sourceFilePath is not None: baseName = os.path.basename(sourceFilePath) try: syntax = self._getSyntaxBySourceFileName(baseName) except KeyError: pass if syntax is None and firstLine is not None: try: syntax = self._getSyntaxByFirstLine(firstLine) except KeyError: pass return syntax
['def', 'getSyntax', '(', 'self', ',', 'xmlFileName', '=', 'None', ',', 'mimeType', '=', 'None', ',', 'languageName', '=', 'None', ',', 'sourceFilePath', '=', 'None', ',', 'firstLine', '=', 'None', ')', ':', 'syntax', '=', 'None', 'if', 'syntax', 'is', 'None', 'and', 'xmlFileName', 'is', 'not', 'None', ':', 'try', ':', 'syntax', '=', 'self', '.', '_getSyntaxByXmlFileName', '(', 'xmlFileName', ')', 'except', 'KeyError', ':', '_logger', '.', 'warning', '(', "'No xml definition %s'", '%', 'xmlFileName', ')', 'if', 'syntax', 'is', 'None', 'and', 'mimeType', 'is', 'not', 'None', ':', 'try', ':', 'syntax', '=', 'self', '.', '_getSyntaxByMimeType', '(', 'mimeType', ')', 'except', 'KeyError', ':', '_logger', '.', 'warning', '(', "'No syntax for mime type %s'", '%', 'mimeType', ')', 'if', 'syntax', 'is', 'None', 'and', 'languageName', 'is', 'not', 'None', ':', 'try', ':', 'syntax', '=', 'self', '.', '_getSyntaxByLanguageName', '(', 'languageName', ')', 'except', 'KeyError', ':', '_logger', '.', 'warning', '(', "'No syntax for language %s'", '%', 'languageName', ')', 'if', 'syntax', 'is', 'None', 'and', 'sourceFilePath', 'is', 'not', 'None', ':', 'baseName', '=', 'os', '.', 'path', '.', 'basename', '(', 'sourceFilePath', ')', 'try', ':', 'syntax', '=', 'self', '.', '_getSyntaxBySourceFileName', '(', 'baseName', ')', 'except', 'KeyError', ':', 'pass', 'if', 'syntax', 'is', 'None', 'and', 'firstLine', 'is', 'not', 'None', ':', 'try', ':', 'syntax', '=', 'self', '.', '_getSyntaxByFirstLine', '(', 'firstLine', ')', 'except', 'KeyError', ':', 'pass', 'return', 'syntax']
Get syntax by one of parameters: * xmlFileName * mimeType * languageName * sourceFilePath First parameter in the list has biggest priority
['Get', 'syntax', 'by', 'one', 'of', 'parameters', ':', '*', 'xmlFileName', '*', 'mimeType', '*', 'languageName', '*', 'sourceFilePath', 'First', 'parameter', 'in', 'the', 'list', 'has', 'biggest', 'priority']
train
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/syntax/__init__.py#L214-L260
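A hedged usage sketch for SyntaxManager.getSyntax; it assumes qutepart is installed and that a SyntaxManager can be constructed directly with no arguments, which may not match how the editor widget normally obtains one. The file name and shebang line are examples only:

from qutepart.syntax import SyntaxManager    # direct construction assumed

manager = SyntaxManager()
# Parameters are tried in priority order:
# xmlFileName > mimeType > languageName > sourceFilePath (with firstLine as a fallback).
syntax = manager.getSyntax(sourceFilePath='example.py',
                           firstLine='#!/usr/bin/env python')
print(syntax)      # a Syntax object, or None if nothing matched
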
3,346
atlassian-api/atlassian-python-api
atlassian/bitbucket.py
Bitbucket.delete_branch
def delete_branch(self, project, repository, name, end_point): """ Delete branch from related repo :param self: :param project: :param repository: :param name: :param end_point: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'.format(project=project, repository=repository) data = {"name": str(name), "endPoint": str(end_point)} return self.delete(url, data=data)
python
def delete_branch(self, project, repository, name, end_point): """ Delete branch from related repo :param self: :param project: :param repository: :param name: :param end_point: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'.format(project=project, repository=repository) data = {"name": str(name), "endPoint": str(end_point)} return self.delete(url, data=data)
['def', 'delete_branch', '(', 'self', ',', 'project', ',', 'repository', ',', 'name', ',', 'end_point', ')', ':', 'url', '=', "'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branches'", '.', 'format', '(', 'project', '=', 'project', ',', 'repository', '=', 'repository', ')', 'data', '=', '{', '"name"', ':', 'str', '(', 'name', ')', ',', '"endPoint"', ':', 'str', '(', 'end_point', ')', '}', 'return', 'self', '.', 'delete', '(', 'url', ',', 'data', '=', 'data', ')']
Delete branch from related repo :param self: :param project: :param repository: :param name: :param end_point: :return:
['Delete', 'branch', 'from', 'related', 'repo', ':', 'param', 'self', ':', ':', 'param', 'project', ':', ':', 'param', 'repository', ':', ':', 'param', 'name', ':', ':', 'param', 'end_point', ':', ':', 'return', ':']
train
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bitbucket.py#L315-L330
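A hedged usage sketch for the delete_branch record; it assumes the atlassian-python-api client is constructed via from atlassian import Bitbucket, and the server URL, credentials, project key, repository slug, branch name and end_point hash are all placeholders. name and end_point are forwarded unchanged as the name and endPoint fields of the REST payload:

from atlassian import Bitbucket          # client class of atlassian-python-api

bitbucket = Bitbucket(url='https://bitbucket.example.com',
                      username='svc-user', password='secret')

# Delete the branch feature/stale in project PROJ, repository my-repo.
bitbucket.delete_branch(project='PROJ',
                        repository='my-repo',
                        name='feature/stale',
                        end_point='0a943a29376f2336b78312d99e65da17048951db')
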
3,347
Opentrons/opentrons
api/src/opentrons/legacy_api/instruments/pipette.py
Pipette._aspirate_plunger_position
def _aspirate_plunger_position(self, ul): """Calculate axis position for a given liquid volume. Translates the passed liquid volume to absolute coordinates on the axis associated with this pipette. Calibration of the pipette motor's ul-to-mm conversion is required """ millimeters = ul / self._ul_per_mm(ul, 'aspirate') destination_mm = self._get_plunger_position('bottom') + millimeters return round(destination_mm, 6)
python
def _aspirate_plunger_position(self, ul): """Calculate axis position for a given liquid volume. Translates the passed liquid volume to absolute coordinates on the axis associated with this pipette. Calibration of the pipette motor's ul-to-mm conversion is required """ millimeters = ul / self._ul_per_mm(ul, 'aspirate') destination_mm = self._get_plunger_position('bottom') + millimeters return round(destination_mm, 6)
['def', '_aspirate_plunger_position', '(', 'self', ',', 'ul', ')', ':', 'millimeters', '=', 'ul', '/', 'self', '.', '_ul_per_mm', '(', 'ul', ',', "'aspirate'", ')', 'destination_mm', '=', 'self', '.', '_get_plunger_position', '(', "'bottom'", ')', '+', 'millimeters', 'return', 'round', '(', 'destination_mm', ',', '6', ')']
Calculate axis position for a given liquid volume. Translates the passed liquid volume to absolute coordinates on the axis associated with this pipette. Calibration of the pipette motor's ul-to-mm conversion is required
['Calculate', 'axis', 'position', 'for', 'a', 'given', 'liquid', 'volume', '.']
train
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L1453-L1463
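A standalone sketch of the arithmetic in _aspirate_plunger_position; the microlitre-per-millimetre factor and the 'bottom' plunger coordinate are invented calibration numbers, since the real values come from the pipette's configuration:

def aspirate_plunger_position(ul, ul_per_mm=3.2, bottom_mm=2.0):
    """Translate a liquid volume (uL) into an absolute plunger coordinate (mm)."""
    millimeters = ul / ul_per_mm          # volume -> plunger travel
    return round(bottom_mm + millimeters, 6)

print(aspirate_plunger_position(50))      # 17.625 with the made-up calibration
print(aspirate_plunger_position(200))     # 64.5
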
3,348
Kunstmord/datalib
src/dataset.py
DataSetBase.return_multiple_convert_numpy
def return_multiple_convert_numpy(self, start_id, end_id, converter, add_args=None): """ Converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- start_id : the id of the first object to be converted end_id : the id of the last object to be converted, if equal to -1, will convert all data points in range (start_id, <id of last element in database>) converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray """ if end_id == -1: end_id = self.points_amt return return_multiple_convert_numpy_base(self.dbpath, self.path_to_set, self._set_object, start_id, end_id, converter, add_args)
python
def return_multiple_convert_numpy(self, start_id, end_id, converter, add_args=None): """ Converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- start_id : the id of the first object to be converted end_id : the id of the last object to be converted, if equal to -1, will convert all data points in range (start_id, <id of last element in database>) converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray """ if end_id == -1: end_id = self.points_amt return return_multiple_convert_numpy_base(self.dbpath, self.path_to_set, self._set_object, start_id, end_id, converter, add_args)
['def', 'return_multiple_convert_numpy', '(', 'self', ',', 'start_id', ',', 'end_id', ',', 'converter', ',', 'add_args', '=', 'None', ')', ':', 'if', 'end_id', '==', '-', '1', ':', 'end_id', '=', 'self', '.', 'points_amt', 'return', 'return_multiple_convert_numpy_base', '(', 'self', '.', 'dbpath', ',', 'self', '.', 'path_to_set', ',', 'self', '.', '_set_object', ',', 'start_id', ',', 'end_id', ',', 'converter', ',', 'add_args', ')']
Converts several objects, with ids in the range (start_id, end_id) into a 2d numpy array and returns the array, the conversion is done by the 'converter' function Parameters ---------- start_id : the id of the first object to be converted end_id : the id of the last object to be converted, if equal to -1, will convert all data points in range (start_id, <id of last element in database>) converter : function, which takes the path of a data point and *args as parameters and returns a numpy array add_args : optional arguments for the converter (list/dictionary/tuple/whatever). if None, the converter should take only one input argument - the file path. default value: None Returns ------- result : 2-dimensional ndarray
['Converts', 'several', 'objects', 'with', 'ids', 'in', 'the', 'range', '(', 'start_id', 'end_id', ')', 'into', 'a', '2d', 'numpy', 'array', 'and', 'returns', 'the', 'array', 'the', 'conversion', 'is', 'done', 'by', 'the', 'converter', 'function']
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/dataset.py#L864-L885
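A hedged sketch of the converter contract that return_multiple_convert_numpy expects: a callable that takes a data point's file path (plus optional extra arguments) and returns a numpy array. Creating the DataSetBase instance itself is outside this record, so the actual call is only indicated in a comment:

import numpy as np

def dummy_converter(path):
    # A real converter would parse the file at `path`; this placeholder just
    # returns a fixed-length feature vector so the output rows line up.
    return np.zeros(4)

# With `ds` an already-populated DataSetBase subclass instance, something like
#     features = ds.return_multiple_convert_numpy(1, -1, dummy_converter)
# would convert every point from id 1 to the last one and return a 2-D array
# with one row per data point.
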
3,349
pypa/pipenv
pipenv/patched/notpip/_vendor/ipaddress.py
IPv6Address.teredo
def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF))
python
def teredo(self): """Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32) """ if (self._ip >> 96) != 0x20010000: return None return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF), IPv4Address(~self._ip & 0xFFFFFFFF))
['def', 'teredo', '(', 'self', ')', ':', 'if', '(', 'self', '.', '_ip', '>>', '96', ')', '!=', '0x20010000', ':', 'return', 'None', 'return', '(', 'IPv4Address', '(', '(', 'self', '.', '_ip', '>>', '64', ')', '&', '0xFFFFFFFF', ')', ',', 'IPv4Address', '(', '~', 'self', '.', '_ip', '&', '0xFFFFFFFF', ')', ')']
Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32)
['Tuple', 'of', 'embedded', 'teredo', 'IPs', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/ipaddress.py#L2148-L2160
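A usage sketch with the standard-library ipaddress module, which provides the same teredo property as the vendored copy in the record; the address below embeds 192.0.2.45 as the Teredo server and 203.0.113.9 (stored bit-inverted) as the client:

import ipaddress

addr = ipaddress.IPv6Address('2001:0:c000:22d::34ff:8ef6')
print(addr.teredo)
# (IPv4Address('192.0.2.45'), IPv4Address('203.0.113.9'))

print(ipaddress.IPv6Address('2001:db8::1').teredo)
# None -- the address is not inside 2001::/32
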
3,350
abarker/pdfCropMargins
src/pdfCropMargins/external_program_calls.py
remove_program_temp_directory
def remove_program_temp_directory(): """Remove the global temp directory and all its contents.""" if os.path.exists(program_temp_directory): max_retries = 3 curr_retries = 0 time_between_retries = 1 while True: try: shutil.rmtree(program_temp_directory) break except IOError: curr_retries += 1 if curr_retries > max_retries: raise # re-raise the exception time.sleep(time_between_retries) except: print("Cleaning up temp dir...", file=sys.stderr) raise
python
def remove_program_temp_directory(): """Remove the global temp directory and all its contents.""" if os.path.exists(program_temp_directory): max_retries = 3 curr_retries = 0 time_between_retries = 1 while True: try: shutil.rmtree(program_temp_directory) break except IOError: curr_retries += 1 if curr_retries > max_retries: raise # re-raise the exception time.sleep(time_between_retries) except: print("Cleaning up temp dir...", file=sys.stderr) raise
['def', 'remove_program_temp_directory', '(', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'program_temp_directory', ')', ':', 'max_retries', '=', '3', 'curr_retries', '=', '0', 'time_between_retries', '=', '1', 'while', 'True', ':', 'try', ':', 'shutil', '.', 'rmtree', '(', 'program_temp_directory', ')', 'break', 'except', 'IOError', ':', 'curr_retries', '+=', '1', 'if', 'curr_retries', '>', 'max_retries', ':', 'raise', '# re-raise the exception', 'time', '.', 'sleep', '(', 'time_between_retries', ')', 'except', ':', 'print', '(', '"Cleaning up temp dir..."', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'raise']
Remove the global temp directory and all its contents.
['Remove', 'the', 'global', 'temp', 'directory', 'and', 'all', 'its', 'contents', '.']
train
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/external_program_calls.py#L191-L208
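A standalone sketch of the same retry-and-cleanup pattern with the directory passed as an argument instead of the module-level global; the retry count and sleep time mirror the original, while the final catch-all is narrowed to Exception:

import os
import shutil
import sys
import time

def remove_temp_directory(path, max_retries=3, time_between_retries=1):
    """Remove `path` and its contents, retrying a few times on IOError."""
    if not os.path.exists(path):
        return
    curr_retries = 0
    while True:
        try:
            shutil.rmtree(path)
            break
        except IOError:
            curr_retries += 1
            if curr_retries > max_retries:
                raise                      # give up and re-raise
            time.sleep(time_between_retries)
        except Exception:
            print("Cleaning up temp dir...", file=sys.stderr)
            raise

os.makedirs('demo_tmp_dir', exist_ok=True)
remove_temp_directory('demo_tmp_dir')
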
3,351
Kozea/pygal
pygal/graph/pyramid.py
VerticalPyramid._get_separated_values
def _get_separated_values(self, secondary=False): """Separate values between odd and even series stacked""" series = self.secondary_series if secondary else self.series positive_vals = map( sum, zip( *[ serie.safe_values for index, serie in enumerate(series) if index % 2 ] ) ) negative_vals = map( sum, zip( *[ serie.safe_values for index, serie in enumerate(series) if not index % 2 ] ) ) return list(positive_vals), list(negative_vals)
python
def _get_separated_values(self, secondary=False): """Separate values between odd and even series stacked""" series = self.secondary_series if secondary else self.series positive_vals = map( sum, zip( *[ serie.safe_values for index, serie in enumerate(series) if index % 2 ] ) ) negative_vals = map( sum, zip( *[ serie.safe_values for index, serie in enumerate(series) if not index % 2 ] ) ) return list(positive_vals), list(negative_vals)
['def', '_get_separated_values', '(', 'self', ',', 'secondary', '=', 'False', ')', ':', 'series', '=', 'self', '.', 'secondary_series', 'if', 'secondary', 'else', 'self', '.', 'series', 'positive_vals', '=', 'map', '(', 'sum', ',', 'zip', '(', '*', '[', 'serie', '.', 'safe_values', 'for', 'index', ',', 'serie', 'in', 'enumerate', '(', 'series', ')', 'if', 'index', '%', '2', ']', ')', ')', 'negative_vals', '=', 'map', '(', 'sum', ',', 'zip', '(', '*', '[', 'serie', '.', 'safe_values', 'for', 'index', ',', 'serie', 'in', 'enumerate', '(', 'series', ')', 'if', 'not', 'index', '%', '2', ']', ')', ')', 'return', 'list', '(', 'positive_vals', ')', ',', 'list', '(', 'negative_vals', ')']
Separate values between odd and even series stacked
['Separate', 'values', 'between', 'odd', 'and', 'even', 'series', 'stacked']
train
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/pyramid.py#L40-L61
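A hedged usage sketch of the chart type this helper supports: in a pygal Pyramid, even-indexed and odd-indexed series are stacked on opposite sides, which is exactly the odd/even split that _get_separated_values computes. It assumes pygal is installed; the numbers are invented:

import pygal                              # assumed to be installed

chart = pygal.Pyramid()
chart.title = 'Population by age band (made-up data)'
chart.x_labels = ['0-19', '20-39', '40-59', '60-79', '80+']
chart.add('Women', [5, 10, 15, 12, 8])    # series index 0
chart.add('Men',   [6, 11, 14, 11, 7])    # series index 1
chart.render_to_file('pyramid_demo.svg')
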
3,352
mikhaildubov/AST-text-analysis
east/asts/ast_linear.py
LinearAnnotatedSuffixTree._construct
def _construct(self, strings_collection): """ Generalized suffix tree construction algorithm based on the Ukkonen's algorithm for suffix tree construction, with linear [O(n_1 + ... + n_m)] worst-case time complexity, where m is the number of strings in collection. """ # 1. Add a unique character to each string in the collection strings_collection = utils.make_unique_endings(strings_collection) ############################################################ # 2. Build the GST using modified Ukkonnen's algorithm # ############################################################ root = ast.AnnotatedSuffixTree.Node() root.strings_collection = strings_collection # To preserve simplicity root.suffix_link = root root._arc = (0,-1,0) # For constant updating of all leafs, see [Gusfield {RUS}, p. 139] root._e = [0 for _ in xrange(len(strings_collection))] def _ukkonen_first_phases(string_ind): """ Looks for the part of the string which is already encoded. Returns a tuple of form ([length of already encoded string preffix], [tree node to start the first explicit phase with], [path to go down at the beginning of the first explicit phase]). """ already_in_tree = 0 suffix = strings_collection[string_ind] starting_path = (0, 0, 0) starting_node = root child_node = starting_node.chose_arc(suffix) while child_node: (str_ind, substr_start, substr_end) = child_node.arc() match = utils.match_strings( suffix, strings_collection[str_ind][substr_start:substr_end]) already_in_tree += match if match == substr_end-substr_start: # matched the arc, proceed with child node suffix = suffix[match:] starting_node = child_node child_node = starting_node.chose_arc(suffix) else: # otherwise we will have to proceed certain path at the beginning # of the first explicit phase starting_path = (str_ind, substr_start, substr_start+match) break # For constant updating of all leafs, see [Gusfield {RUS}, p. 139] root._e[string_ind] = already_in_tree return (already_in_tree, starting_node, starting_path) def _ukkonen_phase(string_ind, phase, starting_node, starting_path, starting_continuation): """ Ukkonen's algorithm single phase. Returns a tuple of form: ([tree node to start the next phase with], [path to go down at the beginning of the next phase], [starting continuation for the next phase]). 
""" current_suffix_end = starting_node suffix_link_source_node = None path_str_ind, path_substr_start, path_substr_end = starting_path # Continuations [starting_continuation..(i+1)] for continuation in xrange(starting_continuation, phase+1): # Go up to the first node with suffix link [no more than 1 pass] if continuation > starting_continuation: path_str_ind, path_substr_start, path_substr_end = 0, 0, 0 if not current_suffix_end.suffix_link: (path_str_ind, path_substr_start, path_substr_end) = current_suffix_end.arc() current_suffix_end = current_suffix_end.parent if current_suffix_end.is_root(): path_str_ind = string_ind path_substr_start = continuation path_substr_end = phase else: # Go through the suffix link current_suffix_end = current_suffix_end.suffix_link # Go down the path (str_ind, substr_start, substr_end) # NB: using Skip/Count trick, # see [Gusfield {RUS} p.134] for details g = path_substr_end - path_substr_start if g > 0: current_suffix_end = current_suffix_end.chose_arc(strings_collection [path_str_ind][path_substr_start]) (_, cs_ss_start, cs_ss_end) = current_suffix_end.arc() g_ = cs_ss_end - cs_ss_start while g >= g_: path_substr_start += g_ g -= g_ if g > 0: current_suffix_end = current_suffix_end.chose_arc(strings_collection [path_str_ind][path_substr_start]) (_, cs_ss_start, cs_ss_end) = current_suffix_end.arc() g_ = cs_ss_end - cs_ss_start # Perform continuation by one of three rules, # see [Gusfield {RUS} p. 129] for details if g == 0: # Rule 1 if current_suffix_end.is_leaf(): pass # Rule 2a elif not current_suffix_end.chose_arc(strings_collection[string_ind][phase]): if suffix_link_source_node: suffix_link_source_node.suffix_link = current_suffix_end new_leaf = current_suffix_end.add_new_child(string_ind, phase, -1) new_leaf.weight = 1 if continuation == starting_continuation: starting_node = new_leaf starting_path = (0, 0, 0) # Rule 3a else: if suffix_link_source_node: suffix_link_source_node.suffix_link = current_suffix_end starting_continuation = continuation starting_node = current_suffix_end starting_path = (string_ind, phase, phase+1) break suffix_link_source_node = None else: (si, ss, se) = current_suffix_end._arc # Rule 2b if strings_collection[si][ss + g] != strings_collection[string_ind][phase]: parent = current_suffix_end.parent parent.remove_child(current_suffix_end) current_suffix_end._arc = (si, ss+g, se) new_node = parent.add_new_child(si, ss, ss + g) new_leaf = new_node.add_new_child(string_ind, phase, -1) new_leaf.weight = 1 if continuation == starting_continuation: starting_node = new_leaf starting_path = (0, 0, 0) new_node.add_child(current_suffix_end) if suffix_link_source_node: # Define new suffix link suffix_link_source_node.suffix_link = new_node suffix_link_source_node = new_node current_suffix_end = new_node # Rule 3b else: suffix_link_source_node = None starting_continuation = continuation starting_node = current_suffix_end.parent starting_path = (si, ss, ss+g+1) break # Constant updating of all leafs, see [Gusfield {RUS}, p. 
139] starting_node._e[string_ind] += 1 return starting_node, starting_path, starting_continuation for m in xrange(len(strings_collection)): # Check for phases 1..x that are already in tree starting_phase, starting_node, starting_path = _ukkonen_first_phases(m) starting_continuation = 0 # Perform phases (x+1)..n explicitly for phase in xrange(starting_phase, len(strings_collection[m])): starting_node, starting_path, starting_continuation = \ _ukkonen_phase(m, phase, starting_node, starting_path, starting_continuation) ############################################################ ############################################################ ############################################################ # 3. Delete degenerate first-level children for k in root.children.keys(): (ss, si, se) = root.children[k].arc() if (se - si == 1 and ord(strings_collection[ss][si]) >= consts.String.UNICODE_SPECIAL_SYMBOLS_START): del root.children[k] # 4. Make a depth-first bottom-up traversal and annotate # each node by the sum of its children; # each leaf is already annotated with '1'. def _annotate(node): weight = 0 for k in node.children: if node.children[k].weight > 0: weight += node.children[k].weight else: weight += _annotate(node.children[k]) node.weight = weight return weight _annotate(root) return root
python
def _construct(self, strings_collection): """ Generalized suffix tree construction algorithm based on the Ukkonen's algorithm for suffix tree construction, with linear [O(n_1 + ... + n_m)] worst-case time complexity, where m is the number of strings in collection. """ # 1. Add a unique character to each string in the collection strings_collection = utils.make_unique_endings(strings_collection) ############################################################ # 2. Build the GST using modified Ukkonnen's algorithm # ############################################################ root = ast.AnnotatedSuffixTree.Node() root.strings_collection = strings_collection # To preserve simplicity root.suffix_link = root root._arc = (0,-1,0) # For constant updating of all leafs, see [Gusfield {RUS}, p. 139] root._e = [0 for _ in xrange(len(strings_collection))] def _ukkonen_first_phases(string_ind): """ Looks for the part of the string which is already encoded. Returns a tuple of form ([length of already encoded string preffix], [tree node to start the first explicit phase with], [path to go down at the beginning of the first explicit phase]). """ already_in_tree = 0 suffix = strings_collection[string_ind] starting_path = (0, 0, 0) starting_node = root child_node = starting_node.chose_arc(suffix) while child_node: (str_ind, substr_start, substr_end) = child_node.arc() match = utils.match_strings( suffix, strings_collection[str_ind][substr_start:substr_end]) already_in_tree += match if match == substr_end-substr_start: # matched the arc, proceed with child node suffix = suffix[match:] starting_node = child_node child_node = starting_node.chose_arc(suffix) else: # otherwise we will have to proceed certain path at the beginning # of the first explicit phase starting_path = (str_ind, substr_start, substr_start+match) break # For constant updating of all leafs, see [Gusfield {RUS}, p. 139] root._e[string_ind] = already_in_tree return (already_in_tree, starting_node, starting_path) def _ukkonen_phase(string_ind, phase, starting_node, starting_path, starting_continuation): """ Ukkonen's algorithm single phase. Returns a tuple of form: ([tree node to start the next phase with], [path to go down at the beginning of the next phase], [starting continuation for the next phase]). 
""" current_suffix_end = starting_node suffix_link_source_node = None path_str_ind, path_substr_start, path_substr_end = starting_path # Continuations [starting_continuation..(i+1)] for continuation in xrange(starting_continuation, phase+1): # Go up to the first node with suffix link [no more than 1 pass] if continuation > starting_continuation: path_str_ind, path_substr_start, path_substr_end = 0, 0, 0 if not current_suffix_end.suffix_link: (path_str_ind, path_substr_start, path_substr_end) = current_suffix_end.arc() current_suffix_end = current_suffix_end.parent if current_suffix_end.is_root(): path_str_ind = string_ind path_substr_start = continuation path_substr_end = phase else: # Go through the suffix link current_suffix_end = current_suffix_end.suffix_link # Go down the path (str_ind, substr_start, substr_end) # NB: using Skip/Count trick, # see [Gusfield {RUS} p.134] for details g = path_substr_end - path_substr_start if g > 0: current_suffix_end = current_suffix_end.chose_arc(strings_collection [path_str_ind][path_substr_start]) (_, cs_ss_start, cs_ss_end) = current_suffix_end.arc() g_ = cs_ss_end - cs_ss_start while g >= g_: path_substr_start += g_ g -= g_ if g > 0: current_suffix_end = current_suffix_end.chose_arc(strings_collection [path_str_ind][path_substr_start]) (_, cs_ss_start, cs_ss_end) = current_suffix_end.arc() g_ = cs_ss_end - cs_ss_start # Perform continuation by one of three rules, # see [Gusfield {RUS} p. 129] for details if g == 0: # Rule 1 if current_suffix_end.is_leaf(): pass # Rule 2a elif not current_suffix_end.chose_arc(strings_collection[string_ind][phase]): if suffix_link_source_node: suffix_link_source_node.suffix_link = current_suffix_end new_leaf = current_suffix_end.add_new_child(string_ind, phase, -1) new_leaf.weight = 1 if continuation == starting_continuation: starting_node = new_leaf starting_path = (0, 0, 0) # Rule 3a else: if suffix_link_source_node: suffix_link_source_node.suffix_link = current_suffix_end starting_continuation = continuation starting_node = current_suffix_end starting_path = (string_ind, phase, phase+1) break suffix_link_source_node = None else: (si, ss, se) = current_suffix_end._arc # Rule 2b if strings_collection[si][ss + g] != strings_collection[string_ind][phase]: parent = current_suffix_end.parent parent.remove_child(current_suffix_end) current_suffix_end._arc = (si, ss+g, se) new_node = parent.add_new_child(si, ss, ss + g) new_leaf = new_node.add_new_child(string_ind, phase, -1) new_leaf.weight = 1 if continuation == starting_continuation: starting_node = new_leaf starting_path = (0, 0, 0) new_node.add_child(current_suffix_end) if suffix_link_source_node: # Define new suffix link suffix_link_source_node.suffix_link = new_node suffix_link_source_node = new_node current_suffix_end = new_node # Rule 3b else: suffix_link_source_node = None starting_continuation = continuation starting_node = current_suffix_end.parent starting_path = (si, ss, ss+g+1) break # Constant updating of all leafs, see [Gusfield {RUS}, p. 
139] starting_node._e[string_ind] += 1 return starting_node, starting_path, starting_continuation for m in xrange(len(strings_collection)): # Check for phases 1..x that are already in tree starting_phase, starting_node, starting_path = _ukkonen_first_phases(m) starting_continuation = 0 # Perform phases (x+1)..n explicitly for phase in xrange(starting_phase, len(strings_collection[m])): starting_node, starting_path, starting_continuation = \ _ukkonen_phase(m, phase, starting_node, starting_path, starting_continuation) ############################################################ ############################################################ ############################################################ # 3. Delete degenerate first-level children for k in root.children.keys(): (ss, si, se) = root.children[k].arc() if (se - si == 1 and ord(strings_collection[ss][si]) >= consts.String.UNICODE_SPECIAL_SYMBOLS_START): del root.children[k] # 4. Make a depth-first bottom-up traversal and annotate # each node by the sum of its children; # each leaf is already annotated with '1'. def _annotate(node): weight = 0 for k in node.children: if node.children[k].weight > 0: weight += node.children[k].weight else: weight += _annotate(node.children[k]) node.weight = weight return weight _annotate(root) return root
['def', '_construct', '(', 'self', ',', 'strings_collection', ')', ':', '# 1. Add a unique character to each string in the collection', 'strings_collection', '=', 'utils', '.', 'make_unique_endings', '(', 'strings_collection', ')', '############################################################', "# 2. Build the GST using modified Ukkonnen's algorithm #", '############################################################', 'root', '=', 'ast', '.', 'AnnotatedSuffixTree', '.', 'Node', '(', ')', 'root', '.', 'strings_collection', '=', 'strings_collection', '# To preserve simplicity', 'root', '.', 'suffix_link', '=', 'root', 'root', '.', '_arc', '=', '(', '0', ',', '-', '1', ',', '0', ')', '# For constant updating of all leafs, see [Gusfield {RUS}, p. 139]', 'root', '.', '_e', '=', '[', '0', 'for', '_', 'in', 'xrange', '(', 'len', '(', 'strings_collection', ')', ')', ']', 'def', '_ukkonen_first_phases', '(', 'string_ind', ')', ':', '"""\n Looks for the part of the string which is already encoded.\n Returns a tuple of form\n ([length of already encoded string preffix],\n [tree node to start the first explicit phase with],\n [path to go down at the beginning of the first explicit phase]).\n \n """', 'already_in_tree', '=', '0', 'suffix', '=', 'strings_collection', '[', 'string_ind', ']', 'starting_path', '=', '(', '0', ',', '0', ',', '0', ')', 'starting_node', '=', 'root', 'child_node', '=', 'starting_node', '.', 'chose_arc', '(', 'suffix', ')', 'while', 'child_node', ':', '(', 'str_ind', ',', 'substr_start', ',', 'substr_end', ')', '=', 'child_node', '.', 'arc', '(', ')', 'match', '=', 'utils', '.', 'match_strings', '(', 'suffix', ',', 'strings_collection', '[', 'str_ind', ']', '[', 'substr_start', ':', 'substr_end', ']', ')', 'already_in_tree', '+=', 'match', 'if', 'match', '==', 'substr_end', '-', 'substr_start', ':', '# matched the arc, proceed with child node', 'suffix', '=', 'suffix', '[', 'match', ':', ']', 'starting_node', '=', 'child_node', 'child_node', '=', 'starting_node', '.', 'chose_arc', '(', 'suffix', ')', 'else', ':', '# otherwise we will have to proceed certain path at the beginning', '# of the first explicit phase', 'starting_path', '=', '(', 'str_ind', ',', 'substr_start', ',', 'substr_start', '+', 'match', ')', 'break', '# For constant updating of all leafs, see [Gusfield {RUS}, p. 
139]', 'root', '.', '_e', '[', 'string_ind', ']', '=', 'already_in_tree', 'return', '(', 'already_in_tree', ',', 'starting_node', ',', 'starting_path', ')', 'def', '_ukkonen_phase', '(', 'string_ind', ',', 'phase', ',', 'starting_node', ',', 'starting_path', ',', 'starting_continuation', ')', ':', '"""\n Ukkonen\'s algorithm single phase.\n Returns a tuple of form:\n ([tree node to start the next phase with],\n [path to go down at the beginning of the next phase],\n [starting continuation for the next phase]).\n \n """', 'current_suffix_end', '=', 'starting_node', 'suffix_link_source_node', '=', 'None', 'path_str_ind', ',', 'path_substr_start', ',', 'path_substr_end', '=', 'starting_path', '# Continuations [starting_continuation..(i+1)]', 'for', 'continuation', 'in', 'xrange', '(', 'starting_continuation', ',', 'phase', '+', '1', ')', ':', '# Go up to the first node with suffix link [no more than 1 pass]', 'if', 'continuation', '>', 'starting_continuation', ':', 'path_str_ind', ',', 'path_substr_start', ',', 'path_substr_end', '=', '0', ',', '0', ',', '0', 'if', 'not', 'current_suffix_end', '.', 'suffix_link', ':', '(', 'path_str_ind', ',', 'path_substr_start', ',', 'path_substr_end', ')', '=', 'current_suffix_end', '.', 'arc', '(', ')', 'current_suffix_end', '=', 'current_suffix_end', '.', 'parent', 'if', 'current_suffix_end', '.', 'is_root', '(', ')', ':', 'path_str_ind', '=', 'string_ind', 'path_substr_start', '=', 'continuation', 'path_substr_end', '=', 'phase', 'else', ':', '# Go through the suffix link', 'current_suffix_end', '=', 'current_suffix_end', '.', 'suffix_link', '# Go down the path (str_ind, substr_start, substr_end)', '# NB: using Skip/Count trick,', '# see [Gusfield {RUS} p.134] for details', 'g', '=', 'path_substr_end', '-', 'path_substr_start', 'if', 'g', '>', '0', ':', 'current_suffix_end', '=', 'current_suffix_end', '.', 'chose_arc', '(', 'strings_collection', '[', 'path_str_ind', ']', '[', 'path_substr_start', ']', ')', '(', '_', ',', 'cs_ss_start', ',', 'cs_ss_end', ')', '=', 'current_suffix_end', '.', 'arc', '(', ')', 'g_', '=', 'cs_ss_end', '-', 'cs_ss_start', 'while', 'g', '>=', 'g_', ':', 'path_substr_start', '+=', 'g_', 'g', '-=', 'g_', 'if', 'g', '>', '0', ':', 'current_suffix_end', '=', 'current_suffix_end', '.', 'chose_arc', '(', 'strings_collection', '[', 'path_str_ind', ']', '[', 'path_substr_start', ']', ')', '(', '_', ',', 'cs_ss_start', ',', 'cs_ss_end', ')', '=', 'current_suffix_end', '.', 'arc', '(', ')', 'g_', '=', 'cs_ss_end', '-', 'cs_ss_start', '# Perform continuation by one of three rules,', '# see [Gusfield {RUS} p. 
129] for details', 'if', 'g', '==', '0', ':', '# Rule 1', 'if', 'current_suffix_end', '.', 'is_leaf', '(', ')', ':', 'pass', '# Rule 2a', 'elif', 'not', 'current_suffix_end', '.', 'chose_arc', '(', 'strings_collection', '[', 'string_ind', ']', '[', 'phase', ']', ')', ':', 'if', 'suffix_link_source_node', ':', 'suffix_link_source_node', '.', 'suffix_link', '=', 'current_suffix_end', 'new_leaf', '=', 'current_suffix_end', '.', 'add_new_child', '(', 'string_ind', ',', 'phase', ',', '-', '1', ')', 'new_leaf', '.', 'weight', '=', '1', 'if', 'continuation', '==', 'starting_continuation', ':', 'starting_node', '=', 'new_leaf', 'starting_path', '=', '(', '0', ',', '0', ',', '0', ')', '# Rule 3a', 'else', ':', 'if', 'suffix_link_source_node', ':', 'suffix_link_source_node', '.', 'suffix_link', '=', 'current_suffix_end', 'starting_continuation', '=', 'continuation', 'starting_node', '=', 'current_suffix_end', 'starting_path', '=', '(', 'string_ind', ',', 'phase', ',', 'phase', '+', '1', ')', 'break', 'suffix_link_source_node', '=', 'None', 'else', ':', '(', 'si', ',', 'ss', ',', 'se', ')', '=', 'current_suffix_end', '.', '_arc', '# Rule 2b', 'if', 'strings_collection', '[', 'si', ']', '[', 'ss', '+', 'g', ']', '!=', 'strings_collection', '[', 'string_ind', ']', '[', 'phase', ']', ':', 'parent', '=', 'current_suffix_end', '.', 'parent', 'parent', '.', 'remove_child', '(', 'current_suffix_end', ')', 'current_suffix_end', '.', '_arc', '=', '(', 'si', ',', 'ss', '+', 'g', ',', 'se', ')', 'new_node', '=', 'parent', '.', 'add_new_child', '(', 'si', ',', 'ss', ',', 'ss', '+', 'g', ')', 'new_leaf', '=', 'new_node', '.', 'add_new_child', '(', 'string_ind', ',', 'phase', ',', '-', '1', ')', 'new_leaf', '.', 'weight', '=', '1', 'if', 'continuation', '==', 'starting_continuation', ':', 'starting_node', '=', 'new_leaf', 'starting_path', '=', '(', '0', ',', '0', ',', '0', ')', 'new_node', '.', 'add_child', '(', 'current_suffix_end', ')', 'if', 'suffix_link_source_node', ':', '# Define new suffix link', 'suffix_link_source_node', '.', 'suffix_link', '=', 'new_node', 'suffix_link_source_node', '=', 'new_node', 'current_suffix_end', '=', 'new_node', '# Rule 3b', 'else', ':', 'suffix_link_source_node', '=', 'None', 'starting_continuation', '=', 'continuation', 'starting_node', '=', 'current_suffix_end', '.', 'parent', 'starting_path', '=', '(', 'si', ',', 'ss', ',', 'ss', '+', 'g', '+', '1', ')', 'break', '# Constant updating of all leafs, see [Gusfield {RUS}, p. 139]', 'starting_node', '.', '_e', '[', 'string_ind', ']', '+=', '1', 'return', 'starting_node', ',', 'starting_path', ',', 'starting_continuation', 'for', 'm', 'in', 'xrange', '(', 'len', '(', 'strings_collection', ')', ')', ':', '# Check for phases 1..x that are already in tree', 'starting_phase', ',', 'starting_node', ',', 'starting_path', '=', '_ukkonen_first_phases', '(', 'm', ')', 'starting_continuation', '=', '0', '# Perform phases (x+1)..n explicitly', 'for', 'phase', 'in', 'xrange', '(', 'starting_phase', ',', 'len', '(', 'strings_collection', '[', 'm', ']', ')', ')', ':', 'starting_node', ',', 'starting_path', ',', 'starting_continuation', '=', '_ukkonen_phase', '(', 'm', ',', 'phase', ',', 'starting_node', ',', 'starting_path', ',', 'starting_continuation', ')', '############################################################', '############################################################', '############################################################', '# 3. 
Delete degenerate first-level children', 'for', 'k', 'in', 'root', '.', 'children', '.', 'keys', '(', ')', ':', '(', 'ss', ',', 'si', ',', 'se', ')', '=', 'root', '.', 'children', '[', 'k', ']', '.', 'arc', '(', ')', 'if', '(', 'se', '-', 'si', '==', '1', 'and', 'ord', '(', 'strings_collection', '[', 'ss', ']', '[', 'si', ']', ')', '>=', 'consts', '.', 'String', '.', 'UNICODE_SPECIAL_SYMBOLS_START', ')', ':', 'del', 'root', '.', 'children', '[', 'k', ']', '# 4. Make a depth-first bottom-up traversal and annotate', '# each node by the sum of its children;', "# each leaf is already annotated with '1'.", 'def', '_annotate', '(', 'node', ')', ':', 'weight', '=', '0', 'for', 'k', 'in', 'node', '.', 'children', ':', 'if', 'node', '.', 'children', '[', 'k', ']', '.', 'weight', '>', '0', ':', 'weight', '+=', 'node', '.', 'children', '[', 'k', ']', '.', 'weight', 'else', ':', 'weight', '+=', '_annotate', '(', 'node', '.', 'children', '[', 'k', ']', ')', 'node', '.', 'weight', '=', 'weight', 'return', 'weight', '_annotate', '(', 'root', ')', 'return', 'root']
Generalized suffix tree construction algorithm based on the Ukkonen's algorithm for suffix tree construction, with linear [O(n_1 + ... + n_m)] worst-case time complexity, where m is the number of strings in collection.
['Generalized', 'suffix', 'tree', 'construction', 'algorithm', 'based', 'on', 'the', 'Ukkonen', 's', 'algorithm', 'for', 'suffix', 'tree', 'construction', 'with', 'linear', '[', 'O', '(', 'n_1', '+', '...', '+', 'n_m', ')', ']', 'worst', '-', 'case', 'time', 'complexity', 'where', 'm', 'is', 'the', 'number', 'of', 'strings', 'in', 'collection', '.']
train
https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/asts/ast_linear.py#L12-L208
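A minimal, self-contained sketch of the final annotation step in the record above (step 4): each leaf carries weight 1 and every internal node receives the sum of its children's weights. The Node class below is a stand-in invented for illustration, not the library's ast.AnnotatedSuffixTree.Node.

    class Node:
        # Stand-in node: leaves are created with weight 1, internal nodes with weight 0.
        def __init__(self, children=None, weight=0):
            self.children = children or {}
            self.weight = weight

    def annotate(node):
        # Depth-first, bottom-up: a node's weight becomes the number of leaves below it.
        if node.children:
            node.weight = sum(annotate(child) for child in node.children.values())
        return node.weight

    root = Node({'a': Node(weight=1),
                 'b': Node({'c': Node(weight=1), 'd': Node(weight=1)})})
    annotate(root)
    assert root.weight == 3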
3,353
pazz/alot
alot/ui.py
UI._input_filter
def _input_filter(self, keys, raw): """ handles keypresses. This function gets triggered directly by class:`urwid.MainLoop` upon user input and is supposed to pass on its `keys` parameter to let the root widget handle keys. We intercept the input here to trigger custom commands as defined in our keybindings. """ logging.debug("Got key (%s, %s)", keys, raw) # work around: escape triggers this twice, with keys = raw = [] # the first time.. if not keys: return # let widgets handle input if key is virtual window resize keypress # or we are in "passall" mode elif 'window resize' in keys or self._passall: return keys # end "lockdown" mode if the right key was pressed elif self._locked and keys[0] == self._unlock_key: self._locked = False self.mainloop.widget = self.root_widget if callable(self._unlock_callback): self._unlock_callback() # otherwise interpret keybinding else: def clear(*_): """Callback that resets the input queue.""" if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self.input_queue = [] async def _apply_fire(cmdline): try: await self.apply_commandline(cmdline) except CommandParseError as e: self.notify(str(e), priority='error') def fire(_, cmdline): clear() logging.debug("cmdline: '%s'", cmdline) if not self._locked: loop = asyncio.get_event_loop() loop.create_task(_apply_fire(cmdline)) # move keys are always passed elif cmdline in ['move up', 'move down', 'move page up', 'move page down']: return [cmdline[5:]] key = keys[0] if key and 'mouse' in key[0]: key = key[0] + ' %i' % key[1] self.input_queue.append(key) keyseq = ' '.join(self.input_queue) candidates = settings.get_mapped_input_keysequences(self.mode, prefix=keyseq) if keyseq in candidates: # case: current input queue is a mapped keysequence # get binding and interpret it if non-null cmdline = settings.get_keybinding(self.mode, keyseq) if cmdline: if len(candidates) > 1: timeout = float(settings.get('input_timeout')) if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self._alarm = self.mainloop.set_alarm_in( timeout, fire, cmdline) else: return fire(self.mainloop, cmdline) elif not candidates: # case: no sequence with prefix keyseq is mapped # just clear the input queue clear() else: # case: some sequences with proper prefix keyseq is mapped timeout = float(settings.get('input_timeout')) if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self._alarm = self.mainloop.set_alarm_in(timeout, clear) # update statusbar self.update()
python
def _input_filter(self, keys, raw): """ handles keypresses. This function gets triggered directly by class:`urwid.MainLoop` upon user input and is supposed to pass on its `keys` parameter to let the root widget handle keys. We intercept the input here to trigger custom commands as defined in our keybindings. """ logging.debug("Got key (%s, %s)", keys, raw) # work around: escape triggers this twice, with keys = raw = [] # the first time.. if not keys: return # let widgets handle input if key is virtual window resize keypress # or we are in "passall" mode elif 'window resize' in keys or self._passall: return keys # end "lockdown" mode if the right key was pressed elif self._locked and keys[0] == self._unlock_key: self._locked = False self.mainloop.widget = self.root_widget if callable(self._unlock_callback): self._unlock_callback() # otherwise interpret keybinding else: def clear(*_): """Callback that resets the input queue.""" if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self.input_queue = [] async def _apply_fire(cmdline): try: await self.apply_commandline(cmdline) except CommandParseError as e: self.notify(str(e), priority='error') def fire(_, cmdline): clear() logging.debug("cmdline: '%s'", cmdline) if not self._locked: loop = asyncio.get_event_loop() loop.create_task(_apply_fire(cmdline)) # move keys are always passed elif cmdline in ['move up', 'move down', 'move page up', 'move page down']: return [cmdline[5:]] key = keys[0] if key and 'mouse' in key[0]: key = key[0] + ' %i' % key[1] self.input_queue.append(key) keyseq = ' '.join(self.input_queue) candidates = settings.get_mapped_input_keysequences(self.mode, prefix=keyseq) if keyseq in candidates: # case: current input queue is a mapped keysequence # get binding and interpret it if non-null cmdline = settings.get_keybinding(self.mode, keyseq) if cmdline: if len(candidates) > 1: timeout = float(settings.get('input_timeout')) if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self._alarm = self.mainloop.set_alarm_in( timeout, fire, cmdline) else: return fire(self.mainloop, cmdline) elif not candidates: # case: no sequence with prefix keyseq is mapped # just clear the input queue clear() else: # case: some sequences with proper prefix keyseq is mapped timeout = float(settings.get('input_timeout')) if self._alarm is not None: self.mainloop.remove_alarm(self._alarm) self._alarm = self.mainloop.set_alarm_in(timeout, clear) # update statusbar self.update()
['def', '_input_filter', '(', 'self', ',', 'keys', ',', 'raw', ')', ':', 'logging', '.', 'debug', '(', '"Got key (%s, %s)"', ',', 'keys', ',', 'raw', ')', '# work around: escape triggers this twice, with keys = raw = []', '# the first time..', 'if', 'not', 'keys', ':', 'return', '# let widgets handle input if key is virtual window resize keypress', '# or we are in "passall" mode', 'elif', "'window resize'", 'in', 'keys', 'or', 'self', '.', '_passall', ':', 'return', 'keys', '# end "lockdown" mode if the right key was pressed', 'elif', 'self', '.', '_locked', 'and', 'keys', '[', '0', ']', '==', 'self', '.', '_unlock_key', ':', 'self', '.', '_locked', '=', 'False', 'self', '.', 'mainloop', '.', 'widget', '=', 'self', '.', 'root_widget', 'if', 'callable', '(', 'self', '.', '_unlock_callback', ')', ':', 'self', '.', '_unlock_callback', '(', ')', '# otherwise interpret keybinding', 'else', ':', 'def', 'clear', '(', '*', '_', ')', ':', '"""Callback that resets the input queue."""', 'if', 'self', '.', '_alarm', 'is', 'not', 'None', ':', 'self', '.', 'mainloop', '.', 'remove_alarm', '(', 'self', '.', '_alarm', ')', 'self', '.', 'input_queue', '=', '[', ']', 'async', 'def', '_apply_fire', '(', 'cmdline', ')', ':', 'try', ':', 'await', 'self', '.', 'apply_commandline', '(', 'cmdline', ')', 'except', 'CommandParseError', 'as', 'e', ':', 'self', '.', 'notify', '(', 'str', '(', 'e', ')', ',', 'priority', '=', "'error'", ')', 'def', 'fire', '(', '_', ',', 'cmdline', ')', ':', 'clear', '(', ')', 'logging', '.', 'debug', '(', '"cmdline: \'%s\'"', ',', 'cmdline', ')', 'if', 'not', 'self', '.', '_locked', ':', 'loop', '=', 'asyncio', '.', 'get_event_loop', '(', ')', 'loop', '.', 'create_task', '(', '_apply_fire', '(', 'cmdline', ')', ')', '# move keys are always passed', 'elif', 'cmdline', 'in', '[', "'move up'", ',', "'move down'", ',', "'move page up'", ',', "'move page down'", ']', ':', 'return', '[', 'cmdline', '[', '5', ':', ']', ']', 'key', '=', 'keys', '[', '0', ']', 'if', 'key', 'and', "'mouse'", 'in', 'key', '[', '0', ']', ':', 'key', '=', 'key', '[', '0', ']', '+', "' %i'", '%', 'key', '[', '1', ']', 'self', '.', 'input_queue', '.', 'append', '(', 'key', ')', 'keyseq', '=', "' '", '.', 'join', '(', 'self', '.', 'input_queue', ')', 'candidates', '=', 'settings', '.', 'get_mapped_input_keysequences', '(', 'self', '.', 'mode', ',', 'prefix', '=', 'keyseq', ')', 'if', 'keyseq', 'in', 'candidates', ':', '# case: current input queue is a mapped keysequence', '# get binding and interpret it if non-null', 'cmdline', '=', 'settings', '.', 'get_keybinding', '(', 'self', '.', 'mode', ',', 'keyseq', ')', 'if', 'cmdline', ':', 'if', 'len', '(', 'candidates', ')', '>', '1', ':', 'timeout', '=', 'float', '(', 'settings', '.', 'get', '(', "'input_timeout'", ')', ')', 'if', 'self', '.', '_alarm', 'is', 'not', 'None', ':', 'self', '.', 'mainloop', '.', 'remove_alarm', '(', 'self', '.', '_alarm', ')', 'self', '.', '_alarm', '=', 'self', '.', 'mainloop', '.', 'set_alarm_in', '(', 'timeout', ',', 'fire', ',', 'cmdline', ')', 'else', ':', 'return', 'fire', '(', 'self', '.', 'mainloop', ',', 'cmdline', ')', 'elif', 'not', 'candidates', ':', '# case: no sequence with prefix keyseq is mapped', '# just clear the input queue', 'clear', '(', ')', 'else', ':', '# case: some sequences with proper prefix keyseq is mapped', 'timeout', '=', 'float', '(', 'settings', '.', 'get', '(', "'input_timeout'", ')', ')', 'if', 'self', '.', '_alarm', 'is', 'not', 'None', ':', 'self', '.', 'mainloop', '.', 'remove_alarm', '(', 'self', '.', 
'_alarm', ')', 'self', '.', '_alarm', '=', 'self', '.', 'mainloop', '.', 'set_alarm_in', '(', 'timeout', ',', 'clear', ')', '# update statusbar', 'self', '.', 'update', '(', ')']
handles keypresses. This function gets triggered directly by class:`urwid.MainLoop` upon user input and is supposed to pass on its `keys` parameter to let the root widget handle keys. We intercept the input here to trigger custom commands as defined in our keybindings.
['handles', 'keypresses', '.', 'This', 'function', 'gets', 'triggered', 'directly', 'by', 'class', ':', 'urwid', '.', 'MainLoop', 'upon', 'user', 'input', 'and', 'is', 'supposed', 'to', 'pass', 'on', 'its', 'keys', 'parameter', 'to', 'let', 'the', 'root', 'widget', 'handle', 'keys', '.', 'We', 'intercept', 'the', 'input', 'here', 'to', 'trigger', 'custom', 'commands', 'as', 'defined', 'in', 'our', 'keybindings', '.']
train
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/ui.py#L153-L233
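The interesting part of the method above is the prefix matching over multi-key bindings. Below is a simplified, standalone sketch of that idea with an invented binding table and helper names (not alot's actual settings API); in alot itself a sequence that is both complete and a prefix of longer bindings fires after a configurable timeout rather than immediately.

    bindings = {'g g': 'move first', 'g G': 'move last', 'q': 'exit'}

    def candidates(prefix):
        # sequences that equal the prefix or extend it by further keys
        return [seq for seq in bindings if seq == prefix or seq.startswith(prefix + ' ')]

    queue = []

    def on_key(key):
        queue.append(key)
        keyseq = ' '.join(queue)
        matches = candidates(keyseq)
        if keyseq in bindings and len(matches) == 1:
            queue.clear()             # unambiguous: fire the bound command now
            return bindings[keyseq]
        if not matches:
            queue.clear()             # dead end: reset the input queue
        return None                   # otherwise keep waiting (alot arms a timeout here)

    assert on_key('g') is None
    assert on_key('G') == 'move last'
    assert on_key('x') is None        # unmapped key just clears the queue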
3,354
basecrm/basecrm-python
basecrm/services.py
DealSourcesService.retrieve
def retrieve(self, id) : """ Retrieve a single source Returns a single source available to the user by the provided id If a source with the supplied unique identifier does not exist it returns an error :calls: ``get /deal_sources/{id}`` :param int id: Unique identifier of a DealSource. :return: Dictionary that support attriubte-style access and represent DealSource resource. :rtype: dict """ _, _, deal_source = self.http_client.get("/deal_sources/{id}".format(id=id)) return deal_source
python
def retrieve(self, id) : """ Retrieve a single source Returns a single source available to the user by the provided id If a source with the supplied unique identifier does not exist it returns an error :calls: ``get /deal_sources/{id}`` :param int id: Unique identifier of a DealSource. :return: Dictionary that support attriubte-style access and represent DealSource resource. :rtype: dict """ _, _, deal_source = self.http_client.get("/deal_sources/{id}".format(id=id)) return deal_source
['def', 'retrieve', '(', 'self', ',', 'id', ')', ':', '_', ',', '_', ',', 'deal_source', '=', 'self', '.', 'http_client', '.', 'get', '(', '"/deal_sources/{id}"', '.', 'format', '(', 'id', '=', 'id', ')', ')', 'return', 'deal_source']
Retrieve a single source Returns a single source available to the user by the provided id If a source with the supplied unique identifier does not exist it returns an error :calls: ``get /deal_sources/{id}`` :param int id: Unique identifier of a DealSource. :return: Dictionary that supports attribute-style access and represents a DealSource resource. :rtype: dict
['Retrieve', 'a', 'single', 'source']
train
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L443-L457
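A hedged usage sketch for the service method above: assuming a configured basecrm.Client that exposes the deal sources service as client.deal_sources (the attribute name and token are assumptions, not confirmed by this record), retrieving one source looks roughly like this.

    import basecrm

    client = basecrm.Client(access_token='<YOUR_PERSONAL_ACCESS_TOKEN>')
    source = client.deal_sources.retrieve(id=2)   # dict with attribute-style access
    print(source['name'])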
3,355
googleapis/google-cloud-python
firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py
FirestoreClient.delete_document
def delete_document( self, name, current_document=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_document" not in self._inner_api_calls: self._inner_api_calls[ "delete_document" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_document, default_retry=self._method_configs["DeleteDocument"].retry, default_timeout=self._method_configs["DeleteDocument"].timeout, client_info=self._client_info, ) request = firestore_pb2.DeleteDocumentRequest( name=name, current_document=current_document ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_document"]( request, retry=retry, timeout=timeout, metadata=metadata )
python
def delete_document( self, name, current_document=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_document" not in self._inner_api_calls: self._inner_api_calls[ "delete_document" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_document, default_retry=self._method_configs["DeleteDocument"].retry, default_timeout=self._method_configs["DeleteDocument"].timeout, client_info=self._client_info, ) request = firestore_pb2.DeleteDocumentRequest( name=name, current_document=current_document ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_document"]( request, retry=retry, timeout=timeout, metadata=metadata )
['def', 'delete_document', '(', 'self', ',', 'name', ',', 'current_document', '=', 'None', ',', 'retry', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'timeout', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'DEFAULT', ',', 'metadata', '=', 'None', ',', ')', ':', '# Wrap the transport method to add retry and timeout logic.', 'if', '"delete_document"', 'not', 'in', 'self', '.', '_inner_api_calls', ':', 'self', '.', '_inner_api_calls', '[', '"delete_document"', ']', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'method', '.', 'wrap_method', '(', 'self', '.', 'transport', '.', 'delete_document', ',', 'default_retry', '=', 'self', '.', '_method_configs', '[', '"DeleteDocument"', ']', '.', 'retry', ',', 'default_timeout', '=', 'self', '.', '_method_configs', '[', '"DeleteDocument"', ']', '.', 'timeout', ',', 'client_info', '=', 'self', '.', '_client_info', ',', ')', 'request', '=', 'firestore_pb2', '.', 'DeleteDocumentRequest', '(', 'name', '=', 'name', ',', 'current_document', '=', 'current_document', ')', 'if', 'metadata', 'is', 'None', ':', 'metadata', '=', '[', ']', 'metadata', '=', 'list', '(', 'metadata', ')', 'try', ':', 'routing_header', '=', '[', '(', '"name"', ',', 'name', ')', ']', 'except', 'AttributeError', ':', 'pass', 'else', ':', 'routing_metadata', '=', 'google', '.', 'api_core', '.', 'gapic_v1', '.', 'routing_header', '.', 'to_grpc_metadata', '(', 'routing_header', ')', 'metadata', '.', 'append', '(', 'routing_metadata', ')', 'self', '.', '_inner_api_calls', '[', '"delete_document"', ']', '(', 'request', ',', 'retry', '=', 'retry', ',', 'timeout', '=', 'timeout', ',', 'metadata', '=', 'metadata', ')']
Deletes a document. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> name = client.any_path_path('[PROJECT]', '[DATABASE]', '[DOCUMENT]', '[ANY_PATH]') >>> >>> client.delete_document(name) Args: name (str): The resource name of the Document to delete. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. current_document (Union[dict, ~google.cloud.firestore_v1beta1.types.Precondition]): An optional precondition on the document. The request will fail if this is set and not met by the target document. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Precondition` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
['Deletes', 'a', 'document', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/gapic/firestore_client.py#L669-L742
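As a hedged extension of the docstring example above: the delete can be made conditional by passing current_document as a plain dict mirroring types.Precondition (the exists field name is taken from that message and is an assumption here).

    # Only delete if the target document currently exists; otherwise the call fails.
    client.delete_document(name, current_document={'exists': True})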
3,356
beetbox/audioread
audioread/__init__.py
_gst_available
def _gst_available(): """Determine whether Gstreamer and the Python GObject bindings are installed. """ try: import gi except ImportError: return False try: gi.require_version('Gst', '1.0') except (ValueError, AttributeError): return False try: from gi.repository import Gst # noqa except ImportError: return False return True
python
def _gst_available(): """Determine whether Gstreamer and the Python GObject bindings are installed. """ try: import gi except ImportError: return False try: gi.require_version('Gst', '1.0') except (ValueError, AttributeError): return False try: from gi.repository import Gst # noqa except ImportError: return False return True
['def', '_gst_available', '(', ')', ':', 'try', ':', 'import', 'gi', 'except', 'ImportError', ':', 'return', 'False', 'try', ':', 'gi', '.', 'require_version', '(', "'Gst'", ',', "'1.0'", ')', 'except', '(', 'ValueError', ',', 'AttributeError', ')', ':', 'return', 'False', 'try', ':', 'from', 'gi', '.', 'repository', 'import', 'Gst', '# noqa', 'except', 'ImportError', ':', 'return', 'False', 'return', 'True']
Determine whether Gstreamer and the Python GObject bindings are installed.
['Determine', 'whether', 'Gstreamer', 'and', 'the', 'Python', 'GObject', 'bindings', 'are', 'installed', '.']
train
https://github.com/beetbox/audioread/blob/c8bedf7880f13a7b7488b108aaf245d648674818/audioread/__init__.py#L22-L41
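A small sketch of how such a probe is typically used, e.g. to assemble a list of usable decode backends; the backend names below are illustrative, not audioread's internal registry.

    available = []
    if _gst_available():
        available.append('gstreamer')
    if not available:
        available.append('rawread')   # pure-Python wave/aiff fallback, always present
    print(available)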
3,357
LordDarkula/chess_py
chess_py/core/board.py
Board._calc_all_possible_moves
def _calc_all_possible_moves(self, input_color): """ Returns list of all possible moves :type: input_color: Color :rtype: list """ for piece in self: # Tests if square on the board is not empty if piece is not None and piece.color == input_color: for move in piece.possible_moves(self): test = cp(self) test_move = Move(end_loc=move.end_loc, piece=test.piece_at_square(move.start_loc), status=move.status, start_loc=move.start_loc, promoted_to_piece=move.promoted_to_piece) test.update(test_move) if self.king_loc_dict is None: yield move continue my_king = test.piece_at_square(self.king_loc_dict[input_color]) if my_king is None or \ not isinstance(my_king, King) or \ my_king.color != input_color: self.king_loc_dict[input_color] = test.find_king(input_color) my_king = test.piece_at_square(self.king_loc_dict[input_color]) if not my_king.in_check(test): yield move
python
def _calc_all_possible_moves(self, input_color): """ Returns list of all possible moves :type: input_color: Color :rtype: list """ for piece in self: # Tests if square on the board is not empty if piece is not None and piece.color == input_color: for move in piece.possible_moves(self): test = cp(self) test_move = Move(end_loc=move.end_loc, piece=test.piece_at_square(move.start_loc), status=move.status, start_loc=move.start_loc, promoted_to_piece=move.promoted_to_piece) test.update(test_move) if self.king_loc_dict is None: yield move continue my_king = test.piece_at_square(self.king_loc_dict[input_color]) if my_king is None or \ not isinstance(my_king, King) or \ my_king.color != input_color: self.king_loc_dict[input_color] = test.find_king(input_color) my_king = test.piece_at_square(self.king_loc_dict[input_color]) if not my_king.in_check(test): yield move
['def', '_calc_all_possible_moves', '(', 'self', ',', 'input_color', ')', ':', 'for', 'piece', 'in', 'self', ':', '# Tests if square on the board is not empty', 'if', 'piece', 'is', 'not', 'None', 'and', 'piece', '.', 'color', '==', 'input_color', ':', 'for', 'move', 'in', 'piece', '.', 'possible_moves', '(', 'self', ')', ':', 'test', '=', 'cp', '(', 'self', ')', 'test_move', '=', 'Move', '(', 'end_loc', '=', 'move', '.', 'end_loc', ',', 'piece', '=', 'test', '.', 'piece_at_square', '(', 'move', '.', 'start_loc', ')', ',', 'status', '=', 'move', '.', 'status', ',', 'start_loc', '=', 'move', '.', 'start_loc', ',', 'promoted_to_piece', '=', 'move', '.', 'promoted_to_piece', ')', 'test', '.', 'update', '(', 'test_move', ')', 'if', 'self', '.', 'king_loc_dict', 'is', 'None', ':', 'yield', 'move', 'continue', 'my_king', '=', 'test', '.', 'piece_at_square', '(', 'self', '.', 'king_loc_dict', '[', 'input_color', ']', ')', 'if', 'my_king', 'is', 'None', 'or', 'not', 'isinstance', '(', 'my_king', ',', 'King', ')', 'or', 'my_king', '.', 'color', '!=', 'input_color', ':', 'self', '.', 'king_loc_dict', '[', 'input_color', ']', '=', 'test', '.', 'find_king', '(', 'input_color', ')', 'my_king', '=', 'test', '.', 'piece_at_square', '(', 'self', '.', 'king_loc_dict', '[', 'input_color', ']', ')', 'if', 'not', 'my_king', '.', 'in_check', '(', 'test', ')', ':', 'yield', 'move']
Returns list of all possible moves :type: input_color: Color :rtype: list
['Returns', 'list', 'of', 'all', 'possible', 'moves']
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/board.py#L229-L264
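A hedged usage sketch for the generator above; the import locations and the init_default constructor are assumptions about chess_py's public API, not confirmed by this record.

    from chess_py import Board, color      # assumed import locations

    position = Board.init_default()        # assumed helper for the standard starting position
    white_moves = list(position._calc_all_possible_moves(color.white))
    print(len(white_moves))                # 20 legal moves in the starting position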
3,358
osrg/ryu
ryu/services/protocols/bgp/bgpspeaker.py
BGPSpeaker.neighbor_state_get
def neighbor_state_get(self, address=None, format='json'): """ This method returns the state of peer(s) in a json format. ``address`` specifies the address of a peer. If not given, the state of all the peers return. ``format`` specifies the format of the response. This parameter must be one of the following. - 'json' (default) - 'cli' """ show = { 'params': ['neighbor', 'summary'], 'format': format, } if address: show['params'].append(address) return call('operator.show', **show)
python
def neighbor_state_get(self, address=None, format='json'): """ This method returns the state of peer(s) in a json format. ``address`` specifies the address of a peer. If not given, the state of all the peers return. ``format`` specifies the format of the response. This parameter must be one of the following. - 'json' (default) - 'cli' """ show = { 'params': ['neighbor', 'summary'], 'format': format, } if address: show['params'].append(address) return call('operator.show', **show)
['def', 'neighbor_state_get', '(', 'self', ',', 'address', '=', 'None', ',', 'format', '=', "'json'", ')', ':', 'show', '=', '{', "'params'", ':', '[', "'neighbor'", ',', "'summary'", ']', ',', "'format'", ':', 'format', ',', '}', 'if', 'address', ':', 'show', '[', "'params'", ']', '.', 'append', '(', 'address', ')', 'return', 'call', '(', "'operator.show'", ',', '*', '*', 'show', ')']
This method returns the state of peer(s) in a json format. ``address`` specifies the address of a peer. If not given, the states of all peers are returned. ``format`` specifies the format of the response. This parameter must be one of the following. - 'json' (default) - 'cli'
['This', 'method', 'returns', 'the', 'state', 'of', 'peer', '(', 's', ')', 'in', 'a', 'json', 'format', '.']
train
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/bgpspeaker.py#L610-L630
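A hedged usage sketch: the constructor arguments below are the minimum a BGPSpeaker typically requires, and the addresses are placeholders. It queries the summary state of one configured peer in CLI format.

    from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

    speaker = BGPSpeaker(as_number=64512, router_id='10.0.0.1')
    speaker.neighbor_add(address='192.0.2.1', remote_as=64513)
    print(speaker.neighbor_state_get(address='192.0.2.1', format='cli'))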
3,359
slundberg/shap
shap/explainers/deep/deep_pytorch.py
add_interim_values
def add_interim_values(module, input, output): """The forward hook used to save interim tensors, detached from the graph. Used to calculate the multipliers """ try: del module.x except AttributeError: pass try: del module.y except AttributeError: pass module_type = module.__class__.__name__ if module_type in op_handler: func_name = op_handler[module_type].__name__ # First, check for cases where we don't need to save the x and y tensors if func_name == 'passthrough': pass else: # check only the 0th input varies for i in range(len(input)): if i != 0 and type(output) is tuple: assert input[i] == output[i], "Only the 0th input may vary!" # if a new method is added, it must be added here too. This ensures tensors # are only saved if necessary if func_name in ['maxpool', 'nonlinear_1d']: # only save tensors if necessary if type(input) is tuple: setattr(module, 'x', torch.nn.Parameter(input[0].detach())) else: setattr(module, 'x', torch.nn.Parameter(input.detach())) if type(output) is tuple: setattr(module, 'y', torch.nn.Parameter(output[0].detach())) else: setattr(module, 'y', torch.nn.Parameter(output.detach())) if module_type in failure_case_modules: input[0].register_hook(deeplift_tensor_grad)
python
def add_interim_values(module, input, output): """The forward hook used to save interim tensors, detached from the graph. Used to calculate the multipliers """ try: del module.x except AttributeError: pass try: del module.y except AttributeError: pass module_type = module.__class__.__name__ if module_type in op_handler: func_name = op_handler[module_type].__name__ # First, check for cases where we don't need to save the x and y tensors if func_name == 'passthrough': pass else: # check only the 0th input varies for i in range(len(input)): if i != 0 and type(output) is tuple: assert input[i] == output[i], "Only the 0th input may vary!" # if a new method is added, it must be added here too. This ensures tensors # are only saved if necessary if func_name in ['maxpool', 'nonlinear_1d']: # only save tensors if necessary if type(input) is tuple: setattr(module, 'x', torch.nn.Parameter(input[0].detach())) else: setattr(module, 'x', torch.nn.Parameter(input.detach())) if type(output) is tuple: setattr(module, 'y', torch.nn.Parameter(output[0].detach())) else: setattr(module, 'y', torch.nn.Parameter(output.detach())) if module_type in failure_case_modules: input[0].register_hook(deeplift_tensor_grad)
['def', 'add_interim_values', '(', 'module', ',', 'input', ',', 'output', ')', ':', 'try', ':', 'del', 'module', '.', 'x', 'except', 'AttributeError', ':', 'pass', 'try', ':', 'del', 'module', '.', 'y', 'except', 'AttributeError', ':', 'pass', 'module_type', '=', 'module', '.', '__class__', '.', '__name__', 'if', 'module_type', 'in', 'op_handler', ':', 'func_name', '=', 'op_handler', '[', 'module_type', ']', '.', '__name__', "# First, check for cases where we don't need to save the x and y tensors", 'if', 'func_name', '==', "'passthrough'", ':', 'pass', 'else', ':', '# check only the 0th input varies', 'for', 'i', 'in', 'range', '(', 'len', '(', 'input', ')', ')', ':', 'if', 'i', '!=', '0', 'and', 'type', '(', 'output', ')', 'is', 'tuple', ':', 'assert', 'input', '[', 'i', ']', '==', 'output', '[', 'i', ']', ',', '"Only the 0th input may vary!"', '# if a new method is added, it must be added here too. This ensures tensors', '# are only saved if necessary', 'if', 'func_name', 'in', '[', "'maxpool'", ',', "'nonlinear_1d'", ']', ':', '# only save tensors if necessary', 'if', 'type', '(', 'input', ')', 'is', 'tuple', ':', 'setattr', '(', 'module', ',', "'x'", ',', 'torch', '.', 'nn', '.', 'Parameter', '(', 'input', '[', '0', ']', '.', 'detach', '(', ')', ')', ')', 'else', ':', 'setattr', '(', 'module', ',', "'x'", ',', 'torch', '.', 'nn', '.', 'Parameter', '(', 'input', '.', 'detach', '(', ')', ')', ')', 'if', 'type', '(', 'output', ')', 'is', 'tuple', ':', 'setattr', '(', 'module', ',', "'y'", ',', 'torch', '.', 'nn', '.', 'Parameter', '(', 'output', '[', '0', ']', '.', 'detach', '(', ')', ')', ')', 'else', ':', 'setattr', '(', 'module', ',', "'y'", ',', 'torch', '.', 'nn', '.', 'Parameter', '(', 'output', '.', 'detach', '(', ')', ')', ')', 'if', 'module_type', 'in', 'failure_case_modules', ':', 'input', '[', '0', ']', '.', 'register_hook', '(', 'deeplift_tensor_grad', ')']
The forward hook used to save interim tensors, detached from the graph. Used to calculate the multipliers
['The', 'forward', 'hook', 'used', 'to', 'save', 'interim', 'tensors', 'detached', 'from', 'the', 'graph', '.', 'Used', 'to', 'calculate', 'the', 'multipliers']
train
https://github.com/slundberg/shap/blob/b280cb81d498b9d98565cad8dd16fc88ae52649f/shap/explainers/deep/deep_pytorch.py#L209-L245
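A sketch of how a forward hook like this gets attached; it mirrors, but is not copied from, the way DeepExplainer wires it up, and it assumes deep_pytorch's module-level tables (op_handler, failure_case_modules) are in scope alongside add_interim_values. After one forward pass the relevant modules expose .x and .y for the multiplier calculation.

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 1))
    background_batch = torch.zeros(16, 4)

    handles = [m.register_forward_hook(add_interim_values) for m in model.modules()]
    with torch.no_grad():
        model(background_batch)       # forward pass populates module.x / module.y
    for h in handles:
        h.remove()                    # detach the hooks once the tensors are captured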
3,360
aricaldeira/PySPED
pysped/nfe/processador_nfe.py
ConexaoHTTPS.connect
def connect(self): "Connect to a host on a given (SSL) port." # # source_address é atributo incluído na versão 2.7 do Python # Verificando a existência para funcionar em versões anteriores à 2.7 # if hasattr(self, 'source_address'): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() if sys.version_info >= (2,7,13): self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLS, do_handshake_on_connect=False) else: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23, do_handshake_on_connect=False)
python
def connect(self): "Connect to a host on a given (SSL) port." # # source_address é atributo incluído na versão 2.7 do Python # Verificando a existência para funcionar em versões anteriores à 2.7 # if hasattr(self, 'source_address'): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) if self._tunnel_host: self.sock = sock self._tunnel() if sys.version_info >= (2,7,13): self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLS, do_handshake_on_connect=False) else: self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23, do_handshake_on_connect=False)
['def', 'connect', '(', 'self', ')', ':', '#', '# source_address é atributo incluído na versão 2.7 do Python', '# Verificando a existência para funcionar em versões anteriores à 2.7', '#', 'if', 'hasattr', '(', 'self', ',', "'source_address'", ')', ':', 'sock', '=', 'socket', '.', 'create_connection', '(', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', ',', 'self', '.', 'timeout', ',', 'self', '.', 'source_address', ')', 'else', ':', 'sock', '=', 'socket', '.', 'create_connection', '(', '(', 'self', '.', 'host', ',', 'self', '.', 'port', ')', ',', 'self', '.', 'timeout', ')', 'if', 'self', '.', '_tunnel_host', ':', 'self', '.', 'sock', '=', 'sock', 'self', '.', '_tunnel', '(', ')', 'if', 'sys', '.', 'version_info', '>=', '(', '2', ',', '7', ',', '13', ')', ':', 'self', '.', 'sock', '=', 'ssl', '.', 'wrap_socket', '(', 'sock', ',', 'self', '.', 'key_file', ',', 'self', '.', 'cert_file', ',', 'ssl_version', '=', 'ssl', '.', 'PROTOCOL_TLS', ',', 'do_handshake_on_connect', '=', 'False', ')', 'else', ':', 'self', '.', 'sock', '=', 'ssl', '.', 'wrap_socket', '(', 'sock', ',', 'self', '.', 'key_file', ',', 'self', '.', 'cert_file', ',', 'ssl_version', '=', 'ssl', '.', 'PROTOCOL_SSLv23', ',', 'do_handshake_on_connect', '=', 'False', ')']
Connect to a host on a given (SSL) port.
['Connect', 'to', 'a', 'host', 'on', 'a', 'given', '(', 'SSL', ')', 'port', '.']
train
https://github.com/aricaldeira/PySPED/blob/42905693e913f32db2c23f4e067f94af28a8164a/pysped/nfe/processador_nfe.py#L162-L181
3,361
pulumi/pulumi
sdk/python/lib/pulumi/runtime/settings.py
get_project
def get_project() -> Optional[str]: """ Returns the current project name. """ project = SETTINGS.project if not project: require_test_mode_enabled() raise RunError('Missing project name; for test mode, please set PULUMI_NODEJS_PROJECT') return project
python
def get_project() -> Optional[str]: """ Returns the current project name. """ project = SETTINGS.project if not project: require_test_mode_enabled() raise RunError('Missing project name; for test mode, please set PULUMI_NODEJS_PROJECT') return project
['def', 'get_project', '(', ')', '->', 'Optional', '[', 'str', ']', ':', 'project', '=', 'SETTINGS', '.', 'project', 'if', 'not', 'project', ':', 'require_test_mode_enabled', '(', ')', 'raise', 'RunError', '(', "'Missing project name; for test mode, please set PULUMI_NODEJS_PROJECT'", ')', 'return', 'project']
Returns the current project name.
['Returns', 'the', 'current', 'project', 'name', '.']
train
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/runtime/settings.py#L107-L115
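A small usage sketch inside a Pulumi program: combine the project name with the stack name to build a resource prefix (get_stack is the companion accessor).

    import pulumi

    prefix = '{}-{}'.format(pulumi.get_project(), pulumi.get_stack())
    pulumi.export('resource_prefix', prefix)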
3,362
MartinThoma/mpu
mpu/math.py
round_down
def round_down(x, decimal_places): """ Round a float down to decimal_places. Parameters ---------- x : float decimal_places : int Returns ------- rounded_float : float Examples -------- >>> round_down(1.23456, 3) 1.234 >>> round_down(1.23456, 2) 1.23 """ from math import floor d = int('1' + ('0' * decimal_places)) return floor(x * d) / d
python
def round_down(x, decimal_places): """ Round a float down to decimal_places. Parameters ---------- x : float decimal_places : int Returns ------- rounded_float : float Examples -------- >>> round_down(1.23456, 3) 1.234 >>> round_down(1.23456, 2) 1.23 """ from math import floor d = int('1' + ('0' * decimal_places)) return floor(x * d) / d
['def', 'round_down', '(', 'x', ',', 'decimal_places', ')', ':', 'from', 'math', 'import', 'floor', 'd', '=', 'int', '(', "'1'", '+', '(', "'0'", '*', 'decimal_places', ')', ')', 'return', 'floor', '(', 'x', '*', 'd', ')', '/', 'd']
Round a float down to decimal_places. Parameters ---------- x : float decimal_places : int Returns ------- rounded_float : float Examples -------- >>> round_down(1.23456, 3) 1.234 >>> round_down(1.23456, 2) 1.23
['Round', 'a', 'float', 'down', 'to', 'decimal_places', '.']
train
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/math.py#L206-L228
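One caveat worth noting about the float-and-floor approach above, with a hedged Decimal-based alternative that is a sketch, not part of mpu.

    # Binary floating point can push x * 10**d just below the intended product, so the
    # floor drops a digit: round_down(0.29, 2) is floor(28.999999999999996) / 100 == 0.28.
    from decimal import Decimal, ROUND_FLOOR

    def round_down_decimal(x, decimal_places):
        quantum = Decimal(1).scaleb(-decimal_places)   # e.g. Decimal('0.01') for 2 places
        return float(Decimal(str(x)).quantize(quantum, rounding=ROUND_FLOOR))

    assert round_down_decimal(0.29, 2) == 0.29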
3,363
hydpy-dev/hydpy
hydpy/cythons/modelutils.py
FuncConverter.pyxlines
def pyxlines(self): """Cython code lines. Assumptions: * Function shall be a method * Method shall be inlined * Method returns nothing * Method arguments are of type `int` (except self) * Local variables are generally of type `int` but of type `double` when their name starts with `d_` """ lines = [' '+line for line in self.cleanlines] lines[0] = lines[0].replace('def ', 'cpdef inline void ') lines[0] = lines[0].replace('):', ') %s:' % _nogil) for name in self.untypedarguments: lines[0] = lines[0].replace(', %s ' % name, ', int %s ' % name) lines[0] = lines[0].replace(', %s)' % name, ', int %s)' % name) for name in self.untypedinternalvarnames: if name.startswith('d_'): lines.insert(1, ' cdef double ' + name) else: lines.insert(1, ' cdef int ' + name) return Lines(*lines)
python
def pyxlines(self): """Cython code lines. Assumptions: * Function shall be a method * Method shall be inlined * Method returns nothing * Method arguments are of type `int` (except self) * Local variables are generally of type `int` but of type `double` when their name starts with `d_` """ lines = [' '+line for line in self.cleanlines] lines[0] = lines[0].replace('def ', 'cpdef inline void ') lines[0] = lines[0].replace('):', ') %s:' % _nogil) for name in self.untypedarguments: lines[0] = lines[0].replace(', %s ' % name, ', int %s ' % name) lines[0] = lines[0].replace(', %s)' % name, ', int %s)' % name) for name in self.untypedinternalvarnames: if name.startswith('d_'): lines.insert(1, ' cdef double ' + name) else: lines.insert(1, ' cdef int ' + name) return Lines(*lines)
['def', 'pyxlines', '(', 'self', ')', ':', 'lines', '=', '[', "' '", '+', 'line', 'for', 'line', 'in', 'self', '.', 'cleanlines', ']', 'lines', '[', '0', ']', '=', 'lines', '[', '0', ']', '.', 'replace', '(', "'def '", ',', "'cpdef inline void '", ')', 'lines', '[', '0', ']', '=', 'lines', '[', '0', ']', '.', 'replace', '(', "'):'", ',', "') %s:'", '%', '_nogil', ')', 'for', 'name', 'in', 'self', '.', 'untypedarguments', ':', 'lines', '[', '0', ']', '=', 'lines', '[', '0', ']', '.', 'replace', '(', "', %s '", '%', 'name', ',', "', int %s '", '%', 'name', ')', 'lines', '[', '0', ']', '=', 'lines', '[', '0', ']', '.', 'replace', '(', "', %s)'", '%', 'name', ',', "', int %s)'", '%', 'name', ')', 'for', 'name', 'in', 'self', '.', 'untypedinternalvarnames', ':', 'if', 'name', '.', 'startswith', '(', "'d_'", ')', ':', 'lines', '.', 'insert', '(', '1', ',', "' cdef double '", '+', 'name', ')', 'else', ':', 'lines', '.', 'insert', '(', '1', ',', "' cdef int '", '+', 'name', ')', 'return', 'Lines', '(', '*', 'lines', ')']
Cython code lines. Assumptions: * Function shall be a method * Method shall be inlined * Method returns nothing * Method arguments are of type `int` (except self) * Local variables are generally of type `int` but of type `double` when their name starts with `d_`
['Cython', 'code', 'lines', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/cythons/modelutils.py#L1218-L1240
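A minimal, self-contained sketch of the signature rewrite described in the record above (not hydpy's actual classes; the `_nogil` value and the argument list are assumptions for illustration): a Python `def` line is rewritten into a typed `cpdef inline void ... nogil` line.

    _nogil = 'nogil'

    def cythonize_signature(line, untyped_args):
        # '    def calc(self, idx):' -> '    cpdef inline void calc(self, int idx) nogil:'
        line = line.replace('def ', 'cpdef inline void ')
        line = line.replace('):', ') %s:' % _nogil)
        for name in untyped_args:
            line = line.replace(', %s)' % name, ', int %s)' % name)
        return line

    print(cythonize_signature('    def calc(self, idx):', ['idx']))
    # prints:     cpdef inline void calc(self, int idx) nogil: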
3,364
spry-group/python-vultr
vultr/v1_startupscript.py
VultrStartupScript.update
def update(self, scriptid, params=None): ''' /v1/startupscript/update POST - account Update an existing startup script Link: https://www.vultr.com/api/#startupscript_update ''' params = update_params(params, {'SCRIPTID': scriptid}) return self.request('/v1/startupscript/update', params, 'POST')
python
def update(self, scriptid, params=None): ''' /v1/startupscript/update POST - account Update an existing startup script Link: https://www.vultr.com/api/#startupscript_update ''' params = update_params(params, {'SCRIPTID': scriptid}) return self.request('/v1/startupscript/update', params, 'POST')
['def', 'update', '(', 'self', ',', 'scriptid', ',', 'params', '=', 'None', ')', ':', 'params', '=', 'update_params', '(', 'params', ',', '{', "'SCRIPTID'", ':', 'scriptid', '}', ')', 'return', 'self', '.', 'request', '(', "'/v1/startupscript/update'", ',', 'params', ',', "'POST'", ')']
/v1/startupscript/update POST - account Update an existing startup script Link: https://www.vultr.com/api/#startupscript_update
['/', 'v1', '/', 'startupscript', '/', 'update', 'POST', '-', 'account', 'Update', 'an', 'existing', 'startup', 'script']
train
https://github.com/spry-group/python-vultr/blob/bad1448f1df7b5dba70fd3d11434f32580f0b850/vultr/v1_startupscript.py#L46-L54
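A hedged usage sketch for the method in the record above. The `Vultr` client class and the `startupscript` attribute name are assumptions about how python-vultr is wired together; the API key and SCRIPTID are placeholders.

    from vultr import Vultr  # assumed top-level client exposed by python-vultr

    api = Vultr('EXAMPLE_API_KEY')           # placeholder key
    api.startupscript.update('5', params={   # '5' is a placeholder SCRIPTID
        'name': 'bootstrap',
        'script': '#!/bin/sh\necho hello',
    })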
3,365
vtkiorg/vtki
vtki/container.py
MultiBlock._get_attrs
def _get_attrs(self): """An internal helper for the representation methods""" attrs = [] attrs.append(("N Blocks", self.n_blocks, "{}")) bds = self.bounds attrs.append(("X Bounds", (bds[0], bds[1]), "{:.3f}, {:.3f}")) attrs.append(("Y Bounds", (bds[2], bds[3]), "{:.3f}, {:.3f}")) attrs.append(("Z Bounds", (bds[4], bds[5]), "{:.3f}, {:.3f}")) return attrs
python
def _get_attrs(self): """An internal helper for the representation methods""" attrs = [] attrs.append(("N Blocks", self.n_blocks, "{}")) bds = self.bounds attrs.append(("X Bounds", (bds[0], bds[1]), "{:.3f}, {:.3f}")) attrs.append(("Y Bounds", (bds[2], bds[3]), "{:.3f}, {:.3f}")) attrs.append(("Z Bounds", (bds[4], bds[5]), "{:.3f}, {:.3f}")) return attrs
['def', '_get_attrs', '(', 'self', ')', ':', 'attrs', '=', '[', ']', 'attrs', '.', 'append', '(', '(', '"N Blocks"', ',', 'self', '.', 'n_blocks', ',', '"{}"', ')', ')', 'bds', '=', 'self', '.', 'bounds', 'attrs', '.', 'append', '(', '(', '"X Bounds"', ',', '(', 'bds', '[', '0', ']', ',', 'bds', '[', '1', ']', ')', ',', '"{:.3f}, {:.3f}"', ')', ')', 'attrs', '.', 'append', '(', '(', '"Y Bounds"', ',', '(', 'bds', '[', '2', ']', ',', 'bds', '[', '3', ']', ')', ',', '"{:.3f}, {:.3f}"', ')', ')', 'attrs', '.', 'append', '(', '(', '"Z Bounds"', ',', '(', 'bds', '[', '4', ']', ',', 'bds', '[', '5', ']', ')', ',', '"{:.3f}, {:.3f}"', ')', ')', 'return', 'attrs']
An internal helper for the representation methods
['An', 'internal', 'helper', 'for', 'the', 'representation', 'methods']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L356-L364
3,366
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
OpenStackAmuletUtils.get_rmq_cluster_status
def get_rmq_cluster_status(self, sentry_unit): """Execute rabbitmq cluster status command on a unit and return the full output. :param unit: sentry unit :returns: String containing console output of cluster status command """ cmd = 'rabbitmqctl cluster_status' output, _ = self.run_cmd_unit(sentry_unit, cmd) self.log.debug('{} cluster_status:\n{}'.format( sentry_unit.info['unit_name'], output)) return str(output)
python
def get_rmq_cluster_status(self, sentry_unit): """Execute rabbitmq cluster status command on a unit and return the full output. :param unit: sentry unit :returns: String containing console output of cluster status command """ cmd = 'rabbitmqctl cluster_status' output, _ = self.run_cmd_unit(sentry_unit, cmd) self.log.debug('{} cluster_status:\n{}'.format( sentry_unit.info['unit_name'], output)) return str(output)
['def', 'get_rmq_cluster_status', '(', 'self', ',', 'sentry_unit', ')', ':', 'cmd', '=', "'rabbitmqctl cluster_status'", 'output', ',', '_', '=', 'self', '.', 'run_cmd_unit', '(', 'sentry_unit', ',', 'cmd', ')', 'self', '.', 'log', '.', 'debug', '(', "'{} cluster_status:\\n{}'", '.', 'format', '(', 'sentry_unit', '.', 'info', '[', "'unit_name'", ']', ',', 'output', ')', ')', 'return', 'str', '(', 'output', ')']
Execute rabbitmq cluster status command on a unit and return the full output. :param unit: sentry unit :returns: String containing console output of cluster status command
['Execute', 'rabbitmq', 'cluster', 'status', 'command', 'on', 'a', 'unit', 'and', 'return', 'the', 'full', 'output', '.']
train
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L1218-L1229
3,367
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.find_sources_in_image
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None, innerclip=5, outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None, doislandflux=False, nopositive=False, nonegative=False, mask=None, lat=None, imgpsf=None, blank=False, docov=True, cube_index=None): """ Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found. """ # Tell numpy to be quiet np.seterr(invalid='ignore') if cores is not None: if not (cores >= 1): raise AssertionError("cores must be one or more") self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg, cores=cores, verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov, cube_index=cube_index) global_data = self.global_data rmsimg = global_data.rmsimg data = global_data.data_pix self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format( global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa)) # stop people from doing silly things. if outerclip > innerclip: outerclip = innerclip self.log.info("seedclip={0}".format(innerclip)) self.log.info("floodclip={0}".format(outerclip)) isle_num = 0 if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands)) island_group = [] group_size = 20 for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True): # ignore empty islands # This should now be impossible to trigger if np.size(i) < 1: self.log.warn("Empty island detected, this should be imposisble.") continue isle_num += 1 scalars = (innerclip, outerclip, max_summits) offsets = (xmin, xmax, ymin, ymax) island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux) # If cores==1 run fitting in main process. Otherwise build up groups of islands # and submit to queue for subprocesses. 
Passing a group of islands is more # efficient than passing single islands to the subprocesses. if cores == 1: res = self._fit_island(island_data) queue.append(res) else: island_group.append(island_data) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: fit_parallel(island_group) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: fit_parallel(island_group) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) sources = [] for srcs in queue: if srcs: # ignore empty lists for src in srcs: # ignore sources that we have been told to ignore if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative): continue sources.append(src) if outfile: print(str(src), file=outfile) self.sources.extend(sources) return sources
python
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None, innerclip=5, outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None, doislandflux=False, nopositive=False, nonegative=False, mask=None, lat=None, imgpsf=None, blank=False, docov=True, cube_index=None): """ Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found. """ # Tell numpy to be quiet np.seterr(invalid='ignore') if cores is not None: if not (cores >= 1): raise AssertionError("cores must be one or more") self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg, cores=cores, verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov, cube_index=cube_index) global_data = self.global_data rmsimg = global_data.rmsimg data = global_data.data_pix self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format( global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa)) # stop people from doing silly things. if outerclip > innerclip: outerclip = innerclip self.log.info("seedclip={0}".format(innerclip)) self.log.info("floodclip={0}".format(outerclip)) isle_num = 0 if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands)) island_group = [] group_size = 20 for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True): # ignore empty islands # This should now be impossible to trigger if np.size(i) < 1: self.log.warn("Empty island detected, this should be imposisble.") continue isle_num += 1 scalars = (innerclip, outerclip, max_summits) offsets = (xmin, xmax, ymin, ymax) island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux) # If cores==1 run fitting in main process. Otherwise build up groups of islands # and submit to queue for subprocesses. 
Passing a group of islands is more # efficient than passing single islands to the subprocesses. if cores == 1: res = self._fit_island(island_data) queue.append(res) else: island_group.append(island_data) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: fit_parallel(island_group) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: fit_parallel(island_group) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) sources = [] for srcs in queue: if srcs: # ignore empty lists for src in srcs: # ignore sources that we have been told to ignore if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative): continue sources.append(src) if outfile: print(str(src), file=outfile) self.sources.extend(sources) return sources
['def', 'find_sources_in_image', '(', 'self', ',', 'filename', ',', 'hdu_index', '=', '0', ',', 'outfile', '=', 'None', ',', 'rms', '=', 'None', ',', 'bkg', '=', 'None', ',', 'max_summits', '=', 'None', ',', 'innerclip', '=', '5', ',', 'outerclip', '=', '4', ',', 'cores', '=', 'None', ',', 'rmsin', '=', 'None', ',', 'bkgin', '=', 'None', ',', 'beam', '=', 'None', ',', 'doislandflux', '=', 'False', ',', 'nopositive', '=', 'False', ',', 'nonegative', '=', 'False', ',', 'mask', '=', 'None', ',', 'lat', '=', 'None', ',', 'imgpsf', '=', 'None', ',', 'blank', '=', 'False', ',', 'docov', '=', 'True', ',', 'cube_index', '=', 'None', ')', ':', '# Tell numpy to be quiet', 'np', '.', 'seterr', '(', 'invalid', '=', "'ignore'", ')', 'if', 'cores', 'is', 'not', 'None', ':', 'if', 'not', '(', 'cores', '>=', '1', ')', ':', 'raise', 'AssertionError', '(', '"cores must be one or more"', ')', 'self', '.', 'load_globals', '(', 'filename', ',', 'hdu_index', '=', 'hdu_index', ',', 'bkgin', '=', 'bkgin', ',', 'rmsin', '=', 'rmsin', ',', 'beam', '=', 'beam', ',', 'rms', '=', 'rms', ',', 'bkg', '=', 'bkg', ',', 'cores', '=', 'cores', ',', 'verb', '=', 'True', ',', 'mask', '=', 'mask', ',', 'lat', '=', 'lat', ',', 'psf', '=', 'imgpsf', ',', 'blank', '=', 'blank', ',', 'docov', '=', 'docov', ',', 'cube_index', '=', 'cube_index', ')', 'global_data', '=', 'self', '.', 'global_data', 'rmsimg', '=', 'global_data', '.', 'rmsimg', 'data', '=', 'global_data', '.', 'data_pix', 'self', '.', 'log', '.', 'info', '(', '"beam = {0:5.2f}\'\' x {1:5.2f}\'\' at {2:5.2f}deg"', '.', 'format', '(', 'global_data', '.', 'beam', '.', 'a', '*', '3600', ',', 'global_data', '.', 'beam', '.', 'b', '*', '3600', ',', 'global_data', '.', 'beam', '.', 'pa', ')', ')', '# stop people from doing silly things.', 'if', 'outerclip', '>', 'innerclip', ':', 'outerclip', '=', 'innerclip', 'self', '.', 'log', '.', 'info', '(', '"seedclip={0}"', '.', 'format', '(', 'innerclip', ')', ')', 'self', '.', 'log', '.', 'info', '(', '"floodclip={0}"', '.', 'format', '(', 'outerclip', ')', ')', 'isle_num', '=', '0', 'if', 'cores', '==', '1', ':', '# single-threaded, no parallel processing', 'queue', '=', '[', ']', 'else', ':', 'queue', '=', 'pprocess', '.', 'Queue', '(', 'limit', '=', 'cores', ',', 'reuse', '=', '1', ')', 'fit_parallel', '=', 'queue', '.', 'manage', '(', 'pprocess', '.', 'MakeReusable', '(', 'self', '.', '_fit_islands', ')', ')', 'island_group', '=', '[', ']', 'group_size', '=', '20', 'for', 'i', ',', 'xmin', ',', 'xmax', ',', 'ymin', ',', 'ymax', 'in', 'self', '.', '_gen_flood_wrap', '(', 'data', ',', 'rmsimg', ',', 'innerclip', ',', 'outerclip', ',', 'domask', '=', 'True', ')', ':', '# ignore empty islands', '# This should now be impossible to trigger', 'if', 'np', '.', 'size', '(', 'i', ')', '<', '1', ':', 'self', '.', 'log', '.', 'warn', '(', '"Empty island detected, this should be imposisble."', ')', 'continue', 'isle_num', '+=', '1', 'scalars', '=', '(', 'innerclip', ',', 'outerclip', ',', 'max_summits', ')', 'offsets', '=', '(', 'xmin', ',', 'xmax', ',', 'ymin', ',', 'ymax', ')', 'island_data', '=', 'IslandFittingData', '(', 'isle_num', ',', 'i', ',', 'scalars', ',', 'offsets', ',', 'doislandflux', ')', '# If cores==1 run fitting in main process. Otherwise build up groups of islands', '# and submit to queue for subprocesses. 
Passing a group of islands is more', '# efficient than passing single islands to the subprocesses.', 'if', 'cores', '==', '1', ':', 'res', '=', 'self', '.', '_fit_island', '(', 'island_data', ')', 'queue', '.', 'append', '(', 'res', ')', 'else', ':', 'island_group', '.', 'append', '(', 'island_data', ')', '# If the island group is full queue it for the subprocesses to fit', 'if', 'len', '(', 'island_group', ')', '>=', 'group_size', ':', 'fit_parallel', '(', 'island_group', ')', 'island_group', '=', '[', ']', '# The last partially-filled island group also needs to be queued for fitting', 'if', 'len', '(', 'island_group', ')', '>', '0', ':', 'fit_parallel', '(', 'island_group', ')', '# Write the output to the output file', 'if', 'outfile', ':', 'print', '(', 'header', '.', 'format', '(', '"{0}-({1})"', '.', 'format', '(', '__version__', ',', '__date__', ')', ',', 'filename', ')', ',', 'file', '=', 'outfile', ')', 'print', '(', 'OutputSource', '.', 'header', ',', 'file', '=', 'outfile', ')', 'sources', '=', '[', ']', 'for', 'srcs', 'in', 'queue', ':', 'if', 'srcs', ':', '# ignore empty lists', 'for', 'src', 'in', 'srcs', ':', '# ignore sources that we have been told to ignore', 'if', '(', 'src', '.', 'peak_flux', '>', '0', 'and', 'nopositive', ')', 'or', '(', 'src', '.', 'peak_flux', '<', '0', 'and', 'nonegative', ')', ':', 'continue', 'sources', '.', 'append', '(', 'src', ')', 'if', 'outfile', ':', 'print', '(', 'str', '(', 'src', ')', ',', 'file', '=', 'outfile', ')', 'self', '.', 'sources', '.', 'extend', '(', 'sources', ')', 'return', 'sources']
Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found.
['Run', 'the', 'Aegean', 'source', 'finder', '.']
train
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1396-L1540
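A hedged usage sketch for the source finder in the record above; the image filename is a placeholder and the SourceFinder constructor is assumed to require no arguments.

    from AegeanTools.source_finder import SourceFinder

    sf = SourceFinder()                       # constructor arguments assumed optional
    with open('catalog.txt', 'w') as out:     # plain-text catalogue, as described above
        sources = sf.find_sources_in_image('image.fits', outfile=out,
                                            innerclip=5, outerclip=4, cores=1)
    print('%d components found' % len(sources))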
3,368
scanny/python-pptx
pptx/chart/data.py
BubbleSeriesData.add_data_point
def add_data_point(self, x, y, size, number_format=None): """ Append a new BubbleDataPoint object having the values *x*, *y*, and *size*. The optional *number_format* is used to format the Y value. If not provided, the number format is inherited from the series data. """ data_point = BubbleDataPoint(self, x, y, size, number_format) self.append(data_point) return data_point
python
def add_data_point(self, x, y, size, number_format=None): """ Append a new BubbleDataPoint object having the values *x*, *y*, and *size*. The optional *number_format* is used to format the Y value. If not provided, the number format is inherited from the series data. """ data_point = BubbleDataPoint(self, x, y, size, number_format) self.append(data_point) return data_point
['def', 'add_data_point', '(', 'self', ',', 'x', ',', 'y', ',', 'size', ',', 'number_format', '=', 'None', ')', ':', 'data_point', '=', 'BubbleDataPoint', '(', 'self', ',', 'x', ',', 'y', ',', 'size', ',', 'number_format', ')', 'self', '.', 'append', '(', 'data_point', ')', 'return', 'data_point']
Append a new BubbleDataPoint object having the values *x*, *y*, and *size*. The optional *number_format* is used to format the Y value. If not provided, the number format is inherited from the series data.
['Append', 'a', 'new', 'BubbleDataPoint', 'object', 'having', 'the', 'values', '*', 'x', '*', '*', 'y', '*', 'and', '*', 'size', '*', '.', 'The', 'optional', '*', 'number_format', '*', 'is', 'used', 'to', 'format', 'the', 'Y', 'value', '.', 'If', 'not', 'provided', 'the', 'number', 'format', 'is', 'inherited', 'from', 'the', 'series', 'data', '.']
train
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/chart/data.py#L769-L777
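A short usage sketch for the record above: building bubble-chart data with python-pptx (the values are illustrative).

    from pptx.chart.data import BubbleChartData

    chart_data = BubbleChartData()
    series = chart_data.add_series('Series 1')
    series.add_data_point(1.0, 2.0, 10)                        # x, y, bubble size
    series.add_data_point(3.5, 4.2, 25, number_format='0.0')   # overrides the series format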
3,369
pyviz/holoviews
holoviews/util/parser.py
Parser.collect_tokens
def collect_tokens(cls, parseresult, mode): """ Collect the tokens from a (potentially) nested parse result. """ inner = '(%s)' if mode=='parens' else '[%s]' if parseresult is None: return [] tokens = [] for token in parseresult.asList(): # If value is a tuple, the token will be a list if isinstance(token, list): token = cls.recurse_token(token, inner) tokens[-1] = tokens[-1] + token else: if token.strip() == ',': continue tokens.append(cls._strip_commas(token)) return tokens
python
def collect_tokens(cls, parseresult, mode): """ Collect the tokens from a (potentially) nested parse result. """ inner = '(%s)' if mode=='parens' else '[%s]' if parseresult is None: return [] tokens = [] for token in parseresult.asList(): # If value is a tuple, the token will be a list if isinstance(token, list): token = cls.recurse_token(token, inner) tokens[-1] = tokens[-1] + token else: if token.strip() == ',': continue tokens.append(cls._strip_commas(token)) return tokens
['def', 'collect_tokens', '(', 'cls', ',', 'parseresult', ',', 'mode', ')', ':', 'inner', '=', "'(%s)'", 'if', 'mode', '==', "'parens'", 'else', "'[%s]'", 'if', 'parseresult', 'is', 'None', ':', 'return', '[', ']', 'tokens', '=', '[', ']', 'for', 'token', 'in', 'parseresult', '.', 'asList', '(', ')', ':', '# If value is a tuple, the token will be a list', 'if', 'isinstance', '(', 'token', ',', 'list', ')', ':', 'token', '=', 'cls', '.', 'recurse_token', '(', 'token', ',', 'inner', ')', 'tokens', '[', '-', '1', ']', '=', 'tokens', '[', '-', '1', ']', '+', 'token', 'else', ':', 'if', 'token', '.', 'strip', '(', ')', '==', "','", ':', 'continue', 'tokens', '.', 'append', '(', 'cls', '.', '_strip_commas', '(', 'token', ')', ')', 'return', 'tokens']
Collect the tokens from a (potentially) nested parse result.
['Collect', 'the', 'tokens', 'from', 'a', '(', 'potentially', ')', 'nested', 'parse', 'result', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/parser.py#L63-L78
3,370
pantsbuild/pants
src/python/pants/backend/jvm/tasks/jar_publish.py
PushDb.dump
def dump(self, path): """Saves the pushdb as a properties file to the given path.""" with open(path, 'w') as props: Properties.dump(self._props, props)
python
def dump(self, path): """Saves the pushdb as a properties file to the given path.""" with open(path, 'w') as props: Properties.dump(self._props, props)
['def', 'dump', '(', 'self', ',', 'path', ')', ':', 'with', 'open', '(', 'path', ',', "'w'", ')', 'as', 'props', ':', 'Properties', '.', 'dump', '(', 'self', '.', '_props', ',', 'props', ')']
Saves the pushdb as a properties file to the given path.
['Saves', 'the', 'pushdb', 'as', 'a', 'properties', 'file', 'to', 'the', 'given', 'path', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/jar_publish.py#L144-L147
3,371
tweepy/tweepy
tweepy/api.py
API.remove_list_members
def remove_list_members(self, screen_name=None, user_id=None, slug=None, list_id=None, owner_id=None, owner_screen_name=None): """ Perform bulk remove of list members from user ID or screenname """ return self._remove_list_members(list_to_csv(screen_name), list_to_csv(user_id), slug, list_id, owner_id, owner_screen_name)
python
def remove_list_members(self, screen_name=None, user_id=None, slug=None, list_id=None, owner_id=None, owner_screen_name=None): """ Perform bulk remove of list members from user ID or screenname """ return self._remove_list_members(list_to_csv(screen_name), list_to_csv(user_id), slug, list_id, owner_id, owner_screen_name)
['def', 'remove_list_members', '(', 'self', ',', 'screen_name', '=', 'None', ',', 'user_id', '=', 'None', ',', 'slug', '=', 'None', ',', 'list_id', '=', 'None', ',', 'owner_id', '=', 'None', ',', 'owner_screen_name', '=', 'None', ')', ':', 'return', 'self', '.', '_remove_list_members', '(', 'list_to_csv', '(', 'screen_name', ')', ',', 'list_to_csv', '(', 'user_id', ')', ',', 'slug', ',', 'list_id', ',', 'owner_id', ',', 'owner_screen_name', ')']
Perform bulk remove of list members from user ID or screenname
['Perform', 'bulk', 'remove', 'of', 'list', 'members', 'from', 'user', 'ID', 'or', 'screenname']
train
https://github.com/tweepy/tweepy/blob/cc3894073905811c4d9fd816202f93454ed932da/tweepy/api.py#L1097-L1103
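A hedged usage sketch for the bulk-removal helper in the record above; credentials, screen names, and the list slug are placeholders.

    import tweepy

    auth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
    auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
    api = tweepy.API(auth)

    # Remove two members at once from the list owned by 'me' with slug 'colleagues'.
    api.remove_list_members(screen_name=['alice', 'bob'],
                            slug='colleagues', owner_screen_name='me')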
3,372
saltstack/salt
salt/modules/boto_ec2.py
get_tags
def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None): ''' Given an instance_id, return a list of tags associated with that instance. returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt myminion boto_ec2.get_tags instance_id ''' tags = [] client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) result = client.get_all_tags(filters={"resource-id": instance_id}) if result: for tag in result: tags.append({tag.name: tag.value}) else: log.info("No tags found for instance_id %s", instance_id) return tags
python
def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None): ''' Given an instance_id, return a list of tags associated with that instance. returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt myminion boto_ec2.get_tags instance_id ''' tags = [] client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) result = client.get_all_tags(filters={"resource-id": instance_id}) if result: for tag in result: tags.append({tag.name: tag.value}) else: log.info("No tags found for instance_id %s", instance_id) return tags
['def', 'get_tags', '(', 'instance_id', '=', 'None', ',', 'keyid', '=', 'None', ',', 'key', '=', 'None', ',', 'profile', '=', 'None', ',', 'region', '=', 'None', ')', ':', 'tags', '=', '[', ']', 'client', '=', '_get_conn', '(', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ',', 'region', '=', 'region', ')', 'result', '=', 'client', '.', 'get_all_tags', '(', 'filters', '=', '{', '"resource-id"', ':', 'instance_id', '}', ')', 'if', 'result', ':', 'for', 'tag', 'in', 'result', ':', 'tags', '.', 'append', '(', '{', 'tag', '.', 'name', ':', 'tag', '.', 'value', '}', ')', 'else', ':', 'log', '.', 'info', '(', '"No tags found for instance_id %s"', ',', 'instance_id', ')', 'return', 'tags']
Given an instance_id, return a list of tags associated with that instance. returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt myminion boto_ec2.get_tags instance_id
['Given', 'an', 'instance_id', 'return', 'a', 'list', 'of', 'tags', 'associated', 'with', 'that', 'instance', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L751-L773
3,373
CityOfZion/neo-python
neo/Core/TX/TransactionAttribute.py
TransactionAttribute.ToJson
def ToJson(self): """ Convert object members to a dictionary that can be parsed as JSON. Returns: dict: """ obj = { 'usage': self.Usage, 'data': '' if not self.Data else self.Data.hex() } return obj
python
def ToJson(self): """ Convert object members to a dictionary that can be parsed as JSON. Returns: dict: """ obj = { 'usage': self.Usage, 'data': '' if not self.Data else self.Data.hex() } return obj
['def', 'ToJson', '(', 'self', ')', ':', 'obj', '=', '{', "'usage'", ':', 'self', '.', 'Usage', ',', "'data'", ':', "''", 'if', 'not', 'self', '.', 'Data', 'else', 'self', '.', 'Data', '.', 'hex', '(', ')', '}', 'return', 'obj']
Convert object members to a dictionary that can be parsed as JSON. Returns: dict:
['Convert', 'object', 'members', 'to', 'a', 'dictionary', 'that', 'can', 'be', 'parsed', 'as', 'JSON', '.']
train
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/TransactionAttribute.py#L147-L158
3,374
trevorstephens/gplearn
gplearn/genetic.py
SymbolicClassifier.predict_proba
def predict_proba(self, X): """Predict probabilities on test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- proba : array, shape = [n_samples, n_classes] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ if not hasattr(self, '_program'): raise NotFittedError('SymbolicClassifier not fitted.') X = check_array(X) _, n_features = X.shape if self.n_features_ != n_features: raise ValueError('Number of features of the model must match the ' 'input. Model n_features is %s and input ' 'n_features is %s.' % (self.n_features_, n_features)) scores = self._program.execute(X) proba = self._transformer(scores) proba = np.vstack([1 - proba, proba]).T return proba
python
def predict_proba(self, X): """Predict probabilities on test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- proba : array, shape = [n_samples, n_classes] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`. """ if not hasattr(self, '_program'): raise NotFittedError('SymbolicClassifier not fitted.') X = check_array(X) _, n_features = X.shape if self.n_features_ != n_features: raise ValueError('Number of features of the model must match the ' 'input. Model n_features is %s and input ' 'n_features is %s.' % (self.n_features_, n_features)) scores = self._program.execute(X) proba = self._transformer(scores) proba = np.vstack([1 - proba, proba]).T return proba
['def', 'predict_proba', '(', 'self', ',', 'X', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_program'", ')', ':', 'raise', 'NotFittedError', '(', "'SymbolicClassifier not fitted.'", ')', 'X', '=', 'check_array', '(', 'X', ')', '_', ',', 'n_features', '=', 'X', '.', 'shape', 'if', 'self', '.', 'n_features_', '!=', 'n_features', ':', 'raise', 'ValueError', '(', "'Number of features of the model must match the '", "'input. Model n_features is %s and input '", "'n_features is %s.'", '%', '(', 'self', '.', 'n_features_', ',', 'n_features', ')', ')', 'scores', '=', 'self', '.', '_program', '.', 'execute', '(', 'X', ')', 'proba', '=', 'self', '.', '_transformer', '(', 'scores', ')', 'proba', '=', 'np', '.', 'vstack', '(', '[', '1', '-', 'proba', ',', 'proba', ']', ')', '.', 'T', 'return', 'proba']
Predict probabilities on test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- proba : array, shape = [n_samples, n_classes] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`.
['Predict', 'probabilities', 'on', 'test', 'vectors', 'X', '.']
train
https://github.com/trevorstephens/gplearn/blob/5c0465f2ecdcd5abcdf3fe520688d24cd59e4a52/gplearn/genetic.py#L1108-L1138
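A hedged end-to-end sketch around the method in the record above; the toy data and hyperparameters are illustrative only.

    import numpy as np
    from gplearn.genetic import SymbolicClassifier

    rng = np.random.RandomState(0)
    X = rng.uniform(-1, 1, (200, 3))
    y = (X[:, 0] + X[:, 1] > 0).astype(int)    # simple separable toy target

    clf = SymbolicClassifier(population_size=100, generations=5, random_state=0)
    clf.fit(X, y)
    proba = clf.predict_proba(X[:5])           # shape (5, 2); columns follow clf.classes_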
3,375
django-extensions/django-extensions
django_extensions/management/commands/graph_models.py
Command.add_arguments
def add_arguments(self, parser): """Unpack self.arguments for parser.add_arguments.""" parser.add_argument('app_label', nargs='*') for argument in self.arguments: parser.add_argument(*argument.split(' '), **self.arguments[argument])
python
def add_arguments(self, parser): """Unpack self.arguments for parser.add_arguments.""" parser.add_argument('app_label', nargs='*') for argument in self.arguments: parser.add_argument(*argument.split(' '), **self.arguments[argument])
['def', 'add_arguments', '(', 'self', ',', 'parser', ')', ':', 'parser', '.', 'add_argument', '(', "'app_label'", ',', 'nargs', '=', "'*'", ')', 'for', 'argument', 'in', 'self', '.', 'arguments', ':', 'parser', '.', 'add_argument', '(', '*', 'argument', '.', 'split', '(', "' '", ')', ',', '*', '*', 'self', '.', 'arguments', '[', 'argument', ']', ')']
Unpack self.arguments for parser.add_arguments.
['Unpack', 'self', '.', 'arguments', 'for', 'parser', '.', 'add_arguments', '.']
train
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/graph_models.py#L169-L173
3,376
oceanprotocol/squid-py
squid_py/agreements/service_factory.py
ServiceFactory.build_access_service
def build_access_service(did, price, consume_endpoint, service_endpoint, timeout, template_id): """ Build the access service. :param did: DID, str :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: ServiceAgreement """ # TODO fill all the possible mappings param_map = { '_documentId': did_to_id(did), '_amount': price, '_rewardAddress': Keeper.get_instance().escrow_reward_condition.address, } sla_template_path = get_sla_template_path() sla_template = ServiceAgreementTemplate.from_json_file(sla_template_path) sla_template.template_id = template_id conditions = sla_template.conditions[:] for cond in conditions: for param in cond.parameters: param.value = param_map.get(param.name, '') if cond.timeout > 0: cond.timeout = timeout sla_template.set_conditions(conditions) sa = ServiceAgreement( 1, sla_template, consume_endpoint, service_endpoint, ServiceTypes.ASSET_ACCESS ) sa.set_did(did) return sa
python
def build_access_service(did, price, consume_endpoint, service_endpoint, timeout, template_id): """ Build the access service. :param did: DID, str :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: ServiceAgreement """ # TODO fill all the possible mappings param_map = { '_documentId': did_to_id(did), '_amount': price, '_rewardAddress': Keeper.get_instance().escrow_reward_condition.address, } sla_template_path = get_sla_template_path() sla_template = ServiceAgreementTemplate.from_json_file(sla_template_path) sla_template.template_id = template_id conditions = sla_template.conditions[:] for cond in conditions: for param in cond.parameters: param.value = param_map.get(param.name, '') if cond.timeout > 0: cond.timeout = timeout sla_template.set_conditions(conditions) sa = ServiceAgreement( 1, sla_template, consume_endpoint, service_endpoint, ServiceTypes.ASSET_ACCESS ) sa.set_did(did) return sa
['def', 'build_access_service', '(', 'did', ',', 'price', ',', 'consume_endpoint', ',', 'service_endpoint', ',', 'timeout', ',', 'template_id', ')', ':', '# TODO fill all the possible mappings', 'param_map', '=', '{', "'_documentId'", ':', 'did_to_id', '(', 'did', ')', ',', "'_amount'", ':', 'price', ',', "'_rewardAddress'", ':', 'Keeper', '.', 'get_instance', '(', ')', '.', 'escrow_reward_condition', '.', 'address', ',', '}', 'sla_template_path', '=', 'get_sla_template_path', '(', ')', 'sla_template', '=', 'ServiceAgreementTemplate', '.', 'from_json_file', '(', 'sla_template_path', ')', 'sla_template', '.', 'template_id', '=', 'template_id', 'conditions', '=', 'sla_template', '.', 'conditions', '[', ':', ']', 'for', 'cond', 'in', 'conditions', ':', 'for', 'param', 'in', 'cond', '.', 'parameters', ':', 'param', '.', 'value', '=', 'param_map', '.', 'get', '(', 'param', '.', 'name', ',', "''", ')', 'if', 'cond', '.', 'timeout', '>', '0', ':', 'cond', '.', 'timeout', '=', 'timeout', 'sla_template', '.', 'set_conditions', '(', 'conditions', ')', 'sa', '=', 'ServiceAgreement', '(', '1', ',', 'sla_template', ',', 'consume_endpoint', ',', 'service_endpoint', ',', 'ServiceTypes', '.', 'ASSET_ACCESS', ')', 'sa', '.', 'set_did', '(', 'did', ')', 'return', 'sa']
Build the access service. :param did: DID, str :param price: Asset price, int :param consume_endpoint: url of the service provider, str :param service_endpoint: identifier of the service inside the asset DDO, str :param timeout: amount of time in seconds before the agreement expires, int :param template_id: id of the template use to create the service, str :return: ServiceAgreement
['Build', 'the', 'access', 'service', '.']
train
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/service_factory.py#L166-L204
3,377
projectshift/shift-boiler
boiler/collections/api_collection.py
ApiCollection.dict
def dict(self): """ Returns current collection as a dictionary """ collection = super().dict() serialized_items = [] for item in collection['items']: serialized_items.append(self.serializer(item)) collection['items'] = serialized_items return collection
python
def dict(self): """ Returns current collection as a dictionary """ collection = super().dict() serialized_items = [] for item in collection['items']: serialized_items.append(self.serializer(item)) collection['items'] = serialized_items return collection
['def', 'dict', '(', 'self', ')', ':', 'collection', '=', 'super', '(', ')', '.', 'dict', '(', ')', 'serialized_items', '=', '[', ']', 'for', 'item', 'in', 'collection', '[', "'items'", ']', ':', 'serialized_items', '.', 'append', '(', 'self', '.', 'serializer', '(', 'item', ')', ')', 'collection', '[', "'items'", ']', '=', 'serialized_items', 'return', 'collection']
Returns current collection as a dictionary
['Returns', 'current', 'collection', 'as', 'a', 'dictionary']
train
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/collections/api_collection.py#L23-L31
3,378
troeger/opensubmit
web/opensubmit/admin/submission.py
SubmissionAdmin.file_link
def file_link(self, instance): ''' Renders the link to the student upload file. ''' sfile = instance.file_upload if not sfile: return mark_safe('No file submitted by student.') else: return mark_safe('<a href="%s">%s</a><br/>(<a href="%s" target="_new">Preview</a>)' % (sfile.get_absolute_url(), sfile.basename(), sfile.get_preview_url()))
python
def file_link(self, instance): ''' Renders the link to the student upload file. ''' sfile = instance.file_upload if not sfile: return mark_safe('No file submitted by student.') else: return mark_safe('<a href="%s">%s</a><br/>(<a href="%s" target="_new">Preview</a>)' % (sfile.get_absolute_url(), sfile.basename(), sfile.get_preview_url()))
['def', 'file_link', '(', 'self', ',', 'instance', ')', ':', 'sfile', '=', 'instance', '.', 'file_upload', 'if', 'not', 'sfile', ':', 'return', 'mark_safe', '(', "'No file submitted by student.'", ')', 'else', ':', 'return', 'mark_safe', '(', '\'<a href="%s">%s</a><br/>(<a href="%s" target="_new">Preview</a>)\'', '%', '(', 'sfile', '.', 'get_absolute_url', '(', ')', ',', 'sfile', '.', 'basename', '(', ')', ',', 'sfile', '.', 'get_preview_url', '(', ')', ')', ')']
Renders the link to the student upload file.
['Renders', 'the', 'link', 'to', 'the', 'student', 'upload', 'file', '.']
train
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/admin/submission.py#L153-L161
3,379
thunder-project/thunder
thunder/readers.py
BotoParallelReader.getfiles
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False): """ Get scheme, bucket, and keys for a set of files """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(parse[1]) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) keys = BotoClient.retrieve_keys( bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive) keylist = [key.name for key in keys] if ext: if ext == 'tif' or ext == 'tiff': keylist = [keyname for keyname in keylist if keyname.endswith('tif')] keylist.append([keyname for keyname in keylist if keyname.endswith('tiff')]) else: keylist = [keyname for keyname in keylist if keyname.endswith(ext)] keylist.sort() keylist = select(keylist, start, stop) return scheme, bucket.name, keylist
python
def getfiles(self, path, ext=None, start=None, stop=None, recursive=False): """ Get scheme, bucket, and keys for a set of files """ from .utils import connection_with_anon, connection_with_gs parse = BotoClient.parse_query(path) scheme = parse[0] bucket_name = parse[1] if scheme == 's3' or scheme == 's3n': conn = connection_with_anon(self.credentials) bucket = conn.get_bucket(parse[1]) elif scheme == 'gs': conn = connection_with_gs(bucket_name) bucket = conn.get_bucket() else: raise NotImplementedError("No file reader implementation for URL scheme " + scheme) keys = BotoClient.retrieve_keys( bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive) keylist = [key.name for key in keys] if ext: if ext == 'tif' or ext == 'tiff': keylist = [keyname for keyname in keylist if keyname.endswith('tif')] keylist.append([keyname for keyname in keylist if keyname.endswith('tiff')]) else: keylist = [keyname for keyname in keylist if keyname.endswith(ext)] keylist.sort() keylist = select(keylist, start, stop) return scheme, bucket.name, keylist
['def', 'getfiles', '(', 'self', ',', 'path', ',', 'ext', '=', 'None', ',', 'start', '=', 'None', ',', 'stop', '=', 'None', ',', 'recursive', '=', 'False', ')', ':', 'from', '.', 'utils', 'import', 'connection_with_anon', ',', 'connection_with_gs', 'parse', '=', 'BotoClient', '.', 'parse_query', '(', 'path', ')', 'scheme', '=', 'parse', '[', '0', ']', 'bucket_name', '=', 'parse', '[', '1', ']', 'if', 'scheme', '==', "'s3'", 'or', 'scheme', '==', "'s3n'", ':', 'conn', '=', 'connection_with_anon', '(', 'self', '.', 'credentials', ')', 'bucket', '=', 'conn', '.', 'get_bucket', '(', 'parse', '[', '1', ']', ')', 'elif', 'scheme', '==', "'gs'", ':', 'conn', '=', 'connection_with_gs', '(', 'bucket_name', ')', 'bucket', '=', 'conn', '.', 'get_bucket', '(', ')', 'else', ':', 'raise', 'NotImplementedError', '(', '"No file reader implementation for URL scheme "', '+', 'scheme', ')', 'keys', '=', 'BotoClient', '.', 'retrieve_keys', '(', 'bucket', ',', 'parse', '[', '2', ']', ',', 'prefix', '=', 'parse', '[', '3', ']', ',', 'postfix', '=', 'parse', '[', '4', ']', ',', 'recursive', '=', 'recursive', ')', 'keylist', '=', '[', 'key', '.', 'name', 'for', 'key', 'in', 'keys', ']', 'if', 'ext', ':', 'if', 'ext', '==', "'tif'", 'or', 'ext', '==', "'tiff'", ':', 'keylist', '=', '[', 'keyname', 'for', 'keyname', 'in', 'keylist', 'if', 'keyname', '.', 'endswith', '(', "'tif'", ')', ']', 'keylist', '.', 'append', '(', '[', 'keyname', 'for', 'keyname', 'in', 'keylist', 'if', 'keyname', '.', 'endswith', '(', "'tiff'", ')', ']', ')', 'else', ':', 'keylist', '=', '[', 'keyname', 'for', 'keyname', 'in', 'keylist', 'if', 'keyname', '.', 'endswith', '(', 'ext', ')', ']', 'keylist', '.', 'sort', '(', ')', 'keylist', '=', 'select', '(', 'keylist', ',', 'start', ',', 'stop', ')', 'return', 'scheme', ',', 'bucket', '.', 'name', ',', 'keylist']
Get scheme, bucket, and keys for a set of files
['Get', 'scheme', 'bucket', 'and', 'keys', 'for', 'a', 'set', 'of', 'files']
train
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/readers.py#L328-L361
3,380
CivicSpleen/ckcache
ckcache/filesystem.py
FsLimitedCache._free_up_space
def _free_up_space(self, size, this_rel_path=None): '''If there are not size bytes of space left, delete files until there is Args: size: size of the current file this_rel_path: rel_pat to the current file, so we don't delete it. ''' # Amount of space we are over ( bytes ) for next put space = self.size + size - self.maxsize if space <= 0: return removes = [] for row in self.database.execute("SELECT path, size, time FROM files ORDER BY time ASC"): if space > 0: removes.append(row[0]) space -= row[1] else: break for rel_path in removes: if rel_path != this_rel_path: global_logger.debug("Deleting {}".format(rel_path)) self.remove(rel_path)
python
def _free_up_space(self, size, this_rel_path=None): '''If there are not size bytes of space left, delete files until there is Args: size: size of the current file this_rel_path: rel_pat to the current file, so we don't delete it. ''' # Amount of space we are over ( bytes ) for next put space = self.size + size - self.maxsize if space <= 0: return removes = [] for row in self.database.execute("SELECT path, size, time FROM files ORDER BY time ASC"): if space > 0: removes.append(row[0]) space -= row[1] else: break for rel_path in removes: if rel_path != this_rel_path: global_logger.debug("Deleting {}".format(rel_path)) self.remove(rel_path)
['def', '_free_up_space', '(', 'self', ',', 'size', ',', 'this_rel_path', '=', 'None', ')', ':', '# Amount of space we are over ( bytes ) for next put', 'space', '=', 'self', '.', 'size', '+', 'size', '-', 'self', '.', 'maxsize', 'if', 'space', '<=', '0', ':', 'return', 'removes', '=', '[', ']', 'for', 'row', 'in', 'self', '.', 'database', '.', 'execute', '(', '"SELECT path, size, time FROM files ORDER BY time ASC"', ')', ':', 'if', 'space', '>', '0', ':', 'removes', '.', 'append', '(', 'row', '[', '0', ']', ')', 'space', '-=', 'row', '[', '1', ']', 'else', ':', 'break', 'for', 'rel_path', 'in', 'removes', ':', 'if', 'rel_path', '!=', 'this_rel_path', ':', 'global_logger', '.', 'debug', '(', '"Deleting {}"', '.', 'format', '(', 'rel_path', ')', ')', 'self', '.', 'remove', '(', 'rel_path', ')']
If there are not size bytes of space left, delete files until there is Args: size: size of the current file this_rel_path: rel_pat to the current file, so we don't delete it.
['If', 'there', 'are', 'not', 'size', 'bytes', 'of', 'space', 'left', 'delete', 'files', 'until', 'there', 'is']
train
https://github.com/CivicSpleen/ckcache/blob/0c699b6ba97ff164e9702504f0e1643dd4cd39e1/ckcache/filesystem.py#L508-L537
3,381
raymontag/kppy
kppy/database.py
KPDBv1.close
def close(self): """This method closes the database correctly.""" if self.filepath is not None: if path.isfile(self.filepath+'.lock'): remove(self.filepath+'.lock') self.filepath = None self.read_only = False self.lock() return True else: raise KPError('Can\'t close a not opened file')
python
def close(self): """This method closes the database correctly.""" if self.filepath is not None: if path.isfile(self.filepath+'.lock'): remove(self.filepath+'.lock') self.filepath = None self.read_only = False self.lock() return True else: raise KPError('Can\'t close a not opened file')
['def', 'close', '(', 'self', ')', ':', 'if', 'self', '.', 'filepath', 'is', 'not', 'None', ':', 'if', 'path', '.', 'isfile', '(', 'self', '.', 'filepath', '+', "'.lock'", ')', ':', 'remove', '(', 'self', '.', 'filepath', '+', "'.lock'", ')', 'self', '.', 'filepath', '=', 'None', 'self', '.', 'read_only', '=', 'False', 'self', '.', 'lock', '(', ')', 'return', 'True', 'else', ':', 'raise', 'KPError', '(', "'Can\\'t close a not opened file'", ')']
This method closes the database correctly.
['This', 'method', 'closes', 'the', 'database', 'correctly', '.']
train
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L448-L459
3,382
googledatalab/pydatalab
datalab/bigquery/_api.py
Api.datasets_list
def datasets_list(self, project_id=None, max_results=0, page_token=None): """Issues a request to list the datasets in the project. Args: project_id: the project id to use to fetch the results; use None for the default project. max_results: an optional maximum number of tables to retrieve. page_token: an optional token to continue the retrieval. Returns: A parsed result object. Raises: Exception if there is an error performing the operation. """ if project_id is None: project_id = self._project_id url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, '')) args = {} if max_results != 0: args['maxResults'] = max_results if page_token is not None: args['pageToken'] = page_token return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
python
def datasets_list(self, project_id=None, max_results=0, page_token=None): """Issues a request to list the datasets in the project. Args: project_id: the project id to use to fetch the results; use None for the default project. max_results: an optional maximum number of tables to retrieve. page_token: an optional token to continue the retrieval. Returns: A parsed result object. Raises: Exception if there is an error performing the operation. """ if project_id is None: project_id = self._project_id url = Api._ENDPOINT + (Api._DATASETS_PATH % (project_id, '')) args = {} if max_results != 0: args['maxResults'] = max_results if page_token is not None: args['pageToken'] = page_token return datalab.utils.Http.request(url, args=args, credentials=self._credentials)
['def', 'datasets_list', '(', 'self', ',', 'project_id', '=', 'None', ',', 'max_results', '=', '0', ',', 'page_token', '=', 'None', ')', ':', 'if', 'project_id', 'is', 'None', ':', 'project_id', '=', 'self', '.', '_project_id', 'url', '=', 'Api', '.', '_ENDPOINT', '+', '(', 'Api', '.', '_DATASETS_PATH', '%', '(', 'project_id', ',', "''", ')', ')', 'args', '=', '{', '}', 'if', 'max_results', '!=', '0', ':', 'args', '[', "'maxResults'", ']', '=', 'max_results', 'if', 'page_token', 'is', 'not', 'None', ':', 'args', '[', "'pageToken'", ']', '=', 'page_token', 'return', 'datalab', '.', 'utils', '.', 'Http', '.', 'request', '(', 'url', ',', 'args', '=', 'args', ',', 'credentials', '=', 'self', '.', '_credentials', ')']
Issues a request to list the datasets in the project. Args: project_id: the project id to use to fetch the results; use None for the default project. max_results: an optional maximum number of tables to retrieve. page_token: an optional token to continue the retrieval. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
['Issues', 'a', 'request', 'to', 'list', 'the', 'datasets', 'in', 'the', 'project', '.']
train
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/bigquery/_api.py#L324-L346
3,383
viralogic/py-enumerable
py_linq/py_linq3.py
Enumerable3.intersect
def intersect(self, enumerable, key=lambda x: x): """ Returns enumerable that is the intersection between given enumerable and self :param enumerable: enumerable object :param key: key selector as lambda expression :return: new Enumerable object """ if not isinstance(enumerable, Enumerable3): raise TypeError( u"enumerable parameter must be an instance of Enumerable") return self.join(enumerable, key, key).select(lambda x: x[0])
python
def intersect(self, enumerable, key=lambda x: x): """ Returns enumerable that is the intersection between given enumerable and self :param enumerable: enumerable object :param key: key selector as lambda expression :return: new Enumerable object """ if not isinstance(enumerable, Enumerable3): raise TypeError( u"enumerable parameter must be an instance of Enumerable") return self.join(enumerable, key, key).select(lambda x: x[0])
['def', 'intersect', '(', 'self', ',', 'enumerable', ',', 'key', '=', 'lambda', 'x', ':', 'x', ')', ':', 'if', 'not', 'isinstance', '(', 'enumerable', ',', 'Enumerable3', ')', ':', 'raise', 'TypeError', '(', 'u"enumerable parameter must be an instance of Enumerable"', ')', 'return', 'self', '.', 'join', '(', 'enumerable', ',', 'key', ',', 'key', ')', '.', 'select', '(', 'lambda', 'x', ':', 'x', '[', '0', ']', ')']
Returns enumerable that is the intersection between given enumerable and self :param enumerable: enumerable object :param key: key selector as lambda expression :return: new Enumerable object
['Returns', 'enumerable', 'that', 'is', 'the', 'intersection', 'between', 'given', 'enumerable', 'and', 'self', ':', 'param', 'enumerable', ':', 'enumerable', 'object', ':', 'param', 'key', ':', 'key', 'selector', 'as', 'lambda', 'expression', ':', 'return', ':', 'new', 'Enumerable', 'object']
train
https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq3.py#L434-L445
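A short usage sketch for the record above; the assumption here is that py_linq exposes this class as `Enumerable` at the package level.

    from py_linq import Enumerable

    marks1 = Enumerable([1, 2, 3, 4, 5])
    marks2 = Enumerable([4, 5, 6, 7])
    common = marks1.intersect(marks2).to_list()   # [4, 5]; key defaults to the identity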
3,384
JukeboxPipeline/jukebox-core
src/jukeboxcore/launcher.py
Launcher.launch
def launch(self, args, unknown): """Launch something according to the provided arguments :param args: arguments from the launch parser :type args: Namespace :param unknown: list of unknown arguments :type unknown: list :returns: None :rtype: None :raises: SystemExit """ pm = plugins.PluginManager.get() addon = pm.get_plugin(args.addon) isgui = isinstance(addon, plugins.JB_StandaloneGuiPlugin) if isgui: gui.main.init_gui() print "Launching %s..." % args.addon addon.run() if isgui: app = gui.main.get_qapp() sys.exit(app.exec_())
python
def launch(self, args, unknown): """Launch something according to the provided arguments :param args: arguments from the launch parser :type args: Namespace :param unknown: list of unknown arguments :type unknown: list :returns: None :rtype: None :raises: SystemExit """ pm = plugins.PluginManager.get() addon = pm.get_plugin(args.addon) isgui = isinstance(addon, plugins.JB_StandaloneGuiPlugin) if isgui: gui.main.init_gui() print "Launching %s..." % args.addon addon.run() if isgui: app = gui.main.get_qapp() sys.exit(app.exec_())
['def', 'launch', '(', 'self', ',', 'args', ',', 'unknown', ')', ':', 'pm', '=', 'plugins', '.', 'PluginManager', '.', 'get', '(', ')', 'addon', '=', 'pm', '.', 'get_plugin', '(', 'args', '.', 'addon', ')', 'isgui', '=', 'isinstance', '(', 'addon', ',', 'plugins', '.', 'JB_StandaloneGuiPlugin', ')', 'if', 'isgui', ':', 'gui', '.', 'main', '.', 'init_gui', '(', ')', 'print', '"Launching %s..."', '%', 'args', '.', 'addon', 'addon', '.', 'run', '(', ')', 'if', 'isgui', ':', 'app', '=', 'gui', '.', 'main', '.', 'get_qapp', '(', ')', 'sys', '.', 'exit', '(', 'app', '.', 'exec_', '(', ')', ')']
Launch something according to the provided arguments :param args: arguments from the launch parser :type args: Namespace :param unknown: list of unknown arguments :type unknown: list :returns: None :rtype: None :raises: SystemExit
['Launch', 'something', 'according', 'to', 'the', 'provided', 'arguments']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/launcher.py#L83-L103
3,385
knipknap/exscript
Exscript/util/start.py
start
def start(users, hosts, func, only_authenticate=False, **kwargs):
    """
    Like run(), but automatically logs into the host before passing
    the host to the callback function.

    :type  users: Account|list[Account]
    :param users: The account(s) to use for logging in.
    :type  hosts: Host|list[Host]
    :param hosts: A list of Host objects.
    :type  func: function
    :param func: The callback function.
    :type  only_authenticate: bool
    :param only_authenticate: don't authorize, just authenticate?
    :type  kwargs: dict
    :param kwargs: Passed to the Exscript.Queue constructor.
    """
    if only_authenticate:
        run(users, hosts, autoauthenticate()(func), **kwargs)
    else:
        run(users, hosts, autologin()(func), **kwargs)
python
['def', 'start', '(', 'users', ',', 'hosts', ',', 'func', ',', 'only_authenticate', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'if', 'only_authenticate', ':', 'run', '(', 'users', ',', 'hosts', ',', 'autoauthenticate', '(', ')', '(', 'func', ')', ',', '*', '*', 'kwargs', ')', 'else', ':', 'run', '(', 'users', ',', 'hosts', ',', 'autologin', '(', ')', '(', 'func', ')', ',', '*', '*', 'kwargs', ')']
Like run(), but automatically logs into the host before passing the host to the callback function. :type users: Account|list[Account] :param users: The account(s) to use for logging in. :type hosts: Host|list[Host] :param hosts: A list of Host objects. :type func: function :param func: The callback function. :type only_authenticate: bool :param only_authenticate: don't authorize, just authenticate? :type kwargs: dict :param kwargs: Passed to the Exscript.Queue constructor.
['Like', 'run', '()', 'but', 'automatically', 'logs', 'into', 'the', 'host', 'before', 'passing', 'the', 'host', 'to', 'the', 'callback', 'function', '.']
train
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/util/start.py#L82-L101
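A hedged sketch of calling start() above. The Account and Host imports, the (job, host, conn) callback signature, and the max_threads keyword (forwarded to the Queue constructor) follow Exscript's documented quick-start pattern and are assumptions here; the hostnames and credentials are placeholders.

from Exscript import Account, Host
from Exscript.util.start import start

def show_version(job, host, conn):
    # conn is already logged in because start() wraps the callback in autologin()
    conn.execute('show version')
    print(conn.response)

account = Account('admin', 'password')                  # placeholder credentials
hosts = [Host('ssh://router1'), Host('ssh://router2')]  # placeholder devices
start(account, hosts, show_version, max_threads=2)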
3,386
sony/nnabla
python/src/nnabla/utils/image_utils/__init__.py
minmax_auto_scale
def minmax_auto_scale(img, as_uint16):
    """
    Utility function for rescaling all pixel values of input image to fit the range of uint8.
    Rescaling method is min-max, which is all pixel values are normalized to [0, 1]
    by using img.min() and img.max() and then are scaled up by 255 times.
    If the argument `as_uint16` is True, output image dtype is np.uint16 and
    the range of pixel values is [0, 65535] (scaled up by 65535 after normalized to [0, 1]).

    :param img (numpy.ndarray): input image.
    :param as_uint16: If True, output image dtype is uint16.
    :return: numpy.ndarray
    """
    if as_uint16:
        output_high = 65535
        output_type = np.uint16
    else:
        output_high = 255
        output_type = np.uint8

    return rescale_pixel_intensity(img, input_low=img.min(), input_high=img.max(),
                                   output_low=0, output_high=output_high,
                                   output_type=output_type)
python
['def', 'minmax_auto_scale', '(', 'img', ',', 'as_uint16', ')', ':', 'if', 'as_uint16', ':', 'output_high', '=', '65535', 'output_type', '=', 'np', '.', 'uint16', 'else', ':', 'output_high', '=', '255', 'output_type', '=', 'np', '.', 'uint8', 'return', 'rescale_pixel_intensity', '(', 'img', ',', 'input_low', '=', 'img', '.', 'min', '(', ')', ',', 'input_high', '=', 'img', '.', 'max', '(', ')', ',', 'output_low', '=', '0', ',', 'output_high', '=', 'output_high', ',', 'output_type', '=', 'output_type', ')']
Utility function for rescaling all pixel values of input image to fit the range of uint8. Rescaling method is min-max, which is all pixel values are normalized to [0, 1] by using img.min() and img.max() and then are scaled up by 255 times. If the argument `as_uint16` is True, output image dtype is np.uint16 and the range of pixel values is [0, 65535] (scaled up by 65535 after normalized to [0, 1]). :param img (numpy.ndarray): input image. :param as_uint16: If True, output image dtype is uint16. :return: numpy.ndarray
['Utility', 'function', 'for', 'rescaling', 'all', 'pixel', 'values', 'of', 'input', 'image', 'to', 'fit', 'the', 'range', 'of', 'uint8', '.', 'Rescaling', 'method', 'is', 'min', '-', 'max', 'which', 'is', 'all', 'pixel', 'values', 'are', 'normalized', 'to', '[', '0', '1', ']', 'by', 'using', 'img', '.', 'min', '()', 'and', 'img', '.', 'max', '()', 'and', 'then', 'are', 'scaled', 'up', 'by', '255', 'times', '.', 'If', 'the', 'argument', 'as_uint16', 'is', 'True', 'output', 'image', 'dtype', 'is', 'np', '.', 'uint16', 'and', 'the', 'range', 'of', 'pixel', 'values', 'is', '[', '0', '65535', ']', '(', 'scaled', 'up', 'by', '65535', 'after', 'normalized', 'to', '[', '0', '1', ']', ')', '.']
train
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/utils/image_utils/__init__.py#L57-L77
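A minimal sketch of minmax_auto_scale above. The import path simply mirrors the module shown in the record and is not necessarily public API; the input array is arbitrary.

import numpy as np
from nnabla.utils.image_utils import minmax_auto_scale

img = np.array([[0.2, 0.5], [0.8, 1.4]])         # arbitrary float image
out8 = minmax_auto_scale(img, as_uint16=False)   # min-max rescaled to 0..255, dtype uint8
out16 = minmax_auto_scale(img, as_uint16=True)   # min-max rescaled to 0..65535, dtype uint16
print(out8.dtype, out8.min(), out8.max())        # uint8 0 255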
3,387
qba73/circleclient
circleclient/circleclient.py
Build.recent_all_projects
def recent_all_projects(self, limit=30, offset=0):
    """Return information about recent builds across all projects.

    Args:
        limit (int), Number of builds to return, max=100, defaults=30.
        offset (int): Builds returned from this point, default=0.

    Returns:
        A list of dictionaries.
    """
    method = 'GET'
    url = ('/recent-builds?circle-token={token}&limit={limit}&'
           'offset={offset}'.format(token=self.client.api_token,
                                    limit=limit,
                                    offset=offset))
    json_data = self.client.request(method, url)
    return json_data
python
['def', 'recent_all_projects', '(', 'self', ',', 'limit', '=', '30', ',', 'offset', '=', '0', ')', ':', 'method', '=', "'GET'", 'url', '=', '(', "'/recent-builds?circle-token={token}&limit={limit}&'", "'offset={offset}'", '.', 'format', '(', 'token', '=', 'self', '.', 'client', '.', 'api_token', ',', 'limit', '=', 'limit', ',', 'offset', '=', 'offset', ')', ')', 'json_data', '=', 'self', '.', 'client', '.', 'request', '(', 'method', ',', 'url', ')', 'return', 'json_data']
Return information about recent builds across all projects. Args: limit (int), Number of builds to return, max=100, defaults=30. offset (int): Builds returned from this point, default=0. Returns: A list of dictionaries.
['Return', 'information', 'about', 'recent', 'builds', 'across', 'all', 'projects', '.']
train
https://github.com/qba73/circleclient/blob/8bf5b093e416c899cc39e43a770c17a5466487b0/circleclient/circleclient.py#L173-L189
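A hedged example of recent_all_projects above. The CircleClient entry point and its build attribute are taken from the project's README and are assumptions here; the token is read from an environment variable purely for illustration.

import os
from circleclient import circleclient

token = os.environ['CIRCLE_TOKEN']                 # personal CircleCI API token
client = circleclient.CircleClient(token)
builds = client.build.recent_all_projects(limit=10, offset=0)
for build in builds:
    print(build)                                   # each item is a plain dict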
3,388
bokeh/bokeh
bokeh/util/serialization.py
convert_datetime_array
def convert_datetime_array(array):
    ''' Convert NumPy datetime arrays to arrays to milliseconds since epoch.

    Args:
        array : (obj)
            A NumPy array of datetime to convert

            If the value passed in is not a NumPy array, it will be returned as-is.

    Returns:
        array
    '''
    if not isinstance(array, np.ndarray):
        return array

    try:
        dt2001 = np.datetime64('2001')
        legacy_datetime64 = (dt2001.astype('int64') ==
                             dt2001.astype('datetime64[ms]').astype('int64'))
    except AttributeError as e:
        if e.args == ("'module' object has no attribute 'datetime64'",):
            # for compatibility with PyPy that doesn't have datetime64
            if 'PyPy' in sys.version:
                legacy_datetime64 = False
                pass
            else:
                raise e
        else:
            raise e

    # not quite correct, truncates to ms..
    if array.dtype.kind == 'M':
        if legacy_datetime64:
            if array.dtype == np.dtype('datetime64[ns]'):
                array = array.astype('int64') / 10**6.0
        else:
            array = array.astype('datetime64[us]').astype('int64') / 1000.
    elif array.dtype.kind == 'm':
        array = array.astype('timedelta64[us]').astype('int64') / 1000.

    return array
python
['def', 'convert_datetime_array', '(', 'array', ')', ':', 'if', 'not', 'isinstance', '(', 'array', ',', 'np', '.', 'ndarray', ')', ':', 'return', 'array', 'try', ':', 'dt2001', '=', 'np', '.', 'datetime64', '(', "'2001'", ')', 'legacy_datetime64', '=', '(', 'dt2001', '.', 'astype', '(', "'int64'", ')', '==', 'dt2001', '.', 'astype', '(', "'datetime64[ms]'", ')', '.', 'astype', '(', "'int64'", ')', ')', 'except', 'AttributeError', 'as', 'e', ':', 'if', 'e', '.', 'args', '==', '(', '"\'module\' object has no attribute \'datetime64\'"', ',', ')', ':', "# for compatibility with PyPy that doesn't have datetime64", 'if', "'PyPy'", 'in', 'sys', '.', 'version', ':', 'legacy_datetime64', '=', 'False', 'pass', 'else', ':', 'raise', 'e', 'else', ':', 'raise', 'e', '# not quite correct, truncates to ms..', 'if', 'array', '.', 'dtype', '.', 'kind', '==', "'M'", ':', 'if', 'legacy_datetime64', ':', 'if', 'array', '.', 'dtype', '==', 'np', '.', 'dtype', '(', "'datetime64[ns]'", ')', ':', 'array', '=', 'array', '.', 'astype', '(', "'int64'", ')', '/', '10', '**', '6.0', 'else', ':', 'array', '=', 'array', '.', 'astype', '(', "'datetime64[us]'", ')', '.', 'astype', '(', "'int64'", ')', '/', '1000.', 'elif', 'array', '.', 'dtype', '.', 'kind', '==', "'m'", ':', 'array', '=', 'array', '.', 'astype', '(', "'timedelta64[us]'", ')', '.', 'astype', '(', "'int64'", ')', '/', '1000.', 'return', 'array']
Convert NumPy datetime arrays to arrays to milliseconds since epoch. Args: array : (obj) A NumPy array of datetime to convert If the value passed in is not a NumPy array, it will be returned as-is. Returns: array
['Convert', 'NumPy', 'datetime', 'arrays', 'to', 'arrays', 'to', 'milliseconds', 'since', 'epoch', '.']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/util/serialization.py#L195-L238
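A quick demonstration of convert_datetime_array above, using the import path shown in the record; the sample timestamps are arbitrary.

import numpy as np
from bokeh.util.serialization import convert_datetime_array

times = np.array(['2001-01-01', '2001-01-02'], dtype='datetime64[ns]')
ms = convert_datetime_array(times)   # float milliseconds since the Unix epoch
print(ms)                            # [9.783072e+11 9.783936e+11]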
3,389
golemhq/webdriver-manager
webdriver_manager/helpers.py
download_file_with_progress_bar
def download_file_with_progress_bar(url):
    """Downloads a file from the given url, displays a progress bar.
    Returns a io.BytesIO object
    """
    request = requests.get(url, stream=True)
    if request.status_code == 404:
        msg = ('there was a 404 error trying to reach {} \nThis probably '
               'means the requested version does not exist.'.format(url))
        logger.error(msg)
        sys.exit()
    total_size = int(request.headers["Content-Length"])
    chunk_size = 1024
    bars = int(total_size / chunk_size)
    bytes_io = io.BytesIO()
    pbar = tqdm(request.iter_content(chunk_size=chunk_size), total=bars,
                unit="kb", leave=False)
    for chunk in pbar:
        bytes_io.write(chunk)
    return bytes_io
python
['def', 'download_file_with_progress_bar', '(', 'url', ')', ':', 'request', '=', 'requests', '.', 'get', '(', 'url', ',', 'stream', '=', 'True', ')', 'if', 'request', '.', 'status_code', '==', '404', ':', 'msg', '=', '(', "'there was a 404 error trying to reach {} \\nThis probably '", "'means the requested version does not exist.'", '.', 'format', '(', 'url', ')', ')', 'logger', '.', 'error', '(', 'msg', ')', 'sys', '.', 'exit', '(', ')', 'total_size', '=', 'int', '(', 'request', '.', 'headers', '[', '"Content-Length"', ']', ')', 'chunk_size', '=', '1024', 'bars', '=', 'int', '(', 'total_size', '/', 'chunk_size', ')', 'bytes_io', '=', 'io', '.', 'BytesIO', '(', ')', 'pbar', '=', 'tqdm', '(', 'request', '.', 'iter_content', '(', 'chunk_size', '=', 'chunk_size', ')', ',', 'total', '=', 'bars', ',', 'unit', '=', '"kb"', ',', 'leave', '=', 'False', ')', 'for', 'chunk', 'in', 'pbar', ':', 'bytes_io', '.', 'write', '(', 'chunk', ')', 'return', 'bytes_io']
Downloads a file from the given url, displays a progress bar. Returns a io.BytesIO object
['Downloads', 'a', 'file', 'from', 'the', 'given', 'url', 'displays', 'a', 'progress', 'bar', '.', 'Returns', 'a', 'io', '.', 'BytesIO', 'object']
train
https://github.com/golemhq/webdriver-manager/blob/5c923deec5cb14f503ba7c20b67bc296e411de19/webdriver_manager/helpers.py#L106-L125
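A short sketch of download_file_with_progress_bar above. The import path mirrors the module shown in the record, and the URL is a placeholder rather than a real download location.

from webdriver_manager.helpers import download_file_with_progress_bar

url = 'https://example.com/driver.zip'         # hypothetical archive URL
data = download_file_with_progress_bar(url)    # io.BytesIO holding the payload
with open('driver.zip', 'wb') as fh:
    fh.write(data.getvalue())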
3,390
cqparts/cqparts
src/cqparts/params/utils.py
as_parameter
def as_parameter(nullable=True, strict=True):
    """
    Decorate a container class as a functional :class:`Parameter` class
    for a :class:`ParametricObject`.

    :param nullable: if set, parameter's value may be Null
    :type nullable: :class:`bool`

    .. doctest::

        >>> from cqparts.params import as_parameter, ParametricObject
        >>> @as_parameter(nullable=True)
        ... class Stuff(object):
        ...     def __init__(self, a=1, b=2, c=3):
        ...         self.a = a
        ...         self.b = b
        ...         self.c = c
        ...     @property
        ...     def abc(self):
        ...         return (self.a, self.b, self.c)
        >>> class Thing(ParametricObject):
        ...     foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff")
        >>> thing = Thing(foo={'a': 20})
        >>> thing.foo.a
        20
        >>> thing.foo.abc
        (20, 2, 3)
    """
    def decorator(cls):
        base_class = Parameter if nullable else NonNullParameter

        return type(cls.__name__, (base_class,), {
            # Preserve text for documentation
            '__name__': cls.__name__,
            '__doc__': cls.__doc__,
            '__module__': cls.__module__,

            # Sphinx doc type string
            '_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
                class_name=cls.__name__, module=__name__
            ),

            #
            'type': lambda self, value: cls(**value)
        })

    return decorator
python
['def', 'as_parameter', '(', 'nullable', '=', 'True', ',', 'strict', '=', 'True', ')', ':', 'def', 'decorator', '(', 'cls', ')', ':', 'base_class', '=', 'Parameter', 'if', 'nullable', 'else', 'NonNullParameter', 'return', 'type', '(', 'cls', '.', '__name__', ',', '(', 'base_class', ',', ')', ',', '{', '# Preserve text for documentation', "'__name__'", ':', 'cls', '.', '__name__', ',', "'__doc__'", ':', 'cls', '.', '__doc__', ',', "'__module__'", ':', 'cls', '.', '__module__', ',', '# Sphinx doc type string', "'_doc_type'", ':', '":class:`{class_name} <{module}.{class_name}>`"', '.', 'format', '(', 'class_name', '=', 'cls', '.', '__name__', ',', 'module', '=', '__name__', ')', ',', '#', "'type'", ':', 'lambda', 'self', ',', 'value', ':', 'cls', '(', '*', '*', 'value', ')', '}', ')', 'return', 'decorator']
Decorate a container class as a functional :class:`Parameter` class for a :class:`ParametricObject`. :param nullable: if set, parameter's value may be Null :type nullable: :class:`bool` .. doctest:: >>> from cqparts.params import as_parameter, ParametricObject >>> @as_parameter(nullable=True) ... class Stuff(object): ... def __init__(self, a=1, b=2, c=3): ... self.a = a ... self.b = b ... self.c = c ... @property ... def abc(self): ... return (self.a, self.b, self.c) >>> class Thing(ParametricObject): ... foo = Stuff({'a': 10, 'b': 100}, doc="controls stuff") >>> thing = Thing(foo={'a': 20}) >>> thing.foo.a 20 >>> thing.foo.abc (20, 2, 3)
['Decorate', 'a', 'container', 'class', 'as', 'a', 'functional', ':', 'class', ':', 'Parameter', 'class', 'for', 'a', ':', 'class', ':', 'ParametricObject', '.']
train
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/params/utils.py#L6-L54
3,391
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
ekacld
def ekacld(handle, segno, column, dvals, entszs, nlflgs, rcptrs, wkindx):
    """
    Add an entire double precision column to an EK segment.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacld_c.html

    :param handle: EK file handle.
    :type handle: int
    :param segno: Number of segment to add column to.
    :type segno: int
    :param column: Column name.
    :type column: str
    :param dvals: Double precision values to add to column.
    :type dvals: Array of floats
    :param entszs: Array of sizes of column entries.
    :type entszs: Array of ints
    :param nlflgs: Array of null flags for column entries.
    :type nlflgs: Array of bools
    :param rcptrs: Record pointers for segment.
    :type rcptrs: Array of ints
    :param wkindx: Work space for column index.
    :type wkindx: Array of ints
    :return: Work space for column index.
    :rtype: Array of ints
    """
    handle = ctypes.c_int(handle)
    segno = ctypes.c_int(segno)
    column = stypes.stringToCharP(column)
    dvals = stypes.toDoubleVector(dvals)
    entszs = stypes.toIntVector(entszs)
    nlflgs = stypes.toIntVector(nlflgs)
    rcptrs = stypes.toIntVector(rcptrs)
    wkindx = stypes.toIntVector(wkindx)
    libspice.ekacld_c(handle, segno, column, dvals, entszs, nlflgs, rcptrs,
                      wkindx)
    return stypes.cVectorToPython(wkindx)
python
['def', 'ekacld', '(', 'handle', ',', 'segno', ',', 'column', ',', 'dvals', ',', 'entszs', ',', 'nlflgs', ',', 'rcptrs', ',', 'wkindx', ')', ':', 'handle', '=', 'ctypes', '.', 'c_int', '(', 'handle', ')', 'segno', '=', 'ctypes', '.', 'c_int', '(', 'segno', ')', 'column', '=', 'stypes', '.', 'stringToCharP', '(', 'column', ')', 'dvals', '=', 'stypes', '.', 'toDoubleVector', '(', 'dvals', ')', 'entszs', '=', 'stypes', '.', 'toIntVector', '(', 'entszs', ')', 'nlflgs', '=', 'stypes', '.', 'toIntVector', '(', 'nlflgs', ')', 'rcptrs', '=', 'stypes', '.', 'toIntVector', '(', 'rcptrs', ')', 'wkindx', '=', 'stypes', '.', 'toIntVector', '(', 'wkindx', ')', 'libspice', '.', 'ekacld_c', '(', 'handle', ',', 'segno', ',', 'column', ',', 'dvals', ',', 'entszs', ',', 'nlflgs', ',', 'rcptrs', ',', 'wkindx', ')', 'return', 'stypes', '.', 'cVectorToPython', '(', 'wkindx', ')']
Add an entire double precision column to an EK segment. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacld_c.html :param handle: EK file handle. :type handle: int :param segno: Number of segment to add column to. :type segno: int :param column: Column name. :type column: str :param dvals: Double precision values to add to column. :type dvals: Array of floats :param entszs: Array of sizes of column entries. :type entszs: Array of ints :param nlflgs: Array of null flags for column entries. :type nlflgs: Array of bools :param rcptrs: Record pointers for segment. :type rcptrs: Array of ints :param wkindx: Work space for column index. :type wkindx: Array of ints :return: Work space for column index. :rtype: Array of ints
['Add', 'an', 'entire', 'double', 'precision', 'column', 'to', 'an', 'EK', 'segment', '.']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3833-L3868
3,392
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
brocade_notification_stream.BGPNeighborPrefixExceeded_originator_switch_info_switchIpV6Address
def BGPNeighborPrefixExceeded_originator_switch_info_switchIpV6Address(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    BGPNeighborPrefixExceeded = ET.SubElement(config, "BGPNeighborPrefixExceeded",
                                              xmlns="http://brocade.com/ns/brocade-notification-stream")
    originator_switch_info = ET.SubElement(BGPNeighborPrefixExceeded, "originator-switch-info")
    switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address")
    switchIpV6Address.text = kwargs.pop('switchIpV6Address')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
['def', 'BGPNeighborPrefixExceeded_originator_switch_info_switchIpV6Address', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'BGPNeighborPrefixExceeded', '=', 'ET', '.', 'SubElement', '(', 'config', ',', '"BGPNeighborPrefixExceeded"', ',', 'xmlns', '=', '"http://brocade.com/ns/brocade-notification-stream"', ')', 'originator_switch_info', '=', 'ET', '.', 'SubElement', '(', 'BGPNeighborPrefixExceeded', ',', '"originator-switch-info"', ')', 'switchIpV6Address', '=', 'ET', '.', 'SubElement', '(', 'originator_switch_info', ',', '"switchIpV6Address"', ')', 'switchIpV6Address', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'switchIpV6Address'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L118-L128
3,393
miniconfig/python-openevse-wifi
openevsewifi/__init__.py
Charger.getAutoServiceLevelEnabled
def getAutoServiceLevelEnabled(self):
    """Returns True if enabled, False if disabled"""
    command = '$GE'
    settings = self.sendCommand(command)
    flags = int(settings[2], 16)
    return not (flags & 0x0020)
python
['def', 'getAutoServiceLevelEnabled', '(', 'self', ')', ':', 'command', '=', "'$GE'", 'settings', '=', 'self', '.', 'sendCommand', '(', 'command', ')', 'flags', '=', 'int', '(', 'settings', '[', '2', ']', ',', '16', ')', 'return', 'not', '(', 'flags', '&', '0x0020', ')']
Returns True if enabled, False if disabled
['Returns', 'True', 'if', 'enabled', 'False', 'if', 'disabled']
train
https://github.com/miniconfig/python-openevse-wifi/blob/42fabeae052a9f82092fa9220201413732e38bb4/openevsewifi/__init__.py#L127-L132
3,394
dhermes/bezier
scripts/check_doc_templates.py
get_diff
def get_diff(value1, value2, name1, name2):
    """Get a diff between two strings.

    Args:
        value1 (str): First string to be compared.
        value2 (str): Second string to be compared.
        name1 (str): Name of the first string.
        name2 (str): Name of the second string.

    Returns:
        str: The full diff.
    """
    lines1 = [line + "\n" for line in value1.splitlines()]
    lines2 = [line + "\n" for line in value2.splitlines()]
    diff_lines = difflib.context_diff(
        lines1, lines2, fromfile=name1, tofile=name2
    )
    return "".join(diff_lines)
python
['def', 'get_diff', '(', 'value1', ',', 'value2', ',', 'name1', ',', 'name2', ')', ':', 'lines1', '=', '[', 'line', '+', '"\\n"', 'for', 'line', 'in', 'value1', '.', 'splitlines', '(', ')', ']', 'lines2', '=', '[', 'line', '+', '"\\n"', 'for', 'line', 'in', 'value2', '.', 'splitlines', '(', ')', ']', 'diff_lines', '=', 'difflib', '.', 'context_diff', '(', 'lines1', ',', 'lines2', ',', 'fromfile', '=', 'name1', ',', 'tofile', '=', 'name2', ')', 'return', '""', '.', 'join', '(', 'diff_lines', ')']
Get a diff between two strings. Args: value1 (str): First string to be compared. value2 (str): Second string to be compared. name1 (str): Name of the first string. name2 (str): Name of the second string. Returns: str: The full diff.
['Get', 'a', 'diff', 'between', 'two', 'strings', '.']
train
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/scripts/check_doc_templates.py#L245-L262
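A small worked example of get_diff above, assuming the function is in scope (it lives in a repository helper script rather than an installed package); the strings are arbitrary.

old = "alpha\nbeta\ngamma"
new = "alpha\nBETA\ngamma"
print(get_diff(old, new, "old.txt", "new.txt"))
# Prints a context diff that begins roughly like:
# *** old.txt
# --- new.txt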
3,395
mitsei/dlkit
dlkit/services/repository.py
Repository.use_sequestered_composition_view
def use_sequestered_composition_view(self):
    """Pass through to provider CompositionLookupSession.use_sequestered_composition_view"""
    self._containable_views['composition'] = SEQUESTERED
    # self._get_provider_session('composition_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_sequestered_composition_view()
        except AttributeError:
            pass
python
['def', 'use_sequestered_composition_view', '(', 'self', ')', ':', 'self', '.', '_containable_views', '[', "'composition'", ']', '=', 'SEQUESTERED', "# self._get_provider_session('composition_lookup_session') # To make sure the session is tracked", 'for', 'session', 'in', 'self', '.', '_get_provider_sessions', '(', ')', ':', 'try', ':', 'session', '.', 'use_sequestered_composition_view', '(', ')', 'except', 'AttributeError', ':', 'pass']
Pass through to provider CompositionLookupSession.use_sequestered_composition_view
['Pass', 'through', 'to', 'provider', 'CompositionLookupSession', '.', 'use_sequestered_composition_view']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/repository.py#L1902-L1910
3,396
adamrehn/ue4cli
ue4cli/JsonDataManager.py
JsonDataManager.setDictionary
def setDictionary(self, data):
    """
    Overwrites the entire dictionary
    """

    # Create the directory containing the JSON file if it doesn't already exist
    jsonDir = os.path.dirname(self.jsonFile)
    if os.path.exists(jsonDir) == False:
        os.makedirs(jsonDir)

    # Store the dictionary
    Utility.writeFile(self.jsonFile, json.dumps(data))
python
['def', 'setDictionary', '(', 'self', ',', 'data', ')', ':', "# Create the directory containing the JSON file if it doesn't already exist", 'jsonDir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'self', '.', 'jsonFile', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'jsonDir', ')', '==', 'False', ':', 'os', '.', 'makedirs', '(', 'jsonDir', ')', '# Store the dictionary', 'Utility', '.', 'writeFile', '(', 'self', '.', 'jsonFile', ',', 'json', '.', 'dumps', '(', 'data', ')', ')']
Overwrites the entire dictionary
['Overwrites', 'the', 'entire', 'dictionary']
train
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/JsonDataManager.py#L42-L53
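A hedged sketch for setDictionary above. Constructing JsonDataManager with the path to its JSON file is an assumption inferred from the self.jsonFile attribute; the paths and keys are hypothetical.

from ue4cli.JsonDataManager import JsonDataManager

manager = JsonDataManager('/tmp/ue4cli-demo/config.json')     # hypothetical file path
manager.setDictionary({'engine_root': '/opt/UnrealEngine'})   # creates the directory and writes the JSON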
3,397
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/thread.py
_ThreadContainer.scan_threads
def scan_threads(self):
    """
    Populates the snapshot with running threads.
    """

    # Ignore special process IDs.
    # PID 0: System Idle Process. Also has a special meaning to the
    #        toolhelp APIs (current process).
    # PID 4: System Integrity Group. See this forum post for more info:
    #        http://tinyurl.com/ycza8jo
    #        (points to social.technet.microsoft.com)
    #        Only on XP and above
    # PID 8: System (?) only in Windows 2000 and below AFAIK.
    #        It's probably the same as PID 4 in XP and above.
    dwProcessId = self.get_pid()
    if dwProcessId in (0, 4, 8):
        return

    ## dead_tids = set( self.get_thread_ids() )  # XXX triggers a scan
    dead_tids = self._get_thread_ids()
    dwProcessId = self.get_pid()
    hSnapshot = win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPTHREAD,
                                               dwProcessId)
    try:
        te = win32.Thread32First(hSnapshot)
        while te is not None:
            if te.th32OwnerProcessID == dwProcessId:
                dwThreadId = te.th32ThreadID
                if dwThreadId in dead_tids:
                    dead_tids.remove(dwThreadId)
                ## if not self.has_thread(dwThreadId):  # XXX triggers a scan
                if not self._has_thread_id(dwThreadId):
                    aThread = Thread(dwThreadId, process = self)
                    self._add_thread(aThread)
            te = win32.Thread32Next(hSnapshot)
    finally:
        win32.CloseHandle(hSnapshot)
    for tid in dead_tids:
        self._del_thread(tid)
python
['def', 'scan_threads', '(', 'self', ')', ':', '# Ignore special process IDs.', '# PID 0: System Idle Process. Also has a special meaning to the', '# toolhelp APIs (current process).', '# PID 4: System Integrity Group. See this forum post for more info:', '# http://tinyurl.com/ycza8jo', '# (points to social.technet.microsoft.com)', '# Only on XP and above', '# PID 8: System (?) only in Windows 2000 and below AFAIK.', "# It's probably the same as PID 4 in XP and above.", 'dwProcessId', '=', 'self', '.', 'get_pid', '(', ')', 'if', 'dwProcessId', 'in', '(', '0', ',', '4', ',', '8', ')', ':', 'return', '## dead_tids = set( self.get_thread_ids() ) # XXX triggers a scan', 'dead_tids', '=', 'self', '.', '_get_thread_ids', '(', ')', 'dwProcessId', '=', 'self', '.', 'get_pid', '(', ')', 'hSnapshot', '=', 'win32', '.', 'CreateToolhelp32Snapshot', '(', 'win32', '.', 'TH32CS_SNAPTHREAD', ',', 'dwProcessId', ')', 'try', ':', 'te', '=', 'win32', '.', 'Thread32First', '(', 'hSnapshot', ')', 'while', 'te', 'is', 'not', 'None', ':', 'if', 'te', '.', 'th32OwnerProcessID', '==', 'dwProcessId', ':', 'dwThreadId', '=', 'te', '.', 'th32ThreadID', 'if', 'dwThreadId', 'in', 'dead_tids', ':', 'dead_tids', '.', 'remove', '(', 'dwThreadId', ')', '## if not self.has_thread(dwThreadId): # XXX triggers a scan', 'if', 'not', 'self', '.', '_has_thread_id', '(', 'dwThreadId', ')', ':', 'aThread', '=', 'Thread', '(', 'dwThreadId', ',', 'process', '=', 'self', ')', 'self', '.', '_add_thread', '(', 'aThread', ')', 'te', '=', 'win32', '.', 'Thread32Next', '(', 'hSnapshot', ')', 'finally', ':', 'win32', '.', 'CloseHandle', '(', 'hSnapshot', ')', 'for', 'tid', 'in', 'dead_tids', ':', 'self', '.', '_del_thread', '(', 'tid', ')']
Populates the snapshot with running threads.
['Populates', 'the', 'snapshot', 'with', 'running', 'threads', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/thread.py#L1927-L1965
3,398
happyleavesaoc/python-snapcast
snapcast/control/group.py
Snapgroup.add_client
def add_client(self, client_identifier):
    """Add a client."""
    if client_identifier in self.clients:
        _LOGGER.error('%s already in group %s', client_identifier, self.identifier)
        return
    new_clients = self.clients
    new_clients.append(client_identifier)
    yield from self._server.group_clients(self.identifier, new_clients)
    _LOGGER.info('added %s to %s', client_identifier, self.identifier)
    self._server.client(client_identifier).callback()
    self.callback()
python
['def', 'add_client', '(', 'self', ',', 'client_identifier', ')', ':', 'if', 'client_identifier', 'in', 'self', '.', 'clients', ':', '_LOGGER', '.', 'error', '(', "'%s already in group %s'", ',', 'client_identifier', ',', 'self', '.', 'identifier', ')', 'return', 'new_clients', '=', 'self', '.', 'clients', 'new_clients', '.', 'append', '(', 'client_identifier', ')', 'yield', 'from', 'self', '.', '_server', '.', 'group_clients', '(', 'self', '.', 'identifier', ',', 'new_clients', ')', '_LOGGER', '.', 'info', '(', "'added %s to %s'", ',', 'client_identifier', ',', 'self', '.', 'identifier', ')', 'self', '.', '_server', '.', 'client', '(', 'client_identifier', ')', '.', 'callback', '(', ')', 'self', '.', 'callback', '(', ')']
Add a client.
['Add', 'a', 'client', '.']
train
https://github.com/happyleavesaoc/python-snapcast/blob/9b3c483358677327c7fd6d0666bf474c19d87f19/snapcast/control/group.py#L96-L106
3,399
sampottinger/pycotracer
pycotracer/retrieval.py
get_report_raw
def get_report_raw(year, report_type):
    """Download and extract a CO-TRACER report.

    Generate a URL for the given report, download the corresponding archive,
    extract the CSV report, and interpret it using the standard CSV library.

    @param year: The year for which data should be downloaded.
    @type year: int
    @param report_type: The type of report that should be downloaded. Should
        be one of the strings in constants.REPORT_TYPES.
    @type report_type: str
    @return: A DictReader with the loaded data. Note that this data has not
        been interpreted so data fields like floating point values, dates, and
        boolean values are still strings.
    @rtype: csv.DictReader
    """
    if not is_valid_report_type(report_type):
        msg = '%s is not a valid report type.' % report_type
        raise ValueError(msg)

    url = get_url(year, report_type)
    raw_contents = get_zipped_file(url)
    return csv.DictReader(cStringIO.StringIO(raw_contents))
python
['def', 'get_report_raw', '(', 'year', ',', 'report_type', ')', ':', 'if', 'not', 'is_valid_report_type', '(', 'report_type', ')', ':', 'msg', '=', "'%s is not a valid report type.'", '%', 'report_type', 'raise', 'ValueError', '(', 'msg', ')', 'url', '=', 'get_url', '(', 'year', ',', 'report_type', ')', 'raw_contents', '=', 'get_zipped_file', '(', 'url', ')', 'return', 'csv', '.', 'DictReader', '(', 'cStringIO', '.', 'StringIO', '(', 'raw_contents', ')', ')']
Download and extract a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using the standard CSV library. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A DictReader with the loaded data. Note that this data has not been interpreted so data fields like floating point values, dates, and boolean values are still strings. @rtype: csv.DictReader
['Download', 'and', 'extract', 'a', 'CO', '-', 'TRACER', 'report', '.']
train
https://github.com/sampottinger/pycotracer/blob/c66c3230949b7bee8c9fec5fc00ab392865a0c8b/pycotracer/retrieval.py#L90-L112
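A hedged usage sketch for get_report_raw above. The constants module and its REPORT_TYPES collection are referenced in the docstring; one entry is taken from it here without assuming its exact contents.

from pycotracer import constants, retrieval

report_type = next(iter(constants.REPORT_TYPES))      # any valid report type string
reader = retrieval.get_report_raw(2013, report_type)  # csv.DictReader over the extracted CSV
for row in reader:
    print(row)                                        # fields are still raw strings
    break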