Code
stringlengths
103
85.9k
Summary
listlengths
0
94
Please provide a description of the function:def execute(self, query): c = self.conn.cursor() result = c.execute(query) for i in result: yield i
[ "\n Execute a query directly on the database.\n " ]
Please provide a description of the function:def _replace(self, feature, cursor): try: cursor.execute( constants._UPDATE, list(feature.astuple()) + [feature.id]) except sqlite3.ProgrammingError: cursor.execute( constants._INSERT, list(feature.astuple(self.default_encoding)) + [feature.id])
[ "\n Insert a feature into the database.\n " ]
Please provide a description of the function:def wait_for_js(function): @functools.wraps(function) def wrapper(*args, **kwargs): # pylint: disable=missing-docstring # If not a method, then just call the function if len(args) < 1: return function(*args, **kwargs) # Otherwise, retrieve `self` as the first arg else: self = args[0] # If the class has been decorated by one of the # JavaScript dependency decorators, it should have # a `wait_for_js` method if hasattr(self, 'wait_for_js'): self.wait_for_js() # Call the function return function(*args, **kwargs) return wrapper
[ "\n Method decorator that waits for JavaScript dependencies before executing `function`.\n If the function is not a method, the decorator has no effect.\n\n Args:\n function (callable): Method to decorate.\n\n Returns:\n Decorated method\n " ]
Please provide a description of the function:def _decorator(store_name, store_values): def decorator(clz): # pylint: disable=missing-docstring # Add a `wait_for_js` method to the class if not hasattr(clz, 'wait_for_js'): setattr(clz, 'wait_for_js', _wait_for_js) # pylint: disable= literal-used-as-attribute # Store the RequireJS module names in the class if not hasattr(clz, store_name): setattr(clz, store_name, set()) getattr(clz, store_name).update(store_values) return clz return decorator
[ "\n Return a class decorator that:\n\n 1) Defines a new class method, `wait_for_js`\n 2) Defines a new class list variable, `store_name` and adds\n `store_values` to the list.\n " ]
Please provide a description of the function:def _wait_for_js(self): # No Selenium browser available, so return without doing anything if not hasattr(self, 'browser'): return # pylint: disable=protected-access # Wait for JavaScript variables to be defined if hasattr(self, '_js_vars') and self._js_vars: EmptyPromise( lambda: _are_js_vars_defined(self.browser, self._js_vars), u"JavaScript variables defined: {0}".format(", ".join(self._js_vars)) ).fulfill() # Wait for RequireJS dependencies to load if hasattr(self, '_requirejs_deps') and self._requirejs_deps: EmptyPromise( lambda: _are_requirejs_deps_loaded(self.browser, self._requirejs_deps), u"RequireJS dependencies loaded: {0}".format(", ".join(self._requirejs_deps)), try_limit=5 ).fulfill()
[ "\n Class method added by the decorators to allow\n decorated classes to manually re-check JavaScript\n dependencies.\n\n Expect that `self` is a class that:\n 1) Has been decorated with either `js_defined` or `requirejs`\n 2) Has a `browser` property\n\n If either (1) or (2) is not satisfied, then do nothing.\n " ]
Please provide a description of the function:def _are_js_vars_defined(browser, js_vars): # This script will evaluate to True iff all of # the required vars are defined. script = u" && ".join([ u"!(typeof {0} === 'undefined')".format(var) for var in js_vars ]) try: return browser.execute_script(u"return {}".format(script)) except WebDriverException as exc: if "is not defined" in exc.msg or "is undefined" in exc.msg: return False else: raise
[ "\n Return a boolean indicating whether all the JavaScript\n variables `js_vars` are defined on the current page.\n\n `browser` is a Selenium webdriver instance.\n " ]
Please provide a description of the function:def _are_requirejs_deps_loaded(browser, deps): # This is a little complicated # # We're going to use `execute_async_script` to give control to # the browser. The browser indicates that it wants to return # control to us by calling `callback`, which is the last item # in the global `arguments` array. # # We install a RequireJS module with the dependencies we want # to ensure are loaded. When our module loads, we return # control to the test suite. script = dedent(u).format(deps=json.dumps(list(deps))) # Set a timeout to ensure we get control back browser.set_script_timeout(30) # Give control to the browser # `result` will be the argument passed to the callback function try: result = browser.execute_async_script(script) return result == 'Success' except TimeoutException: return False
[ "\n Return a boolean indicating whether all the RequireJS\n dependencies `deps` have loaded on the current page.\n\n `browser` is a WebDriver instance.\n ", "\n // Retrieve the callback function used to return control to the test suite\n var callback = arguments[arguments.length - 1];\n\n // If RequireJS isn't defined, then return immediately\n if (!window.require) {{\n callback(\"RequireJS not defined\");\n }}\n\n // Otherwise, install a RequireJS module that depends on the modules\n // we're waiting for.\n else {{\n\n // Catch errors reported by RequireJS\n requirejs.onError = callback;\n\n // Install our module\n require({deps}, function() {{\n callback('Success');\n }});\n }}\n " ]
Please provide a description of the function:def no_selenium_errors(func): def _inner(*args, **kwargs): # pylint: disable=missing-docstring try: return_val = func(*args, **kwargs) except WebDriverException: LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True) return False else: return return_val return _inner
[ "\n Decorator to create an `EmptyPromise` check function that is satisfied\n only when `func` executes without a Selenium error.\n\n This protects against many common test failures due to timing issues.\n For example, accessing an element after it has been modified by JavaScript\n ordinarily results in a `StaleElementException`. Methods decorated\n with `no_selenium_errors` will simply retry if that happens, which makes tests\n more robust.\n\n Args:\n func (callable): The function to execute, with retries if an error occurs.\n\n Returns:\n Decorated function\n " ]
Please provide a description of the function:def pre_verify(method): @wraps(method) def wrapper(self, *args, **kwargs): # pylint: disable=missing-docstring self._verify_page() # pylint: disable=protected-access return method(self, *args, **kwargs) return wrapper
[ "\n Decorator that calls self._verify_page() before executing the decorated method\n\n Args:\n method (callable): The method to decorate.\n\n Returns:\n Decorated method\n " ]
Please provide a description of the function:def set_rules(self, rules): self.rules_to_ignore = rules.get("ignore", []) self.rules_to_run = rules.get("apply", [])
[ "\n Sets the rules to be run or ignored for the audit.\n\n Args:\n\n rules: a dictionary of the format `{\"ignore\": [], \"apply\": []}`.\n\n See https://github.com/GoogleChrome/accessibility-developer-tools/tree/master/src/audits\n\n Passing `{\"apply\": []}` or `{}` means to check for all available rules.\n\n Passing `{\"apply\": None}` means that no audit should be done for this page.\n\n Passing `{\"ignore\": []}` means to run all otherwise enabled rules.\n Any rules in the \"ignore\" list will be ignored even if they were also\n specified in the \"apply\".\n\n Examples:\n\n To check only `badAriaAttributeValue`::\n\n page.a11y_audit.config.set_rules({\n \"apply\": ['badAriaAttributeValue']\n })\n\n To check all rules except `badAriaAttributeValue`::\n\n page.a11y_audit.config.set_rules({\n \"ignore\": ['badAriaAttributeValue'],\n })\n " ]
Please provide a description of the function:def set_scope(self, include=None, exclude=None): if include: self.scope = u"document.querySelector(\"{}\")".format( u', '.join(include) ) else: self.scope = "null" if exclude is not None: raise NotImplementedError( "The argument `exclude` has not been implemented in " "AxsAuditConfig.set_scope method." )
[ "\n Sets `scope`, the \"start point\" for the audit.\n\n Args:\n\n include: A list of css selectors specifying the elements that\n contain the portion of the page that should be audited.\n Defaults to auditing the entire document.\n exclude: This arg is not implemented in this ruleset.\n\n Examples:\n\n To check only the `div` with id `foo`::\n\n page.a11y_audit.config.set_scope([\"div#foo\"])\n\n To reset the scope to check the whole document::\n\n page.a11y_audit.config.set_scope()\n " ]
Please provide a description of the function:def _check_rules(browser, rules_js, config): if config.rules_to_run is None: msg = 'No accessibility rules were specified to check.' log.warning(msg) return None # This line will only be included in the script if rules to check on # this page are specified, as the default behavior of the js is to # run all rules. rules = config.rules_to_run if rules: rules_config = u"auditConfig.auditRulesToRun = {rules};".format( rules=rules) else: rules_config = "" ignored_rules = config.rules_to_ignore if ignored_rules: rules_config += ( u"\nauditConfig.auditRulesToIgnore = {rules};".format( rules=ignored_rules ) ) script = dedent(u.format(rules_js=rules_js, rules_config=rules_config, scope=config.scope)) result = browser.execute_script(script) # audit_results is report of accessibility errors for that session audit_results = AuditResults( errors=result.get('errors_'), warnings=result.get('warnings_') ) return audit_results
[ "\n Check the page for violations of the configured rules. By default,\n all rules in the ruleset will be checked.\n\n Args:\n browser: a browser instance.\n rules_js: the ruleset JavaScript as a string.\n config: an AxsAuditConfig instance.\n\n Returns:\n A namedtuple with 'errors' and 'warnings' fields whose values are\n the errors and warnings returned from the audit.\n\n None if config has rules_to_run set to None.\n\n __Caution__: You probably don't really want to call this method\n directly! It will be used by `A11yAudit.do_audit` if using this ruleset.\n ", "\n {rules_js}\n var auditConfig = new axs.AuditConfiguration();\n {rules_config}\n auditConfig.scope = {scope};\n var run_results = axs.Audit.run(auditConfig);\n var audit_results = axs.Audit.auditResults(run_results)\n return audit_results;\n " ]
Please provide a description of the function:def get_errors(audit_results): errors = [] if audit_results: if audit_results.errors: errors.extend(audit_results.errors) return errors
[ "\n Args:\n\n audit_results: results of `AxsAudit.do_audit()`.\n\n Returns: a list of errors.\n " ]
Please provide a description of the function:def report_errors(audit, url): errors = AxsAudit.get_errors(audit) if errors: msg = u"URL '{}' has {} errors:\n{}".format( url, len(errors), ', '.join(errors) ) raise AccessibilityError(msg)
[ "\n Args:\n\n audit: results of `AxsAudit.do_audit()`.\n url: the url of the page being audited.\n\n Raises: `AccessibilityError`\n " ]
Please provide a description of the function:def fulfill(self): is_fulfilled, result = self._check_fulfilled() if is_fulfilled: return result else: raise BrokenPromise(self)
[ "\n Evaluate the promise and return the result.\n\n Returns:\n The result of the `Promise` (second return value from the `check_func`)\n\n Raises:\n BrokenPromise: the `Promise` was not satisfied within the time or attempt limits.\n " ]
Please provide a description of the function:def _check_fulfilled(self): is_fulfilled = False result = None start_time = time.time() # Check whether the promise has been fulfilled until we run out of time or attempts while self._has_time_left(start_time) and self._has_more_tries(): # Keep track of how many attempts we've made so far self._num_tries += 1 is_fulfilled, result = self._check_func() # If the promise is satisfied, then continue execution if is_fulfilled: break # Delay between checks time.sleep(self._try_interval) return is_fulfilled, result
[ "\n Return tuple `(is_fulfilled, result)` where\n `is_fulfilled` is a boolean indicating whether the promise has been fulfilled\n and `result` is the value to pass to the `with` block.\n " ]
Please provide a description of the function:def search(self): self.q(css='button.btn').click() GitHubSearchResultsPage(self.browser).wait_for_page()
[ "\n Click on the Search button and wait for the\n results page to be displayed\n " ]
Please provide a description of the function:def set_rules(self, rules): options = {} if rules: if rules.get("ignore"): options["rules"] = {} for rule in rules.get("ignore"): options["rules"][rule] = {"enabled": False} elif rules.get("apply"): options["runOnly"] = { "type": "rule", "values": rules.get("apply"), } elif rules.get("tags"): options["runOnly"] = { "type": "tag", "values": rules.get("tags"), } self.rules = json.dumps(options)
[ "\n Set rules to ignore XOR limit to when checking for accessibility\n errors on the page.\n\n Args:\n\n rules: a dictionary one of the following formats.\n If you want to run all of the rules except for some::\n\n {\"ignore\": []}\n\n If you want to run only a specific set of rules::\n\n {\"apply\": []}\n\n If you want to run only rules of a specific standard::\n\n {\"tags\": []}\n\n Examples:\n\n To run only \"bad-link\" and \"color-contrast\" rules::\n\n page.a11y_audit.config.set_rules({\n \"apply\": [\"bad-link\", \"color-contrast\"],\n })\n\n To run all rules except for \"bad-link\" and \"color-contrast\"::\n\n page.a11y_audit.config.set_rules({\n \"ignore\": [\"bad-link\", \"color-contrast\"],\n })\n\n To run only WCAG 2.0 Level A rules::\n\n page.a11y_audit.config.set_rules({\n \"tags\": [\"wcag2a\"],\n })\n\n To run all rules:\n page.a11y_audit.config.set_rules({})\n\n Related documentation:\n\n * https://github.com/dequelabs/axe-core/blob/master/doc/API.md#options-parameter-examples\n * https://github.com/dequelabs/axe-core/doc/rule-descriptions.md\n " ]
Please provide a description of the function:def set_scope(self, include=None, exclude=None): context = {} if exclude: context["exclude"] = [[selector] for selector in exclude] if include: context["include"] = [[selector] for selector in include] self.context = json.dumps(context) if context else 'document'
[ "\n Sets `scope` (refered to as `context` in ruleset documentation), which\n defines the elements on a page to include or exclude in the audit. If\n neither `include` nor `exclude` are passed, the entire document will\n be included.\n\n Args:\n\n include (optional): a list of css selectors for elements that\n should be included in the audit. By, default, the entire document\n is included.\n exclude (optional): a list of css selectors for elements that should not\n be included in the audit.\n\n Examples:\n\n To include all items in `#main-content` except `#some-special-elm`::\n\n page.a11y_audit.config.set_scope(\n exclude=[\"#some-special-elm\"],\n include=[\"#main-content\"]\n )\n\n To include all items in the document except `#some-special-elm`::\n\n page.a11y_audit.config.set_scope(\n exclude=[\"#some-special-elm\"],\n )\n\n To include only children of `#some-special-elm`::\n\n page.a11y_audit.config.set_scope(\n include=[\"#some-special-elm\"],\n )\n\n Context documentation:\n\n https://github.com/dequelabs/axe-core/blob/master/doc/API.md#a-context-parameter\n\n Note that this implementation only supports css selectors. It does\n not accept nodes as described in the above documentation resource.\n " ]
Please provide a description of the function:def customize_ruleset(self, custom_ruleset_file=None): custom_file = custom_ruleset_file or os.environ.get( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file: return with open(custom_file, "r") as additional_rules: custom_rules = additional_rules.read() if "var customRules" not in custom_rules: raise A11yAuditConfigError( "Custom rules file must include \"var customRules\"" ) self.custom_rules = custom_rules
[ "\n Updates the ruleset to include a set of custom rules. These rules will\n be _added_ to the existing ruleset or replace the existing rule with\n the same ID.\n\n Args:\n\n custom_ruleset_file (optional): The filepath to the custom rules.\n Defaults to `None`. If `custom_ruleset_file` isn't passed, the\n environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be\n checked. If a filepath isn't specified by either of these\n methods, the ruleset will not be updated.\n\n Raises:\n\n `IOError` if the specified file does not exist.\n\n Examples:\n\n To include the rules defined in `axe-core-custom-rules.js`::\n\n page.a11y_audit.config.customize_ruleset(\n \"axe-core-custom-rules.js\"\n )\n\n Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE`\n to specify the path to the file containing the custom rules.\n\n Documentation for how to write rules:\n\n https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md\n\n An example of a custom rules file can be found at\n https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js\n " ]
Please provide a description of the function:def _check_rules(browser, rules_js, config): audit_run_script = dedent(u).format( rules_js=rules_js, custom_rules=config.custom_rules, context=config.context, options=config.rules ) audit_results_script = dedent(u) browser.execute_script(audit_run_script) def audit_results_check_func(): unicode_results = browser.execute_script(audit_results_script) try: results = json.loads(unicode_results) except (TypeError, ValueError): results = None if results: return True, results return False, None result = Promise( audit_results_check_func, "Timed out waiting for a11y audit results.", timeout=5, ).fulfill() # audit_results is report of accessibility violations for that session # Note that this ruleset doesn't have distinct error/warning levels. audit_results = result.get('violations') return audit_results
[ "\n Run an accessibility audit on the page using the axe-core ruleset.\n\n Args:\n browser: a browser instance.\n rules_js: the ruleset JavaScript as a string.\n config: an AxsAuditConfig instance.\n\n Returns:\n A list of violations.\n\n Related documentation:\n\n https://github.com/dequelabs/axe-core/blob/master/doc/API.md#results-object\n\n __Caution__: You probably don't really want to call this method\n directly! It will be used by `AxeCoreAudit.do_audit`.\n ", "\n {rules_js}\n {custom_rules}\n axe.configure(customRules);\n var callback = function(err, results) {{\n if (err) throw err;\n window.a11yAuditResults = JSON.stringify(results);\n window.console.log(window.a11yAuditResults);\n }}\n axe.run({context}, {options}, callback);\n ", "\n window.console.log(window.a11yAuditResults);\n return window.a11yAuditResults;\n ", "\n A method to check that the audit has completed.\n\n Returns:\n\n (True, results) if the results are available.\n (False, None) if the results aren't available.\n " ]
Please provide a description of the function:def get_errors(audit_results): errors = {"errors": [], "total": 0} if audit_results: errors["errors"].extend(audit_results) for i in audit_results: for _node in i["nodes"]: errors["total"] += 1 return errors
[ "\n Args:\n\n audit_results: results of `AxeCoreAudit.do_audit()`.\n\n Returns:\n\n A dictionary with keys \"errors\" and \"total\".\n " ]
Please provide a description of the function:def format_errors(errors): def _get_message(node): messages = set() try: messages.update([node['message']]) except KeyError: pass for check_group in ['any', 'all', 'none']: try: for check in node[check_group]: messages.update([check.get('message')]) except KeyError: pass messages = messages.difference(['']) return '; '.join(messages) lines = [] for error_type in errors: lines.append(u"Severity: {}".format(error_type.get("impact"))) lines.append(u"Rule ID: {}".format(error_type.get("id"))) lines.append(u"Help URL: {}\n".format(error_type.get('helpUrl'))) for node in error_type['nodes']: msg = u"Message: {}".format(_get_message(node)) html = u"Html: {}".format(node.get('html').encode('utf-8')) target = u"Target: {}".format(node.get('target')) fill_opts = { 'width': 100, 'initial_indent': '\t', 'subsequent_indent': '\t\t', } lines.append(fill(msg, **fill_opts)) lines.append(fill(html, **fill_opts)) lines.append(fill(target, **fill_opts)) lines.append('\n') return '\n'.join(lines)
[ "\n Args:\n\n errors: results of `AxeCoreAudit.get_errors()`.\n\n Returns: The errors as a formatted string.\n ", "\n Get the message to display in the error output.\n " ]
Please provide a description of the function:def report_errors(audit, url): errors = AxeCoreAudit.get_errors(audit) if errors["total"] > 0: msg = u"URL '{}' has {} errors:\n\n{}".format( url, errors["total"], AxeCoreAudit.format_errors(errors["errors"]) ) raise AccessibilityError(msg)
[ "\n Args:\n\n audit: results of `AxeCoreAudit.do_audit()`.\n url: the url of the page being audited.\n\n Raises: `AccessibilityError`\n " ]
Please provide a description of the function:def save_source(driver, name): source = driver.page_source file_name = os.path.join(os.environ.get('SAVED_SOURCE_DIR'), '{name}.html'.format(name=name)) try: with open(file_name, 'wb') as output_file: output_file.write(source.encode('utf-8')) except Exception: # pylint: disable=broad-except msg = u"Could not save the browser page source to {}.".format(file_name) LOGGER.warning(msg)
[ "\n Save the rendered HTML of the browser.\n\n The location of the source can be configured\n by the environment variable `SAVED_SOURCE_DIR`. If not set,\n this defaults to the current working directory.\n\n Args:\n driver (selenium.webdriver): The Selenium-controlled browser.\n name (str): A name to use in the output file name.\n Note that \".html\" is appended automatically\n\n Returns:\n None\n " ]
Please provide a description of the function:def save_screenshot(driver, name): if hasattr(driver, 'save_screenshot'): screenshot_dir = os.environ.get('SCREENSHOT_DIR') if not screenshot_dir: LOGGER.warning('The SCREENSHOT_DIR environment variable was not set; not saving a screenshot') return elif not os.path.exists(screenshot_dir): os.makedirs(screenshot_dir) image_name = os.path.join(screenshot_dir, name + '.png') driver.save_screenshot(image_name) else: msg = ( u"Browser does not support screenshots. " u"Could not save screenshot '{name}'" ).format(name=name) LOGGER.warning(msg)
[ "\n Save a screenshot of the browser.\n\n The location of the screenshot can be configured\n by the environment variable `SCREENSHOT_DIR`. If not set,\n this defaults to the current working directory.\n\n Args:\n driver (selenium.webdriver): The Selenium-controlled browser.\n name (str): A name for the screenshot, which will be used in the output file name.\n\n Returns:\n None\n " ]
Please provide a description of the function:def save_driver_logs(driver, prefix): browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') log_dir = os.environ.get('SELENIUM_DRIVER_LOG_DIR') if not log_dir: LOGGER.warning('The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs') return elif not os.path.exists(log_dir): os.makedirs(log_dir) if browser_name == 'firefox': # Firefox doesn't yet provide logs to Selenium, but does log to a separate file # https://github.com/mozilla/geckodriver/issues/284 # https://firefox-source-docs.mozilla.org/testing/geckodriver/geckodriver/TraceLogs.html log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): dest_path = os.path.join(log_dir, '{}_geckodriver.log'.format(prefix)) copyfile(log_path, dest_path) return log_types = driver.log_types for log_type in log_types: try: log = driver.get_log(log_type) file_name = os.path.join( log_dir, '{}_{}.log'.format(prefix, log_type) ) with open(file_name, 'w') as output_file: for line in log: output_file.write("{}{}".format(dumps(line), '\n')) except: # pylint: disable=bare-except msg = ( u"Could not save browser log of type '{log_type}'. " u"It may be that the browser does not support it." ).format(log_type=log_type) LOGGER.warning(msg, exc_info=True)
[ "\n Save the selenium driver logs.\n\n The location of the driver log files can be configured\n by the environment variable `SELENIUM_DRIVER_LOG_DIR`. If not set,\n this defaults to the current working directory.\n\n Args:\n driver (selenium.webdriver): The Selenium-controlled browser.\n prefix (str): A prefix which will be used in the output file names for the logs.\n\n Returns:\n None\n " ]
Please provide a description of the function:def browser(tags=None, proxy=None, other_caps=None): browser_name = os.environ.get('SELENIUM_BROWSER', 'firefox') def browser_check_func(): # See https://openedx.atlassian.net/browse/TE-701 try: # Get the class and kwargs required to instantiate the browser based on # whether we are using a local or remote one. if _use_remote_browser(SAUCE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( SAUCE_ENV_VARS, tags) elif _use_remote_browser(REMOTE_ENV_VARS): browser_class, browser_args, browser_kwargs = _remote_browser_class( REMOTE_ENV_VARS, tags) else: browser_class, browser_args, browser_kwargs = _local_browser_class( browser_name) # If we are using a proxy, we need extra kwargs passed on intantiation. if proxy: browser_kwargs = _proxy_kwargs(browser_name, proxy, browser_kwargs) # Load in user given desired caps but override with derived caps from above. This is to retain existing # behavior. Only for remote drivers, where various testing services use this info for configuration. if browser_class == webdriver.Remote: desired_caps = other_caps or {} desired_caps.update(browser_kwargs.get('desired_capabilities', {})) browser_kwargs['desired_capabilities'] = desired_caps return True, browser_class(*browser_args, **browser_kwargs) except (socket.error, WebDriverException) as err: msg = str(err) LOGGER.debug('Failed to instantiate browser: ' + msg) return False, None browser_instance = Promise( # There are cases where selenium takes 30s to return with a failure, so in order to try 3 # times, we set a long timeout. If there is a hang on the first try, the timeout will # be enforced. browser_check_func, "Browser is instantiated successfully.", try_limit=3, timeout=95).fulfill() return browser_instance
[ "\n Interpret environment variables to configure Selenium.\n Performs validation, logging, and sensible defaults.\n\n There are three cases:\n\n 1. Local browsers: If the proper environment variables are not all set for the second case,\n then we use a local browser.\n\n * The environment variable `SELENIUM_BROWSER` can be set to specify which local browser to use. The default is \\\n Firefox.\n * Additionally, if a proxy instance is passed and the browser choice is either Chrome or Firefox, then the \\\n browser will be initialized with the proxy server set.\n * The environment variable `SELENIUM_FIREFOX_PATH` can be used for specifying a path to the Firefox binary. \\\n Default behavior is to use the system location.\n * The environment variable `FIREFOX_PROFILE_PATH` can be used for specifying a path to the Firefox profile. \\\n Default behavior is to use a barebones default profile with a few useful preferences set.\n\n 2. Remote browser (not SauceLabs): Set all of the following environment variables, but not all of\n the ones needed for SauceLabs:\n\n * SELENIUM_BROWSER\n * SELENIUM_HOST\n * SELENIUM_PORT\n\n 3. SauceLabs: Set all of the following environment variables:\n\n * SELENIUM_BROWSER\n * SELENIUM_VERSION\n * SELENIUM_PLATFORM\n * SELENIUM_HOST\n * SELENIUM_PORT\n * SAUCE_USER_NAME\n * SAUCE_API_KEY\n\n **NOTE:** these are the environment variables set by the SauceLabs\n Jenkins plugin.\n\n Optionally provide Jenkins info, used to identify jobs to Sauce:\n\n * JOB_NAME\n * BUILD_NUMBER\n\n `tags` is a list of string tags to apply to the SauceLabs\n job. If not using SauceLabs, these will be ignored.\n\n Keyword Args:\n tags (list of str): Tags to apply to the SauceLabs job. If not using SauceLabs, these will be ignored.\n proxy: A proxy instance.\n other_caps (dict of str): Additional desired capabilities to provide to remote WebDriver instances. Note\n that these values will be overwritten by environment variables described above. 
This is only used for\n remote driver instances, where such info is usually used by services for additional configuration and\n metadata.\n\n Returns:\n selenium.webdriver: The configured browser object used to drive tests\n\n Raises:\n BrowserConfigError: The environment variables are not correctly specified.\n ", " Instantiate the browser and return the browser instance " ]
Please provide a description of the function:def _firefox_profile(): profile_dir = os.environ.get(FIREFOX_PROFILE_ENV_VAR) if profile_dir: LOGGER.info(u"Using firefox profile: %s", profile_dir) try: firefox_profile = webdriver.FirefoxProfile(profile_dir) except OSError as err: if err.errno == errno.ENOENT: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} does not exist".format( env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) elif err.errno == errno.EACCES: raise BrowserConfigError( u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \ readable and executable.".format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir)) else: # Some other OSError: raise BrowserConfigError( u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}" .format(env_var=FIREFOX_PROFILE_ENV_VAR, profile_dir=profile_dir, msg=str(err))) else: LOGGER.info("Using default firefox profile") firefox_profile = webdriver.FirefoxProfile() # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) firefox_profile.set_preference('media.navigator.permission.disabled', True) # Disable the initial url fetch to 'learn more' from mozilla (so you don't have to # be online to run bok-choy on firefox) firefox_profile.set_preference('browser.startup.homepage', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url', 'about:blank') firefox_profile.set_preference('startup.homepage_welcome_url.additional', 'about:blank') # Disable fetching an updated version of firefox firefox_profile.set_preference('app.update.enabled', False) # Disable plugin checking firefox_profile.set_preference('plugins.hide_infobar_for_outdated_plugin', True) # Disable health reporter firefox_profile.set_preference('datareporting.healthreport.service.enabled', False) # Disable all data upload (Telemetry and FHR) 
firefox_profile.set_preference('datareporting.policy.dataSubmissionEnabled', False) # Disable crash reporter firefox_profile.set_preference('toolkit.crashreporter.enabled', False) # Disable the JSON Viewer firefox_profile.set_preference('devtools.jsonview.enabled', False) # Grant OS focus to the launched browser so focus-related tests function correctly firefox_profile.set_preference('focusmanager.testmode', True) for function in FIREFOX_PROFILE_CUSTOMIZERS: function(firefox_profile) return firefox_profile
[ "Configure the Firefox profile, respecting FIREFOX_PROFILE_PATH if set" ]
Please provide a description of the function:def _local_browser_class(browser_name): # Log name of local browser LOGGER.info(u"Using local browser: %s [Default is firefox]", browser_name) # Get class of local browser based on name browser_class = BROWSERS.get(browser_name) headless = os.environ.get('BOKCHOY_HEADLESS', 'false').lower() == 'true' if browser_class is None: raise BrowserConfigError( u"Invalid browser name {name}. Options are: {options}".format( name=browser_name, options=", ".join(list(BROWSERS.keys())))) else: if browser_name == 'firefox': # Remove geckodriver log data from previous test cases log_path = os.path.join(os.getcwd(), 'geckodriver.log') if os.path.exists(log_path): os.remove(log_path) firefox_options = FirefoxOptions() firefox_options.log.level = 'trace' if headless: firefox_options.headless = True browser_args = [] browser_kwargs = { 'firefox_profile': _firefox_profile(), 'options': firefox_options, } firefox_path = os.environ.get('SELENIUM_FIREFOX_PATH') firefox_log = os.environ.get('SELENIUM_FIREFOX_LOG') if firefox_path and firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary( firefox_path=firefox_path, log_file=firefox_log) }) elif firefox_path: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(firefox_path=firefox_path) }) elif firefox_log: browser_kwargs.update({ 'firefox_binary': FirefoxBinary(log_file=firefox_log) }) elif browser_name == 'chrome': chrome_options = ChromeOptions() if headless: chrome_options.headless = True # Emulate webcam and microphone for testing purposes chrome_options.add_argument('--use-fake-device-for-media-stream') # Bypasses the security prompt displayed by the browser when it attempts to # access a media device (e.g., a webcam) chrome_options.add_argument('--use-fake-ui-for-media-stream') browser_args = [] browser_kwargs = { 'options': chrome_options, } else: browser_args, browser_kwargs = [], {} return browser_class, browser_args, browser_kwargs
[ "\n Returns class, kwargs, and args needed to instantiate the local browser.\n " ]
def _remote_browser_class(env_vars, tags=None):
    """
    Returns class, kwargs, and args needed to instantiate the remote browser.
    """
    if tags is None:
        tags = []

    # Interpret the environment variables, raising an exception if they're
    # invalid
    envs = _required_envs(env_vars)
    envs.update(_optional_envs())

    # Turn the environment variables into a dictionary of desired capabilities
    caps = _capabilities_dict(envs, tags)

    # 'accessKey' is only present when SauceLabs credentials were supplied.
    if 'accessKey' in caps:
        LOGGER.info(u"Using SauceLabs: %s %s %s", caps['platform'],
                    caps['browserName'], caps['version'])
    else:
        LOGGER.info(u"Using Remote Browser: %s", caps['browserName'])

    # Create and return a new Browser
    # We assume that the WebDriver end-point is running locally (e.g. using
    # SauceConnect)
    url = u"http://{0}:{1}/wd/hub".format(
        envs['SELENIUM_HOST'], envs['SELENIUM_PORT'])

    browser_args = []
    browser_kwargs = {
        'command_executor': url,
        'desired_capabilities': caps,
    }
    if caps['browserName'] == 'firefox':
        browser_kwargs['browser_profile'] = _firefox_profile()

    return webdriver.Remote, browser_args, browser_kwargs
[ "\n Returns class, kwargs, and args needed to instantiate the remote browser.\n " ]
Please provide a description of the function:def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value proxy_dict = { "httpProxy": proxy.proxy, "proxyType": 'manual', } if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs: # This one works for firefox locally wd_proxy = webdriver.common.proxy.Proxy(proxy_dict) browser_kwargs['proxy'] = wd_proxy else: # This one works with chrome, both locally and remote # This one works with firefox remote, but not locally if 'desired_capabilities' not in browser_kwargs: browser_kwargs['desired_capabilities'] = {} browser_kwargs['desired_capabilities']['proxy'] = proxy_dict return browser_kwargs
[ "\n Determines the kwargs needed to set up a proxy based on the\n browser type.\n\n Returns: a dictionary of arguments needed to pass when\n instantiating the WebDriver instance.\n " ]
def _required_envs(env_vars):
    """
    Parse environment variables for required values,
    raising a `BrowserConfigError` if they are not found.

    Args:
        env_vars (list): Names of the environment variables that must be set.

    Returns a `dict` of environment variables.
    """
    envs = {
        key: os.environ.get(key)
        for key in env_vars
    }

    # Check for missing keys
    missing = [key for key, val in list(envs.items()) if val is None]
    if missing:
        msg = (
            u"These environment variables must be set: " + u", ".join(missing)
        )
        raise BrowserConfigError(msg)

    # Check that we support this browser
    if envs['SELENIUM_BROWSER'] not in BROWSERS:
        # Bug fix: corrected typo "Unsuppported" in the error message.
        msg = u"Unsupported browser: {0}".format(envs['SELENIUM_BROWSER'])
        raise BrowserConfigError(msg)

    return envs
[ "\n Parse environment variables for required values,\n raising a `BrowserConfig` error if they are not found.\n\n Returns a `dict` of environment variables.\n " ]
def _optional_envs():
    """
    Collect the optional configuration environment variables that are set,
    raising a `BrowserConfigError` when they are insufficiently specified.

    Returns a `dict` of environment variables.
    """
    envs = {
        name: os.environ[name]
        for name in OPTIONAL_ENV_VARS
        if name in os.environ
    }

    # Jenkins information is all-or-nothing: JOB_NAME and BUILD_NUMBER
    # must be supplied together so Sauce output can be linked to the job.
    if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs:
        raise BrowserConfigError("Missing BUILD_NUMBER environment var")

    if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs:
        raise BrowserConfigError("Missing JOB_NAME environment var")

    return envs
[ "\n Parse environment variables for optional values,\n raising a `BrowserConfig` error if they are insufficiently specified.\n\n Returns a `dict` of environment variables.\n " ]
def _capabilities_dict(envs, tags):
    """
    Convert the dictionary of environment variables to
    a dictionary of desired capabilities to send to the
    Remote WebDriver.

    `tags` is a list of string tags to apply to the SauceLabs job.
    """
    capabilities = {
        'browserName': envs['SELENIUM_BROWSER'],
        # Any non-empty string (even "false") enables insecure certs here;
        # only an unset/empty variable yields False.
        'acceptInsecureCerts': bool(envs.get('SELENIUM_INSECURE_CERTS', False)),
        'video-upload-on-pass': False,
        'sauce-advisor': False,
        'capture-html': True,
        'record-screenshots': True,
        'max-duration': 600,
        'public': 'public restricted',
        'tags': tags,
    }

    # Add SauceLabs specific environment vars if they are set.
    if _use_remote_browser(SAUCE_ENV_VARS):
        sauce_capabilities = {
            'platform': envs['SELENIUM_PLATFORM'],
            'version': envs['SELENIUM_VERSION'],
            'username': envs['SAUCE_USER_NAME'],
            'accessKey': envs['SAUCE_API_KEY'],
        }

        capabilities.update(sauce_capabilities)

    # Optional: Add in Jenkins-specific environment variables
    # to link Sauce output with the Jenkins job
    if 'JOB_NAME' in envs:
        jenkins_vars = {
            'build': envs['BUILD_NUMBER'],
            'name': envs['JOB_NAME'],
        }

        capabilities.update(jenkins_vars)

    return capabilities
[ "\n Convert the dictionary of environment variables to\n a dictionary of desired capabilities to send to the\n Remote WebDriver.\n\n `tags` is a list of string tags to apply to the SauceLabs job.\n " ]
def replace(self, **kwargs):
    """
    Return a copy of this `Query` with the given attributes replaced.

    Keyword Args:
        Attribute/value pairs to overwrite on the copy.

    Returns:
        A copy of the query whose attributes have been updated.

    Raises:
        TypeError: The `Query` does not have the specified attribute.
    """
    # Shallow-copy the query, then detach the transforms list so that
    # mutating the clone never affects the original query.
    clone = copy(self)
    clone.transforms = list(clone.transforms)

    for attr_name, attr_value in kwargs.items():
        if not hasattr(clone, attr_name):
            raise TypeError(u'replace() got an unexpected keyword argument {!r}'.format(attr_name))
        setattr(clone, attr_name, attr_value)

    return clone
[ "\n Return a copy of this `Query`, but with attributes specified\n as keyword arguments replaced by the keyword values.\n\n Keyword Args:\n Attributes/values to replace in the copy.\n\n Returns:\n A copy of the query that has its attributes updated with the specified values.\n\n Raises:\n TypeError: The `Query` does not have the specified attribute.\n " ]
def transform(self, transform, desc=None):
    """
    Create a copy of this query whose results are run through `transform`.

    Args:
        transform (callable): Takes an iterable of values and returns an
            iterable of transformed values.

    Keyword Args:
        desc (str): Description of the transform for log messages; defaults
            to the transform function's name.

    Returns:
        Query
    """
    if desc is None:
        desc = u'transform({})'.format(getattr(transform, '__name__', ''))

    return self.replace(
        transforms=self.transforms + [transform],
        desc_stack=self.desc_stack + [desc],
    )
[ "\n Create a copy of this query, transformed by `transform`.\n\n Args:\n transform (callable): Callable that takes an iterable of values and\n returns an iterable of transformed values.\n\n Keyword Args:\n desc (str): A description of the transform, to use in log messages.\n Defaults to the name of the `transform` function.\n\n Returns:\n Query\n " ]
def map(self, map_fn, desc=None):
    """
    Return a copy of this query whose values are passed through `map_fn`.

    Args:
        map_fn (callable): Takes one value and returns a new value.

    Keyword Args:
        desc (str): Description of the mapping for log messages; defaults
            to the map function's name.

    Returns:
        Query
    """
    if desc is None:
        desc = u'map({})'.format(getattr(map_fn, '__name__', ''))

    # Use a lazy generator so elements are only mapped when consumed.
    return self.transform(lambda xs: (map_fn(x) for x in xs), desc=desc)
[ "\n Return a copy of this query, with the values mapped through `map_fn`.\n\n Args:\n map_fn (callable): A callable that takes a single argument and returns a new value.\n\n Keyword Args:\n desc (str): A description of the mapping transform, for use in log message.\n Defaults to the name of the map function.\n\n Returns:\n Query\n " ]
def filter(self, filter_fn=None, desc=None, **kwargs):
    """
    Return a copy of this query with some values removed.

    Either supply `filter_fn`, a predicate called with each element, or
    attribute keyword filters (e.g. ``description="foo"``) that every kept
    element must match — but not both.

    Keyword Args:
        filter_fn (callable): Predicate deciding whether to keep an element.
        kwargs: Attribute values an element must have to be kept.
        desc (str): Description of the filter for log messages.

    Raises:
        TypeError: neither or both of `filter_fn` and `kwargs` are provided.
    """
    if filter_fn is not None and kwargs:
        raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')
    if filter_fn is None and not kwargs:
        raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')

    if desc is None:
        if filter_fn is not None:
            desc = getattr(filter_fn, '__name__', '')
        elif kwargs:
            desc = u", ".join([u"{}={!r}".format(key, value) for key, value in kwargs.items()])
        desc = u"filter({})".format(desc)

    if kwargs:
        def filter_fn(elem):  # pylint: disable=function-redefined, missing-docstring
            return all(
                getattr(elem, filter_key) == filter_value
                for filter_key, filter_value in kwargs.items()
            )

    return self.transform(lambda xs: (x for x in xs if filter_fn(x)), desc=desc)
[ "\n Return a copy of this query, with some values removed.\n\n Example usages:\n\n .. code:: python\n\n # Returns a query that matches even numbers\n q.filter(filter_fn=lambda x: x % 2)\n\n # Returns a query that matches elements with el.description == \"foo\"\n q.filter(description=\"foo\")\n\n Keyword Args:\n filter_fn (callable): If specified, a function that accepts one argument (the element)\n and returns a boolean indicating whether to include that element in the results.\n\n kwargs: Specify attribute values that an element must have to be included in the results.\n\n desc (str): A description of the filter, for use in log messages.\n Defaults to the name of the filter function or attribute.\n\n Raises:\n TypeError: neither or both of `filter_fn` and `kwargs` are provided.\n " ]
Please provide a description of the function:def _execute(self): data = self.seed_fn() for transform in self.transforms: data = transform(data) return list(data)
[ "\n Run the query, generating data from the `seed_fn` and performing transforms on the results.\n " ]
def execute(self, try_limit=5, try_interval=0.5, timeout=30):
    """
    Execute this query, retrying on Selenium errors.

    Keyword Args:
        try_limit (int): How many times to retry the query.
        try_interval (float): Seconds to wait between tries.
        timeout (float): Maximum seconds to spend retrying.

    Returns:
        The transformed results of the query.

    Raises:
        BrokenPromise: The query never executed without a Selenium error.
    """
    promise = Promise(
        no_error(self._execute),
        u"Executing {!r}".format(self),
        try_limit=try_limit,
        try_interval=try_interval,
        timeout=timeout,
    )
    return promise.fulfill()
[ "\n Execute this query, retrying based on the supplied parameters.\n\n Keyword Args:\n try_limit (int): The number of times to retry the query.\n try_interval (float): The number of seconds to wait between each try (float).\n timeout (float): The maximum number of seconds to spend retrying (float).\n\n Returns:\n The transformed results of the query.\n\n Raises:\n BrokenPromise: The query did not execute without a Selenium error after one or more attempts.\n " ]
def first(self):
    """
    Return a Query that selects only the first element of this Query,
    or a query with no results when no elements are available.

    Returns:
        Query
    """
    def _head(values):  # pylint: disable=missing-docstring, invalid-name
        iterator = iter(values)
        try:
            return [six.next(iterator)]
        except StopIteration:
            # No elements at all -> empty result set.
            return []

    return self.transform(_head, 'first')
[ "\n Return a Query that selects only the first element of this Query.\n If no elements are available, returns a query with no results.\n\n Example usage:\n\n .. code:: python\n\n >> q = Query(lambda: list(range(5)))\n >> q.first.results\n [0]\n\n Returns:\n Query\n " ]
def nth(self, index):
    """
    Return a query selecting the element at `index` (0-based), or a query
    with no results when the index is out of range.

    Args:
        index (int): Position of the element to select.

    Returns:
        Query
    """
    def _pick(values):  # pylint: disable=missing-docstring, invalid-name
        try:
            return [next(islice(iter(values), index, None))]
        # Gracefully handle (a) running out of elements, and (b) negative indices
        except (StopIteration, ValueError):
            return []

    return self.transform(_pick, 'nth')
[ "\n Return a query that selects the element at `index` (starts from 0).\n If no elements are available, returns a query with no results.\n\n Example usage:\n\n .. code:: python\n\n >> q = Query(lambda: list(range(5)))\n >> q.nth(2).results\n [2]\n\n Args:\n index (int): The index of the element to select (starts from 0)\n\n Returns:\n Query\n " ]
def attrs(self, attribute_name):
    """
    Retrieve the HTML attribute `attribute_name` from every matched element.

    Args:
        attribute_name (str): Name of the attribute to read.

    Returns:
        A list with one attribute value per matched element.
    """
    description = u'attrs({!r})'.format(attribute_name)
    return self.map(lambda el: el.get_attribute(attribute_name), description).results
[ "\n Retrieve HTML attribute values from the elements matched by the query.\n\n Example usage:\n\n .. code:: python\n\n # Assume that the query matches html elements:\n # <div class=\"foo\"> and <div class=\"bar\">\n >> q.attrs('class')\n ['foo', 'bar']\n\n Args:\n attribute_name (str): The name of the attribute values to retrieve.\n\n Returns:\n A list of attribute values for `attribute_name`.\n " ]
def selected(self):
    """
    Check whether every matched element is selected.

    Returns:
        bool: True only when at least one element matched and all of
        them report `is_selected()`.
    """
    states = self.map(lambda el: el.is_selected(), 'selected').results
    # An empty match is "not selected" rather than vacuously True.
    return all(states) if states else False
[ "\n Check whether all the matched elements are selected.\n\n Returns:\n bool\n " ]
def visible(self):
    """
    Check whether every matched element is displayed.

    Returns:
        bool: True only when at least one element matched and all of
        them report `is_displayed()`.
    """
    states = self.map(lambda el: el.is_displayed(), 'visible').results
    # No matches means "not visible", not vacuously True.
    return all(states) if states else False
[ "\n Check whether all matched elements are visible.\n\n Returns:\n bool\n " ]
def is_focused(self):
    """
    Check whether *at least one* matched element is the document's
    active element (i.e. currently focused). Returns False when no
    matched element is focused.

    Returns:
        bool
    """
    active_el = self.browser.execute_script("return document.activeElement")
    matches = self.map(lambda el: el == active_el, 'focused').results
    # any() over matches; an empty match set counts as "not focused".
    return any(matches) if matches else False
[ "\n Checks that *at least one* matched element is focused. More\n specifically, it checks whether the element is document.activeElement.\n If no matching element is focused, this returns `False`.\n\n Returns:\n bool\n " ]
def fill(self, text):
    """
    Set the text value of every matched element to `text` by clearing
    the element first and then sending the keys.

    Args:
        text (str): Text used to fill the element (text field/area).

    Returns:
        None
    """
    def _fill(elem):  # pylint: disable=missing-docstring
        elem.clear()
        elem.send_keys(text)

    self.map(_fill, u'fill({!r})'.format(text)).execute()
[ "\n Set the text value of each matched element to `text`.\n\n Example usage:\n\n .. code:: python\n\n # Set the text of the first element matched by the query to \"Foo\"\n q.first.fill('Foo')\n\n Args:\n text (str): The text used to fill the element (usually a text field or text area).\n\n Returns:\n None\n " ]
def url_converter(self, *args, **kwargs):
    """
    Return the custom URL converter for the given file name.

    Wraps the upstream converter so that references the upstream storage
    cannot resolve are left untouched instead of raising ValueError.
    """
    upstream_converter = super(PatchedManifestStaticFilesStorage, self).url_converter(*args, **kwargs)

    def converter(matchobj):
        try:
            # Bug fix: the upstream result must be returned; previously it
            # was discarded, so every resolvable URL converted to None.
            return upstream_converter(matchobj)
        except ValueError:
            # e.g. a static file 'static/media/logo.6a30f15f.svg' could not be found
            # because the upstream converter stripped 'static/' from the path
            matched, url = matchobj.groups()
            return matched

    return converter
[ "\n Return the custom URL converter for the given file name.\n " ]
def prepare_headers(table, bound_columns):
    """
    Decorate each sortable bound column with its current sorting state and a
    URL that toggles the sort order, based on the request's query string.

    :type bound_columns: list of BoundColumn
    """
    if table.request is None:
        return
    for column in bound_columns:
        if column.sortable:
            params = table.request.GET.copy()
            param_path = _with_path_prefix(table, 'order')
            order = table.request.GET.get(param_path, None)
            start_sort_desc = column.sort_default_desc
            params[param_path] = column.name if not start_sort_desc else '-' + column.name
            column.is_sorting = False
            if order is not None:
                is_desc = order.startswith('-')
                order_field = order if not is_desc else order[1:]
                if order_field == column.name:
                    # This column is the one currently sorted on: clicking
                    # it again flips the direction.
                    new_order = order_field if is_desc else ('-' + order_field)
                    params[param_path] = new_order
                    column.sort_direction = DESCENDING if is_desc else ASCENDING
                    column.is_sorting = True
            column.url = "?" + params.urlencode()
        else:
            column.is_sorting = False
[ "\n :type bound_columns: list of BoundColumn\n " ]
def order_by_on_list(objects, order_field, is_desc=False):
    """
    Sort a plain list of objects django-style, in place.

    :param objects: list of objects to sort
    :param order_field: field name, follows django conventions, so "foo__bar"
        means `foo.bar`; may also be a callable used directly as sort key.
    :param is_desc: reverse the sorting
    :return:
    """
    if callable(order_field):
        objects.sort(key=order_field, reverse=is_desc)
        return

    def order_key(item):
        # None values sort via the MIN sentinel so they group together.
        attr_value = getattr_path(item, order_field)
        return MIN if attr_value is None else attr_value

    objects.sort(key=order_key, reverse=is_desc)
[ "\n Utility function to sort objects django-style even for non-query set collections\n\n :param objects: list of objects to sort\n :param order_field: field name, follows django conventions, so \"foo__bar\" means `foo.bar`, can be a callable.\n :param is_desc: reverse the sorting\n :return:\n " ]
def default_cell_formatter(table, column, row, value, **_):
    """
    Render a cell value: apply any formatter registered for the value's
    type, map None to the empty string, and HTML-escape the result.

    :type column: tri.table.Column
    """
    formatter = _cell_formatters.get(type(value))
    if formatter:
        value = formatter(table=table, column=column, row=row, value=value)
    return '' if value is None else conditional_escape(value)
[ "\n :type column: tri.table.Column\n " ]
def django_pre_2_0_table_context(
        request,
        table,
        links=None,
        paginate_by=None,
        page=None,
        extra_context=None,
        paginator=None,
        show_hits=False,
        hit_label='Items'):
    """
    Build the template context for rendering `table` with the pre-Django-2.0
    pagination API (Paginator.page), including link groups and page metadata.

    :type table: Table
    """
    if extra_context is None:  # pragma: no cover
        extra_context = {}

    assert table.data is not None

    links, grouped_links = evaluate_and_group_links(links, table=table)

    base_context = {
        'links': links,
        'grouped_links': grouped_links,
        'table': table,
    }

    if paginate_by:
        try:
            # The query string may override the configured page size.
            paginate_by = int(request.GET.get('page_size', paginate_by))
        except ValueError:  # pragma: no cover
            pass
        if paginator is None:
            paginator = Paginator(table.data, paginate_by)
            object_list = None
        else:  # pragma: no cover
            object_list = table.data
        if not page:
            page = request.GET.get('page', 1)
        try:
            page = int(page)
            # Clamp the requested page into the valid range.
            if page < 1:  # pragma: no cover
                page = 1
            if page > paginator.num_pages:  # pragma: no cover
                page = paginator.num_pages
            if object_list is None:
                table.data = paginator.page(page).object_list
        except (InvalidPage, ValueError):  # pragma: no cover
            if page == 1:
                table.data = []
            else:
                raise Http404

        base_context.update({
            'request': request,
            'is_paginated': paginator.num_pages > 1,
            'results_per_page': paginate_by,
            'has_next': paginator.num_pages > page,
            'has_previous': page > 1,
            'page_size': paginate_by,
            'page': page,
            'next': page + 1,
            'previous': page - 1,
            'pages': paginator.num_pages,
            'hits': paginator.count,
            'show_hits': show_hits,
            'hit_label': hit_label})
    else:  # pragma: no cover
        base_context.update({
            'is_paginated': False})

    base_context.update(extra_context)
    return base_context
[ "\n :type table: Table\n " ]
def table_context(request,
                  table,
                  links=None,
                  paginate_by=None,
                  page=None,
                  extra_context=None,
                  paginator=None,
                  show_hits=False,
                  hit_label='Items'):
    """
    Build the template context for rendering `table`, delegating to the
    pre-2.0 implementation on old Django versions and otherwise using the
    Paginator.get_page API.

    :type table: Table
    """
    from django import __version__ as django_version
    django_version = tuple([int(x) for x in django_version.split('.')])

    if django_version < (2, 0):
        # Bug fix: the explicit `page` argument was previously dropped when
        # delegating, so callers could not pre-select a page on Django < 2.0.
        return django_pre_2_0_table_context(
            request, table, links=links, paginate_by=paginate_by, page=page,
            extra_context=extra_context, paginator=paginator,
            show_hits=show_hits, hit_label=hit_label)

    if extra_context is None:  # pragma: no cover
        extra_context = {}

    assert table.data is not None

    links, grouped_links = evaluate_and_group_links(links, table=table)

    base_context = {
        'links': links,
        'grouped_links': grouped_links,
        'table': table,
    }

    if paginate_by:
        try:
            # The query string may override the configured page size.
            paginate_by = int(request.GET.get('page_size', paginate_by))
        except ValueError:  # pragma: no cover
            pass
        if paginator is None:
            paginator = Paginator(table.data, paginate_by)
        if not page:
            page = request.GET.get('page')  # None is translated to the default page in paginator.get_page
        try:
            page_obj = paginator.get_page(page)
            table.data = page_obj.object_list
        except (InvalidPage, ValueError):  # pragma: no cover
            raise Http404

        base_context.update({
            'request': request,
            'is_paginated': paginator.num_pages > 1,
            'results_per_page': paginate_by,
            'has_next': page_obj.has_next(),
            'has_previous': page_obj.has_previous(),
            'next': page_obj.next_page_number() if page_obj.has_next() else None,
            'previous': page_obj.previous_page_number() if page_obj.has_previous() else None,
            'page': page_obj.number,
            'pages': paginator.num_pages,
            'hits': paginator.count,
            'show_hits': show_hits,
            'hit_label': hit_label,
        })
    else:  # pragma: no cover
        base_context.update({
            'is_paginated': False})

    base_context.update(extra_context)
    return base_context
[ "\n :type table: Table\n " ]
def render_table(request,
                 table,
                 links=None,
                 context=None,
                 template='tri_table/list.html',
                 blank_on_empty=False,
                 paginate_by=40,  # pragma: no mutate
                 page=None,
                 paginator=None,
                 show_hits=False,
                 hit_label='Items',
                 post_bulk_edit=lambda table, queryset, updates: None):
    """
    Render a table. This automatically handles pagination, sorting, filtering and bulk operations.

    :param request: the request object. This is set on the table object so that it is available for lambda expressions.
    :param table: an instance of Table
    :param links: a list of instances of Link
    :param context: dict of extra context parameters
    :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
    :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
    :param show_hits: Display how many items there are total in the paginator.
    :param hit_label: Label for the show_hits display.
    :return: a string with the rendered HTML table
    """
    if not context:
        context = {}

    # A Namespace is a table factory: instantiate it first.
    if isinstance(table, Namespace):
        table = table()

    assert isinstance(table, Table), table
    table.request = request

    should_return, dispatch_result = handle_dispatch(request=request, obj=table)
    if should_return:
        return dispatch_result

    context['bulk_form'] = table.bulk_form
    context['query_form'] = table.query_form
    context['tri_query_error'] = table.query_error

    if table.bulk_form and request.method == 'POST':
        if table.bulk_form.is_valid():
            queryset = table.bulk_queryset()

            # Only fields with a bound attribute and a non-empty value
            # participate in the bulk update.
            updates = {
                field.name: field.value
                for field in table.bulk_form.fields
                if field.value is not None and field.value != '' and field.attr is not None
            }
            queryset.update(**updates)

            post_bulk_edit(table=table, queryset=queryset, updates=updates)

            return HttpResponseRedirect(request.META['HTTP_REFERER'])

    table.context = table_context(
        request,
        table=table,
        links=links,
        paginate_by=paginate_by,
        page=page,
        extra_context=context,
        paginator=paginator,
        show_hits=show_hits,
        hit_label=hit_label,
    )

    if not table.data and blank_on_empty:
        return ''

    if table.query_form and not table.query_form.is_valid():
        table.data = None
        table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')

    return render_template(request, template, table.context)
[ "\n Render a table. This automatically handles pagination, sorting, filtering and bulk operations.\n\n :param request: the request object. This is set on the table object so that it is available for lambda expressions.\n :param table: an instance of Table\n :param links: a list of instances of Link\n :param context: dict of extra context parameters\n :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.\n :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty\n :param show_hits: Display how many items there are total in the paginator.\n :param hit_label: Label for the show_hits display.\n :return: a string with the rendered HTML table\n " ]
def render_table_to_response(*args, **kwargs):
    """
    Shortcut for `HttpResponse(render_table(*args, **kwargs))`
    """
    rendered = render_table(*args, **kwargs)
    # render_table may already return a full HttpResponse (e.g. a redirect);
    # pass those through unchanged.
    if isinstance(rendered, HttpResponse):  # pragma: no cover
        return rendered
    return HttpResponse(rendered)
[ "\n Shortcut for `HttpResponse(render_table(*args, **kwargs))`\n " ]
def generate_duid(mac):
    """Build a 10-hex-group DUID from a MAC address string.

    Layout: 0x00 + the MAC's last 3 hex groups + the full 6-group MAC.
    """
    if not (mac and isinstance(mac, six.string_types)):
        raise ValueError("Invalid argument was passed")
    # mac[9:] is the last three colon-separated hex groups of the MAC.
    return "00:" + mac[9:] + ":" + mac
[ "DUID is consisted of 10 hex numbers.\n\n 0x00 + mac with last 3 hex + mac with 6 hex\n " ]
def try_value_to_bool(value, strict_mode=True):
    """Try to convert a value into a boolean.

    In strict mode only the exact strings 'True' and 'False' convert;
    anything else is returned unchanged. In non-strict mode the value is
    lowercased first and 'true'/'on'/'yes' map to True while
    'false'/'off'/'no' map to False; all other values are returned
    unchanged.
    """
    if strict_mode:
        truthy, falsy = ('True',), ('False',)
        candidate = value
    else:
        truthy = ('true', 'on', 'yes')
        falsy = ('false', 'off', 'no')
        candidate = str(value).lower()

    if candidate in truthy:
        return True
    if candidate in falsy:
        return False
    return value
[ "Tries to convert value into boolean.\n\n strict_mode is True:\n - Only string representation of str(True) and str(False)\n are converted into booleans;\n - Otherwise unchanged incoming value is returned;\n\n strict_mode is False:\n - Anything that looks like True or False is converted into booleans.\n Values accepted as True:\n - 'true', 'on', 'yes' (case independent)\n Values accepted as False:\n - 'false', 'off', 'no' (case independent)\n - all other values are returned unchanged\n " ]
def create_network(self, net_view_name, cidr, nameservers=None,
                   members=None, gateway_ip=None, dhcp_trel_ip=None,
                   network_extattrs=None):
    """Create NIOS Network and prepare DHCP options.

    Some DHCP options are valid for IPv4 only, so just skip processing
    them for IPv6 case.

    :param net_view_name: network view name
    :param cidr: network to allocate, example '172.23.23.0/24'
    :param nameservers: list of name servers hosts/ip
    :param members: list of objects.AnyMember objects that are expected
        to serve dhcp for created network
    :param gateway_ip: gateway ip for the network (valid for IPv4 only)
    :param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
    :param network_extattrs: extensible attributes for network (instance of
        objects.EA)
    :returns: created network (instance of objects.Network)
    """
    ipv4 = ib_utils.determine_ip_version(cidr) == 4
    options = []
    if nameservers:
        options.append(obj.DhcpOption(name='domain-name-servers',
                                      value=",".join(nameservers)))
    if ipv4 and gateway_ip:
        options.append(obj.DhcpOption(name='routers', value=gateway_ip))
    if ipv4 and dhcp_trel_ip:
        # DHCP option 54: server identifier pointing at the relay.
        options.append(obj.DhcpOption(name='dhcp-server-identifier',
                                      num=54,
                                      value=dhcp_trel_ip))
    return obj.Network.create(self.connector,
                              network_view=net_view_name,
                              cidr=cidr,
                              members=members,
                              options=options,
                              extattrs=network_extattrs,
                              check_if_exists=False)
[ "Create NIOS Network and prepare DHCP options.\n\n Some DHCP options are valid for IPv4 only, so just skip processing\n them for IPv6 case.\n\n :param net_view_name: network view name\n :param cidr: network to allocate, example '172.23.23.0/24'\n :param nameservers: list of name servers hosts/ip\n :param members: list of objects.AnyMember objects that are expected\n to serve dhcp for created network\n :param gateway_ip: gateway ip for the network (valid for IPv4 only)\n :param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)\n :param network_extattrs: extensible attributes for network (instance of\n objects.EA)\n :returns: created network (instance of objects.Network)\n " ]
def create_ip_range(self, network_view, start_ip, end_ip, network,
                    disable, range_extattrs):
    """Create an IPRange in NIOS, failing if it already exists."""
    range_kwargs = {
        'network_view': network_view,
        'start_addr': start_ip,
        'end_addr': end_ip,
        'cidr': network,
        'disable': disable,
        'extattrs': range_extattrs,
        # Always attempt creation; duplicates surface as an error.
        'check_if_exists': False,
    }
    return obj.IPRange.create(self.connector, **range_kwargs)
[ "Creates IPRange or fails if already exists." ]
def network_exists(self, network_view, cidr):
    """Deprecated, use get_network() instead."""
    LOG.warning(
        "DEPRECATION WARNING! Using network_exists() is deprecated "
        "and to be removed in next releases. "
        "Use get_network() or objects.Network.search instead")
    found = obj.Network.search(self.connector,
                               network_view=network_view,
                               cidr=cidr)
    return found is not None
[ "Deprecated, use get_network() instead." ]
def delete_objects_associated_with_a_record(self, name, view, delete_list):
    """Deletes records associated with record:a or record:aaaa.

    Only the 'record:cname' and 'record:txt' types listed in `delete_list`
    are handled; each matching object in the given DNS view is deleted by
    its object reference.
    """
    search_objects = {}
    if 'record:cname' in delete_list:
        # CNAMEs point at the record via their 'canonical' field.
        search_objects['record:cname'] = 'canonical'
    if 'record:txt' in delete_list:
        search_objects['record:txt'] = 'name'

    if not search_objects:
        return

    for obj_type, search_type in search_objects.items():
        payload = {'view': view,
                   search_type: name}
        ib_objs = self.connector.get_object(obj_type, payload)
        if ib_objs:
            for ib_obj in ib_objs:
                self.delete_object_by_ref(ib_obj['_ref'])
[ "Deletes records associated with record:a or record:aaaa." ]
def _parse_options(self, options):
    """Copy needed options to self.

    `options` may be a dict or an object with attributes. Missing values
    fall back to DEFAULT_OPTIONS; options defined nowhere, or blank
    host/credentials, raise InfobloxConfigException.
    """
    attributes = ('host', 'wapi_version', 'username', 'password',
                  'ssl_verify', 'http_request_timeout', 'max_retries',
                  'http_pool_connections', 'http_pool_maxsize',
                  'silent_ssl_warnings', 'log_api_calls_as_info',
                  'max_results', 'paging')
    for attr in attributes:
        if isinstance(options, dict) and attr in options:
            setattr(self, attr, options[attr])
        elif hasattr(options, attr):
            value = getattr(options, attr)
            setattr(self, attr, value)
        elif attr in self.DEFAULT_OPTIONS:
            setattr(self, attr, self.DEFAULT_OPTIONS[attr])
        else:
            msg = "WAPI config error. Option %s is not defined" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    # These must be non-blank, not merely defined.
    for attr in ('host', 'username', 'password'):
        if not getattr(self, attr):
            msg = "WAPI config error. Option %s can not be blank" % attr
            raise ib_ex.InfobloxConfigException(msg=msg)

    self.wapi_url = "https://%s/wapi/v%s/" % (self.host, self.wapi_version)
    self.cloud_api_enabled = self.is_cloud_wapi(self.wapi_version)
[ "Copy needed options to self" ]
def _parse_reply(request):
    """Parse a NIOS reply body as JSON.

    Raises InfobloxConnectionError carrying the raw content when the
    reply is not valid JSON.
    """
    content = request.content
    try:
        parsed = jsonutils.loads(content)
    except ValueError:
        raise ib_ex.InfobloxConnectionError(reason=content)
    return parsed
[ "Tries to parse reply from NIOS.\n\n Raises exception with content if reply is not in json format\n " ]
def get_object(self, obj_type, payload=None, return_fields=None,
               extattrs=None, force_proxy=False, max_results=None,
               paging=False):
    """Retrieve a list of Infoblox objects of type 'obj_type'.

    If the request is against a cloud WAPI and proxying is not forced, two
    attempts may be made: first without proxying to the Grid Master, then
    (if nothing was found) with proxying enabled.

    Args:
        obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
        extattrs (dict): List of Extensible Attributes
        force_proxy (bool): Set _proxy_search flag to process requests on GM
        max_results (int): Maximum number of objects to be returned;
            overrides the connector-level option when given.
        paging (bool): Enables paging for the WAPI calls.

    Returns:
        A list of the Infoblox objects requested, or None.
    """
    self._validate_obj_type_or_die(obj_type, obj_type_expected=False)

    # max_results passed to get_object has priority over
    # one defined as connector option
    if max_results is None and self.max_results:
        max_results = self.max_results

    if paging is False and self.paging:
        paging = self.paging

    query_params = self._build_query_params(payload=payload,
                                            return_fields=return_fields,
                                            max_results=max_results,
                                            paging=paging)

    # Clear proxy flag if wapi version is too old (non-cloud)
    proxy_flag = self.cloud_api_enabled and force_proxy

    ib_object = self._handle_get_object(obj_type, query_params, extattrs,
                                        proxy_flag)
    if ib_object:
        return ib_object

    # Do second get call with force_proxy if not done yet
    if self.cloud_api_enabled and not force_proxy:
        ib_object = self._handle_get_object(obj_type, query_params,
                                            extattrs, proxy_flag=True)
        if ib_object:
            return ib_object

    return None
[ "Retrieve a list of Infoblox objects of type 'obj_type'\n\n Some get requests like 'ipv4address' should be always\n proxied to GM on Hellfire\n If request is cloud and proxy is not forced yet,\n then plan to do 2 request:\n - the first one is not proxied to GM\n - the second is proxied to GM\n\n Args:\n obj_type (str): Infoblox object type, e.g. 'network',\n 'range', etc.\n payload (dict): Payload with data to send\n return_fields (list): List of fields to be returned\n extattrs (dict): List of Extensible Attributes\n force_proxy (bool): Set _proxy_search flag\n to process requests on GM\n max_results (int): Maximum number of objects to be returned.\n If set to a negative number the appliance will return an error\n when the number of returned objects would exceed the setting.\n The default is -1000. If this is set to a positive number,\n the results will be truncated when necessary.\n paging (bool): Enables paging to wapi calls if paging = True,\n it uses _max_results to set paging size of the wapi calls.\n If _max_results is negative it will take paging size as 1000.\n\n Returns:\n A list of the Infoblox objects requested\n Raises:\n InfobloxObjectNotFound\n " ]
def create_object(self, obj_type, payload, return_fields=None):
    """Create an Infoblox object of type 'obj_type'.

    Args:
        obj_type (str): Infoblox object type, e.g. 'network', 'range', etc.
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
    Returns:
        The object reference of the newly created object
    Raises:
        InfobloxException
    """
    self._validate_obj_type_or_die(obj_type)

    query_params = self._build_query_params(return_fields=return_fields)

    url = self._construct_url(obj_type, query_params)
    opts = self._get_request_options(data=payload)
    self._log_request('post', url, opts)
    if(self.session.cookies):
        # the first 'get' or 'post' action will generate a cookie
        # after that, we don't need to re-authenticate
        self.session.auth = None
    r = self.session.post(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.CREATED:
        response = utils.safe_json_load(r.content)
        already_assigned = 'is assigned to another network view'
        # Distinguish the "member already assigned" failure so callers can
        # handle it specifically.
        if response and already_assigned in response.get('text'):
            exception = ib_ex.InfobloxMemberAlreadyAssigned
        else:
            exception = ib_ex.InfobloxCannotCreateObject
        raise exception(
            response=response,
            obj_type=obj_type,
            content=r.content,
            args=payload,
            code=r.status_code)

    return self._parse_reply(r)
[ "Create an Infoblox object of type 'obj_type'\n\n Args:\n obj_type (str): Infoblox object type,\n e.g. 'network', 'range', etc.\n payload (dict): Payload with data to send\n return_fields (list): List of fields to be returned\n Returns:\n The object reference of the newly create object\n Raises:\n InfobloxException\n " ]
def update_object(self, ref, payload, return_fields=None):
    """Update an Infoblox object.

    Args:
        ref (str): Infoblox object reference
        payload (dict): Payload with data to send
        return_fields (list): List of fields to be returned
    Returns:
        The object reference of the updated object
    Raises:
        InfobloxException
    """
    query_params = self._build_query_params(return_fields=return_fields)

    opts = self._get_request_options(data=payload)
    url = self._construct_url(ref, query_params)
    self._log_request('put', url, opts)
    r = self.session.put(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.ok:
        # May raise a more specific "service unavailable" error first.
        self._check_service_availability('update', r, ref)

        raise ib_ex.InfobloxCannotUpdateObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)

    return self._parse_reply(r)
[ "Update an Infoblox object\n\n Args:\n ref (str): Infoblox object reference\n payload (dict): Payload with data to send\n Returns:\n The object reference of the updated object\n Raises:\n InfobloxException\n " ]
def delete_object(self, ref, delete_arguments=None):
    """Remove an Infoblox object.

    Args:
        ref (str): Object reference
        delete_arguments (dict): Extra delete arguments
    Returns:
        The object reference of the removed object
    Raises:
        InfobloxException
    """
    opts = self._get_request_options()
    # Non-dict values (including None) are replaced by an empty dict.
    if not isinstance(delete_arguments, dict):
        delete_arguments = {}

    url = self._construct_url(ref, query_params=delete_arguments)
    self._log_request('delete', url, opts)
    r = self.session.delete(url, **opts)

    self._validate_authorized(r)

    if r.status_code != requests.codes.ok:
        # May raise a more specific "service unavailable" error first.
        self._check_service_availability('delete', r, ref)

        raise ib_ex.InfobloxCannotDeleteObject(
            response=jsonutils.loads(r.content),
            ref=ref,
            content=r.content,
            code=r.status_code)

    return self._parse_reply(r)
[ "Remove an Infoblox object\n\n Args:\n ref (str): Object reference\n delete_arguments (dict): Extra delete arguments\n Returns:\n The object reference of the removed object\n Raises:\n InfobloxException\n " ]
Please provide a description of the function:def _remap_fields(cls, kwargs): mapped = {} for key in kwargs: if key in cls._remap: mapped[cls._remap[key]] = kwargs[key] else: mapped[key] = kwargs[key] return mapped
[ "Map fields from kwargs into dict acceptable by NIOS" ]
def from_dict(cls, eas_from_nios):
    """Build an EA instance from the extensible attributes in a NIOS reply.

    Returns None for an empty/missing reply. String booleans in the
    values are converted to real booleans on the way in.
    """
    if not eas_from_nios:
        return
    converted = {}
    for name in eas_from_nios:
        converted[name] = cls._process_value(ib_utils.try_value_to_bool,
                                             eas_from_nios[name]['value'])
    return cls(converted)
[ "Converts extensible attributes from the NIOS reply." ]
Please provide a description of the function:def to_dict(self): return {name: {'value': self._process_value(str, value)} for name, value in self._ea_dict.items() if not (value is None or value == "" or value == [])}
[ "Converts extensible attributes into the format suitable for NIOS." ]
Please provide a description of the function:def _process_value(func, value): if isinstance(value, (list, tuple)): return [func(item) for item in value] return func(value)
[ "Applies processing method for value or each element in it.\n\n :param func: method to be called with value\n :param value: value to process\n :return: if 'value' is list/tupe, returns iterable with func results,\n else func result is returned\n " ]
Please provide a description of the function:def from_dict(cls, connector, ip_dict): mapping = cls._global_field_processing.copy() mapping.update(cls._custom_field_processing) # Process fields that require building themselves as objects for field in mapping: if field in ip_dict: ip_dict[field] = mapping[field](ip_dict[field]) return cls(connector, **ip_dict)
[ "Build dict fields as SubObjects if needed.\n\n Checks if lambda for building object from dict exists.\n _global_field_processing and _custom_field_processing rules\n are checked.\n " ]
Please provide a description of the function:def field_to_dict(self, field): value = getattr(self, field) if isinstance(value, (list, tuple)): return [self.value_to_dict(val) for val in value] return self.value_to_dict(value)
[ "Read field value and converts to dict if possible" ]
Please provide a description of the function:def to_dict(self, search_fields=None): fields = self._fields if search_fields == 'update': fields = self._search_for_update_fields elif search_fields == 'all': fields = self._all_searchable_fields elif search_fields == 'exclude': # exclude search fields for update actions, # but include updateable_search_fields fields = [field for field in self._fields if field in self._updateable_search_fields or field not in self._search_for_update_fields] return {field: self.field_to_dict(field) for field in fields if getattr(self, field, None) is not None}
[ "Builds dict without None object fields" ]
Please provide a description of the function:def _ip_setter(self, ipaddr_name, ipaddrs_name, ips): if isinstance(ips, six.string_types): setattr(self, ipaddr_name, ips) elif isinstance(ips, (list, tuple)) and isinstance(ips[0], IP): setattr(self, ipaddr_name, ips[0].ip) setattr(self, ipaddrs_name, ips) elif isinstance(ips, IP): setattr(self, ipaddr_name, ips.ip) setattr(self, ipaddrs_name, [ips]) elif ips is None: setattr(self, ipaddr_name, None) setattr(self, ipaddrs_name, None) else: raise ValueError( "Invalid format of ip passed in: %s." "Should be string or list of NIOS IP objects." % ips)
[ "Setter for ip fields\n\n Accept as input string or list of IP instances.\n String case:\n only ipvXaddr is going to be filled, that is enough to perform\n host record search using ip\n List of IP instances case:\n ipvXaddrs is going to be filled with ips content,\n so create can be issues, since fully prepared IP objects in place.\n ipXaddr is also filled to be able perform search on NIOS\n and verify that no such host record exists yet.\n " ]
Please provide a description of the function:def mac(self, mac): self._mac = mac if mac: self.duid = ib_utils.generate_duid(mac) elif not hasattr(self, 'duid'): self.duid = None
[ "Set mac and duid fields\n\n To have common interface with FixedAddress accept mac address\n and set duid as a side effect.\n 'mac' was added to _shadow_fields to prevent sending it out over wapi.\n " ]
Please provide a description of the function:def render_property(property): # This ain't the prettiest thing, but it should get the job done. # I don't think we have anything more elegant available at bosh-manifest-generation time. # See https://docs.pivotal.io/partners/product-template-reference.html for list. if 'type' in property and property['type'] in PROPERTY_FIELDS: fields = {} for field in PROPERTY_FIELDS[property['type']]: if type(field) is tuple: fields[field[0]] = '(( .properties.{}.{} ))'.format(property['name'], field[1]) else: fields[field] = '(( .properties.{}.{} ))'.format(property['name'], field) out = { property['name']: fields } else: if property.get('is_reference', False): out = { property['name']: property['default'] } else: out = { property['name']: '(( .properties.{}.value ))'.format(property['name']) } return out
[ "Render a property for bosh manifest, according to its type." ]
Please provide a description of the function:def match(obj, matchers=TYPES): buf = get_bytes(obj) for matcher in matchers: if matcher.match(buf): return matcher return None
[ "\n Matches the given input againts the available\n file type matchers.\n\n Args:\n obj: path to file, bytes or bytearray.\n\n Returns:\n Type instance if type matches. Otherwise None.\n\n Raises:\n TypeError: if obj is not a supported type.\n " ]
Please provide a description of the function:def signature(array): length = len(array) index = _NUM_SIGNATURE_BYTES if length > _NUM_SIGNATURE_BYTES else length return array[:index]
[ "\n Returns the first 262 bytes of the given bytearray\n as part of the file header signature.\n\n Args:\n array: bytearray to extract the header signature.\n\n Returns:\n First 262 bytes of the file content as bytearray type.\n " ]
Please provide a description of the function:def get_bytes(obj): try: obj = obj.read(_NUM_SIGNATURE_BYTES) except AttributeError: # duck-typing as readable failed - we'll try the other options pass kind = type(obj) if kind is bytearray: return signature(obj) if kind is str: return get_signature_bytes(obj) if kind is bytes: return signature(obj) if kind is memoryview: return signature(obj).tolist() raise TypeError('Unsupported type as file input: %s' % kind)
[ "\n Infers the input type and reads the first 262 bytes,\n returning a sliced bytearray.\n\n Args:\n obj: path to readable, file, bytes or bytearray.\n\n Returns:\n First 262 bytes of the file content as bytearray type.\n\n Raises:\n TypeError: if obj is not a supported type.\n " ]
Please provide a description of the function:def get_type(mime=None, ext=None): for kind in types: if kind.extension is ext or kind.mime is mime: return kind return None
[ "\n Returns the file type instance searching by\n MIME type or file extension.\n\n Args:\n ext: file extension string. E.g: jpg, png, mp4, mp3\n mime: MIME string. E.g: image/jpeg, video/mpeg\n\n Returns:\n The matched file type instance. Otherwise None.\n " ]
Please provide a description of the function:def open(self, encoding=None): try: if IS_GZIPPED_FILE.search(self._filename): _file = gzip.open(self._filename, 'rb') else: if encoding: _file = io.open(self._filename, 'r', encoding=encoding, errors='replace') elif self._encoding: _file = io.open(self._filename, 'r', encoding=self._encoding, errors='replace') else: _file = io.open(self._filename, 'r', errors='replace') except IOError, e: self._log_warning(str(e)) _file = None self.close() return _file
[ "Opens the file with the appropriate call" ]
Please provide a description of the function:def close(self): if not self.active: return self.active = False if self._file: self._file.close() self._sincedb_update_position(force_update=True) if self._current_event: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event])
[ "Closes all currently open file pointers" ]
Please provide a description of the function:def _buffer_extract(self, data): # Extract token-delimited entities from the input string with the split command. # There's a bit of craftiness here with the -1 parameter. Normally split would # behave no differently regardless of if the token lies at the very end of the # input buffer or not (i.e. a literal edge case) Specifying -1 forces split to # return "" in this case, meaning that the last entry in the list represents a # new segment of data where the token has not been encountered entities = collections.deque(data.split(self._delimiter, -1)) # Check to see if the buffer has exceeded capacity, if we're imposing a limit if self._size_limit: if self.input_size + len(entities[0]) > self._size_limit: raise Exception('input buffer full') self._input_size += len(entities[0]) # Move the first entry in the resulting array into the input buffer. It represents # the last segment of a token-delimited entity unless it's the only entry in the list. first_entry = entities.popleft() if len(first_entry) > 0: self._input.append(first_entry) # If the resulting array from the split is empty, the token was not encountered # (not even at the end of the buffer). Since we've encountered no token-delimited # entities this go-around, return an empty array. if len(entities) == 0: return [] # At this point, we've hit a token, or potentially multiple tokens. Now we can bring # together all the data we've buffered from earlier calls without hitting a token, # and add it to our list of discovered entities. entities.appendleft(''.join(self._input)) # Now that we've hit a token, joined the input buffer and added it to the entities # list, we can go ahead and clear the input buffer. All of the segments that were # stored before the join can now be garbage collected. self._input.clear() # The last entity in the list is not token delimited, however, thanks to the -1 # passed to split. 
It represents the beginning of a new list of as-yet-untokenized # data, so we add it to the start of the list. self._input.append(entities.pop()) # Set the new input buffer size, provided we're keeping track if self._size_limit: self._input_size = len(self._input[0]) # Now we're left with the list of extracted token-delimited entities we wanted # in the first place. Hooray! return entities
[ "\n Extract takes an arbitrary string of input data and returns an array of\n tokenized entities, provided there were any available to extract. This\n makes for easy processing of datagrams using a pattern like:\n\n tokenizer.extract(data).map { |entity| Decode(entity) }.each do ..." ]
Please provide a description of the function:def _ensure_file_is_good(self, current_time): if self._last_file_mapping_update and current_time - self._last_file_mapping_update <= self._stat_interval: return self._last_file_mapping_update = time.time() try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() return raise fid = self.get_file_id(st) if fid != self._fid: self._log_info('file rotated') self.close() elif self._file.tell() > st.st_size: if st.st_size == 0 and self._ignore_truncate: self._logger.info("[{0}] - file size is 0 {1}. ".format(fid, self._filename) + "If you use another tool (i.e. logrotate) to truncate " + "the file, your application may continue to write to " + "the offset it last wrote later. In such a case, we'd " + "better do nothing here") return self._log_info('file truncated') self._update_file(seek_to_end=False) elif REOPEN_FILES: self._log_debug('file reloaded (non-linux)') position = self._file.tell() self._update_file(seek_to_end=False) if self.active: self._file.seek(position, os.SEEK_SET)
[ "Every N seconds, ensures that the file we are tailing is the file we expect to be tailing" ]
Please provide a description of the function:def _run_pass(self): while True: try: data = self._file.read(4096) except IOError, e: if e.errno == errno.ESTALE: self.active = False return False lines = self._buffer_extract(data) if not lines: # Before returning, check if an event (maybe partial) is waiting for too long. if self._current_event and time.time() - self._last_activity > 1: event = '\n'.join(self._current_event) self._current_event.clear() self._callback_wrapper([event]) break self._last_activity = time.time() if self._multiline_regex_after or self._multiline_regex_before: # Multiline is enabled for this file. events = multiline_merge( lines, self._current_event, self._multiline_regex_after, self._multiline_regex_before) else: events = lines if events: self._callback_wrapper(events) if self._sincedb_path: current_line_count = len(lines) self._sincedb_update_position(lines=current_line_count) self._sincedb_update_position()
[ "Read lines from a file and performs a callback against them" ]
Please provide a description of the function:def _sincedb_init(self): if not self._sincedb_path: return if not os.path.exists(self._sincedb_path): self._log_debug('initializing sincedb sqlite schema') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) conn.execute() conn.close()
[ "Initializes the sincedb schema in an sqlite db", "\n create table sincedb (\n fid text primary key,\n filename text,\n position integer default 1\n );\n " ]
Please provide a description of the function:def _sincedb_update_position(self, lines=0, force_update=False): if not self._sincedb_path: return False self._line_count = self._line_count + lines old_count = self._line_count_sincedb lines = self._line_count current_time = int(time.time()) if not force_update: if self._last_sincedb_write and current_time - self._last_sincedb_write <= self._sincedb_write_interval: return False if old_count == lines: return False self._sincedb_init() self._last_sincedb_write = current_time self._log_debug('updating sincedb to {0}'.format(lines)) conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() query = 'insert or replace into sincedb (fid, filename) values (:fid, :filename);' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename }) query = 'update sincedb set position = :position where fid = :fid and filename = :filename' cursor.execute(query, { 'fid': self._fid, 'filename': self._filename, 'position': lines, }) conn.close() self._line_count_sincedb = lines return True
[ "Retrieves the starting position from the sincedb sql db for a given file\n Returns a boolean representing whether or not it updated the record\n " ]
Please provide a description of the function:def _sincedb_start_position(self): if not self._sincedb_path: return None self._sincedb_init() self._log_debug('retrieving start_position from sincedb') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) cursor = conn.cursor() cursor.execute('select position from sincedb where fid = :fid and filename = :filename', { 'fid': self._fid, 'filename': self._filename }) start_position = None for row in cursor.fetchall(): start_position, = row return start_position
[ "Retrieves the starting position from the sincedb sql db\n for a given file\n " ]
Please provide a description of the function:def _update_file(self, seek_to_end=True): try: self.close() self._file = self.open() except IOError: pass else: if not self._file: return self.active = True try: st = os.stat(self._filename) except EnvironmentError, err: if err.errno == errno.ENOENT: self._log_info('file removed') self.close() fid = self.get_file_id(st) if not self._fid: self._fid = fid if fid != self._fid: self._log_info('file rotated') self.close() elif seek_to_end: self._seek_to_end()
[ "Open the file for tailing" ]
Please provide a description of the function:def tail(self, fname, encoding, window, position=None): if window <= 0: raise ValueError('invalid window %r' % window) encodings = ENCODINGS if encoding: encodings = [encoding] + ENCODINGS for enc in encodings: try: f = self.open(encoding=enc) if f: return self.tail_read(f, window, position=position) return False except IOError, err: if err.errno == errno.ENOENT: return [] raise except UnicodeDecodeError: pass
[ "Read last N lines from file fname." ]
Please provide a description of the function:def create_transport(beaver_config, logger): transport_str = beaver_config.get('transport') if '.' not in transport_str: # allow simple names like 'redis' to load a beaver built-in transport module_path = 'beaver.transports.%s_transport' % transport_str.lower() class_name = '%sTransport' % transport_str.title() else: # allow dotted path names to load a custom transport class try: module_path, class_name = transport_str.rsplit('.', 1) except ValueError: raise Exception('Invalid transport {0}'.format(beaver_config.get('transport'))) _module = __import__(module_path, globals(), locals(), class_name, -1) transport_class = getattr(_module, class_name) transport = transport_class(beaver_config=beaver_config, logger=logger) return transport
[ "Creates and returns a transport object" ]
Please provide a description of the function:def listdir(self): ls = os.listdir(self._folder) return [x for x in ls if os.path.splitext(x)[1][1:] == "log"]
[ "HACK around not having a beaver_config stanza\n TODO: Convert this to a glob" ]
Please provide a description of the function:def update_files(self): if self._update_time and int(time.time()) - self._update_time < self._discover_interval: return self._update_time = int(time.time()) possible_files = [] files = [] if len(self._beaver_config.get('globs')) > 0: extend_files = files.extend for name, exclude in self._beaver_config.get('globs').items(): globbed = [os.path.realpath(filename) for filename in eglob(name, exclude)] extend_files(globbed) self._beaver_config.addglob(name, globbed) self._callback(("addglob", (name, globbed))) else: append_files = files.append for name in self.listdir(): append_files(os.path.realpath(os.path.join(self._folder, name))) for absname in files: try: st = os.stat(absname) except EnvironmentError, err: if err.errno != errno.ENOENT: raise else: if not stat.S_ISREG(st.st_mode): continue append_possible_files = possible_files.append fid = self.get_file_id(st) append_possible_files((fid, absname)) # add new ones new_files = [fname for fid, fname in possible_files if fid not in self._tails] self.watch(new_files)
[ "Ensures all files are properly loaded.\n Detects new files, file removals, file rotation, and truncation.\n On non-linux platforms, it will also manually reload the file for tailing.\n Note that this hack is necessary because EOF is cached on BSD systems.\n " ]
Please provide a description of the function:def close(self, signalnum=None, frame=None): self._running = False self._log_debug("Closing all tail objects") self._active = False for fid in self._tails: self._tails[fid].close() for n in range(0,self._number_of_consumer_processes): if self._proc[n] is not None and self._proc[n].is_alive(): self._logger.debug("Terminate Process: " + str(n)) self._proc[n].terminate() self._proc[n].join()
[ "Closes all currently open Tail objects" ]
Please provide a description of the function:def eglob(path, exclude=None): fi = itertools.chain.from_iterable paths = list(fi(glob2.iglob(d) for d in expand_paths(path))) if exclude: cached_regex = cached_regices.get(exclude, None) if not cached_regex: cached_regex = cached_regices[exclude] = re.compile(exclude) paths = [x for x in paths if not cached_regex.search(x)] return paths
[ "Like glob.glob, but supports \"/path/**/{a,b,c}.txt\" lookup" ]
Please provide a description of the function:def expand_paths(path): pr = itertools.product parts = MAGIC_BRACKETS.findall(path) if not path: return if not parts: return [path] permutations = [[(p[0], i, 1) for i in p[1].split(',')] for p in parts] return [_replace_all(path, i) for i in pr(*permutations)]
[ "When given a path with brackets, expands it to return all permutations\n of the path with expanded brackets, similar to ant.\n\n >>> expand_paths('../{a,b}/{c,d}')\n ['../a/c', '../a/d', '../b/c', '../b/d']\n >>> expand_paths('../{a,b}/{a,b}.py')\n ['../a/a.py', '../a/b.py', '../b/a.py', '../b/b.py']\n >>> expand_paths('../{a,b,c}/{a,b,c}')\n ['../a/a', '../a/b', '../a/c', '../b/a', '../b/b', '../b/c', '../c/a', '../c/b', '../c/c']\n >>> expand_paths('test')\n ['test']\n >>> expand_paths('')\n " ]
Please provide a description of the function:def multiline_merge(lines, current_event, re_after, re_before): events = [] for line in lines: if re_before and re_before.match(line): current_event.append(line) elif re_after and current_event and re_after.match(current_event[-1]): current_event.append(line) else: if current_event: events.append('\n'.join(current_event)) current_event.clear() current_event.append(line) return events
[ " Merge multi-line events based.\n\n Some event (like Python trackback or Java stracktrace) spawn\n on multiple line. This method will merge them using two\n regular expression: regex_after and regex_before.\n\n If a line match re_after, it will be merged with next line.\n\n If a line match re_before, it will be merged with previous line.\n\n This function return a list of complet event. Note that because\n we don't know if an event is complet before another new event\n start, the last event will not be returned but stored in\n current_event. You should pass the same current_event to\n successive call to multiline_merge. current_event is a list\n of lines whose belong to the same event.\n " ]
Please provide a description of the function:def create_ssh_tunnel(beaver_config, logger=None): if not beaver_config.use_ssh_tunnel(): return None logger.info("Proxying transport using through local ssh tunnel") return BeaverSshTunnel(beaver_config, logger=logger)
[ "Returns a BeaverSshTunnel object if the current config requires us to" ]