Dataset columns:
    Code: string (lengths 103 to 85.9k)
    Summary: list (lengths 0 to 94)
Please provide a description of the function:

def parse(cls, args):
    parsed = {}
    try:
        (options, args) = cls.optparser.parse_args(args)
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    parsed['label'] = options.label
    parsed['can_notify'] = options.can_notify
    parsed['name'] = options.name
    parsed['tags'] = options.tags
    parsed["command_type"] = "HadoopCommand"
    parsed['print_logs'] = options.print_logs
    parsed['print_logs_live'] = options.print_logs_live
    parsed['pool'] = options.pool

    if len(args) < 2:
        raise ParseError("Need at least two arguments", cls.usage)

    subcmd = args.pop(0)
    if subcmd not in cls.subcmdlist:
        raise ParseError("First argument must be one of <%s>" %
                         "|".join(cls.subcmdlist))

    parsed["sub_command"] = subcmd
    parsed["sub_command_args"] = " ".join("'" + str(a) + "'" for a in args)

    return parsed
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.inline is None and options.script_location is None:
            raise ParseError("One of script or its location"
                             " must be specified",
                             cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    if options.script_location is not None:
        if options.inline is not None:
            raise ParseError(
                "Both script and script_location cannot be specified",
                cls.optparser.format_help())

        if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
            # script location is local file
            try:
                s = open(options.script_location).read()
            except IOError as e:
                raise ParseError("Unable to open script location: %s" % str(e),
                                 cls.optparser.format_help())
            options.script_location = None
            options.inline = s

        if (args is not None) and (len(args) > 0):
            if options.inline is not None:
                raise ParseError(
                    "Extra arguments can only be "
                    "supplied with a script_location in S3 right now",
                    cls.optparser.format_help())
            setattr(options, 'parameters',
                    " ".join([pipes.quote(a) for a in args]))
    else:
        if (args is not None) and (len(args) > 0):
            raise ParseError(
                "Extra arguments can only be supplied with a script_location",
                cls.optparser.format_help())

    v = vars(options)
    v["command_type"] = "ShellCommand"
    return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.latin_statements is None and options.script_location is None:
            raise ParseError("One of script or its location"
                             " must be specified",
                             cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    if options.script_location is not None:
        if options.latin_statements is not None:
            raise ParseError(
                "Both script and script_location cannot be specified",
                cls.optparser.format_help())

        if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
            # script location is local file
            try:
                s = open(options.script_location).read()
            except IOError as e:
                raise ParseError("Unable to open script location: %s" % str(e),
                                 cls.optparser.format_help())
            options.script_location = None
            options.latin_statements = s

        if (args is not None) and (len(args) > 0):
            if options.latin_statements is not None:
                raise ParseError(
                    "Extra arguments can only be "
                    "supplied with a script_location in S3 right now",
                    cls.optparser.format_help())
            p = {}
            for a in args:
                kv = a.split('=')
                if len(kv) != 2:
                    raise ParseError("Arguments to pig script must be of this format k1=v1 k2=v2 k3=v3...")
                p[kv[0]] = kv[1]
            setattr(options, 'parameters', p)
    else:
        if (args is not None) and (len(args) > 0):
            raise ParseError(
                "Extra arguments can only be supplied with a script_location",
                cls.optparser.format_help())

    v = vars(options)
    v["command_type"] = "PigCommand"
    return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.mode not in ["1", "2"]:
            raise ParseError("mode must be either '1' or '2'",
                             cls.optparser.format_help())

        if (options.dbtap_id is None) or (options.db_table is None):
            raise ParseError("dbtap_id and db_table are required",
                             cls.optparser.format_help())

        if options.mode == "1":
            if options.hive_table is None:
                raise ParseError("hive_table is required for mode 1",
                                 cls.optparser.format_help())
        elif options.export_dir is None:  # mode 2
            raise ParseError("export_dir is required for mode 2",
                             cls.optparser.format_help())

        if options.db_update_mode is not None:
            if options.db_update_mode not in ["allowinsert", "updateonly"]:
                raise ParseError("db_update_mode should either be left blank for append "
                                 "mode or be 'updateonly' or 'allowinsert'",
                                 cls.optparser.format_help())
            if options.db_update_mode == "updateonly":
                if options.db_update_keys is None:
                    raise ParseError("db_update_keys is required when db_update_mode "
                                     "is 'updateonly'",
                                     cls.optparser.format_help())
            elif options.db_update_keys is not None:
                raise ParseError("db_update_keys is used only when db_update_mode "
                                 "is 'updateonly'",
                                 cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    v = vars(options)
    v["command_type"] = "DbExportCommand"
    return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.mode not in ["1", "2"]:
            raise ParseError("mode must be either '1' or '2'",
                             cls.optparser.format_help())

        if (options.dbtap_id is None) or (options.db_table is None):
            raise ParseError("dbtap_id and db_table are required",
                             cls.optparser.format_help())
        # TODO: Semantic checks for parameters in mode 1 and 2
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    v = vars(options)
    v["command_type"] = "DbImportCommand"
    return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def compose(cls, sub_commands, macros=None, cluster_label=None, notify=False,
            name=None, tags=None):
    if macros is not None:
        macros = json.loads(macros)
    return {
        "sub_commands": sub_commands,
        "command_type": "CompositeCommand",
        "macros": macros,
        "label": cluster_label,
        "tags": tags,
        "can_notify": notify,
        "name": name
    }
[ "\n Args:\n `sub_commands`: list of sub-command dicts\n\n Returns:\n Dictionary that can be used in create method\n\n Example Usage:\n cmd1 = HiveCommand.parse(['--query', \"show tables\"])\n cmd2 = PigCommand.parse(['--script_location', \"s3://paid-qubole/PigAPIDemo/scripts/script1-hadoop-s3-small.pig\"])\n composite = CompositeCommand.compose([cmd1, cmd2])\n cmd = CompositeCommand.run(**composite)\n " ]
Please provide a description of the function:

def parse(cls, args):
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.db_tap_id is None:
            raise ParseError("db_tap_id is required",
                             cls.optparser.format_help())
        if options.query is None and options.script_location is None:
            raise ParseError("query or script location is required",
                             cls.optparser.format_help())

        if options.script_location is not None:
            if options.query is not None:
                raise ParseError(
                    "Both query and script_location cannot be specified",
                    cls.optparser.format_help())

            if ((options.script_location.find("s3://") != 0) and
                    (options.script_location.find("s3n://") != 0)):
                # script location is local file
                try:
                    q = open(options.script_location).read()
                except IOError as e:
                    raise ParseError("Unable to open script location: %s" % str(e),
                                     cls.optparser.format_help())
                options.script_location = None
                options.query = q
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    if options.macros is not None:
        options.macros = json.loads(options.macros)
    v = vars(options)
    v["command_type"] = "DbTapQueryCommand"
    return v
[ "\n Parse command line arguments to construct a dictionary of command\n parameters that can be used to create a command\n\n Args:\n `args`: sequence of arguments\n\n Returns:\n Dictionary that can be used in create method\n\n Raises:\n ParseError: when the arguments are not correct\n " ]
Please provide a description of the function:

def pluralize(singular):
    if singular in UNCOUNTABLES:
        return singular
    for i in IRREGULAR:
        if i[0] == singular:
            return i[1]
    for i in PLURALIZE_PATTERNS:
        if re.search(i[0], singular):
            return re.sub(i[0], i[1], singular)
[ "Convert singular word to its plural form.\n\n Args:\n singular: A word in its singular form.\n\n Returns:\n The word in its plural form.\n " ]
Please provide a description of the function:

def singularize(plural):
    if plural in UNCOUNTABLES:
        return plural
    for i in IRREGULAR:
        if i[1] == plural:
            return i[0]
    for i in SINGULARIZE_PATTERNS:
        if re.search(i[0], plural):
            return re.sub(i[0], i[1], plural)
    return plural
[ "Convert plural word to its singular form.\n\n Args:\n plural: A word in its plural form.\n Returns:\n The word in its singular form.\n " ]
Please provide a description of the function:

def camelize(word):
    return ''.join(w[0].upper() + w[1:]
                   for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))
[ "Convert a word from lower_with_underscores to CamelCase.\n\n Args:\n word: The string to convert.\n Returns:\n The modified string.\n " ]
Please provide a description of the function:

def _make_minimal(dictionary):
    new_dict = {}
    for key, value in dictionary.items():
        if value is not None:
            if isinstance(value, dict):
                new_value = _make_minimal(value)
                if new_value:
                    new_dict[key] = new_value
            else:
                new_dict[key] = value
    return new_dict
[ "\n This function removes all the keys whose value is either None or an empty\n dictionary.\n " ]
Please provide a description of the function:

def check(sensor_class, args):
    parser = SensorCmdLine.parsers(sensor_class)
    parsed = parser.parse_args(args)
    return sensor_class.check(json.loads(parsed.data))
[ "\n Method to call Sensor.check after parsing args from cmdline\n :param sensor_class: sensor class\n :param args: inline arguments\n :return: True or False\n " ]
Please provide a description of the function:

def check(cls, data):
    conn = Qubole.agent()
    return conn.post(cls.rest_entity_path, data=data)['status']
[ "\n Method to call the sensors api with json payload\n :param data: valid json object\n :return: True or False\n " ]
Please provide a description of the function:

def upload_profiler_report(url, filename, config):
    try:
        logger.debug("Uploading profiler report to IOpipe")
        with open(filename, "rb") as data:
            response = requests.put(url, data=data, timeout=config["network_timeout"])
        response.raise_for_status()
    except Exception as e:
        logger.debug("Error while uploading profiler report: %s", e)
        if hasattr(e, "response"):
            logger.debug(e.response.content)
    else:
        logger.debug("Profiler report uploaded successfully")
    finally:
        if os.path.isfile(filename):
            os.remove(filename)
[ "\n Uploads a profiler report to IOpipe\n\n :param url: The signed URL\n :param filename: The profiler report file\n :param config: The IOpipe config\n " ]
Please provide a description of the function:

def read_pid_stat(pid):
    return {
        "utime": random.randint(0, 999999999),
        "stime": random.randint(0, 999999999),
        "cutime": random.randint(0, 999999999),
        "cstime": random.randint(0, 999999999),
    }
[ "\n Mocks read_pid_stat as this is a Linux-specific operation.\n " ]
Please provide a description of the function:

def read_stat():
    return [
        {
            "times": {
                "user": random.randint(0, 999999999),
                "nice": random.randint(0, 999999999),
                "sys": random.randint(0, 999999999),
                "idle": random.randint(0, 999999999),
                "irq": random.randint(0, 999999999),
            }
        }
    ]
[ "\n Mocks read_stat as this is a Linux-specific operation.\n " ]
Please provide a description of the function:

def handle_timeout(self, signum, frame):
    logger.debug("Function is about to timeout, sending report")
    self.report.prepare(TimeoutError("Timeout Exceeded."), frame)
    self.run_hooks("pre:report")
    self.report.send()
    self.run_hooks("post:report")
    self.wait_for_futures()
[ "\n Catches a timeout (SIGALRM) and sends the report before actual timeout occurs.\n\n The signum and frame parameters are passed by the signal module to this handler.\n\n :param signum: The signal number being handled.\n :param frame: The stack frame when signal was raised.\n " ]
Please provide a description of the function:

def load_plugins(self, plugins):
    def instantiate(plugin):
        return plugin() if inspect.isclass(plugin) else plugin

    loaded_plugins = []
    plugins_seen = []

    # Iterate over plugins in reverse to permit users to override default
    # plugin config
    for plugin in reversed(plugins):
        if not is_plugin(plugin) or plugin.name in plugins_seen:
            continue
        # Build the plugins list in reverse to restore original order
        loaded_plugins.insert(0, instantiate(plugin))
        plugins_seen.append(plugin.name)

    return loaded_plugins
[ "\n Loads plugins that match the `Plugin` interface and are instantiated.\n\n :param plugins: A list of plugin instances.\n " ]
Please provide a description of the function:

def run_hooks(self, name, event=None, context=None):
    hooks = {
        "pre:setup": lambda p: p.pre_setup(self),
        "post:setup": lambda p: p.post_setup(self),
        "pre:invoke": lambda p: p.pre_invoke(event, context),
        "post:invoke": lambda p: p.post_invoke(event, context),
        "pre:report": lambda p: p.pre_report(self.report),
        "post:report": lambda p: p.post_report(self.report),
    }

    if name in hooks:
        for p in self.plugins:
            if p.enabled:
                try:
                    hooks[name](p)
                except Exception as e:
                    logger.error(
                        "IOpipe plugin %s hook raised error: %s" % (name, str(e))
                    )
                    logger.exception(e)
[ "\n Runs plugin hooks for each registered plugin.\n " ]
Please provide a description of the function:

def submit_future(self, func, *args, **kwargs):
    # This mode will run futures synchronously. This should only be used
    # for benchmarking purposes.
    if self.config["sync_http"] is True:
        return MockFuture(func, *args, **kwargs)

    future = self.pool.submit(func, *args, **kwargs)
    self.futures.append(future)
    return future
[ "\n Submit a function call to be run as a future in a thread pool. This\n should be an I/O bound operation.\n " ]
Please provide a description of the function:

def wait_for_futures(self):
    [future for future in futures.as_completed(self.futures)]
    self.futures = []
[ "\n Wait for all futures to complete. This should be done at the end of an\n an invocation.\n " ]
Please provide a description of the function:

def validate_context(self, context):
    return all(
        [
            hasattr(context, attr)
            for attr in [
                "aws_request_id",
                "function_name",
                "function_version",
                "get_remaining_time_in_millis",
                "invoked_function_arn",
                "log_group_name",
                "log_stream_name",
                "memory_limit_in_mb",
            ]
        ]
    ) and callable(context.get_remaining_time_in_millis)
[ "\n Checks to see if we're working with a valid lambda context object.\n\n :returns: True if valid, False if not\n :rtype: bool\n " ]
Please provide a description of the function:

def patch_session_send(context, http_filter):
    if Session is None:
        return

    def send(self, *args, **kwargs):
        id = ensure_utf8(str(uuid.uuid4()))
        with context.iopipe.mark(id):
            response = original_session_send(self, *args, **kwargs)
        trace = context.iopipe.mark.measure(id)
        context.iopipe.mark.delete(id)
        collect_metrics_for_response(response, context, trace, http_filter)
        return response

    Session.send = send
[ "\n Monkey patches requests' Session class, if available. Overloads the\n send method to add tracing and metrics collection.\n " ]
Please provide a description of the function:

def patch_botocore_session_send(context, http_filter):
    if BotocoreSession is None:
        return

    def send(self, *args, **kwargs):
        id = str(uuid.uuid4())
        with context.iopipe.mark(id):
            response = original_botocore_session_send(self, *args, **kwargs)
        trace = context.iopipe.mark.measure(id)
        context.iopipe.mark.delete(id)
        collect_metrics_for_response(response, context, trace, http_filter)
        return response

    BotocoreSession.send = send
[ "\n Monkey patches botocore's vendored requests, if available. Overloads the\n Session class' send method to add tracing and metric collection.\n " ]
Please provide a description of the function:

def collect_metrics_for_response(http_response, context, trace, http_filter):
    http_response = copy.deepcopy(http_response)
    if http_filter is not None and callable(http_filter):
        http_response = http_filter(http_response)
        if http_response is False:
            return

    request = None
    if hasattr(http_response, "request"):
        parsed_url = None
        if hasattr(http_response.request, "url"):
            parsed_url = urlparse(http_response.request.url)

        request_headers = []
        if hasattr(http_response.request, "headers"):
            request_headers = [
                {"key": ensure_utf8(k), "string": ensure_utf8(v)}
                for k, v in http_response.request.headers.items()
                if k.lower() in INCLUDE_HEADERS
            ]

        request = Request(
            hash=ensure_utf8(getattr(parsed_url, "fragment", None)),
            headers=request_headers,
            hostname=ensure_utf8(getattr(parsed_url, "hostname", None)),
            method=ensure_utf8(getattr(http_response.request, "method", None)),
            path=ensure_utf8(getattr(parsed_url, "path", None)),
            # TODO: Determine if this is redundant
            pathname=ensure_utf8(getattr(parsed_url, "path", None)),
            port=ensure_utf8(getattr(parsed_url, "port", None)),
            protocol=ensure_utf8(getattr(parsed_url, "scheme", None)),
            query=ensure_utf8(getattr(parsed_url, "query", None)),
            url=ensure_utf8(getattr(http_response.request, "url", None)),
        )

    response_headers = []
    if hasattr(http_response, "headers"):
        response_headers = [
            {"key": ensure_utf8(k), "string": ensure_utf8(v)}
            for k, v in http_response.headers.items()
            if k.lower() in INCLUDE_HEADERS
        ]

    response = Response(
        headers=response_headers,
        statusCode=ensure_utf8(getattr(http_response, "status_code", None)),
        statusMessage=None,
    )

    context.iopipe.mark.http_trace(trace, request, response)
[ "\n Collects relevant metrics from a requests Response object and adds them to\n the IOpipe context.\n " ]
Please provide a description of the function:

def get_plugin_meta(plugins):
    return [
        {
            "name": p.name,
            "version": p.version,
            "homepage": p.homepage,
            "enabled": p.enabled,
        }
        for p in plugins
        if is_plugin(p)
    ]
[ "\n Returns meta data about plugins.\n\n :param plugins: A list of plugins.\n :type plugins: list\n :returns: A list of dicts containing plugin meta data.\n :rtype: list\n " ]
Please provide a description of the function:

def is_plugin(plugin):
    try:
        return isinstance(plugin, Plugin) or issubclass(plugin, Plugin)
    except TypeError:
        return False
[ "\n Returns true if the plugin implements the `Plugin` interface.\n\n :param plugin: The plugin to check.\n :returns: True if plugin, False otherwise.\n :rtype: bool\n " ]
Please provide a description of the function:

def with_metaclass(meta, *bases):
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, "temporary_class", (), {})
[ "Python 2 and 3 compatible way to do meta classes" ]
Please provide a description of the function:

def extract_context_data(self):
    data = {}
    for k, v in {
        # camel case names in the report to align with AWS standards
        "functionName": "function_name",
        "functionVersion": "function_version",
        "memoryLimitInMB": "memory_limit_in_mb",
        "invokedFunctionArn": "invoked_function_arn",
        "awsRequestId": "aws_request_id",
        "logGroupName": "log_group_name",
        "logStreamName": "log_stream_name",
    }.items():
        if hasattr(self.context, v):
            data[k] = getattr(self.context, v)

    if (
        hasattr(self.context, "invoked_function_arn")
        and "AWS_SAM_LOCAL" in os.environ
    ):
        data["invokedFunctionArn"] = (
            "arn:aws:lambda:local:0:function:%s" % data.get("functionName", "unknown")
        )

    if hasattr(self.context, "get_remaining_time_in_millis") and callable(
        self.context.get_remaining_time_in_millis
    ):
        data[
            "getRemainingTimeInMillis"
        ] = self.context.get_remaining_time_in_millis()

    data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")

    return data
[ "\n Returns the contents of a AWS Lambda context.\n\n :returns: A dict of relevant context data.\n :rtype: dict\n " ]
Please provide a description of the function:

def retain_error(self, error, frame=None):
    if frame is None:
        stack = traceback.format_exc()
        self.labels.add("@iopipe/error")
    else:
        stack = "\n".join(traceback.format_stack(frame))
        self.labels.add("@iopipe/timeout")
    details = {
        "name": type(error).__name__,
        "message": "{}".format(error),
        "stack": stack,
    }
    self.report["errors"] = details
[ "\n Adds details of an error to the report.\n\n :param error: The error exception to add to the report.\n " ]
Please provide a description of the function:

def prepare(self, error=None, frame=None):
    if error:
        self.retain_error(error, frame)

    self.report["environment"]["host"]["boot_id"] = system.read_bootid()

    # convert labels to list for sending
    self.report["labels"] = list(self.labels)

    meminfo = system.read_meminfo()

    self.report.update(
        {
            "aws": self.extract_context_data(),
            "timestampEnd": int(time.time() * 1000),
        }
    )

    self.report["environment"]["os"].update(
        {
            "cpus": system.read_stat(),
            "freemem": meminfo["MemFree"],
            "hostname": system.read_hostname(),
            "totalmem": meminfo["MemTotal"],
            "usedmem": meminfo["MemTotal"] - meminfo["MemFree"],
        }
    )

    self.report["environment"]["os"]["linux"]["pid"] = {
        "self": {
            "stat": system.read_pid_stat("self"),
            "stat_start": self.stat_start,
            "status": system.read_pid_status("self"),
        }
    }

    self.report["disk"] = system.read_disk()
    self.report["duration"] = int((monotonic() - self.start_time) * 1e9)
[ "\n Prepare the report to be sent to IOpipe.\n\n :param error: An optional error to add to report.\n :param frame: A stack frame to add to report in the event of a timeout.\n " ]
Please provide a description of the function:

def send(self):
    if self.sent is True:
        return
    self.sent = True

    logger.debug("Sending report to IOpipe:")
    logger.debug(json.dumps(self.report, indent=2, sort_keys=True))

    self.client.submit_future(send_report, copy.deepcopy(self.report), self.config)
[ "\n Sends the report to IOpipe.\n " ]
Please provide a description of the function:

def get_collector_path(base_url=None):
    if not base_url:
        return "/v0/event"
    event_url = urlparse(base_url)
    event_path = urljoin(event_url.path, "v0/event")
    if not event_path.startswith("/"):
        event_path = "/%s" % event_path
    if event_url.query:
        event_path = "?".join([event_path, event_url.query])
    return event_path
[ "\n Returns the IOpipe collector's path. By default this is `/v0/event`.\n\n :param base_url: An optional base URL to use.\n :returns: The collector's path.\n :rtype: str\n " ]
Please provide a description of the function:

def get_hostname(config_url=None):
    region_string = ""
    if config_url:
        return urlparse(config_url).hostname
    aws_region = os.getenv("AWS_REGION")
    if aws_region and aws_region in SUPPORTED_REGIONS:
        region_string = ".%s" % aws_region
    return "metrics-api%s.iopipe.com" % region_string
[ "\n Returns the IOpipe collector's hostname. If the `AWS_REGION` environment\n variable is not set or unsupported then `us-east-1` will be used by\n default. In this case, `us-east-1` is `metrics-api.iopipe.com`.\n\n :param config_url: A optional config URL to use.\n :returns: The collector's hostname.\n :rtype: str\n " ]
Please provide a description of the function:

def send_report(report, config):
    headers = {"Authorization": "Bearer {}".format(config["token"])}
    url = "https://{host}{path}".format(**config)

    try:
        response = session.post(
            url, json=report, headers=headers, timeout=config["network_timeout"]
        )
        response.raise_for_status()
    except Exception as e:
        logger.debug("Error sending report to IOpipe: %s" % e)
    else:
        logger.debug("Report sent to IOpipe successfully")
[ "\n Sends the report to IOpipe's collector.\n\n :param report: The report to be sent.\n :param config: The IOpipe agent configuration.\n " ]
Please provide a description of the function:

def upload_log_data(url, stream_or_file, config):
    try:
        logger.debug("Uploading log data to IOpipe")
        if isinstance(stream_or_file, StringIO):
            stream_or_file.seek(0)
            response = requests.put(
                url, data=stream_or_file, timeout=config["network_timeout"]
            )
        else:
            with open(stream_or_file, "rb") as data:
                response = requests.put(
                    url, data=data, timeout=config["network_timeout"]
                )
        response.raise_for_status()
    except Exception as e:
        logger.debug("Error while uploading log data: %s", e)
        logger.exception(e)
        if hasattr(e, "response") and hasattr(e.response, "content"):
            logger.debug(e.response.content)
    else:
        logger.debug("Log data uploaded successfully")
    finally:
        if isinstance(stream_or_file, str) and os.path.exists(stream_or_file):
            os.remove(stream_or_file)
[ "\n Uploads log data to IOpipe.\n\n :param url: The signed URL\n :param stream_or_file: The log data stream or file\n :param config: The IOpipe config\n " ]
Please provide a description of the function:

def get_signer_hostname():
    region = os.getenv("AWS_REGION", "")
    region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
    return "signer.{region}.iopipe.com".format(region=region)
[ "\n Returns the IOpipe signer hostname for a region\n\n :returns: The signer hostname\n :rtype str\n " ]
Please provide a description of the function:

def get_signed_request(config, context, extension):
    url = "https://{hostname}/".format(hostname=get_signer_hostname())

    try:
        logger.debug("Requesting signed request URL from %s", url)
        response = requests.post(
            url,
            json={
                "arn": context.invoked_function_arn,
                "requestId": context.aws_request_id,
                "timestamp": int(time.time() * 1000),
                "extension": extension,
            },
            headers={"Authorization": config["token"]},
            timeout=config["network_timeout"],
        )
        response.raise_for_status()
    except Exception as e:
        logger.debug("Error requesting signed request URL: %s", e)
        if hasattr(e, "response"):
            logger.debug(e.response.content)
    else:
        response = response.json()
        logger.debug("Signed request URL received for %s", response["url"])
        return response
[ "\n Returns a signed request URL from IOpipe\n\n :param config: The IOpipe config\n :param context: The AWS context to request a signed URL\n :param extension: The extension of the file to sign\n :returns: A signed request URL\n :rtype: str\n " ]
Please provide a description of the function:

def handler(event, context):
    try:
        ip = requests.get("http://checkip.amazonaws.com/")
    except requests.RequestException as e:
        # Send some context about this error to Lambda Logs
        print(e)
        raise e

    return {
        "statusCode": 200,
        "body": json.dumps(
            {"message": "hello world", "location": ip.text.replace("\n", "")}
        ),
    }
[ "Sample pure Lambda function\n\n Parameters\n ----------\n event: dict, required\n API Gateway Lambda Proxy Input Format\n\n {\n \"resource\": \"Resource path\",\n \"path\": \"Path parameter\",\n \"httpMethod\": \"Incoming request's method name\"\n \"headers\": {Incoming request headers}\n \"queryStringParameters\": {query string parameters }\n \"pathParameters\": {path parameters}\n \"stageVariables\": {Applicable stage variables}\n \"requestContext\": {Request context, including authorizer-returned key-value pairs}\n \"body\": \"A JSON string of the request payload.\"\n \"isBase64Encoded\": \"A boolean flag to indicate if the applicable request payload is Base64-encode\"\n }\n\n https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format\n\n context: object, required\n Lambda Context runtime methods and attributes\n\n Attributes\n ----------\n\n context.aws_request_id: str\n Lambda request ID\n context.client_context: object\n Additional context when invoked through AWS Mobile SDK\n context.function_name: str\n Lambda function name\n context.function_version: str\n Function version identifier\n context.get_remaining_time_in_millis: function\n Time in milliseconds before function times out\n context.identity:\n Cognito identity provider context when invoked through AWS Mobile SDK\n context.invoked_function_arn: str\n Function ARN\n context.log_group_name: str\n Cloudwatch Log group name\n context.log_stream_name: str\n Cloudwatch Log stream name\n context.memory_limit_in_mb: int\n Function memory\n\n https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html\n\n Returns\n ------\n API Gateway Lambda Proxy Output Format: dict\n 'statusCode' and 'body' are required\n\n {\n \"isBase64Encoded\": true | false,\n \"statusCode\": httpStatusCode,\n \"headers\": {\"headerName\": \"headerValue\", ...},\n \"body\": \"...\"\n }\n\n # api-gateway-simple-proxy-for-lambda-output-format\n https: // docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html\n " ]
Please provide a description of the function:

def read_disk():
    s = os.statvfs("/tmp")
    return {
        # This should report as 500MB, if not may need to be hardcoded
        # https://aws.amazon.com/lambda/faqs/
        "totalMiB": (s.f_blocks * s.f_bsize) / MB_FACTOR,
        "usedMiB": ((s.f_blocks - s.f_bfree) * s.f_frsize) / MB_FACTOR,
        "usedPercentage": round(
            (((s.f_blocks - s.f_bfree) * s.f_frsize) / (s.f_blocks * s.f_bsize)) * 100,
            2,
        ),
    }
[ "\n Returns disk usage for /tmp\n\n :returns: Disk usage (total, used, percentage used)\n :rtype dict\n " ]
Please provide a description of the function:

def read_meminfo():
    data = {}
    with open("/proc/meminfo", "rb") as meminfo_file:
        for row in meminfo_file:
            fields = row.split()
            # Example content:
            # MemTotal:        3801016 kB
            # MemFree:         1840972 kB
            # MemAvailable:    3287752 kB
            # HugePages_Total:       0
            data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
    return data
[ "\n Returns system memory usage information.\n\n :returns: The system memory usage.\n :rtype: dict\n " ]
Please provide a description of the function:

def read_pid_stat(pid="self"):
    with open("/proc/%s/stat" % (pid,), "rb") as f:
        stat = f.readline().split()
    return {
        "utime": int(stat[13]),
        "stime": int(stat[14]),
        "cutime": int(stat[15]),
        "cstime": int(stat[16]),
    }
[ "\n Returns system process stat information.\n\n :param pid: The process ID.\n :returns: The system stat information.\n :rtype: dict\n " ]
Please provide a description of the function:

def read_pid_status(pid="self"):
    data = {}
    with open("/proc/%s/status" % (pid,), "rb") as status_file:
        for row in status_file:
            fields = row.split()
            if fields and fields[0] in [b"VmRSS:", b"Threads:", b"FDSize:"]:
                try:
                    data[fields[0].decode("ascii")[:-1]] = int(fields[1])
                except ValueError:
                    data[fields[0].decode("ascii")[:-1]] = fields[1].decode("ascii")
    return data
[ "\n Returns the system process sstatus.\n\n :param pid: The process ID.\n :returns: The system process status.\n :rtype: dict\n " ]
Please provide a description of the function:

def read_stat():
    data = []
    with open("/proc/stat", "rb") as stat_file:
        for line in stat_file:
            cpu_stat = line.split()
            if cpu_stat[0][:3] != b"cpu":
                break
            # First cpu line is aggregation of following lines, skip it
            if len(cpu_stat[0]) == 3:
                continue
            data.append(
                {
                    "times": {
                        "user": int(cpu_stat[1]),
                        "nice": int(cpu_stat[2]),
                        "sys": int(cpu_stat[3]),
                        "idle": int(cpu_stat[4]),
                        "irq": int(cpu_stat[6]),
                    }
                }
            )
    return data
[ "\n Returns the system stat information.\n\n :returns: The system stat information.\n :rtype: list\n " ]
Please provide a description of the function:

def set_config(**config):
    config.setdefault("debug", bool(strtobool(os.getenv("IOPIPE_DEBUG", "false"))))
    config.setdefault("enabled", bool(strtobool(os.getenv("IOPIPE_ENABLED", "true"))))
    config.setdefault("host", get_hostname())
    config.setdefault("install_method", os.getenv("IOPIPE_INSTALL_METHOD", "manual"))
    config.setdefault("network_timeout", os.getenv("IOPIPE_NETWORK_TIMEOUT", 5000))
    config.setdefault("path", get_collector_path())
    config.setdefault("plugins", [])
    config.setdefault("sync_http", False)
    config.setdefault("timeout_window", os.getenv("IOPIPE_TIMEOUT_WINDOW", 500))
    config.setdefault(
        "token", os.getenv("IOPIPE_TOKEN") or os.getenv("IOPIPE_CLIENTID") or ""
    )

    if "client_id" in config:
        config["token"] = config.pop("client_id")

    if "url" in config:
        url = config.pop("url")
        config["host"] = get_hostname(url)
        config["path"] = get_collector_path(url)

    if "." in str(config["network_timeout"]):
        warnings.warn(
            "IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer"
        )

    try:
        config["debug"] = bool(config["debug"])
    except ValueError:
        config["debug"] = False

    try:
        config["network_timeout"] = int(config["network_timeout"]) / 1000.0
    except ValueError:
        config["network_timeout"] = 5.0

    if "." in str(config["timeout_window"]):
        warnings.warn(
            "IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer"
        )

    try:
        config["timeout_window"] = int(config["timeout_window"]) / 1000.0
    except ValueError:
        config["timeout_window"] = 0.5

    return config
[ "\n Returns IOpipe configuration options, setting defaults as necessary.\n " ]
Please provide a description of the function:

def b(s):
    return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding())
[ " Encodes Unicode strings to byte strings, if necessary. " ]
Please provide a description of the function:

def LogMsg(msg):
    global headerlogged
    if headerlogged == 0:
        print("{0:<8} {1:<90} {2}".format(
            "Time",
            "MainThread",
            "UpdateSNMPObjsThread"
        ))
        print("{0:-^120}".format("-"))
        headerlogged = 1

    threadname = threading.currentThread().name

    funcname = sys._getframe(1).f_code.co_name
    if funcname == "<module>":
        funcname = "Main code path"
    elif funcname == "LogNetSnmpMsg":
        funcname = "net-snmp code"
    else:
        funcname = "{0}()".format(funcname)

    if threadname == "MainThread":
        logmsg = "{0} {1:<112.112}".format(
            time.strftime("%T", time.localtime(time.time())),
            "{0}: {1}".format(funcname, msg)
        )
    else:
        logmsg = "{0} {1:>112.112}".format(
            time.strftime("%T", time.localtime(time.time())),
            "{0}: {1}".format(funcname, msg)
        )
    print(logmsg)
[ " Writes a formatted log message with a timestamp to stdout. " ]
Please provide a description of the function:

def UpdateSNMPObjs():
    global threadingString

    LogMsg("Beginning data update.")
    data = ""

    # Obtain the data by calling an external command. We don't use
    # subprocess.check_output() here for compatibility with Python versions
    # older than 2.7.
    LogMsg("Calling external command \"sleep 5; date\".")
    proc = subprocess.Popen(
        "sleep 5; date", shell=True, env={"LANG": "C"},
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    output = proc.communicate()[0].splitlines()[0]
    rc = proc.poll()
    if rc != 0:
        LogMsg("An error occurred executing the command: {0}".format(output))
        return

    msg = "Updating \"threadingString\" object with data \"{0}\"."
    LogMsg(msg.format(output))
    threadingString.update(output)

    LogMsg("Data update done, exiting thread.")
[ " Function that does the actual data update. " ]
Please provide a description of the function:

def UpdateSNMPObjsAsync():
    # UpdateSNMPObjs() will be executed in a separate thread so that the main
    # thread can continue looping and processing SNMP requests while the data
    # update is still in progress. However we'll make sure only one update
    # thread is run at any time, even if the data update interval has been set
    # too low.
    if threading.active_count() == 1:
        LogMsg("Creating thread for UpdateSNMPObjs().")
        t = threading.Thread(target=UpdateSNMPObjs, name="UpdateSNMPObjsThread")
        t.daemon = True
        t.start()
    else:
        LogMsg("Data update still active, data update interval too low?")
[ " Starts UpdateSNMPObjs() in a separate thread. " ]
Please provide a description of the function:

def getRegistered(self, context=""):
    myobjs = {}
    try:
        # Python 2.x
        objs_iterator = self._objs[context].iteritems()
    except AttributeError:
        # Python 3.x
        objs_iterator = self._objs[context].items()
    for oidstr, snmpobj in objs_iterator:
        myobjs[oidstr] = {
            "type": type(snmpobj).__name__,
            "value": snmpobj.value()
        }
    return dict(myobjs)
[ " Returns a dictionary with the currently registered SNMP objects.\n\n\t\t Returned is a dictionary objects for the specified \"context\",\n\t\t which defaults to the default context. " ]
Please provide a description of the function:

def start(self):
    if self._status != netsnmpAgentStatus.CONNECTED \
       and self._status != netsnmpAgentStatus.RECONNECTING:
        self._status = netsnmpAgentStatus.FIRSTCONNECT
        libnsa.init_snmp(b(self.AgentName))
        if self._status == netsnmpAgentStatus.CONNECTFAILED:
            msg = "Error connecting to snmpd instance at \"{0}\" -- " \
                  "incorrect \"MasterSocket\" or snmpd not running?"
            msg = msg.format(self.MasterSocket)
            raise netsnmpAgentException(msg)
[ " Starts the agent. Among other things, this means connecting\n\t\t to the master agent, if configured that way. " ]
Please provide a description of the function:

def _adjust_trim_top(self, canv, size):
    action = self._scroll_action
    self._scroll_action = None

    maxcol, maxrow = size
    trim_top = self._trim_top
    canv_rows = canv.rows()

    if trim_top < 0:
        # Negative trim_top values use bottom of canvas as reference
        trim_top = canv_rows - maxrow + trim_top + 1

    if canv_rows <= maxrow:
        self._trim_top = 0  # Reset scroll position
        return

    def ensure_bounds(new_trim_top):
        return max(0, min(canv_rows - maxrow, new_trim_top))

    if action == SCROLL_LINE_UP:
        self._trim_top = ensure_bounds(trim_top - 1)
    elif action == SCROLL_LINE_DOWN:
        self._trim_top = ensure_bounds(trim_top + 1)
    elif action == SCROLL_PAGE_UP:
        self._trim_top = ensure_bounds(trim_top - maxrow + 1)
    elif action == SCROLL_PAGE_DOWN:
        self._trim_top = ensure_bounds(trim_top + maxrow - 1)
    elif action == SCROLL_TO_TOP:
        self._trim_top = 0
    elif action == SCROLL_TO_END:
        self._trim_top = canv_rows - maxrow
    else:
        self._trim_top = ensure_bounds(trim_top)

    # If the cursor was moved by the most recent keypress, adjust trim_top
    # so that the new cursor position is within the displayed canvas part.
    # But don't do this if the cursor is at the top/bottom edge so we can still scroll out
    if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
        self._old_cursor_coords = None
        curscol, cursrow = canv.cursor
        if cursrow < self._trim_top:
            self._trim_top = cursrow
        elif cursrow >= self._trim_top + maxrow:
            self._trim_top = max(0, cursrow - maxrow + 1)
[ "Adjust self._trim_top according to self._scroll_action" ]
Please provide a description of the function:

def rows_max(self, size=None, focus=False):
    if size is not None:
        ow = self._original_widget
        ow_size = self._get_original_widget_size(size)
        sizing = ow.sizing()
        if FIXED in sizing:
            self._rows_max_cached = ow.pack(ow_size, focus)[1]
        elif FLOW in sizing:
            self._rows_max_cached = ow.rows(ow_size, focus)
        else:
            raise RuntimeError('Not a flow/box widget: %r' % self._original_widget)
    return self._rows_max_cached
[ "Return the number of rows for `size`\n\n If `size` is not given, the currently rendered number of rows is returned.\n " ]
Please provide a description of the function:

def scrolling_base_widget(self):
    def orig_iter(w):
        while hasattr(w, 'original_widget'):
            w = w.original_widget
            yield w
        yield w

    def is_scrolling_widget(w):
        return hasattr(w, 'get_scrollpos') and hasattr(w, 'rows_max')

    for w in orig_iter(self):
        if is_scrolling_widget(w):
            return w
    raise ValueError('Not compatible to be wrapped by ScrollBar: %r' % w)
[ "Nearest `original_widget` that is compatible with the scrolling API" ]
Please provide a description of the function:

def timeout_after(seconds, coro=None, *args):
    '''Execute the specified coroutine and return its result. However,
    issue a cancellation request to the calling task after seconds
    have elapsed. When this happens, a TaskTimeout exception is raised.

    If coro is None, the result of this function serves as an
    asynchronous context manager that applies a timeout to a block of
    statements.

    timeout_after() may be composed with other timeout_after()
    operations (i.e., nested timeouts). If an outer timeout expires
    first, then TimeoutCancellationError is raised instead of
    TaskTimeout. If an inner timeout expires and fails to properly
    raise TaskTimeout, a UncaughtTimeoutError is raised in the outer
    timeout.
    '''
    if coro:
        return _timeout_after_func(seconds, False, coro, args)

    return TimeoutAfter(seconds)
[]
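A minimal usage sketch of the context-manager form (fetch and url are hypothetical names, not from the source):

    try:
        async with timeout_after(5):
            await fetch(url)
    except TaskTimeout:
        print("request timed out")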
Please provide a description of the function:

def timeout_at(clock, coro=None, *args):
    '''Execute the specified coroutine and return its result. However,
    issue a cancellation request to the calling task when the given
    absolute clock value is reached. When this happens, a TaskTimeout
    exception is raised.

    If coro is None, the result of this function serves as an
    asynchronous context manager that applies a timeout to a block of
    statements.

    timeout_after() may be composed with other timeout_after()
    operations (i.e., nested timeouts). If an outer timeout expires
    first, then TimeoutCancellationError is raised instead of
    TaskTimeout. If an inner timeout expires and fails to properly
    raise TaskTimeout, a UncaughtTimeoutError is raised in the outer
    timeout.
    '''
    if coro:
        return _timeout_after_func(clock, True, coro, args)

    return TimeoutAfter(clock, absolute=True)
[]
Please provide a description of the function:

def ignore_after(seconds, coro=None, *args, timeout_result=None):
    '''Execute the specified coroutine and return its result. Issue a
    cancellation request after seconds have elapsed. When a timeout
    occurs, no exception is raised. Instead, timeout_result is
    returned.

    If coro is None, the result is an asynchronous context manager
    that applies a timeout to a block of statements. For the context
    manager case, the resulting context manager object has an expired
    attribute set to True if time expired.

    Note: ignore_after() may also be composed with other timeout
    operations. TimeoutCancellationError and UncaughtTimeoutError
    exceptions might be raised according to the same rules as for
    timeout_after().
    '''
    if coro:
        return _ignore_after_func(seconds, False, coro, args, timeout_result)

    return TimeoutAfter(seconds, ignore=True)
[]
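A sketch of the block form, assuming the context manager yields itself so its expired attribute can be inspected afterwards (fetch and url are hypothetical):

    async with ignore_after(5) as timeout_ctx:
        await fetch(url)
    if timeout_ctx.expired:
        print("gave up after 5 seconds")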
Please provide a description of the function:

def ignore_at(clock, coro=None, *args, timeout_result=None):
    '''Stop the enclosed task or block of code at an absolute clock value.

    Same usage as ignore_after().
    '''
    if coro:
        return _ignore_after_func(clock, True, coro, args, timeout_result)

    return TimeoutAfter(clock, absolute=True, ignore=True)
[]
Please provide a description of the function:

def _add_task(self, task):
    '''Add an already existing task to the task group.'''
    if hasattr(task, '_task_group'):
        raise RuntimeError('task is already part of a group')
    if self._closed:
        raise RuntimeError('task group is closed')
    task._task_group = self
    if task.done():
        self._done.append(task)
    else:
        self._pending.add(task)
        task.add_done_callback(self._on_done)
[]
Please provide a description of the function:

async def spawn(self, coro, *args):
    '''Create a new task that’s part of the group. Returns a Task instance.'''
    task = await spawn(coro, *args, report_crash=False)
    self._add_task(task)
    return task
[]
Please provide a description of the function:

async def next_done(self):
    '''Returns the next completed task. Returns None if no more tasks
    remain. A TaskGroup may also be used as an asynchronous iterator.
    '''
    if not self._done and self._pending:
        self._done_event.clear()
        await self._done_event.wait()
    if self._done:
        return self._done.popleft()
    return None
[]
Please provide a description of the function:

async def join(self):
    '''Wait for tasks in the group to terminate according to the wait
    policy for the group.

    If the join() operation itself is cancelled, all remaining tasks
    in the group are also cancelled.

    If a TaskGroup is used as a context manager, the join() method is
    called on context-exit.

    Once join() returns, no more tasks may be added to the task
    group. Tasks can be added while join() is running.
    '''
    def errored(task):
        return not task.cancelled() and task.exception()

    try:
        if self._wait in (all, object):
            while True:
                task = await self.next_done()
                if task is None:
                    return
                if errored(task):
                    break
                if self._wait is object:
                    if task.cancelled() or task.result() is not None:
                        return
        else:  # any
            task = await self.next_done()
            if task is None or not errored(task):
                return
    finally:
        await self.cancel_remaining()

    if errored(task):
        raise task.exception()
[]
Please provide a description of the function:

async def cancel_remaining(self):
    '''Cancel all remaining tasks.'''
    self._closed = True
    task_list = list(self._pending)
    for task in task_list:
        task.cancel()
    for task in task_list:
        with suppress(CancelledError):
            await task
[]
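Putting the TaskGroup methods above together, a typical usage sketch (worker is a hypothetical coroutine function; per the docstrings, join() runs on context-exit and the group can be iterated asynchronously):

    async with TaskGroup() as group:
        await group.spawn(worker, 1)
        await group.spawn(worker, 2)
        # Consume tasks as they complete
        async for task in group:
            print(task.result())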
Please provide a description of the function:

async def _connect_one(self, remote_address):
    '''Connect to the proxy and perform a handshake requesting a connection.

    Return the open socket on success, or the exception on failure.
    '''
    loop = asyncio.get_event_loop()

    for info in await loop.getaddrinfo(str(self.address.host), self.address.port,
                                       type=socket.SOCK_STREAM):
        # This object has state so is only good for one connection
        client = self.protocol(remote_address, self.auth)
        sock = socket.socket(family=info[0])
        try:
            # A non-blocking socket is required by loop socket methods
            sock.setblocking(False)
            await loop.sock_connect(sock, info[4])
            await self._handshake(client, sock, loop)
            self.peername = sock.getpeername()
            return sock
        except (OSError, SOCKSProtocolError) as e:
            exception = e
            # Don't close the socket because of an asyncio bug
            # see https://github.com/kyuupichan/aiorpcX/issues/8

    return exception
[]
Please provide a description of the function:

async def _connect(self, remote_addresses):
    '''Connect to the proxy and perform a handshake requesting a
    connection to each address in addresses.

    Return an (open_socket, remote_address) pair on success.
    '''
    assert remote_addresses

    exceptions = []
    for remote_address in remote_addresses:
        sock = await self._connect_one(remote_address)
        if isinstance(sock, socket.socket):
            return sock, remote_address
        exceptions.append(sock)

    strings = set(f'{exc!r}' for exc in exceptions)
    raise (exceptions[0] if len(strings) == 1 else
           OSError(f'multiple exceptions: {", ".join(strings)}'))
[]
Please provide a description of the function:

async def _detect_proxy(self):
    '''Return True if it appears we can connect to a SOCKS proxy,
    otherwise False.
    '''
    if self.protocol is SOCKS4a:
        remote_address = NetAddress('www.apple.com', 80)
    else:
        remote_address = NetAddress('8.8.8.8', 53)

    sock = await self._connect_one(remote_address)
    if isinstance(sock, socket.socket):
        sock.close()
        return True

    # SOCKSFailure indicates something failed, but that we are likely talking to a
    # proxy
    return isinstance(sock, SOCKSFailure)
[]
Please provide a description of the function:

async def auto_detect_at_address(cls, address, auth):
    '''Try to detect a SOCKS proxy at address using the authentication
    method (or None). SOCKS5, SOCKS4a and SOCKS are tried in order.

    If a SOCKS proxy is detected a SOCKSProxy object is returned.
    Returning a SOCKSProxy does not mean it is functioning - for
    example, it may have no network connectivity.

    If no proxy is detected return None.
    '''
    for protocol in (SOCKS5, SOCKS4a, SOCKS4):
        proxy = cls(address, protocol, auth)
        if await proxy._detect_proxy():
            return proxy
    return None
[]
Please provide a description of the function:

async def auto_detect_at_host(cls, host, ports, auth):
    '''Try to detect a SOCKS proxy on a host on one of the ports.

    Calls auto_detect_address for the ports in order. Returning a
    SOCKSProxy does not mean it is functioning - for example, it may
    have no network connectivity.

    If no proxy is detected return None.
    '''
    for port in ports:
        proxy = await cls.auto_detect_at_address(NetAddress(host, port), auth)
        if proxy:
            return proxy
    return None
[]
Please provide a description of the function:

async def create_connection(self, protocol_factory, host, port, *,
                            resolve=False, ssl=None, family=0, proto=0, flags=0):
    '''Set up a connection to (host, port) through the proxy.

    If resolve is True then host is resolved locally with getaddrinfo
    using family, proto and flags, otherwise the proxy is asked to
    resolve host.

    The function signature is similar to loop.create_connection() with
    the same result. The attribute _address is set on the protocol to
    the address of the successful remote connection. Additionally
    raises SOCKSError if something goes wrong with the proxy handshake.
    '''
    loop = asyncio.get_event_loop()
    if resolve:
        remote_addresses = [
            NetAddress(info[4][0], info[4][1])
            for info in await loop.getaddrinfo(host, port, family=family,
                                               proto=proto,
                                               type=socket.SOCK_STREAM,
                                               flags=flags)
        ]
    else:
        remote_addresses = [NetAddress(host, port)]

    sock, remote_address = await self._connect(remote_addresses)

    def set_address():
        protocol = protocol_factory()
        protocol._proxy = self
        protocol._remote_address = remote_address
        return protocol

    return await loop.create_connection(
        set_address, sock=sock, ssl=ssl,
        server_hostname=host if ssl else None)
[]
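A combined usage sketch of the detection helpers and create_connection above (the host, ports and MyProtocol factory are illustrative, not from the source):

    # Probe common local proxy ports (e.g. Tor's 9050) and connect through
    # the first proxy found.
    proxy = await SOCKSProxy.auto_detect_at_host("localhost", [9050, 1080], auth=None)
    if proxy:
        transport, protocol = await proxy.create_connection(
            MyProtocol, "example.com", 443, ssl=True)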
Please provide a description of the function:

async def create_connection(self):
    '''Initiate a connection.'''
    connector = self.proxy or self.loop
    return await connector.create_connection(
        self.session_factory, self.host, self.port, **self.kwargs)
[]
Please provide a description of the function:

def data_received(self, framed_message):
    '''Called by asyncio when a message comes in.'''
    if self.verbosity >= 4:
        self.logger.debug(f'Received framed message {framed_message}')
    self.recv_size += len(framed_message)
    self.bump_cost(len(framed_message) * self.bw_cost_per_byte)
    self.framer.received_bytes(framed_message)
[]
Please provide a description of the function:

def pause_writing(self):
    '''Transport calls when the send buffer is full.'''
    if not self.is_closing():
        self._can_send.clear()
        self.transport.pause_reading()
[]
Please provide a description of the function:

def resume_writing(self):
    '''Transport calls when the send buffer has room.'''
    if not self._can_send.is_set():
        self._can_send.set()
        self.transport.resume_reading()
[]
Please provide a description of the function:

def connection_made(self, transport):
    '''Called by asyncio when a connection is established.

    Derived classes overriding this method must call this first.
    '''
    self.transport = transport
    # If the Socks proxy was used then _proxy and _remote_address are already set
    if self._proxy is None:
        # This would throw if called on a closed SSL transport. Fixed in asyncio in
        # Python 3.6.1 and 3.5.4
        peername = transport.get_extra_info('peername')
        self._remote_address = NetAddress(peername[0], peername[1])
    self._task = spawn_sync(self._process_messages(), loop=self.loop)
[]
Please provide a description of the function:

def connection_lost(self, exc):
    '''Called by asyncio when the connection closes.

    Tear down things done in connection_made.
    '''
    # Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246
    if self.transport:
        self.transport = None
        self.closed_event.set()
        # Release waiting tasks
        self._can_send.set()
        # Cancelling directly leads to self-cancellation problems for member
        # functions await-ing self.close()
        self.loop.call_soon(self._task.cancel)
[]
Please provide a description of the function:

def recalc_concurrency(self):
    '''Call to recalculate sleeps and concurrency for the session.
    Called automatically if cost has drifted significantly. Otherwise
    can be called at regular intervals if desired.
    '''
    # Refund resource usage proportionally to elapsed time; the bump passed is negative
    now = time.time()
    self.cost = max(0, self.cost - (now - self._cost_time) * self.cost_decay_per_sec)
    self._cost_time = now
    self._cost_last = self.cost

    # Setting cost_hard_limit <= 0 means to not limit concurrency
    value = self._incoming_concurrency.max_concurrent
    cost_soft_range = self.cost_hard_limit - self.cost_soft_limit
    if cost_soft_range <= 0:
        return

    cost = self.cost + self.extra_cost()
    self._cost_fraction = max(0.0, (cost - self.cost_soft_limit) / cost_soft_range)

    target = max(0, ceil((1.0 - self._cost_fraction) * self.initial_concurrent))
    if abs(target - value) > 1:
        self.logger.info(f'changing task concurrency from {value} to {target}')
        self._incoming_concurrency.set_target(target)
[]
Please provide a description of the function:

async def close(self, *, force_after=30):
    '''Close the connection and return when closed.'''
    if self.transport:
        self.transport.close()
        try:
            async with timeout_after(force_after):
                await self.closed_event.wait()
        except TaskTimeout:
            self.abort()
            await self.closed_event.wait()
[]
Please provide a description of the function:

async def _throttled_message(self, message):
    '''Process a single request, respecting the concurrency limit.'''
    try:
        timeout = self.processing_timeout
        async with timeout_after(timeout):
            async with self._incoming_concurrency:
                if self._cost_fraction:
                    await sleep(self._cost_fraction * self.cost_sleep)
                await self.handle_message(message)
    except ProtocolError as e:
        self.logger.error(f'{e}')
        self._bump_errors(e)
    except TaskTimeout:
        self.logger.info(f'incoming request timed out after {timeout} secs')
        self._bump_errors()
    except ExcessiveSessionCostError:
        await self.close()
    except CancelledError:
        raise
    except Exception:
        self.logger.exception(f'exception handling {message}')
        self._bump_errors()
[]
Please provide a description of the function:

async def _throttled_request(self, request):
    '''Process a single request, respecting the concurrency limit.'''
    disconnect = False
    try:
        timeout = self.processing_timeout
        async with timeout_after(timeout):
            async with self._incoming_concurrency:
                if self.is_closing():
                    return
                if self._cost_fraction:
                    await sleep(self._cost_fraction * self.cost_sleep)
                result = await self.handle_request(request)
    except (ProtocolError, RPCError) as e:
        result = e
    except TaskTimeout:
        self.logger.info(f'incoming request {request} timed out after {timeout} secs')
        result = RPCError(JSONRPC.SERVER_BUSY, 'server busy - request timed out')
    except ReplyAndDisconnect as e:
        result = e.result
        disconnect = True
    except ExcessiveSessionCostError:
        result = RPCError(JSONRPC.EXCESSIVE_RESOURCE_USAGE, 'excessive resource usage')
        disconnect = True
    except CancelledError:
        raise
    except Exception:
        self.logger.exception(f'exception handling {request}')
        result = RPCError(JSONRPC.INTERNAL_ERROR, 'internal server error')

    if isinstance(request, Request):
        message = request.send_result(result)
        if message:
            await self._send_message(message)
    if isinstance(result, Exception):
        self._bump_errors(result)
    if disconnect:
        await self.close()
[]
Please provide a description of the function:

async def send_request(self, method, args=()):
    '''Send an RPC request over the network.'''
    message, event = self.connection.send_request(Request(method, args))
    return await self._send_concurrent(message, event, 1)
[]
Please provide a description of the function:

async def send_notification(self, method, args=()):
    '''Send an RPC notification over the network.'''
    message = self.connection.send_notification(Notification(method, args))
    await self._send_message(message)
[]
Please provide a description of the function:

async def close(self):
    '''Close the listening socket. This does not close any ServerSession
    objects created to handle incoming connections.
    '''
    if self.server:
        self.server.close()
        await self.server.wait_closed()
        self.server = None
[]
Please provide a description of the function:

def _message_to_payload(cls, message):
    '''Returns a Python object or a ProtocolError.'''
    try:
        return json.loads(message.decode())
    except UnicodeDecodeError:
        message = 'messages must be encoded in UTF-8'
    except json.JSONDecodeError:
        message = 'invalid JSON'
    raise cls._error(cls.PARSE_ERROR, message, True, None)
[]
Please provide a description of the function:

def message_to_item(cls, message):
    '''Translate an unframed received message and return an
    (item, request_id) pair.

    The item can be a Request, Notification, Response or a list.

    A JSON RPC error response is returned as an RPCError inside a
    Response object.

    If a Batch is returned, request_id is an iterable of request ids,
    one per batch member.

    If the message violates the protocol in some way a ProtocolError
    is returned, except if the message was determined to be a
    response, in which case the ProtocolError is placed inside a
    Response object. This is so that client code can mark a request
    as having been responded to even if the response was bad.

    raises: ProtocolError
    '''
    payload = cls._message_to_payload(message)
    if isinstance(payload, dict):
        if 'method' in payload:
            return cls._process_request(payload)
        else:
            return cls._process_response(payload)
    elif isinstance(payload, list) and cls.allow_batches:
        if not payload:
            raise cls._error(JSONRPC.INVALID_REQUEST, 'batch is empty', True, None)
        return payload, None
    raise cls._error(cls.INVALID_REQUEST,
                     'request object must be a dictionary', True, None)
[ "Translate an unframed received message and return an (item, request_id) pair. The item can be a Request, Notification, Response or a list. A JSON RPC error response is returned as an RPCError inside a Response object. If a Batch is returned, request_id is an iterable of request ids, one per batch member. If the message violates the protocol in some way a ProtocolError is returned, except if the message was determined to be a response, in which case the ProtocolError is placed inside a Response object, so that client code can mark a request as having been responded to even if the response was bad. Raises: ProtocolError." ]
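The classification boils down to: a dict with a 'method' key is a request or notification, a dict without one is treated as a response, and a non-empty list is a batch. A quick standalone sketch:

import json

def classify(message: bytes) -> str:
    payload = json.loads(message.decode())
    if isinstance(payload, dict):
        return 'request' if 'method' in payload else 'response'
    if isinstance(payload, list):
        return 'batch' if payload else 'empty batch (protocol error)'
    return 'protocol error: not an object'

for msg in (b'{"method": "ping", "id": 1}',
            b'{"result": "pong", "id": 1}',
            b'[{"method": "a"}, {"method": "b"}]',
            b'[]',
            b'42'):
    print(classify(msg))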
Please provide a description of the function:def request_message(cls, item, request_id):
        '''Convert a Request item to a message.'''
        assert isinstance(item, Request)
        return cls.encode_payload(cls.request_payload(item, request_id))
[ "Convert a Request item to a message." ]
Please provide a description of the function:def notification_message(cls, item):
        '''Convert a Notification item to a message.'''
        assert isinstance(item, Notification)
        return cls.encode_payload(cls.request_payload(item, None))
[ "Convert a Notification item to a message." ]
Please provide a description of the function:def response_message(cls, result, request_id): '''Convert a response result (or RPCError) to a message.''' if isinstance(result, CodeMessageError): payload = cls.error_payload(result, request_id) else: payload = cls.response_payload(result, request_id) return cls.encode_payload(payload)
[ "Convert a response result (or RPCError) to a message." ]
Please provide a description of the function:def batch_message(cls, batch, request_ids): '''Convert a request Batch to a message.''' assert isinstance(batch, Batch) if not cls.allow_batches: raise ProtocolError.invalid_request( 'protocol does not permit batches') id_iter = iter(request_ids) rm = cls.request_message nm = cls.notification_message parts = (rm(request, next(id_iter)) if isinstance(request, Request) else nm(request) for request in batch) return cls.batch_message_from_parts(parts)
[ "Convert a request Batch to a message." ]
Please provide a description of the function:def batch_message_from_parts(cls, messages): '''Convert messages, one per batch item, into a batch message. At least one message must be passed. ''' # Comma-separate the messages and wrap the lot in square brackets middle = b', '.join(messages) if not middle: raise ProtocolError.empty_batch() return b''.join([b'[', middle, b']'])
[ "Convert messages, one per batch item, into a batch message. At least one message must be passed." ]
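The framing itself is just a comma join wrapped in square brackets, for example:

parts = [b'{"method": "a", "id": 0}', b'{"method": "b", "id": 1}']
batch = b''.join([b'[', b', '.join(parts), b']'])
print(batch)   # b'[{"method": "a", "id": 0}, {"method": "b", "id": 1}]'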
Please provide a description of the function:def encode_payload(cls, payload): '''Encode a Python object as JSON and convert it to bytes.''' try: return json.dumps(payload).encode() except TypeError: msg = f'JSON payload encoding error: {payload}' raise ProtocolError(cls.INTERNAL_ERROR, msg) from None
[ "Encode a Python object as JSON and convert it to bytes." ]
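json.dumps raises TypeError for objects it cannot serialize, which is what the except clause converts into a protocol-level error. For instance:

import json

for payload in ({'result': [1, 2]}, {'result': object()}):
    try:
        print(json.dumps(payload).encode())
    except TypeError as e:
        print('JSON payload encoding error:', e)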
Please provide a description of the function:def request_payload(cls, request, request_id): '''JSON v1 request (or notification) payload.''' if isinstance(request.args, dict): raise ProtocolError.invalid_args( 'JSONRPCv1 does not support named arguments') return { 'method': request.method, 'params': request.args, 'id': request_id }
[ "JSON v1 request (or notification) payload." ]
Please provide a description of the function:def request_payload(cls, request, request_id): '''JSON v2 request (or notification) payload.''' payload = { 'jsonrpc': '2.0', 'method': request.method, } # A notification? if request_id is not None: payload['id'] = request_id # Preserve empty dicts as missing params is read as an array if request.args or request.args == {}: payload['params'] = request.args return payload
[ "JSON v2 request (or notification) payload." ]
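The two payload builders differ in three ways: v1 rejects named arguments, v1 always carries an 'id' key (None for notifications, since notification_message passes request_id=None) while v2 omits the key entirely, and v2 keeps an explicit empty dict so it is not misread as positional arguments. A standalone sketch taking plain (method, args) arguments instead of a Request object:

def v1_payload(method, args, request_id):
    if isinstance(args, dict):
        raise ValueError('JSONRPCv1 does not support named arguments')
    return {'method': method, 'params': list(args), 'id': request_id}

def v2_payload(method, args, request_id=None):
    payload = {'jsonrpc': '2.0', 'method': method}
    if request_id is not None:        # notifications carry no id at all
        payload['id'] = request_id
    if args or args == {}:            # preserve an explicit empty dict
        payload['params'] = args
    return payload

print(v1_payload('sum', (1, 2), 0))   # {'method': 'sum', 'params': [1, 2], 'id': 0}
print(v2_payload('sum', (1, 2), 0))   # includes 'jsonrpc': '2.0' and 'params': (1, 2)
print(v2_payload('ping', ()))         # notification: no 'id', no 'params'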
Please provide a description of the function:def detect_protocol(cls, message): '''Attempt to detect the protocol from the message.''' main = cls._message_to_payload(message) def protocol_for_payload(payload): if not isinstance(payload, dict): return JSONRPCLoose # Will error # Obey an explicit "jsonrpc" version = payload.get('jsonrpc') if version == '2.0': return JSONRPCv2 if version == '1.0': return JSONRPCv1 # Now to decide between JSONRPCLoose and JSONRPCv1 if possible if 'result' in payload and 'error' in payload: return JSONRPCv1 return JSONRPCLoose if isinstance(main, list): parts = set(protocol_for_payload(payload) for payload in main) # If all same protocol, return it if len(parts) == 1: return parts.pop() # If strict protocol detected, return it, preferring JSONRPCv2. # This means a batch of JSONRPCv1 will fail for protocol in (JSONRPCv2, JSONRPCv1): if protocol in parts: return protocol # Will error if no parts return JSONRPCLoose return protocol_for_payload(main)
[ "Attempt to detect the protocol from the message." ]
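Condensed to its single-payload core, the detection logic reads roughly as below; this is a sketch with string labels standing in for the protocol classes:

import json

def detect(message: bytes) -> str:
    payload = json.loads(message.decode())
    if not isinstance(payload, dict):
        return 'loose'                     # will error later
    version = payload.get('jsonrpc')
    if version == '2.0':
        return 'v2'
    if version == '1.0':
        return 'v1'
    if 'result' in payload and 'error' in payload:
        return 'v1'                        # v1 responses carry both keys
    return 'loose'

print(detect(b'{"jsonrpc": "2.0", "method": "ping", "id": 1}'))   # v2
print(detect(b'{"method": "ping", "params": [], "id": 1}'))       # loose
print(detect(b'{"result": 3, "error": null, "id": 1}'))           # v1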
Please provide a description of the function:def send_request(self, request):
        '''Send a Request.  Return a (message, event) pair.

        The message is an unframed message to send over the network.
        Wait on the event for the response, which will be in the
        "result" attribute.

        Raises: ProtocolError if the request violates the protocol in
        some way.
        '''
        request_id = next(self._id_counter)
        message = self._protocol.request_message(request, request_id)
        return message, self._event(request, request_id)
[ "Send a Request. Return a (message, event) pair. The message is an unframed message to send over the network; wait on the event for the response, which will be in its 'result' attribute. Raises: ProtocolError if the request violates the protocol in some way." ]
Please provide a description of the function:def receive_message(self, message):
        '''Call with an unframed message received from the network.

        Raises: ProtocolError if the message violates the protocol in
        some way.  However, if it happened in a response that can be
        paired with a request, the ProtocolError is instead set in the
        result attribute of the send_request() that caused the error.
        '''
        if self._protocol is JSONRPCAutoDetect:
            self._protocol = JSONRPCAutoDetect.detect_protocol(message)

        try:
            item, request_id = self._protocol.message_to_item(message)
        except ProtocolError as e:
            # The builtin `id` doubles as a sentinel meaning "no response id",
            # so this checks whether the error arose in an identifiable response.
            if e.response_msg_id is not id:
                return self._receive_response(e, e.response_msg_id)
            raise

        if isinstance(item, Request):
            item.send_result = partial(self._send_result, request_id)
            return [item]
        if isinstance(item, Notification):
            return [item]
        if isinstance(item, Response):
            return self._receive_response(item.result, request_id)

        assert isinstance(item, list)
        if all(isinstance(payload, dict)
               and ('result' in payload or 'error' in payload)
               for payload in item):
            return self._receive_response_batch(item)
        else:
            return self._receive_request_batch(item)
[ "Call with an unframed message received from the network. Raises: ProtocolError if the message violates the protocol in some way. However, if it happened in a response that can be paired with a request, the ProtocolError is instead set in the result attribute of the send_request() that caused the error." ]
Please provide a description of the function:def cancel_pending_requests(self): '''Cancel all pending requests.''' exception = CancelledError() for _request, event in self._requests.values(): event.result = exception event.set() self._requests.clear()
[ "Cancel all pending requests." ]
Please provide a description of the function:def is_valid_hostname(hostname): '''Return True if hostname is valid, otherwise False.''' if not isinstance(hostname, str): raise TypeError('hostname must be a string') # strip exactly one dot from the right, if present if hostname and hostname[-1] == ".": hostname = hostname[:-1] if not hostname or len(hostname) > 253: return False labels = hostname.split('.') # the TLD must be not all-numeric if re.match(NUMERIC_REGEX, labels[-1]): return False return all(LABEL_REGEX.match(label) for label in labels)
[ "Return True if hostname is valid, otherwise False." ]
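LABEL_REGEX and NUMERIC_REGEX are module-level patterns not shown in this excerpt; the sketch below assumes conventional RFC 1035-style definitions, so treat the exact patterns as an assumption rather than a quote from this codebase:

import re

# Assumed definitions for the module-level patterns referenced above:
LABEL_REGEX = re.compile(r'[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?$')
NUMERIC_REGEX = re.compile(r'[0-9]+$')

def is_valid_hostname(hostname: str) -> bool:
    if hostname and hostname[-1] == '.':
        hostname = hostname[:-1]          # one trailing dot is allowed
    if not hostname or len(hostname) > 253:
        return False
    labels = hostname.split('.')
    if NUMERIC_REGEX.match(labels[-1]):   # the TLD must not be all-numeric
        return False
    return all(LABEL_REGEX.match(label) for label in labels)

print(is_valid_hostname('example.com.'))   # True
print(is_valid_hostname('-bad-.com'))      # False (label starts with '-')
print(is_valid_hostname('127.0.0.1'))      # False (all-numeric TLD)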
Please provide a description of the function:def classify_host(host): '''Host is an IPv4Address, IPv6Address or a string. If an IPv4Address or IPv6Address return it. Otherwise convert the string to an IPv4Address or IPv6Address object if possible and return it. Otherwise return the original string if it is a valid hostname. Raise ValueError if a string cannot be interpreted as an IP address and it is not a valid hostname. ''' if isinstance(host, (IPv4Address, IPv6Address)): return host if is_valid_hostname(host): return host return ip_address(host)
[ "Host is an IPv4Address, IPv6Address or a string. If an IPv4Address or IPv6Address, return it. Otherwise, if the string can be converted to an IPv4Address or IPv6Address, return that; otherwise return the original string if it is a valid hostname. Raises ValueError if a string cannot be interpreted as an IP address and is not a valid hostname." ]
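Note the branch order: the hostname test runs before ip_address(), so ip_address()'s ValueError only escapes for strings that are neither a hostname nor an IP. The standard-library pieces behave like this:

from ipaddress import ip_address

print(type(ip_address('192.168.0.1')).__name__)   # IPv4Address
print(type(ip_address('::1')).__name__)           # IPv6Address
try:
    ip_address('example.com')                      # not an IP address
except ValueError as e:
    print(e)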
Please provide a description of the function:def validate_port(port):
    '''Validate port and return it as an integer.

    An integer, or its representation as a string, is accepted.'''
    if not isinstance(port, (str, int)):
        raise TypeError(f'port must be an integer or string: {port}')
    if isinstance(port, str) and port.isdigit():
        port = int(port)
    if isinstance(port, int) and 0 < port <= 65535:
        return port
    raise ValueError(f'invalid port: {port}')
[ "Validate port and return it as an integer. An integer, or its representation as a string, is accepted." ]
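A few boundary cases; the function is repeated verbatim so the demo runs standalone:

def validate_port(port):
    if not isinstance(port, (str, int)):
        raise TypeError(f'port must be an integer or string: {port}')
    if isinstance(port, str) and port.isdigit():
        port = int(port)
    if isinstance(port, int) and 0 < port <= 65535:
        return port
    raise ValueError(f'invalid port: {port}')

print(validate_port('8080'))   # 8080 (string converted to int)
print(validate_port(443))      # 443
for bad in (0, 65536, '80a'):
    try:
        validate_port(bad)
    except ValueError as e:
        print(e)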
Please provide a description of the function:def validate_protocol(protocol):
    '''Validate a protocol, a string, and return it lower-cased.'''
    if not re.match(PROTOCOL_REGEX, protocol):
        raise ValueError(f'invalid protocol: {protocol}')
    return protocol.lower()
[ "Validate a protocol, a string, and return it lower-cased." ]
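PROTOCOL_REGEX is also not shown in this excerpt; the demo below assumes an RFC 3986-style scheme pattern, which is purely an assumption:

import re

# Assumed definition; the real PROTOCOL_REGEX may differ:
PROTOCOL_REGEX = re.compile(r'[A-Za-z][A-Za-z0-9+.\-]*$')

def validate_protocol(protocol: str) -> str:
    if not PROTOCOL_REGEX.match(protocol):
        raise ValueError(f'invalid protocol: {protocol}')
    return protocol.lower()

print(validate_protocol('SSL'))   # 'ssl'
print(validate_protocol('tcp'))   # 'tcp'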