Columns: text (string, lengths 78 to 104k) and score (float64, range 0 to 0.18).
def get_int(byte_array, signed=True): """ Gets the specified integer from its byte array. This should be used by this module alone, as it works with big endian. :param byte_array: the byte array representing the integer. :param signed: whether the number is signed or not. :return: the integer representing the given byte array. """ return int.from_bytes(byte_array, byteorder='big', signed=signed)
0.002326
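A couple of self-contained checks of the big-endian helper above (standard library only):

def get_int(byte_array, signed=True):
    # Big endian: the first byte is the most significant.
    return int.from_bytes(byte_array, byteorder='big', signed=signed)

assert get_int(b'\x01\x00') == 256
assert get_int(b'\xff\xff') == -1                  # signed interpretation
assert get_int(b'\xff\xff', signed=False) == 65535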
def broadcast(self, fromUserId, objectName, content, pushContent=None, pushData=None, os=None): """ Send a broadcast message (sends a message to every registered user of an application; if a user is not online, a Push notification is sent to users who meet the conditions (a bound mobile terminal). A single message is at most 128k and the conversation type is SYSTEM. It may be sent at most once per hour and at most 3 times per day.) @param fromUserId: sender user Id. (required) @param objectName: message type. @param content: message content. @param pushContent: the Push content to display. If objectName is a built-in message type, the user is guaranteed to receive a Push notification after sending; for a custom message, pushContent is the Push content shown for that custom message, and if it is not passed the user will not receive a Push notification. (optional) @param pushData: for iOS, appended to the payload of the Push notification; for Android clients the corresponding field name on the received push message is pushData. (optional) @param os: send Push for a specific operating system; a value of iOS sends Push to iOS users and Android sends Push to Android users. To send Push to all users, omit the os parameter. (optional) @return code: return code, 200 means success. @return errorMessage: error message. """ desc = { "name": "CodeSuccessReslut", "desc": "http success result", "fields": [{ "name": "code", "type": "Integer", "desc": "Return code, 200 means success." }, { "name": "errorMessage", "type": "String", "desc": "Error message." }] } r = self.call_api( method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/broadcast.json', params={ "fromUserId": fromUserId, "objectName": objectName, "content": content, "pushContent": pushContent, "pushData": pushData, "os": os }) return Response(r, desc)
0.010651
def build(client, destination_args): """ Build a SetupHandler object for client from destination parameters. """ # A remote job directory has been defined, so do the setup locally. if client.job_directory: handler = LocalSetupHandler(client, destination_args) else: handler = RemoteSetupHandler(client) return handler
0.002817
def fit(self, fr): """ Perform the munging operations specified in steps on the frame fr. :param fr: H2OFrame on which the munging operations are to be performed. :return: H2OFrame after the munging operations are completed. """ assert_is_type(fr, H2OFrame) steps = "[%s]" % ",".join(quoted(step[1].to_rest(step[0]).replace('"', "'")) for step in self.steps) j = h2o.api("POST /99/Assembly", data={"steps": steps, "frame": fr.frame_id}) self.id = j["assembly"]["name"] return H2OFrame.get_frame(j["result"]["name"])
0.008306
def process_user(self, user: types.User): """ Generate user data :param user: :return: """ if not user: return yield 'user_id', user.id if self.include_content: yield 'user_full_name', user.full_name if user.username: yield 'user_name', f"@{user.username}"
0.005348
def get_checksums(self, fileset): """ Downloads the MD5 digests associated with the files in the file-set. These are saved with the downloaded files in the cache and used to check if the files have been updated on the server. Parameters ---------- fileset : Fileset The fileset to download the checksums for. Its format is used to determine the primary file within the resource and change the corresponding key in the checksums dictionary to '.' to match the way it is generated locally by Arcana. """ if fileset.uri is None: raise ArcanaUsageError( "Can't retrieve checksums as URI has not been set for {}" .format(fileset)) with self: checksums = {r['Name']: r['digest'] for r in self._login.get_json(fileset.uri + '/files')[ 'ResultSet']['Result']} if not fileset.format.directory: # Replace the key corresponding to the primary file with '.' to # match the way that checksums are created by Arcana primary = fileset.format.assort_files(checksums.keys())[0] checksums['.'] = checksums.pop(primary) return checksums
0.001438
def show_compatibility_message(self, message): """ Show compatibility message. """ messageBox = QMessageBox(self) messageBox.setWindowModality(Qt.NonModal) messageBox.setAttribute(Qt.WA_DeleteOnClose) messageBox.setWindowTitle('Compatibility Check') messageBox.setText(message) messageBox.setStandardButtons(QMessageBox.Ok) messageBox.show()
0.004751
def flush(self, timeout=None): """ Invoking this method makes all buffered records immediately available to send (even if linger_ms is greater than 0) and blocks on the completion of the requests associated with these records. The post-condition of :meth:`~kafka.KafkaProducer.flush` is that any previously sent record will have completed (e.g. Future.is_done() == True). A request is considered completed when either it is successfully acknowledged according to the 'acks' configuration for the producer, or it results in an error. Other threads can continue sending messages while one thread is blocked waiting for a flush call to complete; however, no guarantee is made about the completion of messages sent after the flush call begins. Arguments: timeout (float, optional): timeout in seconds to wait for completion. Raises: KafkaTimeoutError: failure to flush buffered records within the provided timeout """ log.debug("Flushing accumulated records in producer.") # trace self._accumulator.begin_flush() self._sender.wakeup() self._accumulator.await_flush_completion(timeout=timeout)
0.002336
def _assert_sframe_equal(sf1, sf2, check_column_names=True, check_column_order=True, check_row_order=True, float_column_delta=None): """ Assert the two SFrames are equal. The default behavior of this function uses the strictest possible definition of equality, where all columns must be in the same order, with the same names and have the same data in the same order. Each of these stipulations can be relaxed individually and in concert with another, with the exception of `check_column_order` and `check_column_names`, we must use one of these to determine which columns to compare with one another. Parameters ---------- sf1 : SFrame sf2 : SFrame check_column_names : bool If true, assert if the data values in two columns are the same, but they have different names. If False, column order is used to determine which columns to compare. check_column_order : bool If true, assert if the data values in two columns are the same, but are not in the same column position (one is the i-th column and the other is the j-th column, i != j). If False, column names are used to determine which columns to compare. check_row_order : bool If true, assert if all rows in the first SFrame exist in the second SFrame, but they are not in the same order. float_column_delta : float The acceptable delta that two float values can be and still be considered "equal". When this is None, only exact equality is accepted. This is the default behavior since columns of all Nones are often of float type. Applies to all float columns. """ from .. import SFrame as _SFrame if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame): raise TypeError("Cannot function on types other than SFrames.") if not check_column_order and not check_column_names: raise ValueError("Cannot ignore both column order and column names.") sf1.__materialize__() sf2.__materialize__() if sf1.num_columns() != sf2.num_columns(): raise AssertionError("Number of columns mismatched: " + str(sf1.num_columns()) + " != " + str(sf2.num_columns())) s1_names = sf1.column_names() s2_names = sf2.column_names() sorted_s1_names = sorted(s1_names) sorted_s2_names = sorted(s2_names) if check_column_names: if (check_column_order and (s1_names != s2_names)) or (sorted_s1_names != sorted_s2_names): raise AssertionError("SFrame does not have same column names: " + str(sf1.column_names()) + " != " + str(sf2.column_names())) if sf1.num_rows() != sf2.num_rows(): raise AssertionError("Number of rows mismatched: " + str(sf1.num_rows()) + " != " + str(sf2.num_rows())) if not check_row_order and (sf1.num_rows() > 1): sf1 = sf1.sort(s1_names) sf2 = sf2.sort(s2_names) names_to_check = None if check_column_names: names_to_check = list(zip(sorted_s1_names, sorted_s2_names)) else: names_to_check = list(zip(s1_names, s2_names)) for i in names_to_check: col1 = sf1[i[0]] col2 = sf2[i[1]] if col1.dtype != col2.dtype: raise AssertionError("Columns " + str(i) + " types mismatched.") compare_ary = None if col1.dtype == float and float_column_delta is not None: dt = float_column_delta compare_ary = ((col1 > col2-dt) & (col1 < col2+dt)) else: compare_ary = (sf1[i[0]] == sf2[i[1]]) if not compare_ary.all(): count = 0 for j in compare_ary: if not j: first_row = count break count += 1 raise AssertionError("Columns " + str(i) + " are not equal! First differing element is at row " + str(first_row) + ": " + str((col1[first_row],col2[first_row])))
0.002901
def get_health_monitor(self, loadbalancer): """ Returns a dict representing the health monitor for the load balancer. If no monitor has been configured, returns an empty dict. """ uri = "/loadbalancers/%s/healthmonitor" % utils.get_id(loadbalancer) resp, body = self.api.method_get(uri) return body.get("healthMonitor", {})
0.005168
def exception(maxTBlevel=None): """Retrieve useful information about an exception. Returns a bunch (attribute-access dict) with the following information: * name: exception class name * cls: the exception class * exception: the exception instance * trace: the traceback instance * formatted: formatted traceback * args: arguments to the exception instance This functionality allows you to trap an exception in a method agnostic to differences between Python 2.x and 3.x. """ try: from marrow.util.bunch import Bunch cls, exc, trbk = sys.exc_info() excName = cls.__name__ excArgs = getattr(exc, 'args', None) excTb = ''.join(traceback.format_exception(cls, exc, trbk, maxTBlevel)) return Bunch( name=excName, cls=cls, exception=exc, trace=trbk, formatted=excTb, args=excArgs ) finally: del cls, exc, trbk
0.000974
def apiDockerCall(job, image, parameters=None, deferParam=None, volumes=None, working_dir=None, containerName=None, entrypoint=None, detach=False, log_config=None, auto_remove=None, remove=False, user=None, environment=None, stdout=None, stderr=False, streamfile=None, **kwargs): """ A toil wrapper for the python docker API. Docker API Docs: https://docker-py.readthedocs.io/en/stable/index.html Docker API Code: https://github.com/docker/docker-py This implements docker's python API within toil so that calls are run as jobs, with the intention that failed/orphaned docker jobs be handled appropriately. Example of using dockerCall in toil to index a FASTA file with SAMtools: def toil_job(job): working_dir = job.fileStore.getLocalTempDir() path = job.fileStore.readGlobalFile(ref_id, os.path.join(working_dir, 'ref.fasta') parameters = ['faidx', path] apiDockerCall(job, image='quay.io/ucgc_cgl/samtools:latest', working_dir=working_dir, parameters=parameters) Note that when run with detatch=False, or with detatch=True and stdout=True or stderr=True, this is a blocking call. When run with detatch=True and without output capture, the container is started and returned without waiting for it to finish. :param toil.Job.job job: The Job instance for the calling function. :param str image: Name of the Docker image to be used. (e.g. 'quay.io/ucsc_cgl/samtools:latest') :param list[str] parameters: A list of string elements. If there are multiple elements, these will be joined with spaces. This handling of multiple elements provides backwards compatibility with previous versions which called docker using subprocess.check_call(). **If list of lists: list[list[str]], then treat as successive commands chained with pipe. :param str working_dir: The working directory. :param int deferParam: Action to take on the container upon job completion. FORGO (0) leaves the container untouched and running. STOP (1) Sends SIGTERM, then SIGKILL if necessary to the container. RM (2) Immediately send SIGKILL to the container. This is the default behavior if defer is set to None. :param str name: The name/ID of the container. :param str entrypoint: Prepends commands sent to the container. See: https://docker-py.readthedocs.io/en/stable/containers.html :param bool detach: Run the container in detached mode. (equivalent to '-d') :param bool stdout: Return logs from STDOUT when detach=False (default: True). Block and capture stdout to a file when detach=True (default: False). Output capture defaults to output.log, and can be specified with the "streamfile" kwarg. :param bool stderr: Return logs from STDERR when detach=False (default: False). Block and capture stderr to a file when detach=True (default: False). Output capture defaults to output.log, and can be specified with the "streamfile" kwarg. :param str streamfile: Collect container output to this file if detach=True and stderr and/or stdout are True. Defaults to "output.log". :param dict log_config: Specify the logs to return from the container. See: https://docker-py.readthedocs.io/en/stable/containers.html :param bool remove: Remove the container on exit or not. :param str user: The container will be run with the privileges of the user specified. Can be an actual name, such as 'root' or 'lifeisaboutfishtacos', or it can be the uid or gid of the user ('0' is root; '1000' is an example of a less privileged uid or gid), or a complement of the uid:gid (RECOMMENDED), such as '0:0' (root user : root group) or '1000:1000' (some other user : some other user group). 
:param environment: Allows one to set environment variables inside of the container, such as: :param kwargs: Additional keyword arguments supplied to the docker API's run command. The list is 75 keywords total, for examples and full documentation see: https://docker-py.readthedocs.io/en/stable/containers.html :returns: Returns the standard output/standard error text, as requested, when detatch=False. Returns the underlying docker.models.containers.Container object from the Docker API when detatch=True. """ # make certain that files have the correct permissions thisUser = os.getuid() thisGroup = os.getgid() if user is None: user = str(thisUser) + ":" + str(thisGroup) if containerName is None: containerName = getContainerName(job) if working_dir is None: working_dir = os.getcwd() if volumes is None: volumes = {working_dir: {'bind': '/data', 'mode': 'rw'}} if parameters is None: parameters = [] # If 'parameters' is a list of lists, treat each list as a separate command # and chain with pipes. if len(parameters) > 0 and type(parameters[0]) is list: if entrypoint is None: entrypoint = ['/bin/bash', '-c'] chain_params = \ [' '.join((quote(arg) for arg in command)) \ for command in parameters] command = ' | '.join(chain_params) pipe_prefix = "set -eo pipefail && " command = [pipe_prefix + command] logger.debug("Calling docker with: " + repr(command)) # If 'parameters' is a normal list, join all elements into a single string # element, quoting and escaping each element. # Example: ['echo','the Oread'] becomes: ["echo 'the Oread'"] # Note that this is still a list, and the docker API prefers this as best # practice: # http://docker-py.readthedocs.io/en/stable/containers.html elif len(parameters) > 0 and type(parameters) is list: command = ' '.join((quote(arg) for arg in parameters)) logger.debug("Calling docker with: " + repr(command)) # If the 'parameters' lists are empty, they are respecified as None, which # tells the API to simply create and run the container else: entrypoint = None command = None working_dir = os.path.abspath(working_dir) # Ensure the user has passed a valid value for deferParam assert deferParam in (None, FORGO, STOP, RM), \ 'Please provide a valid value for deferParam.' client = docker.from_env(version='auto') if deferParam == STOP: job.defer(dockerStop, containerName, client) if deferParam == FORGO: remove = False elif deferParam == RM: remove = True job.defer(dockerKill, containerName, client) elif remove is True: job.defer(dockerKill, containerName, client) if auto_remove is None: auto_remove = remove try: if detach is False: # When detach is False, this returns stdout normally: # >>> client.containers.run("ubuntu:latest", "echo hello world") # 'hello world\n' if stdout is None: stdout = True out = client.containers.run(image=image, command=command, working_dir=working_dir, entrypoint=entrypoint, name=containerName, detach=False, volumes=volumes, auto_remove=auto_remove, stdout=stdout, stderr=stderr, remove=remove, log_config=log_config, user=user, environment=environment, **kwargs) return out else: if (stdout or stderr) and log_config is None: logger.warn('stdout or stderr specified, but log_config is not set. 
' 'Defaulting to "journald".') log_config = dict(type='journald') if stdout is None: stdout = False # When detach is True, this returns a container object: # >>> client.containers.run("bfirsh/reticulate-splines", detach=True) # <Container '45e6d2de7c54'> container = client.containers.run(image=image, command=command, working_dir=working_dir, entrypoint=entrypoint, name=containerName, detach=True, volumes=volumes, auto_remove=auto_remove, stdout=stdout, stderr=stderr, remove=remove, log_config=log_config, user=user, environment=environment, **kwargs) if stdout or stderr: if streamfile is None: streamfile = 'output.log' for line in container.logs(stdout=stdout, stderr=stderr, stream=True): # stream=True makes this loop blocking; we will loop until # the container stops and there is no more output. with open(streamfile, 'w') as f: f.write(line) # If we didn't capture output, the caller will need to .wait() on # the container to know when it is done. Even if we did capture # output, the caller needs the container to get at the exit code. return container except ContainerError: logger.error("Docker had non-zero exit. Check your command: " + repr(command)) raise except ImageNotFound: logger.error("Docker image not found.") raise except requests.exceptions.HTTPError as e: logger.error("The server returned an error.") raise create_api_error_from_http_exception(e)
0.002153
def dlogpdf_df_dtheta(self, f, y, Y_metadata=None): """ Derivative of the log-pdf with respect to f, differentiated w.r.t. the likelihood parameters (chained through the link function). """ if self.size > 0: if self.not_block_really: raise NotImplementedError("Need to make a decorator for this!") if isinstance(self.gp_link, link_functions.Identity): return self.dlogpdf_dlink_dtheta(f, y, Y_metadata=Y_metadata) else: inv_link_f = self.gp_link.transf(f) dlink_df = self.gp_link.dtransf_df(f) dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata) dlogpdf_df_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) # Chain each parameter of the likelihood separately for p in range(self.size): dlogpdf_df_dtheta[p, :, :] = chain_1(dlogpdf_dlink_dtheta[p,:,:], dlink_df) return dlogpdf_df_dtheta #return chain_1(dlogpdf_dlink_dtheta, dlink_df) else: # There are no parameters so return an empty array for derivatives return np.zeros((0, f.shape[0], f.shape[1]))
0.007759
def check_AP_deriv(abf,n=10): """Plot the derivative (AP velocity, V/s) traces around the first n action potentials.""" timePoints=get_AP_timepoints(abf)[:10] #first 10 if len(timePoints)==0: return swhlab.plot.new(abf,True,title="AP velocity (n=%d)"%n,xlabel="ms",ylabel="V/S") pylab.axhline(-50,color='r',lw=2,ls="--",alpha=.2) pylab.axhline(-100,color='r',lw=2,ls="--",alpha=.2) Ys=abf.get_data_around(timePoints,msDeriv=.1,padding=.005) Xs=(np.arange(len(Ys[0]))-len(Ys[0])/2)*1000/abf.rate for i in range(1,len(Ys)): pylab.plot(Xs,Ys[i],alpha=.2,color='b') pylab.plot(Xs,Ys[0],alpha=.4,color='r',lw=2) pylab.margins(0,.1)
0.054636
def ticket_form_delete(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_forms#delete-ticket-form" api_path = "/api/v2/ticket_forms/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, method="DELETE", **kwargs)
0.010345
def lock(self, block=True): """ Lock connection from being used else where """ self._locked = True return self._lock.acquire(block)
0.049645
def file(self, name, *args, **kwargs): """Open a file. :param name: Filename appended to self.root :param args: passed to open() :param kwargs: passed to open() :rtype: file """ return open(self(name), *args, **kwargs)
0.007246
def add_comment(self, page_id, text): """ Add comment into page :param page_id :param text """ data = {'type': 'comment', 'container': {'id': page_id, 'type': 'page', 'status': 'current'}, 'body': {'storage': {'value': text, 'representation': 'storage'}}} return self.post('rest/api/content/', data=data)
0.010204
def get_path(self): """ :returns: matplotlib.path.Path object for the z=0 projection of this polygon. """ if self.path is None: from matplotlib import path return path.Path(self.points[:, :2]) # z=0 projection! return self.path
0.016026
def child(self, name): """Get a child with a specified name.""" return XMLElement(lib.lsl_child(self.e, str.encode(name)))
0.014493
def _push_next(self): """Assign next batch workload to workers.""" r = next(self._iter, None) if r is None: return self._key_queue.put((self._sent_idx, r)) self._sent_idx += 1
0.008811
def handle_log_data(self, m): '''handling incoming log data''' if self.download_file is None: return # lose some data # import random # if random.uniform(0,1) < 0.05: # print('dropping ', str(m)) # return if m.ofs != self.download_ofs: self.download_file.seek(m.ofs) self.download_ofs = m.ofs if m.count != 0: s = bytearray(m.data[:m.count]) self.download_file.write(s) self.download_set.add(m.ofs // 90) self.download_ofs += m.count self.download_last_timestamp = time.time() if m.count == 0 or (m.count < 90 and len(self.download_set) == 1 + (m.ofs // 90)): dt = time.time() - self.download_start self.download_file.close() size = os.path.getsize(self.download_filename) speed = size / (1000.0 * dt) print("Finished downloading %s (%u bytes %u seconds, %.1f kbyte/sec %u retries)" % ( self.download_filename, size, dt, speed, self.retries)) self.download_file = None self.download_filename = None self.download_set = set() self.master.mav.log_request_end_send(self.target_system, self.target_component) if len(self.download_queue): self.log_download_next()
0.002694
def get_machine_stats(self): ''' Gather spider based stats ''' self.logger.debug("Gathering machine stats") the_dict = {} keys = self.redis_conn.keys('stats:crawler:*:*:*:*') for key in keys: # break down key elements = key.split(":") machine = elements[2] spider = elements[3] response = elements[4] end = elements[5] # we only care about the machine, not spider type if machine not in the_dict: the_dict[machine] = {} if response not in the_dict[machine]: the_dict[machine][response] = {} if end in the_dict[machine][response]: the_dict[machine][response][end] = the_dict[machine][response][end] + \ self._get_key_value(key, end == 'lifetime') else: the_dict[machine][response][end] = self._get_key_value(key, end == 'lifetime') # simple count the_dict['count'] = len(list(the_dict.keys())) ret_dict = {} ret_dict['machines'] = the_dict return ret_dict
0.00341
def set_offset(self, offset, mid=None): """This method will allow the menu to be placed anywhere in the open window instead of just the upper left corner. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Inputs: offset - This is the x,y tuple of the position that you want to move the screen to. mid - The offset will be treated as the value passed in instead of the top left pixel. 'x' (the x point in offset will be treated as the middle of the menu image) 'y' (the y point in offset will be treated as the middle of the menu image) 'c' (the offset will be treated as the center of the menu image) (doc string updated ver 0.1) """ if mid: imagesize = self.image.get_size() imagemidp = (int(imagesize[0] * 0.5), int(imagesize[1] * 0.5)) if mid == 'x': offset = (offset[0] - imagemidp[0], offset[1]) if mid == 'y': offset = (offset[0], offset[1] - imagemidp[1]) if mid == 'c': offset = (offset[0] - imagemidp[0], offset[1] - imagemidp[1]) self.pos = offset for i in self.buttonlist: i.rect[0] += offset[0] i.rect[1] += offset[1] try: for i in self.widgetlist: i.rect[0] += offset[0] i.rect[1] += offset[1] except AttributeError: pass
0.002498
def enqueue_or_delay(self, queue_name=None, priority=None, delayed_until=None, prepend=False, queue_model=None): """ Will enqueue or delay the job depending of the delayed_until. """ queue_name = self._get_queue_name(queue_name) fields = {'queued': '1'} if priority is not None: fields['priority'] = priority else: priority = self.priority.hget() in_the_future = delayed_until and delayed_until > datetime.utcnow() if in_the_future: fields['delayed_until'] = str(delayed_until) fields['status'] = STATUSES.DELAYED else: self.delayed_until.delete() fields['status'] = STATUSES.WAITING self.hmset(**fields) if queue_model is None: queue_model = self.queue_model queue = queue_model.get_queue(queue_name, priority) if in_the_future: queue.delay_job(self, delayed_until) else: queue.enqueue_job(self, prepend)
0.002817
def validate_url(cls, url: str) -> Optional[Match[str]]: """Check if the Extractor can handle the given url.""" match = re.match(cls._VALID_URL, url) return match
0.010753
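A small standalone sketch of the same idea; the pattern below is hypothetical and only stands in for the extractor's real _VALID_URL:

import re
from typing import Match, Optional

_VALID_URL = r"https?://(?:www\.)?example\.com/watch/(?P<id>\d+)"  # hypothetical pattern

def validate_url(url: str) -> Optional[Match[str]]:
    # re.match anchors at the start of the string, which is the behaviour extractors rely on.
    return re.match(_VALID_URL, url)

assert validate_url("https://example.com/watch/42").group("id") == "42"
assert validate_url("https://other.example.org/") is None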
def recv_exactly(self, n, timeout='default'): """ Receive exactly n bytes Aliases: read_exactly, readexactly, recvexactly """ self._print_recv_header( '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n) return self._recv_predicate(lambda s: n if len(s) >= n else 0, timeout)
0.00831
def summary_fig( mapper_summary, width=600, height=500, top=60, left=20, bottom=60, right=20, bgcolor="rgb(240,240,240)", ): """Define a dummy figure that displays info on the algorithms and sklearn class instances or methods used Returns a FigureWidget object representing the figure """ text = _text_mapper_summary(mapper_summary) data = [ dict( type="scatter", x=[0, width], y=[height, 0], mode="text", text=[text, ""], textposition="bottom right", hoverinfo="none", ) ] layout = dict( title="Algorithms and scikit-learn objects/methods", width=width, height=height, font=dict(size=12), xaxis=dict(visible=False), yaxis=dict(visible=False, range=[0, height + 5]), margin=dict(t=top, b=bottom, l=left, r=right), plot_bgcolor=bgcolor, ) return go.FigureWidget(data=data, layout=layout)
0.000933
def tf_import_demo_experience(self, states, internals, actions, terminal, reward): """ Imports a single experience to memory. """ return self.demo_memory.store( states=states, internals=internals, actions=actions, terminal=terminal, reward=reward )
0.008621
def up(ctx, instance_id): """Start EC2 instance""" session = create_session(ctx.obj['AWS_PROFILE_NAME']) ec2 = session.resource('ec2') try: instance = ec2.Instance(instance_id) instance.start() except botocore.exceptions.ClientError as e: click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True) sys.exit(2)
0.005277
def file_items(container, iterable): """ Files away objects into the appropriate attributes of the container. """ # container._value = copy(iterable) container.nodes = set() container.variables = set() container.deterministics = set() container.stochastics = set() container.potentials = set() container.observed_stochastics = set() # containers needs to be a list to hold unhashable items. container.containers = [] i = -1 for item in iterable: # If this is a dictionary, switch from key to item. if isinstance(iterable, (dict, dict_proxy_type)): key = item item = iterable[key] # Item counter else: i += 1 # If the item isn't iterable, file it away. if isinstance(item, Variable): container.variables.add(item) if isinstance(item, StochasticBase): if item.observed or not getattr(item, 'mask', None) is None: container.observed_stochastics.add(item) if not item.observed: container.stochastics.add(item) elif isinstance(item, DeterministicBase): container.deterministics.add(item) elif isinstance(item, PotentialBase): container.potentials.add(item) elif isinstance(item, ContainerBase): container.assimilate(item) container.containers.append(item) # Wrap internal containers elif hasattr(item, '__iter__'): # If this is a non-object-valued ndarray, don't container-ize it. if isinstance(item, ndarray): if item.dtype != dtype('object'): continue # If the item is iterable, wrap it in a container. Replace the item # with the wrapped version. try: new_container = Container(item) except: continue # Update all of container's variables, potentials, etc. with the new wrapped # iterable's. This process recursively unpacks nested iterables. container.assimilate(new_container) if isinstance(container, dict): container.replace(key, new_container) elif isinstance(container, tuple): return container[:i] + (new_container,) + container[i + 1:] else: container.replace(item, new_container, i) container.nodes = container.potentials | container.variables # 'Freeze' markov blanket, moral neighbors, coparents of all constituent stochastics # for future use for attr in ['moral_neighbors', 'markov_blanket', 'coparents']: setattr(container, attr, {}) for s in container.stochastics: for attr in ['moral_neighbors', 'markov_blanket', 'coparents']: getattr(container, attr)[s] = getattr(s, attr)
0.001359
def _add_item(self, cls, *args, **kwargs): """Add a plot item.""" box_index = kwargs.pop('box_index', self._default_box_index) data = cls.validate(*args, **kwargs) n = cls.vertex_count(**data) if not isinstance(box_index, np.ndarray): k = len(self._default_box_index) box_index = _get_array(box_index, (n, k)) data['box_index'] = box_index if cls not in self._items: self._items[cls] = [] self._items[cls].append(data) return data
0.003697
def _acl_exists(name=None, id=None, token=None, consul_url=None): ''' Check the acl exists by using the name or the ID, name is ignored if ID is specified, if only Name is used the ID associated with it is returned ''' ret = {'result': False, 'id': None} if id: info = __salt__['consul.acl_info'](id=id, token=token, consul_url=consul_url) elif name: info = __salt__['consul.acl_list'](token=token, consul_url=consul_url) else: return ret if info.get('data'): for acl in info['data']: if id and acl['ID'] == id: ret['result'] = True ret['id'] = id elif name and acl['Name'] == name: ret['result'] = True ret['id'] = acl['ID'] return ret
0.00246
def avail_images(): ''' Available images ''' response = _query('grid', 'image/list') ret = {} for item in response['list']: name = item['friendlyName'] ret[name] = item return ret
0.004444
def socket_parse(self, astr_destination): ''' Examines <astr_destination> and if of form <str1>:<str2> assumes that <str1> is a host to send datagram comms to over port <str2>. Returns True or False. ''' t_socketInfo = astr_destination.partition(':') if len(t_socketInfo[1]): self._b_isSocket = True self._socketRemote = t_socketInfo[0] self._socketPort = t_socketInfo[2] else: self._b_isSocket = False return self._b_isSocket
0.012367
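The str.partition behaviour that socket_parse relies on, shown in isolation:

# partition(':') splits on the first ':' only; an empty separator element
# means no ':' was present, which is the branch tested with len(t_socketInfo[1]).
host, sep, port = "127.0.0.1:1701".partition(':')
assert (host, sep, port) == ("127.0.0.1", ":", "1701")
assert "stdout".partition(':')[1] == ""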
def getColumnByColumnAlias(self, name): """Returns a column from its name. Input ----- name : columnAlias of the column to retrieve Return ------ A Column object or None when the name cannot be found. """ result = None for column in self.__columns: columnAlias = column.getColumnAlias() if columnAlias == name: result = column break return result
0.00789
def sanitizer(name, replacements=[(':','_'), ('/','_'), ('\\','_')]): """ String sanitizer to avoid problematic characters in filenames. """ for old,new in replacements: name = name.replace(old,new) return name
0.02521
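A quick check of the sanitizer's behaviour on a typical problematic name:

def sanitizer(name, replacements=[(':', '_'), ('/', '_'), ('\\', '_')]):
    # Each problematic character is replaced in turn before the name is used as a filename.
    for old, new in replacements:
        name = name.replace(old, new)
    return name

assert sanitizer("plot: a/b") == "plot_ a_b"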
def update_admin_object_resource(name, server=None, **kwargs): ''' Update a JMS destination ''' if 'jndiName' in kwargs: del kwargs['jndiName'] return _update_element(name, 'resources/admin-object-resource', kwargs, server)
0.007968
def get_stats(self, start=int(time()), stop=int(time())+10, step=10): """ Get stats of a monitored machine :param start: Time formatted as integer, from when to fetch stats (default now) :param stop: Time formatted as integer, until when to fetch stats (default +10 seconds) :param step: Step to fetch stats (default 10 seconds) :returns: A dict of stats """ payload = { 'v': 2, 'start': start, 'stop': stop, 'step': step } data = json.dumps(payload) req = self.request(self.mist_client.uri+"/clouds/"+self.cloud.id+"/machines/"+self.id+"/stats", data=data) stats = req.get().json() return stats
0.006676
def FetchSizeOfSignedBinary(binary_urn, token = None ): """Returns the size of the given binary (in bytes). Args: binary_urn: RDFURN that uniquely identifies the binary. token: ACL token to use with the legacy (non-relational) datastore. Raises: SignedBinaryNotFoundError: If no signed binary with the given URN exists. """ if _ShouldUseLegacyDatastore(): try: aff4_stream = aff4.FACTORY.Open( binary_urn, aff4_type=collects.GRRSignedBlob, mode="r", token=token) return aff4_stream.size except aff4.InstantiationError: raise SignedBinaryNotFoundError(binary_urn) else: try: references, _ = data_store.REL_DB.ReadSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn)) except db.UnknownSignedBinaryError: raise SignedBinaryNotFoundError(binary_urn) last_reference = references.items[-1] return last_reference.offset + last_reference.size
0.01206
def autocov(x, **kwargs): """Returns the autocovariance of signal s at all lags. Parameters ---------- x : ndarray axis : time axis all_lags : {True/False} whether to return all nonzero lags, or to clip the length of r_xy to be the length of x and y. If False, then the zero lag correlation is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2 Returns ------- cxx : ndarray The autocovariance function Notes ----- Adheres to the definition .. math:: C_{xx}[k]=E\{(X[n+k]-E\{X\})(X[n]-E\{X\})^{*}\} where X is a discrete, stationary (ergodic) random process """ # only remove the mean once, if needed debias = kwargs.pop('debias', True) axis = kwargs.get('axis', -1) if debias: x = remove_bias(x, axis) kwargs['debias'] = False return crosscov(x, x, **kwargs)
0.007752
def postChunked(host, selector, fields, files): """ Attempt to replace postMultipart() with nearly-identical interface. (The files tuple no longer requires the filename, and we only return the response body.) Uses the urllib2_file.py originally from http://fabien.seisen.org which was also drawn heavily from http://code.activestate.com/recipes/146306/ . This urllib2_file.py is more desirable because of the chunked uploading from a file pointer (no need to read entire file into memory) and the ability to work from behind a proxy (due to its basis on urllib2). """ params = urllib.urlencode(fields) url = 'http://%s%s?%s' % (host, selector, params) u = urllib2.urlopen(url, files) result = u.read() [fp.close() for (key, fp) in files] return result
0.009592
def which(software, strip_newline=True): '''which will return the path to where an executable is installed. ''' if software is None: software = "singularity" cmd = ['which', software ] try: result = run_command(cmd) if strip_newline is True: result['message'] = result['message'].strip('\n') return result except: # FileNotFoundError return None
0.009346
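For comparison, the standard library offers the same lookup without spawning a subprocess; a minimal sketch:

import shutil

# shutil.which searches PATH directly and returns None when the executable is not found.
print(shutil.which("singularity"))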
def _scalar_field_to_json(field, row_value): """Maps a field and value to a JSON-safe value. Args: field ( \ :class:`~google.cloud.bigquery.schema.SchemaField`, \ ): The SchemaField to use for type conversion and field name. row_value (any): Value to be converted, based on the field's type. Returns: any: A JSON-serializable object. """ converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type) if converter is None: # STRING doesn't need converting return row_value return converter(row_value)
0.001623
def _display_completions_like_readline(cli, completions): """ Display the list of completions in columns above the prompt. This will ask for a confirmation if there are too many completions to fit on a single page and provide a paginator to walk through them. """ from prompt_toolkit.shortcuts import create_confirm_application assert isinstance(completions, list) # Get terminal dimensions. term_size = cli.output.get_size() term_width = term_size.columns term_height = term_size.rows # Calculate amount of required columns/rows for displaying the # completions. (Keep in mind that completions are displayed # alphabetically column-wise.) max_compl_width = min(term_width, max(get_cwidth(c.text) for c in completions) + 1) column_count = max(1, term_width // max_compl_width) completions_per_page = column_count * (term_height - 1) page_count = int(math.ceil(len(completions) / float(completions_per_page))) # Note: math.ceil can return float on Python2. def display(page): # Display completions. page_completions = completions[page * completions_per_page: (page+1) * completions_per_page] page_row_count = int(math.ceil(len(page_completions) / float(column_count))) page_columns = [page_completions[i * page_row_count:(i+1) * page_row_count] for i in range(column_count)] result = [] for r in range(page_row_count): for c in range(column_count): try: result.append(page_columns[c][r].text.ljust(max_compl_width)) except IndexError: pass result.append('\n') cli.output.write(''.join(result)) cli.output.flush() # User interaction through an application generator function. def run(): if len(completions) > completions_per_page: # Ask confirmation if it doesn't fit on the screen. message = 'Display all {} possibilities? (y on n) '.format(len(completions)) confirm = yield create_confirm_application(message) if confirm: # Display pages. for page in range(page_count): display(page) if page != page_count - 1: # Display --MORE-- and go to the next page. show_more = yield _create_more_application() if not show_more: return else: cli.output.write('\n'); cli.output.flush() else: # Display all completions. display(0) cli.run_application_generator(run, render_cli_done=True)
0.003222
def compare(self, other, components=[]): """ Compare two publishes It expects that other publish is same or older than this one Return tuple (diff, equal) of dict {'component': ['snapshot']} """ lg.debug("Comparing publish %s (%s) and %s (%s)" % (self.name, self.storage or "local", other.name, other.storage or "local")) diff, equal = ({}, {}) for component, snapshots in self.components.items(): if component not in list(other.components.keys()): # Component is missing in other diff[component] = snapshots continue equal_snapshots = list(set(snapshots).intersection(other.components[component])) if equal_snapshots: lg.debug("Equal snapshots for %s: %s" % (component, equal_snapshots)) equal[component] = equal_snapshots diff_snapshots = list(set(snapshots).difference(other.components[component])) if diff_snapshots: lg.debug("Different snapshots for %s: %s" % (component, diff_snapshots)) diff[component] = diff_snapshots return (diff, equal)
0.005848
def copy_view(self, request, object_id): """ Instantiates a class-based view that redirects to Wagtail's 'copy' view for models that extend 'Page' (if the user has sufficient permissions). We do this via our own view so that we can reliably control redirection of the user back to the index_view once the action is completed. The view class used can be overridden by changing the 'copy_view_class' attribute. """ kwargs = {'model_admin': self, 'object_id': object_id} view_class = self.copy_view_class return view_class.as_view(**kwargs)(request)
0.003155
def get_field_from_args_or_session(config, args, field_name): """ We try to get field_name from different sources. The order of priority is the following: - command line argument (--<field_name>) - current session configuration (default_<field_name>) """ rez = getattr(args, field_name, None) #type(rez) can be int in case of wallet-index, so we cannot simply use if(rez) if (rez != None): return rez rez = config.get_session_field("default_%s"%field_name, exception_if_not_found=False) if (rez): return rez raise Exception("Fail to get default_%s from config, should specify %s via --%s parameter"%(field_name, field_name, field_name.replace("_","-")))
0.012623
def _build_likelihood(self): """ This function computes the optimal density for v, q*(v), up to a constant """ # get the (marginals of) q(f): exactly predicting! fmean, fvar = self._build_predict(self.X, full_cov=False) return tf.reduce_sum(self.likelihood.variational_expectations(fmean, fvar, self.Y))
0.011396
def fast_sync_inspect_snapshot( snapshot_path ): """ Inspect a snapshot Return useful information Return {'status': True, 'signatures': ..., 'payload_size': ..., 'sig_append_offset': ..., 'hash': ...} on success Return {'error': ...} on error """ with open(snapshot_path, 'r') as f: info = fast_sync_inspect( f ) if 'error' in info: log.error("Failed to inspect snapshot {}: {}".format(snapshot_path, info['error'])) return {'error': 'Failed to inspect snapshot'} # get the hash of the file hash_hex = get_file_hash(f, hashlib.sha256, fd_len=info['payload_size']) info['hash'] = hash_hex return info
0.012894
def execute_fields_serially( self, parent_type: GraphQLObjectType, source_value: Any, path: Optional[ResponsePath], fields: Dict[str, List[FieldNode]], ) -> AwaitableOrValue[Dict[str, Any]]: """Execute the given fields serially. Implements the "Evaluating selection sets" section of the spec for "write" mode. """ results: Dict[str, Any] = {} for response_name, field_nodes in fields.items(): field_path = add_path(path, response_name) result = self.resolve_field( parent_type, source_value, field_nodes, field_path ) if result is INVALID: continue if isawaitable(results): # noinspection PyShadowingNames async def await_and_set_result(results, response_name, result): awaited_results = await results awaited_results[response_name] = ( await result if isawaitable(result) else result ) return awaited_results # noinspection PyTypeChecker results = await_and_set_result( cast(Awaitable, results), response_name, result ) elif isawaitable(result): # noinspection PyShadowingNames async def set_result(results, response_name, result): results[response_name] = await result return results # noinspection PyTypeChecker results = set_result(results, response_name, result) else: results[response_name] = result if isawaitable(results): # noinspection PyShadowingNames async def get_results(): return await cast(Awaitable, results) return get_results() return results
0.002044
def preston_bin(data, max_num): """ Bins data on base 2 using Preston's method Parameters ---------- data : array-like Data to be binned max_num : float The maximum upper value of the data Returns ------- tuple (binned_data, bin_edges) Notes ----- Uses Preston's method of binning, which has exclusive lower boundaries and inclusive upper boundaries. Densities are not split between bins. Examples -------- >>> import macroeco.compare as comp >>> import numpy as np >>> # Load some data and get Preston bins >>> data = np.array([1, 1, 1, 1, 4, 5, 6, 7, 12, 34, 56]) >>> comp.preston_bin(data, np.max(data)) (array([4, 0, 1, 3, 1, 0, 2]), array([ 1., 2., 3., 5., 9., 17., 33., 65.])) References ---------- .. [#] Preston, F. (1962). The canonical distribution of commonness and rarity. Ecology, 43, 185-215 """ log_ub = np.ceil(np.log2(max_num)) # Make an exclusive lower bound in keeping with Preston if log_ub == 0: boundaries = np.array([0, 1]) elif log_ub == 1: boundaries = np.arange(1, 4) else: boundaries = 2 ** np.arange(0, log_ub + 1) boundaries = np.insert(boundaries, 2, 3) boundaries[3:] = boundaries[3:] + 1 hist_data = np.histogram(data, bins=boundaries) return hist_data
0.000705
def _pad_arrays(t, arrays, indices, span, period): """Internal routine to pad arrays for periodic models.""" N = len(t) if indices is None: indices = np.arange(N) pad_left = max(0, 0 - np.min(indices - span // 2)) pad_right = max(0, np.max(indices + span - span // 2) - (N - 1)) if pad_left + pad_right > 0: Nright, pad_right = divmod(pad_right, N) Nleft, pad_left = divmod(pad_left, N) t = np.concatenate([t[N - pad_left:] - (Nleft + 1) * period] + [t + i * period for i in range(-Nleft, Nright + 1)] + [t[:pad_right] + (Nright + 1) * period]) arrays = [np.concatenate([a[N - pad_left:]] + (Nleft + Nright + 1) * [a] + [a[:pad_right]]) for a in arrays] pad_left = pad_left % N Nright = pad_right / N pad_right = pad_right % N return (t, arrays, slice(pad_left + Nleft * N, pad_left + (Nleft + 1) * N)) else: return (t, arrays, slice(None))
0.000868
def xpath(self, *args, **kwargs): """ Perform XPath on the passage XML :param args: Ordered arguments for etree._Element().xpath() :param kwargs: Named arguments :return: Result list :rtype: list(etree._Element) """ if "smart_strings" not in kwargs: kwargs["smart_strings"] = False return self.resource.xpath(*args, **kwargs)
0.004975
def loadFromStream(self, stream, name=None): """Return a WSDL instance loaded from a stream object.""" document = DOM.loadDocument(stream) wsdl = WSDL() if name: wsdl.location = name elif hasattr(stream, 'name'): wsdl.location = stream.name wsdl.load(document) return wsdl
0.005682
def result(self): """Formats the result.""" return { "count": self._count, "total": self._total, "average": float(self._total) / self._count if self._count else 0 }
0.005155
def datapath4file(filename, ext:str='.tgz', archive=True): "Return data path to `filename`, checking locally first then in the config file." local_path = URLs.LOCAL_PATH/'data'/filename if local_path.exists() or local_path.with_suffix(ext).exists(): return local_path elif archive: return Config.data_archive_path() / filename else: return Config.data_path() / filename
0.023136
def get(self, didMethodName, required=True) -> DidMethod: """ :param didMethodName: name of DID Method :param required: if not found and True, throws an exception, else None :return: DID Method """ dm = self.d.get(didMethodName) if didMethodName else self.default if not dm and required: raise DidMethodNotFound return dm
0.005038
def run_evaluation(self, stream_name: str) -> None: """ Run the main loop with the given stream in the prediction mode. :param stream_name: name of the stream to be evaluated """ def prediction(): logging.info('Running prediction') self._run_zeroth_epoch([stream_name]) logging.info('Prediction done\n\n') self._try_run(prediction)
0.004808
def meta_retrieve(self, meta_lookahead = None): """ Get metadata from the query itself. This is guaranteed to only return a Python dictionary. Note that if the query failed, the metadata might not be in JSON format, in which case there may be additional, non-JSON data which can be retrieved using the following :: raw_meta = req.raw.value :return: A dictionary containing the query metadata """ if not self.__meta_received: if meta_lookahead or self.meta_lookahead: self.buffered_remainder = list(self) else: raise RuntimeError( 'This property only valid once all rows are received!') if isinstance(self.raw.value, dict): return self.raw.value return {}
0.004678
def user_agent_detail(self, **kwargs): """Get the user agent detail. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server cannot perform the request """ path = '%s/%s/user_agent_detail' % (self.manager.path, self.get_id()) return self.manager.gitlab.http_get(path, **kwargs)
0.004264
def check_user(self, user_id): """ Check whether this user can read this dataset """ if self.hidden == 'N': return True for owner in self.owners: if int(owner.user_id) == int(user_id): if owner.view == 'Y': return True return False
0.005797
def read_config(self): """Read a previous configuration file or create a new with default values.""" config_file = os.path.join(self.config_dir, 'pueue.ini') self.config = configparser.ConfigParser() # Try to get configuration file and return it # If this doesn't work, a new default config file will be created if os.path.exists(config_file): try: self.config.read(config_file) return except Exception: self.logger.error('Error while parsing config file. Deleting old config') self.logger.exception() self.config['default'] = { 'resumeAfterStart': False, 'maxProcesses': 1, 'customShell': 'default', } self.config['log'] = { 'logTime': 60*60*24*14, } self.write_config()
0.004464
def ctype_for_encoding(self, encoding): """Return ctypes type for an encoded Objective-C type.""" if encoding in self.typecodes: return self.typecodes[encoding] elif encoding[0:1] == b'^' and encoding[1:] in self.typecodes: return POINTER(self.typecodes[encoding[1:]]) elif encoding[0:1] == b'^' and encoding[1:] in [CGImageEncoding, NSZoneEncoding]: return c_void_p elif encoding[0:1] == b'r' and encoding[1:] in self.typecodes: return self.typecodes[encoding[1:]] elif encoding[0:2] == b'r^' and encoding[2:] in self.typecodes: return POINTER(self.typecodes[encoding[2:]]) else: raise Exception('unknown encoding for %s: %s' % (self.name, encoding))
0.002317
def symmetric_difference(self, other): """Constructs an unminimized DFA recognizing the symmetric difference of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the symmetric difference operation Returns: DFA: The resulting DFA """ operation = bool.__xor__ self.cross_product(other, operation) return self
0.00655
def digicam_control_send(self, target_system, target_component, session, zoom_pos, zoom_step, focus_lock, shot, command_id, extra_param, extra_value, force_mavlink1=False): ''' Control on-board Camera Control System to take shots. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) session : 0: stop, 1: start or keep it up //Session control e.g. show/hide lens (uint8_t) zoom_pos : 1 to N //Zoom's absolute position (0 means ignore) (uint8_t) zoom_step : -100 to 100 //Zooming step value to offset zoom from the current position (int8_t) focus_lock : 0: unlock focus or keep unlocked, 1: lock focus or keep locked, 3: re-lock focus (uint8_t) shot : 0: ignore, 1: shot or start filming (uint8_t) command_id : Command Identity (incremental loop: 0 to 255)//A command sent multiple times will be executed or pooled just once (uint8_t) extra_param : Extra parameters enumeration (0 means ignore) (uint8_t) extra_value : Correspondent value to given extra_param (float) ''' return self.send(self.digicam_control_encode(target_system, target_component, session, zoom_pos, zoom_step, focus_lock, shot, command_id, extra_param, extra_value), force_mavlink1=force_mavlink1)
0.007648
def load_shedding(network, **kwargs): """ Implement load shedding in existing network to identify feasibility problems Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA marginal_cost : int Marginal costs for load shedding p_nom : int Installed capacity of load shedding generator Returns ------- """ marginal_cost_def = 10000 # network.generators.marginal_cost.max()*2 p_nom_def = network.loads_t.p_set.max().max() marginal_cost = kwargs.get('marginal_cost', marginal_cost_def) p_nom = kwargs.get('p_nom', p_nom_def) network.add("Carrier", "load") start = network.generators.index.to_series().str.rsplit( ' ').str[0].astype(int).sort_values().max() + 1 index = list(range(start, start + len(network.buses.index))) network.import_components_from_dataframe( pd.DataFrame( dict(marginal_cost=marginal_cost, p_nom=p_nom, carrier='load shedding', bus=network.buses.index), index=index), "Generator" ) return
0.001745
def emit_dcp(self, event, **context): """Emit a event(with dcp) and gets the dynamic join point data(html code). :param event: str,unicode: A unique identifier name for dcp. :param context: dict: Keyword parameter, additional data passed to the template :returns: html code with :class:`~jinja2.Markup`. .. versionadded:: 2.1.0 """ if event and isinstance(event, string_types) and event in self._dcp_funcs: results = [] for f in self._dcp_funcs[event]: rv = f(**context) if rv is not None: results.append(rv) del self._dcp_funcs[event] return jinja2.Markup(TemplateEventResult(results)) else: return jinja2.Markup()
0.006289
def check_indent_level(self, string, expected, line_num): """return the indent level of the string """ indent = self.config.indent_string if indent == "\\t": # \t is not interpreted in the configuration file indent = "\t" level = 0 unit_size = len(indent) while string[:unit_size] == indent: string = string[unit_size:] level += 1 suppl = "" while string and string[0] in " \t": if string[0] != indent[0]: if string[0] == "\t": args = ("tab", "space") else: args = ("space", "tab") self.add_message("mixed-indentation", args=args, line=line_num) return level suppl += string[0] string = string[1:] if level != expected or suppl: i_type = "spaces" if indent[0] == "\t": i_type = "tabs" self.add_message( "bad-indentation", line=line_num, args=(level * unit_size + len(suppl), i_type, expected * unit_size), ) return None
0.002508
def load_yaml_file(file): """ Load data from yaml file :param file: Readable object or path to file :type file: FileIO | str | unicode :return: Yaml data :rtype: None | int | float | str | unicode | list | dict """ if not hasattr(file, "read"): with io.open(file, "r", encoding="utf-8") as f: return yaml.load(f, yaml.FullLoader) return yaml.load(file, yaml.FullLoader)
0.002347
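A minimal usage sketch of the loader above, assuming PyYAML is installed; the StringIO stands in for any readable object:

import io
import yaml

doc = io.StringIO("name: demo\nretries: 3\n")
# A readable object is passed straight to yaml.load; a plain path string would be opened instead.
data = yaml.load(doc, yaml.FullLoader)
assert data == {"name": "demo", "retries": 3}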
def recv_framer(self, value): """ Set the framer in use for the receiving side of the connection. The framer state will be reset next time the framer is used. """ if not isinstance(value, framers.Framer): raise ValueError("framer must be an instance of tendril.Framer") self._recv_framer = value
0.005464
def _lmder1_powell_singular(): """Powell's singular function (lmder test #6). Don't run this as a test, since it just zooms to zero parameters. The precise results depend a lot on nitty-gritty rounding and tolerances and things.""" def func(params, vec): vec[0] = params[0] + 10 * params[1] vec[1] = np.sqrt(5) * (params[2] - params[3]) vec[2] = (params[1] - 2 * params[2])**2 vec[3] = np.sqrt(10) * (params[0] - params[3])**2 def jac(params, jac): jac.fill(0) jac[0,0] = 1 jac[0,3] = 2 * np.sqrt(10) * (params[0] - params[3]) jac[1,0] = 10 jac[1,2] = 2 * (params[1] - 2 * params[2]) jac[2,1] = np.sqrt(5) jac[2,2] = -2 * jac[2,1] jac[3,1] = -np.sqrt(5) jac[3,3] = -jac[3,0] guess = np.asfarray([3, -1, 0, 1]) _lmder1_test(4, func, jac, guess) _lmder1_test(4, func, jac, guess * 10) _lmder1_test(4, func, jac, guess * 100)
0.011375
def remove_which(ol,value,which,**kwargs): ''' from elist.elist import * ol = [1,'a',3,'a',5,'a'] id(ol) new = remove_which(ol,'a',1) ol new id(ol) id(new) #### ol = [1,'a',3,'a',5,'a'] id(ol) rslt = remove_which(ol,'a',1,mode="original") ol rslt id(ol) id(rslt) ''' if('mode' in kwargs): mode = kwargs["mode"] else: mode = "new" new = copy.deepcopy(ol) length = ol.__len__() if(mode == "new"): l = new else: l = ol seq = -1 for i in range(0,length): if(ol[i]==value): seq = seq + 1 if(seq == which): l.pop(i) break else: pass else: pass return(l)
0.010417
def iterate_subchain(self, chain): """ A coroutine used by __call__ to forward all requests to a subchain. """ for mw in chain: try: yield mw except Exception as err: yield chain.throw(err)
0.007018
def read(self, filename, binary_mode=False, size=None, offset=None): """Reads contents of a file to a string. Args: filename: string, a path binary_mode: bool, read as binary if True, otherwise text size: int, number of bytes or characters to read, otherwise read all the contents of the file from the offset offset: int, offset into file to read from, otherwise read from the very beginning Returns: Subset of the contents of the file as a string or bytes. """ s3 = boto3.resource("s3") bucket, path = self.bucket_and_path(filename) args = {} endpoint = 0 if size is not None or offset is not None: if offset is None: offset = 0 endpoint = '' if size is None else (offset + size) args['Range'] = 'bytes={}-{}'.format(offset, endpoint) try: stream = s3.Object(bucket, path).get(**args)['Body'].read() except botocore.exceptions.ClientError as exc: if exc.response['Error']['Code'] == '416': if size is not None: # Asked for too much, so request just to the end. Do this # in a second request so we don't check length in all cases. client = boto3.client("s3") obj = client.head_object(Bucket=bucket, Key=path) len = obj['ContentLength'] endpoint = min(len, offset + size) if offset == endpoint: # Asked for no bytes, so just return empty stream = b'' else: args['Range'] = 'bytes={}-{}'.format(offset, endpoint) stream = s3.Object(bucket, path).get(**args)['Body'].read() else: raise if binary_mode: return bytes(stream) else: return stream.decode('utf-8')
0.001479
def _from_dict(cls, _dict): """Initialize a ListEnvironmentsResponse object from a json dictionary.""" args = {} if 'environments' in _dict: args['environments'] = [ Environment._from_dict(x) for x in (_dict.get('environments')) ] return cls(**args)
0.009346
def ASCII_encoding(self): """Returns the ASCII encoding of a string""" w = unicodedata.normalize('NFKD', self.word).encode('ASCII', 'ignore') # Encode into ASCII, returns a bytestring w = w.decode('utf-8') # Convert back to string return w
0.008955
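The NFKD-then-encode trick in isolation, as a small self-contained check:

import unicodedata

# NFKD splits accented characters into a base letter plus combining marks,
# which encode('ASCII', 'ignore') then drops.
word = "café"
ascii_word = unicodedata.normalize('NFKD', word).encode('ASCII', 'ignore').decode('utf-8')
assert ascii_word == "cafe"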
def _update_grammar(self):
    """
    We create a new ``Grammar`` object from the one in ``AtisSqlTableContext``, that also
    has the new entities that are extracted from the utterance. Stitching together the
    expressions to form the grammar is a little tedious here, but it is worth it because
    we don't have to create a new grammar from scratch. Creating a new grammar is expensive
    because we have many production rules that have all database values in the column on
    the right hand side. We update the expressions bottom up, since the higher level
    expressions may refer to the lower level ones. For example, the ternary expression
    will refer to the start and end times.
    """
    # This will give us a shallow copy. We have to be careful here because the ``Grammar`` object
    # contains ``Expression`` objects that have tuples containing the members of that expression.
    # We have to create new sub-expression objects so that original grammar is not mutated.
    new_grammar = copy(AtisWorld.sql_table_context.grammar)

    for numeric_nonterminal in NUMERIC_NONTERMINALS:
        self._add_numeric_nonterminal_to_grammar(numeric_nonterminal, new_grammar)
    self._update_expression_reference(new_grammar, 'pos_value', 'number')

    ternary_expressions = [
        self._get_sequence_with_spacing(
            new_grammar,
            [new_grammar['col_ref'], Literal('BETWEEN'),
             new_grammar['time_range_start'], Literal(f'AND'),
             new_grammar['time_range_end']]),
        self._get_sequence_with_spacing(
            new_grammar,
            [new_grammar['col_ref'], Literal('NOT'), Literal('BETWEEN'),
             new_grammar['time_range_start'], Literal(f'AND'),
             new_grammar['time_range_end']]),
        self._get_sequence_with_spacing(
            new_grammar,
            [new_grammar['col_ref'], Literal('not'), Literal('BETWEEN'),
             new_grammar['time_range_start'], Literal(f'AND'),
             new_grammar['time_range_end']])]

    new_grammar['ternaryexpr'] = OneOf(*ternary_expressions, name='ternaryexpr')
    self._update_expression_reference(new_grammar, 'condition', 'ternaryexpr')

    new_binary_expressions = []

    fare_round_trip_cost_expression = \
        self._get_sequence_with_spacing(
            new_grammar,
            [Literal('fare'), Literal('.'), Literal('round_trip_cost'),
             new_grammar['binaryop'], new_grammar['fare_round_trip_cost']])
    new_binary_expressions.append(fare_round_trip_cost_expression)

    fare_one_direction_cost_expression = \
        self._get_sequence_with_spacing(
            new_grammar,
            [Literal('fare'), Literal('.'), Literal('one_direction_cost'),
             new_grammar['binaryop'], new_grammar['fare_one_direction_cost']])
    new_binary_expressions.append(fare_one_direction_cost_expression)

    flight_number_expression = \
        self._get_sequence_with_spacing(
            new_grammar,
            [Literal('flight'), Literal('.'), Literal('flight_number'),
             new_grammar['binaryop'], new_grammar['flight_number']])
    new_binary_expressions.append(flight_number_expression)

    if self.dates:
        year_binary_expression = self._get_sequence_with_spacing(
            new_grammar,
            [Literal('date_day'), Literal('.'), Literal('year'),
             new_grammar['binaryop'], new_grammar['year_number']])
        month_binary_expression = self._get_sequence_with_spacing(
            new_grammar,
            [Literal('date_day'), Literal('.'), Literal('month_number'),
             new_grammar['binaryop'], new_grammar['month_number']])
        day_binary_expression = self._get_sequence_with_spacing(
            new_grammar,
            [Literal('date_day'), Literal('.'), Literal('day_number'),
             new_grammar['binaryop'], new_grammar['day_number']])
        new_binary_expressions.extend([year_binary_expression,
                                       month_binary_expression,
                                       day_binary_expression])

    new_binary_expressions = new_binary_expressions + list(new_grammar['biexpr'].members)
    new_grammar['biexpr'] = OneOf(*new_binary_expressions, name='biexpr')
    self._update_expression_reference(new_grammar, 'condition', 'biexpr')
    return new_grammar
0.007901
def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf"):
    """ Create a CAG with mapped indicators """
    with open(input, "rb") as f:
        G = pickle.load(f)
    G.map_concepts_to_indicators(min_temporal_res="month")
    G.set_indicator("UN/events/weather/precipitation",
                    "Historical Average Total Daily Rainfall (Maize)", "DSSAT")
    G.set_indicator("UN/events/human/agriculture/food_production",
                    "Historical Production (Maize)", "DSSAT")
    G.set_indicator("UN/entities/human/food/food_security",
                    "IPC Phase Classification", "FEWSNET")
    G.set_indicator("UN/entities/food_availability",
                    "Production, Meat indigenous, total", "FAO")
    G.set_indicator("UN/entities/human/financial/economic/market",
                    "Inflation Rate", "ieconomics.com")
    G.set_indicator("UN/events/human/death", "Battle-related deaths", "WDI")
    with open(output, "wb") as f:
        pickle.dump(G, f)
0.007592
def load_config(check_dir):
    """
    Load configuration file from ``check_dir / ".cs50.yaml"``, applying
    defaults to unspecified values.

    :param check_dir: directory from which to load config file
    :type check_dir: str / Path
    :rtype: dict
    """
    # Defaults for top-level keys
    options = {
        "checks": "__init__.py",
        "dependencies": None,
        "translations": None
    }

    # Defaults for translation keys
    translation_options = {
        "localedir": "locale",
        "domain": "messages",
    }

    # Normalise once so both path joins below work for str and Path inputs
    check_dir = Path(check_dir)
    config_file = check_dir / ".cs50.yaml"

    with open(config_file) as f:
        config = lib50.config.load(f.read(), "check50")

    if isinstance(config, dict):
        options.update(config)

    if options["translations"]:
        if isinstance(options["translations"], dict):
            translation_options.update(options["translations"])
        options["translations"] = translation_options

    if isinstance(options["checks"], dict):
        # Compile simple checks
        with open(check_dir / "__init__.py", "w") as f:
            f.write(simple.compile(options["checks"]))
        options["checks"] = "__init__.py"

    return options
0.00083
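A sketch of the defaulting behaviour with a made-up config dict standing in for a parsed .cs50.yaml; lib50 parsing is skipped and the merge logic simply mirrors the function above:

# Hypothetical result of lib50.config.load on a .cs50.yaml file
config = {"dependencies": ["flask"], "translations": {"domain": "check50"}}

options = {"checks": "__init__.py", "dependencies": None, "translations": None}
translation_options = {"localedir": "locale", "domain": "messages"}

options.update(config)
if options["translations"]:
    translation_options.update(options["translations"])  # dict case from the function above
    options["translations"] = translation_options

print(options)
# {'checks': '__init__.py', 'dependencies': ['flask'],
#  'translations': {'localedir': 'locale', 'domain': 'check50'}}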
def _add_to_filemenu():
    """Helper function for the above :func:add_to_filemenu()

    This function is serialised into a string and passed on
    to evalDeferred above.
    """
    import os
    import pyblish
    from maya import cmds

    # This must be duplicated here, due to this function
    # not being available through the above `evalDeferred`
    for item in ("pyblishOpeningDivider",
                 "pyblishScene",
                 "pyblishCloseDivider"):
        if cmds.menuItem(item, exists=True):
            cmds.deleteUI(item, menuItem=True)

    icon = os.path.dirname(pyblish.__file__)
    icon = os.path.join(icon, "icons", "logo-32x32.svg")

    cmds.menuItem("pyblishOpeningDivider",
                  divider=True,
                  insertAfter="saveAsOptions",
                  parent="mainFileMenu")
    cmds.menuItem("pyblishScene",
                  insertAfter="pyblishOpeningDivider",
                  label="Publish",
                  parent="mainFileMenu",
                  image=icon,
                  command="import pyblish_maya;pyblish_maya.show()")
    cmds.menuItem("pyblishCloseDivider",
                  insertAfter="pyblishScene",
                  parent="mainFileMenu",
                  divider=True)
0.000794
def install_catalogs(self):
    """
    Call to catalog install command:
    http://stackoverflow.com/a/24353921/4075339
    """
    cmd_obj = self.distribution.get_command_obj('catalogs')
    cmd_obj.force = self.force
    if self.ugali_dir:
        cmd_obj.ugali_dir = self.ugali_dir
    self.run_command('catalogs')
0.008772
def _GetCacheFileMetadataHeaderOffset(self, file_object):
    """Determines the offset of the cache file metadata header.

    This method is inspired by the work of James Habben:
    https://github.com/JamesHabben/FirefoxCache2

    Args:
        file_object (dfvfs.FileIO): a file-like object.

    Returns:
        int: offset of the file cache metadata header relative to the start
            of the file.

    Raises:
        IOError: if the start of the cache file metadata could not be determined.
    """
    file_object.seek(-4, os.SEEK_END)
    file_offset = file_object.tell()

    metadata_size_map = self._GetDataTypeMap('uint32be')

    try:
        metadata_size, _ = self._ReadStructureFromFileObject(
            file_object, file_offset, metadata_size_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse cache file metadata size with error: {0!s}'.format(
                exception))

    # Firefox splits the content into chunks.
    number_of_chunks, remainder = divmod(metadata_size, self._CHUNK_SIZE)
    if remainder != 0:
        number_of_chunks += 1

    # Each chunk in the cached record is padded with two bytes.
    # Skip the first 4 bytes which contains a hash value of the cached content.
    return metadata_size + (number_of_chunks * 2) + 4
0.003745
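A worked example of the offset arithmetic; the 256 KiB chunk size is an assumption for illustration (the real value lives in self._CHUNK_SIZE and may differ):

CHUNK_SIZE = 256 * 1024   # assumed chunk size
metadata_size = 300000    # hypothetical cached-content size read from the last 4 bytes

number_of_chunks, remainder = divmod(metadata_size, CHUNK_SIZE)
if remainder != 0:
    number_of_chunks += 1  # 300000 bytes -> 2 chunks

offset = metadata_size + (number_of_chunks * 2) + 4  # 300000 + 4 + 4 = 300008
print(offset)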
def delete_tracking_beacon(self, tracking_beacons_id, **data):
    """
    DELETE /tracking_beacons/:tracking_beacons_id/
    Delete the :format:`tracking_beacons` with the specified :tracking_beacons_id.
    """
    return self.delete("/tracking_beacons/{0}/".format(tracking_beacons_id), data=data)
0.015198
def get_humidity(self):
    """
    Returns the percentage of relative humidity
    """
    self._init_humidity()  # Ensure humidity sensor is initialised
    humidity = 0
    data = self._humidity.humidityRead()
    if (data[0]):  # Humidity valid
        humidity = data[1]
    return humidity
0.006024
def _check(self, sock_info):
    """This side-effecty function checks if this socket has been idle for
    longer than the max idle time, or if the socket has been closed by
    some external network error, and if so, attempts to create a new
    socket. If this connection attempt fails we raise the
    ConnectionFailure.

    Checking sockets lets us avoid seeing *some*
    :class:`~pymongo.errors.AutoReconnect` exceptions on server
    hiccups, etc. We only check if the socket was closed by an external
    error if it has been > 1 second since the socket was checked into the
    pool, to keep performance reasonable - we can't avoid AutoReconnects
    completely anyway.
    """
    idle_time_seconds = sock_info.idle_time_seconds()
    # If socket is idle, open a new one.
    if (self.opts.max_idle_time_seconds is not None and
            idle_time_seconds > self.opts.max_idle_time_seconds):
        sock_info.close()
        return self.connect()

    if (self._check_interval_seconds is not None and (
            0 == self._check_interval_seconds or
            idle_time_seconds > self._check_interval_seconds)):
        if self.socket_checker.socket_closed(sock_info.sock):
            sock_info.close()
            return self.connect()

    return sock_info
0.001448
def DeleteClusterTags(r, tags, dry_run=False):
    """
    Deletes tags from the cluster.

    @type tags: list of str
    @param tags: tags to delete
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    """
    query = {
        "dry-run": dry_run,
        "tag": tags,
    }
    return r.request("delete", "/2/tags", query=query)
0.002801
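A hedged usage sketch; r stands in for whatever RAPI client object exposes the request(method, path, query=...) method used above:

# r is a hypothetical RAPI client instance
job_id = DeleteClusterTags(r, ["deprecated", "staging"], dry_run=True)
print(job_id)  # dry run: nothing is actually removed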
def _add_hgnc_symbols(self, variant_obj):
    """Add hgnc symbols to the variant

    If there are transcripts use the symbols found here,
    otherwise use phizz to get the gene ids.
    """
    hgnc_symbols = set()
    if variant_obj.transcripts:
        for transcript in variant_obj.transcripts:
            if transcript.hgnc_symbol:
                hgnc_symbols.add(transcript.hgnc_symbol)
    else:
        chrom = variant_obj.CHROM
        start = variant_obj.start
        stop = variant_obj.stop
        hgnc_symbols = get_gene_symbols(chrom, start, stop)

    # Make unique ids
    variant_obj.gene_symbols = list(hgnc_symbols)
0.006897
def debug_reduce(self, rule, tokens, parent, last_token_pos):
    """Customized format and print for our kind of tokens
    which gets called in debugging grammar reduce rules
    """
    def fix(c):
        s = str(c)
        last_token_pos = s.find('_')
        if last_token_pos == -1:
            return s
        else:
            return s[:last_token_pos]

    prefix = ''
    if parent and tokens:
        p_token = tokens[parent]
        if hasattr(p_token, 'linestart') and p_token.linestart:
            prefix = 'L.%3d: ' % p_token.linestart
        else:
            prefix = ' '
        if hasattr(p_token, 'offset'):
            prefix += "%3s" % fix(p_token.offset)
            if len(rule[1]) > 1:
                prefix += '-%-3s ' % fix(tokens[last_token_pos-1].offset)
            else:
                prefix += ' '
    else:
        prefix = ' '

    print("%s%s ::= %s (%d)" % (prefix, rule[0], ' '.join(rule[1]), last_token_pos))
0.002778
def _Kw(rho, T):
    """Equation for the ionization constant of ordinary water

    Parameters
    ----------
    rho : float
        Density, [kg/m³]
    T : float
        Temperature, [K]

    Returns
    -------
    pKw : float
        Ionization constant in -log10(kw), [-]

    Notes
    ------
    Raise :class:`NotImplementedError` if input isn't in limit:

        * 0 ≤ ρ ≤ 1250
        * 273.15 ≤ T ≤ 1073.15

    Examples
    --------
    >>> _Kw(1000, 300)
    13.906565

    References
    ----------
    IAPWS, Release on the Ionization Constant of H2O,
    http://www.iapws.org/relguide/Ionization.pdf
    """
    # Check input parameters
    if rho < 0 or rho > 1250 or T < 273.15 or T > 1073.15:
        raise NotImplementedError("Incoming out of bound")

    # The internal method of calculation use rho in g/cm³
    d = rho/1000.

    # Water molecular weight different
    Mw = 18.015268

    gamma = [6.1415e-1, 4.825133e4, -6.770793e4, 1.01021e7]
    pKg = 0
    for i, g in enumerate(gamma):
        pKg += g/T**i

    Q = d*exp(-0.864671+8659.19/T-22786.2/T**2*d**(2./3))
    pKw = -12*(log10(1+Q)-Q/(Q+1)*d*(0.642044-56.8534/T-0.375754*d)) + \
        pKg+2*log10(Mw/1000)
    return pKw
0.000825
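Since the return value is pKw = -log10(Kw), recovering the ionization constant itself is one extra step; the pKw value below is the one quoted in the docstring's doctest:

pKw = _Kw(rho=1000, T=300)   # ≈ 13.906565 per the docstring example
Kw = 10 ** (-pKw)            # ionization constant itself, ≈ 1.24e-14
print(pKw, Kw)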
def fetch(backend_class, backend_args, category, filter_classified=False, manager=None):
    """Fetch items using the given backend.

    Generator to get items using the given backend class. When
    an archive manager is given, this function will store
    the fetched items in an `Archive`. If an exception is raised,
    this archive will be removed to avoid corrupted archives.

    The parameters needed to initialize the `backend` class and
    get the items are given using `backend_args` dict parameter.

    :param backend_class: backend class to fetch items
    :param backend_args: dict of arguments needed to fetch the items
    :param category: category of the items to retrieve.
        If None, it will use the default backend category
    :param filter_classified: remove classified fields from the resulting items
    :param manager: archive manager needed to store the items

    :returns: a generator of items
    """
    init_args = find_signature_parameters(backend_class.__init__,
                                          backend_args)
    archive = manager.create_archive() if manager else None
    init_args['archive'] = archive

    backend = backend_class(**init_args)

    if category:
        backend_args['category'] = category
    if filter_classified:
        backend_args['filter_classified'] = filter_classified

    fetch_args = find_signature_parameters(backend.fetch,
                                           backend_args)
    items = backend.fetch(**fetch_args)

    try:
        for item in items:
            yield item
    except Exception as e:
        if manager:
            archive_path = archive.archive_path
            manager.remove_archive(archive_path)
        raise e
0.000578
def _root_amplitude_brentq(counts, bkg, model, root_fn=_f_cash_root):
    """Fit amplitude by finding roots using Brent algorithm.

    See Appendix A Stewart (2009).

    Parameters
    ----------
    counts : `~numpy.ndarray`
        Slice of count map.
    bkg : `~numpy.ndarray`
        Slice of background map.
    model : `~numpy.ndarray`
        Model template to fit.

    Returns
    -------
    amplitude : float
        Fitted flux amplitude.
    niter : int
        Number of function evaluations needed for the fit.
    """
    # Compute amplitude bounds and assert counts > 0
    amplitude_min, amplitude_max = _amplitude_bounds(counts, bkg, model)

    if not np.sum(counts) > 0:
        return amplitude_min, 0

    args = (counts, bkg, model)

    if root_fn(0.0, *args) < 0:
        return 0.0, 1

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        try:
            result = brentq(root_fn, amplitude_min, amplitude_max, args=args,
                            maxiter=MAX_NITER, full_output=True, rtol=1E-4)
            return result[0], result[1].iterations
        except (RuntimeError, ValueError):
            # Where the root finding fails NaN is set as amplitude
            return np.nan, MAX_NITER
0.000796
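The full_output pattern used above, shown on a toy function; scipy's brentq returns a (root, results) pair when full_output=True, and results carries the iteration count:

from scipy.optimize import brentq

# Toy root-finding problem: f(x) = x**2 - 2 has a root at sqrt(2) in [0, 2]
root, results = brentq(lambda x: x**2 - 2, 0.0, 2.0,
                       full_output=True, rtol=1e-4)
print(root, results.iterations)  # ~1.4142 and the number of Brent iterations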
def show_firmware_version_output_show_firmware_version_build_time(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    build_time = ET.SubElement(show_firmware_version, "build-time")
    build_time.text = kwargs.pop('build_time')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.004785
def input_fields(self, preamble, *args):
    """Get a set of fields from the user. Optionally a preamble may be
    shown to the user describing the fields to return. The fields are
    specified as the remaining arguments with each field being a list
    with the following entries:

    - a programmer-visible name for the field
    - a string prompt to show to the user
    - one of the following values:
      - string: return a string from the user
      - password: return a string from the user but do not echo the
        input to the screen
      - boolean: return a boolean value from the user
      - integer: return an integer value from the user
    - the default value (optional)

    Fields are requested from the user in the order specified.

    Fields are returned in a dictionary with the field names being the
    keys and the values being the items.
    """
    self.new_section()
    if preamble is not None:
        self.message(preamble)

    if any([True for x in args if len(x) > 3]):
        self.message("""
Some questions have default answers which can be selected by
pressing 'Enter' at the prompt.""")

    output_dict = {}
    for field in args:
        (field_name, prompt, field_type) = field[:3]
        default = None
        if len(field) > 3:
            default = field[3]

        if field_type == 'string':
            output_dict[field_name] = self.input(prompt, default=default)
        elif field_type == 'password':
            output_dict[field_name] = self.input(prompt, no_echo=True)
        elif field_type == 'boolean':
            output_dict[field_name] = self.input_boolean(prompt, default=default)
        elif field_type == 'integer':
            output_dict[field_name] = self.input_integer(prompt, default=default)

    return output_dict
0.005413
def validate_broker_ids_subset(broker_ids, subset_ids):
    """Validate that user specified broker ids to restart exist in the broker ids
    retrieved from cluster config.

    :param broker_ids: all broker IDs in a cluster
    :type broker_ids: list of integers
    :param subset_ids: broker IDs specified by user
    :type subset_ids: list of integers
    :returns: bool
    """
    all_ids = set(broker_ids)
    valid = True
    for subset_id in subset_ids:
        valid = valid and subset_id in all_ids
        if subset_id not in all_ids:
            print("Error: user specified broker id {0} does not exist in cluster.".format(subset_id))
    return valid
0.004518
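A quick usage sketch with made-up IDs; the function both prints the offending ID and returns False:

ok = validate_broker_ids_subset([0, 1, 2], [1, 5])
# prints: Error: user specified broker id 5 does not exist in cluster.
print(ok)  # False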
def validate(args):
    """
    cldf validate <DATASET>

    Validate a dataset against the CLDF specification, i.e. check
    - whether required tables and columns are present
    - whether values for required columns are present
    - the referential integrity of the dataset
    """
    ds = _get_dataset(args)
    ds.validate(log=args.log)
0.002915
def document_scale(cls):
    """
    Create a documentation for a scale

    Import the superclass parameters

    It replaces `{superclass_parameters}` with the documentation
    of the parameters from the superclass.

    Parameters
    ----------
    cls : type
        A scale class

    Returns
    -------
    cls : type
        The scale class with a modified docstring.
    """
    params_list = []

    # Get set of cls params
    cls_param_string = docstring_parameters_section(cls)
    cls_param_dict = parameters_str_to_dict(cls_param_string)
    cls_params = set(cls_param_dict.keys())

    for i, base in enumerate(cls.__bases__):
        # Get set of base class params
        base_param_string = param_string = docstring_parameters_section(base)
        base_param_dict = parameters_str_to_dict(base_param_string)
        base_params = set(base_param_dict.keys())

        # Remove duplicate params from the base class
        duplicate_params = base_params & cls_params
        for param in duplicate_params:
            del base_param_dict[param]

        if duplicate_params:
            param_string = parameters_dict_to_str(base_param_dict)

        # Accumulate params of base case
        if i == 0:
            # Compensate for the indentation of the
            # {superclass_parameters} string
            param_string = param_string.strip()
        params_list.append(param_string)

        # Prevent the next base classes from bringing in the
        # same parameters.
        cls_params |= base_params

    # Fill in the processed superclass parameters
    superclass_parameters = '\n'.join(params_list)
    cls.__doc__ = cls.__doc__.format(
        superclass_parameters=superclass_parameters)
    return cls
0.000575
def createKeyboardTab(self):
    ''' KEYBOARD '''

    _keyboardList = [
        'KEYCODE_1', 'KEYCODE_2', 'KEYCODE_3', 'KEYCODE_4', 'KEYCODE_5',
        'KEYCODE_6', 'KEYCODE_7', 'KEYCODE_8', 'KEYCODE_9', 'KEYCODE_0',
        'KEYCODE_Q', 'KEYCODE_W', 'KEYCODE_E', 'KEYCODE_R', 'KEYCODE_T',
        'KEYCODE_Y', 'KEYCODE_U', 'KEYCODE_I', 'KEYCODE_O', 'KEYCODE_P',
        'KEYCODE_A', 'KEYCODE_S', 'KEYCODE_D', 'KEYCODE_F', 'KEYCODE_G',
        'KEYCODE_H', 'KEYCODE_J', 'KEYCODE_K', 'KEYCODE_L', 'KEYCODE_DEL',
        'KEYCODE_Z', 'KEYCODE_X', 'KEYCODE_C', 'KEYCODE_V', 'KEYCODE_B',
        'KEYCODE_N', 'KEYCODE_M', 'KEYCODE_.', 'KEYCODE_SPACE', 'KEYCODE_GO',
    ]

    for keyboard in _keyboardList:
        _cpb = ControlPanelButton(self.keyboardTab, self.culebron, self.printOperation,
                                  value=keyboard, text=keyboard[8:],
                                  width=Layout.BUTTON_WIDTH,
                                  bg=self.bg, fg=self.fg,
                                  highlightbackground=self.highlightbackground)
        _cpb.configure(command=_cpb.command)
        _cpb.grid(column=self.childWindow.column, row=self.childWindow.row)
        self.tabLayout()
0.007212
def _respond(self, channel, text):
    """Respond to a message on the current socket.

    Args:
        channel (:py:class:`str`): The channel to send to.
        text (:py:class:`str`): The message text to send.
    """
    result = self._format_message(channel, text)
    if result is not None:
        logger.info(
            'Sending message: %r',
            truncate(result, max_len=50),
        )
        self.socket.send_str(result)
0.004141
def boundary(ax, scale, axes_colors=None, **kwargs):
    """
    Plots the boundary of the simplex. Creates and returns matplotlib axis if
    none given.

    Parameters
    ----------
    ax: Matplotlib AxesSubplot, None
        The subplot to draw on.
    scale: float
        Simplex scale size.
    kwargs:
        Any kwargs to pass through to matplotlib.
    axes_colors: dict
        Option for coloring boundaries different colors.
        e.g. {'l': 'g'} for coloring the left axis boundary green
    """
    # Set default color as black.
    if axes_colors is None:
        axes_colors = dict()
    for _axis in ['l', 'r', 'b']:
        if _axis not in axes_colors.keys():
            axes_colors[_axis] = 'black'

    horizontal_line(ax, scale, 0, color=axes_colors['b'], **kwargs)
    left_parallel_line(ax, scale, 0, color=axes_colors['l'], **kwargs)
    right_parallel_line(ax, scale, 0, color=axes_colors['r'], **kwargs)
    return ax
0.001052
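A hedged usage sketch on a plain matplotlib axis, assuming the line-drawing helpers called above forward extra kwargs to matplotlib; the axes_colors example comes from the docstring:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
boundary(ax, scale=1.0, axes_colors={'l': 'g'}, linewidth=1.0)  # left edge green, others black
plt.show()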