Dataset columns: code (string, 75 to 104k chars), docstring (string, 1 to 46.9k chars), text (string, 164 to 112k chars).
def _zp_decode(self, msg): """ZP: Zone partitions.""" zone_partitions = [ord(x)-0x31 for x in msg[4:4+Max.ZONES.value]] return {'zone_partitions': zone_partitions}
ZP: Zone partitions.
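A minimal sketch of the decoding above, assuming a hypothetical 8-character message and 4 zones in place of Max.ZONES.value: each ASCII character after the 4-byte header maps to a 0-based partition index.

msg = "08ZP2311"                      # hypothetical header + one character per zone
payload = msg[4:4 + 4]                # "2311"; the real code uses Max.ZONES.value
zone_partitions = [ord(x) - 0x31 for x in payload]
print(zone_partitions)                # [1, 2, 0, 0]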
def hide(self): """Hide the spinner to allow for custom writing to the terminal.""" thr_is_alive = self._spin_thread and self._spin_thread.is_alive() if thr_is_alive and not self._hide_spin.is_set(): # set the hidden spinner flag self._hide_spin.set() # clear the current line sys.stdout.write("\r") self._clear_line() # flush the stdout buffer so the current line can be rewritten to sys.stdout.flush()
Hide the spinner to allow for custom writing to the terminal.
def example_alter_configs(a, args): """ Alter configs atomically, replacing non-specified configuration properties with their default values. """ resources = [] for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]): resource = ConfigResource(restype, resname) resources.append(resource) for k, v in [conf.split('=') for conf in configs.split(',')]: resource.set_config(k, v) fs = a.alter_configs(resources) # Wait for operation to finish. for res, f in fs.items(): try: f.result() # empty, but raises exception on failure print("{} configuration successfully altered".format(res)) except Exception: raise
Alter configs atomically, replacing non-specified configuration properties with their default values.
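The zip over strided slices groups the flat argument list into (resource type, name, config string) triples; a small stand-alone illustration of that grouping and of the key=value parsing, using made-up arguments:

args = ["TOPIC", "my-topic", "retention.ms=3600000,cleanup.policy=compact"]
for restype, resname, configs in zip(args[0::3], args[1::3], args[2::3]):
    parsed = dict(conf.split('=') for conf in configs.split(','))
    print(restype, resname, parsed)
# TOPIC my-topic {'retention.ms': '3600000', 'cleanup.policy': 'compact'}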
def renew_service(request, pk): """ renew an existing service :param request: the request object :param pk: the primary key of the service to renew :type pk: int """ default_provider.load_services() service = get_object_or_404(ServicesActivated, pk=pk) service_name = str(service.name) service_object = default_provider.get_service(service_name) lets_auth = getattr(service_object, 'auth') getattr(service_object, 'reset_failed')(pk=pk) return redirect(lets_auth(request))
renew an existing service :param request: the request object :param pk: the primary key of the service to renew :type pk: int
def export_flow_di_data(params, plane): """ Creates a new BPMNEdge XML element for given edge parameters and adds it to 'plane' element. :param params: dictionary with edge parameters, :param plane: object of Element class, representing BPMN XML 'BPMNPlane' element (root for edge DI data). """ output_flow = eTree.SubElement(plane, BpmnDiagramGraphExport.bpmndi_namespace + consts.Consts.bpmn_edge) output_flow.set(consts.Consts.id, params[consts.Consts.id] + "_gui") output_flow.set(consts.Consts.bpmn_element, params[consts.Consts.id]) waypoints = params[consts.Consts.waypoints] for waypoint in waypoints: waypoint_element = eTree.SubElement(output_flow, "omgdi:waypoint") waypoint_element.set(consts.Consts.x, waypoint[0]) waypoint_element.set(consts.Consts.y, waypoint[1])
Creates a new BPMNEdge XML element for given edge parameters and adds it to 'plane' element. :param params: dictionary with edge parameters, :param plane: object of Element class, representing BPMN XML 'BPMNPlane' element (root for edge DI data).
def rank_for_in(self, leaderboard_name, member): ''' Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard. ''' if self.order == self.ASC: try: return self.redis_connection.zrank( leaderboard_name, member) + 1 except: return None else: try: return self.redis_connection.zrevrank( leaderboard_name, member) + 1 except: return None
Retrieve the rank for a member in the named leaderboard. @param leaderboard_name [String] Name of the leaderboard. @param member [String] Member name. @return the rank for a member in the leaderboard.
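The +1 converts Redis's 0-based ZRANK/ZREVRANK index into a 1-based rank; a plain-Python stand-in (no Redis required) that mimics both orderings for a made-up leaderboard:

scores = {"alice": 10, "bob": 25, "carol": 40}
asc = sorted(scores, key=scores.get)                  # what ZRANK walks (low to high)
desc = sorted(scores, key=scores.get, reverse=True)   # what ZREVRANK walks (high to low)
print(asc.index("carol") + 1)    # 3 -> rank when self.order == ASC
print(desc.index("carol") + 1)   # 1 -> rank when ordering is high-to-low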
def geom_symm_match(g, atwts, ax, theta, do_refl): """ [Revised match factor calculation] .. todo:: Complete geom_symm_match docstring """ # Imports import numpy as np from scipy import linalg as spla # Convert g and atwts to n-D vectors g = make_nd_vec(g, nd=None, t=np.float64, norm=False) atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False) # Ensure proper dimensionality if not g.shape[0] == 3 * atwts.shape[0]: raise ValueError("Size of 'g' is not 3*size of 'atwts'") ## end if # Calculate transformed geometry gx = symm_op(g, ax, theta, do_refl) # Push g to a column vector g = g.reshape((g.shape[0],1)) # Augment g and gx with imaginary atomic weights ex_wts = atwts.repeat(3,axis=0).T.reshape((atwts.shape[0]*3,1)) * 1.j g = np.add(g, ex_wts) gx = np.add(gx, ex_wts) ## # Define calc as the outer product of the augmented vectors ## calc = np.dot(g.reshape((g.shape[0],1)), \ ## np.reciprocal(gx.reshape((1,gx.shape[0])))) ## ## # Calculate the complex magnitude of each element and take log10, ## # then abs again ## calc = np.abs(np.log10(np.abs(calc))) # Expand g and gx as column vectors of coordinates calc_g = g.reshape((g.shape[0] // 3, 3)) calc_gx = gx.reshape((gx.shape[0] // 3, 3)) ## ## # Expand each into a square matrix of identical column vectors ## calc_g = calc_g.repeat(g.shape[0], axis=1) ## calc_gx = gx.repeat(gx.shape[0], axis=1) # Calc is the absolute distance between the calc-ed values, # scaled by the maximum of the individual atom distances or unity. # Calculate the unscaled distances calc = [[spla.norm(np.subtract(calc_g[i,:], calc_gx[j,:])) \ for j in range(calc_gx.shape[0])] \ for i in range(calc_g.shape[0])] # Calculate the scale factors scale_g = np.array([spla.norm(calc_g[i,:]) for i in \ range(calc_g.shape[0])]).reshape((calc_g.shape[0],1)) \ .repeat(calc_g.shape[0], axis=1) scale_gx = np.array([spla.norm(calc_gx[j,:]) for j in \ range(calc_g.shape[0])]).reshape((1,calc_gx.shape[0])) \ .repeat(calc_gx.shape[0], axis=0) scale = np.maximum(np.maximum(scale_g, scale_gx), np.ones_like(scale_g, dtype=np.float64)) # Scale calc calc = np.divide(calc, scale) # Take the minimum of each row mins = np.min(calc, axis=1) # Take the maximum of the minima for the final factor fac = np.max(mins) # Using the atomic weights for checking matching can result in 'fac' # being greater than unity. Return the minimum of fac and unity. fac = min(fac, 1.0) return fac
[Revised match factor calculation] .. todo:: Complete geom_symm_match docstring
def items(self): """ On Python 2.7+: D.items() -> a set-like object providing a view on D's items On Python 2.6: D.items() -> an iterator over D's items """ if ver == (2, 7): return self.viewitems() elif ver == (2, 6): return self.iteritems() elif ver >= (3, 0): return self.items()
On Python 2.7+: D.items() -> a set-like object providing a view on D's items On Python 2.6: D.items() -> an iterator over D's items
def create_screenshot(self, app_id, filename, position=1): """Add a screenshot to the web app identified by ``app_id``. Screenshots are ordered by ``position``. :returns: HttpResponse: * status_code (int) 201 is successful * content (dict) containing screenshot data """ # prepare file for upload with open(filename, 'rb') as s_file: s_content = s_file.read() s_encoded = b64encode(s_content) url = self.url('create_screenshot') % app_id mtype, encoding = mimetypes.guess_type(filename) if mtype is None: mtype = 'image/jpeg' data = {'position': position, 'file': {'type': mtype, 'data': s_encoded}} return self.conn.fetch('POST', url, data)
Add a screenshot to the web app identified by ``app_id``. Screenshots are ordered by ``position``. :returns: HttpResponse: * status_code (int) 201 is successful * content (dict) containing screenshot data
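A small sketch of the payload preparation only (no HTTP call), showing what mimetypes.guess_type and b64encode produce for a hypothetical file name and byte string:

import mimetypes
from base64 import b64encode

mtype, encoding = mimetypes.guess_type("screenshot.png")
print(mtype)                                  # image/png
payload = b64encode(b"fake image bytes")      # goes into data['file']['data']
print(payload)                                # b'ZmFrZSBpbWFnZSBieXRlcw=='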
def subtract_column_median(df, prefix='Intensity '): """ Apply column-wise normalisation to expression columns. Default is median transform to expression columns beginning with Intensity :param df: :param prefix: The column prefix for expression columns :return: """ df = df.copy() df.replace([np.inf, -np.inf], np.nan, inplace=True) mask = [l.startswith(prefix) for l in df.columns.values] df.iloc[:, mask] = df.iloc[:, mask] - df.iloc[:, mask].median(axis=0) return df
Apply column-wise normalisation to expression columns. Default is median transform to expression columns beginning with Intensity :param df: :param prefix: The column prefix for expression columns :return:
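A worked example, assuming the function above is in scope; the per-column median of the 'Intensity ' columns is removed while other columns are untouched:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "Intensity A": [1.0, 2.0, 4.0],
    "Intensity B": [10.0, 20.0, 40.0],
    "Gene": ["x", "y", "z"],
})
out = subtract_column_median(df)
print(out["Intensity A"].tolist())   # [-1.0, 0.0, 2.0]  (median 2.0 subtracted)
print(out["Gene"].tolist())          # ['x', 'y', 'z']    (left unchanged)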
def read_request_from_str(data, **params): """ Read the request headers from a string and format the string using the given format-string template. :param data: :param params: :return: """ method, uri = None, None headers = {} host = '' try: split_list = data.split('\n\n') headers_text = split_list[0] body = '\n\n'.join(split_list[1:]) except: headers_text = data body = '' body = force_bytes(body) for k, v in params.items(): body = body.replace(b'{%s}' % force_bytes(k), force_bytes(v)) header_list = headers_text.split('\n') for i, line in enumerate(header_list): line = line.strip() if line.strip() == '': continue line = line.format(**params) if i == 0: # at most 3 parts split_line = line.strip().split(' ') method, uri, _ = split_line[0], ' '.join(split_line[1:-1]), split_line[-1] else: # at most 2 parts header, value = line.split(':', 1) header = header.strip() value = value.strip() headers[header] = value if header.lower() == 'host': host = value return headers, method, uri, host, body
Read the request headers from a string and format the string using the given format-string template. :param data: :param params: :return:
def create_assembly_instance(self, assembly_uri, part_uri, configuration): ''' Insert a configurable part into an assembly. Args: - assembly (dict): eid, wid, and did of the assembly into which will be inserted - part (dict): eid and did of the configurable part - configuration (dict): the configuration Returns: - requests.Response: Onshape response data ''' payload = { "documentId": part_uri["did"], "elementId": part_uri["eid"], # could be added if needed: # "partId": "String", # "featureId": "String", # "microversionId": "String", "versionId": part_uri["wvm"], # "microversionId": "String", "isAssembly": False, "isWholePartStudio": True, "configuration": self.encode_configuration(part_uri["did"], part_uri["eid"], configuration) } return self._api.request('post', '/api/assemblies/d/' + assembly_uri["did"] + '/' + assembly_uri["wvm_type"] + '/' + assembly_uri["wvm"] + '/e/' + assembly_uri["eid"] + '/instances', body=payload)
Insert a configurable part into an assembly. Args: - assembly (dict): eid, wid, and did of the assembly into which will be inserted - part (dict): eid and did of the configurable part - configuration (dict): the configuration Returns: - requests.Response: Onshape response data
def md5_string(s): """ Shortcut to create md5 hash :param s: :return: """ m = hashlib.md5() m.update(s) return str(m.hexdigest())
Shortcut to create md5 hash :param s: :return:
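Note that hashlib.md5().update() requires bytes on Python 3, so str input would need encoding first; a minimal check with the standard library only:

import hashlib

print(hashlib.md5(b"hello").hexdigest())
# 5d41402abc4b2a76b9719d911017c592
print(hashlib.md5("hello".encode("utf-8")).hexdigest() == hashlib.md5(b"hello").hexdigest())  # True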
def _process_response(self, resp, out_folder=None): """ processes the response object""" CHUNK = 4056 maintype = self._mainType(resp) contentDisposition = resp.headers.get('content-disposition') contentEncoding = resp.headers.get('content-encoding') contentType = resp.headers.get('content-type') contentLength = resp.headers.get('content-length') if maintype.lower() in ('image', 'application/x-zip-compressed') or \ contentType == 'application/x-zip-compressed' or \ (contentDisposition is not None and \ contentDisposition.lower().find('attachment;') > -1): fname = self._get_file_name( contentDisposition=contentDisposition, url=resp.geturl()) if out_folder is None: out_folder = tempfile.gettempdir() if contentLength is not None: max_length = int(contentLength) if max_length < CHUNK: CHUNK = max_length file_name = os.path.join(out_folder, fname) with open(file_name, 'wb') as writer: for data in self._chunk(response=resp): writer.write(data) del data del writer return file_name else: read = "" for data in self._chunk(response=resp, size=4096): if self.PY3 == True: read += data.decode('utf-8') else: read += data del data try: return json.loads(read.strip()) except: return read return None
processes the response object
def sign(self, secret=None): """Sign the generated :class:`TransactionEnvelope <stellar_base.transaction_envelope.TransactionEnvelope>` from the list of this builder's operations. :param str secret: The secret seed to use if a key pair or secret was not provided when this class was originally instantiated, or if another key is being utilized to sign the transaction envelope. """ keypair = self.keypair if not secret else Keypair.from_seed(secret) self.gen_te() self.te.sign(keypair)
Sign the generated :class:`TransactionEnvelope <stellar_base.transaction_envelope.TransactionEnvelope>` from the list of this builder's operations. :param str secret: The secret seed to use if a key pair or secret was not provided when this class was originally instantiated, or if another key is being utilized to sign the transaction envelope.
def api_representation(self): """ Returns the JSON formatting required by Outlook's API for contacts """ return dict(EmailAddress=dict(Name=self.name, Address=self.email))
Returns the JSON formatting required by Outlook's API for contacts
def draw_mini_map(self, surf): """Draw the minimap.""" if (self._render_rgb and self._obs.observation.HasField("render_data") and self._obs.observation.render_data.HasField("minimap")): # Draw the rendered version. surf.blit_np_array(features.Feature.unpack_rgb_image( self._obs.observation.render_data.minimap)) else: # Render it manually from feature layer data. hmap_feature = features.MINIMAP_FEATURES.height_map hmap = hmap_feature.unpack(self._obs.observation) if not hmap.any(): hmap = hmap + 100 # pylint: disable=g-no-augmented-assignment hmap_color = hmap_feature.color(hmap) creep_feature = features.MINIMAP_FEATURES.creep creep = creep_feature.unpack(self._obs.observation) creep_mask = creep > 0 creep_color = creep_feature.color(creep) if self._obs.observation.player_common.player_id in (0, 16): # observer # If we're the observer, show the absolute since otherwise all player # units are friendly, making it pretty boring. player_feature = features.MINIMAP_FEATURES.player_id else: player_feature = features.MINIMAP_FEATURES.player_relative player_data = player_feature.unpack(self._obs.observation) player_mask = player_data > 0 player_color = player_feature.color(player_data) visibility = features.MINIMAP_FEATURES.visibility_map.unpack( self._obs.observation) visibility_fade = np.array([[0.5] * 3, [0.75]*3, [1]*3]) # Compose and color the different layers. out = hmap_color * 0.6 out[creep_mask, :] = (0.4 * out[creep_mask, :] + 0.6 * creep_color[creep_mask, :]) out[player_mask, :] = player_color[player_mask, :] out *= visibility_fade[visibility] # Render the bit of the composited layers that actually correspond to the # map. This isn't all of it on non-square maps. shape = self._map_size.scale_max_size( self._feature_minimap_px).floor() surf.blit_np_array(out[:shape.y, :shape.x, :]) surf.draw_rect(colors.white * 0.8, self._camera, 1) # Camera pygame.draw.rect(surf.surf, colors.red, surf.surf.get_rect(), 1)
Draw the minimap.
def warp(self, srid=None, format=None, geom=None): """Returns a new RasterQuerySet with possibly warped/converted rasters. Keyword args: format -- raster file extension format as str geom -- geometry for masking or spatial subsetting srid -- spatial reference identifier as int for warping to """ clone = self._clone() for obj in clone: obj.convert(format, geom) if srid: fp = tempfile.NamedTemporaryFile(suffix='.%s' % format or '') with obj.raster() as r, r.warp(srid, fp.name) as w: obj.image.file = fp return clone
Returns a new RasterQuerySet with possibly warped/converted rasters. Keyword args: format -- raster file extension format as str geom -- geometry for masking or spatial subsetting srid -- spatial reference identifier as int for warping to
def get_user(user_name=None, region=None, key=None, keyid=None, profile=None): ''' Get user information. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.get_user myuser ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: info = conn.get_user(user_name) if not info: return False return info except boto.exception.BotoServerError as e: log.debug(e) log.error('Failed to get IAM user %s info.', user_name) return False
Get user information. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt myminion boto_iam.get_user myuser
def add_arguments(self, parser): """ Define optional arguments with default values """ parser.add_argument('--length', default=self.length, type=int, help=_('SECRET_KEY length default=%d' % self.length)) parser.add_argument('--alphabet', default=self.allowed_chars, type=str, help=_('alphabet to use default=%s' % self.allowed_chars))
Define optional arguments with default values
def first_available(self, *quantities): """ Return the first available quantity in the input arguments. Return `None` if none of them is available. """ for i, q in enumerate(quantities): if self.has_quantity(q): if i: warnings.warn('{} not available; using {} instead'.format(quantities[0], q)) return q
Return the first available quantity in the input arguments. Return `None` if none of them is available.
def set_driver_simulated(self): """Sets the device driver type to simulated""" self._device_dict["servermain.MULTIPLE_TYPES_DEVICE_DRIVER"] = "Simulator" if self._is_sixteen_bit: self._device_dict["servermain.DEVICE_MODEL"] = 0 else: self._device_dict["servermain.DEVICE_MODEL"] = 1 self._device_dict["servermain.DEVICE_ID_OCTAL"] = 1
Sets the device driver type to simulated
def select_groups(adata, groups='all', key='louvain'): """Get subset of groups in adata.obs[key]. """ strings_to_categoricals(adata) if isinstance(groups, list) and isinstance(groups[0], int): groups = [str(n) for n in groups] categories = adata.obs[key].cat.categories groups_masks = np.array([categories[i] == adata.obs[key].values for i, name in enumerate(categories)]) if groups == 'all': groups = categories.values else: groups_ids = [np.where(categories.values == name)[0][0] for name in groups] groups_masks = groups_masks[groups_ids] groups = categories[groups_ids].values return groups, groups_masks
Get subset of groups in adata.obs[key].
def get_user_info(self, request): """Implement custom getter.""" if not current_user.is_authenticated: return {} user_info = { 'id': current_user.get_id(), } if 'SENTRY_USER_ATTRS' in current_app.config: for attr in current_app.config['SENTRY_USER_ATTRS']: if hasattr(current_user, attr): user_info[attr] = getattr(current_user, attr) return user_info
Implement custom getter.
def read_length_block(fp, fmt='I', padding=1): """ Read a block of data with a length marker at the beginning. :param fp: file-like :param fmt: format of the length marker :return: bytes object """ length = read_fmt(fmt, fp)[0] data = fp.read(length) assert len(data) == length, (len(data), length) read_padding(fp, length, padding) return data
Read a block of data with a length marker at the beginning. :param fp: file-like :param fmt: format of the length marker :return: bytes object
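A self-contained sketch of the same length-prefixed framing using only io and struct (the library's read_fmt/read_padding helpers are not reimplemented here); a 4-byte big-endian length is followed by that many data bytes and optional padding:

import io
import struct

buf = io.BytesIO(struct.pack(">I", 5) + b"hello" + b"\x00" * 3)   # length + data + pad
length = struct.unpack(">I", buf.read(4))[0]
data = buf.read(length)
print(length, data)   # 5 b'hello'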
def scanStoVars(self, strline): """ scan input string line, replace sto parameters with calculated results. """ for wd in strline.split(): if wd in self.stodict: strline = strline.replace(wd, str(self.stodict[wd])) return strline
scan input string line, replace sto parameters with calculated results.
def Write(self, grr_message): """Write the message into the transaction log.""" grr_message = grr_message.SerializeToString() try: with io.open(self.logfile, "wb") as fd: fd.write(grr_message) except (IOError, OSError): # Check if we're missing directories and try to create them. if not os.path.isdir(os.path.dirname(self.logfile)): try: os.makedirs(os.path.dirname(self.logfile)) with io.open(self.logfile, "wb") as fd: fd.write(grr_message) except (IOError, OSError): logging.exception("Couldn't write nanny transaction log to %s", self.logfile)
Write the message into the transaction log.
def parse_reqtype(self): """Return the authentication body.""" if self.job_args['os_auth_version'] == 'v1.0': return dict() else: setup = { 'username': self.job_args.get('os_user') } # Check if any prefix items are set. A prefix should be a # dictionary with keys matching the os_* credential type. prefixes = self.job_args.get('os_prefix') if self.job_args.get('os_token') is not None: auth_body = { 'auth': { 'token': { 'id': self.job_args.get('os_token') } } } if not self.job_args.get('os_tenant'): raise exceptions.AuthenticationProblem( 'To use token auth you must specify the tenant id. Set' ' the tenant ID with [ --os-tenant ]' ) elif self.job_args.get('os_password') is not None: setup['password'] = self.job_args.get('os_password') if prefixes: prefix = prefixes.get('os_password') if not prefix: raise NotImplementedError( 'the `password` method is not implemented for this' ' auth plugin' ) else: prefix = 'passwordCredentials' auth_body = { 'auth': { prefix: setup } } elif self.job_args.get('os_apikey') is not None: setup['apiKey'] = self.job_args.get('os_apikey') if prefixes: prefix = prefixes.get('os_apikey') if not prefix: raise NotImplementedError( 'the `apikey` method is not implemented for this' ' auth plugin' ) else: prefix = 'apiKeyCredentials' auth_body = { 'auth': { prefix: setup } } else: raise exceptions.AuthenticationProblem( 'No Password, APIKey, or Token Specified' ) if self.job_args.get('os_tenant'): auth = auth_body['auth'] auth['tenantName'] = self.job_args.get('os_tenant') LOG.debug('AUTH Request body: [ %s ]', auth_body) return auth_body
Return the authentication body.
def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable)
Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object.
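The deprecation note points at the .at accessor; a minimal before/after showing the recommended replacement for a single-cell write:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
df.at["x", "a"] = 10          # preferred over the deprecated set_value('x', 'a', 10)
print(df.loc["x", "a"])       # 10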
def env(config, endpoint): """Print RENKU environment variables. Run this command to configure your Renku client: $ eval "$(renku env)" """ access_token = config['endpoints'][endpoint]['token']['access_token'] click.echo('export {0}={1}'.format('RENKU_ENDPOINT', endpoint)) click.echo('export {0}={1}'.format('RENKU_ACCESS_TOKEN', access_token)) click.echo('# Run this command to configure your Renku client:') click.echo('# eval "$(renku env)"')
Print RENKU environment variables. Run this command to configure your Renku client: $ eval "$(renku env)"
def _init_hdrgos(self, hdrgos_dflt, hdrgos_usr=None, add_dflt=True): """Initialize GO high""" # Use default GO group header values if (hdrgos_usr is None or hdrgos_usr is False) and not self.sections: return set(hdrgos_dflt) # Get GO group headers provided by user hdrgos_init = set() if hdrgos_usr: chk_goids(hdrgos_usr, "User-provided GO group headers") hdrgos_init |= set(hdrgos_usr) if self.sections: self._chk_sections(self.sections) hdrgos_sec = set([hg for _, hdrgos in self.sections for hg in hdrgos]) chk_goids(hdrgos_sec, "User-provided GO group headers in sections") hdrgos_init |= hdrgos_sec # Add default depth-01 GOs to headers, if desired if add_dflt: return set(hdrgos_init).union(hdrgos_dflt) # Return user-provided GO grouping headers return hdrgos_init
Initialize GO high
def write_xmlbif(self, filename): """ Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = XMLBIFWriter(model) >>> writer.write_xmlbif(test_file) """ with open(filename, 'w') as fout: fout.write(self.__str__())
Write the xml data into the file. Parameters ---------- filename: Name of the file. Examples ------- >>> writer = XMLBIFWriter(model) >>> writer.write_xmlbif(test_file)
def getSolution(self, domains, constraints, vconstraints): """ Return one solution for the given problem @param domains: Dictionary mapping variables to their domains @type domains: dict @param constraints: List of pairs of (constraint, variables) @type constraints: list @param vconstraints: Dictionary mapping variables to a list of constraints affecting the given variables. @type vconstraints: dict """ msg = "%s is an abstract class" % self.__class__.__name__ raise NotImplementedError(msg)
Return one solution for the given problem @param domains: Dictionary mapping variables to their domains @type domains: dict @param constraints: List of pairs of (constraint, variables) @type constraints: list @param vconstraints: Dictionary mapping variables to a list of constraints affecting the given variables. @type vconstraints: dict
def _extract_value_from_storage(self, string): """Taking a string that was a member of the zset, extract the value and pk Parameters ---------- string: str The member extracted from the sorted set Returns ------- tuple Tuple with the value and the pk, extracted from the string """ parts = string.split(self.separator) pk = parts.pop() return self.separator.join(parts), pk
Taking a string that was a member of the zset, extract the value and pk Parameters ---------- string: str The member extracted from the sorted set Returns ------- tuple Tuple with the value and the pk, extracted from the string
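Because only the last separator-delimited piece is the pk, popping from the end keeps any separators that happen to occur inside the value itself; a sketch with a hypothetical separator:

separator = "::"                       # hypothetical value of self.separator
member = "alpha::beta::42"             # value containing the separator, then the pk
parts = member.split(separator)
pk = parts.pop()
print(separator.join(parts), pk)       # alpha::beta 42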
def clean_key_name(key): """ Makes ``key`` a valid and appropriate SQL column name: 1. Replaces illegal characters in column names with ``_`` 2. Prevents name from beginning with a digit (prepends ``_``) 3. Lowercases name. If you want case-sensitive table or column names, you are a bad person and you should feel bad. """ result = _illegal_in_column_name.sub("_", key.strip()) if result[0].isdigit(): result = '_%s' % result if result.upper() in sql_reserved_words: result = '_%s' % key return result.lower()
Makes ``key`` a valid and appropriate SQL column name: 1. Replaces illegal characters in column names with ``_`` 2. Prevents name from beginning with a digit (prepends ``_``) 3. Lowercases name. If you want case-sensitive table or column names, you are a bad person and you should feel bad.
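A usage sketch, assuming the function above is defined alongside hypothetical stand-ins for the module globals it references (_illegal_in_column_name and sql_reserved_words):

import re

_illegal_in_column_name = re.compile(r"[^\w]")      # hypothetical stand-in
sql_reserved_words = {"SELECT", "TABLE", "ORDER"}   # hypothetical stand-in

print(clean_key_name(" First Name "))   # first_name
print(clean_key_name("2019 sales"))     # _2019_sales
print(clean_key_name("order"))          # _order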
def c_struct(self): """Get the struct of the module.""" member = '\n'.join(self.c_member_funcs(True)) if self.opts.windll: return 'struct {{\n{}{} }} {};\n'.format( self._c_dll_base(), member, self.name ) return 'typedef\nstruct {2} {{\n{0}\n{1}}}\n{3};\n'.format( self._c_dll_base(), member, *self._c_struct_names() )
Get the struct of the module.
def add_tip_labels_to_axes(self): """ Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. """ # get tip-coords and replace if using fixed_order if self.style.orient in ("up", "down"): ypos = np.zeros(self.ntips) xpos = np.arange(self.ntips) if self.style.orient in ("right", "left"): xpos = np.zeros(self.ntips) ypos = np.arange(self.ntips) # pop fill from color dict if using color if self.style.tip_labels_colors: self.style.tip_labels_style.pop("fill") # fill anchor shift if None # (Toytrees fill this at draw() normally when tip_labels != None) if self.style.tip_labels_style["-toyplot-anchor-shift"] is None: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" # add tip names to coordinates calculated above self.axes.text( xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=self.style.tip_labels_style, color=self.style.tip_labels_colors, ) # get stroke-width for aligned tip-label lines (optional) # copy stroke-width from the edge_style unless user set it if not self.style.edge_align_style.get("stroke-width"): self.style.edge_align_style['stroke-width'] = ( self.style.edge_style['stroke-width'])
Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting.
Below is the the instruction that describes the task: ### Input: Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. ### Response: def add_tip_labels_to_axes(self): """ Add text offset from tips of tree with correction for orientation, and fixed_order which is usually used in multitree plotting. """ # get tip-coords and replace if using fixed_order if self.style.orient in ("up", "down"): ypos = np.zeros(self.ntips) xpos = np.arange(self.ntips) if self.style.orient in ("right", "left"): xpos = np.zeros(self.ntips) ypos = np.arange(self.ntips) # pop fill from color dict if using color if self.style.tip_labels_colors: self.style.tip_labels_style.pop("fill") # fill anchor shift if None # (Toytrees fill this at draw() normally when tip_labels != None) if self.style.tip_labels_style["-toyplot-anchor-shift"] is None: self.style.tip_labels_style["-toyplot-anchor-shift"] = "15px" # add tip names to coordinates calculated above self.axes.text( xpos, ypos, self.tip_labels, angle=(0 if self.style.orient in ("right", "left") else -90), style=self.style.tip_labels_style, color=self.style.tip_labels_colors, ) # get stroke-width for aligned tip-label lines (optional) # copy stroke-width from the edge_style unless user set it if not self.style.edge_align_style.get("stroke-width"): self.style.edge_align_style['stroke-width'] = ( self.style.edge_style['stroke-width'])
def get_progress(self): """Get the progress of the queue in percentage (float). Returns: float: The 'finished' progress in percentage. """ count_remaining = len(self.items_queued) + len(self.items_in_progress) percentage_remaining = 100 / self.count_total * count_remaining return 100 - percentage_remaining
Get the progress of the queue in percentage (float). Returns: float: The 'finished' progress in percentage.
Below is the instruction that describes the task:
### Input:
Get the progress of the queue in percentage (float).

Returns:
    float: The 'finished' progress in percentage.
### Response:
def get_progress(self):
    """Get the progress of the queue in percentage (float).

    Returns:
        float: The 'finished' progress in percentage.
    """
    count_remaining = len(self.items_queued) + len(self.items_in_progress)
    percentage_remaining = 100 / self.count_total * count_remaining
    return 100 - percentage_remaining
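A quick, hedged illustration of the arithmetic in `get_progress`, using a throwaway stand-in object since the real queue class is not shown in this entry:

class _FakeQueue:
    count_total = 10
    items_queued = ["a", "b"]        # 2 items still waiting
    items_in_progress = ["c"]        # 1 item running
    get_progress = get_progress      # reuse the function above as a method

print(_FakeQueue().get_progress())   # 3 of 10 remaining -> 70.0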
def NormalizePath(path, sep="/"): """A sane implementation of os.path.normpath. The standard implementation treats leading / and // as different leading to incorrect normal forms. NOTE: Its ok to use a relative path here (without leading /) but any /../ will still be removed anchoring the path at the top level (e.g. foo/../../../../bar => bar). Args: path: The path to normalize. sep: Separator used. Returns: A normalized path. In this context normalized means that all input paths that would result in the system opening the same physical file will produce the same normalized path. """ if not path: return sep path = SmartUnicode(path) path_list = path.split(sep) # This is a relative path and the first element is . or .. if path_list[0] in [".", "..", ""]: path_list.pop(0) # Deliberately begin at index 1 to preserve a single leading / i = 0 while True: list_len = len(path_list) # We begin at the last known good position so we never iterate over path # elements which are already examined for i in range(i, len(path_list)): # Remove /./ form if path_list[i] == "." or not path_list[i]: path_list.pop(i) break # Remove /../ form elif path_list[i] == "..": path_list.pop(i) # Anchor at the top level if (i == 1 and path_list[0]) or i > 1: i -= 1 path_list.pop(i) break # If we didnt alter the path so far we can quit if len(path_list) == list_len: return sep + sep.join(path_list)
A sane implementation of os.path.normpath. The standard implementation treats leading / and // as different leading to incorrect normal forms. NOTE: Its ok to use a relative path here (without leading /) but any /../ will still be removed anchoring the path at the top level (e.g. foo/../../../../bar => bar). Args: path: The path to normalize. sep: Separator used. Returns: A normalized path. In this context normalized means that all input paths that would result in the system opening the same physical file will produce the same normalized path.
Below is the the instruction that describes the task: ### Input: A sane implementation of os.path.normpath. The standard implementation treats leading / and // as different leading to incorrect normal forms. NOTE: Its ok to use a relative path here (without leading /) but any /../ will still be removed anchoring the path at the top level (e.g. foo/../../../../bar => bar). Args: path: The path to normalize. sep: Separator used. Returns: A normalized path. In this context normalized means that all input paths that would result in the system opening the same physical file will produce the same normalized path. ### Response: def NormalizePath(path, sep="/"): """A sane implementation of os.path.normpath. The standard implementation treats leading / and // as different leading to incorrect normal forms. NOTE: Its ok to use a relative path here (without leading /) but any /../ will still be removed anchoring the path at the top level (e.g. foo/../../../../bar => bar). Args: path: The path to normalize. sep: Separator used. Returns: A normalized path. In this context normalized means that all input paths that would result in the system opening the same physical file will produce the same normalized path. """ if not path: return sep path = SmartUnicode(path) path_list = path.split(sep) # This is a relative path and the first element is . or .. if path_list[0] in [".", "..", ""]: path_list.pop(0) # Deliberately begin at index 1 to preserve a single leading / i = 0 while True: list_len = len(path_list) # We begin at the last known good position so we never iterate over path # elements which are already examined for i in range(i, len(path_list)): # Remove /./ form if path_list[i] == "." or not path_list[i]: path_list.pop(i) break # Remove /../ form elif path_list[i] == "..": path_list.pop(i) # Anchor at the top level if (i == 1 and path_list[0]) or i > 1: i -= 1 path_list.pop(i) break # If we didnt alter the path so far we can quit if len(path_list) == list_len: return sep + sep.join(path_list)
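A hedged usage sketch for `NormalizePath`; it assumes `SmartUnicode` simply coerces its argument to `str`, which is not shown in this entry:

def SmartUnicode(value):
    # Assumed stand-in for the real helper, which also handles bytes input.
    return str(value)

print(NormalizePath("//etc//passwd"))        # -> /etc/passwd
print(NormalizePath("foo/../../../../bar"))  # -> /bar (anchored at the top level)
print(NormalizePath("a/./b//c"))             # -> /a/b/c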
def cycles(self): """ Fairly expensive cycle detection algorithm. This method will return the shortest unique cycles that were detected. Debug usage may look something like: print("The following cycles were found:") for cycle in network.cycles(): print(" ", " -> ".join(cycle)) """ def walk_node(node, seen): """ Walk each top-level node we know about, and recurse along the graph. """ if node in seen: yield (node,) return seen.add(node) for edge in self.edges[node]: for cycle in walk_node(edge, set(seen)): yield (node,) + cycle # First, let's get a iterable of all known cycles. cycles = chain.from_iterable( (walk_node(node, set()) for node in self.nodes)) shortest = set() # Now, let's go through and sift through the cycles, finding # the shortest unique cycle known, ignoring cycles which contain # already known cycles. for cycle in sorted(cycles, key=len): for el in shortest: if set(el).issubset(set(cycle)): break else: shortest.add(cycle) # And return that unique list. return shortest
Fairly expensive cycle detection algorithm. This method will return the shortest unique cycles that were detected. Debug usage may look something like: print("The following cycles were found:") for cycle in network.cycles(): print(" ", " -> ".join(cycle))
Below is the the instruction that describes the task: ### Input: Fairly expensive cycle detection algorithm. This method will return the shortest unique cycles that were detected. Debug usage may look something like: print("The following cycles were found:") for cycle in network.cycles(): print(" ", " -> ".join(cycle)) ### Response: def cycles(self): """ Fairly expensive cycle detection algorithm. This method will return the shortest unique cycles that were detected. Debug usage may look something like: print("The following cycles were found:") for cycle in network.cycles(): print(" ", " -> ".join(cycle)) """ def walk_node(node, seen): """ Walk each top-level node we know about, and recurse along the graph. """ if node in seen: yield (node,) return seen.add(node) for edge in self.edges[node]: for cycle in walk_node(edge, set(seen)): yield (node,) + cycle # First, let's get a iterable of all known cycles. cycles = chain.from_iterable( (walk_node(node, set()) for node in self.nodes)) shortest = set() # Now, let's go through and sift through the cycles, finding # the shortest unique cycle known, ignoring cycles which contain # already known cycles. for cycle in sorted(cycles, key=len): for el in shortest: if set(el).issubset(set(cycle)): break else: shortest.add(cycle) # And return that unique list. return shortest
def download_task(url, headers, destination, download_type='layer'): '''download an image layer (.tar.gz) to a specified download folder. This task is done by using local versions of the same download functions that are used for the client. core stream/download functions of the parent client. Parameters ========== image_id: the shasum id of the layer, already determined to not exist repo_name: the image name (library/ubuntu) to retrieve download_folder: download to this folder. If not set, uses temp. ''' # Update the user what we are doing bot.verbose("Downloading %s from %s" % (download_type, url)) # Step 1: Download the layer atomically file_name = "%s.%s" % (destination, next(tempfile._get_candidate_names())) tar_download = download(url, file_name, headers=headers) try: shutil.move(tar_download, destination) except Exception: msg = "Cannot untar layer %s," % tar_download msg += " was there a problem with download?" bot.error(msg) sys.exit(1) return destination
download an image layer (.tar.gz) to a specified download folder. This task is done by using local versions of the same download functions that are used for the client. core stream/download functions of the parent client. Parameters ========== image_id: the shasum id of the layer, already determined to not exist repo_name: the image name (library/ubuntu) to retrieve download_folder: download to this folder. If not set, uses temp.
Below is the the instruction that describes the task: ### Input: download an image layer (.tar.gz) to a specified download folder. This task is done by using local versions of the same download functions that are used for the client. core stream/download functions of the parent client. Parameters ========== image_id: the shasum id of the layer, already determined to not exist repo_name: the image name (library/ubuntu) to retrieve download_folder: download to this folder. If not set, uses temp. ### Response: def download_task(url, headers, destination, download_type='layer'): '''download an image layer (.tar.gz) to a specified download folder. This task is done by using local versions of the same download functions that are used for the client. core stream/download functions of the parent client. Parameters ========== image_id: the shasum id of the layer, already determined to not exist repo_name: the image name (library/ubuntu) to retrieve download_folder: download to this folder. If not set, uses temp. ''' # Update the user what we are doing bot.verbose("Downloading %s from %s" % (download_type, url)) # Step 1: Download the layer atomically file_name = "%s.%s" % (destination, next(tempfile._get_candidate_names())) tar_download = download(url, file_name, headers=headers) try: shutil.move(tar_download, destination) except Exception: msg = "Cannot untar layer %s," % tar_download msg += " was there a problem with download?" bot.error(msg) sys.exit(1) return destination
def mute_modmail_author(self, _unmute=False): """Mute the sender of this modmail message. :param _unmute: Unmute the user instead. Please use :meth:`unmute_modmail_author` instead of setting this directly. """ path = 'unmute_sender' if _unmute else 'mute_sender' return self.reddit_session.request_json( self.reddit_session.config[path], data={'id': self.fullname})
Mute the sender of this modmail message. :param _unmute: Unmute the user instead. Please use :meth:`unmute_modmail_author` instead of setting this directly.
Below is the instruction that describes the task:
### Input:
Mute the sender of this modmail message.

:param _unmute: Unmute the user instead. Please use
    :meth:`unmute_modmail_author` instead of setting this directly.
### Response:
def mute_modmail_author(self, _unmute=False):
    """Mute the sender of this modmail message.

    :param _unmute: Unmute the user instead. Please use
        :meth:`unmute_modmail_author` instead of setting this directly.
    """
    path = 'unmute_sender' if _unmute else 'mute_sender'
    return self.reddit_session.request_json(
        self.reddit_session.config[path], data={'id': self.fullname})
def get_bug_stats(self, startday, endday): """Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... } """ # Min required failures per bug in order to post a comment threshold = 1 if self.weekly_mode else 15 bug_ids = (BugJobMap.failures.by_date(startday, endday) .values('bug_id') .annotate(total=Count('bug_id')) .filter(total__gte=threshold) .values_list('bug_id', flat=True)) bugs = (BugJobMap.failures.by_date(startday, endday) .filter(bug_id__in=bug_ids) .values('job__repository__name', 'job__machine_platform__platform', 'bug_id')) bug_map = dict() for bug in bugs: platform = bug['job__machine_platform__platform'] repo = bug['job__repository__name'] bug_id = bug['bug_id'] if bug_id in bug_map: bug_map[bug_id]['total'] += 1 bug_map[bug_id]['per_platform'][platform] += 1 bug_map[bug_id]['per_repository'][repo] += 1 else: bug_map[bug_id] = {} bug_map[bug_id]['total'] = 1 bug_map[bug_id]['per_platform'] = Counter([platform]) bug_map[bug_id]['per_repository'] = Counter([repo]) return bug_map, bug_ids
Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... }
Below is the the instruction that describes the task: ### Input: Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... } ### Response: def get_bug_stats(self, startday, endday): """Get all intermittent failures per specified date range and repository, returning a dict of bug_id's with total, repository and platform totals if totals are greater than or equal to the threshold. eg: { "1206327": { "total": 5, "per_repository": { "fx-team": 2, "mozilla-inbound": 3 }, "per_platform": { "osx-10-10": 4, "b2g-emu-ics": 1 } }, ... } """ # Min required failures per bug in order to post a comment threshold = 1 if self.weekly_mode else 15 bug_ids = (BugJobMap.failures.by_date(startday, endday) .values('bug_id') .annotate(total=Count('bug_id')) .filter(total__gte=threshold) .values_list('bug_id', flat=True)) bugs = (BugJobMap.failures.by_date(startday, endday) .filter(bug_id__in=bug_ids) .values('job__repository__name', 'job__machine_platform__platform', 'bug_id')) bug_map = dict() for bug in bugs: platform = bug['job__machine_platform__platform'] repo = bug['job__repository__name'] bug_id = bug['bug_id'] if bug_id in bug_map: bug_map[bug_id]['total'] += 1 bug_map[bug_id]['per_platform'][platform] += 1 bug_map[bug_id]['per_repository'][repo] += 1 else: bug_map[bug_id] = {} bug_map[bug_id]['total'] = 1 bug_map[bug_id]['per_platform'] = Counter([platform]) bug_map[bug_id]['per_repository'] = Counter([repo]) return bug_map, bug_ids
def exampleRand(S, A): """WARNING: This will delete a database with the same name as 'db'.""" db = "MDP-%sx%s.db" % (S, A) if os.path.exists(db): os.remove(db) conn = sqlite3.connect(db) with conn: c = conn.cursor() cmd = ''' CREATE TABLE info (name TEXT, value INTEGER); INSERT INTO info VALUES('states', %s); INSERT INTO info VALUES('actions', %s);''' % (S, A) c.executescript(cmd) for a in range(1, A+1): cmd = ''' CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL); CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL); ''' % (a, a) c.executescript(cmd) cmd = "INSERT INTO reward%s(val) VALUES(?)" % a c.executemany(cmd, zip(random(S).tolist())) for s in xrange(1, S+1): # to be usefully represented as a sparse matrix, the number of # nonzero entries should be less than 1/3 of dimesion of the # matrix, so S/3 n = randint(1, S//3) # timeit [90894] * 20330 # ==> 10000 loops, best of 3: 141 us per loop # timeit (90894*np.ones(20330, dtype=int)).tolist() # ==> 1000 loops, best of 3: 548 us per loop col = (permutation(arange(1,S+1))[0:n]).tolist() val = random(n) val = (val / val.sum()).tolist() cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a c.executemany(cmd, zip([s] * n, col, val)) cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a) c.execute(cmd) # return the name of teh database return db
WARNING: This will delete a database with the same name as 'db'.
Below is the the instruction that describes the task: ### Input: WARNING: This will delete a database with the same name as 'db'. ### Response: def exampleRand(S, A): """WARNING: This will delete a database with the same name as 'db'.""" db = "MDP-%sx%s.db" % (S, A) if os.path.exists(db): os.remove(db) conn = sqlite3.connect(db) with conn: c = conn.cursor() cmd = ''' CREATE TABLE info (name TEXT, value INTEGER); INSERT INTO info VALUES('states', %s); INSERT INTO info VALUES('actions', %s);''' % (S, A) c.executescript(cmd) for a in range(1, A+1): cmd = ''' CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL); CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL); ''' % (a, a) c.executescript(cmd) cmd = "INSERT INTO reward%s(val) VALUES(?)" % a c.executemany(cmd, zip(random(S).tolist())) for s in xrange(1, S+1): # to be usefully represented as a sparse matrix, the number of # nonzero entries should be less than 1/3 of dimesion of the # matrix, so S/3 n = randint(1, S//3) # timeit [90894] * 20330 # ==> 10000 loops, best of 3: 141 us per loop # timeit (90894*np.ones(20330, dtype=int)).tolist() # ==> 1000 loops, best of 3: 548 us per loop col = (permutation(arange(1,S+1))[0:n]).tolist() val = random(n) val = (val / val.sum()).tolist() cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a c.executemany(cmd, zip([s] * n, col, val)) cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a) c.execute(cmd) # return the name of teh database return db
def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4): """ Grow mask including pixels that have a high likelihood. GM tissue parameters are sampled in image patches of ``ww`` size. This is inspired on mindboggle's solution to the problem: https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660 """ selem = sim.ball(bw) if ants_segs is None: ants_segs = np.zeros_like(aseg, dtype=np.uint8) aseg[aseg == 42] = 3 # Collapse both hemispheres gm = anat.copy() gm[aseg != 3] = 0 refined = refine_aseg(aseg) newrefmask = sim.binary_dilation(refined, selem) - refined indices = np.argwhere(newrefmask > 0) for pixel in indices: # When ATROPOS identified the pixel as GM, set and carry on if ants_segs[tuple(pixel)] == 2: refined[tuple(pixel)] = 1 continue window = gm[ pixel[0] - ww:pixel[0] + ww, pixel[1] - ww:pixel[1] + ww, pixel[2] - ww:pixel[2] + ww ] if np.any(window > 0): mu = window[window > 0].mean() sigma = max(window[window > 0].std(), 1.e-5) zstat = abs(anat[tuple(pixel)] - mu) / sigma refined[tuple(pixel)] = int(zstat < zval) refined = sim.binary_opening(refined, selem) return refined
Grow mask including pixels that have a high likelihood. GM tissue parameters are sampled in image patches of ``ww`` size. This is inspired on mindboggle's solution to the problem: https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660
Below is the the instruction that describes the task: ### Input: Grow mask including pixels that have a high likelihood. GM tissue parameters are sampled in image patches of ``ww`` size. This is inspired on mindboggle's solution to the problem: https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660 ### Response: def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4): """ Grow mask including pixels that have a high likelihood. GM tissue parameters are sampled in image patches of ``ww`` size. This is inspired on mindboggle's solution to the problem: https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660 """ selem = sim.ball(bw) if ants_segs is None: ants_segs = np.zeros_like(aseg, dtype=np.uint8) aseg[aseg == 42] = 3 # Collapse both hemispheres gm = anat.copy() gm[aseg != 3] = 0 refined = refine_aseg(aseg) newrefmask = sim.binary_dilation(refined, selem) - refined indices = np.argwhere(newrefmask > 0) for pixel in indices: # When ATROPOS identified the pixel as GM, set and carry on if ants_segs[tuple(pixel)] == 2: refined[tuple(pixel)] = 1 continue window = gm[ pixel[0] - ww:pixel[0] + ww, pixel[1] - ww:pixel[1] + ww, pixel[2] - ww:pixel[2] + ww ] if np.any(window > 0): mu = window[window > 0].mean() sigma = max(window[window > 0].std(), 1.e-5) zstat = abs(anat[tuple(pixel)] - mu) / sigma refined[tuple(pixel)] = int(zstat < zval) refined = sim.binary_opening(refined, selem) return refined
def check_if_release_is_current(log): """Warns the user if their release is behind the latest PyPi __version__.""" if __version__ == '0.0.0': return client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi') latest_pypi_version = client.package_releases('hca') latest_version_nums = [int(i) for i in latest_pypi_version[0].split('.')] this_version_nums = [int(i) for i in __version__.split('.')] for i in range(max([len(latest_version_nums), len(this_version_nums)])): try: if this_version_nums[i] < latest_version_nums[i]: log.warning('WARNING: Python (pip) package "hca" is not up-to-date!\n' 'You have hca version: ' + str(__version__) + '\n' 'Please use the latest hca version: ' + str(latest_pypi_version[0])) # handles the odd case where a user's current __version__ is higher than PyPi's elif this_version_nums[i] > latest_version_nums[i]: break # if 4.2 compared to 4.3.1, this handles the missing element except IndexError: pass
Warns the user if their release is behind the latest PyPi __version__.
Below is the the instruction that describes the task: ### Input: Warns the user if their release is behind the latest PyPi __version__. ### Response: def check_if_release_is_current(log): """Warns the user if their release is behind the latest PyPi __version__.""" if __version__ == '0.0.0': return client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi') latest_pypi_version = client.package_releases('hca') latest_version_nums = [int(i) for i in latest_pypi_version[0].split('.')] this_version_nums = [int(i) for i in __version__.split('.')] for i in range(max([len(latest_version_nums), len(this_version_nums)])): try: if this_version_nums[i] < latest_version_nums[i]: log.warning('WARNING: Python (pip) package "hca" is not up-to-date!\n' 'You have hca version: ' + str(__version__) + '\n' 'Please use the latest hca version: ' + str(latest_pypi_version[0])) # handles the odd case where a user's current __version__ is higher than PyPi's elif this_version_nums[i] > latest_version_nums[i]: break # if 4.2 compared to 4.3.1, this handles the missing element except IndexError: pass
def install(*pkgs, **kwargs): ''' Installs a single or multiple packages via nix :type pkgs: list(str) :param pkgs: packages to update :param bool attributes: Pass the list of packages or single package as attribues, not package names. default: False :return: Installed packages. Example element: ``gcc-3.3.2`` :rtype: list(str) .. code-block:: bash salt '*' nix.install package [package2 ...] salt '*' nix.install attributes=True attr.name [attr.name2 ...] ''' attributes = kwargs.get('attributes', False) if not pkgs: return "Plese specify a package or packages to upgrade" cmd = _quietnix() cmd.append('--install') if kwargs.get('attributes', False): cmd.extend(_zip_flatten('--attr', pkgs)) else: cmd.extend(pkgs) out = _run(cmd) installs = list(itertools.chain.from_iterable( [s.split()[1:] for s in out['stderr'].splitlines() if s.startswith('installing')] )) return [_strip_quotes(s) for s in installs]
Installs a single or multiple packages via nix :type pkgs: list(str) :param pkgs: packages to update :param bool attributes: Pass the list of packages or single package as attribues, not package names. default: False :return: Installed packages. Example element: ``gcc-3.3.2`` :rtype: list(str) .. code-block:: bash salt '*' nix.install package [package2 ...] salt '*' nix.install attributes=True attr.name [attr.name2 ...]
Below is the the instruction that describes the task: ### Input: Installs a single or multiple packages via nix :type pkgs: list(str) :param pkgs: packages to update :param bool attributes: Pass the list of packages or single package as attribues, not package names. default: False :return: Installed packages. Example element: ``gcc-3.3.2`` :rtype: list(str) .. code-block:: bash salt '*' nix.install package [package2 ...] salt '*' nix.install attributes=True attr.name [attr.name2 ...] ### Response: def install(*pkgs, **kwargs): ''' Installs a single or multiple packages via nix :type pkgs: list(str) :param pkgs: packages to update :param bool attributes: Pass the list of packages or single package as attribues, not package names. default: False :return: Installed packages. Example element: ``gcc-3.3.2`` :rtype: list(str) .. code-block:: bash salt '*' nix.install package [package2 ...] salt '*' nix.install attributes=True attr.name [attr.name2 ...] ''' attributes = kwargs.get('attributes', False) if not pkgs: return "Plese specify a package or packages to upgrade" cmd = _quietnix() cmd.append('--install') if kwargs.get('attributes', False): cmd.extend(_zip_flatten('--attr', pkgs)) else: cmd.extend(pkgs) out = _run(cmd) installs = list(itertools.chain.from_iterable( [s.split()[1:] for s in out['stderr'].splitlines() if s.startswith('installing')] )) return [_strip_quotes(s) for s in installs]
def random_unitary_matrix(dim, seed=None): """Deprecated in 0.8+ """ warnings.warn('The random_unitary_matrix() function in qiskit.tools.qi has been ' 'deprecated and will be removed in the future. Instead use ' 'the function in qiskit.quantum_info.random', DeprecationWarning) return random.random_unitary(dim, seed).data
Deprecated in 0.8+
Below is the instruction that describes the task:
### Input:
Deprecated in 0.8+
### Response:
def random_unitary_matrix(dim, seed=None):
    """Deprecated in 0.8+
    """
    warnings.warn('The random_unitary_matrix() function in qiskit.tools.qi has been '
                  'deprecated and will be removed in the future. Instead use '
                  'the function in qiskit.quantum_info.random', DeprecationWarning)
    return random.random_unitary(dim, seed).data
def load_csv_data(resource_name): # type: (str) -> List[str] """ Loads first column of specified CSV file from package data. """ data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name)) if data_bytes is None: raise ValueError("No data resource found with name {}".format(resource_name)) else: data = data_bytes.decode('utf8') reader = csv.reader(data.splitlines()) next(reader, None) # skip the headers return [row[0] for row in reader]
Loads first column of specified CSV file from package data.
Below is the instruction that describes the task:
### Input:
Loads first column of specified CSV file from package data.
### Response:
def load_csv_data(resource_name):
    # type: (str) -> List[str]
    """ Loads first column of specified CSV file from package data.
    """
    data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name))
    if data_bytes is None:
        raise ValueError("No data resource found with name {}".format(resource_name))
    else:
        data = data_bytes.decode('utf8')
        reader = csv.reader(data.splitlines())
        next(reader, None)  # skip the headers
        return [row[0] for row in reader]
def _convert(self, value): """Returns a PasswordHash from the given string. PasswordHash instances or None values will return unchanged. Strings will be hashed and the resulting PasswordHash returned. Any other input will result in a TypeError. """ if isinstance(value, PasswordHash): return value elif isinstance(value, str): value = value.encode('utf-8') return PasswordHash.new(value, self.rounds) elif value is not None: raise TypeError( 'Cannot convert {} to a PasswordHash'.format(type(value)))
Returns a PasswordHash from the given string. PasswordHash instances or None values will return unchanged. Strings will be hashed and the resulting PasswordHash returned. Any other input will result in a TypeError.
Below is the instruction that describes the task:
### Input:
Returns a PasswordHash from the given string.

PasswordHash instances or None values will return unchanged.
Strings will be hashed and the resulting PasswordHash returned.
Any other input will result in a TypeError.
### Response:
def _convert(self, value):
    """Returns a PasswordHash from the given string.

    PasswordHash instances or None values will return unchanged.
    Strings will be hashed and the resulting PasswordHash returned.
    Any other input will result in a TypeError.
    """
    if isinstance(value, PasswordHash):
        return value
    elif isinstance(value, str):
        value = value.encode('utf-8')
        return PasswordHash.new(value, self.rounds)
    elif value is not None:
        raise TypeError(
            'Cannot convert {} to a PasswordHash'.format(type(value)))
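A small sketch of how `_convert` behaves. It assumes a `PasswordHash` class with a `new(password, rounds)` constructor, as referenced above but not shown here; the owning object only needs a `rounds` attribute:

class _Holder:
    rounds = 12                 # assumption: bcrypt-style work factor
    _convert = _convert         # reuse the function above as a method

hashed = _Holder()._convert("s3cret")    # str -> new PasswordHash
same = _Holder()._convert(hashed)        # PasswordHash passes through unchanged
nothing = _Holder()._convert(None)       # None passes through as None
_Holder()._convert(42)                   # raises TypeError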
def generate_sample_json(): """Generate sample json data for testing""" check = EpubCheck(samples.EPUB3_VALID) with open(samples.RESULT_VALID, 'wb') as jsonfile: jsonfile.write(check._stdout) check = EpubCheck(samples.EPUB3_INVALID) with open(samples.RESULT_INVALID, 'wb') as jsonfile: jsonfile.write(check._stdout)
Generate sample json data for testing
Below is the instruction that describes the task:
### Input:
Generate sample json data for testing
### Response:
def generate_sample_json():
    """Generate sample json data for testing"""
    check = EpubCheck(samples.EPUB3_VALID)
    with open(samples.RESULT_VALID, 'wb') as jsonfile:
        jsonfile.write(check._stdout)
    check = EpubCheck(samples.EPUB3_INVALID)
    with open(samples.RESULT_INVALID, 'wb') as jsonfile:
        jsonfile.write(check._stdout)
def generate_patches(self): """ Generates a list of patches for each file underneath self.root_directory that satisfy the given conditions given query conditions, where patches for each file are suggested by self.suggestor. """ start_pos = self.start_position or Position(None, None) end_pos = self.end_position or Position(None, None) path_list = Query._walk_directory(self.root_directory) path_list = Query._sublist(path_list, start_pos.path, end_pos.path) path_list = ( path for path in path_list if Query._path_looks_like_code(path) and (self.path_filter(path)) or (self.inc_extensionless and helpers.is_extensionless(path)) ) for path in path_list: try: lines = list(open(path)) except (IOError, UnicodeDecodeError): # If we can't open the file--perhaps it's a symlink whose # destination no loner exists--then short-circuit. continue for patch in self.suggestor(lines): if path == start_pos.path: if patch.start_line_number < start_pos.line_number: continue # suggestion is pre-start_pos if path == end_pos.path: if patch.end_line_number >= end_pos.line_number: break # suggestion is post-end_pos old_lines = lines[ patch.start_line_number:patch.end_line_number] if patch.new_lines is None or patch.new_lines != old_lines: patch.path = path yield patch # re-open file, in case contents changed lines[:] = list(open(path))
Generates a list of patches for each file underneath self.root_directory that satisfy the given conditions given query conditions, where patches for each file are suggested by self.suggestor.
Below is the the instruction that describes the task: ### Input: Generates a list of patches for each file underneath self.root_directory that satisfy the given conditions given query conditions, where patches for each file are suggested by self.suggestor. ### Response: def generate_patches(self): """ Generates a list of patches for each file underneath self.root_directory that satisfy the given conditions given query conditions, where patches for each file are suggested by self.suggestor. """ start_pos = self.start_position or Position(None, None) end_pos = self.end_position or Position(None, None) path_list = Query._walk_directory(self.root_directory) path_list = Query._sublist(path_list, start_pos.path, end_pos.path) path_list = ( path for path in path_list if Query._path_looks_like_code(path) and (self.path_filter(path)) or (self.inc_extensionless and helpers.is_extensionless(path)) ) for path in path_list: try: lines = list(open(path)) except (IOError, UnicodeDecodeError): # If we can't open the file--perhaps it's a symlink whose # destination no loner exists--then short-circuit. continue for patch in self.suggestor(lines): if path == start_pos.path: if patch.start_line_number < start_pos.line_number: continue # suggestion is pre-start_pos if path == end_pos.path: if patch.end_line_number >= end_pos.line_number: break # suggestion is post-end_pos old_lines = lines[ patch.start_line_number:patch.end_line_number] if patch.new_lines is None or patch.new_lines != old_lines: patch.path = path yield patch # re-open file, in case contents changed lines[:] = list(open(path))
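A heavily hedged sketch of how a suggestor plugs into `generate_patches`, in the spirit of the codemod-style library this appears to come from; the `Patch` and `Query` signatures below are assumptions, not verified API:

# Assumed suggestor: propose replacing 'foo' with 'bar' on each matching line.
def suggestor(lines):
    for i, line in enumerate(lines):
        if 'foo' in line:
            yield Patch(i, new_lines=[line.replace('foo', 'bar')])

query = Query(suggestor, root_directory='src')   # assumption: Query(suggestor, ..., root_directory=...)
for patch in query.generate_patches():
    print(patch.path, patch.start_line_number)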
def exists(self, index, id, doc_type='_all', params=None): """ Returns a boolean indicating whether or not given document exists in Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_ :arg index: The name of the index :arg id: The document ID :arg doc_type: The type of the document (uses `_all` by default to fetch the first document matching the ID across all types) :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value """ try: self.transport.perform_request( 'HEAD', _make_path(index, doc_type, id), params=params) except exceptions.NotFoundError: return gen.Return(False) raise gen.Return(True)
Returns a boolean indicating whether or not given document exists in Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_ :arg index: The name of the index :arg id: The document ID :arg doc_type: The type of the document (uses `_all` by default to fetch the first document matching the ID across all types) :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value
Below is the the instruction that describes the task: ### Input: Returns a boolean indicating whether or not given document exists in Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_ :arg index: The name of the index :arg id: The document ID :arg doc_type: The type of the document (uses `_all` by default to fetch the first document matching the ID across all types) :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value ### Response: def exists(self, index, id, doc_type='_all', params=None): """ Returns a boolean indicating whether or not given document exists in Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_ :arg index: The name of the index :arg id: The document ID :arg doc_type: The type of the document (uses `_all` by default to fetch the first document matching the ID across all types) :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value """ try: self.transport.perform_request( 'HEAD', _make_path(index, doc_type, id), params=params) except exceptions.NotFoundError: return gen.Return(False) raise gen.Return(True)
def compute_header_hmac_hash(context): """Compute HMAC-SHA256 hash of header. Used to prevent header tampering.""" return hmac.new( hashlib.sha512( b'\xff' * 8 + hashlib.sha512( context._.header.value.dynamic_header.master_seed.data + context.transformed_key + b'\x01' ).digest() ).digest(), context._.header.data, hashlib.sha256 ).digest()
Compute HMAC-SHA256 hash of header. Used to prevent header tampering.
Below is the the instruction that describes the task: ### Input: Compute HMAC-SHA256 hash of header. Used to prevent header tampering. ### Response: def compute_header_hmac_hash(context): """Compute HMAC-SHA256 hash of header. Used to prevent header tampering.""" return hmac.new( hashlib.sha512( b'\xff' * 8 + hashlib.sha512( context._.header.value.dynamic_header.master_seed.data + context.transformed_key + b'\x01' ).digest() ).digest(), context._.header.data, hashlib.sha256 ).digest()
def clear(self): """ Discards all registered handlers and cached results """ with self._hlock: self.handlers.clear() with self._mlock: self.memoize.clear()
Discards all registered handlers and cached results
Below is the instruction that describes the task:
### Input:
Discards all registered handlers and cached results
### Response:
def clear(self):
    """ Discards all registered handlers and cached results """
    with self._hlock:
        self.handlers.clear()
    with self._mlock:
        self.memoize.clear()
def generateCertificate(self, alias, commonName, organizationalUnit, city, state, country, keyalg="RSA", keysize=1024, sigalg="SHA256withRSA", validity=90 ): """ Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore. """ params = {"f" : "json", "alias" : alias, "commonName" : commonName, "organizationalUnit" : organizationalUnit, "city" : city, "state" : state, "country" : country, "keyalg" : keyalg, "keysize" : keysize, "sigalg" : sigalg, "validity" : validity } url = self._url + "/SSLCertificate/ generateCertificate" return self._post(url=url, param_dict=params, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore.
Below is the the instruction that describes the task: ### Input: Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore. ### Response: def generateCertificate(self, alias, commonName, organizationalUnit, city, state, country, keyalg="RSA", keysize=1024, sigalg="SHA256withRSA", validity=90 ): """ Use this operation to create a self-signed certificate or as a starting point for getting a production-ready CA-signed certificate. The portal will generate a certificate for you and store it in its keystore. """ params = {"f" : "json", "alias" : alias, "commonName" : commonName, "organizationalUnit" : organizationalUnit, "city" : city, "state" : state, "country" : country, "keyalg" : keyalg, "keysize" : keysize, "sigalg" : sigalg, "validity" : validity } url = self._url + "/SSLCertificate/ generateCertificate" return self._post(url=url, param_dict=params, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
def uniq(args): """ %prog uniq fasta uniq.fasta remove fasta records that are the same """ p = OptionParser(uniq.__doc__) p.add_option("--seq", default=False, action="store_true", help="Uniqify the sequences [default: %default]") p.add_option("-t", "--trimname", dest="trimname", action="store_true", default=False, help="turn on the defline trim to first space [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, uniqfastafile = args fw = must_open(uniqfastafile, "w") seq = opts.seq for rec in _uniq_rec(fastafile, seq=seq): if opts.trimname: rec.description = "" SeqIO.write([rec], fw, "fasta")
%prog uniq fasta uniq.fasta remove fasta records that are the same
Below is the the instruction that describes the task: ### Input: %prog uniq fasta uniq.fasta remove fasta records that are the same ### Response: def uniq(args): """ %prog uniq fasta uniq.fasta remove fasta records that are the same """ p = OptionParser(uniq.__doc__) p.add_option("--seq", default=False, action="store_true", help="Uniqify the sequences [default: %default]") p.add_option("-t", "--trimname", dest="trimname", action="store_true", default=False, help="turn on the defline trim to first space [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) fastafile, uniqfastafile = args fw = must_open(uniqfastafile, "w") seq = opts.seq for rec in _uniq_rec(fastafile, seq=seq): if opts.trimname: rec.description = "" SeqIO.write([rec], fw, "fasta")
def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs): """ Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return: """ return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return:
Below is the the instruction that describes the task: ### Input: Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return: ### Response: def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs): """ Create the Signature TI object. Args: owner: file_content: file_name: file_type: name: **kwargs: Return: """ return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs)
def fix_missing(df, col, name, na_dict): """ Fill missing data in a column of df with the median, and add a {name}_na column which specifies if the data was missing. Parameters: ----------- df: The data frame that will be changed. col: The column of data to fix by filling in missing data. name: The name of the new filled column in df. na_dict: A dictionary of values to create na's of and the value to insert. If name is not a key of na_dict the median will fill any missing data. Also if name is not a key of na_dict and there is no missing data in col, then no {name}_na column is not created. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {}) >>> df col1 col2 col1_na 0 1 5 False 1 2 2 True 2 3 2 False >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col2'], 'col2', {}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500}) >>> df col1 col2 col1_na 0 1 5 False 1 500 2 True 2 3 2 False """ if is_numeric_dtype(col): if pd.isnull(col).sum() or (name in na_dict): df[name+'_na'] = pd.isnull(col) filler = na_dict[name] if name in na_dict else col.median() df[name] = col.fillna(filler) na_dict[name] = filler return na_dict
Fill missing data in a column of df with the median, and add a {name}_na column which specifies if the data was missing. Parameters: ----------- df: The data frame that will be changed. col: The column of data to fix by filling in missing data. name: The name of the new filled column in df. na_dict: A dictionary of values to create na's of and the value to insert. If name is not a key of na_dict the median will fill any missing data. Also if name is not a key of na_dict and there is no missing data in col, then no {name}_na column is not created. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {}) >>> df col1 col2 col1_na 0 1 5 False 1 2 2 True 2 3 2 False >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col2'], 'col2', {}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500}) >>> df col1 col2 col1_na 0 1 5 False 1 500 2 True 2 3 2 False
Below is the the instruction that describes the task: ### Input: Fill missing data in a column of df with the median, and add a {name}_na column which specifies if the data was missing. Parameters: ----------- df: The data frame that will be changed. col: The column of data to fix by filling in missing data. name: The name of the new filled column in df. na_dict: A dictionary of values to create na's of and the value to insert. If name is not a key of na_dict the median will fill any missing data. Also if name is not a key of na_dict and there is no missing data in col, then no {name}_na column is not created. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {}) >>> df col1 col2 col1_na 0 1 5 False 1 2 2 True 2 3 2 False >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col2'], 'col2', {}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500}) >>> df col1 col2 col1_na 0 1 5 False 1 500 2 True 2 3 2 False ### Response: def fix_missing(df, col, name, na_dict): """ Fill missing data in a column of df with the median, and add a {name}_na column which specifies if the data was missing. Parameters: ----------- df: The data frame that will be changed. col: The column of data to fix by filling in missing data. name: The name of the new filled column in df. na_dict: A dictionary of values to create na's of and the value to insert. If name is not a key of na_dict the median will fill any missing data. Also if name is not a key of na_dict and there is no missing data in col, then no {name}_na column is not created. Examples: --------- >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {}) >>> df col1 col2 col1_na 0 1 5 False 1 2 2 True 2 3 2 False >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col2'], 'col2', {}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]}) >>> df col1 col2 0 1 5 1 nan 2 2 3 2 >>> fix_missing(df, df['col1'], 'col1', {'col1' : 500}) >>> df col1 col2 col1_na 0 1 5 False 1 500 2 True 2 3 2 False """ if is_numeric_dtype(col): if pd.isnull(col).sum() or (name in na_dict): df[name+'_na'] = pd.isnull(col) filler = na_dict[name] if name in na_dict else col.median() df[name] = col.fillna(filler) na_dict[name] = filler return na_dict
def get_email_forwarding(netid): """ Return a restclients.models.uwnetid.UwEmailForwarding object on the given uwnetid """ subscriptions = get_netid_subscriptions(netid, Subscription.SUBS_CODE_U_FORWARDING) for subscription in subscriptions: if subscription.subscription_code == Subscription.SUBS_CODE_U_FORWARDING: return_obj = UwEmailForwarding() if subscription.data_value: return_obj.fwd = subscription.data_value return_obj.permitted = subscription.permitted return_obj.status = subscription.status_name return return_obj return None
Return a restclients.models.uwnetid.UwEmailForwarding object on the given uwnetid
Below is the the instruction that describes the task: ### Input: Return a restclients.models.uwnetid.UwEmailForwarding object on the given uwnetid ### Response: def get_email_forwarding(netid): """ Return a restclients.models.uwnetid.UwEmailForwarding object on the given uwnetid """ subscriptions = get_netid_subscriptions(netid, Subscription.SUBS_CODE_U_FORWARDING) for subscription in subscriptions: if subscription.subscription_code == Subscription.SUBS_CODE_U_FORWARDING: return_obj = UwEmailForwarding() if subscription.data_value: return_obj.fwd = subscription.data_value return_obj.permitted = subscription.permitted return_obj.status = subscription.status_name return return_obj return None
def _copy_mbox(self, mbox): """Copy the contents of a mbox to a temporary file""" tmp_path = tempfile.mktemp(prefix='perceval_') with mbox.container as f_in: with open(tmp_path, mode='wb') as f_out: for l in f_in: f_out.write(l) return tmp_path
Copy the contents of a mbox to a temporary file
Below is the instruction that describes the task:
### Input:
Copy the contents of a mbox to a temporary file
### Response:
def _copy_mbox(self, mbox):
    """Copy the contents of a mbox to a temporary file"""
    tmp_path = tempfile.mktemp(prefix='perceval_')

    with mbox.container as f_in:
        with open(tmp_path, mode='wb') as f_out:
            for l in f_in:
                f_out.write(l)

    return tmp_path
def invalid_example_number(region_code): """Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in. """ if not _is_valid_region_code(region_code): return None # We start off with a valid fixed-line number since every country # supports this. Alternatively we could start with a different number # type, since fixed-line numbers typically have a wide breadth of valid # number lengths and we may have to make it very short before we get an # invalid number. metadata = PhoneMetadata.metadata_for_region(region_code.upper()) desc = _number_desc_by_type(metadata, PhoneNumberType.FIXED_LINE) if desc is None or desc.example_number is None: # This shouldn't happen; we have a test for this. return None # pragma no cover example_number = desc.example_number # Try and make the number invalid. We do this by changing the length. We # try reducing the length of the number, since currently no region has a # number that is the same length as MIN_LENGTH_FOR_NSN. This is probably # quicker than making the number longer, which is another # alternative. We could also use the possible number pattern to extract # the possible lengths of the number to make this faster, but this # method is only for unit-testing so simplicity is preferred to # performance. We don't want to return a number that can't be parsed, # so we check the number is long enough. We try all possible lengths # because phone number plans often have overlapping prefixes so the # number 123456 might be valid as a fixed-line number, and 12345 as a # mobile number. It would be faster to loop in a different order, but we # prefer numbers that look closer to real numbers (and it gives us a # variety of different lengths for the resulting phone numbers - # otherwise they would all be MIN_LENGTH_FOR_NSN digits long.) phone_number_length = len(example_number) - 1 while phone_number_length >= _MIN_LENGTH_FOR_NSN: number_to_try = example_number[:phone_number_length] try: possibly_valid_number = parse(number_to_try, region_code) if not is_valid_number(possibly_valid_number): return possibly_valid_number except NumberParseException: # pragma no cover # Shouldn't happen: we have already checked the length, we know # example numbers have only valid digits, and we know the region # code is fine. pass phone_number_length -= 1 # We have a test to check that this doesn't happen for any of our # supported regions. return None
Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in.
Below is the the instruction that describes the task: ### Input: Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in. ### Response: def invalid_example_number(region_code): """Gets an invalid number for the specified region. This is useful for unit-testing purposes, where you want to test what will happen with an invalid number. Note that the number that is returned will always be able to be parsed and will have the correct country code. It may also be a valid *short* number/code for this region. Validity checking such numbers is handled with shortnumberinfo. Arguments: region_code -- The region for which an example number is needed. Returns an invalid number for the specified region. Returns None when an unsupported region or the region 001 (Earth) is passed in. """ if not _is_valid_region_code(region_code): return None # We start off with a valid fixed-line number since every country # supports this. Alternatively we could start with a different number # type, since fixed-line numbers typically have a wide breadth of valid # number lengths and we may have to make it very short before we get an # invalid number. metadata = PhoneMetadata.metadata_for_region(region_code.upper()) desc = _number_desc_by_type(metadata, PhoneNumberType.FIXED_LINE) if desc is None or desc.example_number is None: # This shouldn't happen; we have a test for this. return None # pragma no cover example_number = desc.example_number # Try and make the number invalid. We do this by changing the length. We # try reducing the length of the number, since currently no region has a # number that is the same length as MIN_LENGTH_FOR_NSN. This is probably # quicker than making the number longer, which is another # alternative. We could also use the possible number pattern to extract # the possible lengths of the number to make this faster, but this # method is only for unit-testing so simplicity is preferred to # performance. We don't want to return a number that can't be parsed, # so we check the number is long enough. We try all possible lengths # because phone number plans often have overlapping prefixes so the # number 123456 might be valid as a fixed-line number, and 12345 as a # mobile number. It would be faster to loop in a different order, but we # prefer numbers that look closer to real numbers (and it gives us a # variety of different lengths for the resulting phone numbers - # otherwise they would all be MIN_LENGTH_FOR_NSN digits long.) phone_number_length = len(example_number) - 1 while phone_number_length >= _MIN_LENGTH_FOR_NSN: number_to_try = example_number[:phone_number_length] try: possibly_valid_number = parse(number_to_try, region_code) if not is_valid_number(possibly_valid_number): return possibly_valid_number except NumberParseException: # pragma no cover # Shouldn't happen: we have already checked the length, we know # example numbers have only valid digits, and we know the region # code is fine. 
pass phone_number_length -= 1 # We have a test to check that this doesn't happen for any of our # supported regions. return None
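A hedged usage sketch for the function above. It assumes the python-phonenumbers package re-exports invalid_example_number at the top level; the region code "GB" is only an example.

# Usage sketch (assumption: python-phonenumbers exposes these names at top level).
import phonenumbers

bad = phonenumbers.invalid_example_number("GB")
if bad is not None:
    print(phonenumbers.format_number(bad, phonenumbers.PhoneNumberFormat.E164))
    print(phonenumbers.is_valid_number(bad))   # expected to be False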
def get_device_by_name(self, device_name): """Search the list of connected devices by name. device_name param is the string name of the device """ # Find the device for the vera device name we are interested in found_device = None for device in self.get_devices(): if device.name == device_name: found_device = device # found the first (and should be only) one so we will finish break if found_device is None: logger.debug('Did not find device with {}'.format(device_name)) return found_device
Search the list of connected devices by name. device_name param is the string name of the device
Below is the the instruction that describes the task: ### Input: Search the list of connected devices by name. device_name param is the string name of the device ### Response: def get_device_by_name(self, device_name): """Search the list of connected devices by name. device_name param is the string name of the device """ # Find the device for the vera device name we are interested in found_device = None for device in self.get_devices(): if device.name == device_name: found_device = device # found the first (and should be only) one so we will finish break if found_device is None: logger.debug('Did not find device with {}'.format(device_name)) return found_device
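A hedged usage sketch for the lookup above; the controller class name, constructor URL and device name are assumptions used purely for illustration and may differ from the real pyvera API.

# Hypothetical setup; the real package layout may differ.
import pyvera

controller = pyvera.VeraController("http://192.168.1.10:3480")
device = controller.get_device_by_name("Living Room Lamp")
if device is not None:
    print(device.name)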
def jsonarrtrim(self, name, path, start, stop): """ Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop`` """ return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop``
Below is the the instruction that describes the task: ### Input: Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop`` ### Response: def jsonarrtrim(self, name, path, start, stop): """ Trim the array JSON value under ``path`` at key ``name`` to the inclusive range given by ``start`` and ``stop`` """ return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
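A hedged usage sketch, assuming the rejson client package and a Redis server with the JSON module loaded; the key name and values are illustrative.

# Assumes rejson.Client and Path; the trim range is inclusive, keeping indexes 1..3.
from rejson import Client, Path

rj = Client(host="localhost", port=6379, decode_responses=True)
rj.jsonset("scores", Path.rootPath(), [10, 20, 30, 40, 50])
rj.jsonarrtrim("scores", Path.rootPath(), 1, 3)
print(rj.jsonget("scores", Path.rootPath()))   # [20, 30, 40]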
def kpl_set_on_mask(self, address, group, mask): """Set the on mask of a KPL button.""" addr = Address(address) device = self.plm.devices[addr.id] device.states[group].set_on_mask(mask)
Set the on mask of a KPL button.
Below is the the instruction that describes the task: ### Input: Set the on mask of a KPL button. ### Response: def kpl_set_on_mask(self, address, group, mask): """Set the on mask of a KPL button.""" addr = Address(address) device = self.plm.devices[addr.id] device.states[group].set_on_mask(mask)
def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ plot_method = properties.pop('plot_method', None) properties = mpl_to_bokeh(properties) data = dict(properties, **mapping) if self._has_holes: plot_method = 'multi_polygons' elif plot_method is None: plot_method = self._plot_methods.get('single') renderer = getattr(plot, plot_method)(**data) if self.colorbar: for k, v in list(self.handles.items()): if not k.endswith('color_mapper'): continue self._draw_colorbar(plot, v, k[:-12]) return renderer, renderer.glyph
Returns a Bokeh glyph object.
Below is the the instruction that describes the task: ### Input: Returns a Bokeh glyph object. ### Response: def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ plot_method = properties.pop('plot_method', None) properties = mpl_to_bokeh(properties) data = dict(properties, **mapping) if self._has_holes: plot_method = 'multi_polygons' elif plot_method is None: plot_method = self._plot_methods.get('single') renderer = getattr(plot, plot_method)(**data) if self.colorbar: for k, v in list(self.handles.items()): if not k.endswith('color_mapper'): continue self._draw_colorbar(plot, v, k[:-12]) return renderer, renderer.glyph
def parse(cls, buff, offset): """ Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value. """ size, offset = cls.size_primitive.parse(buff, offset) if size == -1: return None, offset var_struct = struct.Struct("!%ds" % size) value = var_struct.unpack_from(buff, offset)[0] value = cls.parse_value(value) offset += var_struct.size return value, offset
Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value.
Below is the the instruction that describes the task: ### Input: Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value. ### Response: def parse(cls, buff, offset): """ Given a buffer and offset, returns the parsed value and new offset. Parses the ``size_primitive`` first to determine how many more bytes to consume to extract the value. """ size, offset = cls.size_primitive.parse(buff, offset) if size == -1: return None, offset var_struct = struct.Struct("!%ds" % size) value = var_struct.unpack_from(buff, offset)[0] value = cls.parse_value(value) offset += var_struct.size return value, offset
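A stand-alone sketch of the same length-prefixed parsing pattern, assuming a 4-byte big-endian signed size prefix (the actual size_primitive in the library may differ):

import struct

def parse_length_prefixed(buff, offset):
    # Read a 4-byte big-endian signed length first, then consume exactly that
    # many bytes for the value; -1 encodes a missing value.
    (size,) = struct.unpack_from("!i", buff, offset)
    offset += 4
    if size == -1:
        return None, offset
    var_struct = struct.Struct("!%ds" % size)
    (value,) = var_struct.unpack_from(buff, offset)
    return value, offset + var_struct.size

payload = struct.pack("!i5s", 5, b"hello")
print(parse_length_prefixed(payload, 0))   # (b'hello', 9)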
def isPe32(self): """ Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE32: return True return False
Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}.
Below is the the instruction that describes the task: ### Input: Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. ### Response: def isPe32(self): """ Determines if the current L{PE} instance is a PE32 file. @rtype: bool @return: C{True} if the current L{PE} instance is a PE32 file. Otherwise, returns C{False}. """ if self.ntHeaders.optionalHeader.magic.value == consts.PE32: return True return False
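A minimal stand-alone illustration of the check: in the PE specification the optional-header magic is 0x10B for PE32 and 0x20B for PE32+, and the consts module above presumably defines the same values.

PE32_MAGIC = 0x10B        # value of consts.PE32 in the PE specification
PE32_PLUS_MAGIC = 0x20B   # PE32+

def is_pe32(optional_header_magic):
    return optional_header_magic == PE32_MAGIC

print(is_pe32(PE32_MAGIC))        # True
print(is_pe32(PE32_PLUS_MAGIC))   # False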
def fts_match(self, fts_mask, segment): """Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment` """ fts_mask = set(fts_mask) fts_seg = self.fts(segment) if fts_seg: return fts_seg <= fts_mask else: return None
Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment`
Below is the the instruction that describes the task: ### Input: Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment` ### Response: def fts_match(self, fts_mask, segment): """Evaluates whether a set of features 'match' a segment (are a subset of that segment's features) Args: fts_mask (list): list of (value, feature) tuples segment (unicode): IPA string corresponding to segment (consonant or vowel) Returns: bool: None if `segment` cannot be parsed; True if the feature values of `fts_mask` are a subset of those for `segment` """ fts_mask = set(fts_mask) fts_seg = self.fts(segment) if fts_seg: return fts_seg <= fts_mask else: return None
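A stand-alone illustration of the subset comparison the method performs; the (value, feature) tuples below are invented for the example rather than taken from the real IPA feature table.

# Hypothetical feature sets standing in for fts(segment) and the mask.
fts_mask = {("+", "voi"), ("-", "son"), ("+", "cont")}
fts_seg = {("+", "voi"), ("-", "son")}

print(fts_seg <= fts_mask)          # True: every segment feature is contained in the mask
print({("-", "voi")} <= fts_mask)   # False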
def sort_targets(targets): """ :API: public :return: the targets that `targets` depend on sorted from most dependent to least. """ roots, inverted_deps = invert_dependencies(targets) ordered = [] visited = set() def topological_sort(target): if target not in visited: visited.add(target) if target in inverted_deps: for dep in inverted_deps[target]: topological_sort(dep) ordered.append(target) for root in roots: topological_sort(root) return ordered
:API: public :return: the targets that `targets` depend on sorted from most dependent to least.
Below is the the instruction that describes the task: ### Input: :API: public :return: the targets that `targets` depend on sorted from most dependent to least. ### Response: def sort_targets(targets): """ :API: public :return: the targets that `targets` depend on sorted from most dependent to least. """ roots, inverted_deps = invert_dependencies(targets) ordered = [] visited = set() def topological_sort(target): if target not in visited: visited.add(target) if target in inverted_deps: for dep in inverted_deps[target]: topological_sort(dep) ordered.append(target) for root in roots: topological_sort(root) return ordered
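A stand-alone sketch of the post-order depth-first traversal used above, run over a tiny invented dependency map:

deps = {"app": ["lib", "util"], "lib": ["util"], "util": []}

ordered, visited = [], set()

def topological_sort(node):
    if node in visited:
        return
    visited.add(node)
    for dep in deps[node]:
        topological_sort(dep)
    ordered.append(node)

for root in ("app",):
    topological_sort(root)

print(ordered)   # ['util', 'lib', 'app'] - each target appears after what it depends on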
def run_primlist(self, primlist, skip_remaining=False): '''Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ''' runlist = self.open_primlist(primlist) for index, run in enumerate(runlist): logging.info('Progressing with run %i out of %i...', index + 1, len(runlist)) join = self.run_run(run, use_thread=True) status = join() if skip_remaining and not status == run_status.finished: logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status) break
Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value>
Below is the the instruction that describes the task: ### Input: Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ### Response: def run_primlist(self, primlist, skip_remaining=False): '''Runs runs from a primlist. Parameters ---------- primlist : string Filename of primlist. skip_remaining : bool If True, skip remaining runs, if a run does not exit with status FINISHED. Note ---- Primlist is a text file of the following format (comment line by adding '#'): <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value> ''' runlist = self.open_primlist(primlist) for index, run in enumerate(runlist): logging.info('Progressing with run %i out of %i...', index + 1, len(runlist)) join = self.run_run(run, use_thread=True) status = join() if skip_remaining and not status == run_status.finished: logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status) break
def check(self, pointer, expected, raise_onerror=False): """Check if the expected value exists at the given path in the object. :param pointer: the path to search in :param expected: the expected value :param raise_onerror: should raise on error? :return: boolean """ obj = self.document for token in Pointer(pointer): try: obj = token.extract(obj, bypass_ref=True) except ExtractError as error: if raise_onerror: raise Error(*error.args) logger.exception(error) return False return obj == expected
Check if the expected value exists at the given path in the object. :param pointer: the path to search in :param expected: the expected value :param raise_onerror: should raise on error? :return: boolean
Below is the the instruction that describes the task: ### Input: Check if the expected value exists at the given path in the object. :param pointer: the path to search in :param expected: the expected value :param raise_onerror: should raise on error? :return: boolean ### Response: def check(self, pointer, expected, raise_onerror=False): """Check if the expected value exists at the given path in the object. :param pointer: the path to search in :param expected: the expected value :param raise_onerror: should raise on error? :return: boolean """ obj = self.document for token in Pointer(pointer): try: obj = token.extract(obj, bypass_ref=True) except ExtractError as error: if raise_onerror: raise Error(*error.args) logger.exception(error) return False return obj == expected
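A stand-alone sketch of the same idea using plain dict/list indexing instead of the Pointer/extract machinery above; the document and paths are invented.

document = {"user": {"roles": ["admin", "dev"]}}

def check(document, tokens, expected):
    obj = document
    for token in tokens:
        try:
            obj = obj[token]
        except (KeyError, IndexError, TypeError):
            return False
    return obj == expected

print(check(document, ["user", "roles", 0], "admin"))   # True
print(check(document, ["user", "email"], "x@y.z"))       # False: path does not exist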
def lastId(self): """ Children passage :rtype: str :returns: Last child of the graph. Shortcut to self.graph.children[-1] """ if self._last is False: # Request the next urn self._last = self.childIds[-1] return self._last
Children passage :rtype: str :returns: Last child of the graph. Shortcut to self.graph.children[-1]
Below is the the instruction that describes the task: ### Input: Children passage :rtype: str :returns: Last child of the graph. Shortcut to self.graph.children[-1] ### Response: def lastId(self): """ Children passage :rtype: str :returns: Last child of the graph. Shortcut to self.graph.children[-1] """ if self._last is False: # Request the next urn self._last = self.childIds[-1] return self._last
def call_on_commit(self, callback): """Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.) """ if not self.in_transaction(): callback() else: self._on_commit_queue.append(callback)
Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.)
Below is the the instruction that describes the task: ### Input: Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.) ### Response: def call_on_commit(self, callback): """Call a callback upon successful commit of a transaction. If not in a transaction, the callback is called immediately. In a transaction, multiple callbacks may be registered and will be called once the transaction commits, in the order in which they were registered. If the transaction fails, the callbacks will not be called. If the callback raises an exception, it bubbles up normally. This means: If the callback is called immediately, any exception it raises will bubble up immediately. If the call is postponed until commit, remaining callbacks will be skipped and the exception will bubble up through the transaction() call. (However, the transaction is already committed at that point.) """ if not self.in_transaction(): callback() else: self._on_commit_queue.append(callback)
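A minimal stand-alone sketch of the defer-until-commit pattern described above (not the actual datastore implementation):

class FakeTransaction:
    def __init__(self):
        self._on_commit_queue = []
        self._in_transaction = False

    def call_on_commit(self, callback):
        # Outside a transaction: run immediately; inside: defer until commit.
        if not self._in_transaction:
            callback()
        else:
            self._on_commit_queue.append(callback)

    def begin(self):
        self._in_transaction = True

    def commit(self):
        for callback in self._on_commit_queue:
            callback()
        self._on_commit_queue = []
        self._in_transaction = False

txn = FakeTransaction()
txn.begin()
txn.call_on_commit(lambda: print("runs only after commit"))
txn.commit()   # prints the message now, in registration order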
def signal_to_exception(sig: signal.Signals) -> SignalException: """ Convert a ``signal.Signals`` to a ``SignalException``. This allows for natural, pythonic signal handing with the use of try-except blocks. .. code-block:: python import signal import zproc zproc.signal_to_exception(signals.SIGTERM) try: ... except zproc.SignalException as e: print("encountered:", e) finally: zproc.exception_to_signal(signals.SIGTERM) """ signal.signal(sig, _sig_exc_handler) return SignalException(sig)
Convert a ``signal.Signals`` to a ``SignalException``. This allows for natural, pythonic signal handing with the use of try-except blocks. .. code-block:: python import signal import zproc zproc.signal_to_exception(signals.SIGTERM) try: ... except zproc.SignalException as e: print("encountered:", e) finally: zproc.exception_to_signal(signals.SIGTERM)
Below is the the instruction that describes the task: ### Input: Convert a ``signal.Signals`` to a ``SignalException``. This allows for natural, pythonic signal handing with the use of try-except blocks. .. code-block:: python import signal import zproc zproc.signal_to_exception(signals.SIGTERM) try: ... except zproc.SignalException as e: print("encountered:", e) finally: zproc.exception_to_signal(signals.SIGTERM) ### Response: def signal_to_exception(sig: signal.Signals) -> SignalException: """ Convert a ``signal.Signals`` to a ``SignalException``. This allows for natural, pythonic signal handing with the use of try-except blocks. .. code-block:: python import signal import zproc zproc.signal_to_exception(signals.SIGTERM) try: ... except zproc.SignalException as e: print("encountered:", e) finally: zproc.exception_to_signal(signals.SIGTERM) """ signal.signal(sig, _sig_exc_handler) return SignalException(sig)
def _query(self, method, path, data=None, page=False, retry=0): """ Fetch an object from the Graph API and parse the output, returning a tuple where the first item is the object yielded by the Graph API and the second is the URL for the next page of results, or ``None`` if results have been exhausted. :param method: A string describing the HTTP method. :param path: A string describing the object in the Graph API. :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests). :param page: A boolean describing whether to return an iterator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried. """ if(data): data = dict( (k.replace('_sqbro_', '['), v) for k, v in data.items()) data = dict( (k.replace('_sqbrc_', ']'), v) for k, v in data.items()) data = dict( (k.replace('__', ':'), v) for k, v in data.items()) data = data or {} def load(method, url, data): for key in data: value = data[key] if isinstance(value, (list, dict, set)): data[key] = json.dumps(value) try: if method in ['GET', 'DELETE']: response = self.session.request( method, url, params=data, allow_redirects=True, verify=self.verify_ssl_certificate, timeout=self.timeout ) if method in ['POST', 'PUT']: files = {} for key in data: if hasattr(data[key], 'read'): files[key] = data[key] for key in files: data.pop(key) response = self.session.request( method, url, data=data, files=files, verify=self.verify_ssl_certificate, timeout=self.timeout ) if 500 <= response.status_code < 600: # Facebook 5XX errors usually come with helpful messages # as a JSON object describing the problem with the request. # If this is the case, an error will be raised and we just # need to re-raise it. This is most likely to happen # with the Ads API. # This will raise an exception if a JSON-like error object # comes in the response. self._parse(response.content) # If Facebook does not provide any JSON-formatted error # but just a plain-text, useless error, we'll just inform # about a Facebook Internal errror occurred. raise FacebookError( 'Internal Facebook error occurred', response.status_code ) except requests.RequestException as exception: raise HTTPError(exception) result = self._parse(response.content) if isinstance(result, dict): result['headers'] = response.headers try: next_url = result['paging']['next'] except (KeyError, TypeError): next_url = None return result, next_url def load_with_retry(method, url, data): remaining_retries = retry while True: try: return load(method, url, data) except FacepyError as e: log.warn("Exception on %s: %s, retries remaining: %s", url, e, remaining_retries, ) if remaining_retries > 0: remaining_retries -= 1 else: raise def paginate(method, url, data): while url: result, url = load_with_retry(method, url, data) # Reset pagination parameters. for key in ['offset', 'until', 'since']: if key in data: del data[key] yield result # Convert option lists to comma-separated values. 
for key in data: if isinstance(data[key], (list, set, tuple)) and all([isinstance(item, six.string_types) for item in data[key]]): data[key] = ','.join(data[key]) # Support absolute paths too if not path.startswith('/'): if six.PY2: path = '/' + six.text_type(path.decode('utf-8')) else: path = '/' + path url = self._get_url(path) if self.oauth_token: data['access_token'] = self.oauth_token if self.appsecret and self.oauth_token: data['appsecret_proof'] = self._generate_appsecret_proof() if page: return paginate(method, url, data) else: return load_with_retry(method, url, data)[0]
Fetch an object from the Graph API and parse the output, returning a tuple where the first item is the object yielded by the Graph API and the second is the URL for the next page of results, or ``None`` if results have been exhausted. :param method: A string describing the HTTP method. :param path: A string describing the object in the Graph API. :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests). :param page: A boolean describing whether to return an iterator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried.
Below is the the instruction that describes the task: ### Input: Fetch an object from the Graph API and parse the output, returning a tuple where the first item is the object yielded by the Graph API and the second is the URL for the next page of results, or ``None`` if results have been exhausted. :param method: A string describing the HTTP method. :param path: A string describing the object in the Graph API. :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests). :param page: A boolean describing whether to return an iterator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried. ### Response: def _query(self, method, path, data=None, page=False, retry=0): """ Fetch an object from the Graph API and parse the output, returning a tuple where the first item is the object yielded by the Graph API and the second is the URL for the next page of results, or ``None`` if results have been exhausted. :param method: A string describing the HTTP method. :param path: A string describing the object in the Graph API. :param data: A dictionary of HTTP GET parameters (for GET requests) or POST data (for POST requests). :param page: A boolean describing whether to return an iterator that iterates over each page of results. :param retry: An integer describing how many times the request may be retried. """ if(data): data = dict( (k.replace('_sqbro_', '['), v) for k, v in data.items()) data = dict( (k.replace('_sqbrc_', ']'), v) for k, v in data.items()) data = dict( (k.replace('__', ':'), v) for k, v in data.items()) data = data or {} def load(method, url, data): for key in data: value = data[key] if isinstance(value, (list, dict, set)): data[key] = json.dumps(value) try: if method in ['GET', 'DELETE']: response = self.session.request( method, url, params=data, allow_redirects=True, verify=self.verify_ssl_certificate, timeout=self.timeout ) if method in ['POST', 'PUT']: files = {} for key in data: if hasattr(data[key], 'read'): files[key] = data[key] for key in files: data.pop(key) response = self.session.request( method, url, data=data, files=files, verify=self.verify_ssl_certificate, timeout=self.timeout ) if 500 <= response.status_code < 600: # Facebook 5XX errors usually come with helpful messages # as a JSON object describing the problem with the request. # If this is the case, an error will be raised and we just # need to re-raise it. This is most likely to happen # with the Ads API. # This will raise an exception if a JSON-like error object # comes in the response. self._parse(response.content) # If Facebook does not provide any JSON-formatted error # but just a plain-text, useless error, we'll just inform # about a Facebook Internal errror occurred. 
raise FacebookError( 'Internal Facebook error occurred', response.status_code ) except requests.RequestException as exception: raise HTTPError(exception) result = self._parse(response.content) if isinstance(result, dict): result['headers'] = response.headers try: next_url = result['paging']['next'] except (KeyError, TypeError): next_url = None return result, next_url def load_with_retry(method, url, data): remaining_retries = retry while True: try: return load(method, url, data) except FacepyError as e: log.warn("Exception on %s: %s, retries remaining: %s", url, e, remaining_retries, ) if remaining_retries > 0: remaining_retries -= 1 else: raise def paginate(method, url, data): while url: result, url = load_with_retry(method, url, data) # Reset pagination parameters. for key in ['offset', 'until', 'since']: if key in data: del data[key] yield result # Convert option lists to comma-separated values. for key in data: if isinstance(data[key], (list, set, tuple)) and all([isinstance(item, six.string_types) for item in data[key]]): data[key] = ','.join(data[key]) # Support absolute paths too if not path.startswith('/'): if six.PY2: path = '/' + six.text_type(path.decode('utf-8')) else: path = '/' + path url = self._get_url(path) if self.oauth_token: data['access_token'] = self.oauth_token if self.appsecret and self.oauth_token: data['appsecret_proof'] = self._generate_appsecret_proof() if page: return paginate(method, url, data) else: return load_with_retry(method, url, data)[0]
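The method above is internal; a hedged sketch of how it is typically reached through facepy's public GraphAPI wrapper. The token and paths are placeholders, and the keyword names are assumptions.

from facepy import GraphAPI

graph = GraphAPI("<oauth-token>")      # placeholder token
me = graph.get("me", retry=3)          # ends up calling _query('GET', '/me', ..., retry=3)
for page in graph.get("me/friends", page=True):
    print(page.get("data", []))        # one result page per iteration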
def to_vars_dict(self): """ Return local state which is relevant for the cluster setup process. """ return { 'aws_access_key_id': self._access_key, 'aws_secret_access_key': self._secret_key, 'aws_region': self._region_name, 'aws_vpc_name': (self._vpc or ''), 'aws_vpc_id': (self._vpc_id or ''), }
Return local state which is relevant for the cluster setup process.
Below is the the instruction that describes the task: ### Input: Return local state which is relevant for the cluster setup process. ### Response: def to_vars_dict(self): """ Return local state which is relevant for the cluster setup process. """ return { 'aws_access_key_id': self._access_key, 'aws_secret_access_key': self._secret_key, 'aws_region': self._region_name, 'aws_vpc_name': (self._vpc or ''), 'aws_vpc_id': (self._vpc_id or ''), }
def _sign(translator, expr): """Workaround for missing sign function""" op = expr.op() arg, = op.args arg_ = translator.translate(arg) return 'intDivOrZero({0}, abs({0}))'.format(arg_)
Workaround for missing sign function
Below is the the instruction that describes the task: ### Input: Workaround for missing sign function ### Response: def _sign(translator, expr): """Workaround for missing sign function""" op = expr.op() arg, = op.args arg_ = translator.translate(arg) return 'intDivOrZero({0}, abs({0}))'.format(arg_)
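The generated ClickHouse expression leans on intDivOrZero so that an input of 0 maps to 0 instead of raising a division-by-zero; the same trick written in plain Python for illustration:

def sign(x):
    # Integer-divide by the absolute value, mapping 0 to 0 instead of raising.
    return 0 if x == 0 else x // abs(x)

print([sign(v) for v in (-7, 0, 3)])   # [-1, 0, 1]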
def get_user( self, identified_with, identifier, req, resp, resource, uri_kwargs ): """Return default user object.""" return self.user
Return default user object.
Below is the the instruction that describes the task: ### Input: Return default user object. ### Response: def get_user( self, identified_with, identifier, req, resp, resource, uri_kwargs ): """Return default user object.""" return self.user
def process_response(self, response): """ Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content. """ if response.status_code != 200: raise TwilioException('Unable to fetch page', response) return json.loads(response.text)
Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content.
Below is the the instruction that describes the task: ### Input: Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content. ### Response: def process_response(self, response): """ Load a JSON response. :param Response response: The HTTP response. :return dict: The JSON-loaded content. """ if response.status_code != 200: raise TwilioException('Unable to fetch page', response) return json.loads(response.text)
def get_dashboard_panels_visibility_by_section(section_name): """ Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples. """ registry_info = get_dashboard_registry_record() if section_name not in registry_info: # Registry hasn't been set, do it at least for this section registry_info = \ setup_dashboard_panels_visibility_registry(section_name) pairs = registry_info.get(section_name) pairs = get_strings(pairs) if pairs is None: # In the registry, but with None value? setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) pairs = pairs.split(',') if len(pairs) == 0 or len(pairs) % 2 != 0: # Non-valid or malformed value setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) result = [ (pairs[i], pairs[i + 1]) for i in range(len(pairs)) if i % 2 == 0] return result
Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples.
Below is the the instruction that describes the task: ### Input: Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples. ### Response: def get_dashboard_panels_visibility_by_section(section_name): """ Return a list of pairs as values that represents the role-permission view relation for the panel section passed in. :param section_name: the panels section id. :return: a list of tuples. """ registry_info = get_dashboard_registry_record() if section_name not in registry_info: # Registry hasn't been set, do it at least for this section registry_info = \ setup_dashboard_panels_visibility_registry(section_name) pairs = registry_info.get(section_name) pairs = get_strings(pairs) if pairs is None: # In the registry, but with None value? setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) pairs = pairs.split(',') if len(pairs) == 0 or len(pairs) % 2 != 0: # Non-valid or malformed value setup_dashboard_panels_visibility_registry(section_name) return get_dashboard_panels_visibility_by_section(section_name) result = [ (pairs[i], pairs[i + 1]) for i in range(len(pairs)) if i % 2 == 0] return result
def _to_legacy_path(dict_path): """Convert a tuple of ints and strings in a legacy "Path". .. note: This assumes, but does not verify, that each entry in ``dict_path`` is valid (i.e. doesn't have more than one key out of "name" / "id"). :type dict_path: lsit :param dict_path: The "structured" path for a key, i.e. it is a list of dictionaries, each of which has "kind" and one of "name" / "id" as keys. :rtype: :class:`._app_engine_key_pb2.Path` :returns: The legacy path corresponding to ``dict_path``. """ elements = [] for part in dict_path: element_kwargs = {"type": part["kind"]} if "id" in part: element_kwargs["id"] = part["id"] elif "name" in part: element_kwargs["name"] = part["name"] element = _app_engine_key_pb2.Path.Element(**element_kwargs) elements.append(element) return _app_engine_key_pb2.Path(element=elements)
Convert a tuple of ints and strings in a legacy "Path". .. note: This assumes, but does not verify, that each entry in ``dict_path`` is valid (i.e. doesn't have more than one key out of "name" / "id"). :type dict_path: lsit :param dict_path: The "structured" path for a key, i.e. it is a list of dictionaries, each of which has "kind" and one of "name" / "id" as keys. :rtype: :class:`._app_engine_key_pb2.Path` :returns: The legacy path corresponding to ``dict_path``.
Below is the the instruction that describes the task: ### Input: Convert a tuple of ints and strings in a legacy "Path". .. note: This assumes, but does not verify, that each entry in ``dict_path`` is valid (i.e. doesn't have more than one key out of "name" / "id"). :type dict_path: lsit :param dict_path: The "structured" path for a key, i.e. it is a list of dictionaries, each of which has "kind" and one of "name" / "id" as keys. :rtype: :class:`._app_engine_key_pb2.Path` :returns: The legacy path corresponding to ``dict_path``. ### Response: def _to_legacy_path(dict_path): """Convert a tuple of ints and strings in a legacy "Path". .. note: This assumes, but does not verify, that each entry in ``dict_path`` is valid (i.e. doesn't have more than one key out of "name" / "id"). :type dict_path: lsit :param dict_path: The "structured" path for a key, i.e. it is a list of dictionaries, each of which has "kind" and one of "name" / "id" as keys. :rtype: :class:`._app_engine_key_pb2.Path` :returns: The legacy path corresponding to ``dict_path``. """ elements = [] for part in dict_path: element_kwargs = {"type": part["kind"]} if "id" in part: element_kwargs["id"] = part["id"] elif "name" in part: element_kwargs["name"] = part["name"] element = _app_engine_key_pb2.Path.Element(**element_kwargs) elements.append(element) return _app_engine_key_pb2.Path(element=elements)
def trigger(self, source, actions, event_args): """ Perform actions as a result of an event listener (TRIGGER) """ type = BlockType.TRIGGER return self.action_block(source, actions, type, event_args=event_args)
Perform actions as a result of an event listener (TRIGGER)
Below is the the instruction that describes the task: ### Input: Perform actions as a result of an event listener (TRIGGER) ### Response: def trigger(self, source, actions, event_args): """ Perform actions as a result of an event listener (TRIGGER) """ type = BlockType.TRIGGER return self.action_block(source, actions, type, event_args=event_args)
async def create_scene(self, scene_name, room_id) -> Scene: """Create a scene and returns the scene object. :raises PvApiError when something is wrong with the hub. """ _raw = await self._scenes_entry_point.create_scene(room_id, scene_name) result = Scene(_raw, self.request) self.scenes.append(result) return result
Create a scene and returns the scene object. :raises PvApiError when something is wrong with the hub.
Below is the the instruction that describes the task: ### Input: Create a scene and returns the scene object. :raises PvApiError when something is wrong with the hub. ### Response: async def create_scene(self, scene_name, room_id) -> Scene: """Create a scene and returns the scene object. :raises PvApiError when something is wrong with the hub. """ _raw = await self._scenes_entry_point.create_scene(room_id, scene_name) result = Scene(_raw, self.request) self.scenes.append(result) return result
def _join_info_fields(self): """Updates info attribute from info dict.""" if self.info_dict: info_fields = [] if len(self.info_dict) > 1: self.info_dict.pop(".", None) for field, value in self.info_dict.items(): if field == value: info_fields.append(value) else: info_fields.append("=".join([field, value])) self.info = ";".join(info_fields) else: self.info = "."
Updates info attribute from info dict.
Below is the the instruction that describes the task: ### Input: Updates info attribute from info dict. ### Response: def _join_info_fields(self): """Updates info attribute from info dict.""" if self.info_dict: info_fields = [] if len(self.info_dict) > 1: self.info_dict.pop(".", None) for field, value in self.info_dict.items(): if field == value: info_fields.append(value) else: info_fields.append("=".join([field, value])) self.info = ";".join(info_fields) else: self.info = "."
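A stand-alone sketch of the same INFO-dict flattening with invented VCF fields: flag-style entries (key equal to value) are emitted bare, and the "." placeholder is dropped once real fields exist.

info_dict = {"DP": "35", "SOMATIC": "SOMATIC", ".": "."}

if info_dict:
    if len(info_dict) > 1:
        info_dict.pop(".", None)
    info_fields = [v if k == v else "=".join([k, v]) for k, v in info_dict.items()]
    info = ";".join(info_fields)
else:
    info = "."

print(info)   # DP=35;SOMATIC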
def create_token(self, request, refresh_token=False, **kwargs): """ Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token: """ if "save_token" in kwargs: warnings.warn("`save_token` has been deprecated, it was not called internally." "If you do, call `request_validator.save_token()` instead.", DeprecationWarning) if callable(self.expires_in): expires_in = self.expires_in(request) else: expires_in = self.expires_in request.expires_in = expires_in token = { 'access_token': self.token_generator(request), 'expires_in': expires_in, 'token_type': 'Bearer', } # If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but # there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so # all tokens issued are for the entire set of requested scopes. if request.scopes is not None: token['scope'] = ' '.join(request.scopes) if refresh_token: if (request.refresh_token and not self.request_validator.rotate_refresh_token(request)): token['refresh_token'] = request.refresh_token else: token['refresh_token'] = self.refresh_token_generator(request) token.update(request.extra_credentials or {}) return OAuth2Token(token)
Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token:
Below is the the instruction that describes the task: ### Input: Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token: ### Response: def create_token(self, request, refresh_token=False, **kwargs): """ Create a BearerToken, by default without refresh token. :param request: OAuthlib request. :type request: oauthlib.common.Request :param refresh_token: """ if "save_token" in kwargs: warnings.warn("`save_token` has been deprecated, it was not called internally." "If you do, call `request_validator.save_token()` instead.", DeprecationWarning) if callable(self.expires_in): expires_in = self.expires_in(request) else: expires_in = self.expires_in request.expires_in = expires_in token = { 'access_token': self.token_generator(request), 'expires_in': expires_in, 'token_type': 'Bearer', } # If provided, include - this is optional in some cases https://tools.ietf.org/html/rfc6749#section-3.3 but # there is currently no mechanism to coordinate issuing a token for only a subset of the requested scopes so # all tokens issued are for the entire set of requested scopes. if request.scopes is not None: token['scope'] = ' '.join(request.scopes) if refresh_token: if (request.refresh_token and not self.request_validator.rotate_refresh_token(request)): token['refresh_token'] = request.refresh_token else: token['refresh_token'] = self.refresh_token_generator(request) token.update(request.extra_credentials or {}) return OAuth2Token(token)
def symlink_bundles(self, app, bundle_dir): """For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. """ for bundle_counter, bundle in enumerate(app.bundles): count = 0 for path, relpath in bundle.filemap.items(): bundle_path = os.path.join(bundle_dir, relpath) count += 1 if os.path.exists(bundle_path): continue if os.path.isfile(path): safe_mkdir(os.path.dirname(bundle_path)) os.symlink(path, bundle_path) elif os.path.isdir(path): safe_mkdir(bundle_path) if count == 0: raise TargetDefinitionException(app.target, 'Bundle index {} of "bundles" field ' 'does not match any files.'.format(bundle_counter))
For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle.
Below is the the instruction that describes the task: ### Input: For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. ### Response: def symlink_bundles(self, app, bundle_dir): """For each bundle in the given app, symlinks relevant matched paths. Validates that at least one path was matched by a bundle. """ for bundle_counter, bundle in enumerate(app.bundles): count = 0 for path, relpath in bundle.filemap.items(): bundle_path = os.path.join(bundle_dir, relpath) count += 1 if os.path.exists(bundle_path): continue if os.path.isfile(path): safe_mkdir(os.path.dirname(bundle_path)) os.symlink(path, bundle_path) elif os.path.isdir(path): safe_mkdir(bundle_path) if count == 0: raise TargetDefinitionException(app.target, 'Bundle index {} of "bundles" field ' 'does not match any files.'.format(bundle_counter))
def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
Actually highlight the block
Below is the the instruction that describes the task: ### Input: Actually highlight the block ### Response: def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
def point_distance(point1, point2): """ calculate the distance between two points on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance """ lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c) * 1000
calculate the distance between two points on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance
Below is the the instruction that describes the task: ### Input: calculate the distance between two points on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance ### Response: def point_distance(point1, point2): """ calculate the distance between two points on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point one geojson object point2 -- point two geojson object return distance """ lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c) * 1000
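A stand-alone version of the same haversine computation using plain coordinates instead of GeoJSON objects, checked against a familiar pair of cities:

import math

def haversine_metres(lon1, lat1, lon2, lat2):
    # Same formula as above with plain floats instead of GeoJSON points.
    deg_lat = math.radians(lat2 - lat1)
    deg_lon = math.radians(lon2 - lon1)
    a = math.sin(deg_lat / 2) ** 2 + \
        math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(deg_lon / 2) ** 2
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return (6371 * c) * 1000

# London -> Paris great-circle distance
print(round(haversine_metres(-0.1278, 51.5074, 2.3522, 48.8566) / 1000))   # roughly 344 km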
def get_dependencies(self, id, **kwargs): """ Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_dependencies_with_http_info(id, **kwargs) else: (data) = self.get_dependencies_with_http_info(id, **kwargs) return data
Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. ### Response: def get_dependencies(self, id, **kwargs): """ Get the direct dependencies of the specified configuration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_dependencies(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_dependencies_with_http_info(id, **kwargs) else: (data) = self.get_dependencies_with_http_info(id, **kwargs) return data
def searchForUsers(self, name, limit=10): """ Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed """ params = {"search": name, "limit": limit} j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_USER, params=params)) return [User._from_graphql(node) for node in j[name]["users"]["nodes"]]
Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed
Below is the the instruction that describes the task: ### Input: Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed ### Response: def searchForUsers(self, name, limit=10): """ Find and get user by his/her name :param name: Name of the user :param limit: The max. amount of users to fetch :return: :class:`models.User` objects, ordered by relevance :rtype: list :raises: FBchatException if request failed """ params = {"search": name, "limit": limit} j = self.graphql_request(GraphQL(query=GraphQL.SEARCH_USER, params=params)) return [User._from_graphql(node) for node in j[name]["users"]["nodes"]]
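A hedged usage sketch through fbchat's public client; the credentials are placeholders, the attribute names are assumptions, and logging in this way is subject to the library's own constraints.

from fbchat import Client

client = Client("<email>", "<password>")       # placeholder credentials
users = client.searchForUsers("Tom", limit=5)
for user in users:
    print(user.uid, user.name)
client.logout()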
def _load_activity(self, activity): """ Iterates trough the all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path. """ fpths = [] full_path = '' errors = [] paths = settings.ACTIVITY_MODULES_IMPORT_PATHS number_of_paths = len(paths) for index_no in range(number_of_paths): full_path = "%s.%s" % (paths[index_no], activity) for look4kls in (0, 1): try: self.current.log.info("try to load from %s[%s]" % (full_path, look4kls)) kls, cls_name, cls_method = self._import_object(full_path, look4kls) if cls_method: self.current.log.info("WILLCall %s(current).%s()" % (kls, cls_method)) self.wf_activities[activity] = lambda crnt: getattr(kls(crnt), cls_method)() else: self.wf_activities[activity] = kls return except (ImportError, AttributeError): fpths.append(full_path) errmsg = "{activity} not found under these paths:\n\n >>> {paths} \n\n" \ "Error Messages:\n {errors}" errors.append("\n========================================================>\n" "| PATH | %s" "\n========================================================>\n\n" "%s" % (full_path, traceback.format_exc())) assert index_no != number_of_paths - 1, errmsg.format(activity=activity, paths='\n >>> '.join( set(fpths)), errors='\n\n'.join(errors) ) except: self.current.log.exception("Cannot found the %s" % activity)
Iterates trough the all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path.
Below is the the instruction that describes the task: ### Input: Iterates trough the all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path. ### Response: def _load_activity(self, activity): """ Iterates trough the all enabled `~zengine.settings.ACTIVITY_MODULES_IMPORT_PATHS` to find the given path. """ fpths = [] full_path = '' errors = [] paths = settings.ACTIVITY_MODULES_IMPORT_PATHS number_of_paths = len(paths) for index_no in range(number_of_paths): full_path = "%s.%s" % (paths[index_no], activity) for look4kls in (0, 1): try: self.current.log.info("try to load from %s[%s]" % (full_path, look4kls)) kls, cls_name, cls_method = self._import_object(full_path, look4kls) if cls_method: self.current.log.info("WILLCall %s(current).%s()" % (kls, cls_method)) self.wf_activities[activity] = lambda crnt: getattr(kls(crnt), cls_method)() else: self.wf_activities[activity] = kls return except (ImportError, AttributeError): fpths.append(full_path) errmsg = "{activity} not found under these paths:\n\n >>> {paths} \n\n" \ "Error Messages:\n {errors}" errors.append("\n========================================================>\n" "| PATH | %s" "\n========================================================>\n\n" "%s" % (full_path, traceback.format_exc())) assert index_no != number_of_paths - 1, errmsg.format(activity=activity, paths='\n >>> '.join( set(fpths)), errors='\n\n'.join(errors) ) except: self.current.log.exception("Cannot found the %s" % activity)
def get_job(self, id_job, hub=None, group=None, project=None, access_token=None, user_id=None): """ Get the information about a job, by its id """ if access_token: self.req.credential.set_token(access_token) if user_id: self.req.credential.set_user_id(user_id) if not self.check_credentials(): respond = {} respond["status"] = 'Error' respond["error"] = "Not credentials valid" return respond if not id_job: respond = {} respond["status"] = 'Error' respond["error"] = "Job ID not specified" return respond url = get_job_url(self.config, hub, group, project) url += '/' + id_job job = self.req.get(url) if 'qasms' in job: for qasm in job['qasms']: if ('result' in qasm) and ('data' in qasm['result']): qasm['data'] = qasm['result']['data'] del qasm['result']['data'] for key in qasm['result']: qasm['data'][key] = qasm['result'][key] del qasm['result'] return job
Get the information about a job, by its id
Below is the the instruction that describes the task: ### Input: Get the information about a job, by its id ### Response: def get_job(self, id_job, hub=None, group=None, project=None, access_token=None, user_id=None): """ Get the information about a job, by its id """ if access_token: self.req.credential.set_token(access_token) if user_id: self.req.credential.set_user_id(user_id) if not self.check_credentials(): respond = {} respond["status"] = 'Error' respond["error"] = "Not credentials valid" return respond if not id_job: respond = {} respond["status"] = 'Error' respond["error"] = "Job ID not specified" return respond url = get_job_url(self.config, hub, group, project) url += '/' + id_job job = self.req.get(url) if 'qasms' in job: for qasm in job['qasms']: if ('result' in qasm) and ('data' in qasm['result']): qasm['data'] = qasm['result']['data'] del qasm['result']['data'] for key in qasm['result']: qasm['data'][key] = qasm['result'][key] del qasm['result'] return job
def close_db(self, exception): """Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request.""" if self.is_connected: if exception is None and not transaction.isDoomed(): transaction.commit() else: transaction.abort() self.connection.close()
Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request.
Below is the the instruction that describes the task: ### Input: Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request. ### Response: def close_db(self, exception): """Added as a `~flask.Flask.teardown_request` to applications to commit the transaction and disconnect ZODB if it was used during the request.""" if self.is_connected: if exception is None and not transaction.isDoomed(): transaction.commit() else: transaction.abort() self.connection.close()
def load(self, infile): ''' Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file outfile: A file-like object from which to retrieve the serialized model. ''' model = pickle.load(infile) self.__dict__.update(model.__dict__)
Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file outfile: A file-like object from which to retrieve the serialized model.
Below is the the instruction that describes the task: ### Input: Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file outfile: A file-like object from which to retrieve the serialized model. ### Response: def load(self, infile): ''' Deserialize a model from a stored file. By default, unpickle an entire object. If `dump` is overridden to use a different storage format, `load` should be as well. :param file outfile: A file-like object from which to retrieve the serialized model. ''' model = pickle.load(infile) self.__dict__.update(model.__dict__)
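A stand-alone round-trip sketch of the dump/load convention described above, using an in-memory buffer and an invented Model class:

import io
import pickle

class Model:
    # Invented minimal model following the dump/load convention above.
    def __init__(self, weights=None):
        self.weights = weights

    def dump(self, outfile):
        pickle.dump(self, outfile)

    def load(self, infile):
        other = pickle.load(infile)
        self.__dict__.update(other.__dict__)

buf = io.BytesIO()
Model(weights=[1, 2, 3]).dump(buf)
buf.seek(0)
restored = Model()
restored.load(buf)
print(restored.weights)   # [1, 2, 3]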
def date_to_number(self, date):
    """
    Converts a date or datetime instance to a corresponding float value.
    """
    if isinstance(date, datetime.datetime):
        delta = date - self._null_date
    elif isinstance(date, datetime.date):
        delta = date - self._null_date.date()
    else:
        raise TypeError(date)
    return delta.days + delta.seconds / (24.0 * 60 * 60)
Converts a date or datetime instance to a corresponding float value.
Below is the instruction that describes the task:
### Input:
Converts a date or datetime instance to a corresponding float value.
### Response:
def date_to_number(self, date):
    """
    Converts a date or datetime instance to a corresponding float value.
    """
    if isinstance(date, datetime.datetime):
        delta = date - self._null_date
    elif isinstance(date, datetime.date):
        delta = date - self._null_date.date()
    else:
        raise TypeError(date)
    return delta.days + delta.seconds / (24.0 * 60 * 60)
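The return statement counts whole days plus a fractional day derived from the remaining seconds; a worked sketch, assuming `_null_date` is midnight of some epoch date (the actual epoch used by the class is not shown above):
import datetime

null_date = datetime.datetime(1899, 12, 30)            # assumed epoch
date = datetime.datetime(1899, 12, 31, 12, 0)          # one and a half days later
delta = date - null_date
print(delta.days + delta.seconds / (24.0 * 60 * 60))   # -> 1.5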
def get_text_position_and_inner_alignment(ax, pos,
                                          scale=default_text_relative_padding,
                                          with_transAxes_kwargs=True):
    """Return text position and its alignment in its bounding box.
    The returned position is given in Axes coordinates, as defined in the
    matplotlib documentation on transformations.
    The returned alignment is given as a dictionary, which can be passed as a
    fontdict to text-relevant methods.
    """
    xy = get_text_position_in_ax_coord(ax, pos, scale=scale)
    alignment_fontdict = get_text_alignment(pos)
    if with_transAxes_kwargs:
        alignment_fontdict = {**alignment_fontdict, **{'transform': ax.transAxes}}
    return xy, alignment_fontdict
Return text position and its alignment in its bounding box. The returned position is given in Axes coordinates, as defined in the matplotlib documentation on transformations. The returned alignment is given as a dictionary, which can be passed as a fontdict to text-relevant methods.
Below is the instruction that describes the task:
### Input:
Return text position and its alignment in its bounding box. The returned position is given in Axes coordinates, as defined in the matplotlib documentation on transformations. The returned alignment is given as a dictionary, which can be passed as a fontdict to text-relevant methods.
### Response:
def get_text_position_and_inner_alignment(ax, pos,
                                          scale=default_text_relative_padding,
                                          with_transAxes_kwargs=True):
    """Return text position and its alignment in its bounding box.
    The returned position is given in Axes coordinates, as defined in the
    matplotlib documentation on transformations.
    The returned alignment is given as a dictionary, which can be passed as a
    fontdict to text-relevant methods.
    """
    xy = get_text_position_in_ax_coord(ax, pos, scale=scale)
    alignment_fontdict = get_text_alignment(pos)
    if with_transAxes_kwargs:
        alignment_fontdict = {**alignment_fontdict, **{'transform': ax.transAxes}}
    return xy, alignment_fontdict
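A usage sketch, assuming the helper and its `get_text_position_in_ax_coord`/`get_text_alignment` dependencies are importable and that 'upper right' is one of the position keywords it accepts (an assumption):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
xy, fontdict = get_text_position_and_inner_alignment(ax, 'upper right')
# fontdict already carries transform=ax.transAxes, so xy is interpreted in Axes coordinates
ax.text(*xy, 'panel label', **fontdict)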
def update_properties(self, new_properties):
    """ Update config property values
    Each property name must be equal to 'Section_option' of the corresponding
    config property
    :param new_properties: dict with new property values
    """
    [self._update_property_from_dict(section, option, new_properties)
     for section in self.sections() for option in self.options(section)]
Update config property values. Each property name must be equal to 'Section_option' of the corresponding config property.
:param new_properties: dict with new property values
Below is the instruction that describes the task:
### Input:
Update config property values. Each property name must be equal to 'Section_option' of the corresponding config property.
:param new_properties: dict with new property values
### Response:
def update_properties(self, new_properties):
    """ Update config property values
    Each property name must be equal to 'Section_option' of the corresponding
    config property
    :param new_properties: dict with new property values
    """
    [self._update_property_from_dict(section, option, new_properties)
     for section in self.sections() for option in self.options(section)]
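A usage sketch, assuming the class wraps a configparser-style configuration that already contains the named sections and options (the section and option names below are illustrative):
config.update_properties({
    'Server_port': '8080',     # section 'Server', option 'port'
    'Logging_level': 'DEBUG',  # section 'Logging', option 'level'
})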
def _process_transfer(self, ud, ase, offsets, data):
    # type: (Uploader, blobxfer.models.upload.Descriptor,
    #        blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.upload.Offsets, bytes) -> None
    """Process transfer instructions
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    :param blobxfer.models.azure.StorageEntity ase: Storage entity
    :param blobxfer.models.upload.Offsets offsets: offsets
    :param bytes data: data to upload
    """
    # issue put range
    self._put_data(ud, ase, offsets, data)
    # accounting
    with self._transfer_lock:
        if ud.local_path.use_stdin:
            self._upload_bytes_total += offsets.num_bytes
        elif offsets.chunk_num == 0:
            self._upload_bytes_total += ase.size
        self._upload_bytes_sofar += offsets.num_bytes
        self._transfer_set.remove(
            blobxfer.operations.upload.Uploader.create_unique_transfer_id(
                ud.local_path, ase, offsets))
    ud.complete_offset_upload(offsets.chunk_num)
    # add descriptor back to upload queue only for append blobs
    if ud.entity.mode == blobxfer.models.azure.StorageModes.Append:
        self._upload_queue.put(ud)
    # update progress bar
    self._update_progress_bar(stdin=ud.local_path.use_stdin)
Process transfer instructions
:param Uploader self: this
:param blobxfer.models.upload.Descriptor ud: upload descriptor
:param blobxfer.models.azure.StorageEntity ase: Storage entity
:param blobxfer.models.upload.Offsets offsets: offsets
:param bytes data: data to upload
Below is the instruction that describes the task:
### Input:
Process transfer instructions
:param Uploader self: this
:param blobxfer.models.upload.Descriptor ud: upload descriptor
:param blobxfer.models.azure.StorageEntity ase: Storage entity
:param blobxfer.models.upload.Offsets offsets: offsets
:param bytes data: data to upload
### Response:
def _process_transfer(self, ud, ase, offsets, data):
    # type: (Uploader, blobxfer.models.upload.Descriptor,
    #        blobxfer.models.azure.StorageEntity,
    #        blobxfer.models.upload.Offsets, bytes) -> None
    """Process transfer instructions
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    :param blobxfer.models.azure.StorageEntity ase: Storage entity
    :param blobxfer.models.upload.Offsets offsets: offsets
    :param bytes data: data to upload
    """
    # issue put range
    self._put_data(ud, ase, offsets, data)
    # accounting
    with self._transfer_lock:
        if ud.local_path.use_stdin:
            self._upload_bytes_total += offsets.num_bytes
        elif offsets.chunk_num == 0:
            self._upload_bytes_total += ase.size
        self._upload_bytes_sofar += offsets.num_bytes
        self._transfer_set.remove(
            blobxfer.operations.upload.Uploader.create_unique_transfer_id(
                ud.local_path, ase, offsets))
    ud.complete_offset_upload(offsets.chunk_num)
    # add descriptor back to upload queue only for append blobs
    if ud.entity.mode == blobxfer.models.azure.StorageModes.Append:
        self._upload_queue.put(ud)
    # update progress bar
    self._update_progress_bar(stdin=ud.local_path.use_stdin)
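The method combines the put with lock-protected byte accounting shared across uploader threads; a stripped-down sketch of that accounting pattern, independent of blobxfer's own types (all names below are illustrative, and the transfer-set and queue bookkeeping are omitted):
import threading

transfer_lock = threading.Lock()
bytes_total = 0
bytes_sofar = 0

def account(chunk_num, num_bytes, entity_size):
    # mirror of the counter updates above: the entity size is added once,
    # on the first chunk, while progress grows by each chunk's byte count
    global bytes_total, bytes_sofar
    with transfer_lock:
        if chunk_num == 0:
            bytes_total += entity_size
        bytes_sofar += num_bytes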
def attach_arguments(cls, parser, prefix='--', skip_formats=False,
                     format_excludes=None, format_title=None, format_desc=None,
                     skip_render=False, render_excludes=None,
                     render_title=None, render_desc=None,
                     skip_filters=False, filter_excludes=None,
                     filter_title=None, filter_desc=None):
    """ Attach argparse arguments to an argparse parser/group with table
    options.  These are renderer options and filtering options with the
    ability to turn off headers and footers.  The return value is a function
    that parses an argparse.Namespace object into keyword arguments for a
    layout.Table constructor. """
    convs = []
    if not skip_formats:
        attach = cls.attach_format_arguments
        convs.append(attach(parser, prefix, format_excludes, format_title,
                            format_desc))
    if not skip_render:
        attach = cls.attach_render_arguments
        convs.append(attach(parser, prefix, render_excludes, render_title,
                            render_desc))
    if not skip_filters:
        attach = cls.attach_filter_arguments
        convs.append(attach(parser, prefix, filter_excludes, filter_title,
                            filter_desc))
    def argparse_ns_to_table_opts(ns):
        options = {}
        for conv in convs:
            options.update(conv(ns))
        return options
    return argparse_ns_to_table_opts
Attach argparse arguments to an argparse parser/group with table options. These are renderer options and filtering options with the ability to turn off headers and footers. The return value is a function that parses an argparse.Namespace object into keyword arguments for a layout.Table constructor.
Below is the instruction that describes the task:
### Input:
Attach argparse arguments to an argparse parser/group with table options. These are renderer options and filtering options with the ability to turn off headers and footers. The return value is a function that parses an argparse.Namespace object into keyword arguments for a layout.Table constructor.
### Response:
def attach_arguments(cls, parser, prefix='--', skip_formats=False,
                     format_excludes=None, format_title=None, format_desc=None,
                     skip_render=False, render_excludes=None,
                     render_title=None, render_desc=None,
                     skip_filters=False, filter_excludes=None,
                     filter_title=None, filter_desc=None):
    """ Attach argparse arguments to an argparse parser/group with table
    options.  These are renderer options and filtering options with the
    ability to turn off headers and footers.  The return value is a function
    that parses an argparse.Namespace object into keyword arguments for a
    layout.Table constructor. """
    convs = []
    if not skip_formats:
        attach = cls.attach_format_arguments
        convs.append(attach(parser, prefix, format_excludes, format_title,
                            format_desc))
    if not skip_render:
        attach = cls.attach_render_arguments
        convs.append(attach(parser, prefix, render_excludes, render_title,
                            render_desc))
    if not skip_filters:
        attach = cls.attach_filter_arguments
        convs.append(attach(parser, prefix, filter_excludes, filter_title,
                            filter_desc))
    def argparse_ns_to_table_opts(ns):
        options = {}
        for conv in convs:
            options.update(conv(ns))
        return options
    return argparse_ns_to_table_opts
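A usage sketch, assuming `TableClass` stands in for the class that defines the classmethod above (its real name is not shown here) and that the attached option names follow the given prefix:
import argparse

parser = argparse.ArgumentParser()
ns_to_opts = TableClass.attach_arguments(parser, skip_filters=True)
ns = parser.parse_args([])        # or the real command-line arguments
table_options = ns_to_opts(ns)    # keyword arguments for a layout.Table(...)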