def niftilist_mask_to_array(img_filelist, mask_file=None, outdtype=None): """From the list of absolute paths to nifti files, creates a Numpy array with the masked data. Parameters ---------- img_filelist: list of str List of absolute file paths to nifti files. All nifti files must have the same shape. mask_file: str Path to a Nifti mask file. Should be the same shape as the files in img_filelist. outdtype: dtype Type of the elements of the array, if not set will obtain the dtype from the first nifti file. Returns ------- outmat: Numpy array with shape N x number of nonzero mask voxels, containing the N masked files as flat vectors. mask_data: Numpy array with the mask volume; use np.where(mask_data) to recover the 3D spatial indices of the masked voxels for remapping. """ img = check_img(img_filelist[0]) if not outdtype: outdtype = img.dtype mask_data, _ = load_mask_data(mask_file) indices = np.where(mask_data) mask = check_img(mask_file) outmat = np.zeros((len(img_filelist), np.count_nonzero(mask_data)), dtype=outdtype) for i, img_item in enumerate(img_filelist): img = check_img(img_item) if not are_compatible_imgs(img, mask): raise NiftiFilesNotCompatible(repr_imgs(img), repr_imgs(mask_file)) vol = get_img_data(img) outmat[i, :] = vol[indices] return outmat, mask_data
def create(_): """Create a client for Service Fabric APIs.""" endpoint = client_endpoint() if not endpoint: raise CLIError("Connection endpoint not found. " "Before running sfctl commands, connect to a cluster using " "the 'sfctl cluster select' command.") no_verify = no_verify_setting() if security_type() == 'aad': auth = AdalAuthentication(no_verify) else: cert = cert_info() ca_cert = ca_cert_info() auth = ClientCertAuthentication(cert, ca_cert, no_verify) return ServiceFabricClientAPIs(auth, base_url=endpoint)
def aggregate(self, clazz, new_col, *args): """ Aggregate the rows of the DataFrame into a single value. :param clazz: name of a class that extends class Callable :type clazz: class :param new_col: name of the new column :type new_col: str :param args: list of column names of the object that function should be applied to :type args: tuple :return: returns a new dataframe object with the aggregated value :rtype: DataFrame """ if is_callable(clazz) and not is_none(new_col) and has_elements(*args): return self.__do_aggregate(clazz, new_col, *args)
def subset(self, *args): """ Subset only some of the columns of the DataFrame. :param args: list of column names of the object that should be subsetted :type args: tuple :return: returns dataframe with only the columns you selected :rtype: DataFrame """ cols = {} for k in self.colnames: if k in args: cols[str(k)] = \ self.__data_columns[self.colnames.index(k)].values return DataFrame(**cols)
def modify(self, clazz, new_col, *args): """ Modify some columns (i.e. apply a function) and add the result to the table. :param clazz: name of a class that extends class Callable :type clazz: class :param new_col: name of the new column :type new_col: str :param args: list of column names of the object that function should be applied to :type args: tuple :return: returns a new dataframe object with the modified values, i.e. the new column :rtype: DataFrame """ if is_callable(clazz) and not is_none(new_col) and has_elements(*args): return self.__do_modify(clazz, new_col, *args)
def group(*args): """ Pipeable grouping method. Takes either - a dataframe and a tuple of strings for grouping, - a tuple of strings if a dataframe has already been piped into. :Example: group(dataframe, "column") :Example: dataframe >> group("column") :param args: tuple of arguments :type args: tuple :return: returns a grouped dataframe object :rtype: GroupedDataFrame """ if args and isinstance(args[0], dataframe.DataFrame): return args[0].group(*args[1:]) elif not args: raise ValueError("No arguments provided") else: return pipeable.Pipeable(pipeable.PipingMethod.GROUP, *args)
def aggregate(*args): """ Pipeable aggregation method. Takes either - a dataframe and a tuple of arguments required for aggregation, - a tuple of arguments if a dataframe has already been piped into. In any case one argument has to be a class that extends callable. :Example: aggregate(dataframe, Function, "new_col_name", "old_col_name") :Example: dataframe >> aggregate(Function, "new_col_name", "old_col_name") :param args: tuple of arguments :type args: tuple :return: returns a dataframe object :rtype: DataFrame """ if args and isinstance(args[0], dataframe.DataFrame): return args[0].aggregate(args[1], args[2], *args[3:]) elif not args: raise ValueError("No arguments provided") else: return pipeable.Pipeable(pipeable.PipingMethod.AGGREGATE, *args)
def subset(*args): """ Pipeable subsetting method. Takes either - a dataframe and a tuple of arguments required for subsetting, - a tuple of arguments if a dataframe has already been piped into. :Example: subset(dataframe, "column") :Example: dataframe >> subset("column") :param args: tuple of arguments :type args: tuple :return: returns a dataframe object :rtype: DataFrame """ if args and isinstance(args[0], dataframe.DataFrame): return args[0].subset(*args[1:]) elif not args: raise ValueError("No arguments provided") else: return pipeable.Pipeable(pipeable.PipingMethod.SUBSET, *args)
def modify(*args): """ Pipeable modification method. Takes either - a dataframe and a tuple of arguments required for modification, - a tuple of arguments if a dataframe has already been piped into. In any case one argument has to be a class that extends callable. :Example: modify(dataframe, Function, "new_col_name", "old_col_name") :Example: dataframe >> modify(Function, "new_col_name", "old_col_name") :param args: tuple of arguments :type args: tuple :return: returns a dataframe object :rtype: DataFrame """ if args and isinstance(args[0], dataframe.DataFrame): return args[0].modify(args[1], args[2], *args[3:]) elif not args: raise ValueError("No arguments provided") else: return pipeable.Pipeable(pipeable.PipingMethod.MODIFY, *args)
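A minimal usage sketch of the four pipeable verbs above, assuming this library exposes a DataFrame constructor that accepts column keyword arguments and a Callable subclass (here called Mean, a made-up name) that computes a mean:

df = dataframe.DataFrame(a=[1, 1, 2, 2], b=[10, 20, 30, 40])

# direct call style
grouped = group(df, "a")

# piping style: the right-hand side builds a Pipeable that is applied to
# the dataframe flowing in from the left
result = df >> group("a") >> aggregate(Mean, "mean_b", "b")
smaller = df >> subset("b")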
def _escape_char(c, escape_char=ESCAPE_CHAR): """Escape a single character""" buf = [] for byte in c.encode('utf8'): buf.append(escape_char) buf.append('%X' % _ord(byte)) return ''.join(buf)
def escape(to_escape, safe=SAFE, escape_char=ESCAPE_CHAR, allow_collisions=False): """Escape a string so that it only contains characters in a safe set. Characters outside the safe list will be escaped with _%x_, where %x is the hex value of the character. If `allow_collisions` is True, occurrences of `escape_char` in the input will not be escaped. In this case, `unescape` cannot be used to reverse the transform because occurrences of the escape char in the resulting string are ambiguous. Only use this mode when: 1. collisions cannot occur or do not matter, and 2. unescape will never be called. .. versionadded: 1.0 allow_collisions argument. Prior to 1.0, behavior was the same as allow_collisions=False (default). """ if isinstance(to_escape, bytes): # always work on text to_escape = to_escape.decode('utf8') if not isinstance(safe, set): safe = set(safe) if allow_collisions: safe.add(escape_char) elif escape_char in safe: # escape char can't be in safe list safe.remove(escape_char) chars = [] for c in to_escape: if c in safe: chars.append(c) else: chars.append(_escape_char(c, escape_char)) return u''.join(chars)
def unescape(escaped, escape_char=ESCAPE_CHAR): """Unescape a string escaped with `escape` escape_char must be the same as that used in the call to escape. """ if isinstance(escaped, bytes): # always work on text escaped = escaped.decode('utf8') escape_pat = re.compile(re.escape(escape_char).encode('utf8') + b'([a-z0-9]{2})', re.IGNORECASE) buf = escape_pat.subn(_unescape_char, escaped.encode('utf8'))[0] return buf.decode('utf8')
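A short round-trip sketch for escape/unescape. SAFE and ESCAPE_CHAR are module constants not shown here; the concrete values below assume ESCAPE_CHAR is '_' and that ASCII letters and digits are in the safe set, which may differ in the real module:

escaped = escape("my-server/1")       # e.g. 'my_2Dserver_2F1' under the assumptions above
original = unescape(escaped)          # 'my-server/1' again
# with allow_collisions=True the escape char itself is left untouched, so the
# result may no longer round-trip through unescape
escape("a_b", allow_collisions=True)  # 'a_b'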
def can_send(self, user, notice_type): """ Determines whether this backend is allowed to send a notification to the given user and notice_type. """ from notification.models import NoticeSetting return NoticeSetting.for_user(user, notice_type, self.medium_id).send
def get_formatted_messages(self, formats, label, context): """ Returns a dictionary with the format identifier as the key. The values are fully rendered templates with the given context. """ format_templates = {} for fmt in formats: # conditionally turn off autoescaping for .txt extensions in format if fmt.endswith(".txt"): context.autoescape = False format_templates[fmt] = render_to_string(( "notification/%s/%s" % (label, fmt), "notification/%s" % fmt), context_instance=context) return format_templates
def copy_attributes(source, destination, ignore_patterns=[]): """ Copy the attributes from a source object to a destination object. """ for attr in _wildcard_filter(dir(source), *ignore_patterns): setattr(destination, attr, getattr(source, attr))
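A small sketch of copy_attributes. It relies on a _wildcard_filter helper that is not shown here; the assumption below is that attribute names matching ignore_patterns are skipped:

class Source(object): pass
class Dest(object): pass

src = Source()
src.host, src.port, src._secret = "localhost", 8080, "hidden"

dst = Dest()
copy_attributes(src, dst, ignore_patterns=["_*"])
# expected: dst.host == "localhost" and dst.port == 8080, while _secret and
# dunder names are skipped (assuming _wildcard_filter excludes matching names)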
def row(self, idx): """ Returns DataFrameRow of the DataFrame given its index. :param idx: the index of the row in the DataFrame. :return: returns a DataFrameRow """ return DataFrameRow(idx, [x[idx] for x in self], self.colnames)
def notice_settings(request): """ The notice settings view. Template: :template:`notification/notice_settings.html` Context: notice_types A list of all :model:`notification.NoticeType` objects. notice_settings A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA`` and ``rows`` containing a list of dictionaries: ``notice_type``, a :model:`notification.NoticeType` object and ``cells``, a list of tuples whose first value is suitable for use in forms and the second value is ``True`` or ``False`` depending on a ``request.POST`` variable called ``form_label``, whose valid value is ``on``. """ notice_types = NoticeType.objects.all() settings_table = [] for notice_type in notice_types: settings_row = [] for medium_id, medium_display in NOTICE_MEDIA: form_label = "%s_%s" % (notice_type.label, medium_id) setting = NoticeSetting.for_user(request.user, notice_type, medium_id) if request.method == "POST": if request.POST.get(form_label) == "on": if not setting.send: setting.send = True setting.save() else: if setting.send: setting.send = False setting.save() settings_row.append((form_label, setting.send)) settings_table.append({"notice_type": notice_type, "cells": settings_row}) if request.method == "POST": next_page = request.POST.get("next_page", ".") return HttpResponseRedirect(next_page) settings = { "column_headers": [medium_display for medium_id, medium_display in NOTICE_MEDIA], "rows": settings_table, } return render_to_response("notification/notice_settings.html", { "notice_types": notice_types, "notice_settings": settings, }, context_instance=RequestContext(request))
def query(self, input = '', params = {}): """Query Wolfram Alpha and return a Result object""" # Get and construct query parameters # Default parameters payload = {'input': input, 'appid': self.appid} # Additional parameters (from params), formatted for url for key, value in params.items(): # Check if value is list or tuple type (needs to be comma joined) if isinstance(value, (list, tuple)): payload[key] = ','.join(value) else: payload[key] = value # Catch any issues with connecting to Wolfram Alpha API try: r = requests.get("http://api.wolframalpha.com/v2/query", params=payload) # Raise Exception (to be returned as error) if r.status_code != 200: raise Exception('Invalid response status code: %s' % (r.status_code)) if r.encoding != 'utf-8': raise Exception('Invalid encoding: %s' % (r.encoding)) except Exception as e: return Result(error = e) return Result(xml = r.text)
def pods(self): """Return list of all Pod objects in result""" # Return empty list if xml_tree is not defined (error Result object) if not self.xml_tree: return [] # Create a Pod object for every pod group in xml return [Pod(elem) for elem in self.xml_tree.findall('pod')]
def format(self): """ Dictionary of available formats, corresponding to a list of the values Example: pod.format['plaintext'] will return a list of every plaintext content in the pod's subpods """ formats = {} # Iterate through all the tags (formats) in subpods # 'state' is a tag but not an acceptable format for subpod in self.root.findall('subpod'): # elem will be a specific format for elem in list(subpod): # skip any subpod state xml groups (not a format) if elem.tag == 'state': continue # Content of elem (specific format) content = elem.text # img needs special content packaging if elem.tag == 'img': content = {'url': elem.get('src'), 'alt': elem.get('alt'), 'title': elem.get('title'), 'width': int(elem.get('width', 0)), 'height': int(elem.get('height', 0))} # Create / append to return dict if elem.tag not in formats: formats[elem.tag] = [content] else: formats[elem.tag].append(content) return formats
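A hedged usage sketch of the query/pods/format trio above. The wrapper class name and the way appid is supplied are assumptions; only query(), pods and format appear in these snippets, format is assumed to be exposed as a property, and the error attribute on Result is assumed to mirror the error keyword it is constructed with:

client = WolframAlpha(appid="DEMO-APPID")   # hypothetical class name and key
result = client.query("speed of light", params={'format': ['plaintext', 'image']})
if result.error:                            # assumed attribute; query() returns Result(error=...) on failure
    print(result.error)
for pod in result.pods:
    for text in pod.format.get('plaintext', []):
        print(text)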
def find(self, *args): """ Find a node in the tree. If the node is not found it is added first and then returned. :param args: a tuple :return: returns the node """ curr_node = self.__root return self.__traverse(curr_node, 0, *args)
def get_notification_language(user): """ Returns site-specific notification language for this user. Raises LanguageStoreNotAvailable if this site does not use translated notifications. """ if getattr(settings, "NOTIFICATION_LANGUAGE_MODULE", False): try: app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(".") model = models.get_model(app_label, model_name) # pylint: disable-msg=W0212 language_model = model._default_manager.get(user__id__exact=user.id) if hasattr(language_model, "language"): return language_model.language except (ImportError, ImproperlyConfigured, model.DoesNotExist): raise LanguageStoreNotAvailable raise LanguageStoreNotAvailable
def send_now(users, label, extra_context=None, sender=None): """ Creates a new notice. This is intended to be how other apps create new notices. notification.send(user, "friends_invite_sent", {"spam": "eggs", "foo": "bar"}) """ sent = False if extra_context is None: extra_context = {} notice_type = NoticeType.objects.get(label=label) current_language = get_language() for user in users: # get user language for user from language store defined in # NOTIFICATION_LANGUAGE_MODULE setting try: language = get_notification_language(user) except LanguageStoreNotAvailable: language = None if language is not None: # activate the user's language activate(language) for backend in NOTIFICATION_BACKENDS.values(): if backend.can_send(user, notice_type): backend.deliver(user, sender, notice_type, extra_context) sent = True # reset environment to original language activate(current_language) return sent
def send(*args, **kwargs): """ A basic interface around both queue and send_now. This honors a global flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should be queued or not. A per call ``queue`` or ``now`` keyword argument can be used to always override the default global behavior. """ queue_flag = kwargs.pop("queue", False) now_flag = kwargs.pop("now", False) assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True." if queue_flag: return queue(*args, **kwargs) elif now_flag: return send_now(*args, **kwargs) else: if QUEUE_ALL: return queue(*args, **kwargs) else: return send_now(*args, **kwargs)
def queue(users, label, extra_context=None, sender=None): """ Queue the notification in NoticeQueueBatch. This allows for large amounts of user notifications to be deferred to a separate process running outside the webserver. """ if extra_context is None: extra_context = {} if isinstance(users, QuerySet): users = [row["pk"] for row in users.values("pk")] else: users = [user.pk for user in users] notices = [] for user in users: notices.append((user, label, extra_context, sender)) NoticeQueueBatch(pickled_data=base64.b64encode(pickle.dumps(notices))).save()
def write_table_pair_potential(func, dfunc=None, bounds=(1.0, 10.0), samples=1000, tollerance=1e-6, keyword='PAIR'): """A helper function to write LAMMPS pair potentials to a string. Assumes that functions are vectorized. Parameters ---------- func: function A function that will be evaluated for the force at each radius. Required to be numpy vectorizable. dfunc: function Optional. A function that will be evaluated for the energy at each radius. If not supplied the centered difference method will be used. Required to be numpy vectorizable. bounds: tuple, list Optional. Specifies min and max radius to evaluate the potential. Default 1 length unit, 10 length unit. samples: int Number of points to evaluate potential. Default 1000. Note that a low number of sample points will reduce accuracy. tollerance: float Value used for centered-difference differentiation. keyword: string LAMMPS keyword to use for the pair potential. This keyword will need to be used in the LAMMPS pair_coeff. Default ``PAIR`` """ r_min, r_max = bounds if dfunc is None: dfunc = lambda r: (func(r+tollerance) - func(r-tollerance)) / (2*tollerance) i = np.arange(1, samples+1) r = np.linspace(r_min, r_max, samples) forces = func(r) energies = dfunc(r) lines = ['%d %f %f %f\n' % (index, radius, force, energy) for index, radius, force, energy in zip(i, r, forces, energies)] return "%s\nN %d\n\n" % (keyword, samples) + ''.join(lines)
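A brief sketch of calling write_table_pair_potential with a numpy-vectorized radial function; the function, bounds, keyword and output file name below are purely illustrative:

import numpy as np

def repulsive(r):
    # simple 1/r^2 form, vectorized over a numpy array of radii
    return 1.0 / r ** 2

table_str = write_table_pair_potential(repulsive, bounds=(1.0, 5.0), samples=500, keyword='PAIR_AB')
with open('lammps.table', 'w') as handle:   # file name is just an example
    handle.write(table_str)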
def write_tersoff_potential(parameters): """Write tersoff potential file from parameters to string Parameters ---------- parameters: dict keys are tuple of elements with the values being the parameters length 14 """ lines = [] for (e1, e2, e3), params in parameters.items(): if len(params) != 14: raise ValueError('tersoff three body potential expects 14 parameters') lines.append(' '.join([e1, e2, e3] + ['{:16.8g}'.format(_) for _ in params])) return '\n'.join(lines)
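A matching sketch for write_tersoff_potential; the 14 numbers below are placeholders, not a physically meaningful parameterization:

params = {
    ('Si', 'Si', 'Si'): [3.0, 1.0, 1.3, 4.8, 2.0, 0.0, 1.0e5, 16.0,
                         -0.6, 0.78, 1.7, 471.2, 2.85, 0.15],
}
with open('SiSiSi.tersoff', 'w') as handle:  # file name is just an example
    handle.write(write_tersoff_potential(params))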
def subset(self, *args): """ Subset only some of the columns of the DataFrame. :param args: list of column names of the object that should be subsetted :type args: tuple :return: returns DataFrame with only the columns you selected :rtype: DataFrame """ args = list(args) args.extend([x for x in self.__grouping.grouping_colnames if x not in args]) return GroupedDataFrame(self.__grouping.ungroup().subset(*args), *self.__grouping.grouping_colnames)
def modify(self, clazz, new_col, *args): """ Modify some columns (i.e. apply a function) and add the result to the table. :param clazz: name of a class that extends class Callable :type clazz: class :param new_col: name of the new column :type new_col: str :param args: list of column names of the object that function should be applied to :type args: tuple :return: returns a new GroupedDataFrame object with the modified values, i.e. the new column of values :rtype: GroupedDataFrame """ if is_callable(clazz) \ and not is_none(new_col) \ and has_elements(*args) \ and is_disjoint(self.__grouping.grouping_colnames, args, __DISJOINT_SETS_ERROR__): return self.__do_modify(clazz, new_col, *args)
def aggregate(self, clazz, new_col, *args): """ Aggregate the rows of each group into a single value. :param clazz: name of a class that extends class Callable :type clazz: class :param new_col: name of the new column :type new_col: str :param args: list of column names of the object that function should be applied to :type args: varargs :return: returns a new dataframe object with the aggregated value :rtype: DataFrame """ if is_callable(clazz) \ and not is_none(new_col) \ and has_elements(*args) \ and is_disjoint(self.__grouping.grouping_colnames, args, __DISJOINT_SETS_ERROR__): return self.__do_aggregate(clazz, new_col, *args)
def reader(ltsvfile, labels=None): """Make LTSV Reader for reading selected labels. :param ltsvfile: iterable of lines. :param labels: sequence of labels. (optional) :return: generator of record in [[label, value], ...] form. """ label_pattern = re.compile(r"^[0-9A-Za-z_.-]+:") if labels is not None: prefixes = tuple(L + ':' for L in labels if label_pattern.match(L + ':')) for record in ltsvfile: record = record.rstrip('\r\n') yield [x.split(':', 1) for x in record.split('\t') if x.startswith(prefixes)] return for record in ltsvfile: record = record.rstrip('\r\n') yield [x.split(':', 1) for x in record.split('\t') if label_pattern.match(x)]
def DictReader(ltsvfile, labels=None, dict_type=dict): """Make LTSV Reader for reading selected labels. :param ltsvfile: iterable of lines. :param labels: sequence of labels. :return: generator of record in {label: value, ...} form. """ for rec in reader(ltsvfile, labels): yield dict_type(rec)
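A quick sketch of the two readers on a couple of invented LTSV lines:

lines = [
    "host:127.0.0.1\tident:-\ttime:[05/Feb/2013]\tstatus:200\n",
    "host:192.168.0.1\tident:-\ttime:[06/Feb/2013]\tstatus:404\n",
]
for rec in DictReader(lines, labels=["host", "status"]):
    print(rec["host"], rec["status"])   # only the selected labels survive
rows = list(reader(lines))              # [[label, value], ...] for every field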
def is_disjoint(set1, set2, warn): """ Checks that no elements of set2 are in set1. :param set1: a set of values :param set2: a set of values :param warn: the error message that should be thrown when the sets are NOT disjoint :return: returns true if no elements of set2 are in set1 """ for elem in set2: if elem in set1: raise ValueError(warn) return True
def contains_all(set1, set2, warn): """ Checks if all elements from set2 are in set1. :param set1: a set of values :param set2: a set of values :param warn: the error message that should be thrown when not all elements of set2 are contained in set1 :return: returns true if all values of set2 are in set1 """ for elem in set2: if elem not in set1: raise ValueError(warn) return True
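A tiny sketch of how the two guards above behave; both return True or raise ValueError with the supplied message:

contains_all({"a", "b", "c"}, {"a", "b"}, "column not found")         # True
is_disjoint({"group_col"}, {"value_col"}, "grouping column reused")   # True
# is_disjoint({"a"}, {"a"}, "grouping column reused") raises ValueError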
def to_XML(self): """ Serialize object back to XML string. Returns: str: String which should be same as original input, if everything\ works as expected. """ marcxml_template = """<record xmlns="http://www.loc.gov/MARC21/slim/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.loc.gov/MARC21/slim http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"> $LEADER $CONTROL_FIELDS $DATA_FIELDS </record> """ oai_template = """<record> <metadata> <oai_marc> $LEADER$CONTROL_FIELDS $DATA_FIELDS </oai_marc> </metadata> </record> """ # serialize leader, if it is present and record is marc xml leader = self.leader if self.leader is not None else "" if leader: # print only visible leaders leader = "<leader>" + leader + "</leader>" # discard leader for oai if self.oai_marc: leader = "" # serialize xml_template = oai_template if self.oai_marc else marcxml_template xml_output = Template(xml_template).substitute( LEADER=leader.strip(), CONTROL_FIELDS=self._serialize_ctl_fields().strip(), DATA_FIELDS=self._serialize_data_fields().strip() ) return xml_output
def _parse_string(self, xml): """ Parse MARC XML document to dicts, which are contained in self.controlfields and self.datafields. Args: xml (str or HTMLElement): input data Also detect if this is oai marc format or not (see self.oai_marc). """ if not isinstance(xml, HTMLElement): xml = dhtmlparser.parseString(str(xml)) # check if there are any records record = xml.find("record") if not record: raise ValueError("There is no <record> in your MARC XML document!") record = record[0] self.oai_marc = len(record.find("oai_marc")) > 0 # leader is separate only in marc21 if not self.oai_marc: leader = record.find("leader") if len(leader) >= 1: self.leader = leader[0].getContent() # parse body depending on whether this is OAI MARC format if self.oai_marc: self._parse_control_fields(record.find("fixfield"), "id") self._parse_data_fields(record.find("varfield"), "id", "label") else: self._parse_control_fields(record.find("controlfield"), "tag") self._parse_data_fields(record.find("datafield"), "tag", "code") # for backward compatibility of MARC XML with OAI if self.oai_marc and "LDR" in self.controlfields: self.leader = self.controlfields["LDR"]
def _parse_control_fields(self, fields, tag_id="tag"): """ Parse control fields. Args: fields (list): list of HTMLElements tag_id (str): name of the parameter which holds the field name; this is normally "tag", but "id" in case of oai_marc. """ for field in fields: params = field.params # skip tags without parameters if tag_id not in params: continue self.controlfields[params[tag_id]] = field.getContent().strip()
def _parse_data_fields(self, fields, tag_id="tag", sub_id="code"): """ Parse data fields. Args: fields (list): list of HTMLElements tag_id (str): name of the parameter which holds the field name; this is normally "tag", but "id" in case of oai_marc sub_id (str): name of the parameter which holds information about the subfield name; this is normally "code", but "label" in case of oai_marc """ for field in fields: params = field.params if tag_id not in params: continue # take care of iX/indX (indicator) parameters field_repr = OrderedDict([ [self.i1_name, params.get(self.i1_name, " ")], [self.i2_name, params.get(self.i2_name, " ")], ]) # process all subfields for subfield in field.find("subfield"): if sub_id not in subfield.params: continue content = MARCSubrecord( val=subfield.getContent().strip(), i1=field_repr[self.i1_name], i2=field_repr[self.i2_name], other_subfields=field_repr ) # add or append content to list of other contents code = subfield.params[sub_id] if code in field_repr: field_repr[code].append(content) else: field_repr[code] = [content] tag = params[tag_id] if tag in self.datafields: self.datafields[tag].append(field_repr) else: self.datafields[tag] = [field_repr]
def add_ctl_field(self, name, value): """ Add new control field `value` under `name` into the control field dictionary :attr:`controlfields`. """ if len(name) != 3: raise ValueError("name parameter have to be exactly 3 chars long!") self.controlfields[name] = value
def add_data_field(self, name, i1, i2, subfields_dict): """ Add new datafield into :attr:`datafields` and take care of OAI MARC differences. Args: name (str): Name of datafield. i1 (char): Value of i1/ind1 parameter. i2 (char): Value of i2/ind2 parameter. subfields_dict (dict): Dictionary containing subfields (as list). `subfields_dict` is expected to be in this format:: { "field_id": ["subfield data",], ... "z": ["X0456b"] } Warning: For your own good, use OrderedDict for `subfields_dict`, or constructor's `resort` parameter set to ``True`` (it is by default). Warning: ``field_id`` can be only one character long! """ if i1 not in self.valid_i_chars: raise ValueError("Invalid i1 parameter '" + i1 + "'!") if i2 not in self.valid_i_chars: raise ValueError("Invalid i2 parameter '" + i2 + "'!") if len(name) != 3: raise ValueError( "`name` parameter have to be exactly 3 chars long!" ) if not subfields_dict: raise ValueError( "`subfields_dict` have to contain something!" ) if not isinstance(subfields_dict, dict): raise ValueError( "`subfields_dict` parameter has to be dict instance!" ) # check local keys, convert strings to MARCSubrecord instances subrecords = [] for key, val in subfields_dict.items(): if len(key) > 1: raise KeyError( "`subfields_dict` keys can be only one character long!" ) # convert other values to lists if not isinstance(val, list): val = [val] subfields = list(map( lambda x: MARCSubrecord(x, i1, i2, None), val )) subfields_dict[key] = subfields subrecords.extend(subfields) # save i/ind values subfields_dict[self.i1_name] = i1 subfields_dict[self.i2_name] = i2 # append dict, or add new dict into self.datafields if name in self.datafields: self.datafields[name].append(subfields_dict) else: self.datafields[name] = [subfields_dict] # to each subrecord add reference to list of all subfields in this # datafield other_subfields = self.datafields[name] for record in subrecords: record.other_subfields = other_subfields
def get_i_name(self, num, is_oai=None): """ This method is used mainly internally, but it can be handy if you work with the raw MARC XML object and are not using getters. Args: num (int): Which indicator you need (1/2). is_oai (bool/None): If None, :attr:`.oai_marc` is used. Returns: str: current name of ``i1``/``ind1`` parameter based on \ :attr:`oai_marc` property. """ if num not in (1, 2): raise ValueError("`num` parameter have to be 1 or 2!") if is_oai is None: is_oai = self.oai_marc i_name = "ind" if not is_oai else "i" return i_name + str(num)
def get_ctl_field(self, controlfield, alt=None): """ Method wrapper over :attr:`.controlfields` dictionary. Args: controlfield (str): Name of the controlfield. alt (object, default None): Alternative value of the `controlfield` when `controlfield` couldn't be found. Returns: str: record from given `controlfield` """ if not alt: return self.controlfields[controlfield] return self.controlfields.get(controlfield, alt)
def getDataRecords(self, datafield, subfield, throw_exceptions=True): """ .. deprecated:: Use :func:`get_subfields` instead. """ return self.get_subfields( datafield=datafield, subfield=subfield, exception=throw_exceptions )
def get_subfields(self, datafield, subfield, i1=None, i2=None, exception=False): """ Return content of given `subfield` in `datafield`. Args: datafield (str): Section name (for example "001", "100", "700"). subfield (str): Subfield name (for example "a", "1", etc..). i1 (str, default None): Optional i1/ind1 parameter value, which will be used for search. i2 (str, default None): Optional i2/ind2 parameter value, which will be used for search. exception (bool): If ``True``, :exc:`~exceptions.KeyError` is raised when the method couldn't find the given `datafield` / `subfield`. If ``False``, blank array ``[]`` is returned. Returns: list: of :class:`.MARCSubrecord`. Raises: KeyError: If the subfield or datafield couldn't be found. Note: MARCSubrecord is practically the same thing as a string, but has defined :meth:`.MARCSubrecord.i1` and :attr:`.MARCSubrecord.i2` methods. You may need to be able to get this, because MARC XML depends on i/ind parameters from time to time (names of authors for example). """ if len(datafield) != 3: raise ValueError( "`datafield` parameter have to be exactly 3 chars long!" ) if len(subfield) != 1: raise ValueError( "Bad subfield specification - subfield have to be 1 char long!" ) # if datafield not found, return or raise exception if datafield not in self.datafields: if exception: raise KeyError(datafield + " is not in datafields!") return [] # look for subfield defined by `subfield`, `i1` and `i2` parameters output = [] for datafield in self.datafields[datafield]: if subfield not in datafield: continue # records are not returned just as plain strings, but as # MARCSubrecord, because you will need ind1/ind2 values for sfield in datafield[subfield]: if i1 and sfield.i1 != i1: continue if i2 and sfield.i2 != i2: continue output.append(sfield) if not output and exception: raise KeyError(subfield + " couldn't be found in subfields!") return output
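A hedged usage sketch of the getters above, assuming the record class (MARCXMLRecord, per record_iterator below) accepts the raw XML string in its constructor as _parse_string suggests; the file name is hypothetical:

with open("record.xml") as handle:
    rec = MARCXMLRecord(handle.read())

titles = rec.get_subfields("245", "a")            # list of MARCSubrecord values
authors = rec.get_subfields("100", "a", i1="1")   # filtered by the ind1 value
control_number = rec.get_ctl_field("001", alt="")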
def timeit_block(unit='s', label=""): """ Measure the elapsed time of a code block. :param unit: time unit, one of 's', 'm', 'h' (seconds, minutes, hours) :param label: label for the code block """ start = time.time() try: yield finally: _format(unit, time.time() - start, label)
def timeit(unit='s'): """ Measure the elapsed time of a function. :param unit: time unit, one of 's', 'm', 'h' (seconds, minutes, hours) """ def wrapper(func): @wraps(func) def inner(*args, **kwargs): start = time.time() _result = func(*args, **kwargs) _format(unit, time.time() - start, func.__name__ + '()') return _result return inner return wrapper
def _print(stats, limit, label): """ Limit the amount of printed output. """ print("TraceMalloc for {}".format(label)) for index, stat in enumerate(stats): if index < limit: print(stat) else: break
def memoryit(group_by='lineno', limit=10): """ Trace the memory consumption of a function. :param group_by: statistics grouping, one of 'filename', 'lineno', 'traceback' :param limit: maximum number of output lines """ def wrapper(func): @wraps(func) def inner(*args, **kwargs): tracemalloc.start() _start = tracemalloc.take_snapshot() _result = func(*args, **kwargs) _end = tracemalloc.take_snapshot() stats = _end.compare_to(_start, group_by) _print(stats, limit, func.__name__ + '()') return _result return inner return wrapper
def memoryit_block(group_by='lineno', limit=10, label='code block'): """ Trace the memory consumption of a code block. :param group_by: statistics grouping, one of 'filename', 'lineno', 'traceback' :param limit: maximum number of output lines :param label: label for the code block """ tracemalloc.start() _start = tracemalloc.take_snapshot() try: yield finally: _end = tracemalloc.take_snapshot() stats = _end.compare_to(_start, group_by) _print(stats, limit, label)
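A short usage sketch for the four helpers above, assuming timeit_block and memoryit_block are wrapped with contextlib.contextmanager (their yield-based bodies suggest so) and that the helpers are importable:

@timeit(unit='m')
def crunch():
    return sum(i * i for i in range(10 ** 6))

crunch()                                   # prints the elapsed time in minutes

with timeit_block(unit='s', label='list build'):
    data = [i * i for i in range(10 ** 5)]

@memoryit(group_by='lineno', limit=5)
def build():
    return list(range(10 ** 6))

build()                                    # prints the top 5 allocation diffs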
def _get_params(target, param, dof): '''Get the given param from each of the DOFs for a joint.''' return [target.getParam(getattr(ode, 'Param{}{}'.format(param, s))) for s in ['', '2', '3'][:dof]]
def _set_params(target, param, values, dof): '''Set the given param for each of the DOFs for a joint.''' if not isinstance(values, (list, tuple, np.ndarray)): values = [values] * dof assert dof == len(values) for s, value in zip(['', '2', '3'][:dof], values): target.setParam(getattr(ode, 'Param{}{}'.format(param, s)), value)
def make_quaternion(theta, *axis): '''Given an angle and an axis, create a quaternion.''' x, y, z = axis r = np.sqrt(x * x + y * y + z * z) st = np.sin(theta / 2.) ct = np.cos(theta / 2.) return [x * st / r, y * st / r, z * st / r, ct]
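A quick sanity check for make_quaternion, assuming numpy is imported as np: a rotation of pi/2 about the z axis should give roughly (0, 0, 0.707, 0.707) in (x, y, z, w) order:

import numpy as np

q = make_quaternion(np.pi / 2, 0, 0, 1)
# q ~= [0.0, 0.0, 0.7071, 0.7071]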
def center_of_mass(bodies): '''Given a set of bodies, compute their center of mass in world coordinates. ''' x = np.zeros(3) t = 0. for b in bodies: m = b.mass x += b.body_to_world(m.c) * m.mass t += m.mass return x / t
def state(self): '''The state of this body includes: - name of the body (str) - position (3-tuple) - quaternion (4-tuple) - linear velocity (3-tuple) - angular velocity (3-tuple) ''' return BodyState(self.name, tuple(self.position), tuple(self.quaternion), tuple(self.linear_velocity), tuple(self.angular_velocity))
def state(self, state): '''Set the state of this body. Parameters ---------- state : BodyState tuple The desired state of the body. ''' assert self.name == state.name, \ 'state name "{}" != body name "{}"'.format(state.name, self.name) self.position = state.position self.quaternion = state.quaternion self.linear_velocity = state.linear_velocity self.angular_velocity = state.angular_velocity
def rotation(self, rotation): '''Set the rotation of this body using a rotation matrix. Parameters ---------- rotation : sequence of 9 floats The desired rotation matrix for this body. ''' if isinstance(rotation, np.ndarray): rotation = rotation.ravel() self.ode_body.setRotation(tuple(rotation))
def is_kinematic(self, is_kinematic): '''Set the kinematic/dynamic attribute for this body. In pagoda, kinematic bodies have infinite mass and do not interact with other bodies via collisions. Parameters ---------- is_kinematic : bool If True, this body will be set to kinematic. If False, it will be set to dynamic. ''' if is_kinematic: self.ode_body.setKinematic() else: self.ode_body.setDynamic()
def body_to_world(self, position): '''Convert a body-relative offset to world coordinates. Parameters ---------- position : 3-tuple of float A tuple giving body-relative offsets. Returns ------- position : 3-tuple of float A tuple giving the world coordinates of the given offset. ''' return np.array(self.ode_body.getRelPointPos(tuple(position)))
def world_to_body(self, position): '''Convert a point in world coordinates to a body-relative offset. Parameters ---------- position : 3-tuple of float A world coordinates position. Returns ------- offset : 3-tuple of float A tuple giving the body-relative offset of the given position. ''' return np.array(self.ode_body.getPosRelPoint(tuple(position)))
def relative_offset_to_world(self, offset): '''Convert a relative body offset to world coordinates. Parameters ---------- offset : 3-tuple of float The offset of the desired point, given as a relative fraction of the size of this body. For example, offset (0, 0, 0) is the center of the body, while (0.5, -0.2, 0.1) describes a point halfway from the center towards the maximum x-extent of the body, 20% of the way from the center towards the minimum y-extent, and 10% of the way from the center towards the maximum z-extent. Returns ------- position : 3-tuple of float A position in world coordinates of the given body offset. ''' return np.array(self.body_to_world(offset * self.dimensions / 2))
def add_force(self, force, relative=False, position=None, relative_position=None): '''Add a force to this body. Parameters ---------- force : 3-tuple of float A vector giving the forces along each world or body coordinate axis. relative : bool, optional If False, the force values are assumed to be given in the world coordinate frame. If True, they are assumed to be given in the body-relative coordinate frame. Defaults to False. position : 3-tuple of float, optional If given, apply the force at this location in world coordinates. Defaults to the current position of the body. relative_position : 3-tuple of float, optional If given, apply the force at this relative location on the body. If given, this method ignores the ``position`` parameter. ''' b = self.ode_body if relative_position is not None: op = b.addRelForceAtRelPos if relative else b.addForceAtRelPos op(force, relative_position) elif position is not None: op = b.addRelForceAtPos if relative else b.addForceAtPos op(force, position) else: op = b.addRelForce if relative else b.addForce op(force)
def add_torque(self, torque, relative=False): '''Add a torque to this body. Parameters ---------- force : 3-tuple of float A vector giving the torque along each world or body coordinate axis. relative : bool, optional If False, the torque values are assumed to be given in the world coordinate frame. If True, they are assumed to be given in the body-relative coordinate frame. Defaults to False. ''' op = self.ode_body.addRelTorque if relative else self.ode_body.addTorque op(torque)
def join_to(self, joint, other_body=None, **kwargs): '''Connect this body to another one using a joint. This method creates a joint to fasten this body to the other one. See :func:`World.join`. Parameters ---------- joint : str The type of joint to use when connecting these bodies. other_body : :class:`Body` or str, optional The other body to join with this one. If not given, connects this body to the world. ''' self.world.join(joint, self, other_body, **kwargs)
def connect_to(self, joint, other_body, offset=(0, 0, 0), other_offset=(0, 0, 0), **kwargs): '''Move another body next to this one and join them together. This method will move the ``other_body`` so that the anchor points for the joint coincide. It then creates a joint to fasten the two bodies together. See :func:`World.move_next_to` and :func:`World.join`. Parameters ---------- joint : str The type of joint to use when connecting these bodies. other_body : :class:`Body` or str The other body to join with this one. offset : 3-tuple of float, optional The body-relative offset where the anchor for the joint should be placed. Defaults to (0, 0, 0). See :func:`World.move_next_to` for a description of how offsets are specified. other_offset : 3-tuple of float, optional The offset on the second body where the joint anchor should be placed. Defaults to (0, 0, 0). Like ``offset``, this is given as an offset relative to the size and shape of ``other_body``. ''' anchor = self.world.move_next_to(self, other_body, offset, other_offset) self.world.join(joint, self, other_body, anchor=anchor, **kwargs)
def positions(self): '''List of positions for linear degrees of freedom.''' return [self.ode_obj.getPosition(i) for i in range(self.LDOF)]
def position_rates(self): '''List of position rates for linear degrees of freedom.''' return [self.ode_obj.getPositionRate(i) for i in range(self.LDOF)]
def angles(self): '''List of angles for rotational degrees of freedom.''' return [self.ode_obj.getAngle(i) for i in range(self.ADOF)]
def angle_rates(self): '''List of angle rates for rotational degrees of freedom.''' return [self.ode_obj.getAngleRate(i) for i in range(self.ADOF)]
def axes(self): '''List of axes for this object's degrees of freedom.''' return [np.array(self.ode_obj.getAxis(i)) for i in range(self.ADOF or self.LDOF)]
def axes(self, axes): '''Set the axes for this object's degrees of freedom. Parameters ---------- axes : list of axes specifications A list of axis values to set. This list must have the same number of elements as the degrees of freedom of the underlying ODE object. Each element can be (a) None, which has no effect on the corresponding axis, or (b) three floats specifying the axis to set. ''' assert self.ADOF == len(axes) or self.LDOF == len(axes) for i, axis in enumerate(axes): if axis is not None: self.ode_obj.setAxis(i, 0, axis)
def lo_stops(self, lo_stops): '''Set the lo stop values for this object's degrees of freedom. Parameters ---------- lo_stops : float or sequence of float A lo stop value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. For rotational degrees of freedom, these values must be in radians. ''' _set_params(self.ode_obj, 'LoStop', lo_stops, self.ADOF + self.LDOF)
def hi_stops(self, hi_stops): '''Set the hi stop values for this object's degrees of freedom. Parameters ---------- hi_stops : float or sequence of float A hi stop value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. For rotational degrees of freedom, these values must be in radians. ''' _set_params(self.ode_obj, 'HiStop', hi_stops, self.ADOF + self.LDOF)
def velocities(self, velocities): '''Set the target velocities for this object's degrees of freedom. Parameters ---------- velocities : float or sequence of float A target velocity value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. For rotational degrees of freedom, these values must be in radians / second. ''' _set_params(self.ode_obj, 'Vel', velocities, self.ADOF + self.LDOF)
def max_forces(self, max_forces): '''Set the maximum forces for this object's degrees of freedom. Parameters ---------- max_forces : float or sequence of float A maximum force value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. ''' _set_params(self.ode_obj, 'FMax', max_forces, self.ADOF + self.LDOF)
def erps(self, erps): '''Set the ERP values for this object's degrees of freedom. Parameters ---------- erps : float or sequence of float An ERP value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. ''' _set_params(self.ode_obj, 'ERP', erps, self.ADOF + self.LDOF)
def cfms(self, cfms): '''Set the CFM values for this object's degrees of freedom. Parameters ---------- cfms : float or sequence of float A CFM value to set on all degrees of freedom, or a list containing one such value for each degree of freedom. ''' _set_params(self.ode_obj, 'CFM', cfms, self.ADOF + self.LDOF)
def stop_cfms(self, stop_cfms): '''Set the CFM values for this object's DOF limits. Parameters ---------- stop_cfms : float or sequence of float A CFM value to set on all degrees of freedom limits, or a list containing one such value for each degree of freedom limit. ''' _set_params(self.ode_obj, 'StopCFM', stop_cfms, self.ADOF + self.LDOF)
def stop_erps(self, stop_erps): '''Set the ERP values for this object's DOF limits. Parameters ---------- stop_erps : float or sequence of float An ERP value to set on all degrees of freedom limits, or a list containing one such value for each degree of freedom limit. ''' _set_params(self.ode_obj, 'StopERP', stop_erps, self.ADOF + self.LDOF)
def axes(self, axes): '''Set the axes for this object's degrees of freedom. Parameters ---------- axes : list of axis parameters A list of axis values to set. This list must have the same number of elements as the degrees of freedom of the underlying ODE object. Each element can be (a) None, which has no effect on the corresponding axis, or (b) three floats specifying the axis to set, or (c) a dictionary with an "axis" key specifying the axis to set and an optional "rel" key (defaults to 0) specifying the relative body to set the axis on. ''' assert len(axes) == self.ADOF for i, ax in enumerate(axes): if ax is None: continue if not isinstance(ax, dict): ax = dict(axis=ax) self.ode_obj.setAxis(i, ax.get('rel', 0), ax['axis'])
def axes(self, axes): '''Set the linear axis of displacement for this joint. Parameters ---------- axes : list containing one 3-tuple of floats A list of the axes for this joint. For a slider joint, which has one degree of freedom, this must contain one 3-tuple specifying the X, Y, and Z axis for the joint. ''' self.lmotor.axes = [axes[0]] self.ode_obj.setAxis(tuple(axes[0]))
def axes(self, axes): '''Set the angular axis of rotation for this joint. Parameters ---------- axes : list containing one 3-tuple of floats A list of the axes for this joint. For a hinge joint, which has one degree of freedom, this must contain one 3-tuple specifying the X, Y, and Z axis for the joint. ''' self.amotor.axes = [axes[0]] self.ode_obj.setAxis(tuple(axes[0]))
def axes(self): '''A list of axes of rotation for this joint.''' return [np.array(self.ode_obj.getAxis1()), np.array(self.ode_obj.getAxis2())]
def create_body(self, shape, name=None, **kwargs): '''Create a new body. Parameters ---------- shape : str The "shape" of the body to be created. This should name a type of body object, e.g., "box" or "cap". name : str, optional The name to use for this body. If not given, a default name will be constructed of the form "{shape}{# of objects in the world}". Returns ------- body : :class:`Body` The created body object. ''' shape = shape.lower() if name is None: for i in range(1 + len(self._bodies)): name = '{}{}'.format(shape, i) if name not in self._bodies: break self._bodies[name] = Body.build(shape, name, self, **kwargs) return self._bodies[name]
def join(self, shape, body_a, body_b=None, name=None, **kwargs): '''Create a new joint that connects two bodies together. Parameters ---------- shape : str The "shape" of the joint to use for joining together two bodies. This should name a type of joint, such as "ball" or "piston". body_a : str or :class:`Body` The first body to join together with this joint. If a string is given, it will be used as the name of a body to look up in the world. body_b : str or :class:`Body`, optional If given, identifies the second body to join together with ``body_a``. If not given, ``body_a`` is joined to the world. name : str, optional If given, use this name for the created joint. If not given, a name will be constructed of the form "{body_a.name}^{shape}^{body_b.name}". Returns ------- joint : :class:`Joint` The joint object that was created. ''' ba = self.get_body(body_a) bb = self.get_body(body_b) shape = shape.lower() if name is None: name = '{}^{}^{}'.format(ba.name, shape, bb.name if bb else '') self._joints[name] = Joint.build( shape, name, self, body_a=ba, body_b=bb, **kwargs) return self._joints[name]
def move_next_to(self, body_a, body_b, offset_a, offset_b): '''Move one body to be near another one. After moving, the location described by ``offset_a`` on ``body_a`` will be coincident with the location described by ``offset_b`` on ``body_b``. Parameters ---------- body_a : str or :class:`Body` The body to use as a reference for moving the other body. If this is a string, it is treated as the name of a body to look up in the world. body_b : str or :class:`Body` The body to move next to ``body_a``. If this is a string, it is treated as the name of a body to look up in the world. offset_a : 3-tuple of float The offset of the anchor point, given as a relative fraction of the size of ``body_a``. See :func:`Body.relative_offset_to_world`. offset_b : 3-tuple of float The offset of the anchor point, given as a relative fraction of the size of ``body_b``. Returns ------- anchor : 3-tuple of float The location of the shared point, which is often useful to use as a joint anchor. ''' ba = self.get_body(body_a) bb = self.get_body(body_b) if ba is None: return bb.relative_offset_to_world(offset_b) if bb is None: return ba.relative_offset_to_world(offset_a) anchor = ba.relative_offset_to_world(offset_a) offset = bb.relative_offset_to_world(offset_b) bb.position = bb.position + anchor - offset return anchor
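A hedged sketch of how create_body, move_next_to and join compose, assuming a World can be constructed directly and that 'box' bodies accept a lengths keyword (neither constructor is shown in these snippets):

world = World()                                      # hypothetical constructor
box_a = world.create_body('box', lengths=(1, 1, 1))  # lengths kwarg is assumed
box_b = world.create_body('box', lengths=(1, 1, 1))
# place box_b so the chosen offsets coincide, then hinge the two bodies there
anchor = world.move_next_to(box_a, box_b, (0, 0, 1), (0, 0, -1))
world.join('hinge', box_a, box_b, anchor=anchor)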
def set_body_states(self, states): '''Set the states of some bodies in the world. Parameters ---------- states : sequence of states A complete state tuple for one or more bodies in the world. See :func:`get_body_states`. ''' for state in states: self.get_body(state.name).state = state
def step(self, substeps=2): '''Step the world forward by one frame. Parameters ---------- substeps : int, optional Split the step into this many sub-steps. This helps to prevent the time delta for an update from being too large. ''' self.frame_no += 1 dt = self.dt / substeps for _ in range(substeps): self.ode_contactgroup.empty() self.ode_space.collide(None, self.on_collision) self.ode_world.step(dt)
def on_key_press(self, key, modifiers, keymap): '''Handle an otherwise unhandled keypress event (from a GUI).''' if key == keymap.ENTER: self.reset() return True
def are_connected(self, body_a, body_b): '''Determine whether the given bodies are currently connected. Parameters ---------- body_a : str or :class:`Body` One body to test for connectedness. If this is a string, it is treated as the name of a body to look up. body_b : str or :class:`Body` One body to test for connectedness. If this is a string, it is treated as the name of a body to look up. Returns ------- connected : bool Return True iff the two bodies are connected. ''' return bool(ode.areConnected( self.get_body(body_a).ode_body, self.get_body(body_b).ode_body))
def on_collision(self, args, geom_a, geom_b): '''Callback function for the collide() method. Parameters ---------- args : None Arguments passed when the callback was registered. Not used. geom_a : ODE geometry The geometry object of one of the bodies that has collided. geom_b : ODE geometry The geometry object of one of the bodies that has collided. ''' body_a = geom_a.getBody() body_b = geom_b.getBody() if ode.areConnected(body_a, body_b) or \ (body_a and body_a.isKinematic()) or \ (body_b and body_b.isKinematic()): return for c in ode.collide(geom_a, geom_b): c.setBounce(self.elasticity) c.setMu(self.friction) ode.ContactJoint(self.ode_world, self.ode_contactgroup, c).attach( geom_a.getBody(), geom_b.getBody())
def record_iterator(xml): """ Iterate over all ``<record>`` tags in `xml`. Args: xml (str/file): Input string with XML. UTF-8 is prefered encoding, unicode should be ok. Yields: MARCXMLRecord: For each corresponding ``<record>``. """ # handle file-like objects if hasattr(xml, "read"): xml = xml.read() dom = None try: dom = dhtmlparser.parseString(xml) except UnicodeError: dom = dhtmlparser.parseString(xml.encode("utf-8")) for record_xml in dom.findB("record"): yield MARCXMLRecord(record_xml)
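A minimal sketch of record_iterator over a file handle; the file name is hypothetical and each yielded record supports the getters shown earlier:

with open("marc_export.xml") as handle:
    for record in record_iterator(handle):
        print(record.get_subfields("245", "a"))   # titles, possibly empty lists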
def profileit(field='cumulative'): """ Profile the runtime cost of a function. :param field: sort order for the output; one of "stdname", "calls", "time", "cumulative" """ def wrapper(func): @wraps(func) def inner(*args, **kwargs): pro = Profile() _result = pro.runcall(func, *args, **kwargs) stats = Stats(pro) stats.strip_dirs() stats.sort_stats(field) print("Profile for {}()".format(func.__name__)) stats.print_stats() stats.print_callers() return _result return inner return wrapper
def parse(source, world, jointgroup=None, density=1000, color=None): '''Load and parse a source file. Parameters ---------- source : file A file-like object that contains text information describing bodies and joints to add to the world. world : :class:`pagoda.physics.World` The world to add objects and joints to. jointgroup : ode.JointGroup, optional If provided, add all joints from this parse to the given group. The default behavior adds joints to the world without an explicit group. density : float, optional Default density for bodies. This is overridden if the source provides a density or mass value for a given body. color : tuple of floats, optional Default color for bodies from this source. Defaults to None, which does not assign a color to parsed bodies. ''' visitor = Visitor(world, jointgroup, density, color) visitor.parse(re.sub(r'#.*', ' ', source.read())) return visitor
def parse_asf(source, world, jointgroup=None, density=1000, color=None): '''Load and parse a source file. Parameters ---------- source : file A file-like object that contains text information describing bodies and joints to add to the world. world : :class:`pagoda.physics.World` The world to add objects and joints to. jointgroup : ode.JointGroup, optional If provided, add all joints from this parse to the given group. The default behavior adds joints to the world without an explicit group. density : float, optional Default density for bodies. This is overridden if the source provides a density or mass value for a given body. color : tuple of floats, optional Default color for bodies from this source. Defaults to None, which does not assign a color to parsed bodies. ''' visitor = AsfVisitor(world, jointgroup, density, color) visitor.parse(re.sub(r'#.*', ' ', source.read())) return visitor
def parse_amc(source): '''Parse an AMC motion capture data file. Parameters ---------- source : file A file-like object that contains AMC motion capture text. Yields ------ frame : dict Yields a series of motion capture frames. Each frame is a dictionary that maps a bone name to a list of the DOF configurations for that bone. ''' lines = 0 frames = 1 frame = {} degrees = False for line in source: lines += 1 line = line.split('#')[0].strip() if not line: continue if line.startswith(':'): if line.lower().startswith(':deg'): degrees = True continue if line.isdigit(): if int(line) != frames: raise RuntimeError( 'frame mismatch on line {}: ' 'produced {} but file claims {}'.format(lines, frames, line)) yield frame frames += 1 frame = {} continue fields = line.split() frame[fields[0]] = list(map(float, fields[1:]))
def create_bodies(self, translate=(0, 1, 0), size=0.1): '''Traverse the bone hierarchy and create physics bodies.''' stack = [('root', 0, self.root['position'] + translate)] while stack: name, depth, end = stack.pop() for child in self.hierarchy.get(name, ()): stack.append((child, depth + 1, end + self.bones[child].end)) if name not in self.bones: continue bone = self.bones[name] body = self.world.create_body( 'box', name=bone.name, density=self.density, lengths=(size, size, bone.length)) body.color = self.color # move the center of the body to the halfway point between # the parent (joint) and child (joint). x, y, z = end - bone.direction * bone.length / 2 # swizzle y and z -- asf uses y as up, but we use z as up. body.position = x, z, y # compute an orthonormal (rotation) matrix using the ground and # the body. this is mind-bending but seems to work. u = bone.direction v = np.cross(u, [0, 1, 0]) l = np.linalg.norm(v) if l > 0: v /= l rot = np.vstack([np.cross(u, v), v, u]).T swizzle = [[1, 0, 0], [0, 0, 1], [0, -1, 0]] body.rotation = np.dot(swizzle, rot) self.bodies.append(body)
def create_joints(self): '''Traverse the bone hierarchy and create physics joints.''' stack = ['root'] while stack: parent = stack.pop() for child in self.hierarchy.get(parent, ()): stack.append(child) if parent not in self.bones: continue bone = self.bones[parent] body = [b for b in self.bodies if b.name == parent][0] for child in self.hierarchy.get(parent, ()): child_bone = self.bones[child] child_body = [b for b in self.bodies if b.name == child][0] shape = ('', 'hinge', 'universal', 'ball')[len(child_bone.dof)] self.joints.append(self.world.join(shape, body, child_body))
def format_cookies(path): """ Convert a cookie string into a dictionary. :param path: path to the cookies file :return: cookies dictionary """ with open(path, 'r') as f: _cookies = {} for row in f.read().split(';'): k, v = row.strip().split('=', 1) _cookies[k] = v return _cookies
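A small sketch of format_cookies on an invented cookies file:

# cookies.txt contains one line such as: sessionid=abc123; csrftoken=xyz; lang=en
cookies = format_cookies("cookies.txt")
# -> {'sessionid': 'abc123', 'csrftoken': 'xyz', 'lang': 'en'}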
def delete_empty_dir(directory): """ Recursively delete empty directories. :param directory: directory path """ if os.path.exists(directory): if os.path.isdir(directory): for d in os.listdir(directory): path = os.path.join(directory, d) if os.path.isdir(path): delete_empty_dir(path) if not os.listdir(directory): os.rmdir(directory) print("Remove the empty directory: " + directory) else: print("The directory does not exist!")
def _parse_corporations(self, datafield, subfield, roles=["any"]): """ Parse information about corporations from the given field identified by `datafield` parameter. Args: datafield (str): MARC field ID ("``110``", "``610``", etc..) subfield (str): MARC subfield ID with name, which is typically stored in "``a``" subfield. roles (list): specify which roles you need. Set to ``["any"]`` for any role, ``["dst"]`` for distributors, etc.. For details, see http://www.loc.gov/marc/relators/relaterm.html Returns: list: :class:`Corporation` objects. """ if len(datafield) != 3: raise ValueError( "datafield parameter have to be exactly 3 chars long!" ) if len(subfield) != 1: raise ValueError( "Bad subfield specification - subfield have to be 1 char long!" ) parsed_corporations = [] for corporation in self.get_subfields(datafield, subfield): other_subfields = corporation.other_subfields # check if corporation has at least one of the roles specified in # 'roles' parameter of function if "4" in other_subfields and roles != ["any"]: corp_roles = other_subfields["4"] # list of role parameters relevant = any(map(lambda role: role in roles, corp_roles)) # skip non-relevant corporations if not relevant: continue name = "" place = "" date = "" name = corporation if "c" in other_subfields: place = ",".join(other_subfields["c"]) if "d" in other_subfields: date = ",".join(other_subfields["d"]) parsed_corporations.append(Corporation(name, place, date)) return parsed_corporations