Columns: content — string, lengths 35 to 762k · sha1 — string, length 40 · id — int64, range 0 to 3.66M
def call_assign_job(job_id, mex_id):
    """
    Function to send an update to the MEx Sentinel to assign a Job to an MEx.
    """
    try:
        rospy.wait_for_service('/mex_sentinel/assign_job_to_mex', rospy.Duration(1))
        try:
            assign_job = rospy.ServiceProxy('mex_sentinel/assign_job_to_mex', AssignJobToMex)
            req = AssignJobToMexRequest()
            req.job_id = job_id
            req.mex_id = mex_id
            result = assign_job(req)
            return result
        except rospy.ServiceException as e:
            print(NAME + "Service call failed: %s" % e)
    except rospy.ROSException:
        pass
9c5b2aa27e8d04949fbb4c5a2c9eb2ac86ccd9a7
17,090
def full(
    coords, nodata=np.nan, dtype=np.float32, name=None, attrs={}, crs=None, lazy=False
):
    """Return a full DataArray based on a geospatial coords dictionary.

    Arguments
    ---------
    coords: sequence or dict of array_like, optional
        Coordinates (tick labels) to use for indexing along each dimension (max 3).
        The coordinate sequence should be (dim0, y, x), of which the first is optional.
    nodata: float, int, optional
        Fill value for new DataArray; defaults to other.nodata, or np.nan if not set.
    dtype: numpy.dtype, optional
        Data type.
    name: str, optional
        DataArray name.
    attrs : dict, optional
        Additional attributes.
    crs: int, dict, or str, optional
        Coordinate Reference System. Accepts EPSG codes (int or str); proj (str or dict).
    lazy: bool, optional
        If True, return a DataArray backed by a dask rather than a numpy array.

    Returns
    -------
    da: DataArray
        Filled DataArray.
    """
    # dask.array.full mirrors np.full's (shape, fill_value, dtype) signature;
    # dask.array.empty would misread the fill value as a dtype argument.
    f = dask.array.full if lazy else np.full
    dims = tuple([d for d in coords])
    shape = tuple([coords[dim].size for dim in dims])
    data = f(shape, nodata, dtype=dtype)
    da = xr.DataArray(data, coords, dims, name, attrs)
    da.raster.set_nodata(nodata)
    da.raster.set_crs(crs)
    return da
41bb4fce22a8dd280dee0d4891ff81bd88d263b5
17,091
from typing import Union
from typing import List
from typing import Tuple
from typing import Dict


def add_weight_decay(
    model: nn.Module, weight_decay: float = 1e-5, skip_list: Union[List, Tuple] = ()
) -> List[Dict]:
    """Helper function to not decay weights in BatchNorm layers

    Source: https://discuss.pytorch.org/t/weight-decay-in-the-optimizers-is-a-bad-idea-especially-with-batchnorm/16994/3
    """
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if len(param.shape) == 1 or name in skip_list:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {"params": no_decay, "weight_decay": 0.0},
        {"params": decay, "weight_decay": weight_decay},
    ]
27efae02eaaf0bdc94f3763c1069165c47e08acb
17,093
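A minimal usage sketch for add_weight_decay above (not part of the original snippet): the toy model and hyperparameters are illustrative only, assuming PyTorch is installed and the function is in scope.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.BatchNorm1d(16), nn.ReLU(), nn.Linear(16, 2))
# 1-D parameters (BatchNorm weights/biases, Linear biases) land in the no-decay group.
param_groups = add_weight_decay(model, weight_decay=1e-5)
optimizer = torch.optim.SGD(param_groups, lr=0.1)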
def find_bands_hdu(hdu_list, hdu):
    """Discover the extension name of the BANDS HDU.

    Parameters
    ----------
    hdu_list : `~astropy.io.fits.HDUList`
    hdu : `~astropy.io.fits.BinTableHDU` or `~astropy.io.fits.ImageHDU`

    Returns
    -------
    hduname : str
        Extension name of the BANDS HDU. None if no BANDS HDU was found.
    """
    if "BANDSHDU" in hdu.header:
        return hdu.header["BANDSHDU"]

    has_cube_data = False

    if (
        isinstance(hdu, (fits.ImageHDU, fits.PrimaryHDU))
        and hdu.header.get("NAXIS", None) == 3
    ):
        has_cube_data = True
    elif isinstance(hdu, fits.BinTableHDU):
        if (
            hdu.header.get("INDXSCHM", "") in ["EXPLICIT", "IMPLICIT", ""]
            and len(hdu.columns) > 1
        ):
            has_cube_data = True

    if has_cube_data:
        if "EBOUNDS" in hdu_list:
            return "EBOUNDS"
        elif "ENERGIES" in hdu_list:
            return "ENERGIES"

    return None
3b170109d199482c651861764b0ec21a44aa7933
17,094
def read_raw_binary_file(file_path):
    """can actually be any file"""
    with open(file_path, 'rb') as f:
        return f.read()
b03bc1d4c00f9463ded0ea022023e66fd298a7ad
17,095
def encode_cl_value(entity: CLValue) -> dict:
    """Encodes a CL value."""
    def _encode_parsed(type_info: CLType) -> str:
        if type_info.typeof in TYPES_NUMERIC:
            return str(int(entity.parsed))
        elif type_info.typeof == CLTypeKey.BYTE_ARRAY:
            return entity.parsed.hex()
        elif type_info.typeof == CLTypeKey.PUBLIC_KEY:
            return entity.parsed.account_key.hex()
        elif type_info.typeof == CLTypeKey.UREF:
            return entity.parsed.as_string()
        elif type_info.typeof == CLTypeKey.OPTION:
            return _encode_parsed(type_info.inner_type)
        else:
            return str(entity.parsed)

    return {
        "bytes": serialisation.to_bytes(entity).hex(),
        "cl_type": encode_cl_type(entity.cl_type),
        "parsed": _encode_parsed(entity.cl_type),
    }
09d75f9552347e4fd121dcd1a57f26ac46756870
17,096
def escape(string):
    """
    Escape a passed string so that we can send it to the
    regular expressions engine.
    """
    ret = None

    def replfunc(m):
        if (m[0] == "\\"):
            return("\\\\\\\\")
        else:
            return("\\\\" + m[0])

    # @note - I had an issue getting replfunc to be called in
    #         javascript correctly when I didn't use this pragma;
    #         not sure if I was just doing it wrong or what
    __pragma__(
        'js', '{}',
        '''
        var r = /[^A-Za-z:;\d]/g;
        ret = string.replace(r, replfunc);
        ''')

    if (ret is not None):
        return(ret)
    else:
        raise Exception("Failed to escape the passed string")
c2682757fec2ddaefb32bb792fee44dd63c539fd
17,097
def batch_apply(fn, inputs):
    """Folds time into the batch dimension, runs fn() and unfolds the result.

    Args:
      fn: Function that takes as input the n tensors of the tf.nest structure,
        with shape [time*batch, <remaining shape>], and returns a tf.nest
        structure of batched tensors.
      inputs: tf.nest structure of n [time, batch, <remaining shape>] tensors.

    Returns:
      tf.nest structure of [time, batch, <fn output shape>]. Structure is
        determined by the output of fn.
    """
    time_to_batch_fn = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
    batched = tf.nest.map_structure(time_to_batch_fn, inputs)
    output = fn(*batched)
    prefix = [int(tf.nest.flatten(inputs)[0].shape[0]), -1]
    batch_to_time_fn = lambda t: tf.reshape(t, prefix + t.shape[1:].as_list())
    return tf.nest.map_structure(batch_to_time_fn, output)
4cc220a7891f236dc6741e9c203862c5ee33e978
17,098
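A hedged demonstration of batch_apply above, assuming TensorFlow 2.x; shapes and the toy layer are illustrative only.

import tensorflow as tf

inputs = tf.zeros([5, 3, 4])               # [time=5, batch=3, features=4]
dense = tf.keras.layers.Dense(7)
# fn sees a single [15, 4] tensor; the result is unfolded back to [5, 3, 7].
output = batch_apply(lambda t: dense(t), inputs)
print(output.shape)                        # (5, 3, 7)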
def grab_haul_list(creep: Creep, roomName, totalStructures, add_storage=False):
    """
    Build the list of structures the hauler above should fill with energy.
    :param creep:
    :param roomName: room name.
    :param totalStructures: same as all_structures in the main body
    :param add_storage: include the storage? Not needed unless priority == 0.
    :return: list of targets for the hauler to fill with energy
    """
    # defining structures to fill the energy on. originally above of this spot but replaced for cpu eff.
    # towers only fill to 80% since they're gonna repair here and there all the time.
    structures = totalStructures.filter(
        lambda s: ((s.structureType == STRUCTURE_SPAWN
                    or s.structureType == STRUCTURE_EXTENSION)
                   and s.energy < s.energyCapacity)
                  or (s.structureType == STRUCTURE_TOWER
                      and s.energy < s.energyCapacity * 0.8)
                  or (s.structureType == STRUCTURE_TERMINAL
                      and s.store[RESOURCE_ENERGY] < 10000))

    # Add the storage when there is still room to put energy into it.
    # Old criterion: storage free space >= max_energy.
    # Changed: storage simply has free space left.
    if add_storage:
        structures.extend(totalStructures.filter
                          (lambda s: s.structureType == STRUCTURE_STORAGE
                           # and s.storeCapacity - _.sum(s.store) >= Game.rooms[roomName].memory.structure_type[max_energy]))
                           and s.storeCapacity - _.sum(s.store) > 0))

    # Are we filling the nuker with energy?
    if Memory.rooms[roomName].options and Memory.rooms[roomName].options.fill_nuke:
        nuke_structure_add = totalStructures.filter(
            lambda s: s.structureType == STRUCTURE_NUKER and s.energy < s.energyCapacity)
        structures.extend(nuke_structure_add)

    # Are we filling the labs with energy?
    if Memory.rooms[roomName].options and Memory.rooms[roomName].options.fill_labs:
        structure_add = totalStructures \
            .filter(lambda s: s.structureType == STRUCTURE_LAB and s.energy < s.energyCapacity)
        structures.extend(structure_add)

    container = []
    # for_upgrade: a container kept in case the storage sits far away from the controller.
    # Only searched below RCL 8 and when the creep can carry energy.
    if Game.rooms[roomName].controller.level < 8 and creep.store.getCapacity(RESOURCE_ENERGY):
        for rcont in Game.rooms[roomName].memory[STRUCTURE_CONTAINER]:
            cont_obj = Game.getObjectById(rcont.id)
            if not cont_obj:
                continue
            # Is it an upgrade container and not a harvest one? Then the hauler fills it,
            # but only while it is less than 2/3 full.
            if rcont.for_upgrade and not rcont.for_harvest \
                    and cont_obj.store.getUsedCapacity() < cont_obj.store.getCapacity() * 2 / 3:
                # Except: skip it when the room is at storage level (RCL 4+) but has no storage yet,
                # or when the room's energy is not nearly full.
                if 4 <= creep.room.controller.level \
                        and not Game.getObjectById(creep.memory.upgrade_target).room.storage \
                        or creep.room.energyAvailable < creep.room.energyCapacityAvailable * .95:
                    continue
                container.append(Game.getObjectById(rcont.id))
    structures.extend(container)
    return structures
d1d944c221089363a7e546bdc03dd51cd178fc35
17,099
def target(x, seed, instance):
    """A target function for dummy testing of TA;
    perform x^2 for easy result calculations in checks.
    """
    # Return x[i] (with brackets) so we pass the value, not the
    # np array element
    return x[0] ** 2, {'key': seed, 'instance': instance}
131560778f51ebd250a3077833859f7e5addeb6e
17,101
def generate(fspec, count, _fuel=None):
    """Generate <count> number of random passwords/passphrases.

    The passphrases are formatted according to <fspec>.

    Returned value is (list, json_data), where list is a <count>-element
    sequence of pairs of (password, reading hint for password).
    json_data is a dict at least containing the following keys:
      key 'diag': (str) message for diagnostics,
      key 'entropy': (float) estimated entropy of generated passphrases,
      key 'elements': list of sequences of elements of generated passphrases.

    Raises BadFormatError if fspec is either bad or not able to be satisfied.
    """
    diag = []
    fspec, entropy = _parse_fspec(fspec, diag=diag, _fuel=_fuel)
    if count < 1:
        raise BadFormatError('bad count of passwords specified')
    fspec, entropy = _resolve_entropy(fspec, entropy, diag=diag, _fuel=_fuel)

    elements = []
    result = []
    for ncount in range(count):
        o = []

        def elem(e, f, o, h, c=None, ct=1):
            d = {'entropy': e, 'separator': f, 'password': o, 'hint': h}
            if c is not None:
                d['corpus_source'] = str(c)
            if not f:
                d['repeat_count'] = ct
            return d

        def proc(filling, i, sep, wl, ct):
            initial = not filling and i == 0
            e1 = wl.entropy()
            if wl.is_words:
                intersep = sep if sep is not None else " "
                presep = "" if initial else sep if sep is not None else " "
                for c in range(0, ct):
                    w = wl.get_randomly()
                    s = presep if c == 0 else intersep
                    sh = " " if (s == "" and c != 0) else s
                    if sh:
                        o.append(elem(0.0, True, s, sh, None))
                    o.append(elem(e1, False, w.word, w.hint, wl))
            else:
                if ct != 0:
                    intersep = ""
                    presep = "" if initial else sep
                    if presep:
                        o.append(elem(0.0, True, presep, presep, None))
                    ow = []
                    oh = []
                    for c in range(0, ct):
                        w = wl.get_randomly()
                        ow.append(w.word)
                        oh.append(w.hint)
                    o.append(elem(ct * e1, False, "".join(ow), "".join(oh), wl, ct=ct))

        for i, s in enumerate(fspec):
            proc(False, i, *s)

        o_word = "".join(x['password'] for x in o)
        o_hint = "".join(x['hint'] for x in o)
        elements.append(o)
        result.append((o_word, o_hint))

    return result, {'passwords': result,
                    'elements': elements,
                    'diag': "\n".join(diag),
                    'entropy': entropy}
aad44a80a648d192c696ebdd44ceefadd21d88cd
17,102
def convert_to_valid_einsum_chars(einsum_str):
    """Convert the str ``einsum_str`` to contain only the alphabetic characters
    valid for numpy einsum.
    """
    # partition into valid and invalid sets
    valid, invalid = set(), set()
    for x in einsum_str:
        (valid if is_valid_einsum_char(x) else invalid).add(x)

    # get replacements for invalid chars that are not already used
    available = gen_unused_symbols(valid, len(invalid))

    # map invalid to available and replace in the inputs
    replacer = dict(zip(invalid, available))
    return "".join(replacer.get(x, x) for x in einsum_str)
2cdd67bc967a12bd3dcb80f323f093cd9eff7213
17,103
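A quick, hedged example of convert_to_valid_einsum_chars above; the exact replacement characters depend on gen_unused_symbols, so the output shown is only indicative.

spec = "aΩ,Ωb->ab"                           # 'Ω' is not a valid numpy einsum symbol
print(convert_to_valid_einsum_chars(spec))   # e.g. "ac,cb->ab"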
def prop_GAC(csp, newVar=None):
    """
    Do GAC propagation. If newVar is None we do initial GAC enforce,
    processing all constraints. Otherwise we do GAC enforce with
    constraints containing newVar on the GAC Queue.
    """
    constraints = csp.get_cons_with_var(newVar) if newVar else csp.get_all_cons()
    pruned = []
    # NOTE: although <constraints> is a list, the order is unimportant and it acts like a set.
    # See page 209 of RN textbook
    while constraints != []:
        constraint = constraints.pop(0)  # grab the first constraint
        for var in constraint.get_unasgn_vars():  # get_scope()?
            for val in var.cur_domain():
                if not constraint.has_support(var, val):
                    # Check if we have already pruned (var, val)
                    if (var, val) not in pruned:
                        var.prune_value(val)
                        pruned.append((var, val))
                        # We have modified var's domain, so add back all constraints
                        # that have var in its scope
                        for c in csp.get_cons_with_var(var):
                            if c not in constraints:
                                constraints.append(c)
                        # Check if var's domain is empty
                        if var.cur_domain_size() == 0:
                            return False, pruned
    return True, pruned
a1c576cfd9920a51eb9b9884bd49b4e8f4194d02
17,104
def submit_only_kwargs(kwargs):
    """Strip out kwargs that are not used in submit"""
    kwargs = kwargs.copy()
    for key in ['patience', 'min_freq', 'max_freq', 'validation',
                "max_epochs", "epoch_boost", "train_size", "valid_size"]:
        _ = kwargs.pop(key, None)
    return kwargs
e93a4b8921c5b80bb487caa6057c1ff7c1701305
17,106
def make_simple_boundary(outline_edge_group: UniqueEdgeList, all_edges: UniqueEdgeList):
    """
    Step 3 recursive
    :param outline_edge_group: A list of edges, grouped by connectivity between edges.
    :param all_edges:
    :return: ???
    """
    while len(all_edges.edge_list) > 0:
        current_edge = all_edges.edge_list[0]

        work = False

        neighbors = all_edges.get_neighbor_indices_for_edge(current_edge)

        # Loop against all neighboring edges, gobble up the neighbors.
        for neighbor in neighbors:
            neighbor_edge = all_edges.edge_list[neighbor]

            if not Edge.same_edge(current_edge, neighbor_edge):
                shared_vertex = Edge.has_shared_vertex(current_edge, neighbor_edge)
                parallel = Edge.are_parallel_or_anti_parallel(current_edge, neighbor_edge)
                if shared_vertex is not None and parallel:
                    # Case 1.
                    start_vertex = [neighbor_edge.x1, neighbor_edge.y1, neighbor_edge.z1]

                    # Case 2.
                    if (neighbor_edge.x1 == shared_vertex[0] and
                            neighbor_edge.y1 == shared_vertex[1] and
                            neighbor_edge.z1 == shared_vertex[2]):
                        start_vertex = [neighbor_edge.x2, neighbor_edge.y2, neighbor_edge.z2]

                    # Case 3.
                    end_vertex = [current_edge.x1, current_edge.y1, current_edge.z1]

                    # Case 4.
                    if (current_edge.x1 == shared_vertex[0] and
                            current_edge.y1 == shared_vertex[1] and
                            current_edge.z1 == shared_vertex[2]):
                        end_vertex = [current_edge.x2, current_edge.y2, current_edge.z2]

                    new_edge = Edge(start_vertex[0], start_vertex[1], start_vertex[2],  # Edge start
                                    end_vertex[0], end_vertex[1], end_vertex[2])  # Edge end

                    all_edges.remove(current_edge)
                    all_edges.remove(neighbor_edge)
                    all_edges.add(new_edge)
                    work = True
                    break

        if not work and len(all_edges.edge_list) > 0:
            outline_edge_group.add(current_edge)
            all_edges.remove(current_edge)

    return outline_edge_group
fd3dfd40302d2f01126032c9420fd7b990d30cc6
17,107
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    job = HassJob(action)

    if config[CONF_TYPE] == "turn_on":
        entity_id = config[CONF_ENTITY_ID]

        @callback
        def _handle_event(event: Event):
            if event.data[ATTR_ENTITY_ID] == entity_id:
                hass.async_run_hass_job(
                    job,
                    {"trigger": {**config, "description": f"{DOMAIN} - {entity_id}"}},
                    event.context,
                )

        return hass.bus.async_listen(EVENT_TURN_ON, _handle_event)

    return lambda: None
5cf362c7dc0b82f562164141ccf76f30dd1a0169
17,110
import pathlib


def imread(image_path, as_uint8=True):
    """Read an image as numpy array.

    Args:
        image_path (str or pathlib.Path):
            File path (including extension) to read image.
        as_uint8 (bool):
            Read an image in uint8 format.

    Returns:
        :class:`numpy.ndarray`:
            Image array of dtype uint8, MxNx3.

    Examples:
        >>> from tiatoolbox import utils
        >>> img = utils.misc.imread('ImagePath.jpg')
    """
    if isinstance(image_path, pathlib.Path):
        image_path = str(image_path)
    if pathlib.Path(image_path).suffix == ".npy":
        image = np.load(image_path)
    else:
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    if as_uint8:
        return image.astype(np.uint8)

    return image
30050f63e43d862cf512994f0e9d21c187b1ac0a
17,111
def pk(obj):
    """
    A helper that gets the primary key of a model instance if one is passed in.
    If not, this returns the parameter itself.

    This allows functions to have parameters that accept either a primary key
    or model instance.

    For example:

    ``` python
    def get_translations(target_locale):
        return Translation.objects.filter(target_locale=pk(target_locale))

    # Both of these would be valid calls
    get_translations(Locale.objects.get(id=1))
    get_translations(1)
    ```

    Args:
        obj (Model | any): A model instance or primary key value.

    Returns:
        any: The primary key of the model instance, or value of `obj` parameter.
    """
    if isinstance(obj, models.Model):
        return obj.pk
    else:
        return obj
431f518fe6d53e979543e4588a1d7389d7100d69
17,112
import requests
import json


def get_dataset(id):
    """Query for existence of dataset by ID."""
    uu = UrlUtils()
    es_url = uu.rest_url
    #es_index = "{}_{}_s1-ifg".format(uu.grq_index_prefix, version)
    es_index = "grq"

    # query
    query = {
        "query": {
            "wildcard": {
                "_id": id
            }
        }
    }

    logger.info(query)

    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    logger.info("search_url : %s" % search_url)

    r = requests.post(search_url, data=json.dumps(query))

    if r.status_code != 200:
        logger.info("Failed to query %s:\n%s" % (es_url, r.text))
        logger.info("query: %s" % json.dumps(query, indent=2))
        logger.info("returned: %s" % r.text)
        r.raise_for_status()

    result = r.json()
    logger.info(result['hits']['total'])
    return result
cfb31da0e23b7e197af1919fa34fa3f2d4fa1dfe
17,113
def bags_with_gold(parents_of, _):
    """
    Starting from leaf = 'gold', find recursively its parents up to the root
    and add them to a set.
    Number of bags that could contain gold = length of the set
    """
    contains_gold = set()

    def find_roots(bag):
        for outer_bag in parents_of[bag]:
            contains_gold.add(outer_bag)
            find_roots(outer_bag)

    find_roots('shiny gold')
    return len(contains_gold)
3fd2b1c260d41867a5787a14f0c50a9b5d1a2f08
17,114
def process_file(file_path):
    """
    This function processes the submitted file
    :return: A dictionary of errors found in the file. If there are no errors,
        then only the error report headers will be in the results.
    """
    enc = detect_bom_encoding(file_path)
    if enc is None:
        with open(file_path, 'r') as f:
            result = run_checks(file_path, f)
    else:
        with open(file_path, 'r', encoding=enc) as f:
            result = run_checks(file_path, f)
    print('Finished processing %s\n' % file_path)
    return result
29b25b9a1ac950b2b0d051a6748ebc78b31bad10
17,115
import requests
from requests.exceptions import HTTPError  # needed for the except clause below


def get_request_body(text, api_key, *args):
    """
    send a request and return the response body parsed as dictionary

    @param text: target text that you want to detect its language
    @type text: str
    @type api_key: str
    @param api_key: your private API key
    """
    if not api_key:
        raise Exception(
            "you need to get an API_KEY for this to work. "
            "Get one for free here: https://detectlanguage.com/documentation")
    if not text:
        raise Exception("Please provide an input text")
    else:
        try:
            headers = config['headers']
            headers['Authorization'] = headers['Authorization'].format(api_key)
            response = requests.post(config['url'],
                                     json={'q': text},
                                     headers=headers)
            body = response.json().get('data')
            return body
        except HTTPError as e:
            print("Error occurred while requesting from server: ", e.args)
            raise e
e21e8733eec00bc78616b18a8d93c18dc2b20449
17,116
def generate_sample_task(project):
    """ Generate task example for upload and check it with serializer validation

    :param project: project with label config
    :return: task dict
    """
    task = generate_sample_task_without_check(project.label_config)

    # check generated task
    '''if project:
        try:
            TaskSerializer.check_data(project, task)
        except ValidationError as e:
            raise ValidationError(str(e) +
                                  ': task example = ' + json.dumps(task) +
                                  ', project config = ' + project.label_config +
                                  ', project data_types = ' + json.dumps(project.data_types))'''
    return task
1e3259b320e46a938139b0dde8ed5b999290d6cd
17,117
def serializable_value(self, field_name):
    """
    Returns the value of the field name for this instance. If the field is
    a foreign key, returns the id value, instead of the object. If there's
    no Field object with this name on the model, the model attribute's
    value is returned directly.

    Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
    and not use this method.
    """
    try:
        field = self._admin_opts.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        return getattr(self, field_name)
    return getattr(self, field.name)
839d59e92c4249359367d07700bd55f80eafe98b
17,118
import pandas


def fetch_fi2010(normalization=None) -> pandas.DataFrame:
    """
    Load the FI2010 dataset with no auction.

    Benchmark Dataset for Mid-Price Forecasting of Limit Order Book Data
    with Machine Learning Methods.
    A Ntakaris, M Magris, J Kanniainen, M Gabbouj, A Iosifidis.
    arXiv:1705.03233 [cs.CE]. https://arxiv.org/abs/1705.03233

    Parameters
    ----------
    normalization : {"zscore", None}
        Normalization method.
    """
    if normalization is None:
        url = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data.csv"
        return pandas.read_csv(url, index_col=0)
    if normalization == "zscore":
        url1 = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data_zscore1.csv"
        url2 = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data_zscore2.csv"
        return pandas.concat(
            [pandas.read_csv(url1, index_col=0), pandas.read_csv(url2, index_col=0)]
        )
bb6e6e484d6d3d3d1d831a9194fe4f629b820db8
17,119
def get_netcdf_filename(batch_idx: int) -> str:
    """Generate full filename, excluding path."""
    assert 0 <= batch_idx < 1e6
    return f"{batch_idx:06d}.nc"
5d916c4969eb96653ea9f0a21ab8bec93ebcfafa
17,120
from math import pi  # assumed import; ``pi`` is used below


def area_calc(radius, point_in, total_points):
    """Calculates the partial area of ball

    :param radius: radius of ball
    :param point_in: points of the total points to include
    :param total_points: number of sampled points
    :return: area
    """
    return (4 * pi * radius ** 2) * (point_in / total_points)
a660776a4f4a1d2d04a28255b7ee0892ddc5d136
17,122
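A hedged Monte Carlo illustration of area_calc above: estimating the area of a spherical cap by uniform sampling on the sphere. The sampling scheme is made up for demonstration; only area_calc itself comes from the snippet.

import numpy as np
from math import pi

rng = np.random.default_rng(0)
total_points = 100_000
# Uniform points on a sphere of radius 2 via normalized Gaussian samples.
pts = rng.normal(size=(total_points, 3))
pts = 2 * pts / np.linalg.norm(pts, axis=1, keepdims=True)
point_in = int(np.sum(pts[:, 2] > 1))  # points inside the cap z > 1
# Exact cap area is 2*pi*R*h = 4*pi ≈ 12.57; the estimate should be close.
print(area_calc(radius=2, point_in=point_in, total_points=total_points))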
import gettext
import math


def results_framework_export(request, program):
    """Returns .XLSX containing program's results framework"""
    program = Program.rf_aware_objects.get(pk=program)
    wb = openpyxl.Workbook()
    wb.remove(wb.active)
    ws = wb.create_sheet(gettext("Results Framework"))
    get_font = lambda attrs: styles.Font(**{**{'name': 'Calibri', 'size': 12}, **attrs})
    ws.cell(row=2, column=2).value = gettext("Results Framework")
    ws.cell(row=2, column=2).font = get_font({'size': 18, 'bold': True})
    ws.cell(row=3, column=2).value = program.name
    ws.cell(row=3, column=2).font = get_font({'size': 18})
    level_span_style = styles.NamedStyle(name='level_span')
    level_span_style.font = get_font({})
    level_span_style.alignment = styles.Alignment(wrap_text=True, vertical='center', horizontal='center')
    level_span_style.fill = styles.PatternFill('solid', 'E5E5E5')
    wb.add_named_style(level_span_style)
    level_single_style = styles.NamedStyle(name='level_no_span')
    level_single_style.font = get_font({})
    level_single_style.alignment = styles.Alignment(wrap_text=True, vertical='top', horizontal='left')
    level_single_style.fill = styles.PatternFill('solid', 'E5E5E5')
    wb.add_named_style(level_single_style)
    bottom_tier = program.level_tiers.count()

    def row_height_getter(cell):
        lines_of_text = str(cell.value).splitlines()
        row = cell.row

        def get_row_height_decorated(w):
            lines = sum([math.ceil(len(s)/w) or 1 for s in lines_of_text])
            height = 26 + lines * 15
            if lines == 1:
                height = 30
            return max(height, ws.row_dimensions[row].height or 0, 30)
        return get_row_height_decorated

    def write_level(parent, start_row, start_column):
        levels = program.levels.filter(parent=parent).order_by('customsort')
        column = start_column
        row = start_row
        if not levels:
            return column + 2
        for level in levels:
            current_column = column
            cell = ws.cell(row=row, column=column)
            cell.value = level.display_name
            get_row_height = row_height_getter(cell)
            if level.level_depth == bottom_tier:
                cell.style = 'level_no_span'
                row = row + 2
                ws.row_dimensions[cell.row].height = get_row_height(24)
            else:
                column = write_level(level, row+2, column)
                if column - 2 <= current_column:
                    cell.style = 'level_no_span'
                    ws.row_dimensions[cell.row].height = get_row_height(24)
                else:
                    cell.style = 'level_span'
                    ws.merge_cells(start_row=row, end_row=row,
                                   start_column=current_column, end_column=column-2)
                    width = 24 + 29 * ((column - 2 - current_column) / 2)
                    ws.row_dimensions[cell.row].height = get_row_height(width)
            if parent and parent.level_depth == bottom_tier-1:
                column = column + 2
        if parent is None:
            for column in range(column):
                width = 24.5 if (column + 1) % 2 == 0 else 3
                ws.column_dimensions[utils.get_column_letter(column + 1)].width = width
            for r in range(3, ws.max_row+2):
                if r % 2 == 0:
                    ws.row_dimensions[r].height = 10
        return column

    write_level(None, 5, 2)
    filename = "Results Framework.xlsx"
    response = HttpResponse(content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    wb.save(response)
    return response
79cf9f84243f089dd463909f9f64d13a1bb39444
17,123
import copy


def generate_subwindow(pc, sample_bb, scale, offset=2, oriented=True):
    """
    generating the search area using the sample_bb

    :param pc:
    :param sample_bb:
    :param scale:
    :param offset:
    :param oriented: use oriented or axis-aligned cropping
    :return:
    """
    rot_mat = np.transpose(sample_bb.rotation_matrix)
    trans = -sample_bb.center

    if oriented:
        new_pc = PointCloud(pc.points.copy())
        box_tmp = copy.deepcopy(sample_bb)

        # transform to the coordinate system of sample_bb
        new_pc.translate(trans)
        box_tmp.translate(trans)
        new_pc.rotate(rot_mat)
        box_tmp.rotate(Quaternion(matrix=rot_mat))

        new_pc = crop_pc_axis_aligned(new_pc, box_tmp, scale=scale, offset=offset)
    else:
        new_pc = crop_pc_axis_aligned(pc, sample_bb, scale=scale, offset=offset)

        # transform to the coordinate system of sample_bb
        new_pc.translate(trans)
        new_pc.rotate(rot_mat)

    return new_pc
af86fdd4409f98ccd503a9587a6e4b19b0763a31
17,124
def tsfigure(num=None, figsize=None, dpi=None, facecolor=None, edgecolor=None,
             frameon=True, subplotpars=None, FigureClass=TSFigure):
    """
    Creates a new :class:`TimeSeriesFigure` object.

    Parameters
    ----------
    num : {None, int}, optional
        Number of the figure.
        If None, a new figure is created and ``num`` is incremented.
    %(figsize)s
    %(dpi)s
    %(facecolor)s
    %(edgecolor)s
    %(frameon)s
    %(subplotpars)s
    FigureClass : FigureClass
        Class of the figure to create
    """
    figargs = dict(num=num, figsize=figsize, dpi=dpi, facecolor=facecolor,
                   edgecolor=edgecolor,  # was silently dropped; pass the documented parameter through
                   frameon=frameon, FigureClass=FigureClass,
                   subplotpars=subplotpars)
    fig = pylab.figure(**figargs)
    return fig
578b8299ea8b7b8eb05a1f0e68ce4b1f1dca4682
17,125
import zipfile
import json


def load_predict_result(predict_filename):
    """Loads the file to be predicted"""
    predict_result = {}
    ret_code = SUCCESS
    try:
        predict_file_zip = zipfile.ZipFile(predict_filename)
    except:
        ret_code = FILE_ERROR
        return predict_result, ret_code
    for predict_file in predict_file_zip.namelist():
        for line in predict_file_zip.open(predict_file):
            try:
                line = line.decode('utf8').strip()
            except:
                ret_code = ENCODING_ERROR
                return predict_result, ret_code
            try:
                json_info = json.loads(line)
            except:
                ret_code = JSON_ERROR
                return predict_result, ret_code
            if 'text' not in json_info or 'spo_list' not in json_info:
                ret_code = SCHEMA_ERROR
                return predict_result, ret_code
            sent = json_info['text']
            spo_set = set()
            for spo_item in json_info['spo_list']:
                if type(spo_item) is not dict or 'subject' not in spo_item \
                        or 'predicate' not in spo_item \
                        or 'object' not in spo_item or \
                        not isinstance(spo_item['subject'], basestring) or \
                        not isinstance(spo_item['object'], basestring):
                    ret_code = SCHEMA_ERROR
                    return predict_result, ret_code
                s = del_bookname(spo_item['subject'].lower())
                o = del_bookname(spo_item['object'].lower())
                spo_set.add((s, spo_item['predicate'], o))
            predict_result[sent] = spo_set
    return predict_result, ret_code
c26cc24fcdcaa774d05ed6963f66cae346617f46
17,126
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.
    """
    if request.method == verb:
        if hasattr(request, '_post'):
            del(request._post)
            del(request._files)
        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb

        setattr(request, verb, request.POST)

    return request
3c304d07ab04950ac65f58405acc3103a3b64dcf
17,128
def close(x, y, rtol, atol):
    """Returns True if x and y are sufficiently close.

    Parameters
    ----------
    x, y
        The values to compare.
    rtol
        The relative tolerance.
    atol
        The absolute tolerance.
    """
    # assumes finite weights
    return abs(x-y) <= atol + rtol * abs(y)
bd2597c0c94f2edf686d0dc9772288312cb36d83
17,129
import warnings


def plot_precip_field(
    precip,
    ptype="intensity",
    ax=None,
    geodata=None,
    units="mm/h",
    bbox=None,
    colorscale="pysteps",
    probthr=None,
    title=None,
    colorbar=True,
    axis="on",
    cax=None,
    map_kwargs=None,
    **kwargs,
):
    """
    Function to plot a precipitation intensity or probability field with a
    colorbar.

    .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes

    .. _SubplotSpec: https://matplotlib.org/api/_as_gen/matplotlib.gridspec.SubplotSpec.html

    Parameters
    ----------
    precip: array-like
        Two-dimensional array containing the input precipitation field or an
        exceedance probability map.
    ptype: {'intensity', 'depth', 'prob'}, optional
        Type of the map to plot: 'intensity' = precipitation intensity field,
        'depth' = precipitation depth (accumulation) field,
        'prob' = exceedance probability field.
    geodata: dictionary or None, optional
        Optional dictionary containing geographical information about
        the field. Required if map is not None.

        If geodata is not None, it must contain the following key-value pairs:

        .. tabularcolumns:: |p{1.5cm}|L|

        +-----------------+---------------------------------------------------+
        |        Key      |                  Value                            |
        +=================+===================================================+
        |   projection    | PROJ.4-compatible projection definition           |
        +-----------------+---------------------------------------------------+
        |    x1           | x-coordinate of the lower-left corner of the data |
        |                 | raster                                            |
        +-----------------+---------------------------------------------------+
        |    y1           | y-coordinate of the lower-left corner of the data |
        |                 | raster                                            |
        +-----------------+---------------------------------------------------+
        |    x2           | x-coordinate of the upper-right corner of the     |
        |                 | data raster                                       |
        +-----------------+---------------------------------------------------+
        |    y2           | y-coordinate of the upper-right corner of the     |
        |                 | data raster                                       |
        +-----------------+---------------------------------------------------+
        |    yorigin      | a string specifying the location of the first     |
        |                 | element in the data raster w.r.t. y-axis:         |
        |                 | 'upper' = upper border, 'lower' = lower border    |
        +-----------------+---------------------------------------------------+

    units : {'mm/h', 'mm', 'dBZ'}, optional
        Units of the input array. If ptype is 'prob', this specifies the unit
        of the intensity threshold.
    bbox : tuple, optional
        Four-element tuple specifying the coordinates of the bounding box. Use
        this for plotting a subdomain inside the input grid. The coordinates
        are of the form (lower left x, lower left y, upper right x,
        upper right y). If 'geodata' is not None, the bbox is in map
        coordinates, otherwise it represents image pixels.
    colorscale : {'pysteps', 'STEPS-BE', 'BOM-RF3'}, optional
        Which colorscale to use. Applicable if units is 'mm/h', 'mm' or 'dBZ'.
    probthr : float, optional
        Intensity threshold to show in the color bar of the exceedance
        probability map. Required if ptype is "prob" and colorbar is True.
    title : str, optional
        If not None, print the title on top of the plot.
    colorbar : bool, optional
        If set to True, add a colorbar on the right side of the plot.
    axis : {'off','on'}, optional
        Whether to turn off or on the x and y axis.
    cax : Axes_ object, optional
        Axes into which the colorbar will be drawn. If no axes is provided
        the colorbar axes are created next to the plot.

    Other parameters
    ----------------
    map_kwargs: dict
        Optional parameters that need to be passed to
        :py:func:`pysteps.visualization.basemaps.plot_geography`.

    Returns
    -------
    ax : fig Axes_
        Figure axes. Needed if one wants to add e.g. text inside the plot.
    """
    if map_kwargs is None:
        map_kwargs = {}

    if "type" in kwargs:
        warnings.warn(
            "The 'type' keyword use to indicate the type of plot will be "
            "deprecated in version 1.6. Use 'ptype' instead."
        )
        ptype = kwargs.get("type")

    if ptype not in PRECIP_VALID_TYPES:
        raise ValueError(
            f"Invalid precipitation type '{ptype}'."
            f"Supported: {str(PRECIP_VALID_TYPES)}"
        )

    if units not in PRECIP_VALID_UNITS:
        raise ValueError(
            f"Invalid precipitation units '{units}."
            f"Supported: {str(PRECIP_VALID_UNITS)}"
        )

    if ptype == "prob" and colorbar and probthr is None:
        raise ValueError("ptype='prob' but probthr not specified")

    if len(precip.shape) != 2:
        raise ValueError("The input is not two-dimensional array")

    # Assumes the input dimensions are lat/lon
    nlat, nlon = precip.shape

    x_grid, y_grid, extent, regular_grid, origin = get_geogrid(
        nlat, nlon, geodata=geodata
    )

    ax = get_basemap_axis(extent, ax=ax, geodata=geodata, map_kwargs=map_kwargs)

    precip = np.ma.masked_invalid(precip)

    # plot rainfield
    if regular_grid:
        im = _plot_field(precip, ax, ptype, units, colorscale, extent, origin=origin)
    else:
        im = _plot_field(
            precip, ax, ptype, units, colorscale, extent, x_grid=x_grid, y_grid=y_grid
        )

    plt.title(title)

    # add colorbar
    if colorbar:
        # get colormap and color levels
        _, _, clevs, clevs_str = get_colormap(ptype, units, colorscale)
        if ptype in ["intensity", "depth"]:
            extend = "max"
        else:
            extend = "neither"
        cbar = plt.colorbar(
            im, ticks=clevs, spacing="uniform", extend=extend, shrink=0.8, cax=cax
        )
        if clevs_str is not None:
            cbar.ax.set_yticklabels(clevs_str)

        if ptype == "intensity":
            cbar.set_label(f"Precipitation intensity [{units}]")
        elif ptype == "depth":
            cbar.set_label(f"Precipitation depth [{units}]")
        else:
            cbar.set_label(f"P(R > {probthr:.1f} {units})")

    if geodata is None or axis == "off":
        ax.xaxis.set_ticks([])
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticks([])
        ax.yaxis.set_ticklabels([])

    if bbox is not None:
        ax.set_xlim(bbox[0], bbox[2])
        ax.set_ylim(bbox[1], bbox[3])

    return ax
7e9429310ffdfdb38ac2b6e03b4c846d017060f5
17,130
def get_for_repo(repo, name, default=None):
    """Gets a configuration setting for a particular repository.  Looks for a
    setting specific to the repository, then falls back to a global setting."""
    NOT_FOUND = []  # a unique sentinel distinct from None
    value = get(name, NOT_FOUND, repo)
    if value is NOT_FOUND:
        value = get(name, default, '*')
    return value
5848e4da859f26788ab02b733bc61135c1ea3b80
17,131
from typing import AnyStr


def new_user(tenant: AnyStr, password: AnyStr) -> bool:
    """Return a boolean indicating whether a new tenant was created or not."""
    if not query.get_tenant_id(tenant):
        return True
    return False
ac1bc45213c76712d1ec3553a8545fac5ab67f3a
17,132
def home():
    """
    Home page control code
    :return Rendered page:
    """
    error = request.args.get("error", None)
    state, code = request.args.get("state", None), request.args.get("code", None)
    if code and not has_user() and 'state' in session and session['state'] == state:
        tok = reddit_get_access_token(code)
        username = reddit_get_username(tok)
        session['user'] = username
        session['token'] = tok
        session.modified = True
    session['state'] = str(uuid4())
    session.modified = True
    return render_template('home.html', user=get_user(), error=False,
                           redirect=whisky_recommender.config.REDDIT_REDIRECT,
                           client_id=whisky_recommender.config.REDDIT_CLIENT,
                           state=session['state'])
280f17feff363fa73decfa15bc615aa0c320d3d9
17,133
def is_even(val):
    """
    Confirms if a value is even.

    :param val: Value to be tested.
    :type val: int, float
    :return: True if the number is even, otherwise False.
    :rtype: bool

    Examples:
    --------------------------

    .. code-block:: python

        >>> even_numbers = list(filter(is_even, range(20)))
        >>> print(even_numbers)
        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

        >>> print(is_even(9))
        False

        >>> print(is_even(-2))
        True

        >>> print([value for value in range(20) if is_even(value)])
        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

        >>> print([is_even(value) for value in range(4)])
        [True, False, True, False]
    """
    return (val % 2) == 0
1ef0716e1e86ff77b3234bbd664c6b973352c3ea
17,135
def min_spacing(mylist):
    """
    Find the minimum spacing in the list.

    Args:
        mylist (list): A list of integer/float.

    Returns:
        int/float: Minimum spacing within the list.
    """
    # Set the maximum of the minimum spacing.
    min_space = max(mylist) - min(mylist)

    # Iteratively find a smaller spacing.
    for item in mylist:
        spaces = [abs(item - item2) for item2 in mylist if item != item2]
        min_space = min(min_space, min(spaces))

    # Return the answer.
    return min_space
b8ce0a46bacb7015c9e59b6573bc2fec0252505d
17,137
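A short usage example for min_spacing above (added for illustration):

values = [1, 9, 4, 13]
print(min_spacing(values))  # 3, from the pair (1, 4)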
from math import sqrt  # assumed import; ``sqrt`` is used below


def sign(x):
    """Return the mathematical sign of the particle."""
    if x.imag:
        return x / sqrt(x.imag ** 2 + x.real ** 2)
    return 0 if x == 0 else -1 if x < 0 else 1
0dca727afbc9c805a858c027a8a4e38d59d9d218
17,139
def wrap_strings(lines: [str], line_width: int):
    """Return a list of strings, wrapped to the specified length."""
    i = 0
    while i < len(lines):
        # if a line is over the limit
        if len(lines[i]) > line_width:
            # (try to) find the rightmost occurrence of a space
            # within the first line_width characters
            try:
                split_index = lines[i][:line_width].rindex(" ")
            except ValueError:
                return None

            # split the line at the found space and push the tail down to the next line
            lines.insert(i + 1, lines[i][split_index + 1 :])
            lines[i] = lines[i][:split_index]
        i += 1
    return lines
0a6fa989fd6d27276d2e7d8c91cf8be37f6a3aff
17,140
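A small, hedged demonstration of wrap_strings above; note that it mutates and returns the same list, and returns None when an over-long line has no space to split on.

text = ["The quick brown fox jumps over the lazy dog"]
print(wrap_strings(text, 20))
# ['The quick brown fox', 'jumps over the lazy', 'dog']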
def has_even_parity(message: int) -> bool:
    """ Return true if message has even parity."""
    parity_is_even: bool = True
    while message:
        parity_is_even = not parity_is_even
        message = message & (message - 1)
    return parity_is_even
8982302840318f223e9c1ab08c407d585a725f97
17,141
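A hedged check of has_even_parity above: the loop is Kernighan's trick, where message & (message - 1) clears the lowest set bit each iteration, so the flag flips once per set bit.

for msg in (0b0000, 0b0101, 0b0111):
    print(bin(msg), has_even_parity(msg))
# 0b0 True / 0b101 True (two set bits) / 0b111 False (three set bits)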
def is_primitive(structure):
    """
    Checks if a structure is primitive or not.

    :param structure: AiiDA StructureData
    :return: True if the structure cannot be refined any further,
        False if the structure can be further refined.
    """
    refined_cell = find_primitive_cell(structure)

    prim = False
    if all(x in structure.cell for x in refined_cell.cell):
        prim = True
    return prim
9f7034bb92d3fdd0505a56bc7d53d1528846ef76
17,142
def mock_mkdir(monkeypatch):
    """Mock the mkdir function."""
    def mocked_mkdir(path, mode=0o755):
        return True

    monkeypatch.setattr("charms.layer.git_deploy.os.mkdir", mocked_mkdir)
e4e78ece1b8e60719fe11eb6808f0f2b99a933c3
17,143
def saferepr(obj, maxsize=240):
    """return a size-limited safe repr-string for the given object.
    Failing __repr__ functions of user instances will be represented
    with a short exception info and 'saferepr' generally takes care
    to never raise exceptions itself.  This function is a wrapper
    around the Repr/reprlib functionality of the standard 2.6 lib.
    """
    # review exception handling
    srepr = SafeRepr()
    srepr.maxstring = maxsize
    srepr.maxsize = maxsize
    srepr.maxother = 160
    return srepr.repr(obj)
d02f68581867e64a6586548ab627b6893328c42a
17,145
from typing import Callable, TypeVar

# ``A`` is the generic element type; the original ``from re import A`` mistakenly
# imported the unrelated ``re.A`` regex flag instead of declaring a type variable.
A = TypeVar("A")


def filter(pred: Callable[[A], bool], stream: Stream[A]) -> Stream[A]:
    """Filter a stream of type `A`.

    :param pred: A predicate on type `A`.
    :type pred: `A -> bool`
    :param stream: A stream of type `A` to be filtered.
    :type stream: `Stream[A]`

    :return: A stream of type `A`.
    :rtype: `Stream[A]`
    """
    def _thunk() -> StreamResult[A]:
        next_stream: Stream[A] = stream
        while True:
            next_value, next_stream = next_stream()
            if not pred(next_value):
                continue
            return next_value, filter(pred, next_stream)
    return _thunk
93b3d4c30d4295b2be73200451436c6a4e9ab5cd
17,146
def _get_kernel_size_numel(kernel_size):
    """Determine number of pixels/voxels. ``kernel_size`` must be an ``N``-tuple."""
    if not isinstance(kernel_size, tuple):
        raise ValueError(f"kernel_size must be a tuple. Got {kernel_size}.")
    return _get_numel_from_shape(kernel_size)
fb004817950ece275fc10b4824ee83a1d1b9a6a9
17,147
import uuid


def random():
    """Get a random UUID."""
    return str(uuid.uuid4())
411aeb5254775473b43d3ac4153a27a2f15014cb
17,148
def reautorank(reaumur):
    """This function converts Reaumur to Rankine, with Reaumur as parameter."""
    rankine = (reaumur * 2.25) + 491.67
    return rankine
aec2299999e9798530272939125cb42476f095c3
17,149
def list_pets():
    """Shows list of all pets in db"""
    pets = Pet.query.all()
    return render_template('list.html', pets=pets)
60df575932d98ab04e949d6ef6f1fdfa6734ba92
17,150
import glob


def get_lif_list(path):
    """
    Returns a list of files ending in *.lif in provided folder

    :param: path
    :return: list -- filenames
    """
    path += '/*.lif'
    return glob.glob(path)
8a26d65fc2c69b1007a40ded82225038ead67783
17,152
def compute_distance_matrix(users, basestations):
    """Distances between all users and basestations are calculated.

    Args:
        users: (obj) list of users!
        basestations: (obj) list of basestations!

    Returns:
        (list of) numpy arrays containing the distance between a user and all
        basestations in km!
    """
    coords_list_ue = [getattr(ele, 'coordinates') for ele in users]
    coords_list_bs = [getattr(ele, 'coordinates') for ele in basestations]
    distance_matrix = []
    count = 0
    for _ in coords_list_ue:
        element = [coords_list_ue[count]]
        coords = element + coords_list_bs
        dist = distance.cdist(coords, coords, 'euclidean')
        new_dist = np.delete(dist[0], 0)
        distance_matrix.append(new_dist)
        count += 1
    return np.array(distance_matrix)
07b6175047d7602288436d163f838077e54054fc
17,154
from .core import resolver


def __getattr__(name):
    """Lazy load the global resolver to avoid circular dependencies with plugins."""
    if name in _SPECIAL_ATTRS:
        res = resolver.Resolver()
        res.load_plugins_from_environment()
        _set_default_resolver(res)
        return globals()[name]
    else:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
20dd678be2b9d3f08513912a40098dc8b436ac81
17,155
def img_newt(N, xran=(-3, 3), yran=(-3, 3), tol=1e-5, niter=100):
    """
    Add colors to a matrix according to the fixed point of the given equation.
    """
    sol = [-(np.sqrt(3.0)*1j - 1.0)/2.0, (np.sqrt(3.0)*1j + 1.0)/2.0, -1.0]
    col_newt = np.zeros((N, N, 3))
    Y, X = np.mgrid[yran[0]:yran[1]:N*1j, xran[0]:xran[1]:N*1j]
    for row in range(N):
        for col in range(N):
            x = X[row, col]
            y = Y[row, col]
            xf = newt(x + y*1j, fun, der, tol=tol, niter=niter)
            if abs(xf - sol[0]) < 1e-6:
                col_newt[row, col, :] = colors[0]
            if abs(xf - sol[1]) < 1e-6:
                col_newt[row, col, :] = colors[1]
            if abs(xf - sol[2]) < 1e-6:
                col_newt[row, col, :] = colors[2]
            if abs(xf - 1000) < 1e-6:
                col_newt[row, col, :] = colors[3]
    return col_newt
166aa3c5e144972f7ec825f973885f9b528047f0
17,156
def pack_block_header(hdr: block.BlockHeader,
                      abbrev: bool = False,
                      pretty: bool = False,
                      ) -> str:
    """Pack blockchain to JSON string with b64 for bytes."""
    f = get_b2s(abbrev)
    hdr_ = {'timestamp': f(hdr['timestamp']),
            'previous_hash': f(hdr['previous_hash']),
            'nonce': f(hdr['nonce']),
            'merkle_root': f(hdr['merkle_root']),
            'this_hash': f(hdr['this_hash'])
            }
    return json_dumps(hdr_, pretty)
a6df547918ab82bc990ca915d956730cb6a62b87
17,157
def get_datasets(recipe):
    """Get dataset instances from the recipe.

    Parameters
    ----------
    recipe : dict of dict
        The specifications of the core datasets.

    Returns
    -------
    datasets : dict of datasets
        A dictionary of dataset instances, compatible with torch's
        DataLoader objects.
    """
    # "datasets"
    return {dataset: get_instance(**par) for dataset, par in recipe.items()}
f525cf379f13069a1f5255798d963af3389dd5ed
17,158
import re


def is_sedol(value):
    """Checks whether a string is a valid SEDOL identifier.

    Regex from here: https://en.wikipedia.org/wiki/SEDOL

    :param value: A string to evaluate.
    :returns: True if string is in the form of a valid SEDOL identifier."""
    # bool() so the function returns True/False as documented, not a Match object
    return bool(re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\d$', value))
207ff94a4df99e7a546440cef1242f9a48435118
17,159
def create_substrate(dim):
    """
    The function to create two-sheets substrate configuration with specified
    dimensions of each sheet.
    Arguments:
        dim: The dimensions across X, Y axis of the sheet
    """
    # Building sheet configurations of inputs and outputs
    inputs = create_sheet_space(-1, 1, dim, -1)
    outputs = create_sheet_space(-1, 1, dim, 0)

    substrate = NEAT.Substrate(inputs,
                               [],  # hidden
                               outputs)

    substrate.m_allow_input_output_links = True

    substrate.m_allow_input_hidden_links = False
    substrate.m_allow_hidden_hidden_links = False
    substrate.m_allow_hidden_output_links = False
    substrate.m_allow_output_hidden_links = False
    substrate.m_allow_output_output_links = False
    substrate.m_allow_looped_hidden_links = False
    substrate.m_allow_looped_output_links = False

    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID

    substrate.m_with_distance = True
    substrate.m_max_weight_and_bias = 3.0

    return substrate
9a47bf213d796aecec4b6f630ae30b04dc035d63
17,160
def remove_artefacts(signal: np.array,
                     low_limit: int = 40,
                     high_limit: int = 210) -> np.array:
    """
    Replace artefacts [ultra-low and ultra-high values] with zero

    Args:
        signal: (np.array) 1D signal
        low_limit: (int) filter values below it
        high_limit: (int) filter values above it
    Output:
        (np.array) filtered signal
    """
    # replace artefacts with zero
    signal_new = signal.astype('float')
    signal_new[signal < low_limit] = 0   # replace ultra-small values with 0
    signal_new[signal > high_limit] = 0  # replace ultra-large values with 0
    return signal_new
0b85e929588bd5895a9a84c5d03fce88c4f9f7cb
17,161
def normalizedBGR(im, display=True):
    """
    Generate Opponent color space. O3 is just the intensity
    """
    im = img.norm(im)
    B, G, R = np.dsplit(im, 3)
    b = (B - np.mean(B)) / np.std(B)
    g = (G - np.mean(G)) / np.std(G)
    r = (R - np.mean(R)) / np.std(R)
    out = cv2.merge((np.uint8(img.normUnity(b) * 255),
                     np.uint8(img.normUnity(g) * 255),
                     np.uint8(img.normUnity(r) * 255)))
    if display:
        cv2.imshow('norm bgr', np.hstack((np.uint8(img.normUnity(b) * 255),
                                          np.uint8(img.normUnity(g) * 255),
                                          np.uint8(img.normUnity(r) * 255))))
        cv2.waitKey(0)
    return out, b, g, r
810b4a1ee4d9b5d7f68072c72379fa182b7f34fe
17,162
def feeds(url):
    """
    Tries to find feeds for a given URL.
    """
    url = _full_url(url)
    data = _get(url)

    # Check if the url is a feed.
    if _is_feed(url):
        return [url]

    # Try to get feed links from markup.
    try:
        feed_links = [link for link in _get_feed_links(data, url) if _is_feed(link)]
    except:
        feed_links = []
    if feed_links:
        return feed_links

    # Try 'a' links.
    try:
        links = _get_a_links(data)
    except:
        links = []

    if links:
        # Filter to only local links.
        local_links = [link for link in links if link.startswith(url)]

        # Try to find feed links.
        feed_links.extend(_filter_feed_links(local_links))

        # If still nothing has been found...
        if not feed_links:
            # Try to find feed-looking links.
            feed_links.extend(_filter_feedish_links(local_links))

    # If still nothing has been found...
    if not feed_links:
        # BRUTE FORCE IT!
        guesses = [
            'atom.xml',    # Blogger, TypePad
            'index.atom',  # MoveableType
            'index.rdf',   # MoveableType
            'rss.xml',     # Dave Winer/Manila
            'index.xml',   # MoveableType
            'index.rss',   # Slash
            'feed'         # WordPress
        ]
        tries = [parse.urljoin(url, g) for g in guesses]
        feed_links.extend([link for link in tries if _is_feed(link)])

    # If *still* nothing has been found,
    # just try all the links.
    if links and not feed_links:
        feed_links.extend(_filter_feed_links(links))
        feed_links.extend(_filter_feedish_links(links))

    # Filter out duplicates.
    return list(set(feed_links))
dd16dc751f34fbbf496c9b0142fa5d58372538b2
17,163
def getlog(name):
    """Create logger object with predefined stream handler & formatting

    Parameters
    ----------
    name : str
        module __name__

    Returns
    -------
    logging.logger

    Examples
    --------
    >>> from smseventlog import getlog
    >>> log = getlog(__name__)
    """
    name = '.'.join(str(name).split('.')[1:])

    # can't set name to nothing or that calls the ROOT logger
    if name == '':
        name = 'base'

    return Logger(name)
cd5e0dd4589757e3c8d05614f117b7ce46fe4fb9
17,164
import numpy as np


def replace_nan(x):
    """
    Replaces NaNs in 1D array with nearest finite value.

    Usage: y = replace_nan(x)

    Returns filled array y without altering input array x.
    Assumes input is numpy array.

    3/2015 BWB
    """
    x2 = np.zeros(len(x))
    np.copyto(x2, x)

    # np.flatnonzero replaces pylab's deprecated ``find`` used originally
    bads = np.flatnonzero(np.isnan(x))         # indices of NaNs
    if bads.size == 0:
        return x2
    else:
        fins = np.flatnonzero(np.isfinite(x))  # indices for all finites
        for ii in np.arange(0, bads.size):     # for all NaNs
            # locate index of nearest finite
            diffs = np.abs(fins - bads[ii])
            idx = diffs.argmin()
            # replace NaN with nearest finite
            x2[bads[ii]] = x[fins[idx]]
        return x2
9100a33dcb7d00b38e7a6a53132db8d13682e499
17,166
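A brief illustration of replace_nan above (ties between equally near finite values resolve to the earlier index):

import numpy as np

x = np.array([np.nan, 1.0, np.nan, 4.0])
print(replace_nan(x))  # [1. 1. 1. 4.] -- each NaN takes the nearest finite value
print(x)               # the input array is left untouched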
def handson_table(request, query_sets, fields):
    """function to render the scoresheets as part of the template"""
    return excel.make_response_from_query_sets(query_sets, fields, 'handsontable.html')
    # content = excel.pe.save_as(source=query_sets,
    #                            dest_file_type='handsontable.html',
    #                            dest_embed=True)
    # content.seek(0)
    # return render(
    #     request,
    #     'custom-handson-table.html',
    #     {
    #         'handsontable_content': content.read()
    #     })
    # return Response({'handsontable_content': render(content)},
    #                 template_name='custom-handson-table.html')
93c1471c142917f5b0492ddb27fdd6c278e9976d
17,167
from functools import reduce
from math import gcd  # assumed import; ``gcd`` is used below

import networkx as nx  # assumed import; ``nx`` is used below


def is_periodic(G):
    """
    https://stackoverflow.com/questions/54030163/periodic-and-aperiodic-directed-graphs
    Own function to test whether a given graph is aperiodic:
    """
    if not nx.is_strongly_connected(G):
        print("G is not strongly connected, periodicity not defined.")
        return False
    cycles = list(nx.algorithms.cycles.simple_cycles(G))
    cycles_sizes = [len(c) for c in cycles]  # Find all cycle sizes
    cycles_gcd = reduce(gcd, cycles_sizes)   # Find greatest common divisor of all cycle sizes
    is_periodic = cycles_gcd > 1
    return is_periodic
6671a1bf57ef6ec973c7d283cb447d890cbd93e2
17,168
def Sphere(individual):
    """Sphere test objective function.

        F(x) = sum_{i=1}^d xi^2
        d=1,2,3,...
        Range: [-100,100]
        Minima: 0
    """
    # print(individual)
    return sum(x**2 for x in individual)
349b732e931fc5acf8a52213d9ddf88335479b90
17,169
def find_names_for_google(df_birth_names):
    """
    :param df_birth_names: all of the birth data from the data given by Lu
    :return 1: df_country_found, a DataFrame whose rows now carry a country.
        First filter through the country list; some entries may still be problematic
        (countries with several names (e.g. the Netherlands), names containing special
        symbols (e.g. Congo-Brazzaville vs. Congo-Kinshasa), Korea counted as three
        "countries" (Korea, South Korea, North Korea), places like South Ossetia,
        Pacific island nations with disputed status, and Hong Kong / Taiwan...
        all treated as countries for now).
        Then check whether the entry is a US state.
        Then check cities: an entry counts as a city if it is in the city list, which
        follows the worldcities database. For duplicated city names take the more
        populous city, e.g. Valencia.
    :return 2: df_need_google_search, a DataFrame of entries that are not in the
        country list, are not US states, and cannot be found in the worldcities database.
    """
    whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    # teststr = " happy t00o go 129.129$%^&*("
    # answer = ''.join(filter(whitelist.__contains__, teststr))
    dirty_list = []
    need_searching_list = []
    for index, row in df_birth_names.head(30).iterrows():
        item = ''.join(filter(whitelist.__contains__, row['birth'])).strip()
        # item = row['birth'].replace("'","").strip()
        if item == "":  # null
            dirty_list.append(np.nan)
            print(item, " is null")
            continue
        if item in COUNTRY_LIST:  # known countries
            dirty_list.append(item)
            print(item, " is a country")
            continue
        if item in US_STATES_LIST:  # add us states as United States
            dirty_list.append("United States")
            print(item, " is a state in the US")
            continue
        if item in NATIONALITY_LIST:  # add nation from nationality information e.g. Chinese -> China
            nation_from_nationality = NATIONALITY_TO_COUNTRY.loc[item]["Country/entity name"]
            dirty_list.append(nation_from_nationality)
            print(item, " is a national of a certain country")
            continue
        if item in CITY_LIST:  # known city to country e.g. London -> UK
            country_from_city = CITY_TO_COUNTRY.loc[item]["country"]
            dirty_list.append(country_from_city)
            print(item, " is a city and it has been transformed")
            continue
        flag1 = 0
        # country name contained as a substring
        for i in COUNTRY_LIST:
            if i in item:
                dirty_list.append(i)
                print(i, " maybe a country")
                flag1 = 1
                break
        if flag1 == 1:
            continue
        flag2 = 0
        for i in US_STATES_LIST:
            if i in item:
                dirty_list.append("United States")
                print(i, "maybe a state in the US")
                flag2 = 1
                break
        if flag2 == 1:
            continue
        flag3 = 0
        for i in CITY_LIST:
            if i in item:
                country_from_city = CITY_TO_COUNTRY.loc[i]["country"]
                dirty_list.append(country_from_city)
                print(i, " maybe a city, and we are attempting to transform it")
                flag3 = 1
                break
        if flag3 == 1:
            continue
        need_searching_list.append(item)
        print("this item: ", item, " is not added")
    need_searching_list = list(dict.fromkeys(need_searching_list))  # remove duplicates
    df_country_found = pd.DataFrame(dirty_list)
    df_need_google_search = pd.DataFrame(need_searching_list)
    return df_country_found, df_need_google_search
6358fd692784389530ebf4c3a2059c3923104d2f
17,170
def make_address_mask(universe, sub=0, net=0, is_simplified=True):
    """Returns the address bytes for a given universe, subnet and net.

    Args:
        universe - Universe to listen
        sub - Subnet to listen
        net - Net to listen
        is_simplified - Whether to use nets and subnet or universe only,
                        see User Guide page 5 (Universe Addressing)

    Returns:
        bytes - byte mask for given address
    """
    address_mask = bytearray()

    if is_simplified:
        # Ensure data is in right range
        universe = clamp(universe, 0, 32767)

        # Make mask
        msb, lsb = shift_this(universe)  # convert to MSB / LSB
        address_mask.append(lsb)
        address_mask.append(msb)
    else:
        # Ensure data is in right range
        universe = clamp(universe, 0, 15)
        sub = clamp(sub, 0, 15)
        net = clamp(net, 0, 127)

        # Make mask
        address_mask.append(sub << 4 | universe)
        address_mask.append(net & 0xFF)

    return address_mask
d360dde7ecc4ecc99e32df53f2f0806d5d396f1f
17,171
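A hedged usage sketch for make_address_mask above, assuming the module's clamp and shift_this helpers are in scope:

mask = make_address_mask(300)                                  # simplified 15-bit addressing
print(list(mask))                                              # [44, 1] -> LSB, MSB of 300
mask = make_address_mask(5, sub=2, net=1, is_simplified=False)
print(list(mask))                                              # [37, 1] -> sub<<4|universe, then net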
def get_img_size(src_size, dest_size):
    """
    Returns image dimensions kept in proportion with the original, depending
    on the image orientation (portrait or landscape).

    :param src_size: size of the original
    :type src_size: list / tuple
    :param dest_size: target size
    :type dest_size: list / tuple
    :rtype: tuple
    """
    width, height = dest_size
    src_width, src_height = src_size
    if height >= width:
        return (int(float(width) / height * src_height), src_height)
    return (src_width, int(float(height) / width * src_width))
133dab529cd528373a1c7c6456a34cf8fd22dac9
17,173
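Two illustrative calls to get_img_size above, cropping a 1600x1200 source to a target aspect ratio:

print(get_img_size((1600, 1200), (300, 400)))  # portrait target  -> (900, 1200)
print(get_img_size((1600, 1200), (400, 300)))  # landscape target -> (1600, 1200)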
def laxnodeset(v):
    """\
    Return a nodeset with elements from the argument.

    If the argument is already a nodeset, it itself will be returned.
    Otherwise it will be converted to a nodeset that can be mutable or
    immutable, depending on what happens to be most effectively implemented."""
    if not isinstance(v, NodeSet):
        v = immnodeset(v)
    return v
3210f8d1c1d47c8871d0ba82c793b6cd85069566
17,174
def load_config():
    """
    Loads the configuration file.

    Returns:
        - (json) : The configuration file.
    """
    return load_json_file('config.json')
05099118414d371ebc521e498503be1798c39066
17,175
from bs4 import BeautifulSoup


def text_from_html(body):
    """
    Gets all raw text from html, removing all tags.
    :param body: html
    :return: str
    """
    soup = BeautifulSoup(body, "html.parser")
    texts = soup.findAll(text=True)
    visible_texts = filter(tag_visible, texts)
    return " ".join(t.strip() for t in visible_texts)
313a5f404120c17290b726cb00b05e2276a07895
17,178
def alert_history():
    """ Alert History: RESTful CRUD controller """
    return s3_rest_controller(rheader=s3db.cap_history_rheader)
34a2b6bf90ab0b73eae3b64c83ffebc918e2f1a3
17,179
def chat():
    """
    Chat room. The user's name and room must be stored in the session.
    """
    if 'avatar' not in session:
        session['avatar'] = avatars.get_avatar()

    data = {
        'user_name': session.get('user_name', ''),
        'avatar': session.get('avatar'),
        'room_key': session.get('room_key', ''),
        'password': session.get('password', '')
    }

    if data['user_name'] == '' or data['room_key'] == '':
        return redirect(url_for('.index'))

    return render_template('chat.html', **data)
d7024960ac8a03082deb696e0c0e6009dfe8e349
17,180
def _get_split_idx(N, blocksize, pad=0):
    """
    Returns a list of indexes dividing an array into blocks of size blocksize
    with optional padding. Padding takes into account that the resultant block
    must fit within the original array.

    Parameters
    ----------
    N : Nonnegative integer
        Total array length
    blocksize : Nonnegative integer
        Size of each block
    pad : Nonnegative integer
        Pad to add on either side of each index

    Returns
    -------
    split_idx : List of 2-tuples
        Indices to create splits
    pads_used : List of 2-tuples
        Pads that were actually used on either side

    Examples
    --------
    >>> split_idx, pads_used = _get_split_idx(5, 2)
    >>> print(split_idx)
    [(0, 2), (2, 4), (4, 5)]
    >>> print(pads_used)
    [(0, 0), (0, 0), (0, 0)]
    >>> split_idx, pads_used = _get_split_idx(5, 2, pad=1)
    >>> print(split_idx)
    [(0, 3), (1, 5), (3, 5)]
    >>> print(pads_used)
    [(0, 1), (1, 1), (1, 0)]
    """
    num_fullsplits = N // blocksize
    remainder = N % blocksize

    split_idx = []
    pads_used = []

    for i in range(num_fullsplits):
        start = max(0, i * blocksize - pad)
        end = min(N, (i + 1) * blocksize + pad)
        split_idx.append((start, end))

        leftpad = i * blocksize - start
        rightpad = end - (i + 1) * blocksize
        pads_used.append((leftpad, rightpad))

    # Append the last split if there is a remainder
    if remainder:
        start = max(0, num_fullsplits * blocksize - pad)
        split_idx.append((start, N))

        leftpad = num_fullsplits * blocksize - start
        pads_used.append((leftpad, 0))

    return split_idx, pads_used
21935190de4c42fa5d7854f6608387dd2f004fbc
17,181
def buydown_loan(amount, nrate, grace=0, dispoints=0, orgpoints=0, prepmt=None):
    """
    In this loan, the periodic payments are recalculated when there are
    changes in the value of the interest rate.

    Args:
        amount (float): Loan amount.
        nrate (float, pandas.Series): nominal interest rate per year.
        grace (int): number of grace periods without paying the principal.
        dispoints (float): Discount points of the loan.
        orgpoints (float): Origination points of the loan.
        prepmt (pandas.Series): generic cashflow representing prepayments.

    Returns:
        An object of the class ``Loan``.

    >>> nrate = interest_rate(const_value=10, start='2016Q1', periods=11, freq='Q', chgpts={'2017Q2':20})

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=None) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      200.99
    Total payment:      1200.99
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   123.993257    21.983698    102.009559
    2018Q1       337.664393      20.0   123.993257    16.883220    107.110037
    2018Q2       230.554356      20.0   123.993257    11.527718    112.465539
    2018Q3       118.088816      20.0   123.993257     5.904441    118.088816
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     3.376644e+02
    2018Q1     2.305544e+02
    2018Q2     1.180888e+02
    2018Q3     1.136868e-13

    >>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
    >>> pmt['2017Q4'] = 200

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=pmt) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      180.67
    Total payment:      1180.67
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   323.993257    21.983698    302.009559
    2018Q1       137.664393      20.0    50.551544     6.883220     43.668324
    2018Q2        93.996068      20.0    50.551544     4.699803     45.851741
    2018Q3        48.144328      20.0    50.551544     2.407216     48.144328
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     1.376644e+02
    2018Q1     9.399607e+01
    2018Q2     4.814433e+01
    2018Q3     4.263256e-14
    """
    if not isinstance(nrate, pd.Series):
        raise TypeError('nrate must be a pandas.Series object.')  # raise was missing

    if prepmt is None:
        prepmt = nrate.copy()
        prepmt[:] = 0
    else:
        verify_period_range([nrate, prepmt])

    life = len(nrate) - grace - 1

    begppalbal = nrate.copy()
    intpmt = nrate.copy()
    ppalpmt = nrate.copy()
    totpmt = nrate.copy()
    endppalbal = nrate.copy()

    begppalbal[:] = 0
    intpmt[:] = 0
    ppalpmt[:] = 0
    totpmt[:] = 0
    endppalbal[:] = 0

    ##
    ## balance calculation
    ##
    pyr = getpyr(nrate)

    for time in range(grace + life + 1):

        if time == 0:
            #
            begppalbal[time] = amount
            endppalbal[time] = amount
            totpmt[time] = amount * (dispoints + orgpoints) / 100
            ### intpmt[time] = amount * dispoints / 100
            #
        else:
            #
            # periodic payment per period
            #
            if time <= grace:
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                totpmt[time] = intpmt[time]
                endppalbal[time] = begppalbal[time]
            else:
                pmt = -pvpmt(nrate=nrate[time], nper=grace+life-time+1,
                             pval=endppalbal[time-1], pmt=None, pyr=pyr)
                totpmt[time] = pmt + prepmt[time]

                # balance
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                ppalpmt[time] = totpmt[time] - intpmt[time]
                endppalbal[time] = begppalbal[time] - ppalpmt[time]

    data = {'Beg_Ppal_Amount': begppalbal}
    result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
                  dispoints=dispoints, orgpoints=orgpoints, data=data)
    result['Nom_Rate'] = nrate
    result['Tot_Payment'] = totpmt
    result['Int_Payment'] = intpmt
    result['Ppal_Payment'] = ppalpmt
    result['End_Ppal_Amount'] = endppalbal
    return result
46eb6bbaaa940b5cf1abd702ee5d9e2e20c6dab3
17,182
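A standalone cross-check of the payment recalculation above: a minimal sketch assuming pvpmt implements the standard annuity formula with the periodic rate i = nrate / pyr / 100 (annuity_pmt is a hypothetical helper, not part of the library).

def annuity_pmt(balance, nrate, nper, pyr=4):
    """Periodic payment that amortizes `balance` over `nper` periods."""
    i = nrate / pyr / 100  # periodic rate per compounding period
    return balance * i / (1 - (1 + i) ** -nper)

# First scheduled payment of the doctest above: 1000 amortized over the
# 10 remaining quarters at a 10% nominal annual rate
print(round(annuity_pmt(1000, 10, 10), 6))  # 114.258763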
import numpy as np


def ffill(array: np.ndarray, value: int = 0) -> np.ndarray:
    """Forward fills an array.

    Args:
        array: 1-D or 2-D array.
        value: Value to be filled. Default is 0.

    Returns:
        ndarray: Forward-filled array.

    Examples:
        >>> x = np.array([0, 5, 0, 0, 2, 0])
        >>> ffill(x)
        array([0, 5, 5, 5, 2, 2])

    Notes: Works only in axis=1 direction.

    """
    ndims = len(array.shape)
    ran = np.arange(array.shape[ndims - 1])
    idx = np.where((array != value), ran, 0)
    idx = np.maximum.accumulate(idx, axis=ndims-1)  # pylint: disable=E1101
    if ndims == 2:
        return array[np.arange(idx.shape[0])[:, None], idx]
    return array[idx]
f5774c3e50ddbf2ffa9cf84df5cb57b135d1549a
17,183
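A quick check of the 2-D behaviour described in the Notes, assuming the ffill above is in scope:

import numpy as np

x = np.array([[0, 3, 0, 4, 0],
              [1, 0, 0, 0, 2]])
print(ffill(x))
# [[0 3 3 4 4]
#  [1 1 1 1 2]]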
def svn_stringbuf_from_file(*args): """svn_stringbuf_from_file(char const * filename, apr_pool_t pool) -> svn_error_t""" return _core.svn_stringbuf_from_file(*args)
b375a43bf8e050aa5191f387d930077680e9b019
17,184
import numpy as np


def poly4(x, b, b0):
    """
    Defines a fourth-degree polynomial, f(x) = b*x**4 + b0, used to fit a curve

    Parameters
    ----------
    x: numpy.ndarray
        x of f(x)
    b: float
        Coefficient of the fourth-degree term
    b0 : float
        y-intercept of the curve

    Returns
    -------
    f : numpy.ndarray
        Result of f(x)

    """
    return b * np.array(x) ** 4 + b0
aed3603640400488219f2cca82e57268f32de000
17,185
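poly4 has the (x, *params) signature expected by scipy.optimize.curve_fit, so a plausible use (scipy being an assumption here) is recovering the coefficients from noisy samples:

import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-2, 2, 50)
y = 3.0 * x ** 4 + 1.5 + np.random.normal(scale=0.1, size=x.size)  # synthetic data
(b, b0), _ = curve_fit(poly4, x, y)
print(b, b0)  # approximately 3.0 and 1.5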
from teospy.tests.tester import Tester def chkiapws06table6(printresult=True,chktol=_CHKTOL): """Check accuracy against IAPWS 2006 table 6. Evaluate the functions in this module and compare to reference values of thermodynamic properties (e.g. heat capacity, lapse rate) in IAPWS 2006, table 6. :arg bool printresult: If True (default) and any results are outside of the given tolerance, then the function name, reference value, result value, and relative error are printed. :arg float chktol: Tolerance to use when choosing to print results (default _CHKTOL). :returns: :class:`~teospy.tests.tester.Tester` instances containing the functions, arguments, reference values, results, and relative errors from the tests. The first instance involves derivatives of ice_g whereas the second tests the other thermodynamic functions. """ fargs0 = (273.16,611.657) fargs1 = (273.152519,101325.) fargs2 = (100.,1e8) propfargs = [fargs0,fargs1,fargs2] ders = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2)] # Tester instance for derivatives of ice_g derfuns = _ice_g derfnames = 'ice_g' # Derivatives change before arguments do here derfargs = [(der+fargs) for fargs in propfargs for der in ders] derargfmt = '({0:1g},{1:1g},{2:7.3f},{3:7g})' derrefs = [0.611784135,0.122069433940e+4,0.109085812737e-2, -0.767602985875e+1,0.174387964700e-6,-0.128495941571e-12, 0.10134274069e+3,0.122076932550e+4,0.109084388214e-2,-0.767598233365e+1, 0.174362219972e-6,-0.128485364928e-12,-0.222296513088e+6, 0.261195122589e+4,0.106193389260e-2,-0.866333195517e+1, 0.274505162488e-7,-0.941807981761e-13] header = 'Ice Gibbs energy derivatives' dertest = Tester(derfuns,derfargs,derrefs,derfnames,derargfmt,header=header) # Tester instance for other ice properties propfuns = [enthalpy,helmholtzenergy,internalenergy,entropy,cp,density, expansion,pcoefficient,kappa_t,kappa_s] propfnames = ['enthalpy','helmholtzenergy','internalenergy','entropy','cp', 'density','expansion','pcoefficient','kappa_t','kappa_s'] propargfmt = '({0:7.3f},{1:7g})' proprefs = [ [-0.333444253966e+6,-0.333354873637e+6,-0.483491635676e+6], [-0.55446875e-1,-0.918701567e+1,-0.328489902347e+6], [-0.333444921197e+6,-0.333465403393e+6,-0.589685024936e+6], [-0.122069433940e+4,-0.122076932550e+4,-0.261195122589e+4], [0.209678431622e+4,0.209671391024e+4,0.866333195517e+3], [0.916709492200e+3,0.916721463419e+3,0.941678203297e+3], [0.159863102566e-3,0.159841589458e-3,0.258495528207e-4], [0.135714764659e+7,0.135705899321e+7,0.291466166994e+6], [0.117793449348e-9,0.117785291765e-9,0.886880048115e-10], [0.114161597779e-9,0.114154442556e-9,0.886060982687e-10] ] header = 'Ice thermodynamic properties' proptest = Tester(propfuns,propfargs,proprefs,propfnames,propargfmt, header=header) # Run Tester instances and print results dertest.run() proptest.run() if printresult: dertest.printresults(chktol=chktol) proptest.printresults(chktol=chktol) return dertest, proptest
c0fce67d3a268ec0b67ff845f5671c67aa394846
17,186
import numpy as np


def flat(arr):
    """
    Determines whether an array is essentially flat (e.g. all zeros):
    returns True when fewer than 1% of its elements deviate from the mean.
    """
    arr = np.array(arr)
    if arr.size == 0:
        return False
    mean = np.repeat(np.mean(arr), arr.size)
    nonzero_residuals = np.nonzero(arr - mean)[0]
    return nonzero_residuals.size < arr.size/100
ce2697d95165b46cec477265df6ccb337cb89af1
17,187
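Two illustrative calls: a constant array has no residuals against its mean, while random data deviates almost everywhere.

import numpy as np

print(flat(np.zeros(1000)))        # True: 0 non-zero residuals < 1000/100
print(flat(np.random.rand(1000)))  # False: nearly all residuals are non-zero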
def sensitive_fields(*paths, **typed_paths):
    """
    Marks fields of an object as sensitive. Each path is a key like
    "password" or a dotted path like "vmInfo.password".
    """
    def ret(old_init):
        def __init__(self, *args, **kwargs):
            if paths:
                ps = ["obj['" + p.replace(".", "']['") + "']" for p in paths]
                setattr(self, SENSITIVE_FIELD_NAME, ps)
            # forward the constructor arguments instead of dropping them
            old_init(self, *args, **kwargs)
        return __init__
    return ret
e174519c253d4676ae7c07c1b11eb18e532d5f61
17,188
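A hedged usage sketch; SENSITIVE_FIELD_NAME lives elsewhere in the module, so the constant and class below are hypothetical stand-ins:

SENSITIVE_FIELD_NAME = '_sensitive_fields'  # assumption: defined at module level

class VmRequest:
    @sensitive_fields('password', 'vmInfo.password')
    def __init__(self, password, vmInfo):
        self.password = password
        self.vmInfo = vmInfo

req = VmRequest('s3cret', {'password': 'hunter2'})
print(getattr(req, SENSITIVE_FIELD_NAME))
# ["obj['password']", "obj['vmInfo']['password']"]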
from datetime import datetime, timedelta
import time


def get_timestamp_diff(diff):
    """Get the timestamp, in milliseconds, of the moment `diff` days from today."""
    tmp_str = (datetime.today() + timedelta(diff)).strftime("%Y-%m-%d %H:%M:%S")
    tmp_array = time.strptime(tmp_str, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(tmp_array)) * 1000
61ca093471103376ee44d940552db6337a4e65f5
17,189
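For example, the two calls below differ by exactly seven days' worth of milliseconds (barring a DST shift in the local timezone):

now_ms = get_timestamp_diff(0)        # today, in milliseconds
week_ago_ms = get_timestamp_diff(-7)  # seven days ago
print(now_ms - week_ago_ms)           # 604800000 = 7 * 86400 * 1000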
def can_delete(account, bike): """ Check if an account can delete a bike. Account must be a team member and bike not borrowed in the future. """ return (team_control.is_member(account, bike.team) and not has_future_borrows(bike))
f962e465b6a5eb62feea2683cdd8328b5591fb43
17,190
def get_node_types(nodes, return_shape_type=True):
    """
    Get the maya node types for the nodes supplied.

    Args:
        nodes (list): List of node names.
        return_shape_type (bool): For transforms, report the type of the
            first shape node instead of 'transform'.

    Returns:
        dict: dict[node_type_name] = list of matching nodes
    """
    found_type = {}

    for node in nodes:
        node_type = cmds.nodeType(node)
        if node_type == 'transform':
            if return_shape_type:
                shapes = get_shapes(node)
                if shapes:
                    node_type = cmds.nodeType(shapes[0])
        if node_type not in found_type:
            found_type[node_type] = []
        found_type[node_type].append(node)

    return found_type
7867f97f7228ac77ae44fda04672a8224aa7c1f4
17,192
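A sketch of how this might be called inside a Maya session (cmds is maya.cmds; the scene contents are hypothetical):

from maya import cmds

types = get_node_types(cmds.ls(dag=True))
for node_type, matching in types.items():
    print(node_type, len(matching))  # e.g. 'mesh' 12, 'camera' 4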
import csv
from collections import OrderedDict

import numpy as np


def updateDistances(fileName):
    """
    Calculate and update the distance on the given CSV file.

    Parameters
    ----------
    fileName: str
        Path and name of the CSV file to process.

    Returns
    -------
    ret: bool
        Response indicating if the update was successful or not.
    """

    # Open the CSV file for reading and writing
    try:
        file = open(fileName, 'r+', newline='')
    except OSError:
        return False

    reader = csv.reader(file, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_MINIMAL)
    writer = csv.writer(file, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_MINIMAL)

    # Read the face data from the CSV file and recalculate the distances,
    # also building a list to later recalculate the distance gradients
    frames = []
    distances = []
    faces = OrderedDict()
    prevDist = 0
    for row in reader:
        if row[0] != 'frame':
            frameNum = int(row[0])
            face = FaceData()
            face.fromList(row[1:])
            face.gradient = 0.0
            face.calculateDistance()
            faces[frameNum] = face

            # In case the face has been detected but the distance calculation
            # failed, assume the same distance as the previous detected face
            if not face.isEmpty():
                if face.distance == 0:
                    face.distance = prevDist
                prevDist = face.distance

            # Consider for the calculation of the gradients only the non-empty
            # faces (i.e. the frames where a face was detected)
            if not face.isEmpty():
                frames.append(frameNum)
                distances.append(face.distance)

    # Calculate the gradients from the helper list of distances
    gradients = np.gradient(distances)
    for i, frameNum in enumerate(frames):
        faces[frameNum].gradient = gradients[i]

    # Save the face data back to the CSV file
    file.truncate(0)
    file.seek(0)
    writer.writerow(['frame'] + FaceData.header())
    for frameNum, face in faces.items():
        writer.writerow([frameNum] + face.toList())
    file.close()
    return True
c19e0adcf731f9fd1af87f5dfe3a61889d395457
17,193
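The gradient step above relies on np.gradient's finite differences over the detected frames; a tiny illustration:

import numpy as np

distances = [10.0, 10.5, 11.5, 11.0]
print(np.gradient(distances))
# [ 0.5   0.75  0.25 -0.5 ] -- one-sided at the ends, central in between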
def hr(*args, **kwargs): """ The HTML <hr> element represents a thematic break between paragraph-level elements (for example, a change of scene in a story, or a shift of topic with a section). In previous versions of HTML, it represented a horizontal rule. It may still be displayed as a horizontal rule in visual browsers, but is now defined in semantic terms, rather than presentational terms. """ return void_el('hr', *args, **kwargs)
959106dc2c71334b5a88045f8a26a9f42a2d2fdb
17,194
def as_linker_option(p): """Return as an ld library path argument""" if p: return '-Wl,' + p return ''
452c06034be5c3c2525eb2bfad011e468daef02b
17,195
def split_backbone(options):
    """
    Split backbone fasta file into chunks.

    Returns dictionary of backbone -> id.
    """
    backbone_to_id = {}
    id_counter = 0

    # Write each backbone sequence to its own fasta file.
    pf = ParseFasta(options.backbone_filename)
    record = pf.getRecord()
    while record is not None:
        print(record[0])
        split_backbone = open(options.output_dir + '/' + options.prefix + '-' + str(id_counter) + '.fasta', 'w')
        split_backbone.write('>' + record[0] + '\n' + record[1])
        split_backbone.close()

        backbone_to_id[record[0]] = options.prefix + '-' + str(id_counter)
        id_counter += 1
        record = pf.getRecord()

    return backbone_to_id
6446e90a1aa2e38ca01ebb8a86b8cd1dbd3abd75
17,196
import pkg_resources def _get_highest_tag(tags): """Find the highest tag from a list. Pass in a list of tag strings and this will return the highest (latest) as sorted by the pkg_resources version parser. """ return max(tags, key=pkg_resources.parse_version)
8d2580f6f6fbb54108ee14d6d4834d376a65c501
17,197
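This matters because plain max() would compare tags lexicographically; version-aware parsing gets it right:

tags = ['1.2.0', '1.9.1', '1.10.0']
print(max(tags))               # '1.9.1' -- wrong, string comparison
print(_get_highest_tag(tags))  # '1.10.0' -- correct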
def add_comment(request, pk):
    """
    Adds a comment to the image - POST.
    Checks the user and assigns it to comment.user.
    """
    form = PhotoCommentForm(request.POST)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.user = request.user
        comment.save()
    else:
        text = 'You have used a forbidden word!'
        messages.warning(request, text)

    return redirect('photo comments', pk)
4488a183ca7786c65d355991cec38fed01864ab1
17,198
import functools
import warnings


def disable_warnings_temporarily(func):
    """Helper to disable warnings for specific functions
    (used mainly during testing of old functions)."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        warnings.filterwarnings("ignore")
        try:
            # propagate the wrapped function's return value
            return func(*args, **kwargs)
        finally:
            warnings.filterwarnings("default")
    return inner
5e19b8f51ca092709a1e1a5d6ff0b2543a41e5e1
17,200
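Typical decorator usage: the warning raised inside the wrapped function is silenced, and the default filter is restored afterwards.

import warnings

@disable_warnings_temporarily
def noisy():
    warnings.warn('this is hidden')

noisy()                   # prints nothing
warnings.warn('visible')  # back to normal behaviour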
from math import ceil


def progress_bar(progress):
    """
    Generates a light bar matrix to display volume / brightness level.
    :param progress: value between 0..1
    """
    dots = list(" " * 81)
    num_dots = ceil(round(progress, 3) * 9)

    while num_dots > 0:
        dots[81 - ((num_dots - 1) * 9 + 5)] = "*"
        num_dots -= 1

    return "".join(dots)
88986ecc505cf786e197d8ad55cd70b21fa3aa27
17,201
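Rendered as nine rows of nine characters (the 81-cell layout suggests a 9x9 LED-matrix target, which is an assumption), the bar fills the centre column from the bottom up:

bar = progress_bar(0.5)
for row in range(9):
    print(repr(bar[row * 9:(row + 1) * 9]))
# rows 4-8 contain a '*' in column 4; the top rows stay blank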
def get_context_command_parameter_converters(func):
    """
    Parses the given `func`'s parameters.
    
    Parameters
    ----------
    func : `async-callable`
        The function used by a ``SlasherApplicationCommand``.
    
    Returns
    -------
    func : `async-callable`
        The converted function.
    parameter_converters : `tuple` of ``ParameterConverter``
        Parameter converters for the given `func` in order.
    
    Raises
    ------
    TypeError
        - If `func` is not async callable and cannot be instanced to async either.
        - If `func` accepts keyword only parameters.
        - If `func` accepts `*args`.
        - If `func` accepts `**kwargs`.
    ValueError
        - If any parameter is not internal.
    """
    analyzer, real_analyzer, should_instance = check_command_coroutine(func)
    
    parameters = real_analyzer.get_non_reserved_positional_parameters()
    
    parameter_converters = []
    
    target_converter_detected = False
    
    for parameter in parameters:
        parameter_converter = create_internal_parameter_converter(parameter)
        
        if (parameter_converter is None):
            if target_converter_detected:
                raise TypeError(
                    f'`{real_analyzer.real_function!r}`\'s `{parameter.name}` does not refer to any of the '
                    f'expected internal parameters. Context commands do not accept any additional parameters.'
                )
            else:
                parameter_converter = create_target_parameter_converter(parameter)
                target_converter_detected = True
        
        parameter_converters.append(parameter_converter)
    
    parameter_converters = tuple(parameter_converters)
    
    if should_instance:
        func = analyzer.instance()
    
    return func, parameter_converters
294706230f95745dbd50681cafc066a5d226880d
17,202
def norm(x):
    """Standardize a 1D tensor to zero mean and unit standard deviation"""
    mu = x.mean()
    std = x.std()
    y = (x - mu)/std
    return y
ea8546da2ea478edb0727614323bba69f6af288d
17,203
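A quick sanity check with numpy (the function works on anything exposing .mean() and .std()):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = norm(x)
print(y.mean(), y.std())  # ~0.0 and ~1.0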
def honest_propose(validator, known_items): """ Returns an honest `SignedBeaconBlock` as soon as the slot where the validator is supposed to propose starts. Checks whether a block was proposed for the same slot to avoid slashing. Args: validator: Validator known_items (Dict): Known blocks and attestations received over-the-wire (but perhaps not included yet in `validator.store`) Returns: Optional[SignedBeaconBlock]: Either `None` if the validator decides not to propose, otherwise a `SignedBeaconBlock` containing attestations """ # Not supposed to propose for current slot if not validator.data.current_proposer_duties[validator.data.slot % SLOTS_PER_EPOCH]: return None # Already proposed for this slot if validator.data.last_slot_proposed == validator.data.slot: return None # honest propose return honest_propose_base(validator, known_items)
c6b0403b15154e3e3b19547770a162e2ac05501b
17,204
import re


def formatKwargsKey(key):
    """
    'foo_bar_baz' -> 'foo-bar-baz'
    (only underscores are converted; the case of the key is left untouched)
    """
    key = re.sub(r'_', '-', key)
    return key
24c79b37fdd1cd6d73ab41b0d2234b1ed2ffb448
17,205
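For instance:

print(formatKwargsKey('max_retries'))  # 'max-retries'
print(formatKwargsKey('foo_bar_baz'))  # 'foo-bar-baz'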
import dateutil.tz


def mktimestamp(dt):
    """
    Prepares a datetime for sending to HipChat: naive datetimes are
    assumed to be UTC.
    """
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=dateutil.tz.tzutc())
    return dt.isoformat(), dt.tzinfo.tzname(dt)
2f444d0ea27a3afbed68742bade8833a49e191e4
17,206
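A naive datetime comes back ISO-formatted with an explicit UTC offset:

from datetime import datetime

iso, tzname = mktimestamp(datetime(2024, 1, 1, 12, 0))
print(iso)     # 2024-01-01T12:00:00+00:00
print(tzname)  # UTC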
import tensorflow as tf


def build_accuracy(logits, labels, name_scope='accuracy'):
    """
    Builds a graph node to compute accuracy given `logits`, a probability
    distribution over the outputs, and `labels`, a one-hot encoding of the
    true classes.
    """
    with tf.name_scope(name_scope):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        return tf.reduce_mean(correct_prediction)
53f5f78a8c07c691e20d14c416c1fe21a2547bc6
17,208
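A graph-mode usage sketch (assumes TF1-style placeholders via tensorflow.compat.v1; the shapes are illustrative):

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.placeholder(tf.float32, [None, 3])
labels = tf.placeholder(tf.float32, [None, 3])
accuracy = build_accuracy(logits, labels)

with tf.Session() as sess:
    print(sess.run(accuracy, feed_dict={
        logits: np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]], dtype=np.float32),
        labels: np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32),
    }))  # 0.5 -- the first prediction matches, the second does not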
import numpy as np


def compute_mp_av(mp, index, m, df, k):
    """
    Given a matrix profile, a matrix profile index, the window size and the
    DataFrame that contains the timeseries, create a matrix profile object and
    add the corrected matrix profile after applying the complexity av.
    Uses an extended version of the apply_av function from matrixprofile
    foundation that is compatible with multi-dimensional timeseries. The
    implementation can be found here
    (https://github.com/MORE-EU/matrixprofile/blob/master/matrixprofile/transform.py)

    Args:
        mp: A matrix profile.
        index: The matrix profile index that accompanies the matrix profile.
        m: The subsequence window size.
        df: The timeseries that was used to calculate the matrix profile.
        k: Unused here; kept for interface compatibility.

    Return:
        Updated profile with an annotation vector
    """

    # Apply the annotation vector
    mp = np.nan_to_num(mp, nan=np.nanmax(mp))  # replace nan values with the max
    profile = to_mpf(mp, index, m, df)
    av_type = 'complexity'
    profile = mpf.transform.apply_av(profile, av_type)

    return profile
cc89d34dd145339c99d1ded8ced9af853c061124
17,210
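A hedged end-to-end sketch with the matrixprofile package (the compute call and column name are assumptions; to_mpf comes from the same module as compute_mp_av):

import matrixprofile as mpf

m = 32  # subsequence window size
profile = mpf.compute(df['value'].values, windows=m)  # assumes a DataFrame `df`
corrected = compute_mp_av(profile['mp'], profile['pi'], m, df, k=0)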