Columns: content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
import os

import flask


def data_static(filename):
    """
    Get files
    :param filename:
    :return:
    """
    _p, _f = os.path.split(filename)
    # print(_p, _f)
    return flask.send_from_directory(os.path.join(config['path']['path_data'], _p), _f)
1b603a8adb7bc384985cd0571d8ed2323ae32a73
17,100
def target(x, seed, instance):
    """A target function for dummy testing of TA.

    Perform x^2 for easy result calculations in checks.
    """
    # Return x[0] (with brackets) so we pass the value, not the np array element
    return x[0] ** 2, {'key': seed, 'instance': instance}
131560778f51ebd250a3077833859f7e5addeb6e
17,101
def generate(fspec, count, _fuel=None): """Generate <count> number of random passwords/passphrases. The passphrases are formated according to <fspec>. Returned value is (list, json_data), where list is a <count>-element sequence of pair of (password, reading hint for password). json_data is a dict at least containing the following keys: key 'diag': (str) message for diagnostics, key 'entropy': (float) estimated entropy of generated passphrases, key 'elements': list of sequences of elements of generated passphrases. Raises BadFormatError if fspec is either bad or not able to be satisfied. """ diag = [] fspec, entropy = _parse_fspec(fspec, diag=diag, _fuel=_fuel) if count < 1: raise BadFormatError('bad count of passwords specified') fspec, entropy = _resolve_entropy(fspec, entropy, diag=diag, _fuel=_fuel) elements = [] result = [] for ncount in range(count): o = [] def elem(e, f, o, h, c=None, ct=1): d = {'entropy': e, 'separator': f, 'password': o, 'hint': h} if c != None: d['corpus_source'] = str(c) if not f: d['repeat_count'] = ct return d def proc(filling, i, sep, wl, ct): initial = not filling and i == 0 e1 = wl.entropy() if wl.is_words: intersep = sep if sep != None else " " presep = "" if initial else sep if sep != None else " " for c in range(0, ct): w = wl.get_randomly() s = presep if c == 0 else intersep sh = " " if (s == "" and c != 0) else s if sh: o.append(elem(0.0, True, s, sh, None)) o.append(elem(e1, False, w.word, w.hint, wl)) else: if ct != 0: intersep = "" presep = "" if initial else sep if presep: o.append(elem(0.0, True, presep, presep, None)) ow = [] oh = [] for c in range(0, ct): w = wl.get_randomly() ow.append(w.word) oh.append(w.hint) o.append(elem(ct * e1, False, "".join(ow), "".join(oh), wl, ct=ct)) for i, s in enumerate(fspec): proc(False, i, *s) o_word = "".join(x['password'] for x in o) o_hint = "".join(x['hint'] for x in o) elements.append(o) result.append((o_word, o_hint)) return result, {'passwords': result, 'elements': elements, 'diag': "\n".join(diag), 'entropy': entropy}
aad44a80a648d192c696ebdd44ceefadd21d88cd
17,102
def convert_to_valid_einsum_chars(einsum_str):
    """Convert the str ``einsum_str`` to contain only the alphabetic characters
    valid for numpy einsum.
    """
    # partition into valid and invalid sets
    valid, invalid = set(), set()
    for x in einsum_str:
        (valid if is_valid_einsum_char(x) else invalid).add(x)

    # get replacements for invalid chars that are not already used
    available = gen_unused_symbols(valid, len(invalid))

    # map invalid to available and replace in the inputs
    replacer = dict(zip(invalid, available))
    return "".join(replacer.get(x, x) for x in einsum_str)
2cdd67bc967a12bd3dcb80f323f093cd9eff7213
17,103
def prop_GAC(csp, newVar=None):
    """Do GAC propagation. If newVar is None we do initial GAC enforce,
    processing all constraints. Otherwise we do GAC enforce with
    constraints containing newVar on the GAC queue."""
    constraints = csp.get_cons_with_var(newVar) if newVar else csp.get_all_cons()
    pruned = []
    # NOTE: although <constraints> is a list, the order is unimportant and it acts like a set.
    # See page 209 of RN textbook
    while constraints != []:
        constraint = constraints.pop(0)  # grab the first constraint
        for var in constraint.get_unasgn_vars():  # get_scope()?
            for val in var.cur_domain():
                if not constraint.has_support(var, val):
                    # Check if we have already pruned (var, val)
                    if (var, val) not in pruned:
                        var.prune_value(val)
                        pruned.append((var, val))
                        # We have modified var's domain, so add back all constraints
                        # that have var in their scope
                        for c in csp.get_cons_with_var(var):
                            if c not in constraints:
                                constraints.append(c)
                    # Check if var's domain is empty
                    if var.cur_domain_size() == 0:
                        return False, pruned
    return True, pruned
a1c576cfd9920a51eb9b9884bd49b4e8f4194d02
17,104
import io import shutil import os def clone( # pylint: disable=R0913,R0912,R0914 source, target, branch="main", depth=None, delete_git_dir=False, username=None, password=None, key_filename=None, key_data=None, track_branch_upstream=True, ): """ Clone repository """ # Prepare auth args auth_args = dict() if username is not None: auth_args["username"] = username if password is not None: auth_args["password"] = password if key_filename is not None: auth_args["key_filename"] = key_filename if key_data is not None: key_obj = io.StringIO(key_data.replace("|", "\n")) pkey = paramiko.RSAKey.from_private_key(key_obj) auth_args["key_filename"] = pkey # Clone repository log.info("Cloning repository %s into %s", source, target) repository = porcelain.clone( source, target, checkout=False, depth=depth, errstream=log.DebugLogStream(), **auth_args ) # Get current HEAD tree (default branch) try: head_tree = repository[b"HEAD"] except: # pylint: disable=W0702 head_tree = None # Get target tree (requested branch) branch_b = branch.encode("utf-8") try: target_tree = repository[b"refs/remotes/origin/" + branch_b] except: # pylint: disable=W0702 target_tree = None # Checkout branch branch_to_track = None if target_tree is not None: log.info("Checking out branch %s", branch) repository[b"refs/heads/" + branch_b] = repository[b"refs/remotes/origin/" + branch_b] repository.refs.set_symbolic_ref(b"HEAD", b"refs/heads/" + branch_b) repository.reset_index(repository[b"HEAD"].tree) # branch_to_track = branch elif head_tree is not None: try: default_branch_name = repository.refs.follow(b"HEAD")[0][1] if default_branch_name.startswith(refs.LOCAL_BRANCH_PREFIX): default_branch_name = default_branch_name[len(refs.LOCAL_BRANCH_PREFIX):] default_branch_name = default_branch_name.decode("utf-8") # log.warning( "Branch %s was not found. Checking out default branch %s", branch, default_branch_name ) # branch_to_track = default_branch_name except: # pylint: disable=W0702 log.warning("Branch %s was not found. Trying to check out default branch", branch) # try: repository.reset_index(repository[b"HEAD"].tree) except: # pylint: disable=W0702 log.exception("Failed to checkout default branch") else: log.error("Branch %s was not found and default branch is not set. Skipping checkout") # Add remote tracking if track_branch_upstream and branch_to_track is not None: log.info("Setting '%s' to track upstream branch", branch_to_track) # branch_to_track_b = branch_to_track.encode("utf-8") # config = repository.get_config() config.set( (b"branch", branch_to_track_b), b"remote", b"origin", ) config.set( (b"branch", branch_to_track_b), b"merge", b"refs/heads/" + branch_to_track_b, ) config.write_to_path() # Delete .git if requested if delete_git_dir: log.info("Deleting .git directory") shutil.rmtree(os.path.join(target, ".git")) # Return repo object return repository
6fe55e808bfe2758e82859772d3d2e063d9c5add
17,105
def submit_only_kwargs(kwargs):
    """Strip out kwargs that are not used in submit"""
    kwargs = kwargs.copy()
    for key in ['patience', 'min_freq', 'max_freq', 'validation',
                "max_epochs", "epoch_boost", "train_size", "valid_size"]:
        _ = kwargs.pop(key, None)
    return kwargs
e93a4b8921c5b80bb487caa6057c1ff7c1701305
17,106
def make_simple_boundary(outline_edge_group: UniqueEdgeList, all_edges: UniqueEdgeList):
    """
    Step 3 recursive
    :param outline_edge_group: A list of edges, grouped by connectivity between edges.
    :param all_edges:
    :return: ???
    """
    while len(all_edges.edge_list) > 0:
        current_edge = all_edges.edge_list[0]
        work = False

        neighbors = all_edges.get_neighbor_indices_for_edge(current_edge)

        # Loop against all neighboring edges, gobble up the neighbors.
        for neighbor in neighbors:
            neighbor_edge = all_edges.edge_list[neighbor]

            if not Edge.same_edge(current_edge, neighbor_edge):
                shared_vertex = Edge.has_shared_vertex(current_edge, neighbor_edge)
                parallel = Edge.are_parallel_or_anti_parallel(current_edge, neighbor_edge)
                if shared_vertex is not None and parallel:
                    # Case 1.
                    start_vertex = [neighbor_edge.x1, neighbor_edge.y1, neighbor_edge.z1]

                    # Case 2.
                    if (neighbor_edge.x1 == shared_vertex[0] and
                            neighbor_edge.y1 == shared_vertex[1] and
                            neighbor_edge.z1 == shared_vertex[2]):
                        start_vertex = [neighbor_edge.x2, neighbor_edge.y2, neighbor_edge.z2]

                    # Case 3.
                    end_vertex = [current_edge.x1, current_edge.y1, current_edge.z1]

                    # Case 4.
                    if (current_edge.x1 == shared_vertex[0] and
                            current_edge.y1 == shared_vertex[1] and
                            current_edge.z1 == shared_vertex[2]):
                        end_vertex = [current_edge.x2, current_edge.y2, current_edge.z2]

                    new_edge = Edge(start_vertex[0], start_vertex[1], start_vertex[2],  # Edge start
                                    end_vertex[0], end_vertex[1], end_vertex[2])  # Edge end

                    all_edges.remove(current_edge)
                    all_edges.remove(neighbor_edge)
                    all_edges.add(new_edge)
                    work = True
                    break

        if not work and len(all_edges.edge_list) > 0:
            outline_edge_group.add(current_edge)
            all_edges.remove(current_edge)
    return outline_edge_group
fd3dfd40302d2f01126032c9420fd7b990d30cc6
17,107
import os


def convert_rscape_svg_to_one_line(rscape_svg, destination):
    """
    Convert R-scape SVG into SVG with 1 line per element.
    """
    output = os.path.join(destination, 'rscape-one-line.svg')
    cmd = (r"perl -0777 -pe 's/\n +fill/ fill/g' {rscape_svg} | "
           r"perl -0777 -pe 's/\n d=/ d=/g' | "
           r"perl -0777 -pe 's/\n +<tspan/ <tspan/g' | "
           r"perl -0777 -pe 's/\n<\/text>/<\/text>/g' "
           r"> {output}").format(rscape_svg=rscape_svg, output=output)
    os.system(cmd)
    return output
7f561dcd1b9c6e540bb96fab363781dad73db566
17,108
import subprocess
import tempfile

import pylab
import scipy
import scipy.misc


def getDSSImage(ra, dec, radius=1.0, xsize=800, **kwargs):
    """
    Download Digitized Sky Survey images
    https://archive.stsci.edu/cgi-bin/dss_form
    https://archive.stsci.edu/cgi-bin/dss_search

    Image is in celestial orientation (RA increases to the right)
    https://archive.stsci.edu/dss/script_usage.html

    ra (r) - right ascension
    dec (d) - declination
    equinox (e) - equinox (B1950 or J2000; default: J2000)
    height (h) - height of image (arcminutes; default: 15.0)
    width (w) - width of image (arcminutes; default: 15.0)
    format (f) - image format (FITS or GIF; default: FITS)
    compression (c) - compression (UNIX, GZIP, or NONE; default: NONE;
        compression applies to FITS only)
    version (v) - Which version of the survey to use:
        1 - First Generation survey (garden variety)
        2 - Second generation survey (incomplete)
        3 - Check the 2nd generation; if no image is available,
            then go to the 1st generation.
        4 - The Quick V survey (whence came the Guide Stars Catalog;
            used mostly for Phase II proposal submission)
    save (s) - Save the file to disk instead of trying to display.
        (ON (or anything) or not defined; default: not defined.)
    """
    url = "https://archive.stsci.edu/cgi-bin/dss_search?"
    scale = 2.0 * radius * 60.
    params = dict(ra='%.3f' % ra, dec='%.3f' % dec, width=scale, height=scale,
                  format='gif', version=1)  # v='poss2ukstu_red'
    query = '&'.join("%s=%s" % (k, v) for k, v in params.items())

    tmp = tempfile.NamedTemporaryFile(suffix='.gif')
    cmd = 'wget --progress=dot:mega -O %s "%s"' % (tmp.name, url + query)
    subprocess.call(cmd, shell=True)
    im = pylab.imread(tmp.name)
    tmp.close()

    if xsize:
        im = scipy.misc.imresize(im, size=(xsize, xsize))
    return im
338b0a5e4b54656f7fc763e0af55a1877b514712
17,109
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    job = HassJob(action)

    if config[CONF_TYPE] == "turn_on":
        entity_id = config[CONF_ENTITY_ID]

        @callback
        def _handle_event(event: Event):
            if event.data[ATTR_ENTITY_ID] == entity_id:
                hass.async_run_hass_job(
                    job,
                    {"trigger": {**config, "description": f"{DOMAIN} - {entity_id}"}},
                    event.context,
                )

        return hass.bus.async_listen(EVENT_TURN_ON, _handle_event)

    return lambda: None
5cf362c7dc0b82f562164141ccf76f30dd1a0169
17,110
import pathlib

import cv2
import numpy as np


def imread(image_path, as_uint8=True):
    """Read an image as numpy array.

    Args:
        image_path (str or pathlib.Path):
            File path (including extension) to read image.
        as_uint8 (bool):
            Read an image in uint8 format.

    Returns:
        :class:`numpy.ndarray`:
            Image array of dtype uint8, MxNx3.

    Examples:
        >>> from tiatoolbox import utils
        >>> img = utils.misc.imread('ImagePath.jpg')

    """
    if isinstance(image_path, pathlib.Path):
        image_path = str(image_path)
    if pathlib.Path(image_path).suffix == ".npy":
        image = np.load(image_path)
    else:
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    if as_uint8:
        return image.astype(np.uint8)

    return image
30050f63e43d862cf512994f0e9d21c187b1ac0a
17,111
def pk(obj):
    """
    A helper that gets the primary key of a model instance if one is passed in.
    If not, this returns the parameter itself.

    This allows functions to have parameters that accept either a primary key
    or model instance.

    For example:

    ``` python
    def get_translations(target_locale):
        return Translation.objects.filter(target_locale=pk(target_locale))

    # Both of these would be valid calls
    get_translations(Locale.objects.get(id=1))
    get_translations(1)
    ```

    Args:
        obj (Model | any): A model instance or primary key value.

    Returns:
        any: The primary key of the model instance, or value of `obj` parameter.
    """
    if isinstance(obj, models.Model):
        return obj.pk
    else:
        return obj
431f518fe6d53e979543e4588a1d7389d7100d69
17,112
import json

import requests


def get_dataset(id):
    """Query for existence of dataset by ID."""
    uu = UrlUtils()
    es_url = uu.rest_url
    # es_index = "{}_{}_s1-ifg".format(uu.grq_index_prefix, version)
    es_index = "grq"

    # query
    query = {
        "query": {
            "wildcard": {
                "_id": id
            }
        }
    }

    logger.info(query)

    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    logger.info("search_url : %s" % search_url)

    r = requests.post(search_url, data=json.dumps(query))

    if r.status_code != 200:
        logger.info("Failed to query %s:\n%s" % (es_url, r.text))
        logger.info("query: %s" % json.dumps(query, indent=2))
        logger.info("returned: %s" % r.text)
        r.raise_for_status()

    result = r.json()
    logger.info(result['hits']['total'])
    return result
cfb31da0e23b7e197af1919fa34fa3f2d4fa1dfe
17,113
def bags_with_gold(parents_of, _):
    """
    Starting from leaf = 'gold', find recursively its parents up to the root
    and add them to a set.
    Number of bags that could contain gold = length of the set.
    """
    contains_gold = set()

    def find_roots(bag):
        for outer_bag in parents_of[bag]:
            contains_gold.add(outer_bag)
            find_roots(outer_bag)

    find_roots('shiny gold')
    return len(contains_gold)
3fd2b1c260d41867a5787a14f0c50a9b5d1a2f08
17,114
def process_file(file_path):
    """
    This function processes the submitted file.
    :return: A dictionary of errors found in the file. If there are no errors,
        then only the error report headers will be in the results.
    """
    enc = detect_bom_encoding(file_path)
    if enc is None:
        with open(file_path, 'r') as f:
            result = run_checks(file_path, f)
    else:
        with open(file_path, 'r', encoding=enc) as f:
            result = run_checks(file_path, f)
    print('Finished processing %s\n' % file_path)
    return result
29b25b9a1ac950b2b0d051a6748ebc78b31bad10
17,115
import requests
from requests.exceptions import HTTPError


def get_request_body(text, api_key, *args):
    """
    Send a request and return the response body parsed as a dictionary.

    @param text: target text that you want to detect its language
    @type text: str
    @type api_key: str
    @param api_key: your private API key
    """
    if not api_key:
        raise Exception(
            "you need to get an API_KEY for this to work. "
            "Get one for free here: https://detectlanguage.com/documentation")
    if not text:
        raise Exception("Please provide an input text")
    else:
        try:
            headers = config['headers']
            headers['Authorization'] = headers['Authorization'].format(api_key)
            response = requests.post(config['url'],
                                     json={'q': text},
                                     headers=headers)
            body = response.json().get('data')
            return body
        except HTTPError as e:
            print("Error occurred while requesting from server: ", e.args)
            raise e
e21e8733eec00bc78616b18a8d93c18dc2b20449
17,116
def generate_sample_task(project):
    """Generate task example for upload and check it with serializer validation

    :param project: project with label config
    :return: task dict
    """
    task = generate_sample_task_without_check(project.label_config)

    # check generated task
    '''if project:
        try:
            TaskSerializer.check_data(project, task)
        except ValidationError as e:
            raise ValidationError(str(e) + ': task example = ' + json.dumps(task) +
                                  ', project config = ' + project.label_config +
                                  ', project data_types = ' + json.dumps(project.data_types))'''
    return task
1e3259b320e46a938139b0dde8ed5b999290d6cd
17,117
def serializable_value(self, field_name):
    """
    Returns the value of the field name for this instance. If the field is
    a foreign key, returns the id value, instead of the object. If there's
    no Field object with this name on the model, the model attribute's
    value is returned directly.

    Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
    and not use this method.
    """
    try:
        field = self._admin_opts.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        return getattr(self, field_name)
    return getattr(self, field.name)
839d59e92c4249359367d07700bd55f80eafe98b
17,118
import pandas


def fetch_fi2010(normalization=None) -> pandas.DataFrame:
    """
    Load the FI2010 dataset with no auction.

    Benchmark Dataset for Mid-Price Forecasting of Limit Order Book Data
    with Machine Learning Methods.
    A Ntakaris, M Magris, J Kanniainen, M Gabbouj, A Iosifidis.
    arXiv:1705.03233 [cs.CE]. https://arxiv.org/abs/1705.03233

    Parameters
    ----------
    normalization : {"zscore", None}
        Normalization method.
    """
    if normalization is None:
        url = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data.csv"
        return pandas.read_csv(url, index_col=0)
    if normalization == "zscore":
        url1 = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data_zscore1.csv"
        url2 = "https://raw.githubusercontent.com/simaki/fi2010/main/data/data_zscore2.csv"
        return pandas.concat(
            [pandas.read_csv(url1, index_col=0), pandas.read_csv(url2, index_col=0)]
        )
bb6e6e484d6d3d3d1d831a9194fe4f629b820db8
17,119
def get_netcdf_filename(batch_idx: int) -> str:
    """Generate full filename, excluding path."""
    assert 0 <= batch_idx < 1e6
    return f"{batch_idx:06d}.nc"
5d916c4969eb96653ea9f0a21ab8bec93ebcfafa
17,120
def CreateBoardConfigs(site_config, boards_dict, ge_build_config): """Create mixin templates for each board.""" # Extract the full list of board names from GE data. separate_board_names = set(config_lib.GeBuildConfigAllBoards(ge_build_config)) unified_builds = config_lib.GetUnifiedBuildConfigAllBuilds(ge_build_config) unified_board_names = set([b[config_lib.CONFIG_TEMPLATE_REFERENCE_BOARD_NAME] for b in unified_builds]) board_names = separate_board_names | unified_board_names # TODO(crbug.com/648473): Remove these, after GE adds them to their data set. board_names = board_names.union(boards_dict['all_boards']) result = dict() for board in board_names: board_config = config_lib.BuildConfig(boards=[board]) if board in _brillo_boards: board_config.apply(site_config.templates.brillo) if board in _lakitu_boards: board_config.apply(site_config.templates.lakitu) if board in _lassen_boards: board_config.apply(site_config.templates.lassen) if board in ['x30evb']: board_config.apply(site_config.templates.x30evb) if board in _loonix_boards: board_config.apply(site_config.templates.loonix) if board in _moblab_boards: board_config.apply(site_config.templates.moblab) if board in _accelerator_boards: board_config.apply(site_config.templates.accelerator) if board in _termina_boards: board_config.apply(site_config.templates.termina) if board in _nofactory_boards: board_config.apply(factory=False, factory_toolkit=False, factory_install_netboot=False, images=remove_images(['factory_install'])) if board in _toolchains_from_source: board_config.apply(usepkg_toolchain=False) if board in _noimagetest_boards: board_config.apply(image_test=False) if board in _nohwqual_boards: board_config.apply(hwqual=False) if board in _norootfs_verification_boards: board_config.apply(rootfs_verification=False) if board in _base_layout_boards: board_config.apply(disk_layout='base') if board in _no_unittest_boards: board_config.apply(site_config.templates.no_unittest_builder) if board in _beaglebone_boards: board_config.apply(site_config.templates.beaglebone) if board == 'moblab-generic-vm': board_config.apply(site_config.templates.moblab_vm_tests) result[board] = board_config return result
7d0dfeca13015c6fa5b75d10106476f347de0cbb
17,121
from math import pi


def area_calc(radius, point_in, total_points):
    """Calculates the partial area of ball

    :param radius: radius of ball
    :param point_in: points of the total points to include
    :param total_points: number of sampled points
    :return: area
    """
    return (4 * pi * radius ** 2) * (point_in / total_points)
a660776a4f4a1d2d04a28255b7ee0892ddc5d136
17,122
import gettext import math def results_framework_export(request, program): """Returns .XLSX containing program's results framework""" program = Program.rf_aware_objects.get(pk=program) wb = openpyxl.Workbook() wb.remove(wb.active) ws = wb.create_sheet(gettext("Results Framework")) get_font = lambda attrs: styles.Font(**{**{'name': 'Calibri', 'size': 12}, **attrs}) ws.cell(row=2, column=2).value = gettext("Results Framework") ws.cell(row=2, column=2).font = get_font({'size': 18, 'bold': True}) ws.cell(row=3, column=2).value = program.name ws.cell(row=3, column=2).font = get_font({'size': 18}) level_span_style = styles.NamedStyle(name='level_span') level_span_style.font = get_font({}) level_span_style.alignment = styles.Alignment(wrap_text=True, vertical='center', horizontal='center') level_span_style.fill = styles.PatternFill('solid', 'E5E5E5') wb.add_named_style(level_span_style) level_single_style = styles.NamedStyle(name='level_no_span') level_single_style.font = get_font({}) level_single_style.alignment = styles.Alignment(wrap_text=True, vertical='top', horizontal='left') level_single_style.fill = styles.PatternFill('solid', 'E5E5E5') wb.add_named_style(level_single_style) bottom_tier = program.level_tiers.count() def row_height_getter(cell): lines_of_text = str(cell.value).splitlines() row = cell.row def get_row_height_decorated(w): lines = sum([math.ceil(len(s)/w) or 1 for s in lines_of_text]) height = 26 + lines * 15 if lines == 1: height = 30 return max(height, ws.row_dimensions[row].height or 0, 30) return get_row_height_decorated def write_level(parent, start_row, start_column): levels = program.levels.filter(parent=parent).order_by('customsort') column = start_column row = start_row if not levels: return column + 2 for level in levels: current_column = column cell = ws.cell(row=row, column=column) cell.value = level.display_name get_row_height = row_height_getter(cell) if level.level_depth == bottom_tier: cell.style = 'level_no_span' row = row + 2 ws.row_dimensions[cell.row].height = get_row_height(24) else: column = write_level(level, row+2, column) if column - 2 <= current_column: cell.style = 'level_no_span' ws.row_dimensions[cell.row].height = get_row_height(24) else: cell.style = 'level_span' ws.merge_cells(start_row=row, end_row=row, start_column=current_column, end_column=column-2) width = 24 + 29 * ((column - 2 - current_column) / 2) ws.row_dimensions[cell.row].height = get_row_height(width) if parent and parent.level_depth == bottom_tier-1: column = column + 2 if parent is None: for column in range(column): width = 24.5 if (column + 1) % 2 == 0 else 3 ws.column_dimensions[utils.get_column_letter(column + 1)].width = width for r in range(3, ws.max_row+2): if r % 2 == 0: ws.row_dimensions[r].height = 10 return column write_level(None, 5, 2) filename = "Results Framework.xlsx" response = HttpResponse(content_type='application/ms-excel') response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename) wb.save(response) return response
79cf9f84243f089dd463909f9f64d13a1bb39444
17,123
import copy

import numpy as np


def generate_subwindow(pc, sample_bb, scale, offset=2, oriented=True):
    """
    Generate the search area using the sample_bb.

    :param pc:
    :param sample_bb:
    :param scale:
    :param offset:
    :param oriented: use oriented or axis-aligned cropping
    :return:
    """
    rot_mat = np.transpose(sample_bb.rotation_matrix)
    trans = -sample_bb.center

    if oriented:
        new_pc = PointCloud(pc.points.copy())
        box_tmp = copy.deepcopy(sample_bb)

        # transform to the coordinate system of sample_bb
        new_pc.translate(trans)
        box_tmp.translate(trans)
        new_pc.rotate(rot_mat)
        box_tmp.rotate(Quaternion(matrix=rot_mat))

        new_pc = crop_pc_axis_aligned(new_pc, box_tmp, scale=scale, offset=offset)
    else:
        new_pc = crop_pc_axis_aligned(pc, sample_bb, scale=scale, offset=offset)

        # transform to the coordinate system of sample_bb
        new_pc.translate(trans)
        new_pc.rotate(rot_mat)

    return new_pc
af86fdd4409f98ccd503a9587a6e4b19b0763a31
17,124
def tsfigure(num=None, figsize=None, dpi=None, facecolor=None, edgecolor=None,
             frameon=True, subplotpars=None, FigureClass=TSFigure):
    """
    Creates a new :class:`TimeSeriesFigure` object.

    Parameters
    ----------
    num : {None, int}, optional
        Number of the figure.
        If None, a new figure is created and ``num`` is incremented.
    %(figsize)s
    %(dpi)s
    %(facecolor)s
    %(edgecolor)s
    %(frameon)s
    %(subplotpars)s
    FigureClass : FigureClass
        Class of the figure to create
    """
    figargs = dict(num=num, figsize=figsize, dpi=dpi, facecolor=facecolor,
                   frameon=frameon, FigureClass=FigureClass,
                   subplotpars=subplotpars)
    fig = pylab.figure(**figargs)
    return fig
578b8299ea8b7b8eb05a1f0e68ce4b1f1dca4682
17,125
import zipfile import json def load_predict_result(predict_filename): """Loads the file to be predicted""" predict_result = {} ret_code = SUCCESS try: predict_file_zip = zipfile.ZipFile(predict_filename) except: ret_code = FILE_ERROR return predict_result, ret_code for predict_file in predict_file_zip.namelist(): for line in predict_file_zip.open(predict_file): try: line = line.decode('utf8').strip() except: ret_code = ENCODING_ERROR return predict_result, ret_code try: json_info = json.loads(line) except: ret_code = JSON_ERROR return predict_result, ret_code if 'text' not in json_info or 'spo_list' not in json_info: ret_code = SCHEMA_ERROR return predict_result, ret_code sent = json_info['text'] spo_set = set() for spo_item in json_info['spo_list']: if type(spo_item) is not dict or 'subject' not in spo_item \ or 'predicate' not in spo_item \ or 'object' not in spo_item or \ not isinstance(spo_item['subject'], basestring) or \ not isinstance(spo_item['object'], basestring): ret_code = SCHEMA_ERROR return predict_result, ret_code s = del_bookname(spo_item['subject'].lower()) o = del_bookname(spo_item['object'].lower()) spo_set.add((s, spo_item['predicate'], o)) predict_result[sent] = spo_set return predict_result, ret_code
c26cc24fcdcaa774d05ed6963f66cae346617f46
17,126
import os def localize_all(roi, ignore_exception=True, **kwargs): """ localize all variable local sources in the roi, make TSmaps and associations if requested ignore if extended -- has 'spatial_model' kwargs can have prefix to select subset with name starting with the prefix, e.g. 'SEED' """ tsmin = kwargs.pop('tsmin',10) prefix = kwargs.pop('prefix', None) source_name = kwargs.pop('source_name', None) update = kwargs.pop('update', False) def filt(s): ok = s.skydir is not None\ and isinstance(s, sources.PointSource) \ and np.any(s.spectral_model.free) if not ok: return False if not hasattr(s,'ts'): s.ts = roi.TS(s.name) return ok and s.ts>tsmin if source_name is not None: vpsources=[roi.get_source(source_name)] else: vpsources = filter(filt, roi.sources) tsmap_dir = kwargs.pop('tsmap_dir', None) if tsmap_dir is not None: if tsmap_dir[0]=='$': tsmap_dir = os.path.expandvars(tsmap_dir) if not os.path.exists(tsmap_dir): os.makedirs(tsmap_dir) associator = kwargs.pop('associator', None) tsfits = kwargs.pop('tsfits', True) if len(kwargs.keys())>0: print ('Warning: unrecognized args to localize_all: %s' %kwargs) initw = roi.log_like() for source in vpsources: if prefix is not None and not source.name.startswith(prefix): continue full_localization(roi, source.name, ignore_exception=ignore_exception, update=update, associator=associator, tsmap_dir=tsmap_dir, tsfits=tsfits) curw= roi.log_like() if abs(initw-curw)>1.0 and not update: print ('localize_all: unexpected change in roi state after localization, from %.1f to %.1f (%+.1f)'\ %(initw, curw, curw-initw)) return False else: return True
0bf5c056ebc23a448954884ca06a3e29fd71de34
17,127
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.
    """
    if request.method == verb:
        if hasattr(request, '_post'):
            del request._post
            del request._files
        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb

        setattr(request, verb, request.POST)

    return request
3c304d07ab04950ac65f58405acc3103a3b64dcf
17,128
def close(x, y, rtol, atol):
    """Returns True if x and y are sufficiently close.

    Parameters
    ----------
    rtol
        The relative tolerance.
    atol
        The absolute tolerance.
    """
    # assumes finite weights
    return abs(x - y) <= atol + rtol * abs(y)
bd2597c0c94f2edf686d0dc9772288312cb36d83
17,129
import warnings def plot_precip_field( precip, ptype="intensity", ax=None, geodata=None, units="mm/h", bbox=None, colorscale="pysteps", probthr=None, title=None, colorbar=True, axis="on", cax=None, map_kwargs=None, **kwargs, ): """ Function to plot a precipitation intensity or probability field with a colorbar. .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes .. _SubplotSpec: https://matplotlib.org/api/_as_gen/matplotlib.gridspec.SubplotSpec.html Parameters ---------- precip: array-like Two-dimensional array containing the input precipitation field or an exceedance probability map. ptype: {'intensity', 'depth', 'prob'}, optional Type of the map to plot: 'intensity' = precipitation intensity field, 'depth' = precipitation depth (accumulation) field, 'prob' = exceedance probability field. geodata: dictionary or None, optional Optional dictionary containing geographical information about the field. Required is map is not None. If geodata is not None, it must contain the following key-value pairs: .. tabularcolumns:: |p{1.5cm}|L| +-----------------+---------------------------------------------------+ | Key | Value | +=================+===================================================+ | projection | PROJ.4-compatible projection definition | +-----------------+---------------------------------------------------+ | x1 | x-coordinate of the lower-left corner of the data | | | raster | +-----------------+---------------------------------------------------+ | y1 | y-coordinate of the lower-left corner of the data | | | raster | +-----------------+---------------------------------------------------+ | x2 | x-coordinate of the upper-right corner of the | | | data raster | +-----------------+---------------------------------------------------+ | y2 | y-coordinate of the upper-right corner of the | | | data raster | +-----------------+---------------------------------------------------+ | yorigin | a string specifying the location of the first | | | element in the data raster w.r.t. y-axis: | | | 'upper' = upper border, 'lower' = lower border | +-----------------+---------------------------------------------------+ units : {'mm/h', 'mm', 'dBZ'}, optional Units of the input array. If ptype is 'prob', this specifies the unit of the intensity threshold. bbox : tuple, optional Four-element tuple specifying the coordinates of the bounding box. Use this for plotting a subdomain inside the input grid. The coordinates are of the form (lower left x, lower left y ,upper right x, upper right y). If 'geodata' is not None, the bbox is in map coordinates, otherwise it represents image pixels. colorscale : {'pysteps', 'STEPS-BE', 'BOM-RF3'}, optional Which colorscale to use. Applicable if units is 'mm/h', 'mm' or 'dBZ'. probthr : float, optional Intensity threshold to show in the color bar of the exceedance probability map. Required if ptype is "prob" and colorbar is True. title : str, optional If not None, print the title on top of the plot. colorbar : bool, optional If set to True, add a colorbar on the right side of the plot. axis : {'off','on'}, optional Whether to turn off or on the x and y axis. cax : Axes_ object, optional Axes into which the colorbar will be drawn. If no axes is provided the colorbar axes are created next to the plot. Other parameters ---------------- map_kwargs: dict Optional parameters that need to be passed to :py:func:`pysteps.visualization.basemaps.plot_geography`. Returns ------- ax : fig Axes_ Figure axes. Needed if one wants to add e.g. text inside the plot. 
""" if map_kwargs is None: map_kwargs = {} if "type" in kwargs: warnings.warn( "The 'type' keyword use to indicate the type of plot will be " "deprecated in version 1.6. Use 'ptype' instead." ) ptype = kwargs.get("type") if ptype not in PRECIP_VALID_TYPES: raise ValueError( f"Invalid precipitation type '{ptype}'." f"Supported: {str(PRECIP_VALID_TYPES)}" ) if units not in PRECIP_VALID_UNITS: raise ValueError( f"Invalid precipitation units '{units}." f"Supported: {str(PRECIP_VALID_UNITS)}" ) if ptype == "prob" and colorbar and probthr is None: raise ValueError("ptype='prob' but probthr not specified") if len(precip.shape) != 2: raise ValueError("The input is not two-dimensional array") # Assumes the input dimensions are lat/lon nlat, nlon = precip.shape x_grid, y_grid, extent, regular_grid, origin = get_geogrid( nlat, nlon, geodata=geodata ) ax = get_basemap_axis(extent, ax=ax, geodata=geodata, map_kwargs=map_kwargs) precip = np.ma.masked_invalid(precip) # plot rainfield if regular_grid: im = _plot_field(precip, ax, ptype, units, colorscale, extent, origin=origin) else: im = _plot_field( precip, ax, ptype, units, colorscale, extent, x_grid=x_grid, y_grid=y_grid ) plt.title(title) # add colorbar if colorbar: # get colormap and color levels _, _, clevs, clevs_str = get_colormap(ptype, units, colorscale) if ptype in ["intensity", "depth"]: extend = "max" else: extend = "neither" cbar = plt.colorbar( im, ticks=clevs, spacing="uniform", extend=extend, shrink=0.8, cax=cax ) if clevs_str is not None: cbar.ax.set_yticklabels(clevs_str) if ptype == "intensity": cbar.set_label(f"Precipitation intensity [{units}]") elif ptype == "depth": cbar.set_label(f"Precipitation depth [{units}]") else: cbar.set_label(f"P(R > {probthr:.1f} {units})") if geodata is None or axis == "off": ax.xaxis.set_ticks([]) ax.xaxis.set_ticklabels([]) ax.yaxis.set_ticks([]) ax.yaxis.set_ticklabels([]) if bbox is not None: ax.set_xlim(bbox[0], bbox[2]) ax.set_ylim(bbox[1], bbox[3]) return ax
7e9429310ffdfdb38ac2b6e03b4c846d017060f5
17,130
def get_for_repo(repo, name, default=None):
    """Gets a configuration setting for a particular repository.  Looks for
    a setting specific to the repository, then falls back to a global setting."""
    NOT_FOUND = []  # a unique sentinel distinct from None
    value = get(name, NOT_FOUND, repo)
    if value is NOT_FOUND:
        value = get(name, default, '*')
    return value
5848e4da859f26788ab02b733bc61135c1ea3b80
17,131
from typing import AnyStr


def new_user(tenant: AnyStr, password: AnyStr) -> bool:
    """Return a boolean indicating whether a new tenant was created or not."""
    if not query.get_tenant_id(tenant):
        return True
    return False
ac1bc45213c76712d1ec3553a8545fac5ab67f3a
17,132
def home():
    """
    Home page control code
    :return Rendered page:
    """
    error = request.args.get("error", None)
    state, code = request.args.get("state", None), request.args.get("code", None)
    if code and not has_user() and 'state' in session and session['state'] == state:
        tok = reddit_get_access_token(code)
        username = reddit_get_username(tok)
        session['user'] = username
        session['token'] = tok
        session.modified = True
    session['state'] = str(uuid4())
    session.modified = True
    return render_template('home.html', user=get_user(), error=False,
                           redirect=whisky_recommender.config.REDDIT_REDIRECT,
                           client_id=whisky_recommender.config.REDDIT_CLIENT,
                           state=session['state'])
280f17feff363fa73decfa15bc615aa0c320d3d9
17,133
import datetime

import pytz
import tzlocal


def get_date_list(num_days):
    """
    For an integer number of days (num_days), get an ordered list of DateTime
    objects to report on.
    """
    local_tz = tzlocal.get_localzone()
    local_start_date = local_tz.localize(datetime.datetime.now()).replace(
        hour=0, minute=0, second=0, microsecond=0) - datetime.timedelta(seconds=1)
    logger.debug("local_start_date={d}".format(d=local_start_date.strftime("%Y-%m-%d %H:%M:%S%z %Z")))
    start_date = local_start_date.astimezone(pytz.utc)
    logger.debug("start_date={d}".format(d=start_date.strftime("%Y-%m-%d %H:%M:%S%z %Z")))
    end_date = (start_date - datetime.timedelta(days=num_days)) + datetime.timedelta(seconds=1)
    logger.debug("end_date={d}".format(d=end_date.strftime("%Y-%m-%d %H:%M:%S%z %Z")))
    dates = [start_date - datetime.timedelta(n) for n in range(num_days)]
    return dates
5f338cf6ffcbb10569cfd7878a91f6a2d60831ca
17,134
def is_even(val):
    """
    Confirms if a value is even.

    :param val: Value to be tested.
    :type val: int, float
    :return: True if the number is even, otherwise False.
    :rtype: bool

    Examples:
    --------------------------
    .. code-block:: python

        >>> even_numbers = list(filter(is_even, range(20)))
        >>> print(even_numbers)
        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

        >>> print(is_even(9))
        False

        >>> print(is_even(-2))
        True

        >>> print([value for value in range(20) if is_even(value)])
        [0, 2, 4, 6, 8, 10, 12, 14, 16, 18]

        >>> print([is_even(value) for value in range(4)])
        [True, False, True, False]
    """
    return (val % 2) == 0
1ef0716e1e86ff77b3234bbd664c6b973352c3ea
17,135
import os
from urllib.request import urlretrieve

import pandas as pd


def get_fremont_data(filename="Fremont.csv", url=URL, force_download=False):
    """
    Download and cache the fremont data

    Parameters
    ----------
    filename : str
        csv file, Fremont.csv
    url : str
        URL of the data
    force_download : bool
        if True, force a re-download

    Returns
    -------
    data : pandas dataframe
    """
    if force_download or not os.path.exists(filename):
        urlretrieve(URL, "Fremont.csv")
    data = pd.read_csv("Fremont.csv", index_col="Date", parse_dates=True)
    data.columns = ["Total", "East", "West"]
    return data
c61683423585e52c42bb96c21bcd6dd21ed9edc8
17,136
def min_spacing(mylist):
    """
    Find the minimum spacing in the list.

    Args:
        mylist (list): A list of integer/float.

    Returns:
        int/float: Minimum spacing within the list.
    """
    # Set the maximum of the minimum spacing.
    min_space = max(mylist) - min(mylist)

    # Iteratively find a smaller spacing.
    for item in mylist:
        spaces = [abs(item - item2) for item2 in mylist if item != item2]
        min_space = min(min_space, min(spaces))

    # Return the answer.
    return min_space
b8ce0a46bacb7015c9e59b6573bc2fec0252505d
17,137
import re import os def extract_dawn_compiler_options() -> list: """Generate options_info for the Dawn compiler options struct.""" options_info = [] regex = re.compile(r"OPT\(([^,]+), ?(\w+)") DAWN_CPP_SRC_ROOT = os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, "src", "dawn" ) # Extract info from .cpp files for name in [ os.path.join(DAWN_CPP_SRC_ROOT, "Compiler", "Options.inc"), os.path.join(DAWN_CPP_SRC_ROOT, "Optimizer", "OptimizerOptions.inc"), ]: options_cpp = [] with open(name, "r") as f: for line in f: line = line.strip() if not (line.startswith("//") or line.startswith("#")): if line.startswith("OPT("): m = regex.match(line) type_str, name_str = m.group(1), m.group(2) line = re.sub(regex, f'{name_str} = ("{type_str}" ', line) options_cpp.append(line) elif line: if options_cpp[-1].endswith('"'): options_cpp[-1] += " + " + line else: options_cpp[-1] += line # OPT(TYPE, NAME, DEFAULT_VALUE, OPTION, OPTION_SHORT, HELP, VALUE_NAME, HAS_VALUE, F_GROUP) options_cpp = "\n".join(options_cpp) for old, new in [("false", "'false'"), ("true", "'true'")]: options_cpp = options_cpp.replace(old, new) defs = {} exec(options_cpp, defs) for key, value in defs.items(): if not key.startswith("__"): py_type = pythonize_type(value[0]) py_default = pythonize_value(value[1], as_type=py_type) options_info.append( MemberInfo( py_name=pythonize_name(key), cpp_name=key, py_type=py_type, cpp_type=value[0], py_default=py_default, cpp_default=value[1], const=False, help=value[4], ) ) return options_info
a79f9700f1d0b5c512a5b77b04a16c7fd4c8c105
17,138
from math import sqrt


def sign(x):
    """Return the mathematical sign of the particle."""
    if x.imag:
        return x / sqrt(x.imag ** 2 + x.real ** 2)
    return 0 if x == 0 else -1 if x < 0 else 1
0dca727afbc9c805a858c027a8a4e38d59d9d218
17,139
def wrap_strings(lines: [str], line_width: int):
    """Return a list of strings, wrapped to the specified length."""
    i = 0
    while i < len(lines):
        # if a line is over the limit
        if len(lines[i]) > line_width:
            # (try to) find the rightmost occurrence of a space in the first line_width chars
            try:
                split_index = lines[i][:line_width].rindex(" ")
            except ValueError:
                return None

            # split the line by the found space and add it to the next one
            lines.insert(i + 1, lines[i][split_index + 1:])
            lines[i] = lines[i][:split_index]

        i += 1

    return lines
0a6fa989fd6d27276d2e7d8c91cf8be37f6a3aff
17,140
def has_even_parity(message: int) -> bool:
    """Return True if message has even parity."""
    parity_is_even: bool = True
    while message:
        parity_is_even = not parity_is_even
        message = message & (message - 1)
    return parity_is_even
8982302840318f223e9c1ab08c407d585a725f97
17,141
def is_primitive(structure):
    """
    Checks if a structure is primitive or not.

    :param structure: AiiDA StructureData
    :return: True if the structure can not be refined any further,
        False if the structure can be further refined.
    """
    refined_cell = find_primitive_cell(structure)

    prim = False
    if all(x in structure.cell for x in refined_cell.cell):
        prim = True
    return prim
9f7034bb92d3fdd0505a56bc7d53d1528846ef76
17,142
def mock_mkdir(monkeypatch):
    """Mock the mkdir function."""
    def mocked_mkdir(path, mode=0o755):
        return True

    monkeypatch.setattr("charms.layer.git_deploy.os.mkdir", mocked_mkdir)
e4e78ece1b8e60719fe11eb6808f0f2b99a933c3
17,143
import os
import zipfile


def zip_dir_recursively(base_dir, zip_file):
    """Zip compresses a base_dir recursively."""
    zip_file = zipfile.ZipFile(zip_file, 'w', compression=zipfile.ZIP_DEFLATED)
    root_len = len(os.path.abspath(base_dir))
    for root, _, files in os.walk(base_dir):
        archive_root = os.path.abspath(root)[root_len:]
        for f in files:
            fullpath = os.path.join(root, f)
            archive_name = os.path.join(archive_root, f)
            zip_file.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
    zip_file.close()
    return zip_file
79dc58f508b2f1e78b2cc32208471f75c6a3b15c
17,144
def saferepr(obj, maxsize=240):
    """Return a size-limited safe repr-string for the given object.

    Failing __repr__ functions of user instances will be represented
    with a short exception info and 'saferepr' generally takes care
    to never raise exceptions itself.  This function is a wrapper
    around the Repr/reprlib functionality of the standard 2.6 lib.
    """
    # review exception handling
    srepr = SafeRepr()
    srepr.maxstring = maxsize
    srepr.maxsize = maxsize
    srepr.maxother = 160
    return srepr.repr(obj)
d02f68581867e64a6586548ab627b6893328c42a
17,145
from typing import Callable, TypeVar

# A is the element type of the stream; a TypeVar replaces the stray `from re import A`.
A = TypeVar("A")


def filter(pred: Callable[[A], bool], stream: Stream[A]) -> Stream[A]:
    """Filter a stream of type `A`.

    :param pred: A predicate on type `A`.
    :type pred: `A -> bool`
    :param stream: A stream of type `A` to be filtered.
    :type stream: `Stream[A]`
    :return: A stream of type `A`.
    :rtype: `Stream[A]`
    """
    def _thunk() -> StreamResult[A]:
        next_stream: Stream[A] = stream
        while True:
            next_value, next_stream = next_stream()
            if not pred(next_value):
                continue
            return next_value, filter(pred, next_stream)

    return _thunk
93b3d4c30d4295b2be73200451436c6a4e9ab5cd
17,146
def _get_kernel_size_numel(kernel_size):
    """Determine number of pixels/voxels. ``kernel_size`` must be an ``N``-tuple."""
    if not isinstance(kernel_size, tuple):
        raise ValueError(f"kernel_size must be a tuple. Got {kernel_size}.")
    return _get_numel_from_shape(kernel_size)
fb004817950ece275fc10b4824ee83a1d1b9a6a9
17,147
import uuid


def random():
    """Get a random UUID."""
    return str(uuid.uuid4())
411aeb5254775473b43d3ac4153a27a2f15014cb
17,148
def reautorank(reaumur):
    """This function converts Reaumur to rankine, with Reaumur as parameter."""
    rankine = (reaumur * 2.25) + 491.67
    return rankine
aec2299999e9798530272939125cb42476f095c3
17,149
def list_pets():
    """Shows list of all pets in db"""

    pets = Pet.query.all()
    return render_template('list.html', pets=pets)
60df575932d98ab04e949d6ef6f1fdfa6734ba92
17,150
import glob
import os


def BundleFpmcuUnittests(chroot, sysroot, output_directory):
    """Create artifact tarball for fingerprint MCU on-device unittests.

    Args:
        chroot (chroot_lib.Chroot): The chroot containing the sysroot.
        sysroot (sysroot_lib.Sysroot): The sysroot whose artifacts are being archived.
        output_directory (str): The path where the completed archives should be put.

    Returns:
        str|None - The archive file path if created, None otherwise.
    """
    fpmcu_unittests_root = os.path.join(chroot.path, sysroot.path.lstrip(os.sep),
                                        'firmware', 'chromeos-fpmcu-unittests')
    files = [os.path.relpath(f, fpmcu_unittests_root)
             for f in glob.iglob(os.path.join(fpmcu_unittests_root, '*'))]
    if not files:
        return None

    archive_file = os.path.join(output_directory,
                                constants.FPMCU_UNITTESTS_ARCHIVE_NAME)
    cros_build_lib.CreateTarball(
        archive_file,
        fpmcu_unittests_root,
        compression=cros_build_lib.COMP_BZIP2,
        chroot=chroot.path,
        inputs=files)

    return archive_file
ee737c5a938e069d36d7f1c963aec8ae7dd2b81a
17,151
import glob


def get_lif_list(path):
    """
    Returns a list of files ending in *.lif in provided folder.

    :param: path
    :return: list -- filenames
    """
    path += '/*.lif'
    return glob.glob(path)
8a26d65fc2c69b1007a40ded82225038ead67783
17,152
import argparse def parse_args(): """ Parse input arguments """ parser = argparse.ArgumentParser(description='Single image depth estimation') parser.add_argument('--dataset', dest='dataset', help='training dataset', default='custom', type=str) parser.add_argument('--epochs', dest='max_epochs', help='number of epochs to train', default=NUM_EPOCHS, type=int) parser.add_argument('--cuda', dest='cuda', help='whether use CUDA', action='store_true') parser.add_argument('--bs', dest='bs', help='batch_size', default=16, type=int) parser.add_argument('--num_workers', dest='num_workers', help='num_workers', default=1, type=int) parser.add_argument('--disp_interval', dest='disp_interval', help='display interval', default=10, type=int) parser.add_argument('--output_dir', dest='output_dir', help='output directory', default='saved_models', type=str) # config optimization parser.add_argument('--o', dest='optimizer', help='training optimizer', default="sgd", type=str) parser.add_argument('--lr', dest='lr', help='starting learning rate', default=1e-3, type=float) parser.add_argument('--lr_decay_step', dest='lr_decay_step', help='step to do learning rate decay, unit is epoch', default=5, type=int) parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma', help='learning rate decay ratio', default=0.1, type=float) # set training session parser.add_argument('--s', dest='session', help='training session', default=1, type=int) parser.add_argument('--eval_epoch', dest='eval_epoch', help='number of epoch to evaluate', default=2, type=int) # resume trained model parser.add_argument('--r', dest='resume', help='resume checkpoint or not', default=False, type=bool) parser.add_argument('--start_at', dest='start_epoch', help='epoch to start with', default=0, type=int) parser.add_argument('--checksession', dest='checksession', help='checksession to load model', default=1, type=int) parser.add_argument('--checkepoch', dest='checkepoch', help='checkepoch to load model', default=1, type=int) parser.add_argument('--checkpoint', dest='checkpoint', help='checkpoint to load model', default=0, type=int) # training parameters parser.add_argument('--gamma_sup', dest='gamma_sup', help='factor of supervised loss', default=1., type=float) parser.add_argument('--gamma_unsup', dest='gamma_unsup', help='factor of unsupervised loss', default=1., type=float) parser.add_argument('--gamma_reg', dest='gamma_reg', help='factor of regularization loss', default=10., type=float) args = parser.parse_args() return args
62fd1a807d662253d41c65d9b6add2822fdacca9
17,153
import numpy as np
from scipy.spatial import distance


def compute_distance_matrix(users, basestations):
    """Distances between all users and basestations is calculated.

    Args:
        users: (obj) list of users!
        basestations: (obj) list of basestations!

    Returns:
        (list of) numpy arrays containing the distance between a user
        and all basestations in km!.
    """
    coords_list_ue = [getattr(ele, 'coordinates') for ele in users]
    coords_list_bs = [getattr(ele, 'coordinates') for ele in basestations]
    distance_matrix = []
    count = 0
    for _ in coords_list_ue:
        element = [coords_list_ue[count]]
        coords = element + coords_list_bs
        dist = distance.cdist(coords, coords, 'euclidean')
        new_dist = np.delete(dist[0], 0)
        distance_matrix.append(new_dist)
        count += 1
    return np.array(distance_matrix)
07b6175047d7602288436d163f838077e54054fc
17,154
from .core import resolver


def __getattr__(name):
    """Lazy load the global resolver to avoid circular dependencies with plugins."""
    if name in _SPECIAL_ATTRS:
        res = resolver.Resolver()
        res.load_plugins_from_environment()
        _set_default_resolver(res)
        return globals()[name]
    else:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
20dd678be2b9d3f08513912a40098dc8b436ac81
17,155
import numpy as np


def img_newt(N, xran=(-3, 3), yran=(-3, 3), tol=1e-5, niter=100):
    """
    Add colors to a matrix according to the fixed point of the given equation.
    """
    sol = [-(np.sqrt(3.0)*1j - 1.0)/2.0, (np.sqrt(3.0)*1j + 1.0)/2.0, -1.0]
    col_newt = np.zeros((N, N, 3))
    Y, X = np.mgrid[yran[0]:yran[1]:N*1j, xran[0]:xran[1]:N*1j]
    for row in range(N):
        for col in range(N):
            x = X[row, col]
            y = Y[row, col]
            xf = newt(x + y*1j, fun, der, tol=tol, niter=niter)
            if abs(xf - sol[0]) < 1e-6:
                col_newt[row, col, :] = colors[0]
            if abs(xf - sol[1]) < 1e-6:
                col_newt[row, col, :] = colors[1]
            if abs(xf - sol[2]) < 1e-6:
                col_newt[row, col, :] = colors[2]
            if abs(xf - 1000) < 1e-6:
                col_newt[row, col, :] = colors[3]
    return col_newt
166aa3c5e144972f7ec825f973885f9b528047f0
17,156
def pack_block_header(hdr: block.BlockHeader,
                      abbrev: bool = False,
                      pretty: bool = False,
                      ) -> str:
    """Pack blockchain to JSON string with b64 for bytes."""
    f = get_b2s(abbrev)
    hdr_ = {'timestamp': f(hdr['timestamp']),
            'previous_hash': f(hdr['previous_hash']),
            'nonce': f(hdr['nonce']),
            'merkle_root': f(hdr['merkle_root']),
            'this_hash': f(hdr['this_hash'])
            }
    return json_dumps(hdr_, pretty)
a6df547918ab82bc990ca915d956730cb6a62b87
17,157
def get_datasets(recipe):
    """Get dataset instances from the recipe.

    Parameters
    ----------
    recipe : dict of dict
        The specifications of the core datasets.

    Returns
    -------
    datasets : dict of datasets
        A dictionary of dataset instances, compatible with torch's
        DataLoader objects.
    """
    # "datasets"
    return {dataset: get_instance(**par) for dataset, par in recipe.items()}
f525cf379f13069a1f5255798d963af3389dd5ed
17,158
import re


def is_sedol(value):
    """Checks whether a string is a valid SEDOL identifier.

    Regex from here: https://en.wikipedia.org/wiki/SEDOL

    :param value: A string to evaluate.
    :returns: True if string is in the form of a valid SEDOL identifier."""
    return re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\d$', value)
207ff94a4df99e7a546440cef1242f9a48435118
17,159
def create_substrate(dim):
    """
    The function to create two-sheets substrate configuration with specified
    dimensions of each sheet.

    Arguments:
        dim: The dimensions across X, Y axis of the sheet
    """
    # Building sheet configurations of inputs and outputs
    inputs = create_sheet_space(-1, 1, dim, -1)
    outputs = create_sheet_space(-1, 1, dim, 0)

    substrate = NEAT.Substrate(
        inputs,
        [],  # hidden
        outputs)

    substrate.m_allow_input_output_links = True

    substrate.m_allow_input_hidden_links = False
    substrate.m_allow_hidden_hidden_links = False
    substrate.m_allow_hidden_output_links = False
    substrate.m_allow_output_hidden_links = False
    substrate.m_allow_output_output_links = False
    substrate.m_allow_looped_hidden_links = False
    substrate.m_allow_looped_output_links = False

    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID

    substrate.m_with_distance = True
    substrate.m_max_weight_and_bias = 3.0

    return substrate
9a47bf213d796aecec4b6f630ae30b04dc035d63
17,160
import numpy as np


def remove_artefacts(signal: np.array, low_limit: int = 40, high_limit: int = 210) -> np.array:
    """
    Replace artefacts [ultra-low and ultra-high values] with zero.

    Args:
        signal: (np.array) 1D signal
        low_limit: (int) filter values below it
        high_limit: (int) filter values above it
    Output:
        (np.array) filtered signal
    """
    # replace artefacts with zero
    signal_new = signal.astype('float')
    signal_new[signal < low_limit] = 0   # replace ultra-small values with 0
    signal_new[signal > high_limit] = 0  # replace ultra-large values with 0
    return signal_new
0b85e929588bd5895a9a84c5d03fce88c4f9f7cb
17,161
import cv2
import numpy as np


def normalizedBGR(im, display=True):
    """
    Generate Opponent color space. O3 is just the intensity.
    """
    im = img.norm(im)
    B, G, R = np.dsplit(im, 3)
    b = (B - np.mean(B)) / np.std(B)
    g = (G - np.mean(G)) / np.std(G)
    r = (R - np.mean(R)) / np.std(R)

    out = cv2.merge((np.uint8(img.normUnity(b) * 255),
                     np.uint8(img.normUnity(g) * 255),
                     np.uint8(img.normUnity(r) * 255)))

    if display:
        cv2.imshow('norm bgr', np.hstack((np.uint8(img.normUnity(b) * 255),
                                          np.uint8(img.normUnity(g) * 255),
                                          np.uint8(img.normUnity(r) * 255))))
        cv2.waitKey(0)

    return out, b, g, r
810b4a1ee4d9b5d7f68072c72379fa182b7f34fe
17,162
def feeds(url): """ Tries to find feeds for a given URL. """ url = _full_url(url) data = _get(url) # Check if the url is a feed. if _is_feed(url): return [url] # Try to get feed links from markup. try: feed_links = [link for link in _get_feed_links(data, url) if _is_feed(link)] except: feed_links = [] if feed_links: return feed_links # Try 'a' links. try: links = _get_a_links(data) except: links = [] if links: # Filter to only local links. local_links = [link for link in links if link.startswith(url)] # Try to find feed links. feed_links.extend(_filter_feed_links(local_links)) # If still nothing has been found... if not feed_links: # Try to find feed-looking links. feed_links.extend(_filter_feedish_links(local_links)) # If still nothing has been found... if not feed_links: # BRUTE FORCE IT! guesses = [ 'atom.xml', # Blogger, TypePad 'index.atom', # MoveableType 'index.rdf', # MoveableType 'rss.xml', # Dave Winer/Manila 'index.xml', # MoveableType 'index.rss', # Slash 'feed' # WordPress ] tries = [parse.urljoin(url, g) for g in guesses] feed_links.extend([link for link in tries if _is_feed(link)]) # If *still* nothing has been found, # just try all the links. if links and not feed_links: feed_links.extend(_filter_feed_links(links)) feed_links.extend(_filter_feedish_links(links)) # Filter out duplicates. return list(set(feed_links))
dd16dc751f34fbbf496c9b0142fa5d58372538b2
17,163
def getlog(name):
    """Create logger object with predefined stream handler & formatting

    Parameters
    ----------
    name : str
        module __name__

    Returns
    -------
    logging.logger

    Examples
    --------
    >>> from smseventlog import getlog
    >>> log = getlog(__name__)
    """
    name = '.'.join(str(name).split('.')[1:])

    # can't set name to nothing or that calls the ROOT logger
    if name == '':
        name = 'base'

    return Logger(name)
cd5e0dd4589757e3c8d05614f117b7ce46fe4fb9
17,164
import sys

import pandas as pd


def read_new_probe_design(path: str, reference_type: str = 'genome') -> pd.DataFrame:
    """
    Read amplimap probes.csv file and return pandas dataframe.
    """
    try:
        design = pd.read_csv(path)
        log.info('Read probe design table from %s -- found %d probes', path, len(design))

        if list(design.columns) == mipgen_columns:
            # NB: smmip data seems to be in F2R1 orientation (second read = fwd in genomic coordinates) for fwd probes
            # but F1R2 orientation (second read = rev) for rev probes.
            # cs-tag data seems to be in F1R2 orientation for fwd targets. unclear for rev targets, but presumably F2R1?
            # in other words, CS-tag is in gene orientation, while smMIP is in opposite
            # so both are swapped for MIPs.
            # is this why sequences in probes.csv are currently so confusing?
            log.info('Detected old MIPGEN format, converting...')

            # read the probes file again in old mipgen format and convert
            design = read_and_convert_mipgen_probes(path)

        design = process_probe_design(design, reference_type)
    except Exception as e:
        raise AmplimapReaderException(e, filename=path, should_have_header=True).with_traceback(sys.exc_info()[2])

    return design
e8e8ccfffe514e13af26a7cc7ddd59eb51328c7d
17,165
import numpy as np

def replace_nan(x):
    """
    Replaces NaNs in a 1D array with the nearest finite value.

    Usage: y = replace_nan(x)

    Returns the filled array y without altering the input array x.
    Assumes the input is a numpy array.

    3/2015 BWB
    """
    x2 = np.zeros(len(x))
    np.copyto(x2, x)
    #
    bads = np.flatnonzero(np.isnan(x))  # indices of NaNs
    if bads.size == 0:
        return x2
    else:
        fins = np.flatnonzero(np.isfinite(x))  # indices of all finite values
        for ii in np.arange(0, bads.size):  # for all NaNs
            # locate index of the nearest finite value
            diffs = np.abs(fins - bads[ii])
            idx = diffs.argmin()
            # replace NaN with the nearest finite value
            x2[bads[ii]] = x[fins[idx]]
        return x2
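# Quick sanity check for replace_nan (an illustrative addition, not in the original
# record); uses the numpy import from the snippet above.
_x = np.array([1.0, np.nan, 3.0, np.nan, np.nan, 6.0])
print(replace_nan(_x))  # [1. 1. 3. 3. 6. 6.] -- each NaN takes the nearest finite value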
9100a33dcb7d00b38e7a6a53132db8d13682e499
17,166
def handson_table(request, query_sets, fields): """function to render the scoresheets as part of the template""" return excel.make_response_from_query_sets(query_sets, fields, 'handsontable.html') # content = excel.pe.save_as(source=query_sets, # dest_file_type='handsontable.html', # dest_embed=True) # content.seek(0) # return render( # request, # 'custom-handson-table.html', # { # 'handsontable_content': content.read() # }) # return Response({'handsontable_content': render(content)}, template_name='custom-handson-table.html')
93c1471c142917f5b0492ddb27fdd6c278e9976d
17,167
from functools import reduce
from math import gcd

import networkx as nx

def is_periodic(G):
    """
    https://stackoverflow.com/questions/54030163/periodic-and-aperiodic-directed-graphs
    Test whether a given directed graph is periodic, i.e. whether the greatest
    common divisor of all simple-cycle lengths is greater than 1.
    """
    if not nx.is_strongly_connected(G):
        print("G is not strongly connected, periodicity not defined.")
        return False
    cycles = list(nx.algorithms.cycles.simple_cycles(G))
    cycles_sizes = [len(c) for c in cycles]  # Find all cycle sizes
    cycles_gcd = reduce(gcd, cycles_sizes)  # Find greatest common divisor of all cycle sizes
    is_periodic = cycles_gcd > 1
    return is_periodic
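# Small check (an illustrative addition, not in the original record; requires networkx):
# a pure 2-cycle is periodic, while adding a self-loop makes the gcd of cycle lengths 1,
# i.e. the graph becomes aperiodic.
_G = nx.DiGraph([(0, 1), (1, 0)])
print(is_periodic(_G))  # True  (only cycle length is 2)
_G.add_edge(0, 0)
print(is_periodic(_G))  # False (cycle lengths 1 and 2, gcd == 1)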
6671a1bf57ef6ec973c7d283cb447d890cbd93e2
17,168
def Sphere(individual): """Sphere test objective function. F(x) = sum_{i=1}^d xi^2 d=1,2,3,... Range: [-100,100] Minima: 0 """ #print(individual) return sum(x**2 for x in individual)
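# Illustrative evaluation (added, not part of the original record): the global minimum
# of the sphere function is 0 at the origin.
print(Sphere([0.0, 0.0, 0.0]))  # 0.0
print(Sphere([3, 4]))           # 25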
349b732e931fc5acf8a52213d9ddf88335479b90
17,169
def find_names_for_google(df_birth_names):
    """
    :param df_birth_names: all of the birth data provided by Lu
    :return 1: df_country_found, a dataframe that already contains countries.
            Entries are first filtered through the country list; some countries can be
            problematic (e.g. countries with several names such as the Netherlands, names
            containing special characters, Congo-Brazzaville vs. Congo-Kinshasa,
            "Korea"/"North Korea"/"South Korea" counted as three "countries", South Ossetia,
            some Pacific island states with disputed status, and entries such as Hong Kong
            and Taiwan, which are treated as countries for now).
            Next, check whether the entry is a US state.
            Then check cities: the city must appear in the city list, which is based on the
            worldcities database. If several cities share a name, the one with the larger
            population is used (e.g. Valencia).
    :return 2: df_need_google_search, a dataframe with all entries that are not in the
            country list, are not US states, and cannot be found in the worldcities database.
    """
    whitelist = set('abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    # teststr = " happy t00o go 129.129$%^&*("
    # answer = ''.join(filter(whitelist.__contains__, teststr))
    dirty_list = []
    need_searching_list = []
    for index, row in df_birth_names.head(30).iterrows():
        item = ''.join(filter(whitelist.__contains__, row['birth'])).strip()
        # item = row['birth'].replace("'","").strip()
        if item == "":  # null
            dirty_list.append(np.nan)
            print(item, " is null")
            continue
        if item in COUNTRY_LIST:  # known countries
            dirty_list.append(item)
            print(item, " is a country")
            continue
        if item in US_STATES_LIST:  # add us states as United States
            dirty_list.append("United States")
            print(item, " is a state in the US")
            continue
        if item in NATIONALITY_LIST:  # add nation from nationality information e.g. Chinese -> China
            nation_from_nationality = NATIONALITY_TO_COUNTRY.loc[item]["Country/entity name"]
            dirty_list.append(nation_from_nationality)
            print(item, " is a national of a certain country")
            continue
        if item in CITY_LIST:  # known city to country e.g. London -> UK
            country_from_city = CITY_TO_COUNTRY.loc[item]["country"]
            dirty_list.append(country_from_city)
            print(item, " is a city and it has been transformed")
            continue
        flag1 = 0
        # substring match against the country list
        for i in COUNTRY_LIST:
            if i in item:
                dirty_list.append(i)
                print(i, " maybe a country")
                flag1 = 1
                break
        if flag1 == 1:
            continue
        flag2 = 0
        for i in US_STATES_LIST:
            if i in item:
                dirty_list.append("United States")
                print(i, "maybe a state in the US")
                flag2 = 1
                break
        if flag2 == 1:
            continue
        flag3 = 0
        for i in CITY_LIST:
            if i in item:
                country_from_city = CITY_TO_COUNTRY.loc[i]["country"]
                dirty_list.append(country_from_city)
                print(i, " maybe a city, and we are attempting to transform it")
                flag3 = 1
                break
        if flag3 == 1:
            continue
        need_searching_list.append(item)
        print("this item: ", item, " is not added")
    need_searching_list = list(dict.fromkeys(need_searching_list))  # remove duplicates
    df_country_found = pd.DataFrame(dirty_list)
    df_need_google_search = pd.DataFrame(need_searching_list)
    return df_country_found, df_need_google_search
6358fd692784389530ebf4c3a2059c3923104d2f
17,170
def make_address_mask(universe, sub=0, net=0, is_simplified=True): """Returns the address bytes for a given universe, subnet and net. Args: universe - Universe to listen sub - Subnet to listen net - Net to listen is_simplified - Whether to use nets and subnet or universe only, see User Guide page 5 (Universe Addressing) Returns: bytes - byte mask for given address """ address_mask = bytearray() if is_simplified: # Ensure data is in right range universe = clamp(universe, 0, 32767) # Make mask msb, lsb = shift_this(universe) # convert to MSB / LSB address_mask.append(lsb) address_mask.append(msb) else: # Ensure data is in right range universe = clamp(universe, 0, 15) sub = clamp(sub, 0, 15) net = clamp(net, 0, 127) # Make mask address_mask.append(sub << 4 | universe) address_mask.append(net & 0xFF) return address_mask
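# Illustrative call (an added sketch, not in the original record). It assumes the helper
# functions clamp() and shift_this() used above are available and that shift_this()
# returns (msb, lsb). Under that assumption, universe 257 (0x101) in simplified mode
# would be expected to yield the bytes [1, 1] (LSB first, then MSB).
addr = make_address_mask(257)
print(list(addr))  # expected [1, 1] under the assumptions above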
d360dde7ecc4ecc99e32df53f2f0806d5d396f1f
17,171
def get_offline_target(featureset, start_time=None, name=None): """return an optimal offline feature set target""" # todo: take status, start_time and lookup order into account for target in featureset.status.targets: driver = kind_to_driver[target.kind] if driver.is_offline and (not name or name == target.name): return get_target_driver(target, featureset) return None
6297f26e188ae31df3cf76b8e49229876cae23f6
17,172
def get_img_size(src_size, dest_size):
    """
    Returns image dimensions proportional to the original, depending on the
    orientation of the target size (portrait or landscape).

    :param src_size: size of the original
    :type src_size: list / tuple
    :param dest_size: target size
    :type dest_size: list / tuple
    :rtype: tuple
    """
    width, height = dest_size
    src_width, src_height = src_size
    if height >= width:
        return (int(float(width) / height * src_height), src_height)
    return (src_width, int(float(height) / width * src_width))
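# Worked example (added for illustration): for a 1920x1080 original and a 100x200
# (portrait) target, the target is taller than wide, so the source height is kept and
# the width is scaled by the target's width/height ratio; the landscape case scales
# the height instead.
print(get_img_size((1920, 1080), (100, 200)))  # (540, 1080)
print(get_img_size((1920, 1080), (200, 100)))  # (1920, 960)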
133dab529cd528373a1c7c6456a34cf8fd22dac9
17,173
def laxnodeset(v): """\ Return a nodeset with elements from the argument. If the argument is already a nodeset, it self will be returned. Otherwise it will be converted to a nodeset, that can be mutable or immutable depending on what happens to be most effectively implemented.""" if not isinstance(v, NodeSet): v = immnodeset(v) return v
3210f8d1c1d47c8871d0ba82c793b6cd85069566
17,174
def load_config(): """ Loads the configuration file. Returns: - (json) : The configuration file. """ return load_json_file('config.json')
05099118414d371ebc521e498503be1798c39066
17,175
import subprocess

def dtm_generate_footprint(tile_id):
    """
    Generates a footprint file using gdal.
    :param tile_id: tile id in the format "rrrr_ccc" where rrrr is the row number and ccc is the column number
    :return: execution status
    """
    # Initiate return value and log output
    return_value = ''
    log_file = open('log.txt', 'a')

    try:
        cmd = settings.gdaltlindex_bin + \
              settings.dtm_footprint_folder + '/DTM_1km_' + tile_id + '_footprint.shp ' + \
              settings.dtm_folder + '/DTM_1km_' + tile_id + '.tif'
        cmd_return = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT).decode()
        log_file.write(
            '\n' + tile_id + ' footprint generation... \n' + cmd_return +
            '\n' + tile_id + ' successful.\n\n')
        return_value = 'success'
    except Exception:
        log_file.write('\n' + tile_id + ' footprint generation failed. \n')
        return_value = 'gdalError'

    # Close log file
    log_file.close()

    return return_value
da8526a61b75b4ec3ce22e052ba75d9ecee84f62
17,176
def favicon(request):
    """
    best by tezar tantular from the Noun Project
    """
    if settings.DEBUG:
        favicon_path = "static/favicon-dev.ico"
    else:
        favicon_path = "static/favicon.ico"

    with open(favicon_path, "rb") as favicon_file:
        image_data = favicon_file.read()

    # TODO add cache headers
    return HttpResponse(image_data, content_type="image/x-icon")
8824d8e6ff313c3773b3c6dddc0833eca3847fba
17,177
from bs4 import BeautifulSoup def text_from_html(body): """ Gets all raw text from html, removing all tags. :param body: html :return: str """ soup = BeautifulSoup(body, "html.parser") texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return " ".join(t.strip() for t in visible_texts)
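# text_from_html() above relies on a tag_visible() filter that is not included in this
# record. Below is a minimal sketch of such a filter (an assumption, modelled on the
# common BeautifulSoup recipe) plus a usage example.
from bs4.element import Comment

def tag_visible(element):
    # Drop text that lives inside non-visible containers or HTML comments.
    if element.parent.name in ('style', 'script', 'head', 'title', 'meta', '[document]'):
        return False
    if isinstance(element, Comment):
        return False
    return True

print(text_from_html("<html><head><title>t</title></head>"
                     "<body><p>Hello</p><script>var x;</script></body></html>"))
# -> Hello   (title and script text are filtered out)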
313a5f404120c17290b726cb00b05e2276a07895
17,178
def alert_history(): """ Alert History: RESTful CRUD controller """ return s3_rest_controller(rheader = s3db.cap_history_rheader)
34a2b6bf90ab0b73eae3b64c83ffebc918e2f1a3
17,179
def chat(): """ Chat room. The user's name and room must be stored in the session. """ if 'avatar' not in session: session['avatar'] = avatars.get_avatar() data = { 'user_name': session.get('user_name', ''), 'avatar': session.get('avatar'), 'room_key': session.get('room_key', ''), 'password': session.get('password', '') } if data['user_name'] == '' or data['room_key'] == '': return redirect(url_for('.index')) return render_template('chat.html', **data)
d7024960ac8a03082deb696e0c0e6009dfe8e349
17,180
def _get_split_idx(N, blocksize, pad=0):
    """
    Returns a list of indexes dividing an array into blocks of size blocksize
    with optional padding.  Padding takes into account that the resultant block
    must fit within the original array.

    Parameters
    ----------
    N : Nonnegative integer
        Total array length
    blocksize : Nonnegative integer
        Size of each block
    pad : Nonnegative integer
        Pad to add on either side of each index

    Returns
    -------
    split_idx : List of 2-tuples
        Indices to create splits
    pads_used : List of 2-tuples
        Pads that were actually used on either side

    Examples
    --------
    >>> split_idx, pads_used = _get_split_idx(5, 2)
    >>> print split_idx
    [(0, 2), (2, 4), (4, 5)]
    >>> print pads_used
    [(0, 0), (0, 0), (0, 0)]

    >>> split_idx, pads_used = _get_split_idx(5, 2, pad=1)
    >>> print split_idx
    [(0, 3), (1, 5), (3, 5)]
    >>> print pads_used
    [(0, 1), (1, 1), (1, 0)]
    """
    num_fullsplits = N // blocksize
    remainder = N % blocksize

    split_idx = []
    pads_used = []
    for i in range(num_fullsplits):
        start = max(0, i * blocksize - pad)
        end = min(N, (i + 1) * blocksize + pad)
        split_idx.append((start, end))

        leftpad = i * blocksize - start
        rightpad = end - (i + 1) * blocksize
        pads_used.append((leftpad, rightpad))

    # Append the last split if there is a remainder
    if remainder:
        start = max(0, num_fullsplits * blocksize - pad)
        split_idx.append((start, N))

        leftpad = num_fullsplits * blocksize - start
        pads_used.append((leftpad, 0))

    return split_idx, pads_used
21935190de4c42fa5d7854f6608387dd2f004fbc
17,181
def buydown_loan(amount, nrate, grace=0, dispoints=0, orgpoints=0, prepmt=None):
    """
    In this loan, the periodic payments are recalculated when there are
    changes in the value of the interest rate.

    Args:
        amount (float): Loan amount.
        nrate (float, pandas.Series): nominal interest rate per year.
        grace (int): number of grace periods without paying the principal.
        dispoints (float): Discount points of the loan.
        orgpoints (float): Origination points of the loan.
        prepmt (pandas.Series): generic cashflow representing prepayments.

    Returns:
        An object of the class ``Loan``.

    >>> nrate = interest_rate(const_value=10, start='2016Q1', periods=11, freq='Q', chgpts={'2017Q2':20})

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=None) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      200.99
    Total payment:      1200.99
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   123.993257    21.983698    102.009559
    2018Q1       337.664393      20.0   123.993257    16.883220    107.110037
    2018Q2       230.554356      20.0   123.993257    11.527718    112.465539
    2018Q3       118.088816      20.0   123.993257     5.904441    118.088816
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     3.376644e+02
    2018Q1     2.305544e+02
    2018Q2     1.180888e+02
    2018Q3     1.136868e-13

    >>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
    >>> pmt['2017Q4'] = 200

    >>> buydown_loan(amount=1000, nrate=nrate, dispoints=0, orgpoints=0, prepmt=pmt) # doctest: +NORMALIZE_WHITESPACE
    Amount:             1000.00
    Total interest:      180.67
    Total payment:      1180.67
    Discount points:       0.00
    Origination points:    0.00
    <BLANKLINE>
            Beg_Ppal_Amount  Nom_Rate  Tot_Payment  Int_Payment  Ppal_Payment  \\
    2016Q1      1000.000000      10.0     0.000000     0.000000      0.000000
    2016Q2      1000.000000      10.0   114.258763    25.000000     89.258763
    2016Q3       910.741237      10.0   114.258763    22.768531     91.490232
    2016Q4       819.251005      10.0   114.258763    20.481275     93.777488
    2017Q1       725.473517      10.0   114.258763    18.136838     96.121925
    2017Q2       629.351591      20.0   123.993257    31.467580     92.525677
    2017Q3       536.825914      20.0   123.993257    26.841296     97.151961
    2017Q4       439.673952      20.0   323.993257    21.983698    302.009559
    2018Q1       137.664393      20.0    50.551544     6.883220     43.668324
    2018Q2        93.996068      20.0    50.551544     4.699803     45.851741
    2018Q3        48.144328      20.0    50.551544     2.407216     48.144328
    <BLANKLINE>
            End_Ppal_Amount
    2016Q1     1.000000e+03
    2016Q2     9.107412e+02
    2016Q3     8.192510e+02
    2016Q4     7.254735e+02
    2017Q1     6.293516e+02
    2017Q2     5.368259e+02
    2017Q3     4.396740e+02
    2017Q4     1.376644e+02
    2018Q1     9.399607e+01
    2018Q2     4.814433e+01
    2018Q3     4.263256e-14

    """
    if not isinstance(nrate, pd.Series):
        raise TypeError('nrate must be a pandas.Series object.')

    if prepmt is None:
        prepmt = nrate.copy()
        prepmt[:] = 0
    else:
        verify_period_range([nrate, prepmt])

    life = len(nrate) - grace - 1

    begppalbal = nrate.copy()
    intpmt = nrate.copy()
    ppalpmt = nrate.copy()
    totpmt = nrate.copy()
    endppalbal = nrate.copy()

    begppalbal[:] = 0
    intpmt[:] = 0
    ppalpmt[:] = 0
    totpmt[:] = 0
    endppalbal[:] = 0

    ##
    ## balance calculation
    ##
    pyr = getpyr(nrate)

    for time in range(grace + life + 1):

        if time == 0:
            #
            begppalbal[time] = amount
            endppalbal[time] = amount
            totpmt[time] = amount * (dispoints + orgpoints) / 100
            ### intpmt[time] = amount * dispoints / 100
            #
        else:
            #
            # periodic payment per period
            #
            if time <= grace:
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                totpmt[time] = intpmt[time]
                endppalbal[time] = begppalbal[time]
            else:
                pmt = -pvpmt(nrate=nrate[time], nper=grace + life - time + 1,
                             pval=endppalbal[time - 1], pmt=None, pyr=pyr)
                totpmt[time] = pmt + prepmt[time]

                # balance
                begppalbal[time] = endppalbal[time - 1]
                intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
                ppalpmt[time] = totpmt[time] - intpmt[time]
                endppalbal[time] = begppalbal[time] - ppalpmt[time]

    data = {'Beg_Ppal_Amount': begppalbal}
    result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
                  dispoints=dispoints, orgpoints=orgpoints, data=data)
    result['Nom_Rate'] = nrate
    result['Tot_Payment'] = totpmt
    result['Int_Payment'] = intpmt
    result['Ppal_Payment'] = ppalpmt
    result['End_Ppal_Amount'] = endppalbal
    return result
46eb6bbaaa940b5cf1abd702ee5d9e2e20c6dab3
17,182
from typing import Optional def ffill(array: np.ndarray, value: Optional[int] = 0) -> np.ndarray: """Forward fills an array. Args: array: 1-D or 2-D array. value: Value to be filled. Default is 0. Returns: ndarray: Forward-filled array. Examples: >>> x = np.array([0, 5, 0, 0, 2, 0]) >>> ffill(x) [0, 5, 5, 5, 2, 2] Notes: Works only in axis=1 direction. """ ndims = len(array.shape) ran = np.arange(array.shape[ndims - 1]) idx = np.where((array != value), ran, 0) idx = np.maximum.accumulate(idx, axis=ndims-1) # pylint: disable=E1101 if ndims == 2: return array[np.arange(idx.shape[0])[:, None], idx] return array[idx]
f5774c3e50ddbf2ffa9cf84df5cb57b135d1549a
17,183
def svn_stringbuf_from_file(*args): """svn_stringbuf_from_file(char const * filename, apr_pool_t pool) -> svn_error_t""" return _core.svn_stringbuf_from_file(*args)
b375a43bf8e050aa5191f387d930077680e9b019
17,184
def poly4(x, b, b0): """ Defines a function with polynom 4 to fit the curve Parameters ---------- x: numpy.ndarray: x of f(x) b: float Parameter to fit b0 : int y-intercept of the curve Returns ------- f : numpy.ndarray Result of f(x) """ return b * np.array(x) ** 4 + b0
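# Quick check (an illustrative addition, not in the original record): with b=2 and b0=1
# the curve is 2*x**4 + 1. Requires numpy, which poly4() itself also assumes.
import numpy as np
print(poly4([0, 1, 2], 2.0, 1.0))  # [ 1.  3. 33.]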
aed3603640400488219f2cca82e57268f32de000
17,185
from teospy.tests.tester import Tester def chkiapws06table6(printresult=True,chktol=_CHKTOL): """Check accuracy against IAPWS 2006 table 6. Evaluate the functions in this module and compare to reference values of thermodynamic properties (e.g. heat capacity, lapse rate) in IAPWS 2006, table 6. :arg bool printresult: If True (default) and any results are outside of the given tolerance, then the function name, reference value, result value, and relative error are printed. :arg float chktol: Tolerance to use when choosing to print results (default _CHKTOL). :returns: :class:`~teospy.tests.tester.Tester` instances containing the functions, arguments, reference values, results, and relative errors from the tests. The first instance involves derivatives of ice_g whereas the second tests the other thermodynamic functions. """ fargs0 = (273.16,611.657) fargs1 = (273.152519,101325.) fargs2 = (100.,1e8) propfargs = [fargs0,fargs1,fargs2] ders = [(0,0), (1,0), (0,1), (2,0), (1,1), (0,2)] # Tester instance for derivatives of ice_g derfuns = _ice_g derfnames = 'ice_g' # Derivatives change before arguments do here derfargs = [(der+fargs) for fargs in propfargs for der in ders] derargfmt = '({0:1g},{1:1g},{2:7.3f},{3:7g})' derrefs = [0.611784135,0.122069433940e+4,0.109085812737e-2, -0.767602985875e+1,0.174387964700e-6,-0.128495941571e-12, 0.10134274069e+3,0.122076932550e+4,0.109084388214e-2,-0.767598233365e+1, 0.174362219972e-6,-0.128485364928e-12,-0.222296513088e+6, 0.261195122589e+4,0.106193389260e-2,-0.866333195517e+1, 0.274505162488e-7,-0.941807981761e-13] header = 'Ice Gibbs energy derivatives' dertest = Tester(derfuns,derfargs,derrefs,derfnames,derargfmt,header=header) # Tester instance for other ice properties propfuns = [enthalpy,helmholtzenergy,internalenergy,entropy,cp,density, expansion,pcoefficient,kappa_t,kappa_s] propfnames = ['enthalpy','helmholtzenergy','internalenergy','entropy','cp', 'density','expansion','pcoefficient','kappa_t','kappa_s'] propargfmt = '({0:7.3f},{1:7g})' proprefs = [ [-0.333444253966e+6,-0.333354873637e+6,-0.483491635676e+6], [-0.55446875e-1,-0.918701567e+1,-0.328489902347e+6], [-0.333444921197e+6,-0.333465403393e+6,-0.589685024936e+6], [-0.122069433940e+4,-0.122076932550e+4,-0.261195122589e+4], [0.209678431622e+4,0.209671391024e+4,0.866333195517e+3], [0.916709492200e+3,0.916721463419e+3,0.941678203297e+3], [0.159863102566e-3,0.159841589458e-3,0.258495528207e-4], [0.135714764659e+7,0.135705899321e+7,0.291466166994e+6], [0.117793449348e-9,0.117785291765e-9,0.886880048115e-10], [0.114161597779e-9,0.114154442556e-9,0.886060982687e-10] ] header = 'Ice thermodynamic properties' proptest = Tester(propfuns,propfargs,proprefs,propfnames,propargfmt, header=header) # Run Tester instances and print results dertest.run() proptest.run() if printresult: dertest.printresults(chktol=chktol) proptest.printresults(chktol=chktol) return dertest, proptest
c0fce67d3a268ec0b67ff845f5671c67aa394846
17,186
def flat(arr): """ Finds flat things (could be zeros) ___________________________ """ arr = np.array(arr) if arr.size == 0: return False mean = np.repeat(np.mean(arr), arr.size) nonzero_residuals = np.nonzero(arr - mean)[0] return nonzero_residuals.size < arr.size/100
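# Illustrative check (added, not in the original record; requires numpy, which flat()
# itself also assumes): a constant signal is "flat" (almost no residual from its mean),
# while a ramp is not.
import numpy as np
print(flat(np.zeros(500)))   # True
print(flat(np.arange(500)))  # False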
ce2697d95165b46cec477265df6ccb337cb89af1
17,187
def sensitive_fields(*paths, **typed_paths): """ paths must be a path like "password" or "vmInfo.password" """ def ret(old_init): def __init__(self, *args, **kwargs): if paths: ps = ["obj['" + p.replace(".", "']['") + "']" for p in paths] setattr(self, SENSITIVE_FIELD_NAME, ps) old_init(self) return __init__ return ret
e174519c253d4676ae7c07c1b11eb18e532d5f61
17,188
from datetime import datetime, timedelta
import time

def get_timestamp_diff(diff):
    """Get the timestamp in milliseconds for the day `diff` days before/after today."""
    tmp_str = (datetime.today() + timedelta(diff)).strftime("%Y-%m-%d %H:%M:%S")
    tmp_array = time.strptime(tmp_str, "%Y-%m-%d %H:%M:%S")
    return int(time.mktime(tmp_array)) * 1000
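# Example calls (an illustrative addition, not in the original record): diff=0 gives
# today's timestamp in milliseconds, diff=-7 the one from a week ago; the actual values
# depend on when this is run.
print(get_timestamp_diff(0))
print(get_timestamp_diff(-7))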
61ca093471103376ee44d940552db6337a4e65f5
17,189
def can_delete(account, bike): """ Check if an account can delete a bike. Account must be a team member and bike not borrowed in the future. """ return (team_control.is_member(account, bike.team) and not has_future_borrows(bike))
f962e465b6a5eb62feea2683cdd8328b5591fb43
17,190
import os

def config_file():
    """
    Returns the config file ($HOME/.config/python-pulseaudio-profiles-trayicon/config.json).

    :return: the path to the configuration file
    :rtype: str
    """
    return os.path.join(config_dir(), "config.json")
2841d6b52f2b95a194b2858ee1522f0516efbd9d
17,191
def get_node_types(nodes, return_shape_type=True):
    """
    Get the maya node types for the nodes supplied.

    Returns:
        dict: dict[node_type_name] = list of the supplied nodes matching that type
    """
    found_type = {}

    for node in nodes:
        node_type = cmds.nodeType(node)

        if node_type == 'transform':

            if return_shape_type:
                shapes = get_shapes(node)

                if shapes:
                    node_type = cmds.nodeType(shapes[0])

        if node_type not in found_type:
            found_type[node_type] = []

        found_type[node_type].append(node)

    return found_type
7867f97f7228ac77ae44fda04672a8224aa7c1f4
17,192
import csv
from collections import OrderedDict

import numpy as np

def updateDistances(fileName):
    """
    Calculate and update the distance on the given CSV file.

    Parameters
    ----------
    fileName: str
        Path and name of the CSV file to process.

    Returns
    -------
    ret: bool
        Response indicating if the update was successful or not.
    """

    # Read the face data from the CSV file
    try:
        file = open(fileName, 'r+', newline='')
    except:
        return False

    reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

    # Read the face data from the CSV file and recalculate the distances,
    # also building a list to later recalculate the distance gradients
    frames = []
    distances = []

    faces = OrderedDict()
    prevDist = 0
    for row in reader:
        if row[0] != 'frame':
            # Read the face data from the CSV file
            frameNum = int(row[0])
            face = FaceData()
            face.fromList(row[1:])
            face.gradient = 0.0
            face.calculateDistance()
            faces[frameNum] = face

            # In case the face has been detected but the distance calculation
            # failed, assume the same distance as the previous detected face
            if not face.isEmpty():
                if face.distance == 0:
                    face.distance = prevDist
                prevDist = face.distance

            # Consider for the calculation of the gradients only the non-empty
            # faces (i.e. the frames where a face was detected)
            if not face.isEmpty():
                frames.append(frameNum)
                distances.append(face.distance)

    # Calculate the gradients from the helper list of distances
    gradients = np.gradient(distances)
    for i, frameNum in enumerate(frames):
        faces[frameNum].gradient = gradients[i]

    # Save the face data back to the CSV file
    file.truncate(0)
    file.seek(0)
    writer.writerow(['frame'] + FaceData.header())
    for frameNum, face in faces.items():
        writer.writerow([frameNum] + face.toList())
    file.close()

    return True
c19e0adcf731f9fd1af87f5dfe3a61889d395457
17,193
def hr(*args, **kwargs): """ The HTML <hr> element represents a thematic break between paragraph-level elements (for example, a change of scene in a story, or a shift of topic with a section). In previous versions of HTML, it represented a horizontal rule. It may still be displayed as a horizontal rule in visual browsers, but is now defined in semantic terms, rather than presentational terms. """ return void_el('hr', *args, **kwargs)
959106dc2c71334b5a88045f8a26a9f42a2d2fdb
17,194
def as_linker_option(p): """Return as an ld library path argument""" if p: return '-Wl,' + p return ''
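# Example (an illustrative addition, not in the original record): empty paths collapse
# to an empty string instead of producing a dangling -Wl, flag.
print(as_linker_option('/usr/local/lib'))  # -Wl,/usr/local/lib
print(as_linker_option(''))                # (empty string)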
452c06034be5c3c2525eb2bfad011e468daef02b
17,195
def split_backbone(options): """ Split backbone fasta file into chunks. Returns dictionary of backbone -> id. """ backbone_to_id = {} id_counter = 0 # Write all backbone files to their own fasta file. pf = ParseFasta(options.backbone_filename) tuple = pf.getRecord() while tuple is not None: print tuple[0] split_backbone = open(options.output_dir + '/' + options.prefix + '-' + str(id_counter) + '.fasta', 'w') split_backbone.write('>' + tuple[0] + '\n' + tuple[1]) split_backbone.close() backbone_to_id[tuple[0]] = options.prefix + '-' + str(id_counter) id_counter += 1 tuple = pf.getRecord() return backbone_to_id
6446e90a1aa2e38ca01ebb8a86b8cd1dbd3abd75
17,196
import pkg_resources def _get_highest_tag(tags): """Find the highest tag from a list. Pass in a list of tag strings and this will return the highest (latest) as sorted by the pkg_resources version parser. """ return max(tags, key=pkg_resources.parse_version)
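# Example (an illustrative addition, not in the original record; requires setuptools'
# pkg_resources): the comparison is version-aware, so '1.10.0' beats '1.9.1' even
# though a plain string sort would rank it lower.
print(_get_highest_tag(['1.2.0', '1.10.0', '1.9.1']))  # 1.10.0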
8d2580f6f6fbb54108ee14d6d4834d376a65c501
17,197
def add_comment(request, pk): """ Adds comment to the image - POST. Checks the user and assigns it to the comment.posted_by """ form = PhotoCommentForm(request.POST) if form.is_valid(): comment = form.save(commit=False) comment.user = request.user comment.save() else: text = 'You have used forbidden word!' messages.warning(request, text) return redirect('photo comments', pk)
4488a183ca7786c65d355991cec38fed01864ab1
17,198
import os def predict(): """ runs the three models and displays results view """ filename = request.form['filename'] file_root = os.path.splitext(filename)[0] full_filename = os.path.join(app.config['STATIC_MATRIX_PATH'], filename) # run YOLOv5 model os.system(f'python models/yolov5/detect.py ' f'--weights models/yolov5/best-2.pt ' f'--source {app.config["STATIC_MATRIX_FOLDER"]} ' f'--out {app.config["TEMP_FOLDER"]} ' f'--img 416 --conf 0.4 --save-txt') # run toLatex model latex = results_to_latex( os.path.join(app.config['TEMP_PATH'], file_root + '.txt'), CLASSES) latex_filename = os.path.join(app.config['STATIC_MATRIX_PATH'], file_root) # run renderLatex model displaylatex(latex.replace('\n', ''), latex_filename) # delete temporary folder os.system('rm -r temp') return render_template('results.html', latex=latex, matrix_image=full_filename, image_filename=filename, latex_pdf=latex_filename+'.pdf')
5eeb7452228562b0881105f2e991a781f26921ef
17,199