Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def safe_download(f):
    """
    Makes a download safe, by trapping any app errors and redirecting
    to a default landing page.
    Assumes that the first 2 arguments to the function after request
    are domain and app_id, or there are keyword arguments with those names
    """
    @wraps(f)
    def _safe_download(request, *args, **kwargs):
        domain = args[0] if len(args) > 0 else kwargs["domain"]
        app_id = args[1] if len(args) > 1 else kwargs["app_id"]
        latest = True if request.GET.get('latest') == 'true' else False
        target = request.GET.get('target') or None
        try:
            request.app = get_app(domain, app_id, latest=latest, target=target)
            return f(request, *args, **kwargs)
        except (AppEditingError, CaseError), e:
            logging.exception(e)
            messages.error(request, "Problem downloading file: %s" % e)
            return HttpResponseRedirect(reverse("corehq.apps.app_manager.views.view_app", args=[domain,app_id]))
    return _safe_download
1d48c48ac067fcc180af37b90949123c5dc864d9
11,500
def Moebius(quaternion_or_infinity, a,b=None,c=None,d=None):
    """
    The Moebius transformation of a quaternion (z) with parameters a,b,c and d

    >>> import qmath
    >>> a = qmath.quaternion([1,1,1,0])
    >>> b = qmath.quaternion([-2,1,0,1])
    >>> c = qmath.quaternion([1,0,0,0])
    >>> d = qmath.quaternion([0,-1,-3,-4])
    >>> z = qmath.quaternion([1,1,3,4])
    >>> qmath.Moebius(z,a,b,c,d)
    (-5.0+7.0i+7.0k)
    >>> d = - z
    >>> z = qmath.Moebius(z,a,b,c,d)
    >>> z
    'Infinity'
    >>> qmath.Moebius(z,a,b,c,d)
    (1.0+1.0i+1.0j)
    """
    if type(a) == tuple:
        return Moebius(quaternion_or_infinity,a[0],a[1],a[2],a[3])
    else:
        A = quaternion(a)
        B = quaternion(b)
        C = quaternion(c)
        D = quaternion(d)
        if A * D - B * C == 0:
            raise RuntimeError(' this is not a Moebius transformation')
        elif quaternion_or_infinity == 'Infinity':
            return A / C
        else:
            Z = quaternion(quaternion_or_infinity)
            try:
                return (A * Z + B) * quaternion.inverse(C * Z + D)
            except:
                return 'Infinity'
9bfd05268caa6aad1247886717932ca332212e4b
11,501
def _passthrough_zotero_data(zotero_data):
    """
    Address known issues with Zotero metadata.
    Assumes zotero data should contain a single bibliographic record.
    """
    if not isinstance(zotero_data, list):
        raise ValueError('_passthrough_zotero_data: zotero_data should be a list')
    if len(zotero_data) > 1:
        # Sometimes translation-server creates multiple data items for a single record.
        # If so, keep only the parent item, and remove child items (such as notes).
        # https://github.com/zotero/translation-server/issues/67
        zotero_data = zotero_data[:1]
    return zotero_data
cec2271a7a966b77e2d380686ecccc0307f78116
11,502
import json


def telebot():
    """endpoint responsible to parse and respond bot webhook"""
    payload = json.loads(request.data)
    message = payload.get('message', payload.get('edited_message',''))
    msg_from = message.get('from')
    user_id = msg_from.get('id')
    user_first_name = msg_from.get('first_name','')
    user_last_name = msg_from.get('last_name','')
    user_is_bot = msg_from.get('is_bot')
    chat = message.get('chat')
    chat_id = chat.get('id')
    command = message.get('text')
    if user_is_bot or message == '':
        return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\'t answer you!'})
    bot_response = {
        'method': 'sendMessage',
        'chat_id' : chat_id,
        'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',
        'parse_mode':'Markdown',
    }
    return jsonify(bot_response)
3a42fee4a89e1be3fa1ec17da21738bfcefba4ba
11,503
import os


def pkg_config(cfg):
    """Returns PkgConfig pkg config object."""
    pkg_config_py = os.path.join(get_vta_hw_path(), "config/pkg_config.py")
    libpkg = {"__file__": pkg_config_py}
    exec(compile(open(pkg_config_py, "rb").read(), pkg_config_py, "exec"), libpkg, libpkg)
    PkgConfig = libpkg["PkgConfig"]
    return PkgConfig(cfg)
fd8e36f0694c46f9afef5108dbff4fdc38a3b543
11,504
def root(tmpdir):
    """Return a pytest temporary directory"""
    return tmpdir
9fa01d67461f8ce1e3d3ad900cf8a893c5a075aa
11,505
from app.crud.core import ready
import logging


def _check_storage(log_fn: tp.Callable) -> bool:
    """See if the storage system is alive."""
    try:
        log_fn('Attempting to contact storage system', depth=1)
        result = ready()
        return result
    except Exception as ex:
        log_fn(ex, level=logging.WARN, depth=1)
        return False
b20ca64094126a40fd8eb0ce76e3329c8b4da6cb
11,506
def ignore_ip_addresses_rule_generator(ignore_ip_addresses):
    """
    generate tshark rule to ignore ip addresses

    Args:
        ignore_ip_addresses: list of ip addresses

    Returns:
        rule string
    """
    rules = []
    for ip_address in ignore_ip_addresses:
        rules.append("-Y ip.dst != {0}".format(ip_address))
    return rules
3ac43f28a4c8610d4350d0698d93675572d6ba44
11,507
def readmission(aFileName):
    """
    Load a mission from a file into a list. The mission definition is in the Waypoint file
    format (http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).

    This function is used by upload_mission().
    """
    print "\nReading mission from file: %s" % aFileName
    cmds = vehicle.commands
    missionlist=[]
    with open(aFileName) as f:
        for i, line in enumerate(f):
            if i==0:
                if not line.startswith('QGC WPL 110'):
                    raise Exception('File is not supported WP version')
            else:
                linearray=line.split('\t')
                ln_index=int(linearray[0])
                ln_currentwp=int(linearray[1])
                ln_frame=int(linearray[2])
                ln_command=int(linearray[3])
                ln_param1=float(linearray[4])
                ln_param2=float(linearray[5])
                ln_param3=float(linearray[6])
                ln_param4=float(linearray[7])
                ln_param5=float(linearray[8])
                ln_param6=float(linearray[9])
                ln_param7=float(linearray[10])
                ln_autocontinue=int(linearray[11].strip())
                cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)
                missionlist.append(cmd)
    return missionlist
08e92ef784340dcd9bbd3ca8bb85a9c8a9211841
11,508
def remove_stop_words(words):
    """Remove all stop words.

    Args:
        words (list): The list of words

    Returns:
        list: An updated word list with stopwords removed.
    """
    # http://stackoverflow.com/questions/5486337/
    # how-to-remove-stop-words-using-nltk-or-python
    return [w for w in words if w.lower() not in stopwords.words('english')]
29910d1c04cb27ac281428a5401501e4c0e633ae
11,509
def synthetic_costs_1():
    """
    Uncertainty in 5 points at [0,0] on X1 can cause it to flip to [1,0] if needed to misclassify
    Uncertainty in 1 point at [1,1] on X2 can cause it to flip to [1,0] if needed to misclassify
    All other points certain
    """
    costs = np.array([[1,4],[1,4],[1,4],[1,4],[1,4],[4,4],[4,4],
                      [4,4],[4,4],[4,4], [4,1], [4,4],[4,4]])
    return costs
97753d9e816feba56b609685831df2d183ab408f
11,510
def example_one(request, context=None):
    """
    Return web page for example one.
    """
    if context is None:
        context = {}
    session = request.session.get("ApiSession", None)
    if session is None:
        return no_session_set(request)
    session = Session.deserialize(session)
    origin_codes = get_codes_with_filter(session, REPORTING_AIRPORT_CODE, 0)
    context.update(
        {
            "title": "Example 1",
            "active": "example_one",
            "origin_codes": origin_codes,
            "dest_codes": EXAMPLE_ONE_DESTS,
        }
    )
    return render(request, "example_one.html", context)
25bc3fea514e4011c3be513868fd58d0c2b80d2f
11,511
from typing import Any


def decode(cls: Any, value: bytes) -> Any:
    """Decode value in katcp message to a type.

    If a union type is provided, the value must decode successfully
    (i.e., without raising :exc:`ValueError`) for exactly one of the types
    in the union, otherwise a :exc:`ValueError` is raised.

    Parameters
    ----------
    cls
        The target type, or a :class:`typing.Union` of types.
    value
        Raw (but unescaped) value in katcp message

    Raises
    ------
    ValueError
        if `value` does not have a valid value for `cls`
    TypeError
        if `cls` is not a registered type or union of registered types.

    See also
    --------
    :func:`register_type`
    """
    union_args = _union_args(cls)
    if union_args is not None:
        values = []  # type: List[Any]
        for type_ in union_args:
            try:
                values.append(decode(type_, value))
            except ValueError:
                pass
        if len(values) == 1:
            return values[0]
        elif not values:
            raise ValueError('None of the types in {} could decode {!r}'.format(
                cls, value))
        else:
            raise ValueError('{!r} is ambiguous for {}'.format(value, cls))
    else:
        return get_type(cls).decode(cls, value)
3036b69089e68d2a47c3ca110024bde6a026ba5d
11,512
from typing import TextIO


def load_f0(fhandle: TextIO) -> annotations.F0Data:
    """Load an ikala f0 annotation

    Args:
        fhandle (str or file-like): File-like object or path to f0 annotation file

    Raises:
        IOError: If f0_path does not exist

    Returns:
        F0Data: the f0 annotation data
    """
    lines = fhandle.readlines()
    f0_midi = np.array([float(line) for line in lines])
    f0_hz = librosa.midi_to_hz(f0_midi) * (f0_midi > 0)
    confidence = (f0_hz > 0).astype(float)
    times = (np.arange(len(f0_midi)) * TIME_STEP) + (TIME_STEP / 2.0)
    f0_data = annotations.F0Data(times, f0_hz, confidence)
    return f0_data
7c0f47e63db1a6fee4718420d74799fa73740b52
11,513
def remove_fallen(lst):
    """removes fallen orcs from a list"""
    return [x for x in lst if x.standing]
9e621321909dc7aa13da3d2a7902bb4604ae62f6
11,514
def gc_resnet152(num_classes):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(GCBottleneck, [3, 8, 36, 3], num_classes=num_classes)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
a1986afd48284471045b08322e008796ee7743bb
11,515
def consume_entropy(generated_password: str, quotient: int, max_length: int) -> str:
    """
    Takes the entropy (quotient) and the length of password (max_length) required
    and uses the remainder of their division as the index to pick a character
    from the characters list.

    This process occurs recursively until the password is of the required length.
    """
    if len(generated_password) >= max_length:
        return generated_password

    quotient, remainder = divmod(quotient, len(characters))
    generated_password += characters[remainder]

    return consume_entropy(generated_password, quotient, max_length)
8ba58b34704e9db389241a255e9ec1963e508c99
11,516
def randomNormal(n, height, baseshape=[]):
    """
    Generate random positions, normally distributed along z.

    Base shape can be:
        [] (1D sim)
        [Ly] (2D sim)
        [Lx, Ly] (3D sim)
    Where Lx, Ly are lengths along x, y.
    """
    nDim = len(baseshape) + 1
    pos = np.zeros([n, nDim])
    z = np.random.randn(n)
    z *= height
    pos[:,-1] = z
    for i in range(nDim - 1):
        pos[:, i] = np.random.rand(n) * baseshape[i]
    return pos
96d703ecc059fe180b71f547dfee7f259d803a87
11,517
from typing import OrderedDict


def index(model_name=None):
    """ Index page. """
    registered_models = mdb.registered_models
    if model_name:
        model = next((m for m in registered_models if m.__name__.lower() == model_name.lower()), None)
    elif registered_models:
        model = registered_models[0]
    if not model:
        abort(404)
    model_name = model.__name__.lower()

    # Fetch the indexes defined for the given model
    index_dict = OrderedDict({'_id': ObjectId})
    for i in model.indexes:
        value = i['fields']
        if isinstance(value, str):
            index_dict[value] = model._valid_paths[value]
        elif isinstance(value, list):
            for val in value:
                if isinstance(val, tuple):
                    field, direction = val
                    index_dict[field] = model._valid_paths[field]
                else:
                    index_dict[val] = model._valid_paths[val]

    # Query conditions
    '''
    20161123/Samuel/do not use populate_model to build the query conditions for now
    # Call populate_model to convert the query conditions into a data object; it converts the data types of the conditions automatically
    search_record = populate_model(request.args, model, False)
    # Pull the non-empty values out of the data object and build a mongoDB query condition from them
    condition = {f: v for f, v in search_record.iteritems() if v}
    '''
    condition = {}
    for k, t in index_dict.items():
        v = request.args.get(k, None)
        if v:
            cv = convert_from_string(v, t)
            condition[k.replace('.$', '')] = cv

    # Pagination support
    page = int(request.args.get('_p', 1))
    count = model.count(condition)
    start = (page - 1) * PAGE_COUNT

    # Only show the indexed fields in the returned results
    projection = {k.replace('.$', ''): True for k in index_dict}
    current_app.logger.debug(
        'There are %s %ss for condition %s, with projection %s' % (count, model_name, condition, projection))

    # TODO: sorting
    records = model.find(condition, projection, start, PAGE_COUNT)
    pagination = Pagination(page, PAGE_COUNT, count)

    # current_app.logger.debug('Indexed fields for %s are %s' % (model_name, index_dict))
    return render_template('/crud/index.html',
                           models=registered_models,
                           model=model,
                           index_dict=index_dict,
                           records=records,
                           pagination=pagination)
b7cff0c56b0ccc2f5e911ce046210e1b7c8ae05c
11,518
import os
from unittest.mock import call


def download(names, tempdir=None, extra_args=None):
    """Gather pip packages in `tempdir`

    Arguments:
        names (list): Names of packages to install, in pip-format,
            e.g. ["six==1"]
        tempdir (str, optional): Absolute path to where pip packages go
            until they've been installed as Rez packages, defaults to the cwd
        extra_args (list, optional): Additional arguments, typically only
            relevant to pip rather than pipz

    Returns:
        distributions (list): Downloaded distlib.database.InstalledDistribution

    Raises:
        OSError: On anything gone wrong with subprocess and pip
    """
    extra_args = extra_args or []

    assert isinstance(names, (list, tuple)), (
        "%s was not a tuple or list" % names
    )
    assert all(isinstance(name, _basestring) for name in names), (
        "%s contained non-string" % names
    )

    tempdir = tempdir or os.getcwd()

    # Build pip commandline
    cmd = [
        "python", "-m", "pip", "install",
        "--target", tempdir,

        # Only ever consider wheels, anything else is ancient
        "--use-pep517",

        # Handle case where the Python distribution used alongside
        # pip already has a package installed in its `site-packages/` dir.
        "--ignore-installed",

        # rez pip users don't have to see this
        "--disable-pip-version-check",
    ]

    for extra_arg in extra_args:
        if extra_arg in cmd:
            print_warning("'%s' argument ignored, used internally" % extra_arg)
            continue
        cmd += [extra_arg]

    cmd += names

    call(cmd)

    return sorted(
        find_distributions(tempdir),

        # Upper-case characters typically come first
        key=lambda d: d.key
    )
2ac41052218b1de9308dc7a553a1637acf6e9938
11,519
def _parseCellContentsSection(fileAsList, lineIdx):
    """ returns fractCoords from Cell Contents section of castep

    Args:
        fileAsList(str list): Each entry is 1 line of the castep input file
        lineIdx(int): The index containing the line "cell contents"

    Returns
        fractCoords: nx4 iter with each containing [x,y,z,symbol]. Used to init UnitCell objects
    """
    finished = False
    while not finished:
        currLine = fileAsList[lineIdx].strip()
        if "Fractional coord" in fileAsList[lineIdx]:
            lineIdx = lineIdx + 3
            fractCoords = list()
            while "xx" not in currLine:
                currLine = fileAsList[lineIdx].strip()
                splitLine = currLine.split()
                if len(splitLine) == 1:
                    break
                currCoords = [float(x) for x in splitLine[3:6]] + [splitLine[1]]
                fractCoords.append(currCoords)
                lineIdx = lineIdx + 1
            break
        else:
            lineIdx = lineIdx+1
    return fractCoords, lineIdx
3baa1a200442ef8681a0741bfa2a60d9ca1e20b2
11,520
def get_avg_no_of_feat_values(contents):
    """ Helper to calculate numbers of different values of categorical features, averaged for all features """
    total = 0
    for i in range(0, len(contents[0])):
        total += len(set([x[i] for x in contents]))
    return float(total) / float(len(contents[0]))
4e913298d7f133eb08afe23e4999f5b20f455dc1
11,521
def plot_trend_line(axes_, xd, yd, c='r', alpha=1, cus_loc = None, text_color='black',
                    return_params=False, extra_text='', t_line_1_1=True, fit_function=None,
                    fontsize_=12, add_text=True):
    """Make a line of best fit"""
    #create clean series
    x_, y_ = coincidence(xd,yd)

    if fit_function is not None:
        params = curve_fit(fit_function, x_, y_)
        print('fitted parameters')
        print(params[0])

        fit_line_x = np.arange(int(np.nanmin(x_)),int(np.nanmax(x_))+1,.1)
        plotting_par_list = [fit_line_x]
        for fit_par in params[0]:
            plotting_par_list.append(fit_par)
        funt_par = tuple(plotting_par_list)
        fit_line_y = fit_function(*funt_par)
        axes_.plot(fit_line_x, fit_line_y, c, alpha=alpha)

        # calculate R2
        plotting_par_list = [x_]
        params_str_ = ''
        for i_, fit_par in enumerate(params[0]):
            if extra_text == '':
                params_str_ = params_str_ + 'fit parameters ' + str(i_+1) + ': ' + '$%0.2f$' % (fit_par) + '\n'
            else:
                params_str_ = params_str_ + extra_text + '$%0.2f$' % (fit_par) + '\n'
            plotting_par_list.append(fit_par)
        funt_par = tuple(plotting_par_list)
        fit_line_y = fit_function(*funt_par)
        residuals = y_ - fit_line_y
        ss_res = np.sum(residuals**2)
        ss_tot = np.sum((y_ - np.mean(y_))**2)
        Rsqr = float(1 - (ss_res / ss_tot))

        # Plot R^2 value
        x_1 = np.nanmin(x_)
        y_2 = np.nanmax(y_)
        error_text = '$R^2 = %0.2f$' % Rsqr
        if cus_loc is None:
            axes_.text(x_1, y_2 , params_str_ + error_text, fontsize=fontsize_,
                       horizontalalignment='left',verticalalignment='top',color=text_color,
                       bbox={'facecolor': 'white', 'edgecolor': 'none'})
        else:
            axes_.text(cus_loc[0], cus_loc[1] , params_str_ + error_text, fontsize=fontsize_,
                       horizontalalignment='left',verticalalignment='top',color=text_color,
                       bbox={'facecolor': 'white', 'edgecolor': 'none'})
    else:
        # Calculate trend line
        params = np.polyfit(x_, y_, 1)
        intercept = params[-1]
        slope = params[-2]
        minxd = np.nanmin(x_)
        maxxd = np.nanmax(x_)

        xl = np.array([minxd, maxxd])
        yl = slope * xl + intercept
        print('fitted parameters')
        print(slope, intercept)

        # Plot trend line
        axes_.plot(xl, yl, c, alpha=alpha)

        # Calculate R Squared
        poly_1d = np.poly1d(params)
        ybar = np.sum(y_) / len(y_)
        ssreg = np.sum((poly_1d(x_) - ybar) ** 2)
        sstot = np.sum((y_ - ybar) ** 2)
        Rsqr = float(ssreg / sstot)

        # Plot R^2 value
        x_1 = np.nanmin(x_)
        y_2 = np.nanmax(y_)
        if intercept >= 0:
            if extra_text=='':
                equat_text = '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
            else:
                equat_text = extra_text + '\n' + '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
        else:
            if extra_text=='':
                equat_text = '$Y = %0.2f*x %0.2f$' % (slope,intercept)
            else:
                equat_text = extra_text + '\n' + '$Y = %0.2f*x %0.2f$' % (slope,intercept)
        error_text = '$R^2 = %0.2f$' % Rsqr
        if add_text:
            if cus_loc is None:
                axes_.text(x_1, y_2 , equat_text + '\n' + error_text, fontsize=fontsize_,
                           horizontalalignment='left',verticalalignment='top',color=text_color)
            else:
                axes_.text(cus_loc[0], cus_loc[1] , equat_text + '\n' + error_text, fontsize=fontsize_,
                           horizontalalignment='left',verticalalignment='top',color=text_color)

    # plot 1:1 line if true
    if t_line_1_1:
        xy_min = np.min([np.nanmin(x_),np.nanmin(y_)])
        xy_max = np.max([np.nanmax(x_),np.nanmax(y_)])
        axes_.plot([xy_min, xy_max], [xy_min, xy_max], 'k--')

    if return_params:
        return Rsqr, params
    else:
        return Rsqr
a4d6e41bf03524f257531bbb0f2bb43d1b3b6b8b
11,522
from pathlib import Path
import yaml


def get_oil_type_atb(
    oil_attrs, origin, destination, transport_data_dir, random_generator
):
    """Randomly choose type of cargo oil spilled from an ATB (articulated tug and barge)
    based on AIS track origin & destination, and oil cargo attribution analysis.

    Unlike traditional tank barges, the vessels with 'atb' designation are known oil-cargo
    vessels. We used three different data sources to verify: AIS, Dept of Ecology's fuel
    transfer records and Charlie Costanzo's ATB list. Details of traffic can be seen in
    this google spreadsheet:
    https://docs.google.com/spreadsheets/d/1dlT0JydkFG43LorqgtHle5IN6caRYjf_3qLrUYqANDY/edit

    Because of this pre-identification and selection method, we can assume that all ATBs
    are oil-cargo atbs and that the absence of origin-destination information is due to
    issues in linking ship tracks and not ambiguity about whether traffic is oil-cargo
    traffic.

    :param dict oil_attrs: Oil attribution information from the output of make_oil_attrs.py.

    :param str or None origin: Origin of AIS track from which spill occurs.

    :param str or None destination: Destination of AIS track from which spill occurs.

    :param transport_data_dir: Directory path to marine_transport_data files repository
                               cloned from https://github.com/MIDOSS/marine_transport_data.
    :type transport_data_dir: :py:class:`pathlib.Path`

    :param random_generator: PCG-64 random number generator
    :type random_generator: :py:class:`numpy.random.Generator`

    :return: Type of oil spilled.
    :rtype: str
    """
    vessel_type = "atb"
    # Assign US and CAD origin/destinations from oil_attrs file
    CAD_origin_destination = oil_attrs["categories"]["CAD_origin_destination"]
    US_origin_destination = oil_attrs["categories"]["US_origin_destination"]

    # Get cargo oil type attribution information from oil-type yaml files
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name
    with yaml_file.open("rt") as f:
        CAD_yaml = yaml.safe_load(f)
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name
    with yaml_file.open("rt") as f:
        WA_in_yaml = yaml.safe_load(f)
    WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml)
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name
    with yaml_file.open("rt") as f:
        WA_out_yaml = yaml.safe_load(f)
    WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml)
    # US_origin is for US as origin
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name
    with yaml_file.open("rt") as f:
        US_yaml = yaml.safe_load(f)
    # US_combined represents the combined import and export of oil
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name
    with yaml_file.open("rt") as f:
        USall_yaml = yaml.safe_load(f)
    yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name
    with yaml_file.open("rt") as f:
        Pacific_yaml = yaml.safe_load(f)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # NOTE: these pairs need to be used together for "get_oil_type_cargo"
    # (but don't yet have error-checks in place):
    # - "WA_in_yaml" and "destination"
    # - "WA_out_yaml" and "origin"
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if origin in CAD_origin_destination:
        if origin == "Westridge Marine Terminal":
            if destination == "U.S. Oil & Refining":
                oil_type = get_oil_type_cargo(
                    CAD_yaml, origin, vessel_type, random_generator
                )
            elif destination in US_origin_destination:
                oil_type = get_oil_type_cargo(
                    CAD_yaml, origin, vessel_type, random_generator
                )
            elif destination in CAD_origin_destination:
                # assume export within CAD is from Jet fuel storage tanks
                # as there is a pipeline to Parkland for crude oil
                oil_type = "jet"
            else:
                oil_type = get_oil_type_cargo(
                    CAD_yaml, origin, vessel_type, random_generator
                )
        else:
            if destination in US_origin_destination:
                # we have better information on WA fuel transfers,
                # so I prioritize this information source
                oil_type = get_oil_type_cargo(
                    WA_in_yaml, destination, vessel_type, random_generator
                )
            elif destination == "ESSO Nanaimo Departure Bay":
                oil_type = get_oil_type_cargo(
                    CAD_yaml, destination, vessel_type, random_generator
                )
            elif destination == "Suncor Nanaimo":
                oil_type = get_oil_type_cargo(
                    CAD_yaml, destination, vessel_type, random_generator
                )
            else:
                oil_type = get_oil_type_cargo(
                    CAD_yaml, origin, vessel_type, random_generator
                )
    elif origin in US_origin_destination and origin not in WA_out_noinfo[vessel_type]:
        if destination == "Westridge Marine Terminal":
            # Westridge stores jet fuel from US for re-distribution
            oil_type = "jet"
        else:
            oil_type = get_oil_type_cargo(
                WA_out_yaml, origin, vessel_type, random_generator
            )
    elif (
        destination in US_origin_destination
        and destination not in WA_in_noinfo[vessel_type]
    ):
        oil_type = get_oil_type_cargo(
            WA_in_yaml, destination, vessel_type, random_generator
        )
    elif destination in CAD_origin_destination:
        if destination == "Westridge Marine Terminal":
            # Westridge doesn't receive crude for storage
            oil_type = "jet"
        else:
            oil_type = get_oil_type_cargo(
                CAD_yaml, destination, vessel_type, random_generator
            )
    elif origin == "Pacific":
        oil_type = get_oil_type_cargo(
            Pacific_yaml, origin, vessel_type, random_generator
        )
    elif origin == "US":
        oil_type = get_oil_type_cargo(US_yaml, origin, vessel_type, random_generator)
    else:
        # For all other traffic, use a generic fuel attribution from the combined
        # US import and export
        oil_type = get_oil_type_cargo(USall_yaml, None, vessel_type, random_generator)

    return oil_type
e7e6e51ece2bb5b4fffc70d2507c2e5ff062bbd8
11,523
def get_jwt():
    """
    Get Authorization token and validate its signature
    against the application's secret key, .
    """
    expected_errors = {
        KeyError: WRONG_PAYLOAD_STRUCTURE,
        AssertionError: JWK_HOST_MISSING,
        InvalidSignatureError: WRONG_KEY,
        DecodeError: WRONG_JWT_STRUCTURE,
        InvalidAudienceError: WRONG_AUDIENCE,
        TypeError: KID_NOT_FOUND
    }
    token = get_auth_token()
    try:
        jwks_payload = jwt.decode(token, options={'verify_signature': False})
        assert 'jwks_host' in jwks_payload
        jwks_host = jwks_payload.get('jwks_host')
        key = get_public_key(jwks_host, token)
        aud = request.url_root
        payload = jwt.decode(
            token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
        )
        set_ctr_entities_limit(payload)
        return payload
    except tuple(expected_errors) as error:
        message = expected_errors[error.__class__]
        raise AuthorizationError(message)
9c52369b38db9815769ea8277c3e3721ba20c1c9
11,524
def start_volume(name, force=False):
    """
    Start a gluster volume

    name
        Volume name

    force
        Force the volume start even if the volume is started
        .. versionadded:: 2015.8.4

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.start mycluster
    """
    cmd = "volume start {0}".format(name)
    if force:
        cmd = "{0} force".format(cmd)

    volinfo = info(name)
    if name not in volinfo:
        log.error("Cannot start non-existing volume %s", name)
        return False

    if not force and volinfo[name]["status"] == "1":
        log.info("Volume %s already started", name)
        return True

    return _gluster(cmd)
6dc936b4e09beb9713c32e4c93e8649999f82c3c
11,525
def hist_equal(img, z_max=255):
    """
    Histogram equalization: brighten the dark regions and darken the bright regions.
    :param img:
    :param z_max: difference between the brightest and darkest values of the original image
    :return:
    """
    if len(img.shape) == 2:
        height, width = img.shape
        n_chan = 1
    elif len(img.shape) == 3:
        height, width, n_chan = img.shape
        print(img[:, :, 0].shape)
    # H, W = img.shape
    # S is the total of pixels
    n_pixle = height * width
    out = img.copy()
    sum_h = 0.

    if n_chan == 1:
        for i in range(1, 255):
            ind = np.where(img == i)
            sum_h += len(img[ind])
            z_prime = z_max / n_pixle * sum_h
            out[ind] = z_prime
    else:
        for c in range(n_chan):
            tmp_img = img[:, :, c]
            tmp_out = tmp_img.copy()
            for i in range(1, 255):
                ind = np.where(tmp_img == i)
                sum_h += len(tmp_img[ind])
                z_prime = z_max / n_pixle * sum_h
                tmp_out[ind] = z_prime
            out[:, :, c] = tmp_out

    out = out.astype(np.uint8)
    return out
e6aaf76ce8088b9519cd896d8236a84f01761976
11,526
def combine(*indices_lists):
    """
    Return all the combinations from lists of indices

    :param indices_lists: each argument is a list of indices (it must be a list)

    :return: The combined list of indices
    """
    if len([*indices_lists]) > 1:
        return [i for i in product(*indices_lists)]
    else:
        return set(*indices_lists)
839762c9645e0c8d6ea31a21113a5efd6b97f1de
11,527
import re


def get_endpoint(query):
    """
    Regex to parse domain and API endpoint from a SoQL query via FROM statement
    :param query: str, SoQL-formatted query
    :return url, endpoint, query: str objects, domain, endpoint, and original query sans FROM statement
    """
    url = re.search(r'\w+\.\w+\.(\w{2,3})', query, flags=re.I)
    endpoint = re.search(r'(\w{4}-\w{4})\.json', query, flags=re.I)
    query = re.sub(r'from( +|\t+|\n+).+', '', query, flags=re.I)
    return url.group(), endpoint.group(1), query
4496c85f2e6f908bd5dcef7195b821998ef79c42
11,528
def load_data(filename: str) ->pd.DataFrame:
    """
    Load house prices dataset and preprocess data.
    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    df = pd.read_csv(filename)
    df.fillna(0, inplace=True)
    df[["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot",
        "floors", "waterfront", "view", "condition", "grade", "sqft_above",
        "sqft_basement", "yr_built", "yr_renovated", "zipcode", "lat", "long",
        "sqft_living15", "sqft_lot15"]] = df[
        ["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot",
         "floors", "waterfront", "view", "condition", "grade", "sqft_above",
         "sqft_basement", "yr_built", "yr_renovated", "zipcode", "lat", "long",
         "sqft_living15", "sqft_lot15"]].apply(pd.to_numeric)
    df['date'] = df['date'].astype("str").apply(lambda s: s[:8])
    df['date'] = df['date'].astype('float64')
    df = df[
        (df["id"] >= 1) &
        (df["date"] >= 20000000) & (df["date"] <= 20220000) &
        (df["price"] >= 50000) & (df["price"] <= 10000000) &
        (df["bedrooms"] >= 0) & (df["bedrooms"] <= 15) &
        (df["bathrooms"] >= 0) & (df["bathrooms"] <= 12) &
        (df["sqft_living"] >= 200) & (df["sqft_living"] <= 100000) &
        (df["sqft_lot"] >= 450) & (df["sqft_lot"] <= 1800000) &
        (df["floors"] >= 1) & (df["floors"] <= 4) &
        (df["waterfront"] == 0) | (df["waterfront"] == 1) &
        (df["view"] >= 0) & (df["view"] <= 4) &
        (df["condition"] >= 1) & (df["condition"] <= 5) &
        (df["grade"] >= 1) & (df["grade"] <= 13) &
        (df["sqft_above"] >= 250) & (df["sqft_above"] <= 10000) &
        (df["sqft_basement"] >= 0) & (df["sqft_basement"] <= 5000) &
        (df["yr_built"] >= 1800) & (df["yr_built"] <= 2022) &
        (df["yr_renovated"] >= 0) & (df["yr_renovated"] <= 2022) &
        (df["zipcode"] >= 98000) & (df["zipcode"] <= 99000) &
        (df["lat"] >= 47) & (df["lat"] <= 48) &
        (df["long"] >= -123) & (df["long"] <= -121) &
        (df["sqft_living15"] >= 300) & (df["sqft_living15"] <= 10000) &
        (df["sqft_lot15"] >= 300) & (df["sqft_lot15"] <= 1000000)
    ]
    # inserting the "yr_renovated" col the last year in which the building had had any renovation.
    df["yr_renovated"] = df[["yr_built", "yr_renovated"]].max(axis=1)
    prices_by_zipcode = pd.DataFrame({'zipcode': df['zipcode'], 'price': df['price']})
    prices_by_zipcode = prices_by_zipcode.groupby('zipcode').mean()
    prices_by_zipcode.rename(columns={'price': 'mean_price'}, inplace=True)
    df = pd.merge(df, prices_by_zipcode, on='zipcode')
    df = df.drop(['id', 'zipcode', 'lat', 'long'], 1)
    return df
26c785cb72b883cab03b9da6c7718b71e7ccea76
11,529
def version_match(required, candidate):
    """Test that an available version is a suitable match for a required version.

    To be suitable a version must be of the same major version as required
    and be at least a match in minor/patch level.

    eg. 3.3 is a match for a required 3.1 but 4.1 is not.

    :param tuple required: the version that must be met.
    :param tuple candidate: the version to test against required.

    :returns: True if candidate is suitable False otherwise.
    :rtype: bool
    """
    return _discover.version_match(required, candidate)
bc537fdae084a3c3ccb7b8336703ef4c2476de6e
11,530
from typing import Optional
import datetime


def get_last_upgraded_at(module: base.Module) -> Optional[datetime.datetime]:
    """
    Get the timestamp of the last time this module was upgraded.
    """
    return settings.get_last_upgraded_at(module.name)
bf884bf4c249448929b987504d400d6ba1b12927
11,531
import ast
import collections
import re
import logging


def parse_header_file(header_file):
    """Parse a single header file to get all defined constants out of it."""
    resolved_values = collections.OrderedDict()
    raw_matches = {}

    with open(header_file, "r") as fd:
        all_file_lines = collections.OrderedDict(
            [
                (lineno, line.strip())
                for lineno, line in enumerate(fd, start=1)
                if not line.isspace()
            ]
        )

    line_iterator = iter(all_file_lines.items())
    for lineno, line in line_iterator:
        line, _comment = clean_line(line)

        # First check to see if this is a #define statement
        match = re.match(r"^#define\s+UC_(?P<id>\w+)\s+(?P<value>.*)$", line)
        if match:
            name = "UC_" + match.group("id")
            raw_value = match.group("value")
            try:
                resolved_values[name] = ast.literal_eval(raw_value)
            except (NameError, SyntaxError, ValueError):
                raw_matches[name] = raw_value
            continue

        # Not a #define; see if it's an enum.
        if "enum uc_" not in line.lower():
            continue

        # This is the beginning of an enum. Subsequent lines until the closing `}` are
        # part of it. We need to keep track because enums without an explicitly defined
        # value are incremented by one from the previous enum value.
        next_enum_value = 0
        enum_start_line = lineno

        while True:
            lineno, line = next(line_iterator, (None, None))
            if line is None:
                # Hit EOF before we hit the end of the enum. That's odd.
                logging.warning(
                    "Hit EOF before end of enum beginning on line %d.", enum_start_line
                )
                break
            elif "}" in line:
                # Hit the end of the enum.
                break

            line, _comment = clean_line(line)

            # Sometimes we have multiple enum definitions on one line. We need to handle
            # these one at a time. Splitting the line by commas should be enough to
            # separate out multiple expressions.
            for expression in line.strip(",").split(","):
                expression = expression.strip()
                if not expression:
                    continue

                # See if this enum value is being assigned rather than implicit.
                match = re.match(r"^UC_(?P<id>\w+)\s*=\s*(?P<expr>.+)$", expression)
                if match:
                    # Enum value is assigned. Whatever's on the right-hand side, any
                    # names it references must already be defined.
                    name = "UC_" + match.group("id")
                    raw_value = match.group("expr")
                    try:
                        processed_value = eval(raw_value, resolved_values)
                    except NameError as nerr:
                        logging.error(
                            "Failed to resolve %r on line %d: %s", name, lineno, nerr
                        )
                        continue
                    resolved_values[name] = processed_value
                    next_enum_value = processed_value + 1
                else:
                    # Not an explicit assignment. Expect this expression to be just a
                    # single identifier.
                    match = re.match(r"^UC_(\w+)$", expression)
                    if match:
                        name = match.group(1)
                        resolved_values["UC_" + name] = next_enum_value
                        next_enum_value += 1
                    else:
                        raise SyntaxError(
                            "Couldn't match any expression type to: %r" % expression
                        )

    for name, raw_value in raw_matches.items():
        # Convert any remaining values that are still unresolved. This usually only
        # applies to #define macros that reference other constants.
        if name not in resolved_values:
            resolved_values[name] = eval(raw_value, resolved_values)

    return resolved_values
1681939a78efe6426cdea1577a8781a7f046c02d
11,532
import platform


def get_linux_distribution(get_full_name, supported_dists):
    """Abstract platform.linux_distribution() call which is deprecated as of
       Python 3.5 and removed in Python 3.7"""
    try:
        supported = platform._supported_dists + (supported_dists,)
        osinfo = list(
            platform.linux_distribution(
                full_distribution_name=get_full_name,
                supported_dists=supported
            )
        )

        if not osinfo or osinfo == ['', '', '']:
            return get_linux_distribution_from_distro(get_full_name)
        full_name = platform.linux_distribution()[0].strip()
        osinfo.append(full_name)
    except AttributeError:
        return get_linux_distribution_from_distro(get_full_name)

    return osinfo
01ceea04eeb4e8130e9ce5899a116af557d9f954
11,533
def check_disabled(func):
    """
    Decorator to wrap up checking if the Backdrop
    connection is set to disabled or not
    """
    @wraps(func)
    def _check(*args, **kwargs):
        if _DISABLED:
            return
        else:
            return func(*args, **kwargs)
    return _check
53d6b0b44558d09ed73556f6854f004c6767856c
11,534
def bbox_from_points(points):
    """Construct a numeric list representing a bounding box from polygon coordinates in page representation."""
    xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')]
    return bbox_from_polygon(xys)
75742907d85990ee3bbfa133d9ba51f70b3f76ee
11,535
def return_true():
    """Return True

    Simple function used to check liveness of workers.
    """
    return True
3c4b469ce28aef47723a911071f01bea9eb4cf27
11,536
import os


def create_list(input_list):
    """Construct the list of items to turn into a table. File and string inputs supported"""
    if os.path.isfile(input_list):
        with open(input_list, 'r', encoding='UTF-8') as ifile:
            return [line.rstrip() for line in ifile]
    return input_list.split(',')
7158fa8241ae6328f931f4e7a9dfe08f3d12c6a2
11,537
def get_active_test_suite():
    """
    Returns the test suite that was last ran

    >>> get_active_test_suite()
    "Hello"
    """
    return TEST_RUNNER_STATE.test_suite
dc578da283429480a872175ff1cd5462bf803925
11,538
def header_from_stream(stream, _magic=None) -> (dict, list, int):
    """
    Parse SAM formatted header from stream.
    Dict of header values returned is structured as such:
    {Header tag:[ {Attribute tag: value}, ]}.
    Header tags can occur more than once and so each list item represents a different tag line.
    :param stream: Stream containing header data.
    :param _magic: Data consumed from stream while peeking. Will be prepended to read data.
    :return: Tuple containing (Dict of header values, list of Reference objects,
             placeholder to keep return value consistent with header_from_buffer()).
    """
    header = defaultdict(list)
    while stream.peek(1)[0] == b'@':
        line = stream.readline()
        tag = line[1:2]
        if tag == b'CO':
            header[tag].append(line[4:])
        else:
            header[tag].append({m[0]: m[1] for m in header_re.findall(line)})

    return header, [bam.Reference(ref[b'SN'], int(ref[b'LN'])) for ref in header.pop(b'SQ')] if b'SQ' in header else [], 0
e0e071f38787950fa499344c63cc4040e5fccb23
11,539
def walk_binary_file_or_stdin(filepath, buffer_size = 32768):
    """
    Yield 'buffer_size' bytes from filepath until EOF, or from
    standard input when 'filepath' is '-'.
    """
    if filepath == '-':
        return walk_binary_stdin(buffer_size)
    else:
        return walk_binary_file(filepath, buffer_size)
290ea9e159c8f0e3df6713b8abcd0c141cb4858a
11,540
def register_device() -> device_pb2.DeviceResponse:
    """
    Now that the client credentials are set, the device can be registered.

    The device is registered by instantiating an OauthService object and using the
    register() method. The OauthService requires a Config object and an
    ISecureCredentialStore object to be constructed.

    Once the register method is called, a DeviceResponse object will be returned.

    NOTE: This function will check if the device id set in config.json has already been
    registered. If it has, then the DeviceResponse for the existing registered device
    will be returned. Otherwise, the registration will proceed and the DeviceResponse
    for the new registration will be returned.

    Returns:
        A DeviceResponse object indicating whether or not the device registration was
        successful
    """
    oauth_service: OauthService = helpers.get_oauth_service()

    if check_device_is_registered():
        print(
            f"Registration already exists for device_id = {helpers.environment_config['device_id']}"
        )
        device_response: device_pb2.DeviceResponse = oauth_service.get_who_am_i()
    else:
        print(f"Registering device_id = {helpers.environment_config['device_id']}")
        device_response: device_pb2.DeviceResponse = oauth_service.register(
            device_id=helpers.environment_config["device_id"],
            device_name=helpers.environment_config["device_name"],
            credential=helpers.environment_config["tenant_secret"],
        )
        save_environment_config()

    return device_response
5a3958456f55315fa91be4e60324be3e5d9d3af8
11,541
def _sto_to_graph(agent: af.SubTaskOption) -> subgraph.Node:
    """Convert a `SubTaskOption` to a `Graph`."""
    node_label = '{},{},{}'.format(agent.name or 'SubTask Option',
                                   agent.subtask.name or 'SubTask',
                                   agent.agent.name or 'Policy')
    return subgraph.Node(label=node_label, type='sub_task_option')
311f591be99bc045d2572b22f9cd3462bce2b10c
11,542
def filter_input(self, forced=False, context=None):
    """
    Passes each hunk (file or code) to the 'input' methods
    of the compressor filters.
    """
    content = []
    for hunk in self.hunks(forced, context=context):
        content.append(hunk)
    return content
1ea0ac16cf1e20732ad8c37b6126c80fe94d2ee5
11,543
def delete(request, user):
    """
    Deletes a poll
    """
    poll_id = request.POST.get('poll_id')
    try:
        poll = Poll.objects.get(pk=poll_id)
    except:
        return JsonResponse({'error': 'Invalid poll_id'}, status=404)

    if poll.user.id != user.id:
        return JsonResponse({'error': 'You cannot delete this poll'}, status=403)

    poll.delete()
    return JsonResponse({'message': 'Poll was deleted'})
36a46e1b72cd06178ac00706c24451736fd454cd
11,544
from typing import Optional


def get_nodes(collection: str, node_link: Optional[str] = None):
    """Get the Node based on its ID or kind"""
    # pylint: disable=too-many-locals,too-many-return-statements,too-many-branches
    user_id = to_object_id(g.user._id)
    can_view_others_operations = g.user.check_role(IAMPolicies.CAN_VIEW_OTHERS_OPERATIONS)
    can_view_others_workflows = g.user.check_role(IAMPolicies.CAN_VIEW_OTHERS_WORKFLOWS)
    can_view_operations = g.user.check_role(IAMPolicies.CAN_VIEW_OPERATIONS)
    can_view_workflows = g.user.check_role(IAMPolicies.CAN_VIEW_WORKFLOWS)
    can_create_operations = g.user.check_role(IAMPolicies.CAN_CREATE_OPERATIONS)
    can_create_workflows = g.user.check_role(IAMPolicies.CAN_CREATE_WORKFLOWS)

    if node_link in executor_manager.kind_to_executor_class and collection == Collections.TEMPLATES:
        # if node_link is a base node
        # i.e. /templates/basic-bash
        kind = node_link
        if kind in workflow_manager.kind_to_workflow_dict and (not can_view_workflows or not can_create_workflows):
            return make_permission_denied()
        if kind in operation_manager.kind_to_operation_dict and (not can_view_operations or not can_create_operations):
            return make_permission_denied()
        node: Node = executor_manager.kind_to_executor_class[kind].get_default_node(
            is_workflow=kind in workflow_manager.kind_to_workflow_dict
        )
        if isinstance(node, tuple):
            data = node[0].to_dict()
            tour_steps = node[1]
        else:
            data = node.to_dict()
            tour_steps = []
        data['kind'] = kind
        return make_success_response({
            'node': data,
            'tour_steps': tour_steps,
            'plugins_dict': PLUGINS_DICT,
        })
    else:
        # when node_link is an id of the object
        try:
            node_id = to_object_id(node_link)
        except bson.objectid.InvalidId:  # type: ignore
            return make_fail_response('Invalid ID'), 404
        node_dict = node_collection_managers[collection].get_db_node(node_id, user_id)
        logger.debug(node_dict)
        if node_dict:
            is_owner = node_dict['author'] == user_id
            kind = node_dict['kind']
            if kind in workflow_manager.kind_to_workflow_dict and not can_view_workflows:
                return make_permission_denied()
            if kind in operation_manager.kind_to_operation_dict and not can_view_operations:
                return make_permission_denied()
            if kind in workflow_manager.kind_to_workflow_dict and not can_view_others_workflows and not is_owner:
                return make_permission_denied()
            if kind in operation_manager.kind_to_operation_dict and not can_view_others_operations and not is_owner:
                return make_permission_denied()
            return make_success_response({
                'node': node_dict,
                'plugins_dict': PLUGINS_DICT,
            })
        else:
            return make_fail_response(f"Node `{node_link}` was not found"), 404
55a8b56988496d355a16e8aa0c1a8a79280af987
11,545
def get_loader(path):
    """Gets the configuration loader for path according to file extension.

    Parameters:
        path: the path of a configuration file, including the filename extension.

    Returns the loader associated with path's extension within LOADERS.
    Throws an UnknownConfigurationException if no such loader exists.
    """
    for ext, loader in LOADERS:
        fullext = '.' + ext
        if path[-len(fullext):] == fullext:
            return loader
    raise exception.UnknownConfigurationException, "No configuration loader found for path '%s'" % path
a122c67d6ebacf2943ec69765d5feab649f5c341
11,546
def mat_stretch(mat, target):
    """
    Changes times of `mat` in-place so that it has the same average BPM and
    initial time as target.

    Returns `mat` changed in-place.
    """
    in_times = mat[:, 1:3]
    out_times = target[:, 1:3]

    # normalize in [0, 1]
    in_times -= in_times.min()
    in_times /= in_times.max()

    # restretch
    new_start = out_times.min()
    in_times *= (out_times.max() - new_start)
    in_times += new_start

    return mat
204efb1d8a19c7efe0efb5710add62436a4b5cee
11,547
def parse_range(cpu_range):
    """Create cpu range object"""
    if '-' in cpu_range:
        [x, y] = cpu_range.split('-')  # pylint: disable=invalid-name
        cpus = range(int(x), int(y)+1)
        if int(x) >= int(y):
            raise ValueError("incorrect cpu range: " + cpu_range)
    else:
        cpus = [int(cpu_range)]
    return cpus
51079648ffddbcba6a9699db2fc4c04c7c3e3202
11,548
def causal_parents(node, graph):
    """
    Returns the nodes (string names) that are causal parents of the node
    (have the edge type "causes_or_promotes"), else returns empty list.

    Parameters
        node - name of the node (string)
        graph - networkx graph object
    """
    node_causal_parents = []
    if list(graph.predecessors(node)):
        possibleCausalParents = graph.predecessors(node)
        for possibleCausalParent in possibleCausalParents:
            if graph[possibleCausalParent][node]["type"] == "causes_or_promotes":
                node_causal_parents.append(possibleCausalParent)
    return node_causal_parents
4618e9649d3ea37c9a3a0d8faf7a44b00e386f1c
11,549
def create_app(settings_override=None):
    """
    Create a test application.

    :param settings_override: Override settings
    :type settings_override: dict
    :return: Flask app
    """
    app = Flask(__name__)

    params = {
        'DEBUG': True,
        'WEBPACK_MANIFEST_PATH': './build/manifest.json'
    }

    app.config.update(params)

    if settings_override:
        app.config.update(settings_override)

    webpack.init_app(app)

    return app
38393418415d29e3068398dca9bdce8b8b88eec6
11,550
async def user_me(current_user=Depends(get_current_active_user)):
    """
    Get own user
    """
    return current_user
40c5bb5a45cad8154489db3fc0da3c0fe54d783d
11,551
from typing import Optional
from typing import Tuple
import crypt


def get_password_hash(password: str, salt: Optional[str] = None) -> Tuple[str, str]:
    """Get user password hash."""
    salt = salt or crypt.mksalt(crypt.METHOD_SHA256)
    return salt, crypt.crypt(password, salt)
ea3d7e0d8c65e23e40660b8921aa872dc9e2f53c
11,552
def start_at(gra, key):
    """ start a v-matrix at a specific atom

    Returns the started vmatrix, along with keys to atoms whose neighbors are
    missing from it
    """
    symb_dct = atom_symbols(gra)
    ngb_keys_dct = atoms_sorted_neighbor_atom_keys(
        gra, symbs_first=('X', 'C',), symbs_last=('H',), ords_last=(0.1,))

    ngb_keys = ngb_keys_dct[key]
    if not ngb_keys:
        zma_keys = []
    elif len(ngb_keys) == 1:
        # Need special handling for atoms with only one neighbor
        if symb_dct[key] in ('H', 'X'):
            key2 = ngb_keys[0]
            zma_keys = (key2,) + ngb_keys_dct[key2]
        else:
            key2 = ngb_keys[0]
            ngb_keys = tuple(k for k in ngb_keys_dct[key2] if k != key)
            zma_keys = (key, key2) + ngb_keys
    else:
        zma_keys = (key,) + ngb_keys_dct[key]

    vma = ()
    for row, key_ in enumerate(zma_keys):
        idx1 = idx2 = idx3 = None
        if row > 0:
            key1 = next(k for k in ngb_keys_dct[key_] if k in zma_keys[:row])
            idx1 = zma_keys.index(key1)
        if row > 1:
            key2 = next(k for k in ngb_keys_dct[key1]
                        if k in zma_keys[:row] and k != key_)
            idx2 = zma_keys.index(key2)
        if row > 2:
            key3 = next(k for k in zma_keys[:row]
                        if k not in (key_, key1, key2))
            idx3 = zma_keys.index(key3)

        sym = symb_dct[key_]
        key_row = [idx1, idx2, idx3]
        vma = automol.vmat.add_atom(vma, sym, key_row)

    return vma, zma_keys
baa4d463316d47611a696bea456f8e1d0e4b5755
11,553
def associate_kitti(detections, trackers, det_cates, iou_threshold, velocities, previous_obs, vdc_weight):
    """
    @param detections:
    """
    if (len(trackers) == 0):
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)

    """
    Cost from the velocity direction consistency
    """
    Y, X = velocity_direction_batch(detections, previous_obs)
    inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
    inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
    inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
    diff_angle_cos = inertia_X * X + inertia_Y * Y
    diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
    diff_angle = np.arccos(diff_angle_cos)
    diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi

    valid_mask = np.ones(previous_obs.shape[0])
    valid_mask[np.where(previous_obs[:, 4] < 0)] = 0
    valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)

    scores = np.repeat(detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
    angle_diff_cost = angle_diff_cost.T
    angle_diff_cost = angle_diff_cost * scores

    """
    Cost from IoU
    """
    iou_matrix = iou_batch(detections, trackers)

    """
    With multiple categories, generate the cost for catgory mismatch
    """
    num_dets = detections.shape[0]
    num_trk = trackers.shape[0]
    cate_matrix = np.zeros((num_dets, num_trk))
    for i in range(num_dets):
        for j in range(num_trk):
            if det_cates[i] != trackers[j, 4]:
                cate_matrix[i][j] = -1e6

    cost_matrix = - iou_matrix - angle_diff_cost - cate_matrix

    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(cost_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))

    unmatched_detections = []
    for d, det in enumerate(detections):
        if (d not in matched_indices[:, 0]):
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if (t not in matched_indices[:, 1]):
            unmatched_trackers.append(t)

    # filter out matched with low IOU
    matches = []
    for m in matched_indices:
        if (iou_matrix[m[0], m[1]] < iou_threshold):
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if (len(matches) == 0):
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
8f3fb7628940cacd68de4f93b8469e212a12d854
11,554
import json
import uuid
import time


def seat_guest(self, speech, guest, timeout):
    """
    Start the view 'seatGuest'

    :param speech: the text that will be use by the Local Manager for tablet and vocal
    :type speech: dict
    :param guest: name of the guest to seat
    :type guest: string
    :param timeout: maximum time to wait for a reaction from the local manager
    :type timeout: float
    """
    goal = RequestToLocalManagerGoal(action="seatGuest",
                                     payload=json.dumps({
                                         'id': str(uuid.uuid4()),
                                         'timestamp': time.time(),
                                         'args': {
                                             'speech': speech,
                                             'guest': guest
                                         }
                                     }))
    return self._send_goal_and_wait(goal, timeout)
42a8ccd03638dfb48072c1d1b10c0c05a8f867ec
11,555
def retreive_retries_and_sqs_handler(task_id):
    """This function retrieve the number of retries and the SQS handler associated to an expired task

    Args:
        task_id(str): the id of the expired task

    Returns:
        rtype: dict

    Raises:
        ClientError: if DynamoDB query failed
    """
    try:
        response = table.query(
            KeyConditionExpression=Key('task_id').eq(task_id)
        )
        # CHeck if 1 and only 1
        return response.get('Items')[0].get('retries'), response.get('Items')[0].get('sqs_handler_id')
    except ClientError as e:
        errlog.log("Cannot retreive retries and handler for task {} : {}".format(task_id, e))
        raise e
c432d9f73f8d1de8fbcf48b35e41a2879ca25954
11,556
import torch


def decompose(original_weights: torch.Tensor, mask, threshould: float) -> torch.Tensor:
    """
    Calculate the scaling matrix. Use before pruning the current layer.

    [Inputs]
        original_weights: (N[i], N[i+1])
        important_weights: (N[i], P[i+1])
    [Outputs]
        scaling_matrix: (P[i+1], N[i+1])
    """
    important_weights = convert_to_important_weights(original_weights, mask)
    msglogger.info("important_weights", important_weights.size())
    scaling_matrix = torch.zeros(important_weights.size()[-1], original_weights.size()[-1])
    msglogger.info("scaling_matrix", scaling_matrix.size())
    msglogger.info("original_weights", original_weights.size())

    for i, weight in enumerate(original_weights.transpose(0, -1)):
        if weight in important_weights.transpose(0, -1):
            scaling_matrix[important_weights.transpose(0, -1) == weight][i] = 1
        else:
            most_similar_neuron, similarity, scale = most_similar(weight, important_weights)
            most_similar_neuron_index_in_important_weights = important_weights == most_similar_neuron
            if similarity >= threshould:
                scaling_matrix[most_similar_neuron_index_in_important_weights][i] = scale

    return scaling_matrix
844562c839b95eb172197f22781f2316639b2d95
11,557
def calc_Kullback_Leibler_distance(dfi, dfj):
    """
    Calculates the Kullback-Leibler distance of the two matrices.
    As defined in Aerts et al. (2003). Also called Mutual Information.
    Sort will be ascending.

    Epsilon is used here to avoid conditional code for checking that
    neither P nor Q is equal to 0.
    """
    epsilon = 0.00001

    P = dfi + epsilon
    Q = dfj + epsilon
    divergence = np.sum(P * np.log2(P / Q))
    return divergence
fd66434557598717db7cc73ca9a88fde9ab7e73d
11,558
def test_python_java_classes():
    """ Run Python tests against JPY test classes """
    sub_env = {'PYTHONPATH': _build_dir()}
    log.info('Executing Python unit tests (against JPY test classes)...')
    return jpyutil._execute_python_scripts(python_java_jpy_tests, env=sub_env)
11a9e43126799738a7c8e5cf8614cbe63d15cd2f
11,559
def trim_datasets_using_par(data, par_indexes):
    """
    Removes all the data points needing more fitting parameters than available.
    """
    parameters_to_fit = set(par_indexes.keys())
    trimmed_data = list()

    for data_point in data:
        if data_point.get_fitting_parameter_names() <= parameters_to_fit:
            trimmed_data.append(data_point)

    return trimmed_data
5a06f7f5662fb9d7b5190e0e75ba41c858a85d0b
11,560
def _parse_field(field: str) -> Field:
    """
    Parse the given string representation of a CSV import field.

    :param field: string or string-like field input
    :return: a new Field
    """
    name, _type = str(field).split(':')
    if '(' in _type and _type.endswith(')'):
        _type, id_space = _type.split('(')[0:-1]
        return Field(name or _type, FieldType.from_str(_type), id_space)
    return Field(name or _type, FieldType.from_str(_type))
413cc12675e57db57da75dd9044c1884e638282c
11,561
import time
import scipy


def removeNoise(
    audio_clip,
    noise_thresh,
    mean_freq_noise,
    std_freq_noise,
    noise_stft_db,
    n_grad_freq=2,
    n_grad_time=4,
    n_fft=2048,
    win_length=2048,
    hop_length=512,
    n_std_thresh=1.5,
    prop_decrease=1.0,
    verbose=False,
    visual=False,
):
    """Remove noise from audio based upon a clip containing only noise

    Args:
        audio_clip (array): The first parameter.
        noise_clip (array): The second parameter.
        n_grad_freq (int): how many frequency channels to smooth over with the mask.
        n_grad_time (int): how many time channels to smooth over with the mask.
        n_fft (int): number audio of frames between STFT columns.
        win_length (int): Each frame of audio is windowed by `window()`. The window will be of length `win_length` and then padded with zeros to match `n_fft`..
        hop_length (int): number audio of frames between STFT columns.
        n_std_thresh (int): how many standard deviations louder than the mean dB of the noise (at each frequency level) to be considered signal
        prop_decrease (float): To what extent should you decrease noise (1 = all, 0 = none)
        visual (bool): Whether to plot the steps of the algorithm

    Returns:
        array: The recovered signal with noise subtracted
    """
    if verbose:
        start = time.time()
    # STFT over noise
    if verbose:
        print("STFT on noise:", td(seconds=time.time() - start))
        start = time.time()
    # STFT over signal
    if verbose:
        start = time.time()
    sig_stft = _stft(audio_clip, n_fft, hop_length, win_length)
    sig_stft_db = _amp_to_db(np.abs(sig_stft))
    if verbose:
        print("STFT on signal:", td(seconds=time.time() - start))
        start = time.time()
    # Calculate value to mask dB to
    mask_gain_dB = np.min(_amp_to_db(np.abs(sig_stft)))
    print(noise_thresh, mask_gain_dB)
    # Create a smoothing filter for the mask in time and frequency
    smoothing_filter = np.outer(
        np.concatenate(
            [
                np.linspace(0, 1, n_grad_freq + 1, endpoint=False),
                np.linspace(1, 0, n_grad_freq + 2),
            ]
        )[1:-1],
        np.concatenate(
            [
                np.linspace(0, 1, n_grad_time + 1, endpoint=False),
                np.linspace(1, 0, n_grad_time + 2),
            ]
        )[1:-1],
    )
    smoothing_filter = smoothing_filter / np.sum(smoothing_filter)
    # calculate the threshold for each frequency/time bin
    db_thresh = np.repeat(
        np.reshape(noise_thresh, [1, len(mean_freq_noise)]),
        np.shape(sig_stft_db)[1],
        axis=0,
    ).T
    # mask if the signal is above the threshold
    sig_mask = sig_stft_db < db_thresh
    if verbose:
        print("Masking:", td(seconds=time.time() - start))
        start = time.time()
    # convolve the mask with a smoothing filter
    sig_mask = scipy.signal.fftconvolve(sig_mask, smoothing_filter, mode="same")
    sig_mask = sig_mask * prop_decrease
    if verbose:
        print("Mask convolution:", td(seconds=time.time() - start))
        start = time.time()
    # mask the signal
    sig_stft_db_masked = (
        sig_stft_db * (1 - sig_mask)
        + np.ones(np.shape(mask_gain_dB)) * mask_gain_dB * sig_mask
    )
    # mask real
    sig_imag_masked = np.imag(sig_stft) * (1 - sig_mask)
    sig_stft_amp = (_db_to_amp(sig_stft_db_masked) * np.sign(sig_stft)) + (
        1j * sig_imag_masked
    )
    if verbose:
        print("Mask application:", td(seconds=time.time() - start))
        start = time.time()
    # recover the signal
    recovered_signal = _istft(sig_stft_amp, hop_length, win_length)
    recovered_spec = _amp_to_db(
        np.abs(_stft(recovered_signal, n_fft, hop_length, win_length))
    )
    if verbose:
        print("Signal recovery:", td(seconds=time.time() - start))
    if visual:
        plot_spectrogram(noise_stft_db, title="Noise")
    if visual:
        plot_statistics_and_filter(
            mean_freq_noise, std_freq_noise, noise_thresh, smoothing_filter
        )
    if visual:
        plot_spectrogram(sig_stft_db, title="Signal")
    if visual:
        plot_spectrogram(sig_mask, title="Mask applied")
    if visual:
        plot_spectrogram(sig_stft_db_masked, title="Masked signal")
    if visual:
        plot_spectrogram(recovered_spec, title="Recovered spectrogram")
    return recovered_signal
3d92ae7427ab33cc875219b3f005ca86802dd4c2
11,562
import logging


def validate_wra_params(func):
    """Water Risk atlas parameters validation"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        validation_schema = {
            'wscheme': {
                'required': True
            },
            'geostore': {
                'type': 'string',
                'required': True
            },
            'analysis_type': {
                'type': 'string',
                'required': True,
                'default': None
            },
            'month': {
                'required': False,
                'default': None,
                'nullable': True
            },
            'year': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'change_type': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'indicator': {
                'type': 'string',
                'required': True,
                'default': None,
                'nullable': True
            },
            'scenario': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'locations': {
                'type': 'string',
                'required': True,
                'required': False,
                'default': None,
                'nullable': True
            },
            'input_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'match_address': {
                'type': 'string',
                'required': False,
                'default': None,
                'nullable': True
            },
            'ids': {
                'type': 'string',
                'required': False,
                'nullable': True,
                'default': None
            }
        }
        jsonRequestContent = request.json or {}
        rArgs = {**request.args, **jsonRequestContent}
        kwargs.update(rArgs)
        logging.debug(f'[MIDDLEWARE - ws scheme]: {kwargs}')
        logging.debug(f"[VALIDATOR - wra_weights]: {kwargs}")
        validator = Validator(validation_schema, allow_unknown=True)
        if not validator.validate(kwargs):
            return error(status=400, detail=validator.errors)
        kwargs['sanitized_params'] = validator.normalized(kwargs)
        return func(*args, **kwargs)
    return wrapper
8c15b74e4a5fc9cffc54cb6fa53325fb2405ca7d
11,563
from simulator import simulate
import logging
import json


def handle_request(r):
    """Handle the Simulator request given by the r dictionary
    """
    print("handle_request executed .. ")
    print(r)

    # Parse request ..
    config = SimArgs()
    config.machine = r[u'machine']
    config.overlay = [r[u'topology']]  # List of topologies - just one
    config.group = r[u'cores']

    overlay = r[u'topology'].split('-')
    overlay_name = overlay[0]
    overlay_args = overlay[1:]

    if overlay_name == 'hybrid':
        overlay_name = 'cluster'
        config.hybrid = True
        config.hybrid_cluster = overlay_args[0]
        config.overlay = [u'cluster']

    # overlay_args is a list of option flags, so test membership
    if 'mm' in overlay_args:
        config.multimessage = True
    elif 'rev' in overlay_args:
        config.reverserecv = True

    c = config

    (last_nodes, leaf_nodes, root) = simulate(config)

    # Generate response to be sent back to client
    assert len(config.models) == 1  # Exactly one model has been generated
    res = {}
    res['root'] = root
    res['model'] = config.models[0]
    res['last_node'] = last_nodes[0]
    res['leaf_nodes'] = leaf_nodes[0]
    res['git-version'] = helpers.git_version().decode('ascii')
    print(res)

    logging.info('Responding with >>>')
    logging.info(json.dumps(res))
    logging.info('<<<')

    write_statistics(c.machine)
    return json.dumps(res)
73a55ec93bfdf398b896b3c208a476296c1c04f5
11,564
def number_empty_block(n):
    """Number of empty blocks on the n x n board (uses the global L4/L8 grids)."""
    L = L4 if n == 4 else L8
    i = 0
    for x in range(n):
        for y in range(n):
            if L[x][y] == 0:
                i = i + 1
    return i
1dc7f228cdcbf4c3a1b6b553bff75ba1bb95bdbe
11,565
def compute_referendum_result_by_regions(referendum_and_areas):
    """Return a table with the absolute count for each region.

    The returned DataFrame should be indexed by `code_reg` and have columns:
    ['name_reg', 'Registered', 'Abstentions', 'Null', 'Choice A', 'Choice B']
    """
    ans = referendum_and_areas.groupby(
        ['code_reg', 'name_reg']).sum().reset_index().set_index('code_reg')
    ans = ans.drop(columns="Town code")
    return ans
fb02c28b5caca9147a27bd2f205c07d377d8561c
11,566
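A small illustrative call, assuming a pandas DataFrame shaped like the referendum_and_areas table described in the docstring (the toy values are made up):

import pandas as pd

referendum_and_areas = pd.DataFrame({
    'code_reg': ['11', '11', '24'],
    'name_reg': ['Ile-de-France', 'Ile-de-France', 'Centre'],
    'Town code': [1, 2, 3],
    'Registered': [100, 200, 150],
    'Abstentions': [10, 20, 15],
    'Null': [1, 2, 1],
    'Choice A': [50, 90, 70],
    'Choice B': [39, 88, 64],
})
result = compute_referendum_result_by_regions(referendum_and_areas)
# result is indexed by 'code_reg' with per-region sums of the count columns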
def fixed_rho_total_legacy(data, rho_p, rho_s, beads_2_M):
    """
    *LEGACY*: only returns polycation/cation concentrations. Use updated
    version (`fixed_rho_total()`), which returns a dictionary of all
    concentrations.

    Computes the polycation concentration in the supernatant (I) and
    coacervate (II) phases for different Bjerrum lengths.

    Parameters
    ----------
    data : dictionary of Pandas dataframes
        Contains dataframes of data from liquid state theory calculations
        indexed by Bjerrum length. Dataframes have densities in beads/sigma^3.
    rho_p : float
        Average density of polymer (cation + anion) in both phases [mol/L]
    rho_s : float
        Average density of salt (just cation since 1 cation and 1 anion come
        from one KBr molecule) in both phases [mol/L]
    beads_2_M : float
        Multiplicative conversion to get from beads/sigma^3 to moles of
        monomer/L.

    Returns
    -------
    lB_arr : (Nx1) numpy array
        Array of Bjerrum lengths non-dimensionalized by sigma (defined in
        definition of "data" dictionary).
    rho_PCI_list : N-element list
        List of densities of polycation in phase I (supernatant) [mol/L]
    rho_PCII_list : N-element list
        List of densities of polycation in phase II (coacervate) [mol/L]
    alpha_list : N-element list
        List of volume fractions of phase I [nondim].
    """
    # initializes outputs
    lB_valid_list = []
    rho_PCI_list = []
    rho_PCII_list = []
    rho_CI_list = []
    rho_CII_list = []
    alpha_list = []

    # computes coexistence at each Bjerrum length and stores results if physical
    for lB in data.keys():
        df = data[lB]
        df_s = compute_rho_s(df, rho_p, beads_2_M)
        # ensures that the total salt concentration is within the possible two-phase range
        if rho_s <= np.max(df_s['rhoS'])*beads_2_M and \
                rho_s >= np.min(df_s['rhoS'])*beads_2_M:
            # finds the index of the dataframe that has the closest salt concentration to the given value
            diff_rho_s = np.abs(df_s['rhoS']*beads_2_M - rho_s)
            i_same_salt = np.argmin(diff_rho_s)
            alpha = df_s['alpha'].iloc[i_same_salt]
            # recomputes the volume fraction of supernatant more precisely using
            # interpolation
            alpha = np.interp(rho_s, df_s['rhoS']*beads_2_M,
                              df_s['alpha'].to_numpy(dtype='float64'))
            if alpha == 1:
                print('rho_s = {0:.64f}'.format(rho_s/beads_2_M))
                print('rho_p = {0:.64f}'.format(rho_p/beads_2_M))
                print('rhoPCI = {0:.64f}'.format(df['rhoPCI'].loc[i_same_salt]))
                print('rhoPCII = {0:.64f}'.format(df['rhoPCII'].loc[i_same_salt]))
                print('rhoCI = {0:.64f}'.format(df['rhoCI'].loc[i_same_salt]))
                print('rhoCII = {0:.64f}'.format(df['rhoCII'].loc[i_same_salt]))
                print(df.loc[i_same_salt])
            # ensures that the ratio of volume I to total volume is physical
            # (i.e., in the range [0,1])
            if alpha > 1 or alpha < 0:
                continue
            lB_valid_list += [lB]
            rho_PCI_list += [df_s['rhoPCI'].iloc[i_same_salt]*beads_2_M]
            rho_PCII_list += [df_s['rhoPCII'].iloc[i_same_salt]*beads_2_M]
            rho_CI_list += [df_s['rhoCI'].iloc[i_same_salt]*beads_2_M]
            rho_CII_list += [df_s['rhoCII'].iloc[i_same_salt]*beads_2_M]
            alpha_list += [alpha]

    lB_arr = np.array(lB_valid_list)

    return rho_PCI_list, rho_PCII_list, rho_CI_list, rho_CII_list, lB_arr, alpha_list
a13419c5dc95702e93dca48822e2731bd05745b5
11,567
import os


def is_valid_file(parser, filename):
    """Check if file exists, and return the filename"""
    if not os.path.exists(filename):
        parser.error("The file %s does not exist!" % filename)
    else:
        return filename
4e9e2a49749c65fd5457578fd162baf350b94fe3
11,568
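Typical use is as an argparse type-checker; a brief sketch (the argument name and help text are hypothetical):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input",
                    type=lambda fn: is_valid_file(parser, fn),
                    help="path to an existing input file")
args = parser.parse_args()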
def portfolio():
    """Function to render the portfolio page."""
    form = PortfolioCreateForm()
    if form.validate_on_submit():
        try:
            portfolio = Portfolio(name=form.data['name'], user_id=session['user_id'])
            db.session.add(portfolio)
            db.session.commit()
        except (DBAPIError, IntegrityError):
            flash('Something went terribly wrong.')
            return render_template('stocks/stocks.html', form=form)
        return redirect(url_for('.search_form'))
    companies = Company.query.filter_by(user_id=session['user_id']).all()
    return render_template('./stocks/stocks.html', companies=companies, form=form), 200
adc84381bf6397023fc943f9c2edb1e34879400d
11,569
def svn_client_get_simple_provider(*args):
    """svn_client_get_simple_provider(svn_auth_provider_object_t provider, apr_pool_t pool)"""
    return apply(_client.svn_client_get_simple_provider, args)
dcdaaa1b448443e7b3cdb8984dc31d8a009c5606
11,570
def svn_client_invoke_get_commit_log(*args):
    """
    svn_client_invoke_get_commit_log(svn_client_get_commit_log_t _obj, char log_msg, char tmp_file,
        apr_array_header_t commit_items, void baton, apr_pool_t pool) -> svn_error_t
    """
    return apply(_client.svn_client_invoke_get_commit_log, args)
5c4e8f30309037eabb74c99e77f1b0f4f3172428
11,571
import random


def find_valid_nodes(node_ids, tree_1, tree_2):
    """
    Recursive function for finding a subtree in the second tree with the same
    output type as a random subtree in the first tree

    Args:
        node_ids: List of node ids to search
        tree_1: Node containing full tree
        tree_2: Node containing full tree

    Returns:
        Random subtree of the first tree AND a valid node id of the second tree
        The output_type of the subtree will match the output_type of the valid node of the second tree
    """
    # Randomly choose a node in the first tree
    node_id = random.choice(node_ids)
    # Get output_type of the random node in first tree
    output_type = tree_1.get_id_outputs()[node_id]
    # Find nodes with the same output_type in the second tree
    valid_node_ids = []
    for n in tree_2.get_id_outputs():
        if tree_2.get_id_outputs()[n] == output_type:
            valid_node_ids.append(n)
    if len(valid_node_ids) == 0:
        # Rerun function without invalid output_type
        return find_valid_nodes(
            [i for i in node_ids if tree_1.get_id_outputs()[i] != output_type],
            tree_1, tree_2)
    # Take off root id
    node_id = node_id[1:]
    # Get subtree object from tree_1
    subtree_1 = find_subtree(tree_1, node_id)
    # Randomly choose a node in the second tree
    valid_node_id = random.choice(valid_node_ids)
    # Take off root id
    valid_node_id = valid_node_id[1:]
    # Get subtree object from tree_2
    subtree_2 = find_subtree(tree_2, valid_node_id)
    return subtree_1, valid_node_id, subtree_2, node_id
88cfd535487ba460e3c212c9c85269abd68f6ef2
11,572
import torch
import numpy as np  # required by the function body below


def pytorch_local_average(n, local_lookup, local_tensors):
    """Average the neighborhood tensors.

    Parameters
    ----------
    n : {int}
        Size of tensor
    local_lookup : {dict: int->float}
        A dictionary from rank of neighborhood to the weight between two processes
    local_tensors : {dict: int->tensor}
        A dictionary from rank to tensors to be aggregated.

    Returns
    -------
    tensor
        An averaged tensor
    """
    averaged = torch.DoubleTensor(np.zeros(n))
    for node_id, node_weight in local_lookup.items():
        averaged += node_weight * local_tensors[node_id]
    return averaged
294a4d63ce5eff42ccd3abe1354640f6f934e96f
11,573
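A quick sketch of averaging two neighbours' tensors with weights that sum to one (the ranks and weights are made up):

import torch

local_lookup = {0: 0.5, 1: 0.5}
local_tensors = {
    0: torch.DoubleTensor([1.0, 2.0, 3.0]),
    1: torch.DoubleTensor([3.0, 2.0, 1.0]),
}
avg = pytorch_local_average(3, local_lookup, local_tensors)
# avg -> tensor([2., 2., 2.], dtype=torch.float64)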
import numpy as np  # required by the function body below


def get_rr_Ux(N, Fmat, psd, x):
    """
    Given a rank-reduced decomposition of the Cholesky factor L, calculate
    L^{T}x where x is some vector. This way, we don't have to build L,
    which saves memory and computational time.

    @param N:       Vector with the elements of the diagonal matrix N
    @param Fmat:    (n x m) matrix consisting of the reduced rank basis
    @param psd:     PSD of the rank-reduced approximation
    @param x:       Vector we want to process as Lx

    @return Ux
    """
    n = N.shape[0]
    m = Fmat.shape[1]
    r = np.zeros(n)
    t = np.zeros(m)

    Z, B, D = get_rr_cholesky_rep(N, Fmat, psd)
    BD = (B.T * np.sqrt(D)).T
    for ii in range(n-1, -1, -1):
        r[ii] = x[ii]*np.sqrt(D[ii]) + np.dot(BD[ii, :].T, t)
        t += x[ii] * Z[ii, :]

    return r
9a0aaa95d904b9bc993d295a49d95a48d0f6245f
11,574
def get_poll_options(message: str) -> list:
    """
    Turns string into a list of poll options
    :param message:
    :return:
    """
    parts = message.split(CREATE_POLL_EVENT_PATTERN)
    if len(parts) > 1:
        votes = parts[-1].split(",")
        if len(votes) == 1 and votes[0] == ' ':
            return []
        else:
            return votes
    return []
fd1209403038d5b1ca75d7abd8567bb47fd6bd9a
11,575
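An illustrative call, assuming CREATE_POLL_EVENT_PATTERN is the command prefix the bot strips; the "/poll " value below is hypothetical, not taken from the original module.

CREATE_POLL_EVENT_PATTERN = "/poll "  # hypothetical value for the module constant

options = get_poll_options("/poll red,green,blue")
# options -> ['red', 'green', 'blue']

options = get_poll_options("no poll command here")
# options -> []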
import numpy as np  # required by the function body below


def get_avg_percent_bonds(bond_list, num_opts, adj_lists, num_trials, break_co_bonds=False):
    """
    Given adj_list for a set of options, with repeats for each option, find the avg and std dev of
    percent of each bond type

    :param bond_list: list of strings representing each bond type
    :param num_opts: number of options specified (should be length of adj_lists)
    :param adj_lists: list of lists of adjs: outer is for each option, inner is for each repeat
    :param num_trials: number of repeats (should be length of inner adj_lists list)
    :param break_co_bonds: Boolean: whether to determine oligomers and remaining bonds after removing
        C-O bonds to simulate RCF
    :return: avg_bonds, std_bonds: list of floats, list of floats: for each option tested, the average
        and std dev of bond distributions (percentages)
    """
    analysis = []
    for i in range(num_opts):
        cur_adjs = adj_lists[i]
        analysis.append([analyze_adj_matrix(cur_adjs[j], break_co_bonds=break_co_bonds)
                         for j in range(num_trials)])

    bond_percents = {}
    avg_bonds = {}
    std_bonds = {}

    for bond_type in bond_list:
        bond_percents[bond_type] = [[analysis[j][i][BONDS][bond_type]/sum(analysis[j][i][BONDS].values())
                                     for i in range(num_trials)] for j in range(num_opts)]
        avg_bonds[bond_type] = [np.mean(bond_pcts) for bond_pcts in bond_percents[bond_type]]
        std_bonds[bond_type] = [np.sqrt(np.var(bond_pcts)) for bond_pcts in bond_percents[bond_type]]
    return avg_bonds, std_bonds
74c34afea07ab98941c70b571197fe0ea43bcb88
11,576
from io import BytesIO

import matplotlib.pyplot as plt
from PIL import Image


def current_fig_image():
    """Takes current figure of matplotlib and returns it as a PIL image.
    Also clears the current plot"""
    plt.axis('off')
    fig = plt.gcf()
    # savefig writes binary image data, so use an in-memory bytes buffer
    buff = BytesIO()
    fig.savefig(buff)
    buff.seek(0)
    img = Image.open(buff).convert('RGB')
    plt.clf()
    return img
6da91a7157db0cd0df8ebc1e4f3d6007f35f2621
11,577
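A short usage sketch: draw a plot, capture it as a PIL image, then save it (the output filename is hypothetical):

import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4])
img = current_fig_image()
img.save("plot_snapshot.png")  # hypothetical output path
print(img.size)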
def get_bgp_peer(
    api_client, endpoint_id, bgp_peer_id, verbose=False, **kwargs
):  # noqa: E501
    """Get eBGP peer  # noqa: E501

    Get eBGP peer details  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> response = api.get_bgp_peer(client, endpoint_id, bgp_peer_id, async_req=True)

    :param int endpoint_id: ID for IPsec endpoint (required)
    :param int bgp_peer_id: ID for BGP peer (required)
    :param async_req bool: execute request asynchronously
    :param bool verbose: True for verbose output
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without
        reading/decoding response data. Default is True.
    :param _request_timeout: timeout setting for this request. If one number provided, it will be
        total request timeout. It can also be a pair (tuple) of (connection, read) timeouts.
    :return: APIResponse or awaitable if async
    """
    local_var_params = locals()

    request_params = ["verbose"]  # noqa: E501

    collection_formats = {}

    query_params = []
    for param in [p for p in request_params if local_var_params.get(p) is not None]:
        query_params.append((param, local_var_params[param]))  # noqa: E501

    path_params = {"endpoint_id": endpoint_id, "bgp_peer_id": bgp_peer_id}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params["Accept"] = api_client.select_header_accept(
        ["application/json"]
    )  # noqa: E501

    # Authentication setting
    auth_settings = ["ApiTokenAuth", "basicAuth"]  # noqa: E501

    return api_client.call_api(
        "/ipsec/endpoints/{endpoint_id}/ebgp_peers/{bgp_peer_id}",
        "GET",
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type="object",  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get("async_req"),
        _return_http_data_only=local_var_params.get("_return_http_data_only"),  # noqa: E501
        _preload_content=local_var_params.get("_preload_content", True),
        _request_timeout=local_var_params.get("_request_timeout"),
        collection_formats=collection_formats,
    )
3a9fa41b2918c1537402d8894fe3315ff90241e8
11,578
def rewrite_elife_funding_awards(json_content, doi):
    """ rewrite elife funding awards """
    # remove a funding award
    if doi == "10.7554/eLife.00801":
        for i, award in enumerate(json_content):
            if "id" in award and award["id"] == "par-2":
                del json_content[i]
    # add funding award recipient
    if doi == "10.7554/eLife.04250":
        recipients_for_04250 = [
            {
                "type": "person",
                "name": {"preferred": "Eric Jonas", "index": "Jonas, Eric"},
            }
        ]
        for i, award in enumerate(json_content):
            if "id" in award and award["id"] in ["par-2", "par-3", "par-4"]:
                if "recipients" not in award:
                    json_content[i]["recipients"] = recipients_for_04250
    # add funding award recipient
    if doi == "10.7554/eLife.06412":
        recipients_for_06412 = [
            {
                "type": "person",
                "name": {"preferred": "Adam J Granger", "index": "Granger, Adam J"},
            }
        ]
        for i, award in enumerate(json_content):
            if "id" in award and award["id"] == "par-1":
                if "recipients" not in award:
                    json_content[i]["recipients"] = recipients_for_06412
    return json_content
aba819589e50bc847d56a0f5a122b2474425d39c
11,579
def remove_subnet_from_router(router_id, subnet_id):
    """Remove a subnet from the router.

    Args:
        router_id (str): The router ID.
        subnet_id (str): The subnet ID.
    """
    return neutron().remove_interface_router(router_id, {
        'subnet_id': subnet_id
    })
4b7e9123f148fddaa3d53bcabe1f37b35c974162
11,580
def direct_to_template(request, template):
    """Generic template direction view."""
    return render_to_response(template, {}, request)
5a030f302450829d397fbc27f73bd24470bfe50b
11,581
from datetime import datetime


def now():
    """Return the current time as a datetime object."""
    return datetime.now()
79ddcf3c2e22ff57626520e0b41af8b0f58972d6
11,582
def is_valid_node_name(name):
    """
    Determine if a name is valid for a node.

    A node name:
        - Cannot be empty
        - Cannot start with a number
        - Cannot match any blacklisted pattern

    :param str name: The name to check.

    :return: True if the name is valid. False otherwise.
    :rtype: bool
    """
    return name and name not in BLACKLISTED_NODE_NAMES
6a935f7172e96fd418084543da0fe81d6bb77be5
11,583
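A tiny sketch of driving the check, assuming BLACKLISTED_NODE_NAMES is the module-level collection the function reads (its contents below are hypothetical):

BLACKLISTED_NODE_NAMES = {"", "root", "temp"}  # hypothetical contents

for candidate in ["sensor_1", "root", ""]:
    print(candidate, bool(is_valid_node_name(candidate)))
# sensor_1 True
# root False
#  False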
import numpy as np  # required by the function body below


def trajCalc(setup):
    """ Creates trajectory between point A and the ground (B) based off of
    the initial position and the angle of travel

    Arguments:
        setup: [Object] ini file parameters

    Returns:
        A [list] lat/lon/elev of the tail of the trajectory
        B [list] lat/lon/elev of the head of the trajectory
    """
    B = np.array([0, 0, 0])

    # convert angles to radians
    ze = np.radians(setup.zangle)
    az = np.radians(setup.azim)

    # Create trajectory vector
    traj = np.array([np.sin(az)*np.sin(ze), np.cos(az)*np.sin(ze), -np.cos(ze)])

    # backwards propagate the trajectory until it reaches 100000 m up
    n = 85920/traj[2]

    # B is the intersection between the trajectory vector and the ground
    A = n*traj

    # Convert back to geo coordinates
    B = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, B))
    A = np.array(loc2Geo(setup.lat_centre, setup.lon_centre, 0, A))

    # print("Created Trajectory between A and B:")
    # print("     A = {:10.4f}N {:10.4f}E {:10.2f}m".format(A[0], A[1], A[2]))
    # print("     B = {:10.4f}N {:10.4f}E {:10.2f}m".format(B[0], B[1], B[2]))

    A[2] /= 1000
    B[2] /= 1000

    setup.lat_i = A[0]
    setup.lon_i = A[1]
    setup.elev_i = A[2]

    return A, B
30614d0193aacdd594400fbbda396bca40a75156
11,584
from typing import Dict


async def health() -> Dict[str, str]:
    """Health check function

    :return: Health check dict
    :rtype: Dict[str, str]
    """
    health_response = schemas.Health(name=settings.PROJECT_NAME,
                                     api_version=__version__)
    return health_response.dict()
ffda9fa5795c02bd197ed8715d44b384781c866f
11,585
def projection_v3(v, w):
    """Return the signed length of the projection of vector v on vector w.

    For the full vector result, use projection_as_vec_v3().

    Since the resulting vector is along the 1st vector, you can get the full
    vector result by scaling the 1st vector to the length of the result of
    this function.
    """
    return dot_v3(v, w) / w.length()
3aeb8783a5eb680f0e2085d6a0f3b8b80511b10f
11,586
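A worked sketch with a minimal stand-in vector type, since dot_v3 and the .length() method live elsewhere in the module (the Vec3 class and dot_v3 stub here are purely illustrative):

import math
from collections import namedtuple

class Vec3(namedtuple("Vec3", "x y z")):
    def length(self):
        return math.sqrt(self.x**2 + self.y**2 + self.z**2)

def dot_v3(a, b):  # stand-in for the module's dot product helper
    return a.x*b.x + a.y*b.y + a.z*b.z

v = Vec3(3.0, 4.0, 0.0)
w = Vec3(1.0, 0.0, 0.0)
print(projection_v3(v, w))  # 3.0: signed length of v projected onto w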
def custom_djsettings(settings):
    """Custom django settings to avoid warnings in stdout"""
    settings.TEMPLATE_DEBUG = False
    settings.DEBUG = False
    return settings
fc57786b22255235af6df7cec665696aeb1e882b
11,587
import math
import collections


def bleu(pred_seq, label_seq, k):
    """Compute the BLEU score."""
    pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ')
    len_pred, len_label = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0, 1 - len_label / len_pred))
    for n in range(1, k + 1):
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(len_label - n + 1):
            label_subs[''.join(label_tokens[i: i + n])] += 1
        for i in range(len_pred - n + 1):
            if label_subs[''.join(pred_tokens[i: i + n])] > 0:
                num_matches += 1
                label_subs[''.join(pred_tokens[i: i + n])] -= 1
        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
    return score
ae7485687a44afc9ced6f2f4ed5ac8fe0d67b295
11,588
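A quick check of the score on a toy sentence pair (the value shown is approximate):

pred = "the cat sat on the mat"
label = "the cat is on the mat"
print(bleu(pred, label, k=2))  # roughly 0.80: 5/6 unigrams and 3/5 bigrams match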
def report_by_name(http_request, agent_name):
    """
    A version of report that can look up an agent by its name. This will
    generally be slower but it also doesn't expose how the data is stored and
    might be easier in some cases.
    """
    agent = get_list_or_404(Agent, name=agent_name)[0]
    return report(http_request, agent.id)
ee9daf5b7e7e5f15af3e507f55e0a5e2a1388aca
11,589
def create_application(global_config=None, **local_conf):
    """
    Create a configured instance of the WSGI application.
    """
    sites, types = load_config(local_conf.get("config"))
    return ImageProxy(sites, types)
c8d3c7158902df00d88512a23bb852bb226b5ba4
11,590
def dimensionState(moons, dimension):
    """returns the state for the given dimension"""
    result = list()
    for moon in moons:
        result.append((moon.position[dimension], moon.velocity[dimension]))
    return result
e67a37e4a1556d637be74992fc3801ee56f0e6f9
11,591
def login():
    """Log in current user."""
    user = get_user()
    if user.system_wide_role != 'No Access':
        flask_login.login_user(user)
        return flask.redirect(common.get_next_url(
            flask.request, default_url=flask.url_for('dashboard')))

    flask.flash(u'You do not have access. Please contact your administrator.',
                'alert alert-info')
    return flask.redirect('/')
f229ebbd0f77789784b2e8d05bb643d9b5cb1da1
11,592
def init_module():
    """
    Initialize user's module handler.
    :return: wrapper handler.
    """
    original_module, module_path, handler_name = import_original_module()
    try:
        handler = original_module
        for name in module_path.split('.')[1:] + [handler_name]:
            handler = getattr(handler, name)
        return handler
    except AttributeError:
        raise AttributeError(
            'No handler {} in module {}'.format(handler_name, module_path)
        )
148ed89ea8a9f67c9cef5bc209a803840ff9de56
11,593
from typing import List


def process(lines: List[str]) -> str:
    """Preprocess a Fortran source file.

    Args:
        lines: The lines of the input Fortran file.

    Returns:
        Preprocessed lines of Fortran, joined into a single string.
    """
    # remove lines that are entirely comments and partial-line comments
    lines = [
        rm_trailing_comment(line)
        for line in lines
        if not line_is_comment(line)
    ]

    # merge continuation lines
    chg = True
    while chg:
        chg = False
        i = 0
        while i < len(lines):
            line = lines[i]
            llstr = line.lstrip()
            if len(llstr) > 0 and llstr[0] == "&":  # continuation character
                prevline = lines[i - 1]
                line = llstr[1:].lstrip()
                prevline = prevline.rstrip() + line
                lines[i - 1] = prevline
                lines.pop(i)
                chg = True
            i += 1

    return "".join(lines)
f3fd3bc75be544cd507dae87b80364c9b1c12f7c
11,594
def _summary(function):
    """
    Derive summary information from a function's docstring or name.

    The summary is the first sentence of the docstring, ending in a period,
    or if no docstring is present, the function's name capitalized.
    """
    if not function.__doc__:
        return f"{function.__name__.capitalize()}."

    result = []
    for word in function.__doc__.split():
        result.append(word)
        if word.endswith("."):
            break
    return " ".join(result)
a3e3e45c3004e135c2810a5ec009aa78ef7e7a04
11,595
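A brief illustration of both branches (the example functions are hypothetical):

def fetch_rows(query):
    """Fetch rows from the database. Retries once on failure."""

def cleanup():
    pass

print(_summary(fetch_rows))  # "Fetch rows from the database."
print(_summary(cleanup))     # "Cleanup."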
import os
import sys  # required by the function body below


def findrun(base, dim, boxsize):
    """ find all files associated with run given base directory
        and the resolution size and box length
    """
    if not os.path.isdir(base):
        print(base, 'is not a valid directory')
        sys.exit(1)

    # retrieve all files that match tag and box size
    # note this will include the initialisation boxes, which
    # are independent of redshift
    searchstr = '_' + str(dim) + '_' + str(boxsize) + 'Mpc'
    filenames = os.listdir(base)
    box_files = []
    for filename in filenames:
        if filename.find(searchstr) >= 0:
            box_files.append(os.path.join(base, filename))

    return box_files
c14f885943a33df96cda28a4a84fdce332149167
11,596
import numpy as np
from numpy.linalg import solve  # assumed source of `solve`; the original module may import it elsewhere


def demand_mass_balance_c(host_odemand, class_odemand, avail, host_recapture):
    """Solve Demand Mass Balance equation for class-level

    Parameters
    ----------
    host_odemand: int
        Observed host demand
    class_odemand: int
        Observed class demand
    avail: dict
        Availability of demand open during period considered
    host_recapture: float
        Estimated host level recapture

    Returns
    -------
    tuple
        Estimated demand, spill and recapture
    """
    # if observed demand of a class is 0, demand mass balance can't
    # estimate demand and spill alone without additional information
    demand = spill = recapture = 0
    if class_odemand:
        recapture = host_recapture * class_odemand / host_odemand

        # availability of demand closed during period considered
        k = 1 - avail

        A = np.array([[1, -1], [-k, 1]])
        B = np.array([class_odemand - recapture, 0])
        demand, spill = solve(A, B)

    return demand, spill, recapture
7fda78ce632f1a26ec875c37abe5db40615aa351
11,597
from typing import Optional


def serve_buffer(
        data: bytes,
        offered_filename: str = None,
        content_type: str = None,
        as_attachment: bool = True,
        as_inline: bool = False,
        default_content_type: Optional[str] = MimeType.FORCE_DOWNLOAD) \
        -> HttpResponse:
    """
    Serve up binary data from a buffer.

    Options as for :func:`serve_file`.
    """
    response = HttpResponse(data)
    add_http_headers_for_attachment(
        response,
        offered_filename=offered_filename,
        content_type=content_type,
        as_attachment=as_attachment,
        as_inline=as_inline,
        content_length=len(data),
        default_content_type=default_content_type)
    return response
aa41df168e3f84468293ca6653c06402e6c395fc
11,598
import numpy as np  # required by the function body below


def get_single_image_results(gt_boxes, pred_boxes, iou_thr):
    """Calculates number of true_pos, false_pos, false_neg from single batch of boxes.

    Args:
        gt_boxes (list of list of floats): list of locations of ground truth
            objects as [xmin, ymin, xmax, ymax]
        pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)
            and 'scores'
        iou_thr (float): value of IoU to consider as threshold for a
            true prediction.

    Returns:
        dict: true positives (int), false positives (int), false negatives (int)
    """
    all_pred_indices = range(len(pred_boxes))
    all_gt_indices = range(len(gt_boxes))
    if len(all_pred_indices) == 0:
        tp = 0
        fp = 0
        fn = len(gt_boxes)
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
    if len(all_gt_indices) == 0:
        tp = 0
        fp = len(pred_boxes)
        fn = 0
        return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}

    gt_idx_thr = []
    pred_idx_thr = []
    ious = []
    for ipb, pred_box in enumerate(pred_boxes):
        for igb, gt_box in enumerate(gt_boxes):
            iou = calc_iou_individual(pred_box, gt_box)
            if iou > iou_thr:
                gt_idx_thr.append(igb)
                pred_idx_thr.append(ipb)
                ious.append(iou)

    args_desc = np.argsort(ious)[::-1]
    if len(args_desc) == 0:
        # No matches
        tp = 0
        fp = len(pred_boxes)
        fn = len(gt_boxes)
    else:
        gt_match_idx = []
        pred_match_idx = []
        for idx in args_desc:
            gt_idx = gt_idx_thr[idx]
            pr_idx = pred_idx_thr[idx]
            # If the boxes are unmatched, add them to matches
            if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):
                gt_match_idx.append(gt_idx)
                pred_match_idx.append(pr_idx)
        tp = len(gt_match_idx)
        fp = len(pred_boxes) - len(pred_match_idx)
        fn = len(gt_boxes) - len(gt_match_idx)

    return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}
ec97189c8c75686aa172292179228621e244982f
11,599
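A toy evaluation sketch; calc_iou_individual is the module's own IoU helper, so a minimal stand-in is defined here only to make the example self-contained (boxes and threshold are made up):

def calc_iou_individual(pred_box, gt_box):  # stand-in for the module helper
    x1 = max(pred_box[0], gt_box[0])
    y1 = max(pred_box[1], gt_box[1])
    x2 = min(pred_box[2], gt_box[2])
    y2 = min(pred_box[3], gt_box[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_p = (pred_box[2] - pred_box[0]) * (pred_box[3] - pred_box[1])
    area_g = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    return inter / float(area_p + area_g - inter)

gt_boxes = [[0, 0, 10, 10], [20, 20, 30, 30]]
pred_boxes = [[1, 1, 10, 10], [50, 50, 60, 60]]
print(get_single_image_results(gt_boxes, pred_boxes, iou_thr=0.5))
# {'true_pos': 1, 'false_pos': 1, 'false_neg': 1}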