Dataset columns:
  content: string (lengths 35 to 762k)
  sha1: string (length 40)
  id: int64 (0 to 3.66M)
from .esri_basemap import esrimap

def classFactory(iface):  # pylint: disable=invalid-name
    """Load esrimap class from file esrimap.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    return esrimap(iface)
3e067a97cba21a07c818077e4207cd8e337143d9
8,849
def gpib_open(name):
    """
    Start a device session.

    Returns a unique integer for the instrument at the specified GPIB address.
    For example::

      >>> gpib_open("lan[158.154.1.110]:19")
      4

    @param name : LAN/GPIB address of the device
    @type  name : str

    @return: int
    """
    (devtype, devID) = name.split()
    address = eval(devtype)[devID]['addr']
    return _open(address)
fa9e87a3873248866586758c0b0f370a3ad29e6e
8,850
def myjobs_view(request):
    """
    Renderbox view
    :param request:
    :return:
    """
    return render(request, 'renderbox/myjobs.html')
ac0ffbc92a33657a165beb5e12905e3dc495c943
8,851
def _match_contact(filter_criteria):
    """
    This default matching strategy function will attempt to get a single result
    for the specified criteria.

    It will fail with an `unmatched` result if there are no matching contacts.
    It will fail with a `multiple_matches` result if there are multiple matches
    for this criteria.
    """
    contact = None
    try:
        contact = get_queryset_object(Contact.objects.all(), **filter_criteria)
        contact_matching_status = ContactMatchingStatus.matched
    except Contact.DoesNotExist:
        contact_matching_status = ContactMatchingStatus.unmatched
    except Contact.MultipleObjectsReturned:
        contact_matching_status = ContactMatchingStatus.multiple_matches
    return contact, contact_matching_status
088199ac26dc226e1412b43ed0c9b380c669c64e
8,853
from typing import Generator

def get_objects_dictionary():
    """
    creates a dictionary with the types and the circuit objects
    :return: Dictionary instance
    """
    object_types = {'bus': Bus(),
                    'load': Load(),
                    'static_generator': StaticGenerator(),
                    'battery': Battery(),
                    'generator': Generator(),
                    'shunt': Shunt(),
                    'wires': Wire(),
                    'overhead_line_types': Tower(),
                    'underground_cable_types': UndergroundLineType(),
                    'sequence_line_types': SequenceLineType(),
                    'transformer_types': TransformerType(),
                    'branch': Branch(),
                    'transformer2w': Transformer2W(),
                    'line': Line(),
                    'dc_line': DcLine(None, None),
                    'hvdc': HvdcLine(),
                    'vsc': VSC(Bus(), Bus(is_dc=True)),
                    }
    return object_types
bd82c2dc30877f841e4275aafbe054849b6f6ba2
8,854
def create_stripe_onboarding_link(request, stripe_id=None):
    """Creates stripe connect onboarding link by calling Stripe API."""
    account_links = stripe.AccountLink.create(
        account=stripe_id,
        return_url=request.build_absolute_uri(
            reverse("users:stripe_callback")
        ),
        refresh_url=request.build_absolute_uri(
            reverse("users:stripe_authorize")
        ),
        type="account_onboarding",
    )
    return account_links
1dd1e7c50645fb5eaa36d7426abd5cff198e1610
8,855
def add_scheme_if_missing(url):
    """
    >>> add_scheme_if_missing("example.org")
    'http://example.org'
    >>> add_scheme_if_missing("https://example.org")
    'https://example.org'
    """
    if "//" not in url:
        url = "http://%s" % url
    return url
97a33ce1f60ab67e6a807ef1bd1d95250b5d18c6
8,856
from typing import Dict

def _extract_assembly_information(job_context: Dict) -> Dict:
    """Determine the Ensembl assembly version and name used for this index.

    Ensembl will periodically release updated versions of the
    assemblies which are where the input files for this processor
    come from. All divisions other than the main one have identical
    release versions, but we don't know which division these files
    came from so we can't just hit their API again. Therefore, look at
    the URL we used to get the files because it contains the assembly
    version and name.

    I'll admit this isn't the most elegant solution, but since the
    transcriptome index's only database model is the OriginalFiles
    until processing is complete, there's no other way to pass this
    information through to this processor without modifying the
    OriginalFile model.

    The URL path we're attempting to parse follows this pattern (defined in the surveyor)
    ftp://ftp.{url_root}/gtf/{species_sub_dir}/{filename_species}.{assembly_name}.{assembly_version}.gtf.gz
    and we are attempting to extract {assembly_version} and {assembly_name}.
    """
    original_files = job_context["original_files"]

    for og_file in original_files:
        if ".gtf.gz" in og_file.source_filename:
            extensionless_url = og_file.source_url[:-7]
            version_start_index = extensionless_url.rfind(".") + 1
            job_context["assembly_version"] = extensionless_url[version_start_index:]

            # Decrement the index to skip the period.
            versionless_url = extensionless_url[:version_start_index - 1]
            assembly_name_start_index = versionless_url.rfind(".") + 1
            job_context["assembly_name"] = versionless_url[assembly_name_start_index:]

    return job_context
b78513b826c0a12bf87563095e33320aee328b76
8,857
def fixture_circle_2() -> Circle:
    """Return an example circle."""
    return Circle(Point(0.0, 0.0), 1.0)
4040cb356a1e09cfe83280711d93a43b9352ff66
8,858
from warnings import warn import logging from fparser import api from loopy.frontend.fortran.translator import F2LoopyTranslator from loopy.transform.callable import merge from loopy.frontend.fortran.translator import specialize_fortran_division def parse_fortran(source, filename="<floopy code>", free_form=None, strict=None, seq_dependencies=None, auto_dependencies=None, target=None): """ :returns: a :class:`loopy.TranslationUnit` """ parse_plog = ProcessLogger(logger, "parsing fortran file '%s'" % filename) if seq_dependencies is not None and auto_dependencies is not None: raise TypeError( "may not specify both seq_dependencies and auto_dependencies") if auto_dependencies is not None: warn("auto_dependencies is deprecated, use seq_dependencies instead", DeprecationWarning, stacklevel=2) seq_dependencies = auto_dependencies if seq_dependencies is None: seq_dependencies = True if free_form is None: free_form = True if strict is None: strict = True console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s") console.setFormatter(formatter) logging.getLogger("fparser").addHandler(console) tree = api.parse(source, isfree=free_form, isstrict=strict, analyze=False, ignore_comments=False) if tree is None: raise LoopyError("Fortran parser was unhappy with source code " "and returned invalid data (Sorry!)") f2loopy = F2LoopyTranslator(filename, target=target) f2loopy(tree) kernels = f2loopy.make_kernels(seq_dependencies=seq_dependencies) prog = merge(kernels) all_kernels = [clbl.subkernel for clbl in prog.callables_table.values()] for knl in all_kernels: prog.with_kernel(_add_assignees_to_calls(knl, all_kernels)) if len(all_kernels) == 1: # guesssing in the case of only one function prog = prog.with_entrypoints(all_kernels[0].name) prog = specialize_fortran_division(prog) parse_plog.done() return prog
69d85ba20fd429598d3441297a89a12933f69925
8,859
def compute_sources(radius, evolved_vars):
    """
    Computes source terms for the symmetry.
    """
    mass_density = evolved_vars[0]
    momentum_density = evolved_vars[1]
    energy_density = evolved_vars[2]

    factor = -_symmetry_alpha / radius
    pressure = compute_pressure(mass_density, momentum_density, energy_density)
    return (factor * momentum_density,
            factor * momentum_density**2 / mass_density,
            factor * (energy_density + pressure) * momentum_density / mass_density)
f2c7c68f3d00a063f9a29b220f98f71c6bb02aef
8,860
def average_pq(ps, qs):
    """
    average the multiple position and quaternion array

    Args:
        ps (np.array): multiple position array of shape Nx3
        qs (np.array): multiple quaternion array of shape Nx4

    Returns:
        p_mean (np.array): averaged position array
        q_mean (np.array): averaged quaternion array
    """
    p_average = np.mean(np.asarray(ps), axis=0)
    q_average = average_q(np.asarray(qs))
    return p_average, q_average
b7064d75f07361d60375de1dad91e0139533b042
8,861
def logobase(**kwargs):
    """Create a PyGraphviz graph for a logo."""
    ag = pygraphviz.AGraph(bgcolor='#D0D0D0', strict=False, directed=True,
                           ranksep=0.3, **kwargs)
    ag.edge_attr['penwidth'] = 1.4
    ag.edge_attr['arrowsize'] = 0.8
    return ag
60772de3f3b33f58559ecfd3293cffc26cfe8e70
8,862
import torch def integral_raycasting( pixels: Tensor, mu: Tensor, rho: Tensor, lambd: Tensor, appearance: Tensor, background_appearance: Tensor, K: Tensor, dist_coef: Tensor = None, alpha: float = 2.5e-2, beta: float = 2e0, eps: float = 1e-8, ) -> Tensor: """ :param pixels: [H, W, 3, 1] :param mu: [*, N, 3, 1] :param rho: [*, N, 3, 3] :param lambd: [*, N, 3, 1] :param appearance: [*, N, 3] :param background_appearance: [*, 1, 3] :param K: [*, 3, 3] :param dist_coef: [*, D] :param alpha: :param beta: :param function: :param eps: :return: """ rays = pixel_grid_to_ray_grid( xyz=pixels, K=K, dist_coef=dist_coef, ) lambd, alpha = invert_lambd( lambd=lambd, alpha=alpha, eps=eps, ) rays_sigma_rays, mu_sigma_mu, rays_sigma_mu = compute_quantities( rays=rays, mu=mu, rho=rho, lambd=lambd, ) z = optimal_z(rays_sigma_mu=rays_sigma_mu, rays_sigma_rays=rays_sigma_rays, eps=eps) z_background = beta * max_z(z=z) weights = density(x=z) * integral( rays_sigma_rays=rays_sigma_rays, mu_sigma_mu=mu_sigma_mu, rays_sigma_mu=rays_sigma_mu, alpha=alpha, eps=eps, ) weight_background = density(x=z_background) * background_integral( z=z_background, alpha=alpha, ) shape = weights.shape[:-1] + weight_background.shape[-1:] weight_background = weight_background.expand(shape) weights = torch.cat([weights, weight_background], dim=-1) weights = normalize_weights(weights=weights, eps=eps) appearance = torch.cat([appearance, background_appearance], dim=-2) image = splat_image(weights=weights, appearance=appearance) return image
fc5165c04732ea021d105df5d5f997524b037abd
8,863
async def cors_handler(request, handler):
    """Middleware to add CORS response headers"""
    response = await handler(request)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
c9f33261b1fb2e6dc3ab3139e657106a94c5bfd1
8,864
def validate_image(task: ExternalTask):
    """
    To simulate BPMN/Failure/Success, this handler uses image name variable (to be passed when launching the process)
    """
    log_context = {"WORKER_ID": task.get_worker_id(),
                   "TASK_ID": task.get_task_id(),
                   "TOPIC": task.get_topic_name()}
    log_with_context("executing validate_image", log_context)
    img_name = task.get_variable('imgName')

    if "poor" in img_name:
        return task.bpmn_error("POOR_QUALITY_IMAGE", "Image quality is bad",
                               {"img_rejection_code": "POOR_QUALITY_CODE_XX",
                                "img_rejection_reason": f"Image quality must be at least GOOD"})
    elif "jpg" in img_name:
        return task.complete({"img_approved": True})
    elif "corrupt" in img_name:
        return task.failure("Cannot validate image", "image is corrupted", 0,
                            default_config.get("retryTimeout"))
    else:
        return task.bpmn_error("INVALID_IMAGE", "Image extension must be jpg",
                               {"img_rejection_code": "INVALID_IMG_NAME",
                                "img_rejection_reason": f"Image name {img_name} is invalid"})
97413656181bfc4480dc7b2a195713e8124d44f2
8,865
def simulate_patch(app, path, **kwargs): """Simulates a PATCH request to a WSGI application. Equivalent to:: simulate_request(app, 'PATCH', path, **kwargs) Args: app (callable): The WSGI application to call path (str): The URL path to request Keyword Args: params (dict): A dictionary of query string parameters, where each key is a parameter name, and each value is either a ``str`` or something that can be converted into a ``str``, or a list of such values. If a ``list``, the value will be converted to a comma-delimited string of values (e.g., 'thing=1,2,3'). params_csv (bool): Set to ``False`` to encode list values in query string params by specifying multiple instances of the parameter (e.g., 'thing=1&thing=2&thing=3'). Otherwise, parameters will be encoded as comma-separated values (e.g., 'thing=1,2,3'). Defaults to ``True``. headers (dict): Additional headers to include in the request (default: ``None``) body (str): A string to send as the body of the request. Accepts both byte strings and Unicode strings (default: ``None``). If a Unicode string is provided, it will be encoded as UTF-8 in the request. json(JSON serializable): A JSON document to serialize as the body of the request (default: ``None``). If specified, overrides `body` and the Content-Type header in `headers`. protocol: The protocol to use for the URL scheme (default: 'http') host(str): A string to use for the hostname part of the fully qualified request URL (default: 'falconframework.org') remote_addr (str): A string to use as the remote IP address for the request (default: '127.0.0.1') extras (dict): Additional CGI variables to add to the WSGI ``environ`` dictionary for the request (default: ``None``) """ return simulate_request(app, 'PATCH', path, **kwargs)
48fda74dc2765e3a281a71c7ba6f4144e9a258cd
8,866
from functools import partial

import numpy as np

def minimum_image_box(sizes):
    """Creates a distance wrapper using the minimum image convention

    Arguments:
        sizes (array-like of float): box sizes
    """
    def _box(sizes, distance_vectors):
        """A minimum image wrapper for distances"""
        shift = sizes[None, None, :] * np.round(distance_vectors / sizes[None, None, :])
        distance_vectors -= shift
        return distance_vectors
    return partial(_box, np.array(sizes))
5d26092a988a011e9fb1967a74c3ceec935f5b1b
8,867
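For reference, a small self-contained check of the closure returned by minimum_image_box above: a displacement of 9 in a periodic box of length 10 wraps to -1, the nearest periodic image. The (1, 1, 3) shape matches the broadcasting used inside _box.

import numpy as np

box = minimum_image_box([10.0, 10.0, 10.0])
d = np.array([[[9.0, -6.0, 4.0]]])   # one pair of particles, 3D displacement
print(box(d))                        # [[[-1.  4.  4.]]]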
def mlrPredict(W, data):
    """
    mlrObjFunction predicts the label of data given the data and parameter W
    of Logistic Regression

    Input:
        W: the matrix of weight of size (D + 1) x 10. Each column is the weight
           vector of a Logistic Regression classifier.
        X: the data matrix of size N x D

    Output:
        label: vector of size N x 1 representing the predicted label of
               corresponding feature vector given in data matrix
    """
    label = np.zeros((data.shape[0], 1))

    ##################
    # YOUR CODE HERE #
    ##################
    # HINT: Do not forget to add the bias term to your input data

    x = np.hstack((np.ones((data.shape[0], 1)), data))
    label = (np.argmax((np.exp(np.dot(x, W)) / np.sum(np.exp(np.dot(x, W)))), axis=1)).reshape((data.shape[0], 1))

    return label
57542e5b54ddd223f4cbcae7adf932e85c4ffeeb
8,868
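A quick shape check for mlrPredict above, using random weights in place of trained ones (the 10 columns play the role of the 10 one-vs-all classifiers); the sizes here are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
D, N = 4, 3
W = rng.normal(size=(D + 1, 10))   # +1 row for the bias term
data = rng.normal(size=(N, D))

labels = mlrPredict(W, data)
print(labels.shape)                # (3, 1), each entry an integer class in 0..9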
def calc_mean_score(movies):
    """Helper method to calculate mean of list of Movie namedtuples,
    round the mean to 1 decimal place"""
    return round(sum([movie.score for movie in movies]) / len(movies), 1)
ccf52f813091d1c907470996c62dafa61303e245
8,869
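A small usage sketch for calc_mean_score above, assuming the Movie namedtuple only needs a score field:

from collections import namedtuple

Movie = namedtuple("Movie", "title score")
movies = [Movie("A", 8.0), Movie("B", 7.5), Movie("C", 9.1)]
print(calc_mean_score(movies))   # 8.2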
import hmac
import hashlib

def get_proxy_signature(query_dict, secret):
    """
    Calculate the signature of the given query dict as per Shopify's documentation for proxy requests.

    See: http://docs.shopify.com/api/tutorials/application-proxies#security
    """
    # Sort and combine query parameters into a single string.
    sorted_params = ''
    for key in sorted(query_dict.keys()):
        sorted_params += "{0}={1}".format(key, ",".join(query_dict.getlist(key)))

    signature = hmac.new(secret.encode('utf-8'), sorted_params.encode('utf-8'), hashlib.sha256)
    return signature.hexdigest()
c234f18c1d44a936c4844ae2fe1b912a624eef61
8,870
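A usage sketch for get_proxy_signature above. It expects a Django QueryDict-like object (anything with a .getlist method); the query string and secret below are placeholders, and settings.configure() is only needed when running outside a Django project.

from django.conf import settings
from django.http import QueryDict

settings.configure()   # minimal setup so QueryDict can be built standalone
qd = QueryDict("shop=some-shop.myshopify.com&path_prefix=/apps/proxy&timestamp=1317327555")
print(get_proxy_signature(qd, "my-shared-secret"))   # hex-encoded HMAC-SHA256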
def candlestick_echarts(data_frame: pd.DataFrame, time_field: str = 'time', open_field: str = "open", high_field: str = 'high', low_field: str = 'low', close_field: str = 'close', volume_field: str = 'volume', mas: list = [5, 10, 30], log_y: bool = True, title: str = "", width: str = "100%", height: str = "600px", left_padding: str = '5%', right_padding: str = '3%') -> Echarts: """ 绘制K线 :param data_frame: :param time_field: 时间列名, 如果指定的列不存在,使用index作为time :param open_field: open列名 :param high_field: high列名 :param low_field: low列名 :param close_field: close列名 :param volume_field: volume列名 :param mas: 均线组 :param log_y: y轴 log分布 底为1.1 一个格子对应10% :param title: 可选标题 :param width: 输出div的宽度 支持像素和百分比 比如800px/100% :param height: 输出div的高度 支持像素和百分比 比如800px/100% :param left_padding: 左侧padding宽度 :param right_padding: 右侧padding宽度 :return: """ df = data_frame.copy() if time_field not in data_frame.columns: # 使用index作为时间 df[time_field] = df.index df[close_field] = df[close_field].fillna(method="ffill") df[open_field] = df[open_field].fillna(df[close_field]) df[high_field] = df[high_field].fillna(df[close_field]) df[low_field] = df[low_field].fillna(df[close_field]) df[volume_field] = df[volume_field].fillna(0) volumes = (df[volume_field]).round(2).tolist() vol_filter = (df[volume_field]).quantile([0.05, 0.95]).values bar_items = [({"value": vol} if vol >= vol_filter[0] and vol <= vol_filter[1] else ( {"value": vol, "itemStyle": {"color": "red"}} if vol > vol_filter[1] else {"value": vol, "itemStyle": {"color": "green"}})) for vol in volumes] options = { 'animation': False, 'title': {'text': title}, 'legend': {'top': 10, 'left': 'center', 'data': [title]}, 'tooltip': { 'trigger': 'axis', 'axisPointer': {'type': 'cross'}, 'borderWidth': 1, 'borderColor': '#ccc', 'padding': 10, 'formatter': Js(""" function(params){ var dt = params[0]['axisValue']; var labels = []; labels.push('<b><span>时间:&nbsp;</span></b>' + dt + '<br/>'); params.sort(function(a, b) { if (a.seriesName < b.seriesName ) {return -1;} else if (a.seriesName > b.seriesName ) {return 1;} else{ return 0;} }); for (let i = 0; i < params.length; i++) { const param = params[i]; var label=["<b><span>"+param['seriesName']+"("+param['seriesType']+"):&nbsp;</span></b>"]; var dimensionNames=param["dimensionNames"]; if (typeof(param['value'])=='object' && dimensionNames.length==param['data'].length){ label.push("<br/>"); for (let j = 1; j <dimensionNames.length; j++) { var value= param['data'][j]; if (typeof(value)=='number'){ if (value%1==0 || value>100000){ label.push("<span>"+dimensionNames[j]+':&nbsp;'+value.toFixed(0)+"</span><br/>"); }else{ label.push("<span>"+dimensionNames[j]+':&nbsp;'+value.toFixed(2)+"</span><br/>"); } }else{ label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+dimensionNames[j]+':&nbsp;'+value+"</div>"); } } }else if(param['seriesType']=="candlestick"){ label.push("<br/>"); label.push("<span>open:&nbsp;"+param['data'][1].toFixed(2)+"</span><br/>"); label.push("<span>close:&nbsp;"+param['data'][2].toFixed(2)+"</span><br/>"); label.push("<span>high:&nbsp;"+param['data'][4].toFixed(2)+"</span><br/>"); label.push("<span>low:&nbsp;"+param['data'][3].toFixed(2)+"</span><br/>"); }else if(typeof(param['value'])=='number'){ if (param['value']%1==0){ label.push("<span>"+param['value'].toFixed(0)+"</span><br/>"); }else{ label.push("<span>"+param['value'].toFixed(2)+"</span><br/>"); } }else if(param['value']){ label.push("<div style='max-width:15em;word-break:break-all;white-space: normal;'>"+value+"</div>"); 
}else{ label.push("<br/>"); } var cardStr= label.join(''); labels.push(cardStr); } return labels.join(''); }"""), 'textStyle': {'color': '#000'}, 'position': Js(""" function (pos, params, el, elRect, size){ var obj = {top: 10}; obj[['left', 'right'][+(pos[0] < size.viewSize[0] / 2)]] = 30; return obj; } """) }, 'axisPointer': { 'link': {'xAxisIndex': 'all'}, 'label': {'backgroundColor': '#777'} }, 'grid': [ {'left': left_padding, 'right': right_padding, 'height': '70%'}, {'left': left_padding, 'right': right_padding, 'top': '71%', 'height': '16%'} ], 'xAxis': [ { 'type': 'category', 'data': df[time_field].tolist(), 'scale': True, 'boundaryGap': False, 'axisLine': {'show': False}, 'axisLabel': {'show': False}, 'axisTick': {'show': False}, 'splitLine': {'show': True}, 'splitNumber': 20, 'min': 'dataMin', 'max': 'dataMax', 'axisPointer': { 'z': 100 } }, { 'type': 'category', 'gridIndex': 1, 'data': df[time_field].tolist(), 'scale': True, 'boundaryGap': False, 'axisLine': {'onZero': False, 'show': True}, 'axisLine': {'show': True}, 'axisLabel': {'show': True}, 'axisTick': {'show': True}, 'splitLine': {'show': True}, 'axisLabel': {'show': True}, 'splitNumber': 20, 'min': 'dataMin', 'max': 'dataMax' } ], 'yAxis': [ { 'scale': True, 'type': 'log' if log_y else 'value', 'logBase': 1.1, 'splitNumber': 10, 'axisLabel': {'show': True, 'formatter': Js(""" function(value,index){ return value.toFixed(2); } """)}, 'axisLine': {'show': False}, 'axisTick': {'show': True}, 'splitLine': {'show': True} }, { 'scale': True, 'gridIndex': 1, 'splitNumber': 2, 'axisLabel': {'show': True, 'formatter': Js(""" function(value,index){ var si = [ { value: 1, symbol: "" }, { value: 1E3, symbol: "K" }, { value: 1E6, symbol: "M" }, { value: 1E9, symbol: "G" }, { value: 1E12, symbol: "T" }, { value: 1E15, symbol: "P" }, { value: 1E18, symbol: "E" } ]; var rx = /\.0+$|(\.[0-9]*[1-9])0+$/; var i; for (i = si.length - 1; i > 0; i--) { if (value >= si[i].value) { break; } } return (value / si[i].value).toFixed(2).replace(rx, "$1") + si[i].symbol; } """) }, 'axisLine': {'show': False}, 'axisTick': {'show': False}, 'splitLine': {'show': False} } ], 'dataZoom': [ { 'type': 'inside', 'xAxisIndex': [0, 1], 'start': 0, 'end': 100 } ], 'series': [ { 'name': title, 'type': 'candlestick', 'data': df[[open_field, close_field, low_field, high_field]].values.tolist(), 'emphasis': { 'itemStyle': { 'borderColor': "#333", 'borderWidth': 1, 'shadowColor': 'rgba(0, 0, 0, 0.5)', 'shadowBlur': 15 } } }, { 'name': 'Volume', 'type': 'bar', 'xAxisIndex': 1, 'yAxisIndex': 1, 'data': bar_items, 'emphasis': { 'itemStyle': { 'borderColor': "#333", 'borderWidth': 1, 'shadowColor': 'rgba(0, 0, 0, 0.5)', 'shadowBlur': 15 } } } ] } for ma_len in mas: name = "MA" + str(ma_len) df[name] = df[close_field].rolling(ma_len).mean().round(2) series_ma = { 'name': name, 'type': 'line', 'data': df[name].tolist(), 'smooth': True, 'showSymbol': False, 'lineStyle': {'opacity': 0.5} } options['series'].append(series_ma) options['legend']['data'].append(name) return Echarts(options=options, width=width, height=height)
f8bc3d1ef876a5df0f2fdbdf7dbf97b039a54cc4
8,871
def select_sounder_hac(path_sounder, sounder):
    """
    Finds the indices of a sounder in a hac file (path_sounder) and returns the
    matching sounder and transducer indexes.

    inputs:
        path_sounder: path of the hac file to analyse
        sounder: name of the transducer

    outputs:
        index of the sounder and of the transducer
    """
    list_sounder = util.hac_sounder_descr(FileName=path_sounder)
    list_st = [
        [
            list_sounder.GetSounder(isdr).GetTransducer(itsd).m_transName
            for itsd in range(list_sounder.GetSounder(isdr).m_numberOfTransducer)
        ]
        for isdr in range(list_sounder.GetNbSounder())
    ]
    for i in range(len(list_st)):
        for j in range(len(list_st[i])):
            if list_st[i][j] == sounder:
                return i, j
    return None
2f054ef6a8e3a64f0910e5eb4bce9407befc4b33
8,872
def upvote_checklist(request, checklist_id): # for "messages", refer https://stackoverflow.com/a/61603003/6543250 """if user cannot retract upvote, then this code be uncommented if Upvote.objects.filter(user=User.objects.filter(username=username).first(), checklist=Checklist.objects.get(id=checklist_id)): msg = 'You have already upvoted the checklist once!' messages.info(request, msg) """ """ Note: notifications recorded only when a user upvotes the checklist not downvote in order to promote healthy behaviour and not let the author inundate with downvote notifs in case some user decides to harass the author. """ if Checklist.objects.get(id=checklist_id).author == request.user: msg = "Action Denied! You cannot upvote your own checklist!" messages.error(request, msg) else: # remove user's upvote if he has already upvoted obj = Upvote.objects.filter( user=request.user, checklist=Checklist.objects.get(id=checklist_id) ) msg = "" if obj: obj.delete() msg = "Upvote retracted!" else: upvote_obj = Upvote( user=request.user, checklist=Checklist.objects.get(id=checklist_id), ) upvote_obj.save() msg = "Checklist upvoted!" # also update notifications table so relevant notif can be shown to author fromUser = request.user toUser = Checklist.objects.get(id=checklist_id).author Notification( fromUser=fromUser, toUser=toUser, notif_type=1, checklist=Checklist.objects.get(id=checklist_id), ).save() messages.success(request, msg) if request.META.get("HTTP_REFERER"): if "login" in request.META.get("HTTP_REFERER") and "next" in request.META.get( "HTTP_REFERER" ): return redirect("checklist-home") # redirect to home url; simply reload the page # return redirect('checklist-home') return redirect(request.META.get("HTTP_REFERER", "checklist-home"))
559f9e0341652391b824b215448f87fa3250baae
8,873
def index(request):
    """Query page."""
    ctx = {}
    Advert_1 = Advert.objects.get(advert_num=1)  # advert 1
    Advert_2 = Advert.objects.get(advert_num=2)  # advert 2
    ctx['Adverturl1'] = Advert_1.advert_url
    ctx['Adverturl2'] = Advert_2.advert_url
    ctx['Advertimg1'] = '/advert/' + str(Advert_1.img)
    ctx['Advertimg2'] = '/advert/' + str(Advert_2.img)
    return render(request, 'srsys/index.html', ctx)
91e7a771273ed262e7025bc289defe7f6a52047e
8,874
def load_amazon():
    """ """
    df = pd.read_csv('data/amazon.txt', header=None, delimiter='\t')
    X_data = df[0].tolist()
    y_data = df[1].tolist()

    print('Preprocessing...')

    vectorizer = TfidfVectorizer(strip_accents='unicode',
                                 lowercase=True,
                                 stop_words='english',
                                 ngram_range=(1, 2),
                                 max_df=0.5,
                                 min_df=5,
                                 max_features=20000,
                                 norm='l2',
                                 use_idf=True,
                                 smooth_idf=True,
                                 sublinear_tf=False)
    vectorizer.fit(X_data)
    X_data = vectorizer.transform(X_data)

    X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.1, random_state=0)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0)

    return X_train, y_train, X_val, y_val, X_test, y_test
8e11cf91d616f7dfe17e26da2fcf43d82ea26f80
8,875
def get_switch_filters(
    switch_id, exception_when_missing=True,
    user=None, session=None, **kwargs
):
    """get filters of a switch."""
    return _get_switch(
        switch_id, session=session,
        exception_when_missing=exception_when_missing
    )
db270f761fcdfb40a9d2970923b4643ebecf7cc3
8,876
def generalized_zielonka_with_psolC(g):
    """
    Zielonka's algorithm with psolC partial solver.
    :param g: the game to solve.
    :return: the solution in the following format : (W_0, W_1).
    """
    return generalized_parity_solver_with_partial(g, psolC_gen.psolC_generalized)
1ac4a81df393970c16a5f303155c89cf74db34ab
8,877
from datetime import datetime

def read_properties_core(xml_source):
    """Read assorted file properties."""
    properties = DocumentProperties()
    root = fromstring(xml_source)
    creator_node = root.find(QName(NAMESPACES['dc'], 'creator').text)
    if creator_node is not None:
        properties.creator = creator_node.text
    else:
        properties.creator = ''
    last_modified_by_node = root.find(
        QName(NAMESPACES['cp'], 'lastModifiedBy').text)
    if last_modified_by_node is not None:
        properties.last_modified_by = last_modified_by_node.text
    else:
        properties.last_modified_by = ''

    created_node = root.find(QName(NAMESPACES['dcterms'], 'created').text)
    if created_node is not None:
        properties.created = W3CDTF_to_datetime(created_node.text)
    else:
        # `datetime` here is the class imported above, not the module
        properties.created = datetime.now()

    modified_node = root.find(QName(NAMESPACES['dcterms'], 'modified').text)
    if modified_node is not None:
        properties.modified = W3CDTF_to_datetime(modified_node.text)
    else:
        properties.modified = properties.created

    return properties
357411103a52bbbfc6e621c47b734b9d11f04284
8,879
import torch

def batch_decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors, 4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    boxes = torch.cat((
        priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
    boxes[:, :, :2] -= boxes[:, :, 2:] / 2
    boxes[:, :, 2:] += boxes[:, :, :2]
    return boxes
7963b771e2c7bc560e5f9e5051abea43de2f46e3
8,880
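A hand-checkable toy case for batch_decode above: with zero predicted offsets the decoded box is just the prior converted from center-size form to corner form.

import torch

priors = torch.tensor([[[0.5, 0.5, 0.2, 0.2]]])   # (cx, cy, w, h)
loc = torch.zeros(1, 1, 4)
print(batch_decode(loc, priors, variances=[0.1, 0.2]))
# tensor([[[0.4000, 0.4000, 0.6000, 0.6000]]])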
def _step2_macs_seq (configs): """Step2 MACS if the raw data type is seq. So it will use the output from step1. """ # check the input t_rep_files = configs["samtools.treat_output_replicates"] t_comb_file = configs["samtools.treat_output"] c_comb_file = configs["samtools.control_output"] macs_genome_option = " -g "+ configs["sample.species"]+" " # run MACS, first for each replicate for i in range(1,configs["data.number_replicates"]+1): if configs["data.has_control"]: # run MACS w/ control command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_rep_files[i-1]+" -c "+ c_comb_file + " -n "+configs["sample.sample_id"]+"_rep"+str(i) run_cmd(command_line) # copy out and rename the wiggle file command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_rep"+str(i)+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig_replicates"][i-1] run_cmd(command_line) else: # run MACS w/o control command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_rep_files[i-1]+" -n "+configs["sample.sample_id"]+"_rep"+str(i) run_cmd(command_line) # copy out and rename the wiggle file command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_rep"+str(i)+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig_replicates"][i-1] run_cmd(command_line) # run MACS for the combined treatment if configs["data.number_replicates"] == 1: # no need to run MACS again, simply copy the previous results command_line = "cp "+configs["sample.sample_id"]+"_rep1_peaks.xls"+" "+configs["macs.output_xls"] run_cmd(command_line) command_line = "cp "+configs["sample.sample_id"]+"_rep1_peaks.bed"+" "+configs["macs.output_bed"] run_cmd(command_line) command_line = "cp "+configs["sample.sample_id"]+"_rep1_summits.bed"+" "+configs["macs.output_summits"] run_cmd(command_line) command_line = "cp "+configs["macs.output_treat_wig_replicates"][0]+" "+configs["macs.output_treat_wig"] run_cmd(command_line) if configs["data.has_control"]: command_line = "zcat "+configs["sample.sample_id"]+"_rep"+str(i)+"_MACS_wiggle/control/"+configs["sample.sample_id"]+"_rep1_control_afterfiting_all.wig.gz > "+configs["macs.output_control_wig"] run_cmd(command_line) else: # run MACS on combined alignment files if configs["data.has_control"]: command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_comb_file+" -c "+c_comb_file+" -n "+configs["sample.sample_id"] run_cmd(command_line) # copy out and rename the wiggle file command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig"] run_cmd(command_line) command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/control/"+configs["sample.sample_id"]+"_control_afterfiting_all.wig.gz > "+configs["macs.output_control_wig"] run_cmd(command_line) else: command_line = configs["macs.macs_main"]+macs_genome_option+" -w -S -t "+t_comb_file+" -n "+configs["sample.sample_id"] run_cmd(command_line) # copy out and rename the wiggle file command_line = "zcat "+configs["sample.sample_id"]+"_MACS_wiggle/treat/"+configs["sample.sample_id"]+"_treat_afterfiting_all.wig.gz > "+configs["macs.output_treat_wig"] run_cmd(command_line) return True
69deb8fafeb3f7054901d431d6e32c647504258f
8,881
def menu_entry_to_db(entry):
    """
    Converts a MenuEntry into Meal, Menu, and MenuItem objects which are stored
    in the database.
    """
    menu, _ = Menu.objects.get_or_create(date=entry.date)
    meal = Meal.objects.create(meal_type=entry.meal_type, vendor=entry.vendor)
    for item_name in entry.items:
        item, _ = MenuItem.objects.get_or_create(name=item_name)
        meal.items.add(item)
    if entry.meal_type == 'L':
        if menu.lunch:
            menu.lunch.delete()
        menu.lunch = meal
    if entry.meal_type == 'D':
        if menu.dinner:
            menu.dinner.delete()
        menu.dinner = meal
    menu.save()
    return menu
f35ddb4bb715a3a8bcee073fd863a5f4d8240651
8,882
import torch

def get_device_of(tensor: torch.Tensor) -> int:
    """
    Returns the device of the tensor.
    """
    if not tensor.is_cuda:
        return -1
    else:
        return tensor.get_device()
5532712bd812842fc462951e7c763b9753370174
8,883
def test_script_task(scheduler: Scheduler) -> None:
    """
    Tasks should be definable as shell scripts.
    """

    @task(script=True)
    def task1(message):
        return """echo Hello, {message}!""".format(message=message)

    assert scheduler.run(task1("World")) == b"Hello, World!\n"
c5f764b06f1245feb9ab0c1af5a13fd368fde362
8,884
import copy

def __yaml_tag_test(*args, **kwargs):
    """YAML tag constructor for testing only"""
    return copy.deepcopy(args), copy.deepcopy(kwargs)
0abeb68caf32912c7b5a78dacbc89e537061a144
8,885
def format_data_for_training(data):
    """
    Create numpy array with planet features ready to feed to the neural net.
    :param data: parsed features
    :return: numpy array of shape (number of frames, PLANET_MAX_NUM, PER_PLANET_FEATURES)
    """
    training_input = []
    training_output = []
    for d in data:
        features, expected_output = d

        if len(expected_output.values()) == 0:
            continue

        features_matrix = []
        for planet_id in range(PLANET_MAX_NUM):
            if str(planet_id) in features:
                features_matrix.append(features[str(planet_id)])
            else:
                features_matrix.append([0] * PER_PLANET_FEATURES)

        fm = np.array(features_matrix)

        output = [0] * PLANET_MAX_NUM
        for planet_id, p in expected_output.items():
            output[int(planet_id)] = p
        result = np.array(output)

        training_input.append(fm)
        training_output.append(result)

    return np.array(training_input), np.array(training_output)
b241a932f7a5321ed28dccd8a583fbcf7529e482
8,887
import urllib.request
import json

def idcardcert(appcode, card_no):
    """
    Real-name ID card verification: checks that the two ID-card elements are consistent.
    """
    host = 'http://idquery.market.alicloudapi.com'
    path = '/idcard/query'
    # method = 'GET'
    appcode = appcode
    querys = 'number=%s' % card_no
    # bodys = {}
    url = host + path + '?' + querys

    try:
        request = urllib.request.Request(url)
        request.add_header('Authorization', 'APPCODE ' + appcode)
        response = urllib.request.urlopen(request)
        content = response.read()
        if content:
            return json.loads(content.decode("unicode-escape"))
        return content
    except BaseException:
        return None
a359edf15e7b8795fc80ceda1008f1809d9c52a0
8,888
def custom_error_exception(error=None, exception=None): """Define custom exceptions for MySQL server errors This function defines custom exceptions for MySQL server errors and returns the current set customizations. If error is a MySQL Server error number, then you have to pass also the exception class. The error argument can also be a dictionary in which case the key is the server error number, and value the exception to be raised. If none of the arguments are given, then custom_error_exception() will simply return the current set customizations. To reset the customizations, simply supply an empty dictionary. Examples: import mysql.connector from mysql.connector import errorcode # Server error 1028 should raise a DatabaseError mysql.connector.custom_error_exception( 1028, mysql.connector.DatabaseError) # Or using a dictionary: mysql.connector.custom_error_exception({ 1028: mysql.connector.DatabaseError, 1029: mysql.connector.OperationalError, }) # Reset mysql.connector.custom_error_exception({}) Returns a dictionary. """ global _CUSTOM_ERROR_EXCEPTIONS if isinstance(error, dict) and not len(error): _CUSTOM_ERROR_EXCEPTIONS = {} return _CUSTOM_ERROR_EXCEPTIONS if not error and not exception: return _CUSTOM_ERROR_EXCEPTIONS if not isinstance(error, (int, dict)): raise ValueError( "The error argument should be either an integer or dictionary") if isinstance(error, int): error = { error: exception } for errno, exception in error.items(): if not isinstance(errno, int): raise ValueError("error number should be an integer") try: if not issubclass(exception, Exception): raise TypeError except TypeError: raise ValueError("exception should be subclass of Exception") _CUSTOM_ERROR_EXCEPTIONS[errno] = exception return _CUSTOM_ERROR_EXCEPTIONS
eb24301d2511199e1ee1407152f27d00b72adba5
8,889
import hashlib

def cal_md5(content):
    """
    Compute the MD5 digest of the string `content`.
    :param content:
    :return:
    """
    # encode the string before hashing
    result = hashlib.md5(content.encode())
    # hex digest of the hash
    md5 = result.hexdigest()
    return md5
0cd26654c364e34ecc27b0a0b4d410a539e286c3
8,890
def pas(al, ap, bl, bp):
    """ Position-angle from spherical coordinates.

    :param al: longitude of point A in radians.
    :type al: float

    :param ap: latitude of point A in radians.
    :type ap: float

    :param bl: longitude of point B in radians.
    :type bl: float

    :param bp: latitude of point B in radians.
    :type bp: float

    :returns: position angle of B with respect to A in radians (float).

    .. seealso:: |MANUAL| page 145
    """
    return _sofa.iauPas(float(al), float(ap), float(bl), float(bp))
9d8321c908c793df84e5ff28c51e4a79f6db99c6
8,894
def get_messy_items_for_training(mod_factor=5): """ Fetch a subset of `FacilityListItem` objects that have been parsed and are not in an error state. Arguments: mod_factor -- Used to partition a subset of `FacilityListItem` records. The larger the value, the fewer records will be contained in the subset. Returns: A dictionary. The key is the `FacilityListItem` ID. The value is a dictionary of clean field values keyed by field name (country, name, address). A "clean" value is one which has been passed through the `clean` function. """ facility_list_item_set = FacilityListItem.objects.exclude( Q(status=FacilityListItem.UPLOADED) | Q(status=FacilityListItem.ERROR) | Q(status=FacilityListItem.ERROR_PARSING) | Q(status=FacilityListItem.ERROR_GEOCODING) | Q(status=FacilityListItem.ERROR_MATCHING) ).extra( select={'country': 'country_code'}).values( 'id', 'country', 'name', 'address') records = [record for (i, record) in enumerate(facility_list_item_set) if i % mod_factor == 0] return {str(i['id']): {k: clean(i[k]) for k in i if k != 'id'} for i in records}
d04f5471266c33cfea122adac72835043ed6c34a
8,895
def tanh_squared(x: np.ndarray, margin: float, loss_at_margin: float = 0.95):
    """Returns a sigmoidal shaping loss based on Hafner & Riedmiller (2011).

    Args:
        x: A numpy array representing the error.
        margin: Margin parameter, a positive `float`.
        loss_at_margin: The loss when `l2_norm(x) == margin`. A `float` between 0 and 1.

    Returns:
        Shaping loss, a `float` bounded in the half-open interval [0, 1).

    Raises:
        ValueError: If the value of `margin` or `loss_at_margin` is invalid.
    """
    if not margin > 0:
        raise ValueError("`margin` must be positive.")
    if not 0.0 < loss_at_margin < 1.0:
        raise ValueError("`loss_at_margin` must be between 0 and 1.")

    error = np.linalg.norm(x)
    # Compute weight such that at the margin tanh(w * error) = loss_at_margin
    w = np.arctanh(np.sqrt(loss_at_margin)) / margin
    s = np.tanh(w * error)
    return s * s
4c8dbb826dad5b047682fe030362f4fe71021f06
8,896
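The shaping loss above is easy to sanity-check at its two anchor points: zero error gives zero loss, and an error whose norm equals margin gives exactly loss_at_margin.

import numpy as np

print(tanh_squared(np.zeros(3), margin=1.0))           # 0.0
print(tanh_squared(np.array([0.6, 0.8]), margin=1.0))  # 0.95 (norm == margin)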
def _inv_Jacobian_2D(J, detJ):
    """ manually invert 2x2 jacobians J in place """
    tmp = J[:, 1, 1, :] / detJ
    J[:, 0, 1, :] = -J[:, 0, 1, :] / detJ
    J[:, 1, 0, :] = -J[:, 1, 0, :] / detJ
    J[:, 1, 1, :] = J[:, 0, 0, :] / detJ
    J[:, 0, 0, :] = tmp
    return J
23b1ff231e32f09f09dbae781f7e97354f3ca811
8,897
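A quick verification sketch for _inv_Jacobian_2D above, assuming the layout is (elements, 2, 2, points) with detJ of shape (elements, points); it compares against numpy's batched inverse.

import numpy as np

rng = np.random.default_rng(0)
J = rng.normal(size=(4, 2, 2, 7)) + 3.0 * np.eye(2)[None, :, :, None]  # keep well conditioned
detJ = J[:, 0, 0, :] * J[:, 1, 1, :] - J[:, 0, 1, :] * J[:, 1, 0, :]

# reference: move the trailing point axis so np.linalg.inv sees (..., 2, 2) matrices
reference = np.moveaxis(np.linalg.inv(np.moveaxis(J, -1, 1)), 1, -1)
assert np.allclose(_inv_Jacobian_2D(J.copy(), detJ), reference)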
def ratio_error_acc(y_true, y_pred, epsilon, threshold):
    """
    Calculate the ratio error accuracy with the threshold.
    :param y_true:
    :param y_pred:
    :param epsilon:
    :param threshold:
    :return:
    """
    ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon])
    ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon])
    ratio = K.maximum(ratio_1, ratio_2)
    mask = K.cast(K.less(ratio, threshold), dtype="float32")
    return K.mean(mask)
9ae487e056800ac9fb5cc6e92301b74c00d65c21
8,898
def error_embed(ctx: context.ApplicationContext, title: str, description: str,
                author: bool = True) -> discord.Embed:
    """Make a basic error message embed."""
    return make_embed(
        ctx=ctx,
        title=title if title else "Error:",
        description=description,
        color=discord.Color.red(),
        author=author,
    )
aca18ec2d25c4f0a2dec7f4c083716ab9bf4dbae
8,899
from typing import Dict from typing import Any import toml from pathlib import Path import textwrap def load_configuration() -> Dict[str, Any]: """ Return dict from TOML formatted string or file. Returns: The dict configuration. """ default_config = """ [key_bindings] AUTOCLEAR = "c" CANCEL = "esc" ENTER = "enter" FILTER = ["F4", "\\\\"] FOLLOW_ROW = "F" HELP = ["F1", "?"] MOVE_DOWN = ["down", "j"] MOVE_DOWN_STEP = "J" MOVE_END = "end" MOVE_HOME = "home" MOVE_LEFT = ["left", "h"] MOVE_RIGHT = ["right", "l"] MOVE_UP = ["up", "k"] MOVE_UP_STEP = "K" NEXT_SORT = ["p", ">"] PREVIOUS_SORT = "<" PRIORITY_DOWN = ["F8", "d", "]"] PRIORITY_UP = ["F7", "u", "["] QUIT = ["F10", "q"] REMOVE_ASK = ["del", "F9"] RETRY = "r" RETRY_ALL = "R" REVERSE_SORT = "I" SEARCH = ["F3", "/"] SELECT_SORT = "F6" SETUP = "F2" TOGGLE_EXPAND_COLLAPSE = "x" TOGGLE_EXPAND_COLLAPSE_ALL = "X" TOGGLE_RESUME_PAUSE = "space" TOGGLE_RESUME_PAUSE_ALL = "P" TOGGLE_SELECT = "s" UN_SELECT_ALL = "U" ADD_DOWNLOADS = "a" [colors] BRIGHT_HELP = "CYAN BOLD BLACK" FOCUSED_HEADER = "BLACK NORMAL CYAN" FOCUSED_ROW = "BLACK NORMAL CYAN" HEADER = "BLACK NORMAL GREEN" METADATA = "WHITE UNDERLINE BLACK" SIDE_COLUMN_FOCUSED_ROW = "BLACK NORMAL CYAN" SIDE_COLUMN_HEADER = "BLACK NORMAL GREEN" SIDE_COLUMN_ROW = "WHITE NORMAL BLACK" STATUS_ACTIVE = "CYAN NORMAL BLACK" STATUS_COMPLETE = "GREEN NORMAL BLACK" STATUS_ERROR = "RED BOLD BLACK" STATUS_PAUSED = "YELLOW NORMAL BLACK" STATUS_WAITING = "WHITE BOLD BLACK" """ config_dict = {} config_dict["DEFAULT"] = toml.loads(default_config) # Check for configuration file config_file_path = Path(user_config_dir("aria2p")) / "config.toml" if config_file_path.exists(): try: config_dict["USER"] = toml.load(config_file_path) except Exception as error: # noqa: W0703 (too broad exception) logger.error(f"Failed to load configuration file: {error}") else: # Write initial configuration file if it does not exist config_file_path.parent.mkdir(parents=True, exist_ok=True) with config_file_path.open("w") as fd: fd.write(textwrap.dedent(default_config).lstrip("\n")) return config_dict
a7a53382dd43023b74fbb88b9c2540499c9beb4f
8,900
def type_weapon(stage, bin, data=None):
    """Weapon"""
    if data is None:
        return 1
    if stage == 1:
        return (str(data), '')
    try:
        v = int(data)
        if 0 > v or v > 255:
            raise
    except:
        raise PyMSError('Parameter', "Invalid Weapon value '%s', it must be 1 for ground attack or not 1 for air attack." % data)
    return v
51ad1c627b05b57ad67f5558bb76de3fe6e48f27
8,901
def to_square_feet(square_metres):
    """Convert metres^2 to ft^2"""
    return square_metres * 10.7639
50510aad230efcb47662936237a232662fef5596
8,902
def middle_name_handler(update: Update, context: CallbackContext) -> str:
    """Get and save patronymic of user. Send hello with full name."""
    u = User.get_user(update, context)

    name = (f'{context.user_data[LAST_NAME]} {context.user_data[FIRST_NAME]} '
            f'{context.user_data[MIDDLE_NAME]}')
    context.bot.send_message(
        chat_id=u.user_id,
        text=static_text.HELLO_FULL_NAME.format(name=name)
    )

    update.message.reply_text(
        text=static_text.ASK_GENDER,
        parse_mode=ParseMode.HTML,
        reply_markup=keyboard_utils.get_keyboard_for_gender()
    )
    return GENDER
dab2144282aeb63c2a3c4218236d04c3bb940ac8
8,903
def submit_barcodes(barcodes):
    """
    Submits a set of {release1: barcode1, release2: barcode2}

    Must call auth(user, pass) first
    """
    query = mbxml.make_barcode_request(barcodes)
    return _do_mb_post("release", query)
6e975e791196ed31ef6f52cdd0ca04d71a8d19eb
8,904
from collections import defaultdict
from functools import partial
from itertools import chain
from math import log
from multiprocessing import Pool
from typing import Counter

def get_idf_dict(arr, tokenizer, nthreads=4):
    """
    Returns mapping from word piece index to its inverse document frequency.

    Args:
        - :param: `arr` (list of str) : sentences to process.
        - :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
        - :param: `nthreads` (int) : number of CPU threads to use
    """
    idf_count = Counter()
    num_docs = len(arr)

    process_partial = partial(process, tokenizer=tokenizer)

    with Pool(nthreads) as p:
        idf_count.update(chain.from_iterable(p.map(process_partial, arr)))

    idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
    idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
    return idf_dict
e98a9578695781e4965b36d713c4c0a4351e53da
8,905
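The IDF smoothing used above is log((num_docs + 1) / (df + 1)), with log(num_docs + 1) as the default for unseen tokens. A minimal stand-in (whitespace tokens instead of BERT word pieces) shows the arithmetic:

from collections import Counter
from math import log

docs = ["the cat", "the dog", "a cat"]
df = Counter()
for doc in docs:
    df.update(set(doc.split()))   # document frequency, not raw counts

idf = {tok: log((len(docs) + 1) / (c + 1)) for tok, c in df.items()}
print(round(idf["the"], 3))   # log(4/3) ~= 0.288
print(round(idf["dog"], 3))   # log(4/2) ~= 0.693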
import json

def load_id_json_file(json_path):
    """
    load the JSON file and get the data inside
    all this function does is to call json.load(f) inside a with statement

    Args:
        json_path (str): where the target JSON file is
    Return:
        ID list (list): all the data found in the file
    """
    with open(json_path, 'r') as f:
        return json.load(f)
fd0f7fb73636cdf407b4de3e1aa3ae66dcc8f964
8,906
from urllib.parse import urlparse

def check_github_scopes(exc: ResponseError) -> str:
    """
    Parse github3 ResponseError headers for the correct scopes and return a warning if the user is missing any.

    @param exc: The exception to process
    @returns: The formatted exception string
    """
    user_warning = ""
    has_wrong_status_code = exc.response.status_code not in (403, 404)
    if has_wrong_status_code:
        return user_warning

    token_scopes = get_oauth_scopes(exc.response)

    # Gist resource won't return X-Accepted-OAuth-Scopes for some reason, so this
    # string might be `None`; we discard the empty string if so.
    accepted_scopes = exc.response.headers.get("X-Accepted-OAuth-Scopes") or ""
    accepted_scopes = set(accepted_scopes.split(", "))
    accepted_scopes.discard("")

    request_url = urlparse(exc.response.url)
    if not accepted_scopes and request_url.path == "/gists":
        accepted_scopes = {"gist"}

    missing_scopes = accepted_scopes.difference(token_scopes)
    if missing_scopes:
        user_warning = f"Your token may be missing the following scopes: {', '.join(missing_scopes)}\n"
        # This assumes we're not on enterprise and 'api.github.com' == request_url.hostname
        user_warning += (
            "Visit Settings > Developer settings > Personal access tokens to add them."
        )

    return user_warning
ebb3fffcaddc792dac7c321d9029b5042a42be86
8,907
def user_login():
    """
    Decide which page to display.
    :return: uses the session info sent by the front end to show different pages
    """
    # get parameters
    name = session.get("name")
    if name is not None:
        return jsonify(errno=RET.OK, errmsg="True", data={"name": name})
    else:
        return jsonify(errno=RET.SESSIONERR, errmsg="User is not logged in")
213ad2338260364186c0539a9e995b84ee889b42
8,908
def sample_conditional(node: gtsam.GaussianConditional, N: int, parents: list = [], sample: dict = {}):
    """Sample from conditional"""
    # every node ~ exp(0.5*|R x + S p - d|^2)
    # calculate mean as inv(R)*(d - S p)
    d = node.d()
    n = len(d)
    rhs = d.reshape(n, 1)
    if len(parents) > 0:
        rhs = rhs - node.S() @ np.vstack([sample[p] for p in parents])
    # sample from conditional Gaussian
    invR = np.linalg.inv(node.R())
    return invR @ (rhs + np.random.normal(size=(n, N)))
b9ab05ea50eea05a779c6d601db4643a86b343d5
8,909
def _liftover_data_path(data_type: str, version: str) -> str:
    """
    Paths to liftover gnomAD Table.

    :param data_type: One of `exomes` or `genomes`
    :param version: One of the release versions of gnomAD on GRCh37
    :return: Path to chosen Table
    """
    return f"gs://gnomad-public-requester-pays/release/{version}/liftover_grch38/ht/{data_type}/gnomad.{data_type}.r{version}.sites.liftover_grch38.ht"
8da0f93c86568d56b3211bcb9e226b9cb495c8e2
8,910
def valueinfo_to_tensor(vi):
    """Creates an all-zeroes numpy tensor from a ValueInfoProto."""
    dims = [x.dim_value for x in vi.type.tensor_type.shape.dim]
    return np.zeros(
        dims, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
    )
b814373e7c9d4f1e43f9d1af0c6e48b82989602e
8,911
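A usage sketch for valueinfo_to_tensor above, building a ValueInfoProto with onnx.helper (note that onnx.mapping is deprecated in recent onnx releases but still available):

import onnx

vi = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [2, 3])
t = valueinfo_to_tensor(vi)
print(t.shape, t.dtype)   # (2, 3) float32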
def signup_email(): """Create a new account using data encoded in the POST body. Expects the following form data: first_name: E.g. 'Taylor' last_name: E.g. 'Swift' email: E.g. '[email protected]' password: E.g. 'iknewyouweretrouble' Responds with the session cookie via the `set-cookie` header on success. Send the associated cookie for all subsequent API requests that accept user authentication. """ # Prevent a CSRF attack from replacing a logged-in user's account with # a new account with known credentials current_user = view_helpers.get_current_user() if current_user: return api_util.jsonify({'message': 'A user is already logged in.'}) params = flask.request.form.copy() # Don't log the password password = params.pop('password', None) rmclogger.log_event( rmclogger.LOG_CATEGORY_API, rmclogger.LOG_EVENT_SIGNUP, { 'params': params, 'type': rmclogger.LOGIN_TYPE_STRING_EMAIL, }, ) first_name = params.get('first_name') last_name = params.get('last_name') email = params.get('email') if not first_name: raise api_util.ApiBadRequestError('Must provide first name.') if not last_name: raise api_util.ApiBadRequestError('Must provide last name.') if not email: raise api_util.ApiBadRequestError('Must provide email.') if not password: raise api_util.ApiBadRequestError('Must provide password.') try: user = m.User.create_new_user_from_email( first_name, last_name, email, password) except m.User.UserCreationError as e: raise api_util.ApiBadRequestError(e.message) view_helpers.login_as_user(user) return api_util.jsonify({ 'message': 'Created and logged in user %s' % user.name })
e3ecca4bd244d1d20ad166a153a6c3f5c80f4876
8,912
def calculate_multi_rmse(regressor, n_task):
    """
    Calculates the root mean squared error value for a trained model
    using the regressor attributes.
    Returns RMSE metrics as a dict for the train and test datasets.

    :param regressor: trained regression model object
    :param n_task:
    :type regressor: TrainedModel, TrainedModelDNN, TrainedModelCV
    :return: rmse metrics
    :rtype: dict
    """
    # calculate mse metric
    test_mse_tmp = mean_squared_error(
        regressor.y_test.values[:, n_task],
        regressor.predict_classes['test'][:, n_task]
    )
    train_mse_tmp = mean_squared_error(
        regressor.y_train.values[:, n_task],
        regressor.predict_classes['train'][:, n_task]
    )
    # convert mse to rmse
    return {
        (str(n_task), 'train', 'RMSE'): train_mse_tmp ** 0.5,
        (str(n_task), 'test', 'RMSE'): test_mse_tmp ** 0.5,
    }
53daee6abb97a96af44831df59767a447fd2786e
8,913
import torch
import torchvision.transforms as T

def detr_predict(model, image, thresh=0.95):
    """
    Function used to preprocess the image, feed it into the detr model, and prepare the output to draw bounding boxes.
    Outputs are thresholded.
    Related functions: detr_load, draw_boxes in coco.py

    Args:
        model -- the detr model from detr_load()
        image -- Array the original image from openCV [width, height, channels]

    Returns:
        boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]
        labels -- Torch tensor of index labels for each bounding box [<label indices>]
        scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes
    """

    def box_cxcywh_to_xyxy(x):
        # Converts bounding boxes to (x1, y1, x2, y2) coordinates of top left and bottom right corners
        # (center_x, center_y, h, w)
        x_c, y_c, w, h = x.unbind(1)
        b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
             (x_c + 0.5 * w), (y_c + 0.5 * h)]
        return torch.stack(b, dim=1)

    def rescale_bboxes(out_bbox, size):
        # Scale the bounding boxes to the image size
        img_w, img_h = size
        b = box_cxcywh_to_xyxy(out_bbox)
        b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
        return b

    # Preprocess image
    transform = T.Compose([
        T.ToPILImage(),
        T.Resize(800),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    t_image = transform(image).unsqueeze(0)

    # output is a dict containing "pred_logits" of [batch_size x num_queries x (num_classes + 1)]
    # and "pred_boxes" of shape (center_x, center_y, height, width) normalized to be between [0, 1]
    output = model(t_image)

    # Scale the class probabilities to add up to 1
    probas = output['pred_logits'].softmax(-1)[0, :, :-1]

    # Create outputs
    boxes = rescale_bboxes(output['pred_boxes'][0], (image.shape[1], image.shape[0])).detach()
    labels = probas.max(-1).indices
    conf = probas.max(-1).values.detach()

    ### Threshold scores
    conf = conf.detach()
    keep = conf > thresh

    # Filter out scores, boxes, and labels using threshold
    conf = conf[keep]
    boxes = boxes.detach()[keep]
    labels = labels.detach()[keep]

    return boxes, labels, conf
394824358138eb66b69569963b21ccc2d0f5a4d3
8,914
def comp_fill_factor(self):
    """Compute the fill factor of the winding"""
    if self.winding is None:
        return 0
    else:
        (Nrad, Ntan) = self.winding.get_dim_wind()
        S_slot_wind = self.slot.comp_surface_wind()
        S_wind_act = (
            self.winding.conductor.comp_surface_active()
            * self.winding.Ntcoil
            * Nrad
            * Ntan
        )
        return S_wind_act / S_slot_wind
55be8ac7aa2961ad970cd16de961fdcf857016fd
8,915
def idewpt(vp):
    """
    Calculate the dew point given the vapor pressure

    Args:
        vp - array of vapor pressure values in [Pa]

    Returns:
        dewpt - array same size as vp of the calculated
            dew point temperature [C] (see Dingman 2002).
    """
    # ensure that vp is a numpy array
    vp = np.array(vp)

    # take the log and convert to kPa
    vp = np.log(vp / float(1000))

    # calculate the dew point
    Td = (vp + 0.4926) / (0.0708 - 0.00421 * vp)

    return Td
68b58d7702a50472a4851e1a7ecdd6ba13fe540a
8,916
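Two spot checks of idewpt above against familiar saturation vapor pressures: roughly 611 Pa corresponds to a dew point near 0 deg C and roughly 2339 Pa to near 20 deg C.

import numpy as np

print(np.round(idewpt([611.0, 2339.0]), 1))   # approximately [ 0.  20.]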
def _hexify(num):
    """
    Converts and formats to hexadecimal
    """
    num = "%x" % num
    if len(num) % 2:
        num = '0' + num
    # str.decode('hex') is Python 2 only; bytes.fromhex is the Python 3 equivalent
    return bytes.fromhex(num)
71fabff1191f670ec503c76a3be916636e8045ce
8,917
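_hexify pads to an even number of hex digits before converting, so the result is whole bytes; for example:

print(_hexify(255))    # b'\xff'
print(_hexify(4095))   # b'\x0f\xff'  (odd-length 'fff' is left-padded to '0fff')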
def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data): """ Returns a recommendation via UCB-PE in the synchronous setting. """ # Define some internal functions. beta_th = _get_ucb_beta_th(gp.input_dim, anc_data.t) # 1. An LCB for the function def _ucbpe_lcb(x): """ An LCB for GP-UCB-PE. """ mu, sigma = gp.eval(x, uncert_form='std') return mu - beta_th * sigma # 2. A modified UCB for the function using hallucinated observations def _ucbpe_2ucb(x): """ An LCB for GP-UCB-PE. """ mu, sigma = gp.eval(x, uncert_form='std') return mu + 2 * beta_th * sigma # 3. UCB-PE acquisition for the 2nd point in the batch and so on. def _ucbpe_acq(x, yt_dot, halluc_pts): """ Acquisition for GP-UCB-PE. """ _, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts, uncert_form='std') return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds # Now the algorithm yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data) yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, gp.input_dim))) recommendations = [asy_ucb(gp, acq_optimiser, anc_data)] for _ in range(1, num_workers): curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations)) new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data) recommendations.append(new_rec) return recommendations
2c12a608c87d61f64b219aaf301189b6c8ee73a2
8,918
def get_reward(intervention, state, time):
    """Compute the reward based on the observed state and chosen intervention."""
    A_1, A_2, A_3 = 60, 500, 60
    C_1, C_2, C_3, C_4 = 25, 20, 30, 40
    discount = 4.0 / 365

    cost = (
        A_1 * state.asymptomatic_humans
        + A_2 * state.symptomatic_humans
        + A_3 * state.mosquito_population
    )

    cost += 0.5 * (
        C_1 * intervention.updates["treated_bednet_use"] ** 2
        + C_2 * intervention.updates["condom_use"] ** 2
        + C_3 * intervention.updates["treatment_of_infected"] ** 2
        + C_4 * intervention.updates["indoor_spray_use"] ** 2
    )

    return -cost * np.exp(-discount * time)
72803b1a5f09d0856d29601bc766b6787a8255e7
8,919
def array_of_floats(f):
    """Read an entire file of text as a list of floating-point numbers."""
    words = f.read().split()
    return [builtin_float(x) for x in words]
8b357afb3f977761118f7df2632a4f1c198d721a
8,920
def change_currency():
    """
    Change user's currency
    """
    form = CurrencyForm()

    if form.validate_on_submit():
        currency = form.rate.data
        redirected = redirect(url_for('cashtrack.overview'))
        redirected.set_cookie('filter', currency)
        symbol = rates[currency]['symbol']
        flash(f'Currency has been changed to {currency} ({symbol})', 'success')
        return redirected

    return rnd_tmp('currency.html', form=form, rates=rates)
08a23e47a603ee5d5e49cff0259a83f4a2ffc3e0
8,921
def q2_1(df: pd.DataFrame) -> int:
    """
    Finds # of entries in df
    """
    # `df.size` is a plain int (total cell count) and is not subscriptable;
    # the number of entries (rows) is the first element of `df.shape`
    return df.shape[0]
d98a3d5592994e7dd3758dfab683cb96b532ce6d
8,923
def is_shell(command: str) -> bool:
    """Check if command is shell."""
    return command.startswith(get_shell())
0cc1497dc17e1535fdfb23c1b160bfcd63141eb1
8,924
def board_init():
    """
    Initializes board with all available values 1-9 for each cell
    """
    board = [[[i for i in range(1, n + 1)] for j in range(n)] for k in range(n)]
    return board
e4b7192c02e298de915eb3024f32f194942a061b
8,926
# assumed: `s` is hypothesis.strategies, matching the s.lists / s.integers usage below
from hypothesis import strategies as s

def gen_int_lists(num):
    """
    Generate num list strategies of integers
    """
    return [
        s.lists(s.integers(), max_size=100)
        for _ in range(num)
    ]
f1bd151a09f78b1eee9803ce2a077a4f01d34aaa
8,927
from google.cloud import storage

def is_blob(bucket: str, file: str):
    """ checking if it's a blob """
    client = storage.Client()
    blob = client.get_bucket(bucket).get_blob(file)
    return hasattr(blob, 'exists') and callable(getattr(blob, 'exists'))
ba9bb07f1f15175a28027907634c37b402c6b292
8,928
from typing import Union

def _is_whitelisted(name: str, doc_obj: Union['Module', 'Class']):
    """
    Returns `True` if `name` (relative or absolute refname) is contained in
    some module's __pdoc__ with a truish value.
    """
    refname = doc_obj.refname + '.' + name
    module = doc_obj.module
    while module:
        qualname = refname[len(module.refname) + 1:]
        if module.__pdoc__.get(qualname) or module.__pdoc__.get(refname):
            return True
        module = module.supermodule
    return False
c54c69ae0180c1764c8885d00e96640f1bfff0f8
8,930
import copy

def permute_bond_indices(atomtype_vector):
    """
    Permutes the set of bond indices of a molecule according to the complete set of valid molecular permutation cycles

    atomtype_vector: array-like
        A vector of the number of each atoms, the length is the total number of atoms.
        An A3B8C system would be [3, 8, 1]

    Returns many sets of permuted bond indices, the number of which equals the number of cycles
    """
    natoms = sum(atomtype_vector)
    bond_indices = generate_bond_indices(natoms)
    cycles_by_atom = molecular_cycles(atomtype_vector)

    bond_indice_permutations = []  # interatomic distance matrix permutations
    for atom in cycles_by_atom:
        for cycle in atom:
            tmp_bond_indices = copy.deepcopy(bond_indices)  # need a deep copy, list of lists
            for subcycle in cycle:
                for i, bond in enumerate(tmp_bond_indices):
                    tmp_bond_indices[i] = permute_bond(bond, subcycle)
            bond_indice_permutations.append(tmp_bond_indices)

    return bond_indice_permutations
ebf398e55d8a80a2e4ce2cef4f48d957e47d68a3
8,931
def get_cell_integer_param(device_resources, cell_data, name, force_format=None):
    """
    Retrieves definition and decodes value of an integer cell parameter. The
    function can optionally force a specific encoding format if needed.
    """
    # Get the parameter definition to determine its type
    param = device_resources.get_parameter_definition(cell_data.cell_type, name)

    # Force the format if requested by substituting the parameter
    # definition object.
    if not param.is_integer_like() and force_format is not None:
        if force_format != param.string_format:
            param = ParameterDefinition(
                name=name,
                string_format=force_format,
                default_value=cell_data.attributes[name])

    # Decode
    return param.decode_integer(cell_data.attributes[name])
6ab281004f324e8c40e176d5676cd7e42f50eaa9
8,932
import hashlib def get_md5(filename): """ Calculates the MD5 sum of the passed file Args: filename (str): File to hash Returns: str: MD5 hash of file """ # Size of buffer in bytes BUF_SIZE = 65536 md5 = hashlib.md5() # Read the file in 64 kB blocks with open(filename, "rb") as f: while True: data = f.read(BUF_SIZE) if not data: break md5.update(data) return md5.hexdigest()
c43538aee954f670c671c2e26e18f4a17e298455
8,933
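A usage sketch for get_md5: hash a small temporary file and compare against hashlib computed directly on the same bytes.

import hashlib
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
    path = tmp.name

assert get_md5(path) == hashlib.md5(b"hello world").hexdigest()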
def is_recurrent(sequence):
    """
    Returns true if the given sequence is recurrent (some element occurs more than once),
    otherwise returns false.

    Example
    ---------
    >>> sequence = [1,2,3,4,5]
    >>> ps.is_recurrent(sequence)
    False
    >>> sequence = [1,1,2,2,3]
    >>> ps.is_recurrent(sequence)
    True
    """
    element_counts = get_element_counts(sequence)
    return any(count > 1 for count in element_counts.values())
e123ddd960b262651b20e54ccbd3d5b11fe3695e
8,935
import numpy as np
import torch


def flex_stack(items, dim=0):
    """Stack a list of homogeneous items (lists, torch tensors or numpy arrays) along `dim`."""
    if len(items) < 1:
        raise ValueError("items is empty")

    if len(set([type(item) for item in items])) != 1:
        raise TypeError("items are not of the same type")

    if isinstance(items[0], list):
        return items
    elif isinstance(items[0], torch.Tensor):
        return torch.stack(items, dim=dim)
    elif isinstance(items[0], np.ndarray):
        return np.stack(items, axis=dim)
    else:
        raise TypeError(f"Unrecognized type {type(items[0])}")
47ca0e47647ce86619f1cdc86eef560fbbb9304e
8,936
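A small demonstration of flex_stack on each supported input type (relies on the torch and numpy imports above).

import numpy as np
import torch

print(flex_stack([torch.zeros(2), torch.ones(2)]).shape)  # torch.Size([2, 2])
print(flex_stack([np.zeros(3), np.ones(3)]).shape)        # (2, 3)
print(flex_stack([[1, 2], [3, 4]]))                       # plain lists are returned unchanged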
from pathlib import Path


def download_image_data(gpx_file, padding, square, min_lat, min_long, max_lat, max_long, cache_dir):
    """
    Download satellite imagery from USGS

    Args:
        gpx_file:   (str)   A file containing one or more tracks to use to determine the area of terrain to model
        padding:    (float) Padding to add around the GPX track, in miles
        square:     (bool)  Expand the bounding box to a square region
        min_lat     (float) Southern boundary of the region to model
        min_long    (float) Western boundary of the region to model
        max_lat     (float) Northern boundary of the region to model
        max_long    (float) Eastern boundary of the region to model
        cache_dir   (str)   Directory to download the files to
    """
    log = GetLogger()

    # Determine the bounds of the output
    if gpx_file:
        log.info("Parsing GPX file")
        gpx = GPXFile(gpx_file)
        try:
            min_lat, min_long, max_lat, max_long = gpx.GetBounds(padding, square)
        except ApplicationError as ex:
            log.error(ex)
            return False

    if None in (min_lat, min_long, max_lat, max_long):
        raise InvalidArgumentError("You must specify an area to download")
    log.info(f"Requested boundaries top(max_lat)={max_lat} left(min_long)={min_long} bottom(min_lat)={min_lat} right(max_long)={max_long}")

    # Get the image data
    cache_dir = Path(cache_dir)
    image_filename = Path(get_cropped_image_filename(max_lat, min_long, min_lat, max_long))
    try:
        get_image_data(image_filename, min_lat, min_long, max_lat, max_long, cache_dir)
    except ApplicationError as ex:
        log.error(ex)
        return False

    log.passed("Successfully downloaded images")
    return True
4ceef45da21622ab716031e8f68ed4724e168062
8,937
def find_nearest_values(array, value): """Find indexes of the two nearest values of an array to a given value Parameters ---------- array (numpy.ndarray) : array value (float) : value Returns ------- idx1 (int) : index of nearest value in the array idx2 (int) : index of second nearest value in the array """ # index of nearest value in the array idx1 = (np.abs(array-value)).argmin() # check if value is bigger or smaller than nearest value if array[idx1] >= value: idx2 = idx1 - 1 else: idx2 = idx1 + 1 return idx1, idx2
9c873692878ef3e4de8762bb89306e7ef907f90a
8,938
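A worked example for find_nearest_values on a sorted grid (the function itself assumes numpy is imported as np).

import numpy as np

grid = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
idx1, idx2 = find_nearest_values(grid, 2.3)
print(idx1, idx2)  # 2 3: grid[2]=2.0 is nearest, grid[3]=3.0 is second nearest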
def channel_info(channel_id):
    """ Get Slack channel info """
    info = slack_client.api_call("channels.info", channel=channel_id)
    if info:
        return info['channel']
    return None
260eeaa2849350e2ede331ddecd68aead798f76c
8,939
import logging
from functools import wraps
from typing import Any, Callable


def log(message: str) -> Callable:
    """Returns a decorator that logs an info message before the function call.

    Parameters
    ----------
    message : str
        message to log before the function call
    """
    def decorator(function: Callable) -> Callable:
        @wraps(function)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            logging.info(message)
            return function(*args, **kwargs)
        return wrapper
    return decorator
c8ed8f8119be8d6e80935d73034f752ad2cb1dd9
8,940
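A usage sketch for the log decorator.

import logging

logging.basicConfig(level=logging.INFO)

@log("computing the answer")
def answer() -> int:
    return 42

answer()  # logs "computing the answer", then returns 42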
def client(identity: PrivateIdentity) -> Client: """Client for easy access to iov42 platform.""" return Client(PLATFORM_URL, identity)
a0ad172765b50a76485bd3ec630a2c3ffeae85ef
8,941
def init_weights(module, init='orthogonal'): """Initialize all the weights and biases of a model. :param module: any nn.Module or nn.Sequential :param init: type of initialize, see dict below. :returns: same module with initialized weights :rtype: type(module) """ if init is None: # Base case, no change to default. return module init_dict = { 'xavier_uniform': nn.init.xavier_uniform_, 'xavier_normal': nn.init.xavier_normal_, 'orthogonal': nn.init.orthogonal_, 'kaiming_normal': nn.init.kaiming_normal_, 'kaiming_uniform': nn.init.kaiming_uniform_, } for m in module.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): # print("initializing {} with {} init.".format(m, init)) init_dict[init](m.weight) if hasattr(m, 'bias') and m.bias is not None: # print("initial bias from ", m, " with zeros") nn.init.constant_(m.bias, 0.0) elif isinstance(m, (nn.Sequential, nn.ModuleList, nn.ModuleDict)): for mod in m: init_weights(mod, init) return module
e8cd95743b8a36dffdb53c7f7b9723e896d2071d
8,942
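A minimal sketch of init_weights on a small model; it assumes the surrounding module imports torch.nn as nn, as the function itself does.

import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
model = init_weights(model, init="orthogonal")  # orthogonal weights, zero biases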
def getsoundchanges(reflex, root): # requires two ipastrings as input """ Takes a modern-day L1 word and its reconstructed form and returns \ a table of sound changes. :param reflex: a modern-day L1-word :type reflex: str :param root: a reconstructed proto-L1 word :type root: str :return: table of sound changes :rtype: pandas.core.frame.DataFrame :Example: >>> from loanpy import reconstructor as rc >>> rc.getsoundchanges("ɟɒloɡ", "jɑlkɑ") +---+--------+------+ | # | reflex | root | +---+--------+------+ | 0 | #0 | 0 | +---+--------+------+ | 1 | #ɟ | j | +---+--------+------+ | 2 | ɒ | ɑ | +---+--------+------+ | 3 | l | lk | +---+--------+------+ | 4 | o | ɑ | +---+--------+------+ | 5 | ɡ# | 0 | +---+--------+------+ """ reflex = ipa2clusters(reflex) root = ipa2clusters(root) reflex[0], reflex[-1] = "#" + reflex[0], reflex[-1] + "#" reflex, root = ["#0"] + reflex, ["0"] + root if reflex[1][1:] in vow and root[1] in cns: root = root[1:] elif reflex[1][1:] in cns and root[1] in vow: reflex = reflex[1:] diff = abs(len(root) - len(reflex)) # "a,b","c,d,e,f,g->"a,b,000","c,d,efg if len(reflex) < len(root): reflex += ["0#"] root = root[:-diff] + ["".join(root[-diff:])] elif len(reflex) > len(root): root += ["0"] reflex = reflex[:-diff] + ["".join(reflex[-diff:])] else: reflex, root = reflex + ["0#"], root + ["0"] return pd.DataFrame({"reflex": reflex, "root": root})
8230e836e109ed8453c6fdbc72e6a4f77833f69b
8,943
def compute_normals(filename, datatype='cell'):
    """
    Given a file, this method computes the surface normals of the mesh stored
    in the file. It can compute the normals of either the cells or the points.
    The normal computed in a point is the interpolation of the cell normals of
    the cells adjacent to the point.

    :param str filename: the name of the file to parse in order to extract
        the geometry information.
    :param str datatype: indicate if the normals have to be computed for the
        points or the cells. The allowed values are: 'cell', 'point'. Default
        value is 'cell'.
    :return: the array that contains the normals.
    :rtype: numpy.ndarray
    """
    points, cells = FileHandler(filename).get_geometry(get_cells=True)
    normals = np.array(
        [normalize(normal(*points[cell][0:3])) for cell in cells])

    if datatype == 'point':
        normals_cell = np.empty((points.shape[0], 3))

        for i_point in np.arange(points.shape[0]):
            cell_adjacent = [cells.index(c) for c in cells if i_point in c]
            normals_cell[i_point] = normalize(
                np.mean(normals[cell_adjacent], axis=0))

        normals = normals_cell

    return normals
e0cfc90a299f6db52d9cec2f39eebfc96158265c
8,944
from typing import List, Optional


def build_layers_url(
    layers: List[str], *, size: Optional[LayerImageSize] = None
) -> str:
    """Convenience method to make the server-side-rendering URL of the provided layer URLs.

    Parameters
    -----------
    layers: List[:class:`str`]
        The image URLs, in ascending order of Zone ID.
    size: Optional[:class:`LayerImageSize`]
        The desired size for the render. If one is not supplied, it defaults
        to `LayerImageSize.SIZE_600`.
    """
    size_str = str(size or LayerImageSize.SIZE_600)[-3:]
    joined = ",".join(quote(layer) for layer in layers)

    return f"https://impress-2020.openneo.net/api/outfitImage?size={size_str}&layerUrls={joined}"
2cc7ab58af2744a4c898903d9a035c77accbae2e
8,945
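A usage sketch for build_layers_url; the layer URLs are placeholders, and the call assumes the LayerImageSize enum referenced by the function is available in its module.

urls = [
    "https://images.neopets.com/layer_1.png",
    "https://images.neopets.com/layer_2.png",
]
print(build_layers_url(urls))  # defaults to a 600px render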
def SyncBatchNorm(*args, **kwargs):
    """In a CPU environment nn.SyncBatchNorm has no kernel, so use nn.BatchNorm2D instead."""
    if paddle.get_device() == 'cpu':
        return nn.BatchNorm2D(*args, **kwargs)
    else:
        return nn.SyncBatchNorm(*args, **kwargs)
f08a7141700b36286893bbbc82b28686d1ca88a9
8,946
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get(uuid, local_id): # noqa: E501 """data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get returns tapi.common.CapacityValue # noqa: E501 :param uuid: Id of connectivity-service :type uuid: str :param local_id: Id of end-point :type local_id: str :rtype: TapiCommonCapacityValue """ return 'do some magic!'
340189bc76bdbbc14666fe542aa05d467c7d4898
8,947
import re


def parse_path_length(path):
    """ Count the 'S<digits>' segments in a path string """
    matched_tmp = re.findall(r"(S\d+)", path)
    return len(matched_tmp)
762e2b86fe59689800ed33aba0419f83b261305b
8,948
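A quick check of parse_path_length; the path format here ("S<number>" segments joined by arrows) is an assumption for illustration.

print(parse_path_length("S1->S4->S7"))  # 3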
def check_permisions(request, allowed_groups):
    """ Return whether the requesting user is allowed access. """
    try:
        profile = request.user.id
        print('User', profile, allowed_groups)
        is_allowed = True
    except Exception:
        return False
    else:
        return is_allowed
4bdb54bd1edafd7a0cf6f50196d470e0d3425c66
8,949
def kanji2digit(s):
    """ Convert kanji numerals from 1 to 99 into Arabic numerals. """
    k2d = lambda m, i: _kanjitable[m.group(i)]
    s = _re_kanjijiu1.sub(lambda m: k2d(m,1) + k2d(m,2), s)
    s = _re_kanjijiu2.sub(lambda m: u'1' + k2d(m,1), s)
    s = _re_kanji.sub(lambda m: k2d(m,1), s)
    s = s.replace(u'十', u'10')
    return s
27589cee8a9b4f14ad7120061f05077b736b8632
8,950
def load_featurizer(pretrained_local_path): """Load pretrained model.""" return CNN_tf("vgg", pretrained_local_path)
1f39acdae01e484302d8f8051c2f55a178aa2301
8,952
from templateflow.conf import setup_home def make_cmdclass(basecmd): """Decorate setuptools commands.""" base_run = basecmd.run def new_run(self): setup_home() base_run(self) basecmd.run = new_run return basecmd
dc66370f19e2d1b3dbc2da3942f8923a07d8d9a6
8,954
import numpy as np


def rmse(predictions, targets):
    """Compute root mean squared error"""
    return np.sqrt(((predictions - targets) ** 2).mean())
1a5fe824c5ef768f3df34463724fdd057d37901a
8,955
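A sanity check for rmse on two small arrays.

import numpy as np

print(rmse(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 5.0])))  # ~1.1547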
import math def format_timedelta(value, time_format=None): """ formats a datetime.timedelta with the given format. Code copied from Django as explained in http://stackoverflow.com/a/30339105/932593 """ if time_format is None: time_format = "{days} days, {hours2}:{minutes2}:{seconds2}" if hasattr(value, 'seconds'): seconds = value.seconds + value.days * 24 * 3600 else: seconds = int(value) seconds_total = seconds minutes = int(math.floor(seconds / 60)) minutes_total = minutes seconds -= minutes * 60 hours = int(math.floor(minutes / 60)) hours_total = hours minutes -= hours * 60 days = int(math.floor(hours / 24)) days_total = days hours -= days * 24 years = int(math.floor(days / 365)) years_total = years days -= years * 365 return time_format.format(**{ 'seconds': seconds, 'seconds2': str(seconds).zfill(2), 'minutes': minutes, 'minutes2': str(minutes).zfill(2), 'hours': hours, 'hours2': str(hours).zfill(2), 'days': days, 'years': years, 'seconds_total': seconds_total, 'minutes_total': minutes_total, 'hours_total': hours_total, 'days_total': days_total, 'years_total': years_total, })
0ee6a48e0eee5e553e665d44173f0a4843b4007f
8,956
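A worked example of format_timedelta with the default and a custom format string.

from datetime import timedelta

delta = timedelta(days=1, hours=2, minutes=3, seconds=4)
print(format_timedelta(delta))                                # '1 days, 02:03:04'
print(format_timedelta(delta, "{hours_total}h {minutes2}m"))  # '26h 03m'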
import chex
import jax.numpy as jnp


def categorical_log_likelihood(probs: chex.Array, labels: chex.Array):
    """Computes joint log likelihood based on probs and labels."""
    num_data, unused_num_classes = probs.shape
    assert len(labels) == num_data
    assigned_probs = probs[jnp.arange(num_data), jnp.squeeze(labels)]
    return jnp.sum(jnp.log(assigned_probs))
6209fc59dc6a76f8afc49788b9e5c5a11f58354f
8,957
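A small check of categorical_log_likelihood (relies on the jax.numpy import above).

import jax.numpy as jnp

probs = jnp.array([[0.7, 0.3], [0.2, 0.8]])
labels = jnp.array([[0], [1]])
print(categorical_log_likelihood(probs, labels))  # log(0.7) + log(0.8) ≈ -0.58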
def ask_name(question: str = "What is your name?") -> str: """Ask for the users name.""" return input(question)
1cc9ec4d3bc48d7ae4be1b2cf8eb64a0b4f94b23
8,958
from typing import Sequence


def _maxcut(g: Graph, values: Sequence[int]) -> float:
    """
    Evaluate the cut defined by assigning $$\pm 1$$ to each vertex.

    :param g: graph whose edges may carry a ``weight`` attribute (default 1.0)
    :param values: sequence of $$\pm 1$$ assignments, indexed by vertex
    :return: the total weight of edges crossing the cut
    """
    cost = 0
    for e in g.edges:
        cost += g[e[0]][e[1]].get("weight", 1.0) / 2 * (1 - values[e[0]] * values[e[1]])
    return cost
1ca8d2cfce6a741fb4eab55f7fcd9d9db5e3578f
8,959
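A usage sketch for _maxcut on a tiny graph; it assumes the Graph in the signature is networkx.Graph, so networkx is used here for illustration.

import networkx as nx

g = nx.Graph()
g.add_edge(0, 1, weight=1.0)
g.add_edge(1, 2, weight=2.0)
print(_maxcut(g, [1, -1, 1]))  # 1.0/2*(1+1) + 2.0/2*(1+1) = 3.0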
def cp_als(X, rank, random_state=None, init='randn', **options): """Fits CP Decomposition using Alternating Least Squares (ALS). Parameters ---------- X : (I_1, ..., I_N) array_like A tensor with ``X.ndim >= 3``. rank : integer The `rank` sets the number of components to be computed. random_state : integer, ``RandomState``, or ``None``, optional (default ``None``) If integer, sets the seed of the random number generator; If RandomState instance, random_state is the random number generator; If None, use the RandomState instance used by ``numpy.random``. init : str, or KTensor, optional (default ``'randn'``). Specifies initial guess for KTensor factor matrices. If ``'randn'``, Gaussian random numbers are used to initialize. If ``'rand'``, uniform random numbers are used to initialize. If KTensor instance, a copy is made to initialize the optimization. options : dict, specifying fitting options. tol : float, optional (default ``tol=1E-5``) Stopping tolerance for reconstruction error. max_iter : integer, optional (default ``max_iter = 500``) Maximum number of iterations to perform before exiting. min_iter : integer, optional (default ``min_iter = 1``) Minimum number of iterations to perform before exiting. max_time : integer, optional (default ``max_time = np.inf``) Maximum computational time before exiting. verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``) Display progress. Returns ------- result : FitResult instance Object which holds the fitted results. It provides the factor matrices in form of a KTensor, ``result.factors``. Notes ----- Alternating Least Squares (ALS) is a very old and reliable method for fitting CP decompositions. This is likely a good first algorithm to try. References ---------- Kolda, T. G. & Bader, B. W. "Tensor Decompositions and Applications." SIAM Rev. 51 (2009): 455-500 http://epubs.siam.org/doi/pdf/10.1137/07070111X Comon, Pierre & Xavier Luciani & Andre De Almeida. "Tensor decompositions, alternating least squares and other tales." Journal of chemometrics 23 (2009): 393-405. http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract Examples -------- ``` import tensortools as tt I, J, K, R = 20, 20, 20, 4 X = tt.randn_tensor(I, J, K, rank=R) tt.cp_als(X, rank=R) ``` """ # Check inputs. optim_utils._check_cpd_inputs(X, rank) # Initialize problem. U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state) result = FitResult(U, 'CP_ALS', **options) # Main optimization loop. while result.still_optimizing: # Iterate over each tensor mode. for n in range(X.ndim): # i) Normalize factors to prevent singularities. U.rebalance() # ii) Compute the N-1 gram matrices. components = [U[j] for j in range(X.ndim) if j != n] grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components]) # iii) Compute Khatri-Rao product. kr = khatri_rao(components) # iv) Form normal equations and solve via Cholesky c = linalg.cho_factor(grams, overwrite_a=False) p = unfold(X, n).dot(kr) U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T # U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Update the optimization result, checks for convergence. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Compute objective function # grams *= U[-1].T.dot(U[-1]) # obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX obj = linalg.norm(U.full() - X) / normX # Update result result.update(obj) # Finalize and return the optimization result. 
return result.finalize()
b6402f03ba4e8be7d0abb2b13232d88b07a73be9
8,960