Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def trader_tactic_snapshot(symbol, strategy, end_dt=None, file_html=None, fq=True, max_count=1000): """Take a strategy snapshot of any instrument at any moment using JoinQuant data :param symbol: trading instrument :param strategy: market-timing strategy :param end_dt: end time, precise to the minute :param file_html: output file for the result :param fq: whether to use adjusted prices :param max_count: maximum number of K-line bars :return: trader """ tactic = strategy(symbol) base_freq, freqs = tactic['base_freq'], tactic['freqs'] bg, data = get_init_bg(symbol, end_dt, base_freq=base_freq, freqs=freqs, max_count=max_count, fq=fq) trader = create_advanced_trader(bg, data, strategy) if file_html: trader.take_snapshot(file_html) print(f'saved into {file_html}') else: trader.open_in_browser() return trader
0b979008f3c950ed41aa85eff2031e6cb34b8685
6,700
def preprocess(batch): """ Add zero-padding to a batch. """ tags = [example.tag for example in batch] # add zero-padding to make all sequences equally long seqs = [example.words for example in batch] max_length = max(map(len, seqs)) seqs = [seq + [PAD] * (max_length - len(seq)) for seq in seqs] return seqs, tags
832b6453714e7b7eb23271b771bbc156a09d3784
6,701
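A minimal usage sketch for the padding helper in the row above; the Example container and PAD = 0 are illustrative assumptions, not part of the original snippet:
from collections import namedtuple

PAD = 0  # assumed padding token id
Example = namedtuple("Example", ["words", "tag"])  # hypothetical record type

batch = [Example(words=[3, 7, 2], tag="A"), Example(words=[5], tag="B")]
seqs, tags = preprocess(batch)
print(seqs)  # [[3, 7, 2], [5, 0, 0]] -- every sequence padded to the longest length
print(tags)  # ['A', 'B']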
def append_include_as(include_match): """Convert ``#include x`` to ``#include x as y``, where appropriate; also, convert incorrect "as" statements. See INCLUDE_AS dict for mapping from resource to its "as" target. Parameters ---------- include_match : re._pattern_type Match produced by INCLUDE_RE.match(string) Returns ------- repl : string Replacement text for whatever comes after the "#include " """ include_text = include_match.groups()[0] include_as_match = PISAConfigParser.INCLUDE_AS_RE.match(include_text) as_section = None if include_as_match: gd = include_as_match.groupdict() resource = gd['file'] as_section = gd['as'] else: resource = include_text if resource in INCLUDE_AS.keys(): as_section = INCLUDE_AS[resource] if as_section is None: repl = '#include ' + resource else: repl = '#include %s as %s' % (resource, as_section) return repl
0eaaa356efa33f0e64db6b3a843236fd15ebb66d
6,702
def get_user_profiles(page=1, limit=10): """Retrieves a list of user profiles. :param page: Page number :type page: int :param limit: Maximum number of results to show :type limit: int :returns: JSON string of list of user profiles; status code :rtype: (str, int) """ # initialize query query = Query.make( UserProfile, UserProfile.id.asc(), { 'id.asc': UserProfile.id.asc(), 'id.desc': UserProfile.id.desc(), 'user_id.asc': UserProfile.user_id.asc(), 'user_id.desc': UserProfile.user_id.desc(), 'joined_at.asc': UserProfile.joined_at.asc(), 'joined_at.desc': UserProfile.joined_at.desc(), }, request.args, Query.STATUS_FILTER_ADMIN) # retrieve and return results results = list(query.limit(limit).offset((page - 1) * limit)) if len(results) > 0: # prep initial output output = { 'user_profiles': UserProfileAdminSchema(many=True).dump(results), 'page': page, 'limit': limit, 'total': query.count() } # add pagination URIs and return output.update( Pager.get_uris('admin_user_profiles.get_user_profiles', page, limit, output['total'], request.args)) return jsonify(output), 200 return '', 204
7bcfe33925e49a90ac593fad53e4e169b107886e
6,703
import sqlite3 def _repository(): """Helper dependency injection""" db = sqlite3.connect('covid_database.db', isolation_level=None) return CovidTestDataRepository(db)
e30d30f3b4f9673df4863d261655aa6f38a527d7
6,704
def f(x): """Cubic function.""" return x**3
13832221de3490dbd92f4f1a26854baec7010023
6,705
from ruia_cache.response import CacheResponse import os def req_cache(*args, **kwargs): """ Cache decorate for `ruia.Request` class :param args: :param kwargs: :return: """ fetch_func = args[0] @wraps(fetch_func) async def wrapper(self, delay=True): cache_resp = None url_str = f"{self.url}:{self.method}" req_url_str = f"{url_str}:{self.request_config}" # Check whether the current path exists cache_path = gen_cache_dir() pro_dir = os.path.join(cache_path, self.spider_name) if not os.path.exists(pro_dir): create_cache_dir(pro_dir) req_file = f"{md5_encryption(string=req_url_str)}_req.ruia" resp_file = f"{md5_encryption(string=url_str)}_resp.ruia" req_url_path = os.path.join(pro_dir, req_file) resp_url_path = os.path.join(pro_dir, resp_file) pickle_ins = PickleSerializer() if os.path.exists(req_url_path) and os.path.exists(resp_url_path): # Get data locally try: async with aiofiles.open(req_url_path, mode="rb") as f: s_data = await f.read() data = pickle_ins.loads(s_data) cache_resp = data["cache_resp"] except Exception as e: logger.error( f"<Cache load failed: url: {self.url}, method: {self.method}, err: {e}>" ) else: # Delete already path os.remove(req_url_path) if os.path.exists(req_url_path) else None os.remove(resp_url_path) if os.path.exists(resp_url_path) else None # Make a request resp: Response = await fetch_func(self, delay) try: cache_resp = CacheResponse( url=resp.url, method=resp.method, encoding=resp.encoding, metadata=resp.metadata, cookies=resp.cookies, headers=dict(resp.headers), history=resp.history, status=resp.status, aws_json=None, aws_text=None, aws_read=None, ) cache_resp.spider_name = self.spider_name data = { "cache_resp": cache_resp, # "expire_time": time.time() + ttl, # "ttl": ttl, } s_data = pickle_ins.dumps(data) # Persist target data async with aiofiles.open(req_url_path, mode="wb+") as f: await f.write(s_data) logger.info( f"<Cache serialization successfully: " f"cache_path: {req_url_path} [url: {resp.url}, method: {resp.method}]>" ) cache_resp._source_resp = resp except Exception as e: logger.error( f"<Cache serialization failed: url: {resp.url}, method: {resp.method}, err: {e}>" ) return cache_resp return wrapper
626605babe46e16e7f9e9eada74141d3bb7e7eb4
6,706
from outliers import smirnov_grubbs as grubbs def correct_anomalies(peaks, alpha=0.05, save_name=""): """ Outlier peak detection (Grubb's test) and removal. Parameters ---------- peaks : array vector of peak locations alpha : real significance level for Grubb's test save_name : str filename to save peaks as to, empty does not save Results ------- corrected_peaks2 : array vector of corrected peak locations max_indices : array indices of original peaks marked as too slow min_indices : array indices of original peaks marked as too fast """ peak_diffs = abs(np.diff(peaks)) max_indices = grubbs.max_test_indices(peak_diffs, alpha=alpha) min_indices = grubbs.min_test_indices(peak_diffs, alpha=alpha) grubb_idxs = max_indices + min_indices # Compute representative difference based on its distribution mean_rr = np.mean( peak_diffs[[ii for ii in range(len(peak_diffs)) if ii not in grubb_idxs]] ) mean_rr = int(np.round(mean_rr)) corrected_peaks = peaks.copy() for ix in max_indices: n = int(np.round((peaks[ix + 1] - peaks[ix]) / mean_rr)) if n == 1: continue new_peaks = np.linspace(peaks[ix], peaks[ix + 1], n, dtype=int, endpoint=False)[1:] corrected_peaks = np.append(corrected_peaks, new_peaks) corrected_peaks = np.sort(corrected_peaks) corrected_peak_diffs = abs(np.diff(corrected_peaks)) min_indices = grubbs.min_test_indices(corrected_peak_diffs, alpha=alpha) # deleting peak such that resultant RR interval is furthest from mean RR # (i.e. gives longer RR interval) too_fast = np.array(min_indices) # index of peaks to delete (and then reinsert) peaks_to_replace = np.zeros_like(too_fast) new_peaks2 = np.zeros_like(too_fast, dtype=float) for index, i in enumerate(too_fast): # print(index, i) if i == (corrected_peak_diffs.size - 1): # if last RR interval (edge case) peaks_to_replace[index] = i # replace first peak # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replace first peak new_diff1 = corrected_peaks[i + 1] - corrected_peaks[i - 1] # replace second peak new_diff2 = corrected_peaks[i + 2] - corrected_peaks[i] if new_diff1 > new_diff2: # replacing first peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replacing second peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i + 1 # compute new diff_peak new_diff = (corrected_peaks[i + 2] - corrected_peaks[i])/2 new_peaks2[index] = corrected_peaks[i] + new_diff corrected_peaks2 = corrected_peaks.copy() np.put(corrected_peaks2, peaks_to_replace.astype(int), new_peaks2) # save peaks if save_name != "": np.savetxt(save_name, corrected_peaks2, delimiter=",") return corrected_peaks2, max_indices, min_indices
cf33123e963b245007c8be4777cecd1224d4e3fa
6,707
def svn_wc_walk_entries(*args): """ svn_wc_walk_entries(char path, svn_wc_adm_access_t adm_access, svn_wc_entry_callbacks_t walk_callbacks, void walk_baton, svn_boolean_t show_hidden, apr_pool_t pool) -> svn_error_t """ return apply(_wc.svn_wc_walk_entries, args)
791e0f635aa56329f78a1ed0f171217518f9be05
6,708
def dlp_to_datacatalog_builder( taskgroup: TaskGroup, datastore: str, project_id: str, table_id: str, dataset_id: str, table_dlp_config: DlpTableConfig, next_task: BaseOperator, dag, ) -> TaskGroup: """ Method for returning a Task Group for scannign a table with DLP, and creating BigQuery policy tags based on the results 1) Scan table with DLP and write results to BigQuery 2) Schedule future DLP 3) Read results of DLP scan from BigQuery 4) Update Policy Tags in BQ Returns the first task """ assert table_dlp_config.source_config is not None # setup tables vars dlp_results_dataset_id = table_dlp_config.source_config.results_dataset_id table_ref = TableReference(DatasetReference(project_id, dataset_id), table_id) dlp_results_table_ref = TableReference( DatasetReference(project_id, dlp_results_dataset_id), f"{table_id}_dlp_results" ) dlp_results_table = f"{dlp_results_table_ref.project}.{dlp_results_table_ref.dataset_id}.{dlp_results_table_ref.table_id}" # setup DLP scan vars dlp_template_name = table_dlp_config.get_template_name() rows_limit_percent = table_dlp_config.get_rows_limit_percent() inspect_job = build_inspect_job_config( dlp_template_name, table_ref, rows_limit_percent, dlp_results_table_ref ) # 1 First delete the results table delete_dlp_results = BigQueryDeleteTableOperator( task_id=f"delete_old_dlp_results_{datastore}", deletion_dataset_table=dlp_results_table, ignore_if_missing=True, task_group=taskgroup, dag=dag, ) # 2 Scan table scan_task = CloudDLPCreateDLPJobOperator( task_id=f"scan_table_{datastore}", project_id=project_id, inspect_job=inspect_job, wait_until_finished=True, task_group=taskgroup, dag=dag, ) # 4. Read results read_results_task = DlpBQInspectionResultsOperator( task_id=f"read_dlp_results_{datastore}", project_id=dlp_results_table_ref.project, dataset_id=dlp_results_table_ref.dataset_id, table_id=dlp_results_table_ref.table_id, do_xcom_push=True, min_match_count=table_dlp_config.get_min_match_count(), task_group=taskgroup, dag=dag, ) # 5. Update policy tags update_tags_task = PythonOperator( task_id=f"update_bq_policy_tags_{datastore}", python_callable=update_bq_policy_tags, # <--- PYTHON LIBRARY THAT COPIES FILES FROM SRC TO DEST task_group=taskgroup, dag=dag, templates_dict={ "dlp_results": f"{{{{ti.xcom_pull(task_ids='{read_results_task.task_id}')}}}}", # "dlp_results": "{{ti.xcom_pull(task_ids='dlp_policy_tags.read_dlp_results_test')}}", }, op_kwargs={ "project_id": project_id, "dataset_id": table_ref.dataset_id, "table_id": table_ref.table_id, "policy_tag_config": table_dlp_config.source_config.policy_tag_config, "task_ids": read_results_task.task_id, }, provide_context=True, ) delete_dlp_results >> scan_task >> read_results_task >> update_tags_task >> next_task return delete_dlp_results
53950a99dddc4f2a61dca12908c3b2a17a3765c4
6,709
import sys def upload_assessors(xnat, projects, resdir, num_threads=1): """ Upload all assessors to XNAT :param xnat: pyxnat.Interface object :param projects: list of projects to upload to XNAT :return: None """ # Get the assessor label from the directory : assessors_list = get_assessor_list(projects, resdir) number_of_processes = len(assessors_list) warnings = list() LOGGER.info(('Starting upload pool:{} threads'.format(str(num_threads)))) sys.stdout.flush() pool = Pool(processes=num_threads) for index, assessor_label in enumerate(assessors_list): LOGGER.info(index) sys.stdout.flush() pool.apply_async( upload_thread, [xnat, index, assessor_label, number_of_processes, resdir]) LOGGER.info('waiting for upload pool to finish...') sys.stdout.flush() pool.close() pool.join() LOGGER.info('upload pool finished') sys.stdout.flush() return warnings
38e230c784cfdf1774efd2194c0c475671c8f512
6,710
import textwrap def dedent(ind, text): """ Dedent text to the specific indentation level. :param ind: common indentation level for the resulting text (number of spaces to prepend to every line) :param text: text that should be transformed. :return: ``text`` with all common indentation removed, and then the specified amount of indentation added. """ text2 = textwrap.dedent(text) if ind == 0: return text2 indent_str = " " * ind return "\n".join(indent_str + line for line in text2.split("\n"))
271b9fd270d78c4bc952af31d3d9be0ff6bdab73
6,711
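A short usage sketch of the re-indentation helper above, assuming the dedent function from this row is in scope:
source = "    a = 1\n    b = 2"
print(dedent(0, source))  # "a = 1\nb = 2" -- common indentation stripped
print(dedent(2, source))  # "  a = 1\n  b = 2" -- re-indented by two spaces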
import os def since(timestamp=None, directory=os.getcwd()): # noqa WPS404, B008 """since.""" if not timestamp: return WRONG_ARGUMENT try: timestamp = int(timestamp) except Exception: return WRONG_ARGUMENT if not os.path.exists(directory): return 'dir not found' dir_content = ls(directory) if not dir_content: return 'dir is empty' return [ item_object for item_object in dir_content if os.stat('{0}/{1}'.format( directory, item_object, )).st_ctime > timestamp ]
9728e68aec54b7781fb07d7fdda243568c51eac3
6,712
def get_vendor(request): """ Returns the ``JSON`` serialized data of the requested vendor on ``GET`` request. .. http:get:: /get_vendor/ Gets the JSON serialized data of the requested vendor. **Example request**: .. sourcecode:: http GET /get_vendor/ HTTP/1.1 Host: localhost:8000 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9 :param vendor_id: Vendor primary key. **Example response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json; charset=utf-8 [ { "name": "Lug Vendor", "identifier": "TBPN-02692", "gstin": "89AAC4683897343", "address": { "name": "Kuame Burns", "address": "Nonummy Avenue", "city": "Chung Cheong", "phone": "679 166-3127", "state": "Guanacaste", "country": "tellusidnunc.net", "post": "8949" } } ] :resheader Content-Type: application/json :statuscode 200: List of vendors received successfully. :statuscode 400: Bad request version :statuscode 500: Vendor matching query does not exist. """ if request.method == 'GET': vendor_id = request.GET.get('vendor_id') vendor = VendorSerializer(Vendor.objects.get(id=vendor_id)) return JsonResponse(vendor.data)
e142854426ef406bdfe6e34d0629e80d49493c91
6,713
def ensure_bin_str(s): """assert type of s is basestring and convert s to byte string""" assert isinstance(s, basestring), 's should be string' if isinstance(s, unicode): s = s.encode('utf-8') return s
3ce171f02a371073c5474596da9c963b0a77a415
6,714
def _word_accuracy(pred_data, ref_data): """compute word-level accuracy""" pred_size = len(pred_data) ref_size = len(ref_data) if pred_size <= 0 or ref_size <= 0: raise ValueError("size of predict or reference data is less than or equal to 0") if pred_size != ref_size: raise ValueError("size of predict and reference data don't match") total_accuracy = 0.0 total_count = 0 for i in range(pred_size): pred_word = pred_data[i].strip().split(" ") ref_word = ref_data[i].strip().split(" ") pred_len = len(pred_word) ref_len = len(ref_word) match_count = 0 for j in range(min(pred_len, ref_len)): predict_word = pred_word[j] reference_word = ref_word[j] if predict_word == reference_word: match_count += 1 total_accuracy += 100.0 * match_count / max(pred_len, ref_len) total_count += 1 word_accuracy = total_accuracy / total_count return word_accuracy
c4abfcc439fca5d14b5edc8289ef9ee2d46807fe
6,715
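A quick check of the word-accuracy helper above; the sentences are made up for illustration:
pred = ["the cat sat", "on a mat"]
ref = ["the cat sits", "on a mat"]
print(_word_accuracy(pred, ref))  # (2/3 * 100 + 3/3 * 100) / 2 = 83.33...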
import logging import inspect import os def check_interface(interface: str) -> str: """ Check that the interface we've been asked to run on actually exists """ log = logging.getLogger(inspect.stack()[0][3]) discovered_interfaces = [] for iface in os.listdir("/sys/class/net"): iface_path = os.path.join("/sys/class/net", iface) if os.path.isdir(iface_path): if "phy80211" in os.listdir(iface_path): discovered_interfaces.append(iface) if interface not in discovered_interfaces: log.warning( "%s interface not found in phy80211 interfaces: %s", interface, discovered_interfaces, ) raise ValueError(f"{interface} is not a valid interface") else: log.debug( "%s is in discovered interfaces: [%s]", interface, discovered_interfaces ) return interface
0e3d42ee7c1e2d3f486681ca0bdde174c0650972
6,716
import requests def api_retrieve_part(pt_id): """ Allows the client to call "retrieve" method on the server side to retrieve the part from the ledger. Args: pt_id (str): The uuid of the part Returns: type: str String representing JSON object which allows the client to know that the call was either a success or a failure. """ response = requests.get( "http://127.0.0.1:852/tp/part/{}".format(pt_id) ) output = response.content.decode("utf-8").strip() return output
5043415dcdb95e59ec87271bf62d1f04f818af9b
6,717
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup = ('lin',0)): """ Complete linear SMO algorithm Parameters: dataMatIn - data matrix classLabels - data labels C - slack variable (penalty parameter) toler - tolerance maxIter - maximum number of iterations kTup - tuple holding the kernel function information Returns: oS.b - b computed by the SMO algorithm oS.alphas - alphas computed by the SMO algorithm """ oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler, kTup) #initialize the data structure iter = 0 #initialize the current iteration count entireSet = True; alphaPairsChanged = 0 while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)): #exit the loop when a full pass updates no alpha or the maximum number of iterations is exceeded alphaPairsChanged = 0 if entireSet: #iterate over the entire dataset for i in range(oS.m): alphaPairsChanged += innerL(i,oS) #use the optimized SMO inner loop print("full pass: iteration %d, sample %d, alpha pairs changed %d" % (iter,i,alphaPairsChanged)) iter += 1 else: #iterate over the non-bound values nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0] #alphas not at the bounds 0 or C for i in nonBoundIs: alphaPairsChanged += innerL(i,oS) print("non-bound pass: iteration %d, sample %d, alpha pairs changed %d" % (iter,i,alphaPairsChanged)) iter += 1 if entireSet: #switch to non-bound traversal after one full pass entireSet = False elif (alphaPairsChanged == 0): #if no alphas changed, go back to a full pass entireSet = True print("iteration count: %d" % iter) return oS.b,oS.alphas #return b and alphas computed by the SMO algorithm
fb38ef33ab624a74f3da320c5e90a48aa307d588
6,718
def _get_output_data(output_list, heat, stack_id): """ Get the output data """ response = { 'code': 200, 'msg': 'ok', 'status': utils.INSTANTIATED, 'data': [] } for item in output_list['outputs']: output = heat.stacks.output_show(stack_id, item['output_key']) output_value = output['output']['output_value'] item = { 'vmId': output_value['vmId'], 'vncUrl': output_value['vncUrl'], 'networks': [] } if 'networks' in output_value and output_value['networks'] is not None: for net_name, ip_data in output_value['networks'].items(): if utils.validate_uuid(net_name): continue network = { 'name': net_name, 'ip': ip_data[0]['addr'] } item['networks'].append(network) response['data'].append(item) return response
aca183c1b158e6e7b9e414151e7f5d5505de1188
6,719
def _ensure_str(s): """convert bytestrings and numpy strings to python strings""" return s.decode() if isinstance(s, bytes) else str(s)
05f549166cc459371b380f62393bbc835aa7ff48
6,720
def get_polarimeter_index(pol_name): """Return the progressive number of the polarimeter within the board (0…7) Args: pol_name (str): Name of the polarimeter, like ``R0`` or ``W3``. Returns: An integer from 0 to 7. """ if pol_name[0] == "W": return 7 else: return int(pol_name[1])
0068931868e214896f6263e58fc09215352d502c
6,721
def merge_sort(collection): """ Pure implementation of a sort that repeatedly extracts the minimum and maximum items :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: a collection ordered by ascending value Examples : >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> merge_sort([]) [] >>> merge_sort([-45, -5, -2]) [-45, -5, -2] """ start, end = [], [] while len(collection) > 1 : min_one, max_one = min(collection), max(collection) start.append(min_one) end.append(max_one) collection.remove(min_one) collection.remove(max_one) end.reverse() return start + collection + end
b704792ef49629e7e9e04c22ffe03f08b3ef76fa
6,722
def sma_centroids(dataframe, column, short_window, long_window, min_width=None, **kwargs): """Identify centermost point between two SMA interception points Define regions as being bounded by two consecutive interceptions of SMAs with different window widths, then choose the centermost data point within that region. Useful for defining regions that capture the crossover of SMAs. Essentially a wrapper around `sma_to_centroids`. Args: dataframe (pandas.DataFrame): dataframe from which SMAs should be calculated and regions defined column (str): name of column over in dataframe from which sliding-window slopes should be calculated short_window (int): number of consecutive dataframe rows to include in the short window long_window (int): number of consecutive dataframe rows to include in the long window min_width: minimum width, expressed in units of `x_column`, below which an intercept should be disregarded as a valid end of a window kwargs: arguments to be passed to calculate_sma() Returns: DataFrame with indices corresponding to dataframe """ x_column = '_datetime_start' sma_short = calculate_sma(dataframe, x_column, column, window=short_window, **kwargs) sma_long = calculate_sma(dataframe, x_column, column, window=long_window, **kwargs) intercepts = find_sma_intercepts(sma_short, sma_long, dataframe[x_column]) return find_sma_centroids(dataframe=dataframe, sma_short=sma_short, sma_long=sma_long, intercepts=intercepts, x_column=x_column, min_width=min_width)
1157318d90ce514a3a85461851c158a0df9d2a3e
6,723
def delMsg(msgNum): """Deletes a specified message from the inbox""" global usrPrompt try: inboxMessages = json.loads(api.getAllInboxMessages()) # gets the message ID via the message index number msgId = inboxMessages['inboxMessages'][int(msgNum)]['msgid'] msgAck = api.trashMessage(msgId) except: print '\n Connection Error\n' usrPrompt = 0 main() return msgAck
b3cc7a4568ca6eae3267cd5247abe88c5ccb8bec
6,724
def capitalize_first(str): """Capitalizes only the first letter of the given string. :param str: string to capitalize :return: str with only the first letter capitalized """ if str == "": return "" return str[0].upper() + str[1:]
ed6dfdfd9709de1682c29ed152131b9da732441b
6,725
def min_cost_edge(G, T): """Returns the edge with the lowest cost/weight. Parameters ---------- G : NetworkX graph T : Prim's Algorithm Returns ------- The edge with the lowest cost/weight. """ edge_list = possible_edges(G, T) edge_list.sort(key = lambda e : cost(G, e)) return edge_list[0]
3720bb59cddf0b29beb9f9162941ddf7f86dd429
6,726
import io import base64 def get_image_html_tag(fig, format="svg"): """ Returns an HTML tag with embedded image data in the given format. :param fig: a matplotlib figure instance :param format: output image format (passed to fig.savefig) """ stream = io.BytesIO() # bbox_inches: expand the canvas to include the legend that was put outside the plot # see https://stackoverflow.com/a/43439132 fig.savefig(stream, format=format, bbox_inches="tight") data = stream.getvalue() if format == "svg": return data.decode("utf-8") data = base64.b64encode(data).decode("utf-8") return f"<img src=\"data:image/{format};base64,{data}\">"
f5c59a6f4f70fb6616cec4619d8cbf9ca2e28529
6,727
def reformat_language_tuple(langval): """Produce standardly-formatted language specification string using given language tuple. :param langval: `tuple` in form ('<language>', '<language variant>'). Example: ('en', 'US') :return: `string` formatted in form '<language>-<language-variant>' """ if langval: langval_base, langval_variant = langval if langval_variant: langval_base = '{0}-{1}'.format(langval_base, langval_variant) return langval_base else: return None
63c479d7dd273f31b9bdcc6c0ce81d4267a43714
6,728
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a GhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 16, 16, 0, 1]], # stage2 [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], # stage3 [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], # stage4 [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1] ], # stage5 [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( GhostNet, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **model_kwargs)
8b0bca5e4d711dce5150d8c3cdb187c9b1a23ec3
6,729
def re_suffix(string): """ Remove any “os.extsep” prefixing a string, and ensure that it ends with a “$” – to indicate a regular expression suffix. """ if not string: return None return rf"{string.casefold().lstrip(QUALIFIER).rstrip(DOLLA)}{DOLLA}"
fd1767f0d539e284f56c32f5ed4a8789a6638fca
6,730
def _alternate_dataclass_repr(object) -> str: """ Overrides the default dataclass repr by not printing fields that are set to None. i.e. Only prints fields which have values. This is for ease of reading. """ populated_fields = { field.name: getattr(object, f"{field.name}") for field in fields(object) if getattr(object, f"{field.name}") is not None } class_name = object.__class__.__name__ repr_string = f"{class_name}(" + ", ".join([f"{field}={value}" for field, value in populated_fields.items()]) + ")" return repr_string
c9c07508a39c0732698c1ed6803ef00c4b2f65d6
6,731
def which_coords_in_bounds(coords, map_shape): """ Checks the coordinates given to see if they are in bounds :param coords Union[array(2)[int], array(N,2)[int]]: [int, int] or [[int, int], ...], Nx2 ndarray :param map_shape Tuple[int]: shape of the map to check bounds :return Union[bool array(N)[bool]]: corresponding to whether the coord is in bounds (if array is given, then it will be array of bool) """ assert isinstance(coords, np.ndarray) and coords.dtype == np.int assert np.array(map_shape).dtype == np.int if len(coords.shape) == 1: return coords[0] >= 0 and coords[0] < map_shape[0] and coords[1] >= 0 and coords[1] < map_shape[1] else: return np.logical_and(np.logical_and(coords[:, 0] >= 0, coords[:, 0] < map_shape[0]), np.logical_and(coords[:, 1] >= 0, coords[:, 1] < map_shape[1]))
5606c24430e9967cade8bdeb789f10bed1248eb1
6,732
def get_activation_function(activation_function_name: str): """ Given the name of an activation function, retrieve the corresponding function and its derivative :param activation_function_name: the name of the activation function :return: the corresponding activation function and its derivative """ try: return activation_functions[activation_function_name] except KeyError: raise UnknownActivationFunctionName(activation_function_name)
f2a830c15cb93bd9fce1b66c2b5ca14530005cd5
6,733
def url_split(url, uses_hostname=True, split_filename=False): """Split the URL into its components. uses_hostname defines whether the protocol uses a hostname or just a path (for "file://relative/directory"-style URLs) or not. split_filename defines whether the filename will be split off in an attribute or whether it will be part of the path """ # urlparse.urlparse() is a bit deficient for our needs. try: if uses_hostname: match = URL_RE_HOSTNAME.match(url).groupdict() else: match = URL_RE_PLAIN.match(url).groupdict() except AttributeError: raise AttributeError, "Invalid URL." for key, item in match.items(): if item is None: if key == "port": # We should leave port as None if it's not defined. match[key] = "0" else: match[key] = "" if uses_hostname: match["port"] = int(match["port"]) if not split_filename: match["path"] = match["path"] + match["file"] match["file"] = "" return URLSplitResult(match)
5c76eb58c520043ab922c941806f24c60f9ee721
6,734
def memdiff_search(bytes1, bytes2): """ Use binary searching to find the offset of the first difference between two strings. :param bytes1: The original sequence of bytes :param bytes2: A sequence of bytes to compare with bytes1 :type bytes1: str :type bytes2: str :rtype: int offset of the first location a and b differ, None if strings match """ # Prevent infinite recursion on inputs with length of one half = (len(bytes1) // 2) or 1 # Compare first half of the string if bytes1[:half] != bytes2[:half]: # Have we found the first diff? if bytes1[0] != bytes2[0]: return 0 return memdiff_search(bytes1[:half], bytes2[:half]) # Compare second half of the string if bytes1[half:] != bytes2[half:]: return memdiff_search(bytes1[half:], bytes2[half:]) + half
fbcb221c77730c45be4c81a6ae7515e602468af5
6,735
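An illustrative call to the binary-search diff helper above, assuming memdiff_search from this row is in scope:
print(memdiff_search("abcdef", "abcXef"))  # 3 -- offset of the first difference
print(memdiff_search("same", "same"))      # None -- the inputs match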
def decomposeJonesMatrix(Jmat): """ Decompose 2x2 Jones matrix to retardance and diattenuation vectors """ Jmat = Jmat / cp.sqrt(cp.linalg.det(Jmat)) q = cp.array([Jmat[0, 0] - Jmat[1, 1], Jmat[1, 0] + Jmat[0, 1], -1j * Jmat[1, 0] + 1j * Jmat[0, 1]]) / 2 tr = cp.trace(Jmat) / 2 c = cp.arccosh(tr) csin = c / cp.sinh(c) if c == 0: csin = 1 f = 2 * q * csin rotVector = -cp.imag(f) diatVector = cp.real(f) return rotVector, diatVector
151320a0f77f2fb3a77d8e06b1e623c0fed6c673
6,736
def format_utc(time): """Format a time in UTC.""" return as_utc(time).strftime('%Y-%m-%d %H:%M:%S.%f')
88624b8e166aa07172abd14c391945e33c77332f
6,737
def plugin_scope(): """Returns the capability of the remote network driver. This function returns the capability of the remote network driver, which is ``global`` or ``local`` and defaults to ``local``. With ``global`` capability, the network information is shared among multiple Docker daemons if the distributed store is appropriately configured. See the following link for more details about the spec: https://github.com/docker/libnetwork/blob/master/docs/remote.md#set-capability # noqa """ LOG.debug("Received /NetworkDriver.GetCapabilities") capabilities = {'Scope': cfg.CONF.capability_scope} return flask.jsonify(capabilities)
dba53f98ca73010d41e1a315f28a8279bb6aa4cd
6,738
import os def _create_chimeric_msa( # pylint: disable=too-many-arguments output_folder, cluster, subexon_df, gene2speciesname, connected_subexons, aligner='ProGraphMSA', padding='XXXXXXXXXX', species_list=None): """Return a modified subexon_df, the dict of chimerics and the msa.""" subexon_df, subexon_matrix = subexons.alignment.create_subexon_matrix( subexon_df) chimerics = subexons.alignment.create_chimeric_sequences( subexon_df, subexon_matrix, connected_subexons, padding=padding) msa_file = _outfile(output_folder, "chimeric_alignment_", cluster, ".fasta") if chimerics: chimerics = subexons.alignment.sort_species(chimerics, gene2speciesname, species_list) subexons.alignment.run_aligner(chimerics, aligner=aligner, output_path=msa_file) msa = subexons.alignment.read_msa_fasta(msa_file) else: if os.path.isfile(msa_file): os.remove(msa_file) msa = None return subexon_df, chimerics, msa
9adb45de5a51939b0dc2d728fdc2837a97fd3df9
6,739
def _expand_sources(sources): """ Expands a user-provided specification of source files into a list of paths. """ if sources is None: return [] if isinstance(sources, str): sources = [x.strip() for x in sources.split(",")] elif isinstance(sources, (float, int)): sources = [str(sources)] return [path for source in sources for path in _glob(source)]
6e16eaae5edb68a5be7e0af4be777fc76b70d22a
6,740
from typing import Optional def get_stream(stream_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult: """ This data source provides details about a specific Stream resource in Oracle Cloud Infrastructure Streaming service. Gets detailed information about a stream, including the number of partitions. ## Example Usage ```python import pulumi import pulumi_oci as oci test_stream = oci.streaming.get_stream(stream_id=oci_streaming_stream["test_stream"]["id"]) ``` :param str stream_id: The OCID of the stream. """ __args__ = dict() __args__['streamId'] = stream_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:streaming/getStream:getStream', __args__, opts=opts, typ=GetStreamResult).value return AwaitableGetStreamResult( compartment_id=__ret__.compartment_id, defined_tags=__ret__.defined_tags, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_state_details=__ret__.lifecycle_state_details, messages_endpoint=__ret__.messages_endpoint, name=__ret__.name, partitions=__ret__.partitions, retention_in_hours=__ret__.retention_in_hours, state=__ret__.state, stream_id=__ret__.stream_id, stream_pool_id=__ret__.stream_pool_id, time_created=__ret__.time_created)
fd7eb6675f5d232e90a94e18e4c68e6d538ca7e4
6,741
import os def components(path): """Split a POSIX path into components.""" head, tail = os.path.split(os.path.normpath(path)) if head == "": return [tail] elif head == "/": return [head + tail] else: return components(head) + [tail]
f29ae64104255450f5889a7440679342af767c9b
6,742
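A short usage sketch for the recursive POSIX path splitter above, assuming components from this row is in scope:
print(components("/usr/local/bin"))     # ['/usr', 'local', 'bin'] -- the root stays fused to the first part
print(components("docs/api/index.md"))  # ['docs', 'api', 'index.md']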
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): """ Fusion method. """ n_channels_int = n_channels in_act = input_a + input_b t_act = ops.Tanh()(in_act[:, :n_channels_int, :]) s_act = ops.Sigmoid()(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts
9412a3b20960b0a280a06569846df744f14a9d63
6,743
import json def _classes_dict(filename): """ Open JSON file and read the data for the Classes (and Origins). filename - the file name as a string. Runtime: O(n) """ class_dict = {} # {'robot': ['blitzcrank']} class_bonus_dict = {} dict = { 1: {}, 2: {}, 3: {}, 4 : {}, 6 : {}} # { 1 : { 'robot' : set['blitzcrank'], 'exile' : set['yasuo'] }, 2 : ... } with open(filename) as json_file: data = json.load(json_file) for class_obj in data.items(): # O(n) key = class_obj[1]['key'] # String name = class_obj[1]['name'] # String description = class_obj[1]['description'] # String accentChampionImage = class_obj[1]['accentChampionImage'] # URL as String bonuses = class_obj[1]['bonuses'] # Array [{'needed': int, 'effect': string}] needed = bonuses[-1]['needed'] # Check the highest number for needed. (In this case it's the last item in the array) class_dict[key] = [] class_bonus_dict[key] = needed dict[needed].update({class_obj[0]: []}) return dict
44fa2acec6c7235995bfdabaab149b4cba2cb7cc
6,744
def set_incident_seen(incident, user=None): """ Updates the incident to be seen """ is_org_member = incident.organization.has_access(user) if is_org_member: is_project_member = False for incident_project in IncidentProject.objects.filter(incident=incident).select_related( "project" ): if incident_project.project.member_set.filter(user=user).exists(): is_project_member = True break if is_project_member: incident_seen, created = IncidentSeen.objects.create_or_update( incident=incident, user=user, values={"last_seen": timezone.now()} ) return incident_seen return False
8b970ec492bdb72b6e05c053f7a5b9bf919b15e7
6,745
def single_parity_check( llr: np.array, mask_steps: int = 0, last_chunk_type: int = 0, ) -> np.array: """Compute beta value for Single parity node.""" all_sign = np.sign(np.prod(llr)) abs_alpha = np.fabs(llr) first_min_idx, second_min_idx = np.argsort(abs_alpha)[:2] result = np.sign(llr) * all_sign for i in range(result.size): if i == first_min_idx: result[i] *= abs_alpha[second_min_idx] else: result[i] *= abs_alpha[first_min_idx] return result
5cc9984bb86fdfd777b2a968fb887388a5422e4f
6,746
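A small numeric illustration of the min-sum style parity update above; the LLR values are arbitrary and numpy is assumed to be imported as np:
llr = np.array([-2.5, 1.0, 3.0])
# overall sign is negative; each position takes the smallest magnitude among the other positions
print(single_parity_check(llr))  # [ 1.  -2.5 -1. ]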
def _deserialize_job_result(user_input: JSON) -> JobResult: """Deserialize a JobResult from JSON.""" job = _deserialize_job(user_input['job']) plan = _deserialize_plan(user_input['plan']) is_done = user_input['is_done'] outputs = dict() # type: Dict[str, Asset] for name, asset in user_input['outputs'].items(): outputs[name] = _deserialize_asset(asset) return JobResult(job, plan, is_done, outputs)
883629fa2650fc043124c6ccf721ed38093daa19
6,747
def _brute_force_knn(X, centers, k, return_distance=True): """ :param X: array of shape=(n_samples, n_features) :param centers: array of shape=(n_centers, n_features) :param k: int, only looking for the nearest k points to each center. :param return_distance: bool, if True the return the distance along with the points :return: """ if k == 1: nearest, dists = pairwise_distances_argmin_min(centers, X) return (dists, nearest) if return_distance else nearest else: dists = pairwise_distances(centers, X) nearest = np.argsort(dists, axis=1)[:, :k] return (np.vstack([dists[i, nearest[i]] for i in range(dists.shape[0])]), nearest) if return_distance else nearest
b185a8d9e901a12a1385ed5ffc3183a5cc51c1b5
6,748
def remove_rule(rule_id): """Remove a single rule""" ruleset = packetfilter.get_ruleset() ruleset.remove(rule_id) packetfilter.load_ruleset(ruleset) save_pfconf(packetfilter) return redirect(url_for('rules', message=PFWEB_ALERT_SUCCESS_DEL), code=302)
fe45a3d5af532ff67e8aef21ab093438818c6dbc
6,749
def HexToMPDecimal(hex_chars): """ Convert bytes to an MPDecimal string. Example \x00 -> "aa" This gives us the AppID for a chrome extension. """ result = '' base = ord('a') for i in xrange(len(hex_chars)): value = ord(hex_chars[i]) dig1 = value / 16 dig2 = value % 16 result += chr(dig1 + base) result += chr(dig2 + base) return result
5d81c0e1ee3f4f94e615578e132377b803beb47b
6,750
def fit_growth_curves(input_file, file_data_frame, file_data_units, condition_unit, time_unit, cell_density_unit): """ :Authors: Chuankai Cheng <[email protected]> and J. Cameron Thrash <[email protected]> :License: MIT :Version: 1.0 :Date: 2021-03-17 :Repository: https://github.com/thrash-lab/sparse-growth-curve """ output_data_indices=file_data_frame.groupby( ['Strain','Replicate','Condition'] ).size().reset_index().rename(columns={0:'count'} )[['Strain','Replicate','Condition']] strains_conditions=output_data_indices.groupby(['Strain','Condition'] ).size().reset_index()[['Strain','Condition']] output_data_indices['Growth: Doubling rate']=0 output_data_indices['Death: Doubling rate']=0 output_data_indices=output_data_indices.astype(object) output_data_indices=output_data_indices.sort_values(by=['Strain','Condition']) strains=np.unique(strains_conditions['Strain']) row_num=len(strains) col_num=np.int(np.ceil(len(strains_conditions)/len(strains))) plt.figure(figsize=(col_num*2+1, row_num*2+1)) plot_j=1 previous_condition=output_data_indices['Condition'].values[0] plt.subplot(row_num, col_num, plot_j) color_i=0 plt.title(str(output_data_indices['Strain'].values[0])+'\n' +str(output_data_indices['Condition'].values[0])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) for i in output_data_indices.index: target_gr_index=output_data_indices.loc[i] target_growth_curve_df = file_data_frame[ (file_data_frame['Strain']==target_gr_index['Strain'])& (file_data_frame['Condition']==target_gr_index['Condition']) & (file_data_frame['Replicate']==target_gr_index['Replicate'])] #print('\n\nStrain:', target_gr_index['Strain'], # '\t Condition:',str(target_gr_index['Condition'])+' '+condition_unit, # '\t Replicate:',str(target_gr_index['Replicate'])) time=target_growth_curve_df.loc[:,'Time'].values cell_density=target_growth_curve_df.loc[:,'Cell density'].values #print('time=', time) #print('cell density=', 'cell_density') if target_gr_index['Condition']!=previous_condition: plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) plt.legend() #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) color_i=0 plot_j+=1 plt.subplot(row_num, col_num, plot_j) plt.title(str(target_gr_index['Strain'])+'\n' +str(target_gr_index['Condition'])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) if len(cell_density)>4: (all_fit_time, all_fit_cell_density, all_fit_conf_band, selected_doubling_rate, selected_fit_time, selected_fit_cell_density, selected_doubling_rate_d, selected_fit_time_d, selected_fit_cell_density_d)=fit_growth_curve( time, cell_density, one_order=10, decision_tree_depth=1) output_data_indices.loc[i,'Growth: Doubling rate']=selected_doubling_rate output_data_indices.loc[i,'Death: Doubling rate']=selected_doubling_rate_d for k in range(len(all_fit_time)): #plt.plot(all_fit_time[i], all_fit_cell_density[i], 'k--') #plt.fill_between(all_fit_time[k], # all_fit_cell_density[k]*(all_fit_conf_band[k]), # all_fit_cell_density[k]/(all_fit_conf_band[k]), # color=colormap(color_i), alpha=0.1) plt.plot(selected_fit_time, selected_fit_cell_density, '-', color=colormap(color_i), linewidth=2) plt.plot(selected_fit_time_d, selected_fit_cell_density_d, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)>2: x=time y=np.log2(cell_density) x_fit = np.arange(0.0, x[-1], 0.01)[:, np.newaxis] (doubling_rate, pre_y, ci) = myLinearRegression_CB(x, y, x_fit, one_order=10) #plt.fill_between(x_fit, # pre_y*ci, # pre_y/ci, # color=colormap(color_i), alpha=0.1) if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)==2: x=time y=np.log2(cell_density) doubling_rate=(y[1]-y[0])/(x[1]-x[0]) output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x, y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x, y, '--', color=colormap(color_i), linewidth=1) plt.plot(time, cell_density,'o',alpha=0.3, color=colormap(color_i), label=output_data_indices.loc[i]['Replicate']) color_i+=1 previous_condition=output_data_indices.loc[i]['Condition'] plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) plt.legend() plt.tight_layout() output_file_string=(output_folder+ '/'+input_file+ '/1_Data_fit_visualization_'+ dt_string+'.pdf') plt.savefig(output_file_string) print('output file saved:'+output_file_string) return output_data_indices
d551a094ea398e09dcc88f8f9668922b3e665317
6,751
def stringify(li,delimiter): """ Converts list entries to strings and joins with delimiter.""" string_list = map(str,li) return delimiter.join(string_list)
a4c35a19d8ea654a802cd3f92ababcbdfdf0ecfb
6,752
def norm_w(x, w): """ Compute sum_i( w[i] * |x[i]| ). See p. 7. """ return (w * abs(x)).sum()
a9825750cb6ee0bbbe87b0c4d1bd132bcfca90db
6,753
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment): """Apply momentum optimizer to the weight parameter using Tensor.""" success = True success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum)) return success
89ae490ba0f05455dff03bcd57d4b6f52f7d8327
6,754
from typing import Dict import yaml def get_config_settings(env: str = "dev") -> Dict: """ Retrieves configuration from YAML file """ config_fh = construct_config_path(env) with open(config_fh, "r") as f: data = yaml.safe_load(f) return data
190a7f8cb2a297ee4ae6d5734d4d9f521a18bb3f
6,755
def get_all(connection: ApiConnection, config: str, section: str = None) -> dict: """Get all sections of a config or all values of a section. :param connection: :param config:UCI config name :param section:[optional] UCI section name :return: JSON RPC response result """ return request(connection, 'uci', 'get_all', config, section)
be4a76f87398ce1d4e0765314266647867964e39
6,756
def load_module(module, app): """Load an object from a Python module In: - ``module`` -- name of the module - ``app`` -- name of the object to load Return: - (the object, None) """ r = __import__(module, fromlist=('',)) if app is not None: r = getattr(r, app) return r, None
858d9d0bf91ff7d83ad391218b8ff1b37007b43b
6,757
def get_routes(app: web.Application) -> list: """ Get the full list of defined routes """ return get_standard_routes(app) + get_custom_routes(app)
7f5d365c28ee45096e089ee6913d3aec4d8214d8
6,758
def cb_round(series: pd.Series, base: Number = 5, sig_dec: int = 0): """ Returns the pandas series (or column) with values rounded per the custom base value Args: series (pd.Series): data to be rounded base (float): base value to which data should be rounded (may be decimal) sig_dec (int): number of significant decimals for the custom-rounded value Returns: pd.Series """ valid.validate_array(series, "series", expected_len=None) if not base >= 0.01: err = f"cannot round with base {base}." + "cb_round designed for base >= 0.01." raise ValueError(err) result = series.apply(lambda x: round(base * round(float(x) / base), sig_dec)) return result
29599898fa8686c260e89d2efcdcceec108d5b4c
6,759
def makeGaussian(size, sigma=3, center=None): """ Make a square gaussian kernel. size is the length of a side of the square sigma is the standard deviation of the Gaussian, which can be thought of as an effective radius. """ x = np.arange(0, size, 1, float) y = x[:, np.newaxis] if center is None: x0 = y0 = size // 2 else: x0 = center[0] y0 = center[1] return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2.0 * sigma ** 2)) # return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)
8efef3cc265375d5412107a465a97380e8c4d101
6,760
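A brief usage sketch of the Gaussian-kernel helper above (numpy assumed imported as np, makeGaussian from this row in scope):
kernel = makeGaussian(5, sigma=1.0)
print(kernel.shape)            # (5, 5)
print(kernel[2, 2])            # 1.0 -- the peak sits at the centre by default
print(round(kernel[2, 3], 3))  # 0.607 -- exp(-0.5) one pixel from the centre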
import torch def average_relative_error(y_pred, y_true): """Calculate Average Relative Error Args: y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values Returns: float: Average Relative Error Raises: ValueError : If Parameters are not both of type np.ndarray or torch.Tensor """ if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray): return sum(sum(abs(y_true - y_pred) / y_true) / len(y_true)) / len(y_true[0, :]) elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor): return torch.sum(torch.sum(torch.abs(y_true - y_pred) / y_true, dim=0) / len(y_true)) / len(y_true[0, :]) else: raise ValueError( 'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
2243eb82c78ff03181be3c10d50c3aa000e8476c
6,761
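A quick numeric check of the metric above using small numpy arrays (numpy assumed imported as np; torch tensors follow the same path):
y_true = np.array([[1.0, 2.0], [4.0, 5.0]])
y_pred = np.array([[1.1, 1.8], [4.4, 4.5]])
# every element is off by 10% of its true value, so the mean relative error is 0.1
print(average_relative_error(y_pred, y_true))  # ~0.1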
from unittest.mock import Mock def make_subprocess_hook_mock(exit_code: int, output: str) -> Mock: """Mock a SubprocessHook factory object for use in testing. This mock allows us to validate that the RenvOperator is executing subprocess commands as expected without running them for real. """ result_mock = Mock() result_mock.exit_code = exit_code result_mock.output = output hook_instance_mock = Mock() hook_instance_mock.run_command = Mock(return_value=result_mock) hook_factory_mock = Mock(return_value=hook_instance_mock) return hook_factory_mock
a047608503be8bc7fc4b782139e7d12145efb3cd
6,762
def binstr2int(bin_str: str) -> int: """Convert a binary string to a decimal integer; the inverse of int2binstr Args: bin_str: binary string, e.g. '0b0011' or '0011' Returns: the converted decimal integer """ return int(bin_str, 2)
87c6ac16c2215e533cb407407bef926ed8668e3e
6,763
def _nodeset_compare(compare, a, b, relational=False): """ Applies a comparison function to node-sets a and b in order to evaluate equality (=, !=) and relational (<, <=, >=, >) expressions in which both objects to be compared are node-sets. Returns an XPath boolean indicating the result of the comparison. """ if isinstance(a, Types.NodesetType) and isinstance(b, Types.NodesetType): # From XPath 1.0 Section 3.4: # If both objects to be compared are node-sets, then the comparison # will be true if and only if there is a node in the first node-set # and a node in the second node-set such that the result of # performing the comparison on the string-values of the two nodes # is true. if not (a and b): # One of the two node-sets is empty. In this case, according to # section 3.4 of the XPath rec, no node exists in one of the two # sets to compare, so *any* comparison must be false. return boolean.false # If it is a relational comparison, the actual comparison is done on # the string value of each of the nodes. This means that the values # are then converted to numbers for comparison. if relational: # NumberValue internally coerces a node to a string before # converting it to a number, so the "convert to string" clause # is handled. coerce = Conversions.NumberValue else: coerce = Conversions.StringValue # Convert the nodesets into lists of the converted values. a = map(coerce, a) b = map(coerce, b) # Now compare the items; if any compare True, we're done. for left in a: for right in b: if compare(left, right): return boolean.true return boolean.false # From XPath 1.0 Section 3.4: # If one object to be compared is a node-set and the other is a number, # then the comparison will be true if and only if there is a node in the # node-set such that the result of performing the comparison on the # number to be compared and on the result of converting the string-value # of that node to a number using the number function is true. If one # object to be compared is a node-set and the other is a string, then the # comparison will be true if and only if there is a node in the node-set # such that the result of performing the comparison on the string-value # of the node and the other string is true. If one object to be compared # is a node-set and the other is a boolean, then the comparison will be # true if and only if the result of performing the comparison on the # boolean and on the result of converting the node-set to a boolean using # the boolean function is true. # # (In other words, coerce each node to the same type as the other operand, # then compare them. Note, however, that relational comparisons convert # their operands to numbers.) if isinstance(a, Types.NodesetType): # a is nodeset if isinstance(b, Types.BooleanType): a = Conversions.BooleanValue(a) return compare(a, b) and boolean.true or boolean.false elif relational: b = Conversions.NumberValue(b) coerce = Conversions.NumberValue elif isinstance(b, Types.NumberType): coerce = Conversions.NumberValue else: b = Conversions.StringValue(b) coerce = Conversions.StringValue for node in a: if compare(coerce(node), b): return boolean.true else: # b is nodeset if isinstance(a, Types.BooleanType): b = Conversions.BooleanValue(b) return compare(a, b) and boolean.true or boolean.false elif relational: a = Conversions.NumberValue(a) coerce = Conversions.NumberValue elif isinstance(a, Types.NumberType): coerce = Conversions.NumberValue else: a = Conversions.StringValue(a) coerce = Conversions.StringValue for node in b: if compare(a, coerce(node)): return boolean.true return boolean.false
5751b793662689a1e0073cfe5fc4b86505952dcd
6,764
def test_cl_shift(options): """ Create tests for centerline shifts 8 out of 8 points are on one side of the mean >= 10 out of 11 points are on one side of the mean >= 12 out of 14 points are on one side of the mean >= 14 out of 17 points are on one side of the mean >= 16 out of 20 points are on one side of the mean """ windows = [ (8, Window(8, init=options.m)), (10, Window(11, init=options.m)), (12, Window(14, init=options.m)), (14, Window(17, init=options.m)), (16, Window(20, init=options.m)), ] cl = options.m def test(x): for n, w in windows: w.append(x) if np.sum(w.data > cl) >= n: err_out("%s is %g/%g points > centerline" % (w.data, n, w.n)) elif np.sum(w.data < cl) >= n: err_out("%s is %g/%g points < centerline" % (w.data, n, w.n)) return test
60d874c8b1484ff23c4791043eae949912a969d0
6,765
def _scale(tensor): """Scale a tensor based on min and max of each example and channel Resulting tensor has range (-1, 1). Parameters ---------- tensor : torch.Tensor or torch.autograd.Variable Tensor to scale of shape BxCxHxW Returns ------- Tuple (scaled_tensor, min, max), where min and max are tensors containing the values used for normalizing the tensor """ b, c, h, w = tensor.shape out = tensor.view(b, c, h * w) minimum, _ = out.min(dim=2, keepdim=True) out = out - minimum maximum, _ = out.max(dim=2, keepdim=True) out = out / maximum # out has range (0, 1) out = out * 2 - 1 # out has range (-1, 1) return out.view(b, c, h, w), minimum, maximum
64eed9bd70c543def6456f3af89fa588ec35bca8
6,766
def get_moscow_oh(opening_hours): """ returns an OpeningHourBlock from a fake json corresponding to a POI located in moscow city for different opening_hours formats. """ return get_oh_block(opening_hours, lat=55.748, lon=37.588, country_code="RU")
42f795e262753cc82d8689c2a98e6a82e143a2c3
6,767
def get_firebase_credential_errors(credentials: str): """ Wrapper to get error strings for test_firebase_credential_errors because otherwise the code is gross. Returns None if no errors occurred. """ try: test_firebase_credential_errors(credentials) return None except Exception as e: return str(e)
fbca79e837a3d6dc85ee90bfd426008c6ce25ac2
6,768
def url(endpoint, path): """append the provided path to the endpoint to build an url""" return f"{endpoint.rstrip('/')}/{path}"
dee733845984bfc4cf5728e9614cce08d19a2936
6,769
def is_collision(line_seg1, line_seg2): """ Checks for a collision between line segments p1(x1, y1) -> q1(x2, y2) and p2(x3, y3) -> q2(x4, y4) """ def on_segment(p1, p2, p3): if (p2[0] <= max(p1[0], p3[0])) & (p2[0] >= min(p1[0], p3[0])) & (p2[1] <= max(p1[1], p3[1])) & (p2[1] >= min(p1[1], p3[1])): return True return False def orientation(p1, p2, p3): val = ((p2[1] - p1[1]) * (p3[0] - p2[0])) - ((p2[0] - p1[0]) * (p3[1] - p2[1])) if val == 0: return 0 elif val > 0: return 1 elif val < 0: return 2 p1, q1 = line_seg1[0], line_seg1[1] p2, q2 = line_seg2[0], line_seg2[1] o1 = orientation(p1, q1, p2) o2 = orientation(p1, q1, q2) o3 = orientation(p2, q2, p1) o4 = orientation(p2, q2, q1) if (o1 != o2) and (o3 != o4): return True if o1 == 0 and on_segment(p1, p2, q1): return True if o2 == 0 and on_segment(p1, q2, q1): return True if o3 == 0 and on_segment(p2, p1, q2): return True if o4 == 0 and on_segment(p2, q1, q2): return True return False
17dba61faebe50336cbc2cd2cc56c49474db5431
6,770
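A short check of the segment-intersection test above, assuming is_collision from this row is in scope:
crossing = (((0, 0), (2, 2)), ((0, 2), (2, 0)))  # the two diagonals of a square
parallel = (((0, 0), (1, 0)), ((0, 1), (1, 1)))  # parallel horizontal segments
print(is_collision(*crossing))  # True
print(is_collision(*parallel))  # False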
import numpy def plot_bar_graph_one_time( example_table_xarray, time_index, predictor_indices, info_string=None, figure_object=None, axes_object=None): """Plots predictors at one time as bar graph. :param example_table_xarray: xarray table in format returned by `example_io.read_file`. :param time_index: Index of valid time to plot. :param predictor_indices: 1-D numpy array with indices of predictors to plot. :param info_string: Info string (to be appended to title). :param figure_object: Will plot on this figure (instance of `matplotlib.figure.Figure`). If None, will create new figure. :param axes_object: Will plot on these axes (instance of `matplotlib.axes._subplots.AxesSubplot`). If None, will create new axes. :return: figure_object: See input doc. :return: axes_object: See input doc. :return: pathless_output_file_name: Pathless name for output file. """ error_checking.assert_is_integer(time_index) error_checking.assert_is_geq(time_index, 0) error_checking.assert_is_integer_numpy_array(predictor_indices) error_checking.assert_is_geq_numpy_array(predictor_indices, 0) if info_string is not None: error_checking.assert_is_string(info_string) xt = example_table_xarray predictor_values = ( xt[example_utils.SATELLITE_PREDICTORS_UNGRIDDED_KEY].values[ time_index, predictor_indices ] ) num_predictors = len(predictor_values) y_coords = numpy.linspace( 0, num_predictors - 1, num=num_predictors, dtype=float ) if figure_object is None or axes_object is None: figure_object, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) axes_object.barh( y_coords, predictor_values, color=BAR_FACE_COLOUR, edgecolor=BAR_EDGE_COLOUR, linewidth=BAR_EDGE_WIDTH ) pyplot.yticks([], []) axes_object.set_xlim(MIN_NORMALIZED_VALUE, MAX_NORMALIZED_VALUE) predictor_names = xt.coords[ example_utils.SATELLITE_PREDICTOR_UNGRIDDED_DIM ].values[predictor_indices].tolist() for j in range(num_predictors): axes_object.text( 0, y_coords[j], predictor_names[j], color=BAR_FONT_COLOUR, horizontalalignment='center', verticalalignment='center', fontsize=BAR_FONT_SIZE, fontweight='bold' ) valid_time_unix_sec = ( xt.coords[example_utils.SATELLITE_TIME_DIM].values[time_index] ) valid_time_string = time_conversion.unix_sec_to_string( valid_time_unix_sec, TIME_FORMAT_SECONDS ) cyclone_id_string = xt[satellite_utils.CYCLONE_ID_KEY].values[time_index] if not isinstance(cyclone_id_string, str): cyclone_id_string = cyclone_id_string.decode('utf-8') title_string = 'Satellite for {0:s} at {1:s}'.format( cyclone_id_string, valid_time_string ) if info_string is not None: title_string += '; {0:s}'.format(info_string) axes_object.set_title(title_string) pathless_output_file_name = '{0:s}_{1:s}_scalar_satellite.jpg'.format( cyclone_id_string, valid_time_string ) return figure_object, axes_object, pathless_output_file_name
5b1faab11bd6e79bd617ca23a8f49aeb83de2aae
6,771
def reshape_nda_to_2d(arr):
    """Reshape np.array to 2-d"""
    sh = arr.shape
    if len(sh) < 3:
        return arr
    # Integer division keeps the shape tuple made of ints under Python 3.
    arr.shape = (arr.size // sh[-1], sh[-1])
    return arr
11c721b938e45fd07d2ed1674a569e6836913ff3
6,772
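A quick check of the reshape helper above on a toy 3-d array, assuming the usual numpy import and the integer-division fix applied in the snippet.

import numpy as np

arr = np.arange(24).reshape(2, 3, 4)   # shape (2, 3, 4)
flat2d = reshape_nda_to_2d(arr)        # reshapes in place and returns arr
print(flat2d.shape)                    # (6, 4): all leading axes collapsed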
async def async_setup(hass, config):
    """Initialize the DuckDNS component."""
    domain = config[DOMAIN][CONF_DOMAIN]
    token = config[DOMAIN][CONF_ACCESS_TOKEN]
    session = async_get_clientsession(hass)

    result = await _update_duckdns(session, domain, token)

    if not result:
        return False

    async def update_domain_interval(now):
        """Update the DuckDNS entry."""
        await _update_duckdns(session, domain, token)

    async def update_domain_service(call):
        """Update the DuckDNS entry."""
        await _update_duckdns(session, domain, token, txt=call.data[ATTR_TXT])

    async_track_time_interval(hass, update_domain_interval, INTERVAL)
    hass.services.async_register(
        DOMAIN, SERVICE_SET_TXT, update_domain_service, schema=SERVICE_TXT_SCHEMA
    )

    return result
7208d0a25b219b6decbae314618e219705224a5a
6,773
def mock_signal(*args):
    """Mock creation of a binary signal array.

    :return: binary array
    :rtype: np.ndarray
    """
    signal = np.array([1, 0, 1])
    return signal
ebeb1f40a43c2c51d941208da78e0bfc0acb6530
6,774
def matmul(a, b):
    """np.matmul defaults to bfloat16, but this helper function doesn't."""
    return np.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
a4efb25933a25067b0b37ada8271f09b76929cb8
6,775
from pathlib import Path


def find_preard(path, metadata_pattern='*.json'):
    """ Match pre-ARD metadata with imagery in some location

    Parameters
    ----------
    path : str or Path
        Path to a metadata file or directory of files (returning matches
        inside the directory)
    metadata_pattern : str, optional
        If ``path`` is a directory, this value is used as a glob inside
        ``path`` to locate metadata files

    Returns
    -------
    dict[str, list[str]]
        Pairs of metadata filename to image filename(s)
    """
    path = Path(path)
    if path.is_dir():
        metadata = list(path.glob(metadata_pattern))
    else:
        metadata = [path]

    preard = {}
    for meta in metadata:
        images = sorted(meta.parent.glob(meta.stem + '*.tif'))
        if images:
            preard[meta] = images
        else:
            logger.debug(f'Could not find images for metadata file {meta}')
            preard[meta] = []

    return preard
9c23ed7308a95cb1f525f9604bf01a4bf9fc5e5d
6,776
def predictions(logit_1, logit_2, logit_3, logit_4, logit_5):
    """Converts predictions into understandable format.

    For example correct prediction for 2 will be > [2,10,10,10,10]
    """
    first_digits = np.argmax(logit_1, axis=1)
    second_digits = np.argmax(logit_2, axis=1)
    third_digits = np.argmax(logit_3, axis=1)
    fourth_digits = np.argmax(logit_4, axis=1)
    fifth_digits = np.argmax(logit_5, axis=1)
    stacked_digits = np.vstack(
        (first_digits, second_digits, third_digits, fourth_digits, fifth_digits)
    )
    rotated_digits = np.rot90(stacked_digits)[::-1]
    return rotated_digits
99e22cc4808634e6510196f2e9e79cba9dafd61c
6,777
def execute_parent(parent_path, child_path, input_tensor_npy, return_full_ctx=False):
    """Execute parent model containing a single StreamingDataflowPartition by
    replacing it with the model at child_path and return result."""
    parent_model = load_test_checkpoint_or_skip(parent_path)
    iname = parent_model.graph.input[0].name
    oname = parent_model.graph.output[0].name
    sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0]
    sdp_node = getCustomOp(sdp_node)
    sdp_node.set_nodeattr("model", child_path)
    ret = execute_onnx(parent_model, {iname: input_tensor_npy}, True)
    if return_full_ctx:
        return ret
    else:
        return ret[oname]
2757d22c46ee89f34cc89c702393d4a42d275c28
6,778
import os


def find_program(prog, paths):
    """Finds the specified program in env PATH, or tries a set of paths"""
    loc = spawn.find_executable(prog)
    if loc is not None:
        return loc
    for loc in paths:
        p = os.path.join(loc, prog)
        if os.path.exists(p):
            return p
    return None
77c483ac139a1c555b6b9ca897663567fd59da3d
6,779
def fibonacci(position):
    """
    Based on a position returns the number in the Fibonacci
    sequence on that position
    """
    if position == 0:
        return 0
    elif position == 1:
        return 1
    return fibonacci(position-1) + fibonacci(position-2)
cc4fe0860fa97234ead2179e18d208a8567e0cb3
6,780
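A usage check for the recursive fibonacci above, plus a memoized variant (the fibonacci_cached name is mine, not from the source), since the plain recursion is exponential in position.

from functools import lru_cache

print([fibonacci(n) for n in range(10)])   # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]

@lru_cache(maxsize=None)
def fibonacci_cached(position):
    # Same recurrence, but each position is computed only once.
    if position < 2:
        return position
    return fibonacci_cached(position - 1) + fibonacci_cached(position - 2)

print(fibonacci_cached(50))   # 12586269025, returned instantly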
def visualize_gebco(source, band, min=None, max=None):
    """
    Specialized function to visualize GEBCO data
    :param source: String, Google Earth Engine image id
    :param band: String, band of image to visualize
    :return: Dictionary
    """
    data_params = deepcopy(DATASETS_VIS[source])  # prevent mutation of global state
    if min is not None:
        data_params["bathy_vis_params"]["min"] = min
    if max is not None:
        data_params["topo_vis_params"]["max"] = max

    image = ee.Image(source)
    gebco = image.select(data_params["bandNames"][band])

    land_mask = LANDMASK
    hillshaded = visualize_elevation(
        image=gebco,
        land_mask=land_mask,
        data_params=data_params,
        bathy_only=False,
        hillshade_image=True,
    )
    url = _get_gee_url(hillshaded)

    info = {}
    info["dataset"] = "gebco"
    info["band"] = band

    linear_gradient = []
    palette = (
        data_params["bathy_vis_params"]["palette"]
        + data_params["topo_vis_params"]["palette"]
    )
    n_colors = len(palette)
    offsets = np.linspace(0, 100, num=n_colors)
    for color, offset in zip(palette, offsets):
        linear_gradient.append(
            {"offset": "{:.3f}%".format(offset), "opacity": 100, "color": color}
        )

    info.update(
        {
            "url": url,
            "linearGradient": linear_gradient,
            "min": data_params["bathy_vis_params"]["min"],
            "max": data_params["topo_vis_params"]["max"],
            "imageId": source,
        }
    )
    return info
ccd382f1e1ede4cbe58bca6fc7eec15aa1b0a85a
6,781
import sys


def show():
    """ Send values for turning pixels on """
    if not available:
        return 2
    else:
        lockBus()
        try:
            bus.write_byte(arduinoAddress, 0x06)
        except:
            errorMsg = sys.exc_info()[0]
            errorHandler(5, errorMsg)
        unlockBus()
1da6b710a38349de282e9307d3638a220473e32f
6,782
import asyncio
import functools


def bound_concurrency(size):
    """Decorator to limit concurrency on coroutine calls"""
    sem = asyncio.Semaphore(size)

    def decorator(func):
        """Actual decorator"""
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            """Wrapper"""
            async with sem:
                return await func(*args, **kwargs)
        return wrapper

    return decorator
030e4dea0efccf9d5f2cbe4a40f3e6f32dfef846
6,783
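A small, self-contained sketch of how the decorator above might be applied; the fetch coroutine, its 0.1 s sleep, and the demo driver are stand-ins of my own, and creating the semaphore at decoration time (outside a running loop) is unproblematic on Python 3.10+.

import asyncio

@bound_concurrency(2)            # at most 2 fetches run at the same time
async def fetch(i):
    await asyncio.sleep(0.1)     # placeholder for real I/O
    return i

async def demo():
    results = await asyncio.gather(*(fetch(i) for i in range(6)))
    print(results)               # [0, 1, 2, 3, 4, 5]

asyncio.run(demo())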
import socket


def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True):
    """
    <Purpose>
      Given the url, hashes and length of the desired file, this function
      opens a connection to 'url' and downloads the file while ensuring its
      length and hashes match 'required_hashes' and 'required_length'.

      tuf.util.TempFile is used instead of regular tempfile object because of
      additional functionality provided by 'tuf.util.TempFile'.

    <Arguments>
      url:
        A URL string that represents the location of the file.

      required_length:
        An integer value representing the length of the file.

      STRICT_REQUIRED_LENGTH:
        A Boolean indicator used to signal whether we should perform strict
        checking of required_length. True by default. We explicitly set this
        to False when we know that we want to turn this off for downloading
        the timestamp metadata, which has no signed required_length.

    <Side Effects>
      A 'tuf.util.TempFile' object is created on disk to store the contents
      of 'url'.

    <Exceptions>
      tuf.DownloadLengthMismatchError, if there was a mismatch of observed
      vs expected lengths while downloading the file.

      tuf.FormatError, if any of the arguments are improperly formatted.

      Any other unforeseen runtime exception.

    <Returns>
      A 'tuf.util.TempFile' file-like object which points to the contents of
      'url'.
    """
    # Do all of the arguments have the appropriate format?
    # Raise 'tuf.FormatError' if there is a mismatch.
    tuf.formats.URL_SCHEMA.check_match(url)
    tuf.formats.LENGTH_SCHEMA.check_match(required_length)

    # 'url.replace()' is for compatibility with Windows-based systems because
    # they might put back-slashes in place of forward-slashes. This converts
    # it to the common format.
    url = url.replace('\\', '/')
    logger.info('Downloading: ' + str(url))

    # NOTE: Not thread-safe.
    # Save current values or functions for restoration later.
    previous_socket_timeout = socket.getdefaulttimeout()
    previous_http_response_class = httplib.HTTPConnection.response_class

    # This is the temporary file that we will return to contain the contents
    # of the downloaded file.
    temp_file = tuf.util.TempFile()

    try:
        # NOTE: Not thread-safe.
        # Set timeout to induce non-blocking socket operations.
        socket.setdefaulttimeout(tuf.conf.SOCKET_TIMEOUT)
        # Replace the socket file-like object class with our safer version.
        httplib.HTTPConnection.response_class = SaferHTTPResponse

        # Open the connection to the remote file.
        connection = _open_connection(url)

        # We ask the server about how big it thinks this file should be.
        reported_length = _get_content_length(connection)

        # Then, we check whether the required length matches the reported
        # length.
        _check_content_length(reported_length, required_length)

        # Download the contents of the URL, up to the required length, to a
        # temporary file, and get the total number of downloaded bytes.
        total_downloaded = _download_fixed_amount_of_data(connection, temp_file,
                                                          required_length)

        # Does the total number of downloaded bytes match the required length?
        _check_downloaded_length(total_downloaded, required_length,
                                 STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH)

    except:
        # Close 'temp_file'; any written data is lost.
        temp_file.close_temp_file()
        logger.exception('Could not download URL: ' + str(url))
        raise

    else:
        return temp_file

    finally:
        # NOTE: Not thread-safe.
        # Restore previously saved values or functions.
        httplib.HTTPConnection.response_class = previous_http_response_class
        socket.setdefaulttimeout(previous_socket_timeout)
a9df32371f24a85971807354636621224fc8f7bd
6,784
def tune_speed_librosa(src=None, sr=_sr, rate=1., out_type=np.ndarray):
    """
    Change the speech speed (time-stretch) of the input audio.
    :param src:
    :param rate:
    :return:
    """
    wav = anything2wav(src, sr=sr)
    spec = librosa.stft(wav)
    spec = zoom(spec.T, rate=1 / rate, is_same=0).T
    out = librosa.istft(spec)
    # out = librosa.griffinlim(spec, n_iter=10)
    if out_type is np.ndarray:
        return out
    else:
        return anything2bytesio(out, sr=sr)
423b83c6a266e8ee2b259bf3497e53ff2087ca44
6,785
import pathlib


def fqn_from_file(java_filepath: pathlib.Path) -> str:
    """Extract the expected fully qualified class name for the given java file.

    Args:
        java_filepath: Path to a .java file.
    """
    if not java_filepath.suffix == ".java":
        raise ValueError("{} not a path to a .java file".format(java_filepath))
    package = extract_package(java_filepath)
    simple_name = java_filepath.name[:-len(java_filepath.suffix)]
    return fqn(package, simple_name)
cb1d515af968c1653d31f0529ce40fa6241cf1f4
6,786
def assert_raises(*args, **kwargs):
    """Assert an exception is raised as a context manager or by passing in a
    callable and its arguments.

    As a context manager:
    >>> with assert_raises(Exception):
    ...     raise Exception

    Pass in a callable:
    >>> def raise_exception(arg, kwarg=None):
    ...     raise Exception
    >>> assert_raises(Exception, raise_exception, 1, kwarg=234)
    """
    if (len(args) == 1) and not kwargs:
        return _assert_raises_context_manager(args[0])
    else:
        return _assert_raises(*args, **kwargs)
6ef00a131f6ce5192e88fe9bab34f5cd04dd5a8a
6,787
import click


def proxy(ctx, control, host, port, socket, proxy):
    """Settings to configure the connection to a Tor node acting as proxy."""
    if control == 'port':
        if host is None or port is None:
            raise click.BadOptionUsage(
                option_name='control',
                message=f"--control mode '{control}' requires --host and --port to be defined as well.")
    elif control == 'socket':
        if socket is None:
            raise click.BadOptionUsage(
                option_name='control',
                message="--control mode 'socket' requires --socket to be defined as well.")

    return {'proxy': {
        'control': control,
        'host': host,
        'port': port,
        'socket': socket,
        'proxy': proxy
    }}
4fe25cb7dc38116e26fe61b43e3903908e098459
6,788
import gzip


def get_gzip_uncompressed_file_size(file_name):
    """
    this function will return the uncompressed size of a gzip file
    similar as gzip -l file_name
    """
    file_obj = gzip.open(file_name, 'r')
    file_obj.seek(-8, 2)
    # crc32 = gzip.read32(file_obj)
    isize = gzip.read32(file_obj)
    return isize
bf1e40a83098fa32c95959e28069e4a4d4dcc2d7
6,789
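The snippet above leans on gzip.read32, a private helper from older Python versions that is not part of the public gzip API, and it seeks on the gzip-wrapped stream rather than the raw file. A self-contained variant of the same trick (reading the 4-byte little-endian ISIZE field at the end of the gzip member from the raw file) could look like the sketch below; the function name is mine, and note that ISIZE stores the size modulo 2**32, so it is only reliable for inputs under 4 GiB.

import struct

def gzip_uncompressed_size(file_name):
    # The last 4 bytes of a gzip member hold ISIZE: input size mod 2**32.
    with open(file_name, 'rb') as f:
        f.seek(-4, 2)
        return struct.unpack('<I', f.read(4))[0]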
def Capitalize(v):
    """Capitalise a string.

    >>> s = Schema(Capitalize)
    >>> s('hello world')
    'Hello world'
    """
    return str(v).capitalize()
9072ea91b946694bbb1410fb10a5b1b1f5cdd7c2
6,790
def pg_index_exists(conn, schema_name: str, table_name: str, index_name: str) -> bool:
    """
    Does a postgres index exist?

    Unlike pg_exists(), we don't need heightened permissions on the table.

    So, for example, Explorer's limited-permission user can check agdc/ODC
    tables that it doesn't own.
    """
    return (
        conn.execute(
            """
            select indexname
            from pg_indexes
            where schemaname=%(schema_name)s
              and tablename=%(table_name)s
              and indexname=%(index_name)s
            """,
            schema_name=schema_name,
            table_name=table_name,
            index_name=index_name,
        ).scalar()
        is not None
    )
98ebdc0db7f3e42050e61205fd17309d015352a0
6,791
def create_mock_data(bundle_name: str, user_params: dict):
    """
    create some mock data and push to S3 bucket
    :param bundle_name: str, bundle name
    :param user_params: dict, what parameters to save
    :return:
    """
    api.context(context_name)
    api.remote(context_name, remote_context=context_name, remote_url=s3_path)
    component_signature = {k: str(v) for k, v in user_params.items()}
    proc_name = api.Bundle.calc_default_processing_name(
        bundle_name, component_signature, dep_proc_ids={})
    with api.Bundle(context_name, name=bundle_name, processing_name=proc_name) as b:
        b.add_params(component_signature)  # local_path will be replaced by S3 by Disdat
    api.commit(context_name, bundle_name)
    api.push(context_name, bundle_name)  # save the bundle to S3
    return b.uuid  # return the bundle uuid
0fd377eac24555306aceff26a61d4a2b4666d33d
6,792
def _vertex_arrays_to_list(x_coords_metres, y_coords_metres):
    """Converts set of vertices from two arrays to one list.

    V = number of vertices

    :param x_coords_metres: length-V numpy array of x-coordinates.
    :param y_coords_metres: length-V numpy array of y-coordinates.
    :return: vertex_list_xy_metres: length-V list, where each element is an
        (x, y) tuple.
    """
    _check_polyline(
        x_coords_metres=x_coords_metres, y_coords_metres=y_coords_metres)

    num_vertices = len(x_coords_metres)
    vertex_list_xy_metres = []
    for i in range(num_vertices):
        vertex_list_xy_metres.append((x_coords_metres[i], y_coords_metres[i]))

    return vertex_list_xy_metres
ef5bed973f684670f979f6cdb0fcfc38b45a4557
6,793
from typing import Optional
from typing import Dict
from typing import Any


def info_from_apiKeyAuth(token: str, required_scopes) -> Optional[Dict[str, Any]]:
    """
    Check and retrieve authentication information from an API key.

    Returned value will be passed in 'token_info' parameter of your operation
    function, if there is one. 'sub' or 'uid' will be set in 'user' parameter
    of your operation function, if there is one.

    Should return None if auth is invalid or does not allow access to called
    API.

    The real work happens in Auth0._set_user().
    """
    return {"token": token, "method": "apikey"}
29fec65450780e14dfc94979ba2fb73c00d2a4bf
6,794
from datetime import datetime


def convert_unix2dt(series):
    """
    Parameters
    ----------
    series : column from pandas dataframe in UNIX microsecond formatting

    Returns
    -------
    timestamp_dt : series in date-time format
    """
    if (len(series) == 1):
        unix_s = series/1000
    else:
        unix_s = series.squeeze()/1000

    timestamp_dt = np.zeros(len(unix_s), dtype='datetime64[ms]')
    for i in range(len(timestamp_dt)):
        timestamp_dt[i] = datetime.fromtimestamp(unix_s.iloc[i])

    return timestamp_dt
92b912ba85b123e9f368b3613bff4a374826130a
6,795
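A usage sketch for the converter above, assuming the series holds Unix epoch values in milliseconds (the division by 1000 implies milliseconds rather than the microseconds mentioned in the docstring) and that np is the usual numpy import; pandas is needed because the loop indexes with .iloc.

import numpy as np
import pandas as pd

ts_ms = pd.Series([1609459200000, 1609459260000])   # two timestamps one minute apart
print(convert_unix2dt(ts_ms))
# prints a datetime64[ms] array; exact values depend on the local timezone,
# because datetime.fromtimestamp converts to local time.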
from volapi import Room
import sys
import time


def main():
    """Program, kok"""
    args = parse_args()
    if args.bind:
        override_socket(args.bind)
    try:
        check_update()
    except Exception as ex:
        print("Failed to check for new version:", ex,
              file=sys.stderr, flush=True)

    stat = Statistics()
    total_current = 0
    try:
        print("Starting DoS... ", end="", flush=True)
        with Room(args.room, args.user, subscribe=False) as room:
            print("done")
            if args.passwd:
                print("Greenfagging in as {}... ".format(args.user),
                      end="", flush=True)
                room.user.login(args.passwd)
                print("done")

            files = args.files
            if any(f.name == "Thumbs.db" for f in files):
                class NotGonnaDoIt(Exception):
                    """roboCop, pls"""
                    pass

                raise NotGonnaDoIt("No Thumbs.db for you!")

            total_length = sum(f.size for f in files)
            print("Pushing attack bytes to mainframe... {:.2f}MB in total".
                  format(total_length / FAC),
                  flush=True)
            upload_file = partial(upload, room=room,
                                  block_size=args.block_size,
                                  force_server=args.force_server,
                                  prefix=args.prefix)
            for i, file in enumerate(files):
                for attempt in range(args.attempts):
                    try:
                        nums = dict(item=i + 1, files=len(files),
                                    cur=total_current, total=total_length)
                        upload_file(file=file, nums=nums)
                        total_current += file.size
                        stat.record(total_current)
                        if args.delete:
                            try_unlink(file)
                        # Exit attempt loop
                        break
                    except Exception as ex:
                        print("\nFailed to upload {}: {} (attempt: {})".
                              format(file, ex, attempt),
                              file=sys.stderr, flush=True)
                        time.sleep(attempt * 0.1)
    except Exception as ex:
        print("\nFailure to fly: {} ({})".format(ex, type(ex)),
              file=sys.stderr, flush=True)
        return 1
    except KeyboardInterrupt:
        print("\nUser canceled", file=sys.stderr, flush=True)
        return 3
    finally:
        print("All done in {:.2f}secs ({:.2f}MB/s)".
              format(stat.runtime, stat.rate))
    return 0
a9ffc9d47c78f49e11ec166d2b1552875a4c5244
6,796
import re


def sentence_segment(text, delimiters=('?', '?', '!', '!', '。', ';', '……', '…'), include_symbols=True):
    """
    Sentence segmentation
    :param text: query
    :param delimiters: set
    :param include_symbols: bool
    :return: list(word, idx)
    """
    result = []
    delimiters = set([item for item in delimiters])
    # Escape regex metacharacters such as '?' and '!' before joining,
    # otherwise re.split raises "nothing to repeat".
    delimiters_str = '|'.join(re.escape(item) for item in delimiters)
    blocks = re.split(delimiters_str, text)
    start_idx = 0
    for blk in blocks:
        if not blk:
            continue
        result.append((blk, start_idx))
        start_idx += len(blk)
        if include_symbols and start_idx < len(text):
            result.append((text[start_idx], start_idx))
            start_idx += 1
    return result
c8860a872e779873330eaded8e9951cabdbba01e
6,797
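A quick usage check of the segmenter above (with the escaping fix applied), showing the (text, index) pairs it returns, including the delimiter symbols themselves.

print(sentence_segment("Hello! How are you?"))
# -> [('Hello', 0), ('!', 5), (' How are you', 6), ('?', 18)]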
def time_rep_song_to_16th_note_grid(time_rep_song):
    """
    Transform the time_rep_song into an array of 16th note with pitches
    in the onsets
    [[60,4],[62,2],[60,2]] -> [60,0,0,0,62,0,60,0]
    """
    grid_16th = []
    for pair_p_t in time_rep_song:
        grid_16th.extend([pair_p_t[0]] + [0 for _ in range(pair_p_t[1]-1)])
    return grid_16th
8986819bd39ae4830d04bf40ab158d310bb45485
6,798
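A quick check of the grid expansion above, using the example from its own docstring.

song = [[60, 4], [62, 2], [60, 2]]       # (MIDI pitch, duration in 16th notes)
print(time_rep_song_to_16th_note_grid(song))
# -> [60, 0, 0, 0, 62, 0, 60, 0]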
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True):
    """_double_threshold
    Computes a double threshold over the input array

    :param x: input array, needs to be 1d
    :param high_thres: High threshold over the array
    :param low_thres: Low threshold over the array
    :param n_connect: Postprocessing, maximal distance between clusters to connect
    :param return_arr: By default this function returns the filtered indices,
        but if return_arr = True it returns an array of the same size as x
        filled with ones and zeros.
    """
    assert x.ndim == 1, "Input needs to be 1d"
    high_locations = np.where(x > high_thres)[0]
    locations = x > low_thres
    encoded_pairs = find_contiguous_regions(locations)
    filtered_list = list(
        filter(
            lambda pair:
            ((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),
            encoded_pairs))
    filtered_list = connect_(filtered_list, n_connect)
    if return_arr:
        zero_one_arr = np.zeros_like(x, dtype=int)
        for sl in filtered_list:
            zero_one_arr[sl[0]:sl[1]] = 1
        return zero_one_arr
    return filtered_list
74a34ed39336c35dfc7eb954af12bb30b3089609
6,799
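The helper above depends on find_contiguous_regions and connect_, which are not part of this snippet, so it cannot run standalone. As a rough, self-contained illustration of the same double-threshold (hysteresis) idea with plain numpy only — keep the runs above the low threshold that contain at least one value above the high threshold — one might write the sketch below; the function name and the toy data are mine.

import numpy as np

def double_threshold_demo(x, high_thres, low_thres):
    mask = x > low_thres
    # Boundaries of contiguous True-runs in the low-threshold mask.
    padded = np.concatenate(([0], mask.astype(np.int8), [0]))
    edges = np.flatnonzero(np.diff(padded))
    out = np.zeros_like(x, dtype=int)
    for start, stop in zip(edges[::2], edges[1::2]):
        if (x[start:stop] > high_thres).any():   # keep only runs that pass the high bar
            out[start:stop] = 1
    return out

x = np.array([0.1, 0.4, 0.9, 0.5, 0.1, 0.45, 0.2])
print(double_threshold_demo(x, high_thres=0.8, low_thres=0.3))
# -> [0 1 1 1 0 0 0]: the lone 0.45 run has no value above 0.8, so it is dropped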