Dataset columns:
content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
import numpy as np
import torch


def get_audio_features(audios_data, audio_tStamp, frameRate, video_length, device='cuda'):
    """audio feature extraction"""
    # NOTE: assumes ResNet50 (a feature extractor returning mean/std features) is
    # defined or imported elsewhere in this module.
    extractor = ResNet50().to(device)
    output1 = torch.Tensor().to(device)
    output2 = torch.Tensor().to(device)
    extractor.eval()
    patchSize = 224
    frameSkip = 2
    with torch.no_grad():
        for iFrame in range(1, video_length, frameSkip):
            tCenter = np.argmin(abs(audio_tStamp - iFrame / frameRate))
            tStart = tCenter - patchSize / 2 + 1
            tEnd = tCenter + patchSize / 2
            if tStart < 1:
                tStart = 1
                tEnd = patchSize
            else:
                if tEnd > audios_data.shape[2]:
                    tStart = audios_data.shape[2] - patchSize + 1
                    tEnd = audios_data.shape[2]
            specRef_patch = audios_data[:, :, int(tStart - 1): int(tEnd)]
            refRGB = torch.cat((specRef_patch, specRef_patch, specRef_patch), 0)
            last_batch = refRGB.view(1, 3, specRef_patch.shape[1], specRef_patch.shape[2]).float().to(device)
            features_mean, features_std = extractor(last_batch)
            output1 = torch.cat((output1, features_mean), 0)
            output2 = torch.cat((output2, features_std), 0)
    output = torch.cat((output1, output2), 1).squeeze()
    return output
149f22fe855f52d63ffc2174800a83afb2568246
30,764
import asyncio


async def node_watch_profile_report_builder(data_id: str):
    """
    Allows the front-end to update the display information once a profile
    report builds successfully. Necessary because the profile report entails
    opening a separate tab.
    """
    time_waited = 0
    while time_waited < 600:
        if fs.profile_report_exists(data_id):
            return UpdateNode(node=get_node_by_data_id(data_id))
        await asyncio.sleep(5)
        time_waited += 5
    raise HTTPException(
        status_code=400,
        detail="The report either failed to generate or took too long"
    )
6cb6552d1a05726e77a0f31e5b3f4625752b2d1b
30,765
def write_report_systemsorted(system, username): """ function that prepares return values and paths """ """ the return values (prefix 'r') are used for the `mkdocs.yml` file they build the key-value-pair for every system """ # return system_id for mkdocs.yml rid = str(system.system_id) # return fqdn for mkdocs.yml if system.dnsname != None: rfqdn = system.system_name + "." + system.dnsname.dnsname_name else: rfqdn = system.system_name """ build the path for every file it is distinguished between the short version for the `mkdocs.yml` file ('value' of key-value-pair) and the long version that is used to write to the file system """ # build path path = system.system_name # check for domain and add to path if system.domain != None: path = path + "_" + system.domain.domain_name # check for system_install_time and add to path if system.system_install_time != None: install_time = system.system_install_time.strftime('%Y%m%d_%H%M%S') path = path + "_" + install_time # return shortened path for mkdocs.yml ('value') rpath = "systems/" + path + ".md" # get config model model = SystemExporterMarkdownConfigModel.objects.get(system_exporter_markdown_config_name = 'SystemExporterMarkdownConfig') # finish path for markdown file path = model.markdown_path + "/docs/systems/" + path + ".md" # open file for system report = open(path, "w") django_report = File(report) # write systemreport write_report.write_report(django_report, system) # close and save file django_report.closed report.close() # call logger info_logger(username, " SYSTEM_MARKDOWN_CREATED system_id:" + str(system.system_id) + "|system_name:" + str(system.system_name)) # return strings for mkdocs.yml (only used in systemsorted_async) return(rid, rfqdn, rpath)
f080203b1384277a9c6a0c934758d13674978df0
30,766
def is_outlier(points, threshold=3.5):
    """
    Returns a boolean array with True if points are outliers and False
    otherwise. Data points with a modified z-score greater than `threshold`
    will be classified as outliers.
    """
    # transform into vectors
    if len(points.shape) == 1:
        points = points[:, None]
    # compute median value
    median = np.median(points, axis=0)
    # compute diff sums along the axis
    diff = np.sum((points - median)**2, axis=-1)
    diff = np.sqrt(diff)
    # compute MAD
    med_abs_deviation = np.median(diff)
    # compute modified z-score
    # http://www.itl.nist.gov/div898/handbook/eda/section4/eda43.html#Iglewicz
    modified_z_score = 0.6745 * diff / med_abs_deviation
    # return a mask for each outlier
    return modified_z_score > threshold
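A minimal usage sketch for `is_outlier`, assuming NumPy is imported as `np` in the snippet's surrounding module (the function itself relies on that):

import numpy as np

# Mostly tightly clustered values plus one obvious outlier.
data = np.array([9.8, 10.1, 10.0, 9.9, 10.2, 55.0])
mask = is_outlier(data, threshold=3.5)
print(mask)          # [False False False False False  True]
print(data[~mask])   # keeps only the inliers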
edc28706b37a6c1cfef356f45dd87c076779fe6d
30,769
def hill_climbing_random_restart(problem, restarts=10):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Figure 4.2]"""
    # restarts = number of random restarts once an unimprovable state is reached
    current = Node(problem.initial)
    best = current  # keep track of the best state seen so far
    while True:
        if problem.value(current.state) > problem.value(best.state):
            best = current
        if problem.goal_test(current.state):
            break
        neighbors = current.expand(problem)
        if neighbors:
            neighbor = argmax_random_tie(neighbors,
                                         key=lambda node: problem.value(node.state))
            if problem.value(neighbor.state) > problem.value(current.state):
                current = neighbor
            else:
                if restarts > 0:
                    restarts -= 1
                    current = Node(generate_random_state(problem.N))
                else:
                    break
        else:
            if restarts > 0:
                restarts -= 1
                current = Node(generate_random_state(problem.N))
            else:
                break
    return current.state
78846f5d67465c981b712d00da7a0d76bbf152bd
30,770
def ordinal(n):
    """Converts an integer into its ordinal equivalent.

    Args:
        n: number to convert

    Returns:
        nth: ordinal representation of passed integer
    """
    nth = "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
    return nth
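To illustrate the slicing trick in `ordinal`: the index expression evaluates to an offset of 0 to 3 into "tsnrhtdd", and the stride of 4 then picks out one of the suffixes "th", "st", "nd", "rd" (offset 0 gives "th", 1 gives "st", 2 gives "nd", 3 gives "rd"). A quick check, assuming `ordinal` is in scope:

for n in (1, 2, 3, 4, 11, 12, 13, 21, 101, 111):
    print(ordinal(n))
# 1st, 2nd, 3rd, 4th, 11th, 12th, 13th, 21st, 101st, 111th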
7f438c89a6b0f7adbc42f2eb1e619ca4bf862b4a
30,771
def check_date(date):
    """check if date string has correct format.

    Args:
        date as a string mmddyyyy

    Returns:
        a boolean indicating if valid (True) or not (False)
    """
    if len(date) != 8:
        return False
    if not date.isdigit():
        return False
    # months are between '01' ~ '12'
    if (date[0] != '1' and date[0] != '0'):
        return False
    if date[0] == '1':
        if (date[1] != '0') and (date[1] != '1') and (date[1] != '2'):
            return False
    # dates are between 0 ~ 31
    if (date[2] != '0') and (date[2] != '1') \
            and (date[2] != '2') and (date[2] != '3'):
        return False
    return True
8972498d94d459ba48851049780e46b057855d9f
30,772
async def expected_raceplan_individual_sprint_27_contestants(
    event_individual_sprint: dict,
) -> Raceplan:
    """Create a mock raceplan object - 27 contestants."""
    raceplan = Raceplan(event_id=event_individual_sprint["id"], races=list())
    raceplan.id = "390e70d5-0933-4af0-bb53-1d705ba7eb95"
    raceplan.no_of_contestants = 27
    return raceplan
969cba41b0cdcdd83317cd98a57b437f66981dbe
30,773
def signal_to_m_converter(dataframe, dbm="4(dBm)"):
    """
    This function converts a (beacon) dataframe with signal values from the
    tracer to the corresponding *m*eter values, depending on the dBm power
    that was used. By default dbm = 4(dBm)
    """
    # extract all different values from dataframe
    dataframe_unique_values = np.unique(dataframe)
    df_txpower = pd.DataFrame(constant.txpower_vs_distance)
    # extract the used power values from table_"Beacon_datasheet"
    choose_power = df_txpower[dbm]
    # calculate the length of the power level column, used for later iteration
    lenght_power = (df_txpower[dbm]).count()
    # empty list for collecting the corresponding meter values for each signal value
    list_meter_values = []
    flag = True
    # loop over unique_values of the dataframe
    for value in dataframe_unique_values:
        # interpolation function
        for i in range(0, lenght_power):
            if choose_power[i] >= value and value >= choose_power[i + 1]:
                meter_value = (
                    (df_txpower["Distance(m)"][i + 1] - df_txpower["Distance(m)"][i])
                    / (choose_power[i + 1] - choose_power[i])
                    * (value - choose_power[i])
                    + df_txpower["Distance(m)"][i]
                )
                list_meter_values.append(meter_value)
                if flag:
                    print("\nDistance i+1", df_txpower["Distance(m)"][i + 1])
                    print("\nDistance i", df_txpower["Distance(m)"][i])
                    print("\nchoose_power i+1", choose_power[i + 1])
                    print("\nchoose_power i", choose_power[i])
                    print("\nvalue", value)
                    print("\ndf_txpower[distance][i]", df_txpower["Distance(m)"][i])
                    flag = False
                break
            else:
                meter_value = np.nan
                list_meter_values.append(meter_value)
    mod_dataframe = dataframe.replace(list(dataframe_unique_values), list_meter_values)
    return mod_dataframe
55e58553a8685287a0d07e3c3d2432408e46ba04
30,774
def return_lines_as_list(file):
    """
    :rtype: list of str
    """
    # read lines
    lines = file.readlines()

    def strip(string):
        """
        Removes whitespace from beginning and end of string
        :type string: str
        """
        return string.strip()

    # Converts our lines to a list
    return list(map(strip, lines))
69e3d45fa3df107a8852d10e104a543c014a6c79
30,775
def cvInitMatNDHeader(*args):
    """cvInitMatNDHeader(CvMatND mat, int dims, int type, void data=None) -> CvMatND"""
    return _cv.cvInitMatNDHeader(*args)
152f49b20a858e7bbb7229d77cdeffa6fd1ed049
30,776
def getObjectInfo(fluiddb, objectId):
    """
    Get information about an object.
    """
    return fluiddb.objects[objectId].get(showAbout=True)
baad59e6585e04a8c2a8cca1df305327b80f3768
30,777
def calculate_logAUC(true_y, predicted_score, FPR_range=(0.001, 0.1)):
    """
    Calculate logAUC in a certain FPR range (default range: [0.001, 0.1]).
    This was used by previous methods [1] and the reason is that only a small
    percentage of samples can be selected for experimental tests in
    consideration of cost. This means only molecules with very high predicted
    activity values can be worth testing, i.e., the decision threshold is
    high. And the high decision threshold corresponds to the left side of the
    ROC curve, i.e., those FPRs with small values. Also, because the threshold
    cannot be predetermined, the area under the curve is used to consolidate
    all possible thresholds within a certain FPR range. Finally, the logarithm
    is used to bias smaller FPRs. The higher the logAUC[0.001, 0.1], the
    better the performance.

    A perfect classifier gets a logAUC[0.001, 0.1] of 1, while a random
    classifier gets a logAUC[0.001, 0.1] of around 0.0215 (see [2]).

    References:
    [1] Mysinger, M.M. and B.K. Shoichet, Rapid Context-Dependent Ligand
        Desolvation in Molecular Docking. Journal of Chemical Information and
        Modeling, 2010. 50(9): p. 1561-1573.
    [2] Mendenhall, J. and J. Meiler, Improving quantitative
        structure-activity relationship models using Artificial Neural
        Networks trained with dropout. Journal of computer-aided molecular
        design, 2016. 30(2): p. 177-189.

    :param true_y: numpy array of the ground truth
    :param predicted_score: numpy array of the predicted score (The score
        does not have to be between 0 and 1)
    :param FPR_range: the range for calculating the logAUC formatted in
        (x, y) with x being the lower bound and y being the upper bound
    :return: a numpy array of logAUC of size [1,1]
    """
    if FPR_range is not None:
        range1 = np.log10(FPR_range[0])
        range2 = np.log10(FPR_range[1])
        if range1 >= range2:
            raise Exception('FPR range2 must be greater than range1')
    # print(f'true_y:{true_y}, predicted_score:{predicted_score}')
    fpr, tpr, thresholds = roc_curve(true_y, predicted_score, pos_label=1)
    x = fpr
    y = tpr
    x = np.log10(x)
    y1 = np.append(y, np.interp(range1, x, y))
    y = np.append(y1, np.interp(range2, x, y))
    x = np.append(x, range1)
    x = np.append(x, range2)
    x = np.sort(x)
    # print(f'x:{x}')
    y = np.sort(y)
    # print(f'y:{y}')
    range1_idx = np.where(x == range1)[-1][-1]
    range2_idx = np.where(x == range2)[-1][-1]
    trim_x = x[range1_idx:range2_idx + 1]
    trim_y = y[range1_idx:range2_idx + 1]
    area = auc(trim_x, trim_y) / 2
    return area
fc75fd9a361435f31c4f089e7c6e2976330affd7
30,778
def format_inline(str_, reset='normal'):
    """Format a string if there is any markup present."""
    if const.regex['url'].search(str_):
        text = slugify(str_.split('[[')[1].split('][')[1].split(']]')[0])
        str_ = const.regex['url'].sub(const.styles['url'] + text + const.styles[reset], str_)
    for key, val in const.inline.items():
        if val['pattern'].search(str_):
            matches = val['pattern'].findall(str_)
            repls = [val["cols"] + x.replace(val["delim"], "") + const.styles[reset]
                     for x in matches]
            for x, y in zip(matches, repls):
                str_ = str_.replace(x, y)
    return str_
9cd6819bff098051812f23825bcdb61e7305d650
30,779
from typing import Dict
from typing import Any
import codecs
import pickle


def serialize_values(
    data_dictionary: Dict[str, Any], data_format: PersistedJobDataFormat
) -> Dict[str, Any]:
    """
    Serializes the `data_dictionary` values to the format specified by `data_format`.

    Args:
        data_dictionary (Dict[str, Any]): Dict whose values are to be serialized.
        data_format (PersistedJobDataFormat): The data format used to serialize the
            values. Note that for `PICKLED` data formats, the values are base64 encoded
            after serialization, so that they represent valid UTF-8 text and are
            compatible with `PersistedJobData.json()`.

    Returns:
        Dict[str, Any]: Dict with same keys as `data_dictionary` and values serialized
        to the specified `data_format`.
    """
    return (
        {
            k: codecs.encode(pickle.dumps(v, protocol=4), "base64").decode()
            for k, v in data_dictionary.items()
        }
        if data_format == PersistedJobDataFormat.PICKLED_V4
        else data_dictionary
    )
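A minimal round-trip sketch for the pickled format above; `PersistedJobDataFormat` is assumed to be an enum with a `PICKLED_V4` member, and the decoding helper below is hypothetical (it is not part of the snippet):

import codecs
import pickle

def deserialize_values(data_dictionary, data_format):
    # Hypothetical reverse of serialize_values for the pickled/base64 format.
    if data_format == PersistedJobDataFormat.PICKLED_V4:
        return {
            k: pickle.loads(codecs.decode(v.encode(), "base64"))
            for k, v in data_dictionary.items()
        }
    return data_dictionary

serialized = serialize_values({"weights": [0.1, 0.2]}, PersistedJobDataFormat.PICKLED_V4)
restored = deserialize_values(serialized, PersistedJobDataFormat.PICKLED_V4)
assert restored == {"weights": [0.1, 0.2]}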
9dc1116357c2dd50f16bf9b1b1d5eec56ea6b4f7
30,781
def print_level_order(tree):
    """
    prints each level of k-tree on own line
    input <--- Tree
    output <--- Prints nodes level by level
    """
    if not isinstance(tree, KTree):
        raise TypeError('argument must be of type <KTree>')
    all_strings = []

    def recurse(nodelist):
        nonlocal all_strings
        new_list = []
        printlist = []
        for node in nodelist:
            printlist.append(str(node.val))
            for child in node.children:
                new_list.append(child)
        string = ' '.join(printlist)
        all_strings.append(string)
        if len(new_list):
            recurse(new_list)

    if tree.root:
        recurse([tree.root])
    return '\n'.join(all_strings)
7bb5d43725dbe351a85792f685ad504ca1e2d263
30,782
def spark_add():
    """ReduceByKey with the addition function.

    :input RDD data: The RDD to convert.
    :output Any result: The result.
    """
    def inner(data: pyspark.rdd.RDD) -> ReturnType[pyspark.rdd.RDD]:
        o = data.reduceByKey(lambda a, b: a + b)
        return ReturnEntry(result=o)
    return inner
7cab67ac0f6a55911ca3e46612487bbe329b5d6f
30,783
def edit(): """ Allows the user to edit or delete a reservation """ user = db.session.query(models.Rideshare_user).filter(models.Rideshare_user.netid == session['netid']).first() form = forms.EditReservationFactory() reservation = None rideNumber = request.args.get('rideNo') userHasRev=check_user_has_rev(rideNumber) #check if user has this reservation before proceeding if userHasRev: if form.validate_on_submit(): cancel,newSpots,comments=extract_info(form) ride = db.session.query(models.Ride).filter(models.Ride.ride_no == rideNumber).first() reservation = db.session.query(models.Reserve).filter(models.Reserve.ride_no == rideNumber)\ .filter(models.Reserve.rider_netid==session['netid']).first() if cancel == "Yes": newSpots = cancel_reservation(reservation) email_driver_cancellation(user, ride, reservation) else: updatedSpots = int(request.form['spots_needed']) #only update spots if enough room in the ride if valid_new_rev(reservation, ride, updatedSpots): newSpots = update_reservation(reservation, updatedSpots, comments) else: return render_template('accountPages/edit-reservation.html', reservation=reservation, ride=ride, form=form) ride.seats_available = ride.seats_available - newSpots db.session.commit() return redirect(url_for('rides.account_main')) return render_template('accountPages/edit-reservation.html', user=user, form=form, reservation=reservation, userHasRev=userHasRev)
cfed4d98975d4e7abeb7021eba250c4f1e88c641
30,784
from typing import Type from pathlib import Path async def study_export( app: web.Application, tmp_dir: str, project_id: str, user_id: int, product_name: str, archive: bool = False, formatter_class: Type[BaseFormatter] = FormatterV2, ) -> Path: """ Generates a folder with all the data necessary for exporting a project. If archive is True, an archive will always be produced. returns: directory if archive is True else a compressed archive is returned """ # storage area for the project data base_temp_dir = Path(tmp_dir) destination = base_temp_dir / project_id destination.mkdir(parents=True, exist_ok=True) # The formatter will always be chosen to be the highest availabel version formatter = formatter_class(root_folder=destination) await formatter.format_export_directory( app=app, project_id=project_id, user_id=user_id, product_name=product_name ) if archive is False: # returns the path to the temporary directory containing the study data return destination # an archive is always produced when compression is active archive_path = await zip_folder( folder_to_zip=base_temp_dir, destination_folder=base_temp_dir ) return archive_path
c07bf3244323ee5a222ad0339631c704ed10c568
30,786
def gen_fileext_type_map():
    """ Generate previewed file extension and file type relation map.
    """
    d = {}
    for filetype in list(PREVIEW_FILEEXT.keys()):
        for fileext in PREVIEW_FILEEXT.get(filetype):
            d[fileext] = filetype
    return d
3ef34884b5fff37fbf20e7e11c87e2f16310a77a
30,787
def umm_fields(item):
    """Return only the UMM part of the data"""
    return scom.umm_fields(item)
65bb71ed27612a3f504b7aae771bff69eff85bbe
30,788
def converts_to_message(*args):
    """Decorator to register a custom NumPy-to-Message handler."""
    def decorator(function):
        for message_type in args:
            if not issubclass(message_type, Message):
                raise TypeError()
            _to_message[message_type] = function
        return function
    return decorator
5fdd5875aec2962b1ee19766f08c522200e8ea0a
30,789
def oil_rho_sat(
    rho0: NDArrayOrFloat, g: NDArrayOrFloat, rg: NDArrayOrFloat, b0: NDArrayOrFloat
) -> NDArrayOrFloat:
    """Calculate the gas saturated oil density B&W Eq 24

    Args:
        rho0: The oil reference density (g/cc) at 15.6 degC
        g: The gas specific gravity
        rg: The Gas-to-Oil ratio (L/L)
        b0: Oil formation volume factor FVF

    Returns:
        The gas saturated oil density (g/cc) at 15.6 degC
    """
    return safe_divide((rho0 + 0.0012 * rg * g), b0)
cca2ccc3934dda8e84db03598d56660cd56edc7a
30,790
def astar(array, start, goal):
    """A* algorithm for pathfinding.

    It searches for paths excluding diagonal movements. The function is
    composed by two components, gscore and fscore, as seen below.

    f(n) = g(n) + h(n)
    """
    neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]
    close_set = set()
    came_from = {}
    gscore = {start: 0}
    fscore = {start: heuristic(start, goal)}
    oheap = []
    heappush(oheap, (fscore[start], start))
    while oheap:
        current = heappop(oheap)[1]
        if current == goal:
            data = []
            while current in came_from:
                data.append(current)
                current = came_from[current]
            return data
        close_set.add(current)
        for i, j in neighbors:
            neighbor = current[0] + i, current[1] + j
            tentative_g_score = gscore[current] + heuristic(current, neighbor)
            if 0 <= neighbor[0] < array.shape[0]:
                if 0 <= neighbor[1] < array.shape[1]:
                    if array[neighbor[0]][neighbor[1]] == 1:
                        continue
                else:
                    # array bound y walls
                    continue
            else:
                # array bound x walls
                continue
            if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):
                continue
            if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [
                i[1] for i in oheap
            ]:
                came_from[neighbor] = current
                gscore[neighbor] = tentative_g_score
                fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heappush(oheap, (fscore[neighbor], neighbor))
    return False
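The snippet relies on a `heuristic` function and on `heapq`'s `heappush`/`heappop` that are not shown here; a minimal sketch of those missing pieces and a small grid run, assuming cells equal to 1 are walls:

import numpy as np
from heapq import heappush, heappop

def heuristic(a, b):
    # Manhattan distance suits 4-connected movement (no diagonals).
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

grid = np.array([
    [0, 0, 0, 0],
    [1, 1, 1, 0],
    [0, 0, 0, 0],
])
path = astar(grid, (0, 0), (2, 0))
print(path)  # cells from the goal back towards the start (start cell excluded)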
6ddc058246d0ac8db2aa90c847eb3d55d29321c7
30,791
def download_jar(req, domain, app_id):
    """
    See ApplicationBase.create_jadjar

    This is the only view that will actually be called
    in the process of downloading a complete CommCare.jar
    build (i.e. over the air to a phone).
    """
    response = HttpResponse(mimetype="application/java-archive")
    app = req.app
    _, jar = app.create_jadjar()
    set_file_download(response, 'CommCare.jar')
    response['Content-Length'] = len(jar)
    try:
        response.write(jar)
    except Exception:
        messages.error(req, BAD_BUILD_MESSAGE)
        return back_to_main(req, domain, app_id=app_id)
    return response
97c963b3dba3a2c95fcf98b784fc31fb778c2c3d
30,792
def cond(addr, condexpr):
    """ set a condition breakpoint at addr. """
    return setBreakpoint(addr, False, condexpr)
adf2c21ef4dd32b92f546bc70c3009a47e305ee9
30,793
def get_host_credentials(config, hostname):
    """Get login information for a host `hostip` (ipv4) from marvin's `config`

    @return the tuple username, password for the host else raise keyerror"""
    for zone in config.get('zones', []):
        for pod in zone.get('pods', []):
            for cluster in pod.get('clusters', []):
                for host in cluster.get('hosts', []):
                    url = host.get('url')
                    if str(url).startswith('http'):
                        hostname_marvin = urlparse.urlsplit(str(url)).netloc
                    else:
                        hostname_marvin = str(url)
                    if hostname == hostname_marvin:
                        return host.get('username'), host.get('password')
    raise KeyError("Please provide the marvin configuration file with credentials to your hosts")
82651c247c50d3781c8e96038c373bd6c7fba4e6
30,794
def is_flat_dtype(dtype: np.dtype) -> bool:
    """
    Determines whether a numpy dtype object is flat.

    Checks whether the ``dtype`` just encodes one element or a shape. A dtype
    can characterise an array of other base types, which can then be embedded
    as an element of another array.

    Parameters
    ----------
    dtype : numpy.dtype
        The dtype to be checked.

    Raises
    ------
    TypeError
        The input is not a numpy dtype object.
    ValueError
        The dtype is structured -- this function only accepts plain dtypes.

    Returns
    -------
    is_flat : boolean
        True if the dtype is flat, False otherwise.
    """
    if not isinstance(dtype, np.dtype):
        raise TypeError('The input should be a numpy dtype object.')

    # If the dtype is complex
    if dtype.names is not None:
        raise ValueError('The numpy dtype object is structured. '
                         'Only base dtype are allowed.')

    # pylint: disable=len-as-condition
    if _NUMPY_1_13:  # pragma: no cover
        is_flat = not bool(dtype.ndim)
    else:  # pragma: no cover
        is_flat = len(dtype.shape) == 0

    return is_flat
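A quick sketch of the flat vs. non-flat distinction the docstring describes (assuming NumPy is available; the `_NUMPY_1_13` flag used in the snippet is set elsewhere in its module):

import numpy as np

flat = np.dtype(np.float64)                 # encodes a single element
sub_array = np.dtype((np.float64, (3,)))    # encodes a shape -> not flat
structured = np.dtype([("x", np.float64), ("y", np.float64)])

print(flat.shape, sub_array.shape)          # () (3,)
# is_flat_dtype(flat)       -> True
# is_flat_dtype(sub_array)  -> False
# is_flat_dtype(structured) -> raises ValueError (structured dtype)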
08c690e66a3c9303926a8d25c45dace6c2b292c7
30,795
def _solarize_impl(pil_img, level):
    """Applies PIL Solarize to `pil_img`.

    Inverts all pixel values above a threshold of ``256 - level``.

    Args:
        pil_img: Image in PIL object.
        level: Strength of the operation specified as an Integer from
            [0, `PARAMETER_MAX`].

    Returns:
        A PIL Image that has had Solarize applied to it.
    """
    level = int_parameter(level, 256)
    return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')
615650710266b91c6f91d8b93ab26ef5c5081551
30,796
def version_flash(cmd="flash"):
    """Return the version of flash (as a short string).

    Parses the output with ``-v``::

        $ flash -v | head -n 1
        FLASH v1.2.11

    It would capture the version from the first line as follows:

    >>> version_flash()
    'v1.2.11'

    If the command is not on the path, returns None.
    """
    text = getoutput(cmd + " -v")
    ver = text.split("\n", 1)[0]
    if ver.upper().startswith("FLASH V"):
        return ver[7:]
87e9fed11f9d3a3206f4e3a983db1dc165fca576
30,797
def initialise_df(*column_names):
    """
    Initialise a pandas dataframe with n column names

    :param str column_names: N column names
    :return: Empty pandas dataframe with specified column names
    """
    return pd.DataFrame(columns=column_names)
8561de29cc6a6aee1752a580c6038a84599a25c0
30,798
def _get_reporting_category(context):
    """Returns the current member reporting category"""
    member = _get_member(context)
    return member[TransactionLoops.MEMBER_REPORTING_CATEGORIES][-1]
64ed9fcaf4fd9459789a1225cf4d9dbfddbfdb49
30,799
import csv


def snp2dict(snpfile):
    """Get settings of dict from .snp file exported from save&restore app.

    Parameters
    ----------
    snpfile : str
        Filename of snp file exported from save&restore app.

    Returns
    -------
    r : dict
        Dict of pairs of PV name and setpoint value.
    """
    with open(snpfile, 'r') as fp:
        csv_data = csv.reader(fp, delimiter=',', skipinitialspace=True)
        next(csv_data)
        header = next(csv_data)
        ipv, ival = header.index('PV'), header.index('VALUE')
        settings = {line[ipv]: line[ival] for line in csv_data if line}
    return settings
cb902b3f8796685ed065bfeb8ed2d6d83c0fe80b
30,800
import numpy


def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
    """
    Set scales of near constant features to 1.

    The goal is to avoid division by very small or zero values.
    Near constant features are detected automatically by identifying
    scales close to machine precision unless they are precomputed by
    the caller and passed with the `constant_mask` kwarg.
    Typically for standard scaling, the scales are the standard
    deviation while near constant features are better detected on the
    computed variances which are closer to machine precision by
    construction.

    Parameters
    ----------
    scale : array
        Scale to be corrected.
    copy : bool
        Create copy.
    constant_mask : array
        Masking array.

    Returns
    -------
    scale : array
        Corrected scale.
    """
    # if we are fitting on 1D arrays, scale might be a scalar
    if numpy.isscalar(scale):
        if scale == .0:
            scale = 1.
        return scale
    elif isinstance(scale, numpy.ndarray):
        if constant_mask is None:
            # Detect near constant values to avoid dividing by a very small
            # value that could lead to surprising results and numerical
            # stability issues.
            constant_mask = scale < 10 * numpy.finfo(scale.dtype).eps
        if copy:
            # New array to avoid side-effects
            scale = scale.copy()
        scale[constant_mask] = 1.0
        return scale
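A short sketch of the behaviour, assuming the function above is in scope:

import numpy

stds = numpy.array([2.5, 0.0, 1e-20, 3.0])
safe = _handle_zeros_in_scale(stds, copy=True)
print(safe)                          # [2.5 1. 1. 3.] -- zero/near-zero scales become 1
print(_handle_zeros_in_scale(0.0))   # 1.0 for the scalar case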
129f28dbf74f04929fcbccf8de42ee1c8dd5a09e
30,801
def ST_LineStringFromText(geos): """ Transform the representation of linestring from WKT to WKB. :type geos: WKT :param geos: Linestring in WKT form. :rtype: WKB :return: Linestring in WKB form. :example: >>> from pyspark.sql import SparkSession >>> from arctern_pyspark import register_funcs >>> spark_session = SparkSession.builder.appName("Python Arrow-in-Spark example").getOrCreate() >>> spark_session.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true") >>> register_funcs(spark_session) >>> test_data = [] >>> test_data.extend([('LINESTRING (0 0, 0 1, 1 1, 1 0)',)]) >>> data_df = spark_session.createDataFrame(data=test_data, schema=["geos"]).cache() >>> data_df.createOrReplaceTempView("data") >>> spark_session.sql("select ST_AsText(ST_LineStringFromText(geos)) from data").show(100,0) +--------------------------------------+ |ST_AsText(ST_LineStringFromText(data))| +--------------------------------------+ |LINESTRING (0 0, 0 1, 1 1, 1 0) | +--------------------------------------+ """ return arctern.ST_GeomFromText(geos)
7cb0a5f3f6d35b49d35765ad5b5992952fe58e18
30,802
def get_container_state(pod, name):
    """Get the state of container ``name`` from a pod.

    Returns one of ``waiting, running, terminated, unknown``.
    """
    phase = pod["status"].get("phase", "Unknown")
    if phase == "Pending":
        return "waiting"
    cs = get_container_status(pod, name)
    if cs is not None:
        return next(iter(cs["state"]))
    return "unknown"
bad0143dbc2fb6998dce62665138d94f9542abc5
30,803
def implicit_ecp( objective, equality_constraints, initial_values, lr_func, max_iter=500, convergence_test=default_convergence_test, batched_iter_size=1, optimizer=optimizers.sgd, tol=1e-6): """Use implicit differentiation to solve a nonlinear equality-constrained program of the form: max f(x, θ) subject to h(x, θ) = 0 . We perform a change of variable via the implicit function theorem and obtain the unconstrained program: max f(φ(θ), θ) , where φ is an implicit function of the parameters θ such that h(φ(θ), θ) = 0. Args: objective (callable): Binary callable with signature `f(x, θ)` equality_constraints (callble): Binary callable with signature `h(x, θ)` initial_values (tuple): Tuple of initial values `(x_0, θ_0)` lr_func (scalar or callable): The step size used by the unconstrained optimizer. This can be a scalar ora callable taking in the current iteration and returning a scalar. max_iter (int, optional): Maximum number of outer iterations. Defaults to 500. convergence_test (callable): Binary callable with signature `callback(new_state, old_state)` where `new_state` and `old_state` are tuples of the form `(x_k^*, θ_k)` such that `h(x_k^*, θ_k) = 0` (and with `k-1` for `old_state`). The default convergence test returns `true` if both elements of the tuple have not changed within some tolerance. batched_iter_size (int, optional): The number of iterations to be unrolled and executed per iterations of the `while_loop` op for the forward iteration and the fixed-point adjoint iteration. Defaults to 1. optimizer (callable, optional): Unary callable waking a `lr_func` as a argument and returning an unconstrained optimizer. Defaults to `jax.experimental.optimizers.sgd`. tol (float, optional): Tolerance for the forward and backward iterations. Defaults to 1e-6. Returns: fax.loop.FixedPointSolution: A named tuple containing the solution `(x, θ)` as as the `value` attribute, `converged` (a bool indicating whether convergence was achieved), `iterations` (the number of iterations used), and `previous_value` (the value of the solution on the previous iteration). The previous value satisfies `sol.value=func(sol.previous_value)` and allows us to log the size of the last step if desired. 
""" def _objective(*args): return -objective(*args) def make_fp_operator(params): def _fp_operator(i, x): del i return x + equality_constraints(x, params) return _fp_operator constraints_solver = make_forward_fixed_point_iteration( make_fp_operator, default_max_iter=max_iter, default_batched_iter_size=batched_iter_size, default_atol=tol, default_rtol=tol) adjoint_iteration_vjp = make_adjoint_fixed_point_iteration( make_fp_operator, default_max_iter=max_iter, default_batched_iter_size=batched_iter_size, default_atol=tol, default_rtol=tol) opt_init, opt_update, get_params = optimizer(step_size=lr_func) grad_objective = grad(_objective, (0, 1)) def update(i, values): old_xstar, opt_state = values old_params = get_params(opt_state) forward_solution = constraints_solver(old_xstar, old_params) grads_x, grads_params = grad_objective(forward_solution.value, get_params(opt_state)) ybar, _ = adjoint_iteration_vjp( grads_x, forward_solution, old_xstar, old_params) implicit_grads = tree_util.tree_multimap( lax.add, grads_params, ybar) opt_state = opt_update(i, implicit_grads, opt_state) return forward_solution.value, opt_state def _convergence_test(new_state, old_state): x_new, params_new = new_state[0], get_params(new_state[1]) x_old, params_old = old_state[0], get_params(old_state[1]) return convergence_test((x_new, params_new), (x_old, params_old)) x0, init_params = initial_values opt_state = opt_init(init_params) solution = fixed_point_iteration(init_x=(x0, opt_state), func=update, convergence_test=jit(_convergence_test), max_iter=max_iter, batched_iter_size=batched_iter_size, unroll=False) return solution._replace( value=(solution.value[0], get_params(solution.value[1])), previous_value=(solution.previous_value[0], get_params(solution.previous_value[1])), )
98a753765dda4cbc09ee8b1bc6225c12d10eb996
30,804
def get_detection_probability_Braun2008(filename, index, TS_threshold): """ Find the detection probability as a function of the expected number of source counts in a detector. Returns Nsrc_list and Pdet :param filename: Filename :param index: spectral index :param TS_threshold: TS <=> 5sigma threshold """ with h5py.File(filename, "r") as f: Nsrc_list = f["Nsrc_list"][()] folder = f["index_%.2f" % index] TS = [] for Nsrc in Nsrc_list: TS.append(folder["TS_" + str(Nsrc)][()]) # Find Pdet for each expected Nsrc Pdet_at_Nsrc = [] for i, Nsrc in enumerate(Nsrc_list): idx = np.where(~np.isnan(TS[i])) ts = TS[i][idx] P = len(ts[ts > TS_threshold]) / len(ts) Pdet_at_Nsrc.append(P) # Weight by poisson probability Pdet = [] for Nsrc in Nsrc_list: P = sum([w * poisson(Nsrc).pmf(i) for i, w in enumerate(Pdet_at_Nsrc)]) Pdet.append(P) return Nsrc_list, Pdet
631195c6d0c264d9b8676929882048d39333f2d6
30,806
from datetime import datetime, timezone


def device_create(db: Session, name: str, device_type: DeviceType,
                  activation_token: str) -> Device:
    """ Create a new Device """
    device = Device(
        name=name,
        device_type=device_type,
        activation_token=activation_token,
        created_on=datetime.now(timezone.utc),
    )
    db.add(device)
    db.commit()
    db.refresh(device)
    return device
f7c39a52ad43523cce895b223da85ec6f632ade2
30,807
def xy_potential(_):
    """ Potential for square XY model with periodic boundary conditions """
    def potential(_, passive_rates):
        pot = -passive_rates.sum(dim=(-1, -2, -3))  # sum over all sites and directions
        return pot
    return potential
b809e57464a9cd893cd33cd7ad38dffdb0d12a40
30,808
def distribute_srcs_2D(X, Y, n_src, ext_x, ext_y, R_init): """Distribute n_src's in the given area evenly Parameters ---------- X, Y : np.arrays points at which CSD will be estimated n_src : int demanded number of sources to be included in the model ext_x, ext_y : floats how should the sources extend the area X, Y R_init : float demanded radius of the basis element Returns ------- X_src, Y_src : np.arrays positions of the sources nx, ny : ints number of sources in directions x,y new n_src = nx * ny may not be equal to the demanded number of sources R : float effective radius of the basis element """ Lx = np.max(X) - np.min(X) Ly = np.max(Y) - np.min(Y) Lx_n = Lx + (2 * ext_x) Ly_n = Ly + (2 * ext_y) [nx, ny, Lx_nn, Ly_nn, ds] = get_src_params_2D(Lx_n, Ly_n, n_src) ext_x_n = (Lx_nn - Lx) / 2 ext_y_n = (Ly_nn - Ly) / 2 X_src, Y_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n): np.complex(0, nx), (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n): np.complex(0, ny)] # d = round(R_init / ds) R = R_init # R = d * ds return X_src, Y_src, R
8c984c742e6e8d604332e51f39e057a46cad0076
30,809
def _setBlueprintNumberOfAxialMeshes(meshPoints, factor):
    """
    Set the blueprint number of axial mesh based on the axial mesh refinement factor.
    """
    if factor <= 0:
        raise ValueError(
            "A positive axial mesh refinement factor "
            f"must be provided. A value of {factor} is invalid."
        )
    if factor != 1:
        runLog.important(
            "An axial mesh refinement factor of {} is applied "
            "to blueprint based on setting specification.".format(factor),
            single=True,
        )
    return int(meshPoints) * factor
6c477b0e55e996009158fa34c06d3c642e4691c4
30,810
def _get_filter_syntax(_filter_info, _prefix=True):
    """This function retrieves the proper filter syntax for an API call."""
    if type(_filter_info) != tuple and type(_filter_info) != list:
        raise TypeError("Filter information must be provided as a tuple (element, criteria) or a list of tuples.")
    elif type(_filter_info) == tuple:
        _filter_info = [_filter_info]
    _syntax = ""
    if len(_filter_info[0]) > 0:
        _define_prefix = {True: '&', False: ''}
        _syntax_prefix = _define_prefix.get(_prefix)
        for _filter_tuple in _filter_info:
            _element, _criteria = _filter_tuple
            _syntax = f"{_syntax_prefix}filter={_element}({_criteria})&"
        _syntax = _syntax[:-1]
    return _syntax
b1817a2a3f004ba2bd44a8f8f272ad685e4d5ebe
30,813
import math


def pol2cart(r, theta):
    """
    Translate from polar to cartesian coordinates.
    """
    return (r * math.cos(float(theta) / 180 * math.pi),
            r * math.sin(float(theta) / 180 * math.pi))
69753e1cadd36ec70da1bf2cf94641d4c7f78179
30,815
import math


def mass2mk_ben(m):
    """mass2mk_ben - mass to M_K, Benedict et al. (2016) double exponential.

    Usage: mk = mass2mk_ben(mass)

    Where mk is absolute 2MASS K magnitude and mass is in solar masses. This
    version is the original double-exponential "forward model" (for going
    from mass to absolute magnitude) from the paper.

    NOTE: the range of the parameters is not checked to ensure the relation
    is used within the domain of applicability, this is left to the user.

    References: Benedict et al. (2016) AJ 152 141
    """
    x = m - _x0
    e1 = _a1 * math.exp(-_k1 * x)
    e2 = _a2 * math.exp(-_k2 * x)
    mk = e1 + e2 + _y0
    return mk
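Written out, the double exponential implemented above is (with _a1, _k1, _a2, _k2, _x0, _y0 being module-level fit constants defined elsewhere; their numerical values are not shown in the snippet):

M_K = a_1 e^{-k_1 (m - x_0)} + a_2 e^{-k_2 (m - x_0)} + y_0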
3e9f20588f87db6bb9429b6c5d7135ad879158c3
30,816
def apply_and_concat_one_nb(n, apply_func_nb, *args):  # Numba doesn't accept **kwargs
    """A Numba-compiled version of `apply_and_concat_one`.

    !!! note
        * `apply_func_nb` must be Numba-compiled
        * `*args` must be Numba-compatible
        * No support for `**kwargs`
    """
    output_0 = to_2d_one_nb(apply_func_nb(0, *args))
    output = np.empty((output_0.shape[0], n * output_0.shape[1]), dtype=output_0.dtype)
    for i in range(n):
        if i == 0:
            outputs_i = output_0
        else:
            outputs_i = to_2d_one_nb(apply_func_nb(i, *args))
        output[:, i * outputs_i.shape[1]:(i + 1) * outputs_i.shape[1]] = outputs_i
    return output
ed75920864a736aeafe9156b5bd6e456cd287226
30,817
def convertCovariance2Dto3D(covariance2d):
    """
    convert the covariance from [x, y, theta] to [x, y, z, roll, pitch, yaw]
    :param covariance2d: covariance matrix in 3x3 format. each row and column
        corresponds to [x, y, theta]
    :return: covariance matrix in 6x6 format. each row and column corresponds to
        [x, y, z, roll, pitch, yaw], where z, roll and pitch values are padded with 0.
    """
    covariance3d = np.zeros([6, 6])
    covariance2d = np.array(covariance2d)
    covariance3d[0:1, 0:1] = covariance2d[0:1, 0:1]
    covariance3d[5, 0:1] = covariance2d[2, 0:1]
    covariance3d[0:1, 5] = covariance2d[0:1, 2]
    covariance3d[5, 5] = covariance2d[2, 2]
    return covariance3d
4c6ea8bb8475a705fb40181172bab2a761676e85
30,818
import torch import random def fit_gan_wasserstein(nb_epoch: int, x_LS: np.array, y_LS: np.array, x_VS: np.array, y_VS: np.array, x_TEST: np.array, y_TEST: np.array, gen, dis, opt_gen, opt_dis, n_discriminator:int, batch_size:int=100, wdb:bool=False, gpu:bool=True): """ Fit GAN with discriminator using the Wasserstein distance estimate. """ # to assign the data to GPU with .to(device) on the data if gpu: device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: device = "cpu" # Assign models and data to gpu gen.to(device) dis.to(device) x_VS_gpu = torch.tensor(x_VS).to(device).float() y_VS_gpu = torch.tensor(y_VS).to(device).float() x_TEST_gpu = torch.tensor(x_TEST).to(device).float() y_TEST_gpu = torch.tensor(y_TEST).to(device).float() loss_list = [] time_tot = 0. # WARNING: batch size = 10 % #LS batch_size = int(0.1 * y_LS.shape[0]) for epoch in range(nb_epoch): start = timer() # Shuffle the data randomly at each epoch seed = random.randint(0, 2000) x_LS_shuffled, y_LS_shuffled = shuffle(x_LS, y_LS, random_state=seed) batch_dis_idx = 0 batch_gen_idx = 0 loss_D_batch = 0 loss_G_batch = 0 # Training batch loop batch_list = [i for i in range(batch_size, batch_size * y_LS.shape[0] // batch_size, batch_size)] for y_batch, x_batch in zip(np.split(y_LS_shuffled, batch_list), np.split(x_LS_shuffled, batch_list)): y_batch_LS = torch.tensor(y_batch).to(device).float() x_batch_LS = torch.tensor(x_batch).to(device).float() bs = x_batch_LS.shape[0] # 1. Train the Discriminator # Critic wants to maximize : E(C(x)) - E(C(G(z))) # <~> maximize : mean(C(x)) - mean(C(G(z))) # <-> minimize : -{mean(C(x)) - mean(C(G(z)))} # Generated samples G_LS_samples = gen(noise=torch.randn(bs, gen.latent_s).to(device), context=x_batch_LS) # Compute Discriminator's loss loss_D = dis.loss(generated_samples=G_LS_samples, true_samples=y_batch_LS, context=x_batch_LS) loss_D_batch += loss_D.detach() # Update critic's weight opt_dis.zero_grad() loss_D.backward() opt_dis.step() # N_CRITIC update for discriminator while one for generator # 2. 
Train the Generator if ((batch_dis_idx + 1) % n_discriminator) == 0: # Train Generator # Generator has the opposed objective that of critic : # wants to minimize : E(C(x)) - E(C(G(z))) # <-> minimize : - E(C(G(z))) # <-> minimize : -(mean(C(G(z))) # Generated samples G_LS_samples = gen(noise=torch.randn(bs, gen.latent_s).to(device), context=x_batch_LS) D_LS = dis(input=G_LS_samples, context=x_batch_LS) # Compute generator's loss lossG = -torch.mean(D_LS) loss_G_batch += lossG.detach() # Update generator's weight opt_gen.zero_grad() lossG.backward() opt_gen.step() batch_gen_idx += 1 batch_dis_idx += 1 # LS loss is the average over all the batch loss_D_LS = loss_D_batch / batch_dis_idx loss_G_LS = loss_G_batch / batch_gen_idx # VS loss # D G_VS_samples = gen(noise=torch.randn(y_VS_gpu.shape[0], gen.latent_s).to(device), context=x_VS_gpu) loss_D_VS = dis.loss(generated_samples=G_VS_samples, true_samples=y_VS_gpu, context=x_VS_gpu).detach() # G D_VS = dis(input=G_VS_samples, context=x_VS_gpu) loss_G_VS = -torch.mean(D_VS).detach() # TEST loss # D G_TEST_samples = gen(noise=torch.randn(y_TEST.shape[0], gen.latent_s).to(device), context=x_TEST_gpu) loss_D_TEST = dis.loss(generated_samples=G_TEST_samples, true_samples=y_TEST_gpu, context=x_TEST_gpu).detach() # G D_TEST = dis(input=G_TEST_samples, context=x_TEST_gpu) loss_G_TEST = -torch.mean(D_TEST).detach() # Save NF model when the VS loss is minimal loss_list.append([loss_D_LS, loss_G_LS, loss_D_VS, loss_G_VS, loss_D_TEST, loss_G_TEST]) end = timer() time_tot += end - start if wdb: wandb.log({"D ls loss": loss_D_LS}) wandb.log({"G ls loss": loss_G_LS}) wandb.log({"D vs loss": loss_D_VS}) wandb.log({"G vs loss": loss_G_VS}) wandb.log({"D test loss": loss_D_TEST}) wandb.log({"G test loss": loss_G_TEST}) if epoch % 10 == 0: print("Epoch {:.0f} Approximate time left : {:2f} min - D LS loss: {:4f} G LS loss: {:4f} D VS loss: {:4f} G VS loss: {:4f}".format(epoch, time_tot / (epoch + 1) * (nb_epoch - (epoch + 1)) / 60, loss_D_LS, loss_G_LS, loss_D_VS, loss_G_VS), end="\r", flush=True) print('Fitting time_tot %.0f min' %(time_tot/60)) return np.asarray(torch.tensor(loss_list, device='cpu')), gen, dis
64fc1625aa76ca09c14720ef3489657b8c28e671
30,819
import math def pad(image_array, final_dims_in_pixels, zero_fill_mode=False): """ Pad image data to final_dim_in_pixels Attributes: image_array (float, np.array): 3D numpy array containing image data final_dim_in_pixels (list): Final number of pixels in xyz dimensions. Example: [256, 256, 80] zero_fill_mode (bool): If True, returns array filled with zeros Returns: padded_image_array (arr): Resized array containing image data """ dims = len(final_dims_in_pixels) original_dims_in_pixels = [image_array.shape[d] for d in range(len(image_array.shape))] # test if input and output dimensions match if dims != len(original_dims_in_pixels): raise ValueError("Dimensions of the input (" + str(len(image_array.shape)) + ") do not match those of output (" + str(len(final_dims_in_pixels))+ ")") # test if desired final image is larger than original if any(final_dims_in_pixels[d] < original_dims_in_pixels[d] for d in range(dims)): raise ValueError("Final dimensions are smaller than original. Did you mean to `crop`?") padded_image_array = np.zeros(final_dims_in_pixels) new_first_image_pixel = [0 for i in range(dims)] new_last_image_pixel = [0 for i in range(dims)] for dim in range(dims): new_first_image_pixel[dim] = int(math.floor((final_dims_in_pixels[dim] - original_dims_in_pixels[dim]) / 2)) new_last_image_pixel[dim] = new_first_image_pixel[dim] + original_dims_in_pixels[dim] #for 2D: if dims == 2: padded_image_array [new_first_image_pixel[0] : new_last_image_pixel[0], new_first_image_pixel[1] : new_last_image_pixel[1]] = image_array elif dims == 3: padded_image_array [new_first_image_pixel[0] : new_last_image_pixel[0], new_first_image_pixel[1] : new_last_image_pixel[1], new_first_image_pixel[2] : new_last_image_pixel[2]] = image_array if zero_fill_mode: padded_image_array = padded_image_array*0. return(padded_image_array)
8882ded9a01f98e9163807675cf7246527443d97
30,821
def save_and_plot(canddatalist): """ Converts a canddata list into a plots and a candcollection. Calculates candidate features from CandData instance(s). Returns structured numpy array of candidate features labels defined in st.search_dimensions. Generates png plot for peak cands, if so defined in preferences. """ if isinstance(canddatalist, CandData): canddatalist = [canddatalist] elif isinstance(canddatalist, list): if not len(canddatalist): return CandCollection() else: logger.warn("argument must be list of CandData object") logger.info('Calculating features for {0} candidate{1}.' .format(len(canddatalist), 's'[not len(canddatalist)-1:])) st = canddatalist[0].state featurelists = [] for feature in st.features: ff = [] for i, canddata in enumerate(canddatalist): ff.append(canddata_feature(canddata, feature)) featurelists.append(ff) kwargs = dict(zip(st.features, featurelists)) candlocs = [] for i, canddata in enumerate(canddatalist): candlocs.append(canddata_feature(canddata, 'candloc')) kwargs['candloc'] = candlocs if canddata.cluster is not None: clusters = [] clustersizes = [] for i, canddata in enumerate(canddatalist): clusters.append(canddata_feature(canddata, 'cluster')) clustersizes.append(canddata_feature(canddata, 'clustersize')) kwargs['cluster'] = clusters kwargs['clustersize'] = clustersizes candcollection = make_candcollection(st, **kwargs) if (st.prefs.savecands or st.prefs.saveplots) and len(candcollection.array): if len(candcollection) > 1: snrs = candcollection.array['snr1'].flatten() elif len(candcollection) == 1: snrs = None # save cc and save/plot each canddata for i, canddata in enumerate(canddatalist): if st.prefs.savecands: save_cands(st, canddata=canddata) if st.prefs.saveplots: candplot(canddata, cluster=(clusters[i], clustersizes[i]), snrs=snrs) return candcollection
484b2bf099c31762e294ce40039f01f8ec00a273
30,822
def GetReviewers(host, change):
    """Gets information about all reviewers attached to a change."""
    path = 'changes/%s/reviewers' % change
    return _SendGerritJsonRequest(host, path)
4e2d5bdf37993f76b42c0062dec042dc5a01aa87
30,823
def plot_single_points(xs, ys, color=dark_color, s=50, zorder=1e6,
                       edgecolor='black', **kwargs):
    """Plot single points and return patch artist."""
    if xs is None:
        xs = tuple(range(len(ys)))
    return plt.scatter(xs, ys, marker='o', s=s, color=color, zorder=zorder,
                       edgecolor=edgecolor, **kwargs)
490c17dbb360bc06c7805dddb7af1c72b5ce5890
30,824
import _ast


def find_imports(source: str, filename=constants.DEFAULT_FILENAME, mode='exec'):
    """return a list of all module names required by the given source code."""
    # passing an AST is not supported because it doesn't make sense to.
    # either the AST is one that we made, in which case the imports have already been
    # made and calling parse_ast again would find no imports, or it's an AST made by
    # parsing the output of fix_syntax, which is internal.
    fixed = _fix_syntax(source, filename=filename)
    tree = _ast.parse(fixed, filename, mode)
    return _find_imports(tree, filename=filename)
2ea91f6387e455fb1b4907e6a109fc3b987c8a9d
30,825
def generate_character_data(sentences_train, sentences_dev, sentences_test, max_sent_length, char_embedd_dim=30): """ generate data for charaters :param sentences_train: :param sentences_dev: :param sentences_test: :param max_sent_length: :return: C_train, C_dev, C_test, char_embedd_table """ def get_character_indexes(sentences): index_sentences = [] max_length = 0 for words in sentences: index_words = [] for word in words: index_chars = [] if len(word) > max_length: max_length = len(word) for char in word[:MAX_CHAR_LENGTH]: char_id = char_alphabet.get_index(char) index_chars.append(char_id) index_words.append(index_chars) index_sentences.append(index_words) return index_sentences, max_length def construct_tensor_char(index_sentences): C = np.empty([len(index_sentences), max_sent_length, max_char_length], dtype=np.int32) word_end_id = char_alphabet.get_index(word_end) for i in range(len(index_sentences)): words = index_sentences[i] sent_length = len(words) for j in range(sent_length): chars = words[j] char_length = len(chars) for k in range(char_length): cid = chars[k] C[i, j, k] = cid # fill index of word end after the end of word C[i, j, char_length:] = word_end_id # Zero out C after the end of the sentence C[i, sent_length:, :] = 0 return C def build_char_embedd_table(): scale = np.sqrt(3.0 / char_embedd_dim) char_embedd_table = np.random.uniform(-scale, scale, [char_alphabet.size(), char_embedd_dim]).astype(theano.config.floatX) char_freqs = char_alphabet.get_vocab_freqs() return (char_embedd_table, char_freqs) char_alphabet = Alphabet('character') char_alphabet.get_index(word_end) index_sentences_train, max_char_length_train = get_character_indexes(sentences_train) index_sentences_dev, max_char_length_dev = get_character_indexes(sentences_dev) index_sentences_test, max_char_length_test = get_character_indexes(sentences_test) # close character alphabet char_alphabet.close() logger.info("character alphabet size: %d" % (char_alphabet.size() - 1)) max_char_length = min(MAX_CHAR_LENGTH, max(max_char_length_train, max_char_length_dev, max_char_length_test)) logger.info("Maximum character length of training set is %d" % max_char_length_train) logger.info("Maximum character length of dev set is %d" % max_char_length_dev) logger.info("Maximum character length of test set is %d" % max_char_length_test) logger.info("Maximum character length used for training is %d" % max_char_length) # fill character tensor C_train = construct_tensor_char(index_sentences_train) C_dev = construct_tensor_char(index_sentences_dev) C_test = construct_tensor_char(index_sentences_test) return C_train, C_dev, C_test, build_char_embedd_table()
c53257e1d999edafc54b627a0687ae33aaebc487
30,826
def draw_pie_distribution_of_elements(Genome_EP, ChIP_EP, gprom=(1000, 2000, 3000), gdown=(1000, 2000, 3000), prom=(1000,2000,3000), down=(1000,2000,3000)): """Draw the pie charts of the overall distributions of ChIP regions and genome background """ # get the labels (legend) for the genome pie chart gnames = ["Promoter (<=%d bp)" %gprom[0]] gnames += ["Promoter (%d-%d bp)" %(p1, p2) for p1, p2 in zip(gprom[:-1], gprom[1:])] gnames += ["Downstream (<=%d bp)" %gdown[0]] gnames += ["Downstream (%d-%d bp)" %(d1, d2) for d1, d2 in zip(gdown[:-1], gdown[1:])] gnames += ["5'UTR","3'UTR", "Coding exon", "Intron", "Distal intergenic"] # get the labels (legend) for the pie chart names = ["Promoter (<=%d bp)" %prom[0]] names += ["Promoter (%d-%d bp)" %(p1, p2) for p1, p2 in zip(prom[:-1], prom[1:])] names += ["Downstream (<=%d bp)" %down[0]] names += ["Downstream (%d-%d bp)" %(d1, d2) for d1, d2 in zip(down[:-1], down[1:])] names += ["5'UTR","3'UTR", "Coding exon", "Intron", "Distal intergenic"] # get the proportions to draw x = ChIP_EP['whole']['promoter'] + ChIP_EP['whole']['downstream'] + ChIP_EP['whole']['gene'] + [ChIP_EP['whole']['enhancer']] x_percent = _percent_str([100.0*a for a in x]) names_w_percent_x = list(map(lambda x, y: x + ': ' + y, names, x_percent)) # make x values less than .1% .5% because they are too small to see in the pie chart. But x_percent does not change x = list(map(max, x, [0.01]*len(x))) # get the proportions to draw y = Genome_EP['whole']['promoter'] + Genome_EP['whole']['downstream'] + Genome_EP['whole']['gene'] + [Genome_EP['whole']['enhancer']] y_percent = _percent_str([100.0*a for a in y]) names_w_percent_y = list(map(lambda x, y: x + ': ' + y, gnames, y_percent)) # make x values less than .1% .5% because they are too small to see in the pie chart. But x_percent does not change y = list(map(max, y, [0.01]*len(y))) # # producing R script return # # put header rscript = '\n' rscript += R.comment('') rscript += R.comment('Distribution of Genome and ChIP regions over cis-regulatory element') rscript += R.comment('Note that the x may be modified for better graphics in case a value is too small') rscript += R.comment('Thus, look at the labels of the pie chart to get the real percentage values' ) rscript += R.comment('') rscript += '\n' # some graphical parameters init_angle = 90 density = 100 main_x = 'ChIP' main_y = 'Genome' # pie chart colors cols = ["#445FA2","#EB9D86","#799F7A","#6C527F","#5FA1C1","#E8BB77","#A8C5EF","#FDCDB9","#C6E6B5","#F1D5EE","#B4E1F6"] mar = mar=[3,3,4,2.8] oma=[4,2,4,2] mfcol = [2, 2] rscript += R.par(mar=mar, oma=oma, mfcol=mfcol) # R script rscript += R.pie(y, labels=y_percent, main=main_y, col=cols,clockwise=True, radius=0.9,init_angle=init_angle, cex=0.8, density=density) rscript += R.plot([0,1],[0,1], tp="n", axes=False, xlab="", ylab="", main="", frame=False) rscript += R.legend(x='top', legend=names_w_percent_y, pch=15, col=cols, bty="n") # R script rscript += R.pie(x, labels=x_percent, main=main_x, col=cols,clockwise=True, radius=0.9,init_angle=init_angle, cex=0.8, density=density) rscript += R.plot([0,1],[0,1], tp="n", axes=False, xlab="", ylab="", main="", frame=False) rscript += R.legend(x='top', legend=names_w_percent_x, pch=15, col=cols, bty="n") return rscript
24a28a23e2929e20c77dd2389691235d51e1ba80
30,827
def get_neighborhood(leaflet, mdsys):
    """
    Get neighborhood object for the given leaflet
    """
    dist = distances.distance_array(leaflet.positions, leaflet.positions,
                                    mdsys.dimensions[:3])
    nbrs = Neighborhood(leaflet.positions, dist, mdsys.dimensions[:3])
    return nbrs
234193d36c957a0dd26a805f2fcbf5e97c0be2d6
30,828
def clean_title(title: str) -> str:
    """Strip unwanted additional text from title."""
    for splitter in [" (", " [", " - ", " (", " [", "-"]:
        if splitter in title:
            title_parts = title.split(splitter)
            for title_part in title_parts:
                # look for the end splitter
                for end_splitter in [")", "]"]:
                    if end_splitter in title_part:
                        title_part = title_part.split(end_splitter)[0]
                for ignore_str in ["feat.", "featuring", "ft.", "with ", "explicit"]:
                    if ignore_str in title_part.lower():
                        return title.split(splitter + title_part)[0].strip()
    return title.strip()
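A few illustrative calls for `clean_title` (hypothetical titles, assuming the function above is in scope):

print(clean_title("Song Title (feat. Artist B)"))   # -> "Song Title"
print(clean_title("My Track - Explicit"))           # -> "My Track"
print(clean_title("Plain Title"))                   # -> "Plain Title"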
5625c6c64b166560b1804b7048fd3d604536251a
30,829
def get_dates():
    """
    Query date in the tweets table
    :return:
    """
    sql = "SELECT date FROM tweets"
    dates = cx.read_sql(db, sql)
    return dates
60704fa5fa625ffbd42b29b9cc22c95d01475026
30,830
def _convert_to_dict(best_param):
    """
    Utility method for converting best_param string to dict

    Args:
        :best_param: the best_param string

    Returns:
        a dict with param->value
    """
    best_param_dict = {}
    for hp in best_param:
        hp = hp.split('=')
        best_param_dict[hp[0]] = hp[1]
    return best_param_dict
318ed529b0f411b1b671de34a4b0f4ecf3dc9780
30,831
def _expand_currency(data: dict) -> str: """ Verbalizes currency tokens. Args: data: detected data Returns string """ currency = _currency_dict[data['currency']] quantity = data['integral'] + ('.' + data['fractional'] if data.get('fractional') else '') magnitude = data.get('magnitude') # remove commas from quantity to be able to convert to numerical quantity = quantity.replace(',', '') # check for million, billion, etc... if magnitude is not None and magnitude.lower() in _magnitudes: if len(magnitude) == 1: magnitude = _magnitudes_dict[magnitude.lower()] return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency + 's') parts = quantity.split('.') if len(parts) > 2: return quantity + " " + currency + "s" # Unexpected format dollars = int(parts[0]) if parts[0] else 0 cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 if dollars and cents: dollar_unit = currency if dollars == 1 else currency + 's' cent_unit = 'cent' if cents == 1 else 'cents' return "{} {}, {} {}".format( _expand_hundreds(dollars), dollar_unit, _inflect.number_to_words(cents), cent_unit ) elif dollars: dollar_unit = currency if dollars == 1 else currency + 's' return "{} {}".format(_expand_hundreds(dollars), dollar_unit) elif cents: cent_unit = 'cent' if cents == 1 else 'cents' return "{} {}".format(_inflect.number_to_words(cents), cent_unit) else: return 'zero' + ' ' + currency + 's'
491d175195f97126d65afec65c60f2e34ca09bc3
30,833
def getCampaignID(title):
    """ Returns the id of a campaign from a dm name and a title """
    conn = connectToDB()
    cur = conn.cursor()
    print title
    query = cur.mogrify('select id from campaigns where title = %s;', (title,))
    print query
    cur.execute(query)
    results = cur.fetchone()
    return results[0] if results else -1
b3f6f3b50a97e25931754332ed864bdac9d5c639
30,834
from time import time


def calculate_exe_time(input_function):
    """
    This decorator method takes in a function as argument and calculates its
    execution time.

    :param input_function: name of method to be executed.
    :return process_time: method that calls the input_function and calculates
        execution time.
    """
    def process_time(*args):
        start = time()
        input_function(*args)
        end = time()
        print(f"Execution time: {end-start} secs")
    return process_time
bdbd4e20c8126e48d27031e46e5a91c83740a188
30,835
import torch


def load_xyz_from_txt(file_name):
    """Load xyz poses from txt. Each line is: x,y,z

    Args:
        file_name (str): txt file path

    Returns:
        torch.Tensor: Trajectory in the form of homogenous transformation matrix.
            Shape [N,4,4]
    """
    global device
    poses = np.genfromtxt(file_name, delimiter=',')
    poses = torch.Tensor(poses).to(device)
    poses = tgm.rtvec_to_pose(poses)  # [n,4,4]
    return poses
f0d57aafa9e96a20c719a27dc8e0f2c18ebe0e7b
30,836
def category_add():
    """ Route for category add """
    # request Form data
    form = CategoryForm(request.form)
    if request.method == "POST" and form.validate():
        # Set new category name variable
        category_name = form.name.data.lower()
        if category_check(category_name):
            # Add new category to the database
            mongo.db.categories.insert_one({"name": category_name})
            # Display flash message
            flash(
                "Category " + category_name + " successfully added", "success")
            return redirect(url_for('products.search'))
        else:
            return render_template("category_add.html", form=form)
    return render_template("category_add.html", form=form)
11a94b7b3600fcaad0848696dd63648d39988052
30,837
import requests


def generate_text(input: TextGenerationInput) -> TextGenerationOutput:
    """Generate text based on a given prompt."""
    payload = {
        "text": input.text,
        "temperature": input.temperature,
        "min_length": input.min_length,
        "max_length": input.max_length,
        "do_sample": input.do_sample,
    }
    res = requests.post(API_ENDPOINT, json=payload)
    print(res.json())
    return TextGenerationOutput(generated_text=res.json()["generated_text"])
1c0eeff8b90b5246828a285f8c7a86bc4095c364
30,838
def file_content_hash(file_name, encoding, database=None): """ Returns the hash of the contents of the file Use the database to keep a persistent cache of the last content hash. """ _, content_hash = _file_content_hash(file_name, encoding, database) return content_hash
42962749e6bb5ec2d061ffcefd6ebf4aa34bbc29
30,839
import inspect def pass_multiallelic_sites(mqc): """ The number of PASS multiallelic sites. Source: count_variants.py (bcftools view) """ k = inspect.currentframe().f_code.co_name try: d = next(iter(mqc["multiqc_npm_count_variants"].values())) v = d["pass_multiallelic_sites"] v = int(v) except KeyError: v = "NA" return k, v
cd91ff816e88fa29e4d3beed4d0baf740388428c
30,840
import json def read_dialog_file(json_filename: str) -> list[Message]: """ Read messages from the dialog file @return: list of Message objects (without intent) """ with open(json_filename, encoding="utf8") as dialog_json: return [ Message(is_bot=msg["is_bot"], text=msg["text"]) for msg in json.load(dialog_json) ]
9665c6bd708c66e66e24cb416d033730ef4f3909
30,841
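A sketch of the JSON layout `read_dialog_file` expects: a top-level list of objects with `is_bot` and `text` keys. Writing a sample file is self-contained; actually calling the function assumes the `Message` class above is importable.

import json
import tempfile

sample = [
    {"is_bot": False, "text": "Hi, what are your opening hours?"},
    {"is_bot": True, "text": "We are open 9am-5pm on weekdays."},
]
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False, encoding="utf8") as fh:
    json.dump(sample, fh)

# messages = read_dialog_file(fh.name)
# -> [Message(is_bot=False, text=...), Message(is_bot=True, text=...)]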
def dtwavexfm3(X, nlevels=3, biort=DEFAULT_BIORT, qshift=DEFAULT_QSHIFT, include_scale=False, ext_mode=4, discard_level_1=False): """Perform a *n*-level DTCWT-3D decompostion on a 3D matrix *X*. :param X: 3D real array-like object :param nlevels: Number of levels of wavelet decomposition :param biort: Level 1 wavelets to use. See :py:func:`dtcwt.coeffs.biort`. :param qshift: Level >= 2 wavelets to use. See :py:func:`dtcwt.coeffs.qshift`. :param ext_mode: Extension mode. See below. :param discard_level_1: True if level 1 high-pass bands are to be discarded. :returns Yl: The real lowpass image from the final level :returns Yh: A tuple containing the complex highpass subimages for each level. Each element of *Yh* is a 4D complex array with the 4th dimension having size 28. The 3D slice ``Yh[l][:,:,:,d]`` corresponds to the complex higpass coefficients for direction d at level l where d and l are both 0-indexed. If *biort* or *qshift* are strings, they are used as an argument to the :py:func:`dtcwt.coeffs.biort` or :py:func:`dtcwt.coeffs.qshift` functions. Otherwise, they are interpreted as tuples of vectors giving filter coefficients. In the *biort* case, this should be (h0o, g0o, h1o, g1o). In the *qshift* case, this should be (h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b). There are two values for *ext_mode*, either 4 or 8. If *ext_mode* = 4, check whether 1st level is divisible by 2 (if not we raise a ``ValueError``). Also check whether from 2nd level onwards, the coefs can be divided by 4. If any dimension size is not a multiple of 4, append extra coefs by repeating the edges. If *ext_mode* = 8, check whether 1st level is divisible by 4 (if not we raise a ``ValueError``). Also check whether from 2nd level onwards, the coeffs can be divided by 8. If any dimension size is not a multiple of 8, append extra coeffs by repeating the edges twice. If *discard_level_1* is True the highpass coefficients at level 1 will be discarded. (And, in fact, will never be calculated.) This turns the transform from being 8:1 redundant to being 1:1 redundant at the cost of no-longer allowing perfect reconstruction. If this option is selected then `Yh[0]` will be `None`. Note that :py:func:`dtwaveifm3` will accepts `Yh[0]` being `None` and will treat it as being zero. Example:: # Performs a 3-level transform on the real 3D array X using the 13,19-tap # filters for level 1 and the Q-shift 14-tap filters for levels >= 2. Yl, Yh = dtwavexfm3(X, 3, 'near_sym_b', 'qshift_b') .. codeauthor:: Rich Wareham <[email protected]>, Aug 2013 .. codeauthor:: Huizhong Chen, Jan 2009 .. codeauthor:: Nick Kingsbury, Cambridge University, July 1999. """ trans = Transform3d(biort, qshift, ext_mode) res = trans.forward(X, nlevels, include_scale, discard_level_1) if include_scale: return res.lowpass, res.highpasses, res.scales else: return res.lowpass, res.highpasses
d0adab48c51ade82fab55b416029a2291151e3b9
30,842
def character_regions(img, line_regs, bg_thresh=None, **kwargs): """ Find the characters in an image given the regions of lines if text in the image. Args: img (numpy.ndarray): Grayscaled image. line_regs (list[tuple[int, int]]): List of regions representing where the lines are. bg_thresh (Optional[int]): Background threshold up to which a pixel is considered text and not part of the background. If not provided, a default background threshold is calculated for each line region in the image and used instead. **kwargs: Keyword arguments passed to text_regions. """ assert len(img.shape) == 2 regions = [] w = img.shape[1] for start, end in line_regs: sub_img = img[start:end+1, :] if bg_thresh is None: bg_thresh = default_background_threshold(sub_img) # Sanity check assert w == sub_img.shape[1] pixels = colored_pixels(sub_img, bg_thresh) x_distr, y_distr = zip(*pixels) char_regions = text_regions(x_distr, w, **kwargs) regions.append(char_regions) return regions
2e1b182944a857b698886ed590295723909dcc7e
30,843
def cache_clear(request):
    """
    Clears the cache directory.
    """
    Core.get_instance().clear_cache()
    return {
        'size': Core.get_instance().get_cache_size()
    }
039ddc6e400c1befe283b529ba239d9c7831a7ce
30,844
import numpy as np


def sort_crp_tables(tables):
    """Sort CRP cluster assignments by table size, largest tables first."""
    keys = sorted(tables, key=lambda t: (len(tables[t]), min(tables[t])), reverse=True)
    items = [item for table in keys for item in tables[table]]
    dividers = [len(tables[table]) for table in keys]
    return (items, np.cumsum(dividers))
4147cb86ed672b7dd1503615ba759fdb36d74185
30,845
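A worked example of the ordering above; it assumes `sort_crp_tables` (and numpy) are importable, and the table labels are arbitrary.

tables = {"a": [0, 3], "b": [1, 2, 4], "c": [5]}
items, boundaries = sort_crp_tables(tables)
# Largest table first: items == [1, 2, 4, 0, 3, 5]
# boundaries are the cumulative table sizes: array([3, 5, 6])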
def sum_category_hours(day, now, timelog=TIMELOG, category_hours=False): """ Sum the hours by category. """ if not category_hours: category_hours = {} activities = get_rows(day, timelog) for activity in activities: category = activity.category duration = activity.get_duration(now) if category in category_hours: category_hours[category] += duration else: category_hours[category] = duration return category_hours
5d8d77759c43f40c616bd394ed8ba169f4a58917
30,846
def run_factory( factory, # type: LazyFactory args=None, # type: Optional[Iterable[Any]] kwargs=None, # type: Optional[Mapping[str, Any]] ): # type: (...) -> Any """ Import and run factory. .. code:: python >>> from objetto.utils.factoring import run_factory >>> bool(run_factory("re|match", (r"^[a-z]+$", "abc"))) True :param factory: Lazy factory. :type factory: str or function or collections.abc.Callable or None :param args: Arguments to be passed to the factory function. :param kwargs: Keyword arguments to be passed to the factory function. :return: Result from factory. """ factory = import_factory(factory) if factory is None: return None else: return factory(*(args or ()), **(kwargs or {}))
3766555849bca15e568ffc41fb522a47c22c666c
30,847
def steps_smoother(steps, resolution):
    """
    Linearly interpolate between consecutive steps.

    :param steps: array of positions of 2 joints for each of the 4 feet, one entry per step
    :param resolution: number of interpolated frames produced per step
    :return: array of interpolated positions of 2 joints for each of the 4 feet
    """
    smoothed_steps = []
    for i in range(len(steps)):
        step = steps[i]
        next_step = steps[(i + 1) % len(steps)]
        for j in range(resolution):
            smoothed_step = []
            for k in range(4):
                positions = step[k]
                next_positions = next_step[k]
                pos0 = positions[0] + j * \
                    ((next_positions[0] - positions[0]) / resolution)
                pos1 = positions[1] + j * \
                    ((next_positions[1] - positions[1]) / resolution)
                smoothed_step.append([pos0, pos1])
            smoothed_steps.append(smoothed_step)
    return smoothed_steps
a27e09af169e79438895d0e15c0b536213962429
30,848
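An interpolation sketch for the smoother above: two key steps for all four feet, smoothed with resolution 2 (assumes `steps_smoother` is in scope).

steps = [
    [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],  # step 0
    [[2.0, 4.0], [2.0, 4.0], [2.0, 4.0], [2.0, 4.0]],  # step 1
]
smoothed = steps_smoother(steps, resolution=2)
# len(smoothed) == 4; each foot passes through [0, 0] -> [1, 2] -> [2, 4] -> [1, 2]
# (the last frame heads back toward step 0 because the gait wraps around)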
from typing import Optional def get_instance_server(name: Optional[str] = None, server_id: Optional[str] = None, zone: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceServerResult: """ Gets information about an instance server. ## Example Usage ```python import pulumi import pulumi_scaleway as scaleway my_key = scaleway.get_instance_server(server_id="11111111-1111-1111-1111-111111111111") ``` :param str name: The server name. Only one of `name` and `server_id` should be specified. :param str server_id: The server id. Only one of `name` and `server_id` should be specified. :param str zone: `zone`) The zone in which the server exists. """ __args__ = dict() __args__['name'] = name __args__['serverId'] = server_id __args__['zone'] = zone if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('scaleway:index/getInstanceServer:getInstanceServer', __args__, opts=opts, typ=GetInstanceServerResult).value return AwaitableGetInstanceServerResult( additional_volume_ids=__ret__.additional_volume_ids, boot_type=__ret__.boot_type, bootscript_id=__ret__.bootscript_id, cloud_init=__ret__.cloud_init, enable_dynamic_ip=__ret__.enable_dynamic_ip, enable_ipv6=__ret__.enable_ipv6, id=__ret__.id, image=__ret__.image, ip_id=__ret__.ip_id, ipv6_address=__ret__.ipv6_address, ipv6_gateway=__ret__.ipv6_gateway, ipv6_prefix_length=__ret__.ipv6_prefix_length, name=__ret__.name, organization_id=__ret__.organization_id, placement_group_id=__ret__.placement_group_id, placement_group_policy_respected=__ret__.placement_group_policy_respected, private_ip=__ret__.private_ip, private_networks=__ret__.private_networks, project_id=__ret__.project_id, public_ip=__ret__.public_ip, root_volumes=__ret__.root_volumes, security_group_id=__ret__.security_group_id, server_id=__ret__.server_id, state=__ret__.state, tags=__ret__.tags, type=__ret__.type, user_data=__ret__.user_data, zone=__ret__.zone)
aae211831c951d131a4cd21fab917da5b169f31b
30,849
def leaper(x, y, int1, int2):
    """permutes the (int1, int2) offset values around a position for no_conflict tests"""
    return [(x+int1, y+int2), (x-int1, y+int2), (x+int1, y-int2), (x-int1, y-int2),
            (x+int2, y+int1), (x-int2, y+int1), (x+int2, y-int1), (x-int2, y-int1)]
6f7afc071c8adbc72a6391179e2df522574e5197
30,850
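The eight (1, 2) offsets around the origin, i.e. knight-style leaper moves (assumes `leaper` is in scope).

print(leaper(0, 0, 1, 2))
# [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (-2, 1), (2, -1), (-2, -1)]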
def calc_offsets(obj): """ The search "hit" should have a 'fullsnip' annotation which is a the entire text of the indexable resource, with <start_sel> and <end_sel> wrapping each highlighted word. Check if there's a selector on the indexable, and then if there's a box-selector use this to generate a list of xywh coordinates by retrieving the selector by its index from a list of lists """ if hasattr(obj, "fullsnip"): words = obj.fullsnip.split(" ") offsets = [] if words: for i, word in enumerate(words): if "<start_sel>" in word and "<end_sel>" in word: offsets.append(i) if offsets: if obj.selector: if (boxes := obj.selector.get("box-selector")) is not None: box_list = [] for x in offsets: try: box_list.append(boxes[x]) except (IndexError, ValueError): pass if box_list: return box_list # [boxes[x] for x in offsets if boxes[x]] else: return return
6af4827a57cf20f317ce2a40a669c14d3f6380f3
30,851
def spread(self, value="", **kwargs): """Turns on a dashed tolerance curve for the subsequent curve plots. APDL Command: SPREAD Parameters ---------- value Amount of tolerance. For example, 0.1 is ± 10%. """ return self.run("SPREAD,%s" % (str(value)), **kwargs)
a92c8e230eadd4e1fde498fa5650a403f419eaeb
30,853
def _ValidateCandidateImageVersionId(current_image_version_id, candidate_image_version_id): """Determines if candidate version is a valid upgrade from current version.""" if current_image_version_id == candidate_image_version_id: return False parsed_curr = _ImageVersionItem(image_ver=current_image_version_id) parsed_cand = _ImageVersionItem(image_ver=candidate_image_version_id) # Checks Composer versions. if (not parsed_cand.composer_contains_alias and not _IsComposerVersionUpgradeCompatible(parsed_curr.composer_ver, parsed_cand.composer_ver)): return False # Checks Airflow versions. if (not parsed_cand.airflow_contains_alias and not _IsAirflowVersionUpgradeCompatible(parsed_curr.airflow_ver, parsed_cand.airflow_ver)): return False return True
25d888645211fc21f7a21ee17f5aeeb04e83907e
30,854
import re

import docker


def getOrdererIPs():
    """ returns list of ip addr """
    client = docker.from_env()
    container_list = client.containers.list()
    orderer_ip_list = []
    for container in container_list:
        if re.search("^orderer[1-9][0-9]*", container.name):
            out = container.exec_run("awk 'END{print $1}' /etc/hosts", stdout=True)
            orderer_ip_list.append(out.output.decode().split("\n")[0])
    client.close()
    return orderer_ip_list
745c9635b03745c5e61d6cd56c0b1fcd58df1fa4
30,857
import re


def repair_attribute_name(attr):
    """ Remove "weird" characters from attribute names """
    return re.sub(r'[^a-zA-Z-_\/0-9\*]', '', attr)
f653a5cb5ed5e43609bb334f631f518f73687853
30,858
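A quick check of the character whitelist above (assumes `repair_attribute_name` is in scope).

print(repair_attribute_name("width (px)!"))    # -> "widthpx"
print(repair_attribute_name("rate_per/sec*"))  # -> "rate_per/sec*"  (letters, digits, - _ / * are kept)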
def get_xsd_file(profile_name, profile_version): """Returns path to installed XSD, or local if no installed one exists.""" if profile_name.lower() not in XSD_LOOKUP_MAP: raise ValueError( 'Profile %s did not match a supported profile: %s.\n' % (profile_name, sorted(XSD_FILES.keys()))) # Ensure we have the correct case. camelcase_profile_name = XSD_LOOKUP_MAP[profile_name.lower()] if profile_version not in XSD_FILES[camelcase_profile_name]: raise ValueError( 'Profile Version %s did not match a supported version: %s.\n' % (profile_version, sorted(XSD_FILES[camelcase_profile_name].keys()))) return XSD_FILES[camelcase_profile_name][profile_version]
02d2c127fabd0a8f274211885e625f90d314036f
30,859
from requests_html import HTMLResponse, HTMLSession


def get_response(url: str) -> HTMLResponse:
    """
    Send an HTTP GET request to the given url and return the response.

    :param url: target url
    :return: response for the url
    """
    session = HTMLSession()
    return session.get(url)
f53c2a6a2066bbe76f3b9266d42ad014d5e4fcfa
30,860
def minutesBetween(date_1, date_2): """Calculates the number of whole minutes between two dates. Args: date_1 (Date): The first date to use. date_2 (Date): The second date to use. Returns: int: An integer that is representative of the difference between two dates. """ diff = date_2 - date_1 d, s, _ = diff.days, diff.seconds, diff.microseconds return d * 1440 + s // 60
1e75c3571bee3855183b7a51e661d8eaa0bf47a2
30,861
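A worked example, assuming the two arguments are Python datetime objects (anything whose difference exposes .days / .seconds / .microseconds works).

from datetime import datetime

d1 = datetime(2024, 1, 1, 10, 0, 0)
d2 = datetime(2024, 1, 2, 10, 30, 45)
print(minutesBetween(d1, d2))  # 1 day * 1440 + 1845 seconds // 60 = 1470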
import asyncio async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload FireServiceRota config entry.""" await hass.async_add_executor_job( hass.data[DOMAIN][entry.entry_id].websocket.stop_listener ) unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, platform) for platform in PLATFORMS ] ) ) if unload_ok: del hass.data[DOMAIN][entry.entry_id] return unload_ok
d6acb96f1f144868923b1cc65e99b4ee901caa56
30,863
def HasPositivePatterns(test_filter): """Returns True if test_filter contains a positive pattern, else False Args: test_filter: test-filter style string """ return bool(len(test_filter) > 0 and test_filter[0] != '-')
9038bf799efbe4008a83d2da0aba89c0197c16a1
30,864
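A few sample filters (assumes `HasPositivePatterns` is in scope).

print(HasPositivePatterns("FooTest.*"))   # True  - starts with a positive pattern
print(HasPositivePatterns("-FooTest.*"))  # False - exclusion-only filter
print(HasPositivePatterns(""))            # False - empty filter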
import math

import numpy as np


def get_lr(lr_init, lr_end, lr_max, total_epochs, warmup_epochs, pretrain_epochs,
           steps_per_epoch, lr_decay_mode):
    """
    generate learning rate array

    Args:
       lr_init(float): init learning rate
       lr_end(float): end learning rate
       lr_max(float): max learning rate
       total_epochs(int): total epoch of training
       warmup_epochs(int): number of warmup epochs
       pretrain_epochs(int): number of pretrain epochs
       steps_per_epoch(int): steps of one epoch
       lr_decay_mode(string): learning rate decay mode, including steps, poly, linear or cosine

    Returns:
       np.array, learning rate array
    """
    lr_each_step = []
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs
    pretrain_steps = steps_per_epoch * pretrain_epochs
    decay_steps = total_steps - warmup_steps

    if lr_decay_mode == 'steps':
        decay_epoch_index = [
            0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps
        ]
        for i in range(total_steps):
            if i < decay_epoch_index[0]:
                lr = lr_max
            elif i < decay_epoch_index[1]:
                lr = lr_max * 0.1
            elif i < decay_epoch_index[2]:
                lr = lr_max * 0.01
            else:
                lr = lr_max * 0.001
            lr_each_step.append(lr)
    elif lr_decay_mode == 'poly':
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                base = (1.0 - (i - warmup_steps) / decay_steps)
                lr = lr_max * base * base
            lr_each_step.append(lr)
    elif lr_decay_mode == 'linear':
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                lr = lr_max - (lr_max - lr_end) * (i - warmup_steps) / decay_steps
            lr_each_step.append(lr)
    elif lr_decay_mode == 'cosine':
        for i in range(total_steps):
            if i < warmup_steps:
                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
            else:
                linear_decay = (total_steps - i) / decay_steps
                cosine_decay = 0.5 * (
                    1 + math.cos(math.pi * 2 * 0.47 * (i - warmup_steps) / decay_steps))
                decayed = linear_decay * cosine_decay + 0.00001
                lr = lr_max * decayed
            lr_each_step.append(lr)
    else:
        raise NotImplementedError(
            'Learning rate decay mode [{:s}] cannot be recognized'.format(
                lr_decay_mode))

    lr_each_step = np.array(lr_each_step).astype(np.float32)
    learning_rate = lr_each_step[pretrain_steps:]
    return learning_rate
90091b35126bcf91166c498c396c831c3da1e7f6
30,865
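A small 'steps'-mode schedule: 10 epochs of 10 steps with no warmup or pretrain, so the external linear_warmup_lr helper is never reached (assumes `get_lr` is in scope).

lr = get_lr(lr_init=0.0, lr_end=0.0, lr_max=0.1, total_epochs=10, warmup_epochs=0,
            pretrain_epochs=0, steps_per_epoch=10, lr_decay_mode='steps')
# lr has 100 entries: 0.1 for steps 0-29, 0.01 for 30-59, 0.001 for 60-79, 0.0001 after that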
from mido import Message, MidiFile, MidiTrack


def comptineN2():
    """Generate the midi file of the comptine d'un autre été"""
    mid = MidiFile()
    trackl = MidiTrack()
    trackl.name = "Left hand"
    for i in range(8):
        trackl = comp_lh1(trackl)
    trackl = comp_lh1(trackl)
    trackl = comp_lh2(trackl)
    trackl = comp_lh2(trackl)
    trackl.append(Message('note_on', note=52))
    trackl.append(Message('note_off', note=52, time=200))
    mid.tracks.append(trackl)
    trackr = MidiTrack()
    trackr.name = 'Right hand'
    trackr.append(Message('note_on', note=67, velocity=0, time=3200))
    trackr = comp_rh1(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh2(trackr)
    trackr = comp_rh3(trackr)
    trackr = comp_rh3(trackr, end=True)
    trackr = comp_rh4(trackr)
    trackr.append(Message('note_on', note=71))
    trackr.append(Message('note_off', note=71, time=200))
    mid.tracks.append(trackr)
    mid.ticks_per_beat = 100
    vols = generate_vol()
    mid = volume(mid, vols)
    return mid
a7cd80b7ab483ef68be827c56e5f8b95967d8c08
30,866
from xenserver import tasks from xenserver.tests.helpers import XenServerHelper def xs_helper(monkeypatch): """ Provide a XenServerHelper instance and monkey-patch xenserver.tasks to use sessions from that instance instead of making real API calls. """ xshelper = XenServerHelper() monkeypatch.setattr(tasks, 'getSession', xshelper.get_session) return xshelper
d504aa6b651eb3777171187aceea7eb03fa7e46a
30,867
def highest_palindrome_product(digits): """Returns the highest palindrome number resulting from the multiplication of two numbers with the given amount of digits. """ def is_palindrome(target): """Returns True if target (str or int) is a palindrome. """ string = str(target) return list(string) == list(string)[::-1] # Creating the two highest possible numbers with the given amount of # digits: highest_number1 = highest_number2 = int("9"*digits) palindromes_list = [] while True: result = highest_number1 * highest_number2 if is_palindrome(result): palindromes_list.append(result) # Finding the products between all two numbers with the given # amount of digits: if highest_number2 == int("1" + "0"*(digits-1)): if highest_number1 == int("1" + "0"*(digits-1)): break else: highest_number2 = highest_number1 highest_number1 -=1 else: highest_number2 -= 1 return max(palindromes_list)
e509de1c977c6e4ecf9ab8304ef1afe65a447188
30,868
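The classic two-digit case (assumes `highest_palindrome_product` is in scope).

print(highest_palindrome_product(2))  # 9009 == 91 * 99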
def is_ladder(try_capture, game_state, candidate, ladder_stones=None, recursion_depth=50): """Ladders are played out in reversed roles, one player tries to capture, the other to escape. We determine the ladder status by recursively calling is_ladder in opposite roles, providing suitable capture or escape candidates. Arguments: try_capture: boolean flag to indicate if you want to capture or escape the ladder game_state: current game state, instance of GameState candidate: a move that potentially leads to escaping the ladder or capturing it, instance of Move ladder_stones: the stones to escape or capture, list of Point. Will be inferred if not provided. recursion_depth: when to stop recursively calling this function, integer valued. Returns True if game state is a ladder and try_capture is true (the ladder captures) or if game state is not a ladder and try_capture is false (you can successfully escape) and False otherwise. """ if not game_state.is_valid_move(Move(candidate)) or not recursion_depth: return False next_player = game_state.next_player capture_player = next_player if try_capture else next_player.other escape_player = capture_player.other if ladder_stones is None: ladder_stones = guess_ladder_stones(game_state, candidate, escape_player) for ladder_stone in ladder_stones: current_state = game_state.apply_move(candidate) if try_capture: candidates = determine_escape_candidates( game_state, ladder_stone, capture_player) attempted_escapes = [ # now try to escape is_ladder(False, current_state, escape_candidate, ladder_stone, recursion_depth - 1) for escape_candidate in candidates] if not any(attempted_escapes): return True # if at least one escape fails, we capture else: if count_liberties(current_state, ladder_stone) >= 3: return True # successful escape if count_liberties(current_state, ladder_stone) == 1: continue # failed escape, others might still do candidates = liberties(current_state, ladder_stone) attempted_captures = [ # now try to capture is_ladder(True, current_state, capture_candidate, ladder_stone, recursion_depth - 1) for capture_candidate in candidates] if any(attempted_captures): continue # failed escape, try others return True # candidate can't be caught in a ladder, escape. return False
755ed8c007d51034ec2f2a24958c3f4660795007
30,869
def instances(request, compute_id):
    """
    List instances for a compute host and dispatch instance actions.

    :param request:
    :return:
    """
    all_host_vms = {}
    error_messages = []
    compute = get_object_or_404(Compute, pk=compute_id)

    if not request.user.is_superuser:
        all_user_vms = get_user_instances(request)
    else:
        try:
            all_host_vms = get_host_instances(request, compute)
        except libvirtError as lib_err:
            error_messages.append(lib_err)

    if request.method == 'POST':
        try:
            return instances_actions(request)
        except libvirtError as lib_err:
            error_messages.append(lib_err)
            addlogmsg(request.user.username, request.POST.get("name", "instance"), str(lib_err))

    return render(request, 'instances.html', locals())
622336dfb836fbe4918f5d1331149aaaa3467a05
30,870
def seresnet101b_cub(classes=200, **kwargs): """ SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. Parameters: ---------- classes : int, default 200 Number of classification classes. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '~/.mxnet/models' Location for keeping the model parameters. """ return get_seresnet(classes=classes, blocks=101, conv1_stride=False, model_name="seresnet101b_cub", **kwargs)
784e915704d244270ddf479eaaf5e279a7db437a
30,871
from pathlib import Path
import shutil
from tempfile import mkdtemp


def dst(request):
    """Return a real temporary folder path which is unique to each test function
    invocation. This folder is deleted after the test has finished.
    """
    dst = Path(mkdtemp()).resolve()
    request.addfinalizer(lambda: shutil.rmtree(str(dst), ignore_errors=True))
    return dst
7714ce85fbeedfed00b571d9d2ef31cd6d8898e9
30,873
import string

import nltk


def getSentencesFromReview(reviewContent):
    """
    INPUT: a single review consisting of several sentences
    OUTPUT: a list of single sentences
    """
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    sentences = sent_detector.tokenize(reviewContent)

    # split agglomerated sentences
    for m in range(len(sentences)):
        subsentences = sentences[m].split('.')
        new_sentences = []
        new_subsen = subsentences[0]
        for n in range(1, len(subsentences)):
            if subsentences[n] and (subsentences[n][0] in string.ascii_uppercase):
                new_subsen += '.'
                new_sentences.append(new_subsen)
                new_subsen = subsentences[n]
            else:
                new_subsen += '.' + subsentences[n]
        new_sentences.append(new_subsen)
        sentences[m] = new_sentences

    # collect all the single sentence into final_sentence list
    final_sentences = []
    for sentence in sentences:
        if isinstance(sentence, list):
            final_sentences.extend(sentence)
        else:
            final_sentences.append(sentence)
    return final_sentences
2c074fac508994ad44edb0889a799ada22261c3c
30,874
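Splitting a short review, assuming NLTK and its 'punkt' tokenizer data are installed (nltk.download('punkt') fetches it) and `getSentencesFromReview` is in scope.

review = "The food was great. I loved the service."
print(getSentencesFromReview(review))
# typically -> ['The food was great.', 'I loved the service.']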
import math def gamma_vector_neutrino(m_med, g_l=0.0): """Function to calculate the neutrino width of a vector mediator :param m_med: mediator mass :type m_med: float :param g_l: lepton coupling, defaults to 0.0 :type g_l: float, optional """ return 3 * g_l**2 / (24 * math.pi) * m_med
ebb0c913beee57cf9cdb605cc356949cea461882
30,875
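A worked number for the width formula above: Gamma_nu = 3 * g_l**2 * m_med / (24 * pi) = g_l**2 * m_med / (8 * pi), in the same units as m_med (assumes `gamma_vector_neutrino` is in scope).

print(gamma_vector_neutrino(m_med=1000.0, g_l=0.1))  # ~0.3979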
def fifo_cdc(glbl, emesh_i, emesh_o): """ map the packet interfaces to the FIOF interface """ fifo_intf = FIFOBus(size=16, width=len(emesh_i.bits)) @always_comb def rtl_assign(): wr.next = emesh_i.access and not fifo_intf.full rd.next = not fifo_intf.empty and not emesh_i.wait emesh_o.wait.next = fifo_intf.full @always(emesh_o.clock.posedge) def rtl_access(): if not emesh_i.wait: emesh_o.access.next = fifo_intf.rd # assign signals ot the FIFO interface fifo_intf.wdata = emesh_i.bits fifo_intf.rdata = emesh_o.bits g_fifo = cores.fifo.fifo_async(glbl.reset, emesh_i.clock, emesh_o.clock, fifo_intf) return rtl_assign, rtl_access, g_fifo
d30445dad18043c63e29e7a79ff3e02dad370964
30,877
def eudora_bong(update, context): #1.2.1 """Show new choice of buttons""" query = update.callback_query bot = context.bot keyboard = [ [InlineKeyboardButton("Yes", callback_data='0'), InlineKeyboardButton("No", callback_data='00')], [InlineKeyboardButton("Back",callback_data='1.2')] ] reply_markup = InlineKeyboardMarkup(keyboard) bot.edit_message_text( chat_id=query.message.chat_id, message_id=query.message.message_id, text="""We have found a lawyer that suits your needs!""", ) bot.send_photo( chat_id=query.message.chat_id, photo = open("female.jpg",'rb') ) bot.send_message( chat_id=query.message.chat_id, text = """Name: Eudora Bong \nCompany: Chee Chong LLP \nYears of Experience: 9""", ) bot.send_message( chat_id=query.message.chat_id, text = """See more on our website: https://eldoraboo.github.io/PairALegal/eudora-bong""" ) bot.send_message( chat_id=query.message.chat_id, text = """Thank you for using Pair-A-Legal bot. \nWould you like to restart?""", reply_markup = reply_markup ) return FIRST
234d4324b384414fd6e2a6f52dbbccc51f0ff738
30,878
def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors
33b9a4bfd44a70d93ae7b50df870d46765bf0cb7
30,880