Dataset columns:
    content : string (length 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
import json


def list_clusters(event, context):
    """List clusters"""
    clusters = []
    cluster_items = storage.get_cluster_table().scan()
    for cluster in cluster_items.get('Items', []):
        clusters.append(cluster['id'])
    return {
        "statusCode": 200,
        "body": json.dumps(clusters)
    }
5f88ca446e8d07d7584b1dfd12fb64cddefc918c
14,651
def round(data):
    """Compute element-wise round of data.

    Parameters
    ----------
    data : relay.Expr
        The input data

    Returns
    -------
    result : relay.Expr
        The computed result.
    """
    return _make.round(data)
e3adfdc29d9cc641ca33fb375649caf176098d75
14,652
def deserialize(member, class_indexing):
    """ deserialize """
    class_name = member[0].text
    if class_name in class_indexing:
        class_num = class_indexing[class_name]
    else:
        return None
    bnx = member.find('bndbox')
    box_x_min = float(bnx.find('xmin').text)
    box_y_min = float(bnx.find('ymin').text)
    box_x_max = float(bnx.find('xmax').text)
    box_y_max = float(bnx.find('ymax').text)
    width = float(box_x_max - box_x_min + 1)
    height = float(box_y_max - box_y_min + 1)
    # try:
    #     ignore = float(member.find('ignore').text)
    # except ValueError:
    ignore = 0.0
    return [class_num, box_x_min, box_y_min, width, height, ignore]
087102acec79ec5d0ecad91453885579c2395895
14,653
def interval_weighting(intervals, lower, upper):
    """ Compute a weighting function by finding the proportion within the
    dataframe df's lower and upper bounds.

    Note: intervals is of the form ((lower, upper, id), ...)
    """
    if len(intervals) == 1:
        return np.asarray([1])
    wts = np.ones(len(intervals))
    lower_limit, upper_limit = intervals[0], intervals[-1]
    wts[0] = (lower_limit[1] - lower) / np.diff(lower_limit[:2])
    wts[-1] = (upper - upper_limit[0]) / np.diff(upper_limit[:2])
    return wts
5eaf974597ad13d2b2204526d84412a22a104bc2
14,654
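A quick worked example for interval_weighting above, as a minimal sketch (assuming numpy is available as np and a hypothetical tuple of 10-unit intervals; the interval values are not from the original source): with bounds that cut 5 units off each end interval, both end weights become 0.5.

import numpy as np

# Hypothetical intervals of the form (lower, upper, id)
intervals = ((0, 10, 'a'), (10, 20, 'b'), (20, 30, 'c'))
weights = interval_weighting(intervals, lower=5, upper=25)
# wts[0]  = (10 - 5) / 10 = 0.5
# wts[-1] = (25 - 20) / 10 = 0.5
print(weights)  # approximately [0.5, 1.0, 0.5]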
def centroid_precursor_frame(mzml_data_struct):
    """ Read and returns a centroid spectrum for a precursor frame

    This function uses the SDK to get and return an MS1 centroid spectrum
    for the requested frame.

    Parameters
    ----------
    mzml_data_struct : dict
        structure of the mzml data

    Returns
    -------
    list of lists
        list of mz and i lists [[mz,[i]]
    """
    precursor_frame_id = mzml_data_struct['current_precursor']['id']
    num_scans = mzml_data_struct['td'].conn.execute(
        "SELECT NumScans FROM Frames WHERE Id={0}".format(precursor_frame_id)
    ).fetchone()[0]
    data_list = mzml_data_struct['td'].extractCentroidedSpectrumForFrame(
        precursor_frame_id, 0, num_scans)
    return np.array(data_list)
24d6f19afeafcd731dd316c36aa4784d60224ee8
14,655
import random


def createNewForest():
    """Returns a dictionary for a new forest data structure."""
    forest = {'width': WIDTH, 'height': HEIGHT}
    for x in range(WIDTH):
        for y in range(HEIGHT):
            if (random.randint(1, 10000) / 100) <= INITIAL_TREE_DENSITY:
                forest[(x, y)] = TREE  # Start as a tree.
            else:
                forest[(x, y)] = EMPTY  # Start as an empty space.
    return forest
1c58bb3faeba7b866a7b406b742106adccb64a0f
14,659
def test_filter():
    """ Base class filter function """
    def test():
        """ Test the filter function """
        try:
            for i in _TEST_FRAME_.keys():
                for j in range(10):
                    test = _TEST_FRAME_.filter(i, "<", j)
                    assert all(map(lambda x: x < j, test[i]))
                    test = _TEST_FRAME_.filter(i, "<=", j)
                    assert all(map(lambda x: x <= j, test[i]))
                    test = _TEST_FRAME_.filter(i, "=", j)
                    assert all(map(lambda x: x == j, test[i]))
                    test = _TEST_FRAME_.filter(i, "==", j)
                    assert all(map(lambda x: x == j, test[i]))
                    test = _TEST_FRAME_.filter(i, '!=', j)
                    assert all(map(lambda x: x != j, test[i]))
                    test = _TEST_FRAME_.filter(i, ">=", j)
                    assert all(map(lambda x: x >= j, test[i]))
                    test = _TEST_FRAME_.filter(i, ">", j)
                    assert all(map(lambda x: x > j, test[i]))
        except:
            return False
        return True
    return ["vice.core.dataframe.base.filter", test]
5623ba98d8b06b2e2f395bf8387268f7857236e0
14,660
def exp_moving_average(values, window):
    """ Numpy implementation of EMA """
    if window >= len(values):
        if len(values) == 0:
            sma = 0.0
        else:
            sma = np.mean(np.asarray(values))
        a = [sma] * len(values)
    else:
        weights = np.exp(np.linspace(-1., 0., window))
        weights /= weights.sum()
        a = np.convolve(values, weights, mode='full')[:len(values)]
        a[:window] = a[window]
    return a
1563ac9898296e253c7733d341d30ee36cfb822c
14,661
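A minimal usage sketch for exp_moving_average above (assuming numpy is imported as np in the same module): the result has the same length as the input, and the first `window` entries are backfilled with the value at index `window`.

values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
ema = exp_moving_average(values, window=3)
print(len(ema) == len(values))  # True; entries 0-2 equal the value at index 3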
def parabolic(f, x):
    """
    Quadratic interpolation in order to estimate the location of a maximum
    https://gist.github.com/endolith/255291

    Args:
        f (ndarray): a vector of samples
        x (int): an index on the vector

    Returns:
        (vx, vy): the vertex coordinates of a parabola passing through x
            and its neighbors
    """
    xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
    yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
    return (xv, yv)
4373ee6390f3523d0fd69487c27e05522bd8c230
14,662
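A worked example for parabolic above: for the samples [2, 5, 4] around the peak at index 1, the interpolated vertex lands slightly to the right of the sample peak.

import numpy as np

f = np.array([2.0, 5.0, 4.0])
xv, yv = parabolic(f, 1)
# xv = 0.5 * (2 - 4) / (2 - 10 + 4) + 1 = 1.25
# yv = 5 - 0.25 * (2 - 4) * (1.25 - 1) = 5.125
print(xv, yv)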
def arith_expr(draw):
    """
    arith_expr: term (('+'|'-') term)*
    """
    return _expr_builder(draw, term, '+-')
277361c91c5967b36ec24b87402d2444e40f2a31
14,663
from glob import glob
from os.path import join


def extract_running_speed(module_params):
    """Writes the stimulus and pkl paths to the input json

    Parameters
    ----------
    module_params: dict
        Session or probe unique information, used by each module

    Returns
    -------
    module_params: dict
        Session or probe unique information, used by each module
    input_json_write_dict: dict
        A dictionary representing the values that will be written to the input json
    """
    # trim_discontiguous_frame_times = module_params['trim']
    output_path = module_params['output_path']
    input_json_write_dict = \
        {
            'stimulus_pkl_path': glob(join(module_params['base_directory'], "*.stim.pkl"))[0],
            'sync_h5_path': glob(join(module_params['base_directory'], "*.sync"))[0],
            'output_path': join(output_path, "running_speed.h5"),
            "log_level": 'INFO'
        }
    return module_params, input_json_write_dict
d04908a9161bebdc74b5a35f14568e50bf4f8559
14,664
def sliced_transposed_product(
    mat,
    block_size,
    axes=(-1,),
    precision=lax.Precision.DEFAULT,
):
    """Returns the blocked slices representing a symmetric contraction.

    Specifically, the output is a contraction of the input mat with itself, in the
    specified axes.

    Args:
      mat: The matrix for which we will compute a contraction with itself.
      block_size: The size of row blocks to compute.
      axes: Axes to use for the contraction.
      precision: The precision to use in each computation.

    Raises:
      ValueError: Raised when the specified block size does not evenly divide the
        number of rows of the input mat.
    """
    rank = len(mat.shape)

    def _make_axis_positive(ax):
        assert -rank <= ax < rank
        return ax + rank if ax < 0 else ax

    positive_axes = [_make_axis_positive(ax) for ax in axes]
    assert len(positive_axes) == len(axes)
    remaining_axes = set(range(rank)) - set(positive_axes)
    assert len(remaining_axes) == 1
    remaining_ax = remaining_axes.pop()

    num_rows = mat.shape[remaining_ax]
    if num_rows % block_size != 0:
        raise ValueError(
            "The row dimension must be divisible by block_size. "
            f"Instead got row dimension={num_rows} and block_size={block_size}."
        )

    block_rows = []
    for i in range(num_rows // block_size):
        start_indices = [0] * rank
        start_indices[remaining_ax] = i * block_size
        slice_sizes = list(mat.shape)
        slice_sizes[remaining_ax] = block_size
        slice_sizes_full = list(mat.shape)
        slice_sizes_full[remaining_ax] = (i + 1) * block_size
        block_rows.append(
            product_with_transpose(
                lax.dynamic_slice(
                    mat, start_indices=start_indices, slice_sizes=slice_sizes
                ),
                lax.dynamic_slice(
                    mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
                ),
                axes=(axes, axes),
                precision=precision,
            )
        )
    return SlicedSymmetricMatrix(block_rows=block_rows)
1bb2016dd485b2da9e74d4a70c703e8fefacf8ff
14,665
import re


def _is_ipython_line_magic(line):
    """
    Determines if the source line is an IPython magic. e.g.,

    %%bash
    for i in 1 2 3; do
        echo $i
    done
    """
    return re.match(_IS_IPYTHON_LINE_MAGIC, line) is not None
90575b556f6f6d62bb82b6fb18b2bc979735e808
14,666
def osu_to_excel(
    osu_path: str,
    excel_path: str = '',
    n: int = None,
    compact_log: bool = False,
    display_progress=True,
    **kwargs
) -> str:
    """Export metadata and hitobjects in a xlsx file."""
    metadata = from_osu(
        osu_path, n=n, compact_log=compact_log, display_progress=display_progress
    )
    mode = 'w' if not excel_path.strip() else 'a'
    excel_path = './osu_data.xlsx' if not excel_path else excel_path

    with pd.ExcelWriter(excel_path, mode=mode) as writer:
        Logs.info("the 'metadata' sheet is being created...")
        metadata[:MAX_EXCEL_LINES].to_excel(writer, sheet_name='metadata', index=False, **kwargs)
        if metadata.shape[0] > MAX_EXCEL_LINES:
            Logs.warning(f'The sheet "metadata" is too large ({metadata.shape[0]} lines), the maximum size has been keeping (MAX_EXCEL_LINES)')
        else:
            Logs.success('There is not error during the export data')
    return excel_path
5d26d70706ec74febc8be0c0d49eaf7f0c48186d
14,667
def convert_pybites_chars(text):
    """Swap case all characters in the word pybites for the given text.
    Return the resulting string."""
    return "".join(
        char.swapcase() if char.lower() in PYBITES else char for char in text
    )
73dff55cc7cd2f1c85d1f51319c12f8335803dce
14,669
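A small usage sketch for convert_pybites_chars above; the PYBITES constant is not included in the snippet, so a plausible definition is assumed here.

PYBITES = set("pybites")  # assumed definition; not part of the original snippet

print(convert_pybites_chars("Python and JavaScript"))
# Letters of "pybites" are case-swapped: "pYThon and JavascrIPT"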
def get_meminfo():
    """
    Return the total memory (in MB).

    :return: memory (float).
    """
    mem = 0.0
    with open("/proc/meminfo", "r") as fd:
        mems = fd.readline()
        while mems:
            if mems.upper().find("MEMTOTAL") != -1:
                try:
                    mem = float(mems.split()[1]) / 1024  # value listed by command as kB, convert to MB
                except ValueError as e:
                    logger.warning('exception caught while trying to convert meminfo: %s' % e)
                break
            mems = fd.readline()
    return mem
5aaa671d7d407b1593099a2fb7a1f2fcb0a88542
14,670
def process_inline_semantic_match(placeholder_storage, match_object):
    """
    Process a single inline-semantic match object.
    """
    delimiter = match_object.group('delimiter')
    tag_name = TAG_NAME_FROM_INLINE_SEMANTIC_DELIMITER[delimiter]
    attribute_specification = match_object.group('attribute_specification')
    attribute_dictionary = parse_attribute_specification(attribute_specification)
    attributes = build_html_attributes(placeholder_storage, attribute_dictionary)
    content = match_object.group('content')
    content = strip_whitespace(content)
    # Process nested inline semantics
    content = process_inline_semantics(placeholder_storage, content)
    inline_semantic = f'<{tag_name}{attributes}>{content}</{tag_name}>'
    return inline_semantic
a1f66093ed361f5e7f924061a1c9770d880d4acc
14,671
async def insert_cd_inurl_name(cluster_id: str, iso_name: str):
    """ Find SR by Name """
    try:
        try:
            session = create_session(
                _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters()
            )
        except KeyError as key_error:
            raise HTTPException(
                status_code=400, detail=f"{key_error} is not a valid path"
            )
        srs = SR.get_by_name(session=session, name=iso_name)
        if srs is not None:
            __srs_list = []
            srs_list = __srs_list.append
            for sr in srs:
                srs_list(serialize(sr))
            ret = dict(success=True, data=__srs_list)
        else:
            ret = dict(success=False)
        session.xenapi.session.logout()
        return ret
    except Fault as xml_rpc_error:
        raise HTTPException(
            status_code=int(xml_rpc_error.faultCode),
            detail=xml_rpc_error.faultString,
        )
    except RemoteDisconnected as rd_error:
        raise HTTPException(status_code=500, detail=rd_error.strerror)
7c6df12f6de461d559c63adb5e014708e2122760
14,672
def main_add(args):
    """Start the add-environment command and return exit status code."""
    return add_env_spec(args.directory, args.name, args.packages, args.channel)
6358464086e3fc01553df301514976d04a44b3c4
14,673
def write(objct, fileoutput, binary=True):
    """
    Write 3D object to file. (same as `save()`).

    Possible extensions are:
        - vtk, vti, npy, npz, ply, obj, stl, byu, vtp, vti, mhd, xyz, tif, png, bmp.
    """
    obj = objct
    if isinstance(obj, Points):  # picks transformation
        obj = objct.polydata(True)
    elif isinstance(obj, (vtk.vtkActor, vtk.vtkVolume)):
        obj = objct.GetMapper().GetInput()
    elif isinstance(obj, (vtk.vtkPolyData, vtk.vtkImageData)):
        obj = objct

    if hasattr(obj, 'filename'):
        obj.filename = fileoutput

    fr = fileoutput.lower()
    if fr.endswith(".vtk"):
        writer = vtk.vtkDataSetWriter()
    elif fr.endswith(".ply"):
        writer = vtk.vtkPLYWriter()
        writer.AddComment("PLY file generated by vedo")
        lut = objct.GetMapper().GetLookupTable()
        if lut:
            pscal = obj.GetPointData().GetScalars()
            if not pscal:
                pscal = obj.GetCellData().GetScalars()
            if pscal and pscal.GetName():
                writer.SetArrayName(pscal.GetName())
            writer.SetLookupTable(lut)
    elif fr.endswith(".stl"):
        writer = vtk.vtkSTLWriter()
    elif fr.endswith(".vtp"):
        writer = vtk.vtkXMLPolyDataWriter()
    elif fr.endswith(".vtu"):
        writer = vtk.vtkXMLUnstructuredGridWriter()
    elif fr.endswith(".vtm"):
        g = vtk.vtkMultiBlockDataGroupFilter()
        for ob in objct:
            if isinstance(ob, (Points, Volume)):  # picks transformation
                ob = ob.polydata(True)
                g.AddInputData(ob)
            # elif isinstance(ob, (vtk.vtkActor, vtk.vtkVolume)):
            #     ob = ob.GetMapper().GetInput()
            #     g.AddInputData(ob)
        g.Update()
        mb = g.GetOutputDataObject(0)
        wri = vtk.vtkXMLMultiBlockDataWriter()
        wri.SetInputData(mb)
        wri.SetFileName(fileoutput)
        wri.Write()
        return mb
    elif fr.endswith(".xyz"):
        writer = vtk.vtkSimplePointsWriter()
    elif fr.endswith(".facet"):
        writer = vtk.vtkFacetWriter()
    elif fr.endswith(".tif"):
        writer = vtk.vtkTIFFWriter()
        # print("GetCompression ", writer.GetCompression ())
        writer.SetFileDimensionality(len(obj.GetDimensions()))
    elif fr.endswith(".vti"):
        writer = vtk.vtkXMLImageDataWriter()
    elif fr.endswith(".mhd"):
        writer = vtk.vtkMetaImageWriter()
    elif fr.endswith(".nii"):
        writer = vtk.vtkNIFTIImageWriter()
    elif fr.endswith(".png"):
        writer = vtk.vtkPNGWriter()
    elif fr.endswith(".jpg"):
        writer = vtk.vtkJPEGWriter()
    elif fr.endswith(".bmp"):
        writer = vtk.vtkBMPWriter()
    elif fr.endswith(".npy") or fr.endswith(".npz"):
        if utils.isSequence(objct):
            objslist = objct
        else:
            objslist = [objct]
        dicts2save = []
        for obj in objslist:
            dicts2save.append(toNumpy(obj))
        np.save(fileoutput, dicts2save)
        return dicts2save
    elif fr.endswith(".obj"):
        outF = open(fileoutput, "w")
        outF.write('# OBJ file format with ext .obj\n')
        outF.write('# File generated by vedo\n')
        for p in objct.points():
            outF.write("v {:.5g} {:.5g} {:.5g}\n".format(*p))
        # pdata = objct.polydata().GetPointData().GetScalars()
        # if pdata:
        #     ndata = vtk_to_numpy(pdata)
        #     for vd in ndata:
        #         outF.write('vp '+ str(vd) +'\n')
        # ptxt = objct.polydata().GetPointData().GetTCoords() # not working
        # if ptxt:
        #     ntxt = vtk_to_numpy(ptxt)
        #     print(len(objct.faces()), objct.points().shape, ntxt.shape)
        #     for vt in ntxt:
        #         outF.write('vt '+ str(vt[0]) +" "+ str(vt[1])+ ' 0\n')
        for i, f in enumerate(objct.faces()):
            fs = ''
            for fi in f:
                fs += " {:d}".format(fi+1)
            outF.write('f' + fs + '\n')
        for l in objct.lines():
            ls = ''
            for li in l:
                ls += str(li+1)+" "
            outF.write('l '+ ls + '\n')
        outF.close()
        return objct
    elif fr.endswith(".xml"):  # write tetrahedral dolfin xml
        vertices = objct.points().astype(str)
        faces = np.array(objct.faces()).astype(str)
        ncoords = vertices.shape[0]
        outF = open(fileoutput, "w")
        outF.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        outF.write('<dolfin xmlns:dolfin="http://www.fenicsproject.org">\n')
        if len(faces[0]) == 4:  # write tetrahedral mesh
            ntets = faces.shape[0]
            outF.write('  <mesh celltype="tetrahedron" dim="3">\n')
            outF.write('    <vertices size="' + str(ncoords) + '">\n')
            for i in range(ncoords):
                x, y, z = vertices[i]
                outF.write('      <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'" z="'+z+'"/>\n')
            outF.write('    </vertices>\n')
            outF.write('    <cells size="' + str(ntets) + '">\n')
            for i in range(ntets):
                v0, v1, v2, v3 = faces[i]
                outF.write('      <tetrahedron index="'+str(i)
                           + '" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'" v3="'+v3+'"/>\n')
        elif len(faces[0]) == 3:  # write triangle mesh
            ntri = faces.shape[0]
            outF.write('  <mesh celltype="triangle" dim="2">\n')
            outF.write('    <vertices size="' + str(ncoords) + '">\n')
            for i in range(ncoords):
                x, y, dummy_z = vertices[i]
                outF.write('      <vertex index="'+str(i)+'" x="'+x+'" y="'+y+'"/>\n')
            outF.write('    </vertices>\n')
            outF.write('    <cells size="' + str(ntri) + '">\n')
            for i in range(ntri):
                v0, v1, v2 = faces[i]
                outF.write('      <triangle index="'+str(i)+'" v0="'+v0+'" v1="'+v1+'" v2="'+v2+'"/>\n')
        outF.write('    </cells>\n')
        outF.write("  </mesh>\n")
        outF.write("</dolfin>\n")
        outF.close()
        return objct
    else:
        colors.printc("\noentry Unknown format", fileoutput, "file not saved.", c="r")
        return objct

    try:
        if hasattr(writer, 'SetFileTypeToBinary'):
            if binary:
                writer.SetFileTypeToBinary()
            else:
                writer.SetFileTypeToASCII()
        writer.SetInputData(obj)
        writer.SetFileName(fileoutput)
        writer.Write()
    except Exception as e:
        colors.printc("\noentry Error saving: " + fileoutput, "\n", e, c="r")
    return objct
51e595a83d54a90dd392d09a67289527cb8a4510
14,674
def instantiate_env_class(builder: IRBuilder) -> Value:
    """Assign an environment class to a register named after the given function definition."""
    curr_env_reg = builder.add(
        Call(builder.fn_info.env_class.ctor, [], builder.fn_info.fitem.line)
    )
    if builder.fn_info.is_nested:
        builder.fn_info.callable_class._curr_env_reg = curr_env_reg
        builder.add(SetAttr(curr_env_reg,
                            ENV_ATTR_NAME,
                            builder.fn_info.callable_class.prev_env_reg,
                            builder.fn_info.fitem.line))
    else:
        builder.fn_info._curr_env_reg = curr_env_reg
    return curr_env_reg
14e3113fe6ba3ec107fcd36e36c7dc525bf11cc5
14,675
import json


def validate_recaptcha(token):
    """
    Send recaptcha token to API to check if user response is valid
    """
    url = 'https://www.google.com/recaptcha/api/siteverify'
    values = {
        'secret': settings.RECAPTCHA_PRIVATE_KEY,
        'response': token
    }
    data = urlencode(values).encode("utf-8")
    response = builtin_request.urlopen(url, data)
    result = json.load(response)
    if result['success']:
        return True, ""
    return False, "Invalid reCAPTCHA. Please try again."
7be09a76cbf946edbe8b1d717b2e2e2cdef9a902
14,676
def aggregate_hts(style="all_modes_combined"):
    """Use the 'processed' version of the HTS table to summarize the flows.

    Using the 'style' parameter, you can:
        - aggregate by mode using 'by_mode'
        - aggregate by mode and o&d location types using 'by_mode_and_location_type'
        - aggregate without considering mode, using the default 'all_modes_combined'
    """

    def _use_the_right_query(style: str, query: str) -> str:
        """If the 'style' is 'by_mode':
            - add 'mode_agg' into the query

        If the 'style' is 'by_mode_and_location_type':
            - add 'trip_type' and 'mode_agg' into the query

        Otherwise, just return the query as it was originally.
        """
        if style == "by_mode":
            return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg")
        elif style == "by_mode_and_location_type":
            return query.replace("o_cpa, d_cpa", "o_cpa, d_cpa, mode_agg, trip_type")
        else:
            return query

    db = db_connection()

    all_combos_query = """
        select o_cpa, d_cpa, count(*) as numtrips_24hr, sum(compositeweight) as sum_24hr
        from hts_2013_processed
        where trip_num < 97
        group by o_cpa, d_cpa
        order by sum(compositeweight) desc
    """
    am_query = """
        select o_cpa, d_cpa, count(*) as numtrips_am, sum(compositeweight) as sum_am
        from hts_2013_processed
        where trip_num < 97
          and time_window like '%%AM%%'
        group by o_cpa, d_cpa
    """
    pm_query = """
        select o_cpa, d_cpa, count(*) as numtrips_pm, sum(compositeweight) as sum_pm
        from hts_2013_processed
        where trip_num < 97
          and time_window like '%%PM%%'
        group by o_cpa, d_cpa
    """

    # Add the 'mode_agg' column if the 'style' is 'by_mode'
    all_combos_query = _use_the_right_query(style, all_combos_query)
    am_query = _use_the_right_query(style, am_query)
    pm_query = _use_the_right_query(style, pm_query)

    # Also, join on the 'mode_agg' column if we're analyzing 'by_mode'
    join_cols = ["o_cpa", "d_cpa"]
    if style == "by_mode":
        join_cols.append("mode_agg")
    elif style == "by_mode_and_location_type":
        join_cols.append("mode_agg")
        join_cols.append("trip_type")

    # Get the 24-hour totals
    df = db.query_as_df(all_combos_query)

    # Query and join the AM trips
    df_am = db.query_as_df(am_query)
    df = pd.merge(df, df_am, how="left", on=join_cols)

    # Repeat for the PM trips
    df_pm = db.query_as_df(pm_query)
    df = pd.merge(df, df_pm, how="left", on=join_cols)

    # Save the resulting dataframe back to SQL
    new_table_name = f"hts_2013_aggregated_{style}"
    db.import_dataframe(df, new_table_name, if_exists="replace")
b200b312351408e4615a503f56f301c3b775f35a
14,677
def pix2sky(shape, wcs, pix, safe=True, corner=False):
    """Given an array of corner-based pixel coordinates [{y,x},...],
    return sky coordinates in the same ordering."""
    pix = np.asarray(pix).astype(float)
    if corner:
        pix -= 0.5
    pflat = pix.reshape(pix.shape[0], -1)
    coords = np.asarray(wcsutils.nobcheck(wcs).wcs_pix2world(*(tuple(pflat)[::-1]+(0,)))[::-1])*get_unit(wcs)
    coords = coords.reshape(pix.shape)
    if safe and not wcsutils.is_plain(wcs):
        coords = utils.unwind(coords)
    return coords
288d3f67080611773273aaed950385b19d7aebc8
14,678
def getRelativeSilenceVideo(videoPath):
    """Function to get relative silence videos before and after each video"""
    silVid = ['', '']
    vidData = getVideoDataFromPath(videoPath)
    videoNameList = videoPath.split('/')
    tempVidName = videoNameList[0] + '/' + videoNameList[1] + '/' + videoNameList[2] + '/Silence/sil_{}.mp4'
    vidNumber = int((vidData.identifier.split('_')[1]).split('.')[0])
    silVid[0] = tempVidName.format(vidNumber * 2)
    silVid[1] = tempVidName.format((vidNumber * 2) + 1)
    return silVid
b829915c4cfa7592e394914ba40457200b352ab4
14,679
def convert_to_xyxy_coordinates(boxes: tf.Tensor) -> tf.Tensor:
    """Convert boxes from their center coordinates to corner coordinates

    y_cent, x_cent, h, w -> y_min, x_min, y_max, x_max

    Arguments:

    - *boxes*: A Tensor of shape [N, ..., (y_cent, x_cent, h, w)]

    Returns:

    A tensor of shape [N, ..., num_boxes, (y_min, x_min, y_max, x_max)]
    """
    y_cent, x_cent, h, w = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
    y_min = y_cent - 0.5 * h
    x_min = x_cent - 0.5 * w
    y_max = y_cent + 0.5 * h
    x_max = x_cent + 0.5 * w
    return tf.concat([y_min, x_min, y_max, x_max], axis=-1)
2412d3383d4335d707e220a52ac5e5198513d8ab
14,680
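A minimal numeric check for convert_to_xyxy_coordinates above (assuming TensorFlow 2.x in eager mode): a box centered at (2, 3) with height 4 and width 6 maps to corners (0, 0, 4, 6).

import tensorflow as tf

boxes = tf.constant([[2.0, 3.0, 4.0, 6.0]])  # (y_cent, x_cent, h, w)
corners = convert_to_xyxy_coordinates(boxes)
print(corners)  # y_min=0, x_min=0, y_max=4, x_max=6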
import click


def classify(mapper: object,
             files: list or dict,
             samples: list = None,
             fmt: str = None,
             demux: bool = None,
             trimsub: str = None,
             tree: dict = None,
             rankdic: dict = None,
             namedic: dict = None,
             root: str = None,
             ranks: str = None,
             rank2dir: dict = None,
             outzip: str = None,
             uniq: bool = False,
             major: int = None,
             above: bool = False,
             subok: bool = False,
             sizes: dict = None,
             unasgd: bool = False,
             stratmap: dict = None,
             chunk: int = None,
             cache: int = 1024,
             zippers: dict = None,
             outcov_dir: str = None) -> dict:
    """Core of the classification workflow.

    Parameters
    ----------
    mapper : object
        Mapping module (Plain or Ordinal).
    files : list or dict
        Paths to input alignment files, if multiplexed, or dictionary of file
        paths to sample IDs, if per-sample.
    samples : list of str, optional
        Sample ID list to include.
    fmt : str, optional
        Format of input alignment file. Options:
        - 'b6o': BLAST tabular format.
        - 'sam': SAM format.
        - 'map': Simple map of query <tab> subject.
        If None, program will automatically infer from file content.
    demux : bool, optional
        Whether perform demultiplexing.
    trimsub : str, optional
        Trim subject IDs at the last given delimiter.
    tree : dict, optional
        Taxonomic tree.
    rankdic : dict, optional
        Rank dictionary.
    namedic : dict, optional
        Taxon name dictionary.
    root : str, optional
        Root identifier.
    ranks: list of str, optional
        List of ranks at each of which sequences are to be classified. Can also
        be "none" to omit classification (simply report subject IDs) or "free"
        to perform free-rank classification (LCA of subjects regardless of rank
        will be reported).
    rank2dir : dict, otional
        Write classification map per rank to directory.
    outzip : str, optional
        Output read map compression method (gz, bz2, xz or None).
    uniq : bool, optional
        Assignment must be unique. Otherwise, report all possible assignments
        and normalize counts (for none- and fixed-rank assignments).
    major : int, optional
        In given-rank classification, perform majority-rule assignment based on
        this percentage threshold. Range: [51, 99].
    above : bool, optional
        Allow assigning to a classification unit higher than given rank.
    subok : bool, optional
        In free-rank classification, allow assigning sequences to their direct
        subjects instead of higher classification units, if applicable.
    sizes : dict, optional
        Subject size dictionary.
    unasgd : bool, optional
        Report unassigned sequences.
    stratmap : dict, optional
        Map of sample ID to stratification file.
    chunk : int, optional
        Number of lines per chunk to read from alignment file.
    cache : int, optional
        LRU cache size for classification results at each rank.
    zippers : dict, optional
        External compression programs.
    outcov_dir : str, optional
        Write Subject coverage maps to directory.

    Returns
    -------
    dict of dict
        Per-rank profiles generated from classification.

    Notes
    -----
    Subject(s) of each query are sorted and converted into a tuple, which is
    hashable, a property necessary for subsequent assignment result caching.
    """
    data = {x: {} for x in ranks}

    # assigners for each rank
    assigners = {}

    # assignment parameters
    kwargs = {'assigners': assigners, 'cache': cache, 'tree': tree,
              'rankdic': rankdic, 'namedic': namedic, 'root': root,
              'uniq': uniq, 'major': major and major / 100, 'above': above,
              'subok': subok, 'sizes': sizes, 'unasgd': unasgd,
              'rank2dir': rank2dir,
              'outzip': outzip if outzip != 'none' else None}

    # (optional) subject coverage data
    covers = {} if outcov_dir else None

    # current sample Id
    csample = False

    # parse input alignment file(s) and generate profile(s)
    for fp in sorted(files):
        click.echo(f'Parsing alignment file {basename(fp)} ', nl=False)

        # read alignment file into query-to-subject(s) map
        with readzip(fp, zippers) as fh:

            # query and progress counters
            nqry, nstep = 0, -1

            # parse alignment file by chunk
            for qryque, subque in mapper(fh, fmt=fmt, n=chunk):
                nqry += len(qryque)

                # (optional) demultiplex and generate per-sample maps
                rmaps = demultiplex(qryque, subque, samples) if demux else {
                    files[fp] if files else None: (qryque, subque)}

                # (optional) calculate subject coverage
                if outcov_dir:
                    parse_ranges(rmaps, covers)

                # assign reads at each rank
                for sample, (qryque, subque) in rmaps.items():

                    # (optional) strip suffixes from subject Ids
                    subque = deque(map(tuple, map(sorted, strip_suffix(
                        subque, trimsub) if trimsub else subque)))

                    # (optional) read strata of current sample into cache
                    if stratmap and sample != csample:
                        kwargs['strata'] = read_strata(
                            stratmap[sample], zippers)
                        csample = sample

                    # call assignment workflow for each rank
                    for rank in ranks:
                        assign_readmap(
                            qryque, subque, data, rank, sample, **kwargs)

                # show progress
                istep = nqry // 1000000 - nstep
                if istep:
                    click.echo('.' * istep, nl=False)
                    nstep += istep

        click.echo(' Done.')
        click.echo(f' Number of sequences classified: {nqry}.')

    # write coverage maps
    if outcov_dir:
        click.echo('Calculating per sample coverage...', nl=False)
        write_coverage(calc_coverage(covers), outcov_dir)
        click.echo(' Done.')

    click.echo('Classification completed.')
    return data
1d1976dcf35617a3860af39d77fb206880071105
14,682
import re


def get_better_loci(filename, cutoff):
    """
    Returns a subset of loci such that each locus includes at least "cutoff"
    different species.
    :param filename:
    :param cutoff:
    :return:
    """
    f = open(filename)
    content = f.read()
    f.close()
    # split on the "//..." separator lines between loci
    loci = re.split(r'//.*', content)
    better_loci = []
    for locus in loci:
        found_species = set()
        for line in locus.strip().split("\n"):
            if line == "":
                continue
            (individual, sequence) = line[1:].split()
            found_species.add(individual.split("_")[-1])
        if len(found_species) >= cutoff:
            better_loci.append(locus)
    return better_loci
e2d563c9d0568cef59ea0280aae61a78bf4a6e7b
14,683
import math


def paginate_data(data_list, page=1, per_page=10):
    """Return the data split into pages."""
    pages = int(math.ceil(len(data_list) / per_page))
    page = int(page)
    per_page = int(per_page)
    has_next = True if pages > page else False
    has_prev = True if 1 < page <= int(pages) else False
    items = data_list[(page-1)*per_page: page*per_page]
    return {
        "item_list": items,
        "page": page,
        "total": len(data_list),
        "pages": pages,
        "has_next": has_next,
        "next_num": page + 1 if has_next else None,
        "per_page": per_page,
        "has_prev": has_prev,
        "prev_num": page - 1 if has_prev else None
    }
63a4602462e0c2e38329107b10b5d72b63c3108d
14,684
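A usage sketch for paginate_data above: 25 items at 10 per page yield 3 pages, and page 2 covers indices 10 through 19.

data = list(range(25))
page2 = paginate_data(data, page=2, per_page=10)
print(page2["pages"], page2["has_next"], page2["item_list"][0])  # 3 True 10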
import torch


def quat_to_rotmat(quat):
    """Convert quaternion coefficients to rotation matrix.

    Args:
        quat: size = [B, 4]  4 <===> (w, x, y, z)
    Returns:
        Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
    """
    norm_quat = quat
    norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]
    B = quat.size(0)
    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z
    rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                          2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                          2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2],
                         dim=1).view(B, 3, 3)
    return rotMat
6590272c0ed3a97f8f5ef5eacd3605b0c7b91626
14,685
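A quick sanity check for quat_to_rotmat above: the identity quaternion (w=1, x=y=z=0) should map to the 3x3 identity matrix.

import torch

quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]])  # (w, x, y, z)
print(quat_to_rotmat(quat))  # a [1, 3, 3] tensor holding the identity matrix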
def has_multimethods(cls):
    """ Declare class as one that has multimethods."""
    for name, obj in cls.__dict__.items():
        if isinstance(obj, MethodDispatcher):
            obj.proceed_unbound_rules(cls)
    return cls
4248af44c0ba6b585a80a4eb0d8da1ca5e9f2299
14,686
def elastic_depth(f, time, method="DP2", lam=0.0, parallel=True):
    """
    calculates the elastic depth between functions in matrix f

    :param f: matrix of size MxN (M time points for N functions)
    :param time: vector of size M describing the sample points
    :param method: method to apply optimization (default="DP2")
        options are "DP","DP2","RBFGS"
    :param lam: controls the elasticity (default = 0.0)

    :rtype: scalar
    :return amp: amplitude depth
    :return phase: phase depth
    """
    obs, fns = f.shape

    amp_dist = zeros((fns, fns))
    phs_dist = zeros((fns, fns))

    if parallel:
        out = Parallel(n_jobs=-1)(delayed(distmat)(f, f[:, n], time, n, method)
                                  for n in range(fns))
        for i in range(0, fns):
            amp_dist[i, :] = out[i][0]
            phs_dist[i, :] = out[i][1]
    else:
        for i in range(0, fns):
            amp_dist[i, :], phs_dist[i, :] = distmat(f, f[:, i], time, i, method)

    amp_dist = amp_dist + amp_dist.T
    phs_dist = phs_dist + phs_dist.T

    amp = 1 / (1 + median(amp_dist, axis=0))
    phase = 1 / (1 + median(phs_dist, axis=0))
    phase = ((2+pi)/pi) * (phase - 2/(2+pi))

    return amp, phase
574880a5cc3d26d756286a5d7a8959c67141678a
14,687
from typing import Any


def run_coro_thread(func: callable, *args, **kwargs) -> Any:
    """
    Run a Python AsyncIO coroutine function within a new event loop using a thread,
    and return the result / raise any exceptions as if it were ran normally within
    an AsyncIO function.

    .. Caution:: If you're wanting to run a coroutine within a thread from an AsyncIO
                 function/method, then you should use :func:`.run_coro_thread_async`
                 instead, which uses :func:`asyncio.sleep` while waiting for a
                 result/exception to be transmitted via a queue.

                 This allows you to run and wait for multiple coroutine threads
                 simultaneously, as there's no synchronous blocking wait - unlike
                 this function.

    This will usually allow you to run coroutines from a synchronous function without
    running into the dreaded "Event loop is already running" error - since the
    coroutine will be ran inside of a thread with it's own dedicated event loop.

    **Example Usage**::

        >>> async def example_func(lorem: int, ipsum: int):
        ...     if lorem > 100: raise AttributeError("lorem is greater than 100!")
        ...     return f"example: {lorem + ipsum}"
        >>> run_coro_thread(example_func, 10, 20)
        example: 30
        >>> run_coro_thread(example_func, 3, ipsum=6)
        example: 9
        >>> run_coro_thread(example_func, lorem=40, ipsum=1)
        example: 41
        >>> run_coro_thread(example_func, 120, 50)
          File "", line 2, in example_func
            if lorem > 100: raise AttributeError("lorem is greater than 100!")
        AttributeError: lorem is greater than 100!

    Creates a new :class:`threading.Thread` with the target :func:`.coro_thread_func`
    (via :func:`.run_coro_thread_base`), passing the coroutine ``func`` along with the
    passed positional ``args`` and keyword ``kwargs``, which creates a new event loop,
    and then runs ``func`` within that thread event loop.

    Uses the private :class:`queue.Queue` threading queue :attr:`._coro_thread_queue`
    to safely relay back to the calling thread - either the result from the coroutine,
    or an exception if one was raised while trying to run the coroutine.

    :param callable func: A reference to the ``async def`` coroutine function that you want to run
    :param args:          Positional arguments to pass-through to the coroutine function
    :param kwargs:        Keyword arguments to pass-through to the coroutine function
    :return Any coro_res: The result returned from the coroutine ``func``
    """
    t_co = run_coro_thread_base(func, *args, **kwargs, _output_queue=_coro_thread_queue)
    t_co.join()
    res = _coro_thread_queue.get(block=True, timeout=10)
    if isinstance(res, (Exception, BaseException)):
        raise res
    return res
078b17d38552aa5d9a30efd1374d8f4e8f7e9b40
14,688
def get_all_ports(entity):
    """
    Recursively descends through the entity hierarchy and collects all ports
    defined within the parameter or any of its children.

    Parameters
    ----------
    entity : Entity
        The root from which to start collecting.

    Returns
    -------
    list of Port
        A list of ports within the entity or its children.
    """
    return [p for e in get_all_entities(entity) for p in get_ports(e)]
a490ba48d647a1d82a2c7ae7d75e61afb089c907
14,689
def deploy(**kwargs):
    """Deploy a PR into a remote server via Fabric"""
    return apply_pr(**kwargs)
26d11e6d6ab08e1298aa99203925c45b96535df9
14,690
import torch


def word_list2tensor(word_list, dictionary):
    """
    args
        word_list: [batch_size, seq_len, token_id]
        dictionary: Dictionary
    return
        source, target [batch_size, seq_len, token_id]
    """
    word_list_padded = add_word_padding(word_list, dictionary)
    batch = torch.LongTensor(word_list_padded)
    return batch
6e484c282779bfd709030735268468f3bacde268
14,691
import six


def canonicalize_monotonicity(monotonicity, allow_decreasing=True):
    """Converts string constants representing monotonicity into integers.

    Args:
      monotonicity: The monotonicities hyperparameter of a `tfl.layers` Layer
        (e.g. `tfl.layers.PWLCalibration`).
      allow_decreasing: If decreasing monotonicity is considered a valid
        monotonicity.

    Returns:
      monotonicity represented as -1, 0, 1, or None.

    Raises:
      ValueError: If monotonicity is not in the set
        {-1, 0, 1, 'decreasing', 'none', 'increasing'} and allow_decreasing is
        True.
      ValueError: If monotonicity is not in the set {0, 1, 'none', 'increasing'}
        and allow_decreasing is False.
    """
    if monotonicity is None:
        return None

    if monotonicity in [-1, 0, 1]:
        if not allow_decreasing and monotonicity == -1:
            raise ValueError(
                "'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
                "Given: {}".format(monotonicity))
        return monotonicity
    elif isinstance(monotonicity, six.string_types):
        if monotonicity.lower() == "decreasing":
            if not allow_decreasing:
                raise ValueError(
                    "'monotonicities' must be from: [0, 1, 'none', 'increasing']. "
                    "Given: {}".format(monotonicity))
            return -1
        if monotonicity.lower() == "none":
            return 0
        if monotonicity.lower() == "increasing":
            return 1
    raise ValueError("'monotonicities' must be from: [-1, 0, 1, 'decreasing', "
                     "'none', 'increasing']. Given: {}".format(monotonicity))
a9d0870d03f11d7bdff4c8f673cd78d072fa8478
14,692
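Example calls for canonicalize_monotonicity above, illustrating the string-to-integer mapping and the allow_decreasing guard:

print(canonicalize_monotonicity("increasing"))  # 1
print(canonicalize_monotonicity("none"))        # 0
print(canonicalize_monotonicity("decreasing"))  # -1
# canonicalize_monotonicity("decreasing", allow_decreasing=False) raises ValueError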
def add_gdp(df, gdp, input_type="raw", drop=True):
    """Adds the `GDP` to the dataset.

    Assuming that both passed dataframes have a column named `country`.

    Parameters
    ----------
    df : pd.DataFrame
        Training or test dataframe including the `country` column.
    gdp : pd.DataFrame
        Mapping between `country` and `GDP`
    input_type : {"raw", "aggregated"}
        Whether the operation should run on the raw, or the aggregated dataset.
    drop : bool
        Whether the old country columns should be dropped.

    Returns
    -------
    pd.DataFrame
        The passed `df` with a new column corresponding to the mapped GDP.
    """

    def stringify(maybe_string):
        # Handles Unicode country names like "Côte d’Ivoire", "Réunion" etc.,
        # as well as countries only existing in one of the two dataframes.
        try:
            return str(maybe_string)
        except UnicodeEncodeError:
            return "Unknown"

    if input_type == "aggregated":
        country_cols = [col for col in df.columns
                        if col.startswith("country") and col != "country"]

        def inverse_ohe(row):
            for c in country_cols:
                if row[c] == 1:
                    return c.split("_")[1]

        df["country"] = df.apply(inverse_ohe, axis=1)
        if drop:
            df = df.drop(country_cols, axis=1)
    elif input_type != "raw":
        msg = "Only {} and {} are supported. \n" + \
              "\tThe former assumes the original form where only the JSON has been flattened.\n" + \
              "\tThe latter assumes that OHE has already occurred on top."
        raise ValueError(msg)

    df["country"] = df["country"].fillna("Unknown").apply(stringify)
    result = df.merge(gdp, on="country", how='left')
    if drop:
        result.drop("country", axis=1, inplace=True)
    return result
72e2b5fe839f3dbc71ca2def4be442535a0adb84
14,693
from scipy.ndimage.filters import maximum_filter


def no_background_patches(threshold=0.4, percentile=99.9):
    """Returns a patch filter to be used by :func:`create_patches` to determine for each
    image pair which patches are eligible for sampling. The purpose is to only sample
    patches from "interesting" regions of the raw image that actually contain a
    substantial amount of non-background signal. To that end, a maximum filter is
    applied to the target image to find the largest values in a region.

    Parameters
    ----------
    threshold : float, optional
        Scalar threshold between 0 and 1 that will be multiplied with the
        (outlier-robust) maximum of the image (see `percentile` below) to denote a
        lower bound. Only patches with a maximum value above this lower bound are
        eligible to be sampled.
    percentile : float, optional
        Percentile value to denote the (outlier-robust) maximum of an image,
        i.e. should be close 100.

    Returns
    -------
    function
        Function that takes an image pair `(y,x)` and the patch size as arguments and
        returns a binary mask of the same size as the image (to denote the locations
        eligible for sampling for :func:`create_patches`). At least one pixel of the
        binary mask must be ``True``, otherwise there are no patches to sample.

    Raises
    ------
    ValueError
        Illegal arguments.
    """
    (np.isscalar(percentile) and 0 <= percentile <= 100) or _raise(ValueError())
    (np.isscalar(threshold) and 0 <= threshold <= 1) or _raise(ValueError())

    def _filter(datas, patch_size, dtype=np.float32):
        image = datas[0]
        if dtype is not None:
            image = image.astype(dtype)
        # make max filter patch_size smaller to avoid only few non-bg pixel close to image border
        patch_size = [(p//2 if p > 1 else p) for p in patch_size]
        filtered = maximum_filter(image, patch_size, mode='constant')
        return filtered > threshold * np.percentile(image, percentile)
    return _filter
b1ffd8b7bb2023c483da35565044b02f7fd96cd8
14,695
def start_thread():
    """Start new thread with or without first comment."""
    subject = request.form.get('subject') or ''
    comment = request.form.get('comment') or ''
    if not subject:
        return error('start_thread:subject')
    storage.start_thread(g.username, subject, comment)
    flash('New Thread Started: {0}'.format(subject), 'success')
    return to_threads()
a8fabcddac91cc5cc6d5a63382e1ba433f425c20
14,696
def get_package_data(name, package=None):
    """Retrieve metadata information for the given package name"""
    if not package:
        package = models.Package(name=name)
        releases = {}
    else:
        releases = package.get_all_releases()

    client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
    versions = client.package_releases(package.name, True)

    # package_releases() method is case-sensitive, if nothing found
    # then we search for it
    # XXX: Ask pypi to make it case-insensitive?
    if not versions:
        for item in client.search({'name': name}):
            if name.lower() == item['name'].lower():
                package.name = name = item['name']
                break
        else:
            logger.info("No packages found matching %r", name)
            return

        # Retry retrieving the versions with the new/correct name
        versions = client.package_releases(package.name, True)

    # Save the package if it is new
    if not package.pk:
        package.save()

    for version in versions:
        release, files = releases.get(version, (None, {}))
        if not release:
            release = models.Release(package=package, version=version)
            release.save()

        data = client.release_data(package.name, release.version)
        release_form = forms.PypiReleaseDataForm(data, instance=release)
        if release_form.is_valid():
            release_form.save()

        release_files = client.package_urls(package.name, release.version)
        for info in release_files:
            release_file = files.get(info['filename'])
            if not release_file:
                release_file = models.ReleaseFile(
                    release=release, filename=info['filename'])
            release_file.python_version = info['python_version']
            release_file.filetype = info['packagetype']
            release_file.url = info['url']
            release_file.size = info['size']
            release_file.md5_digest = info['md5_digest']
            release_file.save()

    package.update_timestamp = now()
    package.save()
    return package
98824594fdd245760387f912192037b2e024aadc
14,697
def feedforward(
        inputs,
        input_dim,
        hidden_dim,
        output_dim,
        num_hidden_layers,
        hidden_activation=None,
        output_activation=None):
    """
    Creates a dense feedforward network with num_hidden_layers layers where each layer
    has hidden_dim number of units except for the last layer which has output_dim
    number of units.

    Arguments:
        inputs: Tensor input.
        hidden_dim: The number of units in each hidden layer.
        output_dim: The number of units in the output layer.
        num_hidden_layers: The number of hidden layers.
        hidden_activation: The activation function of hidden layers.
            Set it to None to use a linear activation.
        output_activation: The activation function of the output layer.
            Set it to None to use a linear activation.

    Returns:
        Output tensor.
    """
    prev_input_dim = input_dim
    prev_output = inputs
    for i in range(0, num_hidden_layers):
        with tf.variable_scope("dense" + str(i)):
            w_n = tf.get_variable("w_" + str(i), [prev_input_dim, hidden_dim],
                                  initializer=tf.initializers.random_normal(0, 1))
            b_n = tf.get_variable("b_" + str(i), [hidden_dim],
                                  initializer=tf.initializers.random_normal(0, 1))
            prev_input_dim = hidden_dim
            prev_output = hidden_activation(tf.matmul(prev_output, w_n) + b_n)
    with tf.variable_scope("dense_output"):
        return tf.layers.dense(prev_output, output_dim, activation=output_activation)
bbf6559d27e68ff4642d8842af50ff0d292bd1c8
14,700
import doctest


def _test():
    """
    >>> solve("axyb", "abyxb")
    axb
    """
    global chr

    def chr(x):
        return x

    doctest.testmod()
1ba052fbf066cee92ad2088b9562443c727292df
14,701
from typing import Optional


def _basic_rebuild_chain(target: database.Target) -> RebuildChain:
    """
    Get a rebuild chain based purely on 'rebuild info' from Jam.
    """
    chain: RebuildChain = [(target, None)]
    current: Optional[database.Target] = target
    assert current is not None
    while True:
        reason = current.rebuild_reason
        current = current.rebuild_reason_target
        if current is None:
            break
        else:
            chain.append((current, reason))
    return chain
966864ac71eafb982c2dff0f74e383e207127b32
14,702
def ravel_group_params(parameters_group):
    """Take a dict(group -> {k->p}) and return a dict('group:k'-> p)
    """
    return {f'{group_name}:{k}': p
            for group_name, group_params in parameters_group.items()
            for k, p in group_params.items()}
4a768e89cd70b39bea4f658600690dcb3992a710
14,703
def decode_orders(game, power_name, dest_unit_value, factors):
    """ Decode orders from computed factors

    :param game: An instance of `diplomacy.Game`
    :param power_name: The name of the power we are playing
    :param dest_unit_value: A dict with unit as key, and unit value as value
    :param factors: An instance of `Factors`
    :return: A list of orders
    :type factors: Factors
    :type game: diplomacy.Game
    """
    phase_type = game.get_current_phase()[-1]

    # Movement phase
    if phase_type == 'M':
        return generate_movement_orders(game, power_name, dest_unit_value, factors)

    # Retreat phase
    if phase_type == 'R':
        return generate_retreat_orders(game, power_name, dest_unit_value)

    # Adjustment
    if phase_type == 'A':
        power = game.get_power(power_name)
        nb_builds = len(power.centers) - len(power.units)

        # Building
        if nb_builds >= 0:
            return generate_build_orders(game, power_name, dest_unit_value)

        # Disbanding
        return generate_disband_orders(game, power_name, dest_unit_value)

    # Otherwise, invalid phase_type
    LOGGER.error('Invalid phase type. Got %s. Expected M, R, A', phase_type)
    return []
ac1e9b59d792158bb0b903709344b8535b330e73
14,704
from typing import Type


def _convert_to_type(se, allow_any=False, allow_implicit_tuple=False):
    """
    Converts an S-Expression representing a type, like (Vec Float) or
    (Tuple Float (Vec Float)), into a Type object, e.g. Type.Tensor(1,Type.Float)
    or Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)).

    If allow_implicit_tuple is true, also converts a list of types into a Tuple,
    e.g. (Float (Vec Float)) becomes Type.Tuple(Type.Float, Type.Tensor(1,Type.Float)),
    i.e. as if the S-Expression began with an extra "Tuple".
    """
    while isinstance(se, list) and len(se) == 1:
        se = se[0]  # Discard ((pointless)) brackets
    if isinstance(se, sexpdata.Symbol):
        if se.value() == "Any" and allow_any:
            return None
        return Type(se.value())
    if isinstance(se, list) and len(se) > 0:
        if isinstance(se[0], sexpdata.Symbol):
            sym = se[0].value()
            if sym == "Tensor" and len(se) == 3:
                assert se[1] == 1, "Only 1D 'Tensor's ('Vec's) supported"
                return Type.Tensor(1, _convert_to_type(se[2]))
            children = [_convert_to_type(s) for s in se[1:]]
            if sym == "Vec" and len(se) == 2:
                return Type.Tensor(1, utils.single_elem(children))
            if sym == "Tuple":
                return Type.Tuple(*children)
            # Fall through in case it's a list of types with allow_implicit_tuple.
        if allow_implicit_tuple:
            return Type.Tuple(*[_convert_to_type(s) for s in se])
    raise ValueError("Did not know how to parse type {}".format(se))
f615244363fa7fcdc67c4d68580860b3145bd94f
14,705
def index():
    """Returns a 200, that's about it!!!!!!!"""
    return 'Wow!!!!!'
f6d8a765556d2d6a1c343bb0ab1a9d4a6c5fd6ba
14,706
def merge_tables(pulse_data, trial_data, merge_keys=TRIAL_GROUPER):
    """Add trial-wise information to the pulse-wise table."""
    pulse_data = pulse_data.merge(trial_data, on=merge_keys)
    add_kernel_data(pulse_data)
    return pulse_data
1c5eafa44b50d05c8d23af7d290d0b40c2643ef9
14,708
def eos_deriv(beta, g):
    """ compute d E_os(beta)/d beta from polynomial expression"""
    x = np.tan(beta/2.0)
    y = g[4] + x * g[3] + x*x * g[2] + x*x*x*g[1] + x*x*x*x*g[0]
    y = y / ((1.0 + x*x)*(1.0 + x*x)*(1.0 + x*x))
    return y
2e1055bc48364abfe5bb07a1d9eafd32fefb7031
14,709
def optimizeAngle(angle):
    """
    Because any rotation can be expressed within 360 degrees
    of any given number, and since negative angles sometimes
    are one character longer than corresponding positive angle,
    we shorten the number to one in the range to [-90, 270[.
    """
    # First, we put the new angle in the range ]-360, 360[.
    # The modulo operator yields results with the sign of the
    # divisor, so for negative dividends, we preserve the sign
    # of the angle.
    if angle < 0:
        angle %= -360
    else:
        angle %= 360

    # 720 degrees is unnecessary, as 360 covers all angles.
    # As "-x" is shorter than "35x" and "-xxx" one character
    # longer than positive angles <= 260, we constrain angle
    # range to [-90, 270[ (or, equally valid: ]-100, 260]).
    if angle >= 270:
        angle -= 360
    elif angle < -90:
        angle += 360

    return angle
8abcaba2542b59715ced1c0acec94194f6e357d7
14,710
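A few example values for optimizeAngle above, showing how rotations are folded into the [-90, 270[ range:

print(optimizeAngle(450))   # 90
print(optimizeAngle(275))   # -85
print(optimizeAngle(-270))  # 90
print(optimizeAngle(-45))   # -45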
def process_one_name(stove_name):
    """
    Translates a single PokerStove-style name of holecards into
    an expanded list of pokertools-style names. For example:
        "AKs" -> ["Ac Kc", "Ad Kd", "Ah Kh", "As Ks"]
        "66"  -> ["6c 6d", "6c 6h", "6c 6s", "6d 6h", "6d 6s", "6c 6d"]
    """
    if len(stove_name) == 3:
        rank1, rank2, suit_mark = stove_name
        if suit_mark == "s":
            return [
                "{}{} {}{}".format(rank1, suit, rank2, suit)
                for suit in SUITS
            ]
        elif suit_mark == "o":
            return [
                "{}{} {}{}".format(rank1, suit1, rank2, suit2)
                for suit1, suit2 in SUIT_PERMUATIONS
            ]
        else:
            raise TokeniserError("incorrect suit_mark in stove_name: {}".format(stove_name))
    else:
        rank1, rank2 = stove_name
        if rank1 == rank2:
            return [
                "{}{} {}{}".format(rank1, suit1, rank2, suit2)
                for suit1, suit2 in SUIT_COMBINATIONS
            ]
        else:
            raise TokeniserError("rank1 != rank2 in stove_name: {}".format(stove_name))
5a824df9ae1a723c350b635a6b3096b795d4c58e
14,711
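A usage sketch for process_one_name above; the suit constants are not included in the snippet, so plausible stand-in definitions are assumed here.

from itertools import combinations, permutations

# Assumed constants; the original module supplies its own versions.
SUITS = "cdhs"
SUIT_COMBINATIONS = list(combinations(SUITS, 2))
SUIT_PERMUATIONS = list(permutations(SUITS, 2))  # identifier name kept as in the snippet

print(process_one_name("AKs"))
# ['Ac Kc', 'Ad Kd', 'Ah Kh', 'As Ks']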
def job_dispatch(results, job_id, batches):
    """
    Process the job batches one at a time

    When there is more than one batch to process, a chord is used to delay the
    execution of remaining batches.
    """
    batch = batches.pop(0)
    info('dispatching job_id: {0}, batch: {1}, results: {2}'.format(job_id, batch, results))
    tasks = [job_worker.subtask((job_id, task_num)) for task_num in batch]

    # when there are other batches to process, use a chord to delay the
    # execution of remaining tasks, otherwise, finish off with a TaskSet
    if batches:
        info('still have batches, chording {0}'.format(batches))
        callback = job_dispatch.subtask((job_id, batches))
        return chord(tasks)(callback)
    else:
        info('only batch, calling TaskSet')
        return TaskSet(tasks=tasks).apply_async()
d6107c11bf350aedc1103e0e182f2808041abb5b
14,712
import logging
import sqlite3


def get_temperature():
    """ Serves temperature data from the database, in a simple html format """
    logger = logging.getLogger("logger")
    # sqlite handler
    sql_handler = SQLiteHandler()
    logger.addHandler(sql_handler)
    logger.setLevel(logging.INFO)

    con = sqlite3.connect(db)
    cur = con.cursor()
    cur.execute("select * from temperatures")
    rows = cur.fetchall()
    cur.close()

    logger.info("Temperatures data was requested.")
    return render_template("temp.html", rows=rows)
3f2400c823ff2bc11a2b1910ce6cc39d90614178
14,713
def command_result_processor_category_empty(command_category):
    """
    Command result message processor if a command category is empty.

    Parameters
    ----------
    command_category : ``CommandLineCommandCategory``
        Respective command category.

    Returns
    -------
    message : `str`
    """
    command_full_name = ''.join(command_category._trace_back_name())

    message_parts = []
    message_parts.append('Command category: ')
    message_parts.append(repr(command_full_name))
    message_parts.append(' has no direct command, neither sub commands registered.\n')

    return ''.join(message_parts)
10da547a922bfd538a4241976385210969bf752a
14,714
def _parse_path(**kw):
    """
    Parse leaflet `Path` options.
    http://leafletjs.com/reference-1.2.0.html#path
    """
    color = kw.pop('color', '#3388ff')
    return {
        'stroke': kw.pop('stroke', True),
        'color': color,
        'weight': kw.pop('weight', 3),
        'opacity': kw.pop('opacity', 1.0),
        'lineCap': kw.pop('line_cap', 'round'),
        'lineJoin': kw.pop('line_join', 'round'),
        'dashArray': kw.pop('dash_array', None),
        'dashOffset': kw.pop('dash_offset', None),
        'fill': kw.pop('fill', False),
        'fillColor': kw.pop('fill_color', color),
        'fillOpacity': kw.pop('fill_opacity', 0.2),
        'fillRule': kw.pop('fill_rule', 'evenodd'),
        'bubblingMouseEvents': kw.pop('bubbling_mouse_events', True),
    }
02d3810ad69a1a0b8f16d61e661e246aea5c09cc
14,715
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
                    fill_mode='nearest', cval=0., interpolation_order=1):
    """Performs a random rotation of a Numpy image tensor.

    # Arguments
        x: Input tensor. Must be 3D.
        rg: Rotation range, in degrees.
        row_axis: Index of axis for rows in the input tensor.
        col_axis: Index of axis for columns in the input tensor.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.
        interpolation_order int: order of spline interpolation.
            see `ndimage.interpolation.affine_transform`

    # Returns
        Rotated Numpy image tensor.
    """
    theta = np.random.uniform(-rg, rg)
    x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis,
                               fill_mode=fill_mode, cval=cval,
                               order=interpolation_order)
    return x
57f263f1bee9fb323205543cba9c14c9a86ba431
14,716
from typing import Optional
import time
from datetime import datetime


def time_struct_2_datetime(
    time_struct: Optional[time.struct_time],
) -> Optional[datetime]:
    """Convert struct_time to datetime.

    Args:
        time_struct (Optional[time.struct_time]): A time struct to convert.

    Returns:
        Optional[datetime]: A converted value.
    """
    return (
        datetime.fromtimestamp(time.mktime(time_struct))
        if time_struct is not None
        else None
    )
705b09428d218e8a47961e247b62b9dfd631a41f
14,719
def we_are_buying(account_from, account_to):
    """
    Are we buying? (not buying == selling)
    """
    buy = False
    sell = False
    for value in TRADING_ACCOUNTS:
        if (value.lower() in account_from):
            buy = True
            sell = False
        elif (value.lower() in account_to):
            buy = False
            sell = True
    return buy
a5748ad756f472e0e2c39b5dc5239265fbf3d1f4
14,722
import time


def wait_for_compute_jobs(nevermined, account, jobs):
    """Monitor and wait for compute jobs to finish.

    Args:
        nevermined (:py:class:`nevermined_sdk_py.Nevermined`): A nevermined instance.
        account (:py:class:`contracts_lib_py.account.Account`): Account that published
            the compute jobs.
        jobs (:obj:`list` of :obj:`tuple`): A list of tuples with each tuple containing
            (service_agreement_id, compute_job_id).

    Returns:
        :obj:`list` of :obj:`str`: Returns a list of dids produced by the jobs

    Raises:
        ValueError: If any of the jobs fail
    """
    failed = False
    dids = set()
    while True:
        finished = 0
        for i, (sa_id, job_id) in enumerate(jobs):
            status = nevermined.assets.compute_status(sa_id, job_id, account)
            print(f"{job_id}: {status['status']}")
            if status["status"] == "Failed":
                failed = True
            if status["status"] == "Succeeded":
                finished += 1
                dids.add(status["did"])
        if failed:
            for i, (sa_id, job_id) in enumerate(jobs):
                logs = nevermined.assets.compute_logs(sa_id, job_id, account)
                for line in logs:
                    print(f"[{line['podName']}]: {line['content']}")
            raise ValueError("Some jobs failed")
        if finished == len(jobs):
            break
        # move up 4 lines
        print("\u001B[4A")
        time.sleep(5)
    return list(dids)
98370b8d596f304630199578a360a639507ae3c3
14,725
def f1_score_loss(predicted_probs: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
    """
    Computes a loss function based on F1 scores (harmonic mean of precision and recall).

    Args:
        predicted_probs: A [B, L] tensor of predicted probabilities
        labels: A [B, 1] tensor of expected labels

    Returns:
        A tensor of sample-wise losses
    """
    # Apply a sharpened sigmoid function to approximate the threshold
    thresholded_predictions = predicted_probs - ONE_HALF
    level_predictions = 1.0 / (1.0 + tf.exp(BETA * thresholded_predictions))  # [B, L]
    # predictions = tf.reduce_prod(level_predictions, axis=-1, keepdims=True)  # [B, 1]
    predictions = tf.exp(tf.reduce_sum(tf.log(level_predictions), axis=-1, keepdims=True))  # [B, 1]

    # Compute the (approximate) F1 score
    f1_score = 2 * tf.reduce_sum(predictions * labels) / (tf.reduce_sum(predictions) + tf.reduce_sum(labels))
    return 1.0 - f1_score
df4f35516230a7c57b0c6b3e8b7e958feae900f8
14,726
def get_alarm_historys_logic(starttime, endtime, page, limit):
    """
    GET historical alarm records.

    :return: resp, status
        resp: JSON-formatted response data
        status: response code
    """
    data = {'alarm_total': 0, "alarms": []}
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    alarm_set = SfoAlarmLogMethod.group_by_alarm_device(page=int(page),
                                                        limit=int(limit),
                                                        starttime=starttime,
                                                        endtime=endtime)
    if alarm_set:
        data['alarm_total'] = alarm_set.total
        for alarm in alarm_set.items:
            sfo_alarm_logs = SfoAlarmLogMethod.query_by_alarm_device(alarm.alarm_device, starttime, endtime)
            if len(sfo_alarm_logs) > 0:
                critical_len = list(filter(lambda x: x.alarm_level == 'critical', sfo_alarm_logs))
                warn_len = list(filter(lambda x: x.alarm_level == 'warning', sfo_alarm_logs))
                sfo_cluster_node = SfoClusterNodesMethod.query_host_by_host_name(alarm.hostname)
                alarm_info = {"alarm": sfo_alarm_logs[0],
                              "total": len(sfo_alarm_logs),
                              "warning_total": len(warn_len),
                              "critical_total": len(critical_len)}
                if sfo_cluster_node and sfo_cluster_node.cluster_name:
                    alarm_info.update({"cluster_name": sfo_cluster_node.cluster_name})
                    alarm_info.update({"ip": sfo_cluster_node.node_inet_ip})
                data['alarms'].append(alarm_info)
        status = 200
        message = 'OK'
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
bc273cf8e6d022374f92b7c3da86552a9dbbed2a
14,727
def showCities():
    """ Shows all cities in the database """
    if 'access_token' not in login_session:
        return redirect(url_for('showLogin'))
    cities = session.query(City).order_by(City.id)
    return render_template('cities.html', cities=cities)
558b2a8639f810cf105777ce89acc368e4441bbd
14,728
def symb_to_num(symbolic):
    """
    Convert symbolic permission notation to numeric notation.
    """
    if len(symbolic) == 9:
        group = (symbolic[:-6], symbolic[3:-3], symbolic[6:])
        try:
            numeric = notation[group[0]] + notation[group[1]] + notation[group[2]]
        except:
            numeric = "Invalid Symbolic Representation!"
    else:
        numeric = "Symbolic input should be of length 9!"
    return numeric
c2c11697658322ad972e87ec1eb55d08eaa91e0e
14,729
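A usage sketch for symb_to_num above; the notation lookup table is not part of the snippet, so a conventional rwx-to-octal mapping is assumed here.

# Assumed mapping; the original module supplies its own 'notation' dict.
notation = {
    "rwx": "7", "rw-": "6", "r-x": "5", "r--": "4",
    "-wx": "3", "-w-": "2", "--x": "1", "---": "0",
}

print(symb_to_num("rwxr-xr--"))  # "754"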
def round_vector(v, fraction):
    """
    Round each element of the vector.

    Args:
        v (list[float, float, float]):

    Returns:
        list[float, float, float]:
    """
    v = [round(x, fraction) for x in v]
    return v
47c10d23d9f2caa319f4f3fa97c85cf226752bab
14,730
def accept(model):
    """Return True if more than 20% of the validation data is being correctly
    classified. Used to avoid including nets which haven't learnt anything in
    the ensemble.
    """
    accuracy = 0
    for data, target in validation_data[:500 // 100]:
        if use_gpu:
            data, target = Variable(data.cuda(), volatile=True), Variable(target.cuda())
        else:
            data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        pred = output.data.max(1, keepdim=True)[1]
        accuracy += pred.eq(target.data.view_as(pred)).cpu().sum()
    if accuracy < 100:
        return False
    else:
        return True
c2921fb6dc0226b88fe7dd8219264cb5908feb6b
14,731
import struct def parse_tcp_packet(tcp_packet): """read tcp data.http only build on tcp, so we do not need to support other protocols.""" tcp_base_header_len = 20 # tcp header tcp_header = tcp_packet[0:tcp_base_header_len] source_port, dest_port, seq, ack_seq, t_f, flags = struct.unpack(b'!HHIIBB6x', tcp_header) # real tcp header len tcp_header_len = ((t_f >> 4) & 0xF) * 4 # skip extension headers if tcp_header_len > tcp_base_header_len: pass # body body = tcp_packet[tcp_header_len:] return source_port, dest_port, flags, seq, ack_seq, body
fa1b1050609cce8ca23ca5bac6276a681f560659
14,732
def find_balanced(text, start=0, start_sep='(', end_sep=')'): """ Finds balanced ``start_sep`` with ``end_sep`` assuming that ``start`` is pointing to ``start_sep`` in ``text``. """ if start >= len(text) or start_sep != text[start]: return start balanced = 1 pos = start + 1 while pos < len(text): token = text[pos] pos += 1 if token == end_sep: if balanced == 1: return pos balanced -= 1 elif token == start_sep: balanced += 1 return start
15c17a216405028b480efa9d12846905a1eb56d4
14,733
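Example usage of find_balanced (illustrative input, not from the original source); the returned index points just past the matching closing separator.

text = "call(foo(bar), baz) rest"
end = find_balanced(text, start=4)
print(text[4:end])  # '(foo(bar), baz)'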
import datetime
import io
import re

import pandas as pd
import requests


def get_jhu_counts():
    """
    Get latest case count .csv from JHU.
    Return aggregated counts by country as Series.
    """
    now = datetime.datetime.now().strftime("%m-%d-%Y")
    url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv"
    req = requests.head(url)

    # Step back one day at a time until a published daily report is found
    date = datetime.datetime.now()
    while req.status_code != 200:
        print("Got status " + str(req.status_code) + " for '" + url + "'")
        date = date - datetime.timedelta(days=1)
        now = date.strftime("%m-%d-%Y")
        url = f"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{now}.csv"
        req = requests.head(url)

    req = requests.get(url)
    jhu_df = pd.read_csv(io.StringIO(req.text))
    print(f"Retrieved JHU case counts from {now}.")

    # _COUNTRY_MAP is a module-level mapping of country names (defined elsewhere)
    jhu_counts = jhu_df['Confirmed'].groupby(
        jhu_df['Country_Region']).sum().reset_index()
    jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply(
        lambda x: re.sub(r'[^a-zA-Z ]', '', x))
    jhu_counts['Country_Region'] = jhu_counts['Country_Region'].apply(
        lambda x: _COUNTRY_MAP[x] if x in _COUNTRY_MAP.keys() else x)
    jhu_counts = jhu_counts.set_index('Country_Region')
    jhu_counts = pd.Series(jhu_counts.values.flatten(), index=jhu_counts.index)
    return jhu_counts
6a3fb69cce6f8976178afd3ff81ab2381b89abc5
14,734
def sectionsToMarkdown(root): """ Converts a list of Demisto JSON tables to markdown string of tables :type root: ``dict`` or ``list`` :param root: The JSON table - List of dictionaries with the same keys or a single dictionary (required) :return: A string representation of the markdown table :rtype: ``str`` """ mdResult = '' if isinstance(root, dict): for section in root: data = root[section] if isinstance(data, dict): data = [data] data = [{k: formatCell(row[k]) for k in row} for row in data] mdResult += tblToMd(section, data) return mdResult
3f916544cc5a9dc7e4d094d82834382d377948f1
14,735
import numpy def VonMisesFisher_sample(phi0, theta0, sigma0, size=None): """ Draw a sample from the Von-Mises Fisher distribution. Parameters ---------- phi0, theta0 : float or array-like Spherical-polar coordinates of the center of the distribution. sigma0 : float Width of the distribution. size : int, tuple, array-like number of samples to draw. Returns ------- phi, theta : float or array_like Spherical-polar coordinates of sample from distribution. """ n0 = cartesian_from_polar(phi0, theta0) M = rotation_matrix([0, 0, 1], n0) x = numpy.random.uniform(size=size) phi = numpy.random.uniform(size=size) * 2*numpy.pi theta = numpy.arccos(1 + sigma0**2 * numpy.log(1 + (numpy.exp(-2/sigma0**2)-1) * x)) n = cartesian_from_polar(phi, theta) x = M.dot(n) phi, theta = polar_from_cartesian(x) return phi, theta
440029bb9c3455dce22ff2d078068f9b7c404a7b
14,736
from typing import Optional from typing import Dict from typing import Any from unittest.mock import patch async def async_init_flow( hass: HomeAssistantType, handler: str = DOMAIN, context: Optional[Dict] = None, data: Any = None, ) -> Any: """Set up mock Roku integration flow.""" with patch( "homeassistant.components.roku.config_flow.Roku.device_info", new=MockDeviceInfo, ): return await hass.config_entries.flow.async_init( handler=handler, context=context, data=data )
2147f18b6b26e57e84d21aff321e8464710de653
14,737
import inspect def create_cell(cell_classname, cell_params): """ Creates RNN cell. Args: cell_classname: The name of the cell class, e.g. "LSTMCell", "GRUCell" and so on. cell_params: A dictionary of parameters to pass to the cell constructor. Returns: A `tf.contrib.rnn.RNNCell` object. """ cell_params = cell_params.copy() # Find the cell class, use the in-house implemented LSTMCell & GRUCell cell_class = eval(cell_classname) # find from all CELL NAMES imported from tf.contrib.rnn # Make sure additional arguments are valid cell_args = set(inspect.getargspec(cell_class.__init__).args[1:]) new_cell_params = {} for key in cell_params.keys(): if key not in cell_args: # raise ValueError( tf.logging.info( """{} is not a valid argument for {} class. Available arguments are: {}""".format(key, cell_class.__name__, cell_args)) else: new_cell_params[key] = cell_params[key] # Create cell return cell_class(**new_cell_params)
64eed878f950499b599f992dbb50f2f05e8fbff9
14,739
def get_myia_tag(rtag): """Return the myia tag for a constructor. This will fail if you haven't properly called fill_reverse_tag_map(). """ return rev_tag_map[rtag]
95e7afb73ce15bbfe7a75c4708f5c81a9c9e22df
14,740
def get_priority(gene, phenotype): """ Get matched priority from the phenotype table. Parameters ---------- gene : str Gene name. phenotype : str Phenotype name. Returns ------- str EHR priority. Examples -------- >>> import pypgx >>> pypgx.get_priority('CYP2D6', 'Normal Metabolizer') 'Normal/Routine/Low Risk' >>> pypgx.get_priority('CYP2D6', 'Ultrarapid Metabolizer') 'Abnormal/Priority/High Risk' >>> pypgx.get_priority('CYP3A5', 'Normal Metabolizer') 'Abnormal/Priority/High Risk' >>> pypgx.get_priority('CYP3A5', 'Poor Metabolizer') 'Normal/Routine/Low Risk' """ if not is_target_gene(gene): raise NotTargetGeneError(gene) if phenotype not in list_phenotypes(): raise PhenotypeNotFoundError(phenotype) df = load_phenotype_table() i = (df.Gene == gene) & (df.Phenotype == phenotype) return df[i].Priority.values[0]
5520d8df0b79834227f059e98d66109134e84439
14,741
import datetime

import numpy as np
from tqdm import tqdm


def _generator3(path):
    """
    Args:
        path: path of the catalog dataframe
    Yields:
        (x, y) pairs of preprocessed images and GHI targets
    """
    args = init_args()
    catalog = load_catalog(path)

    def preprocess(x, y=None):
        # avg_x and std_x are module-level normalization statistics (defined elsewhere)
        zero = False
        if not np.any(x):
            zero = True
        img = (x - avg_x) / std_x
        return img, y, zero

    for index in tqdm(range(0, len(catalog), 200)):
        rows = catalog[index:index + 200]
        for idx, row in rows.iterrows():
            # print(row)
            # pdb.set_trace()
            if row.ncdf_path == "nan":
                continue
            samples = load_numpy(row['hdf5_8bit_path'])
            offset_idx = row['hdf5_8bit_offset']
            # continue
            timedelta_rows = [catalog[catalog.index == (
                idx + datetime.timedelta(hours=i))] for i in [0, 1, 3, 6]]
            # CS_GHIs = [catalog[catalog.index==(idx+datetime.timedelta(hours=i))][station_i + "_CLEARSKY_GHI"].values[0] for i in [0,1,3,6]]

            for station_i in args.station_data.keys():
                sample = samples[station_i]
                if row[[station_i + "_GHI"]].isnull()[0]:
                    continue
                elif row[[station_i + "_DAYTIME"]][0] == 0:
                    continue
                else:
                    GHI_0 = row[station_i + "_GHI"]
                    # train_df[train_df.index == train_df.index[0]+datetime.timedelta(hours=1)]
                    # pdb.set_trace()
                    GHIs = [i[station_i + "_GHI"].values[0] for i in timedelta_rows]
                    CS_GHIs = [i[station_i + "_CLEARSKY_GHI"].values[0] for i in timedelta_rows]
                    y = np.array(CS_GHIs) - np.array(GHIs)
                    if np.isnan(np.sum(y)):
                        continue
                    # ini = time.time()
                    # print(station_coords)
                    imgs = []
                    x = sample[offset_idx].swapaxes(0, 1).swapaxes(1, 2)
                    # print(y)
                    x = preprocess(x)[0]
                    yield x, y
3d26d9cab1777b3b72d85584c6ff95b39c725e47
14,742
def _extract_gsi(name): """ Extract a normalised groundstation if available. :param name: :rtype: str >>> _extract_gsi('LANDSAT-7.76773.S3A1C2D2R2') >>> _extract_gsi('AQUA.60724.S1A1C2D2R2') >>> _extract_gsi('TERRA.73100.S1A2C2D4R4') >>> _extract_gsi('LANDSAT-8.3108') >>> _extract_gsi('NPP.VIIRS.10014.ALICE') 'ASA' >>> _extract_gsi('NPP_VIRS_STD-HDF5_P00_18966.ASA_0_0_20150626T053709Z20150626T055046') 'ASA' >>> _extract_gsi('not_an_ads_dir') >>> _extract_gsi('LANDSAT-8.FAKE') """ last_component = name.split('.')[-1] if '_' in last_component: last_component = last_component.split('_')[0] if not metadata.is_groundstation_alias(last_component): return None return metadata.normalise_gsi(last_component)
b101b79df21b9d0bbb633dbca14ff6a5b207b91d
14,743
import numpy as np


def array_at_verts_basic2d(a):
    """
    Computes values at cell vertices on 2d array using neighbor averaging.

    Parameters
    ----------
    a : ndarray
        Array values at cell centers, could be a slice in any orientation.

    Returns
    -------
    averts : ndarray
        Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1).
    """
    assert a.ndim == 2
    shape_verts2d = (a.shape[0] + 1, a.shape[1] + 1)

    # create a 3D array of size (nrow+1, ncol+1, 4)
    averts3d = np.full(shape_verts2d + (4,), np.nan)
    averts3d[:-1, :-1, 0] = a
    averts3d[:-1, 1:, 1] = a
    averts3d[1:, :-1, 2] = a
    averts3d[1:, 1:, 3] = a

    # calculate the mean over the last axis, ignoring NaNs
    averts = np.nanmean(averts3d, axis=2)

    return averts
e1f9ab5abbed6d4837daec01b8cd865d15cddde6
14,744
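A small worked example for array_at_verts_basic2d (illustrative input): each vertex value is the NaN-ignoring mean of its adjacent cell centers, so corners copy a single cell, edges average two, and interior vertices average four.

import numpy as np

a = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(array_at_verts_basic2d(a))
# [[1.  1.5 2. ]
#  [2.  2.5 3. ]
#  [3.  3.5 4. ]]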
def get_subquestion_answer(response, questions, subquestion): """ Return the answer to a subquestion from ``response``. """ question_id = subquestion[0] answers = response[question_id] dim = len(subquestion) - 1 for answer in answers: matched = True if subquestion[1] != answer[0]: matched = False if dim == 2 and subquestion[2] != answer[1]: matched = False if matched: if dim == 1: answer = answer[1] else: answer = answer[2] return map_answer_expr(questions, question_id, answer)
e0b89db06570e35d1fb9eba7b762ed96bf7c16b8
14,745
import numpy as np
from scipy.sparse import csgraph


def uniform_centroids(dist_map, n_centroids):
    """
    Uniformly space `n_centroids` seeds in a naive way

    :param dist_map: sparse distance map
    :param n_centroids: number of seeds to place
    :return: (n_centroids, ) integer arrays with the indices of the seeds
    """
    def get_dist(idx_vertex):
        return csgraph.dijkstra(dist_map, indices=idx_vertex, directed=False)

    res = np.zeros(n_centroids, dtype='i4')
    res[0] = np.random.randint(0, dist_map.shape[0])
    dist = get_dist(res[0])
    for idx in range(1, n_centroids):
        res[idx] = np.argmax(dist)
        np.minimum(dist, get_dist(res[idx]), out=dist)
    return res
437eaf8b70b56379d5529ea30026176fda9049a9
14,746
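An illustrative call for uniform_centroids on a tiny, hypothetical line graph. The first seed is drawn at random, so the exact output varies; the second seed is always the vertex farthest (by graph distance) from the first.

import numpy as np
from scipy.sparse import csr_matrix

# 4 vertices connected in a line: 0 -1- 1 -1- 2 -1- 3
rows, cols, w = [0, 1, 2], [1, 2, 3], [1.0, 1.0, 1.0]
graph = csr_matrix((w, (rows, cols)), shape=(4, 4))

print(uniform_centroids(graph, n_centroids=2))  # e.g. [1 3] if the random first seed is vertex 1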
import collections.abc
import itertools


def collate_custom(batch, key=None):
    """
    Custom collate function for the Dataset class
    * It doesn't convert numpy arrays to stacked tensors, but rather combines them in a list
    * This is useful for processing annotations of different sizes
    """
    # this case will occur in first pass, and will convert a
    # list of dictionaries (returned by the threads by sampling dataset[idx])
    # to a unified dictionary of collated values
    if isinstance(batch[0], collections.abc.Mapping):
        return {key: collate_custom([d[key] for d in batch], key) for key in batch[0]}
    # these cases will occur in recursion
    # elif torch.is_tensor(batch[0]):  # for tensors, use standard collating function
    #     return default_collate(batch)
    elif isinstance(batch, list) and isinstance(batch[0], list):
        # flatten lists of lists
        flattened_list = list(itertools.chain(*batch))
        return flattened_list
    elif isinstance(batch, list) and len(batch) == 1:
        # lists of length 1, remove list wrap
        return batch[0]
    else:
        # for other types (i.e. lists of len != 1), return as is
        return batch
b692252cb27aed68cb5af6cd5644913216a8dde7
14,747
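Example of how collate_custom combines a batch of dictionaries with differently sized annotations (illustrative data): lists of lists are flattened, other values are simply gathered per key.

batch = [{"boxes": [[0, 0, 10, 10]], "label": 1},
         {"boxes": [[5, 5, 8, 8], [1, 2, 3, 4]], "label": 2}]
print(collate_custom(batch))
# {'boxes': [[0, 0, 10, 10], [5, 5, 8, 8], [1, 2, 3, 4]], 'label': [1, 2]}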
import wikipedia


def get_articles(language, no_words, max_no_articles, search, **kwargs):
    """ Retrieve articles from Wikipedia """
    wikipedia.set_rate_limiting(True)  # be polite
    wikipedia.set_lang(language)

    if search is not None:
        titles = wikipedia.search(search, results=max_no_articles)
    else:
        titles = wikipedia.random(pages=max_no_articles)

    articles = []
    current_no_words = 0
    for title in titles:
        print("INFO: loading {}".format(title))
        page = wikipedia.page(title=title)
        content = page.content
        article_no_words = len(content.split())
        current_no_words += article_no_words
        print("INFO: article contains {} words".format(article_no_words))
        articles.append((title, content))
        if current_no_words >= no_words:
            break
    return articles
d6f2216a0800f6d9627d47ae1acda9e327583841
14,749
def gen_urdf_material(color_rgba): """ :param color_rgba: Four element sequence (0 to 1) encoding an rgba colour tuple, ``seq(float)`` :returns: urdf element sequence for an anonymous material definition containing just a color element, ``str`` """ return '<material name=""><color rgba="{0} {1} {2} {3}"/></material>'.format(*color_rgba)
d0fe1a706c932ad1a6f14aa3a9d9471de70650b9
14,750
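Example output of gen_urdf_material (illustrative color tuple):

print(gen_urdf_material((1, 0, 0, 1)))
# <material name=""><color rgba="1 0 0 1"/></material>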
import numpy as np


def roll_timeseries(arr, timezones):
    """
    Roll timeseries from UTC to local time.
    Automatically compute time-shift from UTC offset (timezone) and
    time-series length.

    Parameters
    ----------
    arr : ndarray
        Input timeseries array of form (time, sites)
    timezones : ndarray | list
        Vector of timezone shifts from UTC to local time

    Returns
    -------
    local_arr : ndarray
        Array shifted to local time
    """
    if arr.shape[1] != len(timezones):
        msg = ('Number of timezone shifts ({}) does not match number of '
               'sites ({})'.format(len(timezones), arr.shape[1]))
        raise ValueError(msg)

    time_step = arr.shape[0] // 8760
    local_arr = np.zeros(arr.shape, dtype=arr.dtype)
    for tz in set(timezones):
        mask = timezones == tz
        local_arr[:, mask] = np.roll(arr[:, mask], int(tz * time_step), axis=0)

    return local_arr
4715425ea048a1ccb9c5fe2a1dc9e2ea1ecea085
14,752
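An illustrative call for roll_timeseries with one site, one year of hourly data, and a single hypothetical UTC offset of -5 hours; local index 0 then holds the value recorded at 05:00 UTC.

import numpy as np

utc = np.arange(8760, dtype=float).reshape(-1, 1)       # one year of hourly data, one site
local = roll_timeseries(utc, timezones=np.array([-5]))  # hypothetical UTC-5 site
print(local[:3, 0])  # [5. 6. 7.]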
import numpy as np


def is_linear(a, eps=1e-3):
    """Check if array of numbers is approximately linear."""
    x = np.diff(a[1:-1]).std() / np.diff(a[1:-1]).mean()
    return x < eps
0efa5c923012527d4973d24d67871a41ee2e3e91
14,753
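Quick sanity checks for is_linear (illustrative): an evenly spaced ramp passes, a quadratic ramp does not, since the relative spread of its successive differences is large.

import numpy as np

print(is_linear(np.linspace(0.0, 1.0, 50)))        # True
print(is_linear(np.linspace(0.0, 1.0, 50) ** 2))   # False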
def faces_sphere(src, show_path): """ Compute vertices and faces of Sphere input for plotting. Parameters ---------- - src (source object) - show_path (bool or int) Returns ------- vert, faces (returns all faces when show_path=int) """ # pylint: disable=protected-access res = 15 # surface discretization # generate sphere faces r = src.diameter / 2 phis = np.linspace(0, 2 * np.pi, res) phis2 = np.roll(np.linspace(0, 2 * np.pi, res), 1) ths = np.linspace(0, np.pi, res) faces = [ r * np.array( [ (np.cos(p) * np.sin(t1), np.sin(p) * np.sin(t1), np.cos(t1)), (np.cos(p) * np.sin(t2), np.sin(p) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t2), np.sin(p2) * np.sin(t2), np.cos(t2)), (np.cos(p2) * np.sin(t1), np.sin(p2) * np.sin(t1), np.cos(t1)), ] ) for p, p2 in zip(phis, phis2) for t1, t2 in zip(ths[1:-2], ths[2:-1]) ] faces += [ r * np.array( [(np.cos(p) * np.sin(th), np.sin(p) * np.sin(th), np.cos(th)) for p in phis] ) for th in [ths[1], ths[-2]] ] # add src attributes position and orientation depending on show_path rots, poss, _ = get_rot_pos_from_path(src, show_path) # all faces (incl. along path) adding pos and rot all_faces = [] for rot, pos in zip(rots, poss): for face in faces: all_faces += [[rot.apply(f) + pos for f in face]] return all_faces
eb454e55a932aae2f6b0f15587d1aa0be6da80f7
14,754
import itertools def pronto_signals_to_iguana_signals(carrier_frequency, signals): """Convert the pronto format into iguana format, where the pulses and spaces are represented in number of microseconds. """ return [carrier_cycles_to_microseconds(carrier_frequency, signal) | command for signal, command in zip(signals, itertools.cycle((iguanaIR.IG_PULSE_BIT, 0)))]
b8ddaf9f573abfe207d2ca2009904a3d93e360a4
14,755
import collections
import itertools

import numpy as np
import pandas
import xarray


def stack_xarray_repdim(da, **dims):
    """Like xarray's stack, but with partial support for repeated dimensions

    The xarray.DataArray.stack method fails when any dimension occurs multiple
    times, as repeated dimensions are not currently very well supported in
    xarray (2018-03-26). This method provides a workaround so that stack can
    be used for an array where some dimensions are repeated, as long as the
    repeated dimensions are themselves not stacked.

    Parameters:
        da (DataArray): DataArray to operate on.
        **dims: Dimensions to stack. As for xarray.DataArray.stack.
    """
    # make view of da without repeated dimensions
    cnt = collections.Counter(da.dims)
    D = {k: itertools.count() for k in cnt.keys()}
    tmpdims = []
    dimmap = {}
    for dim in da.dims:
        if cnt[dim] == 1:
            tmpdims.append(dim)
        else:
            newdim = "{:s}{:d}".format(dim, next(D[dim]))
            tmpdims.append(newdim)
            dimmap[newdim] = dim
    da2 = xarray.DataArray(da.values, dims=tmpdims)
    da2_stacked = da2.stack(**dims)
    # put back repeated dimensions with new coordinates
    da3 = xarray.DataArray(da2_stacked.values,
                           dims=[dimmap.get(d, d) for d in da2_stacked.dims])
    da3 = da3.assign_coords(
        **{k: pandas.MultiIndex.from_product(
            [da.coords[kk] for kk in dims[k]], names=dims[k])
           if k in dims else da.coords[k]
           for k in np.unique(da3.dims)})
    return da3
5f0617ccd054c6d11573b00f659308780db4d0d7
14,756
import math

from torch import nn


def compute_pnorm(model: nn.Module) -> float:
    """
    Computes the norm of the parameters of a model.

    :param model: A PyTorch model.
    :return: The norm of the parameters of the model.
    """
    return math.sqrt(sum([p.norm().item() ** 2 for p in model.parameters()]))
610c640902f411221f90c5c7b48d3b3246a60124
14,757
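Minimal usage sketch for compute_pnorm with a throwaway module; the printed value depends on the random parameter initialization.

from torch import nn

model = nn.Linear(4, 2)
print(compute_pnorm(model))  # a small positive float, e.g. ~1.0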
from xml.etree.ElementTree import Element

from markdown.util import AtomicString


def atomic_brute_cast(tree: Element) -> Element:
    """
    Cast every node's text into an atomic string to prevent further processing on it.

    Since we generate the final HTML with Jinja templates, we do not want other inline or tree processors
    to keep modifying the data, so this function is used to mark the complete tree as "do not touch".

    Reference: issue [Python-Markdown/markdown#920](https://github.com/Python-Markdown/markdown/issues/920).

    On a side note: isn't `atomic_brute_cast` such a beautiful function name?

    Arguments:
        tree: An XML node, used like the root of an XML tree.

    Returns:
        The same node, recursively modified by side-effect. You can skip re-assigning the return value.
    """
    if tree.text:
        tree.text = AtomicString(tree.text)
    for child in tree:
        atomic_brute_cast(child)
    return tree
57d13b5e97b7f94593f925f745bdf833b15e03a1
14,758
def rsa_keys(p: int = None, q: int = None, e: int = 3) -> RSA_Keys:
    """
    Generate a new set of RSA keys.
    If p and q are not provided (<= 1),
    then they will be generated.

    :param p: A big prime.
    :param q: A big prime.
    :param e: The public exponent (defaults to 3).
    :return: The RSA private and public keys.
    :raise Exception: If provided p and q are invalid.
    """
    if not p or p <= 1:
        p = matasano.math.random_big_prime(e=e)
    if not q or q <= 1:
        q = matasano.math.random_big_prime(e=e)

    n = p * q
    phi_n = (p - 1) * (q - 1)

    d = matasano.math.modinv(e, phi_n)

    return RSA_Keys(RSA_Priv(d, n), RSA_Pub(e, n))
b09fea8b6c23e4709c0f49faf2cb9b20463a2db9
14,759
def _get_closest_station_by_zcta_ranked(zcta): """ Selects the nth ranked station from a list of ranked stations Parameters ---------- zcta : string ZIP Code Tabulation Area (ZCTA) Returns ------- station : string Station that was found warnings : list List of warnings for the returned station (includes distance warnings) lat : float latitude for the search lon : float longitude for the search """ zcta = zcta.zfill(5) # Ensure that we have 5 characters, and if not left-pad it with zeroes. lat, lon = zcta_to_lat_long(zcta) finding_station = True rank = 0 while finding_station: rank = rank + 1 station_ranking = _rank_stations_by_distance_and_quality(lat, lon) station, warnings = select_station(station_ranking, rank=rank) # Ignore stations that begin with A if str(station)[0] != 'A': finding_station = False return station, warnings, lat, lon
b9cbd7ccc4a22c3069e11bc0542700b8ee087a1c
14,761
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels


def label_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        if normalize:
            title = 'Normalized Confusion Matrix'
        else:
            title = 'Confusion Matrix, without normalization'

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized Confusion Matrix")
    else:
        print('Confusion Matrix, without normalization')

    print(cm)

    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
0a1dc4665de4c2b876a0a40d5aa1fcfb1a9113d9
14,762
import warnings

import numpy as np
from scipy.signal import butter, filtfilt, lfilter


def lowpass(data, in_t=None, cutoff=None, order=4, dt=None, axis=-1, causal=False):
    """
    data: vector of data
    in_t: sample times
    cutoff: cutoff period in the same units as in_t

    returns vector same as data, but with high frequencies removed
    """
    # Step 1: Determine dt from data or from user if specified
    if dt is None:
        dt = np.median(np.diff(in_t))
    dt = float(dt)  # make sure it's not an int
    cutoff = float(cutoff)

    Wn = dt / cutoff

    B, A = butter(order, Wn)

    if not causal:
        # scipy filtfilt triggers some warning message about tuple
        # indices.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            data_filtered = filtfilt(B, A, data, axis=axis)
    else:
        data_filtered = lfilter(B, A, data, axis=axis)
    return data_filtered
f182fdb912be827d0d8e4fd788cc2cadca453b5a
14,763
def gather_along_dim_with_dim_single(x, target_dim, source_dim, indices): """ This function indexes out a target dimension of a tensor in a structured way, by allowing a different value to be selected for each member of a flat index tensor (@indices) corresponding to a source dimension. This can be interpreted as moving along the source dimension, using the corresponding index value in @indices to select values for all other dimensions outside of the source and target dimensions. A common use case is to gather values in target dimension 1 for each batch member (target dimension 0). Args: x (torch.Tensor): tensor to gather values for target_dim (int): dimension to gather values along source_dim (int): dimension to hold constant and use for gathering values from the other dimensions indices (torch.Tensor): flat index tensor with same shape as tensor @x along @source_dim Returns: y (torch.Tensor): gathered tensor, with dimension @target_dim indexed out """ assert len(indices.shape) == 1 assert x.shape[source_dim] == indices.shape[0] # unsqueeze in all dimensions except the source dimension new_shape = [1] * x.ndimension() new_shape[source_dim] = -1 indices = indices.reshape(*new_shape) # repeat in all dimensions - but preserve shape of source dimension, # and make sure target_dimension has singleton dimension expand_shape = list(x.shape) expand_shape[source_dim] = -1 expand_shape[target_dim] = 1 indices = indices.expand(*expand_shape) out = x.gather(dim=target_dim, index=indices) return out.squeeze(target_dim)
06fbba5478ddb21cda9a555c41c94c809244537c
14,764
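A worked example for gather_along_dim_with_dim_single, selecting a different sequence index per batch element (illustrative tensors):

import torch

x = torch.arange(12).reshape(2, 3, 2)   # (batch, sequence, feature)
idx = torch.tensor([2, 0])              # sequence index 2 for batch 0, index 0 for batch 1
y = gather_along_dim_with_dim_single(x, target_dim=1, source_dim=0, indices=idx)
print(y)  # tensor([[4, 5],
          #         [6, 7]])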
def get_queue(launcher=None): """Get the name of the queue used in an allocation. :param launcher: Name of the WLM to use to collect allocation info. If no launcher is provided ``detect_launcher`` is used to select a launcher. :type launcher: str | None :returns: Name of the queue :rtype: str :raises SSUnsupportedError: User attempted to use an unsupported WLM """ if launcher is None: launcher = detect_launcher() if launcher == "pbs": return _pbs.get_queue() if launcher == "slurm": return _slurm.get_queue() raise SSUnsupportedError(f"SmartSim cannot get queue for launcher `{launcher}`")
56fd4e59877363fd6e889bae52a9b5abf77230f6
14,766
def get_openmc_geometry(openmoc_geometry): """Return an OpenMC geometry corresponding to an OpenMOC geometry. Parameters ---------- openmoc_geometry : openmoc.Geometry OpenMOC geometry Returns ------- openmc_geometry : openmc.Geometry Equivalent OpenMC geometry """ cv.check_type('openmoc_geometry', openmoc_geometry, openmoc.Geometry) # Clear dictionaries and auto-generated ID OPENMC_SURFACES.clear() OPENMOC_SURFACES.clear() OPENMC_CELLS.clear() OPENMOC_CELLS.clear() OPENMC_UNIVERSES.clear() OPENMOC_UNIVERSES.clear() OPENMC_LATTICES.clear() OPENMOC_LATTICES.clear() openmoc_root_universe = openmoc_geometry.getRootUniverse() openmc_root_universe = get_openmc_universe(openmoc_root_universe) openmc_geometry = openmc.Geometry() openmc_geometry.root_universe = openmc_root_universe return openmc_geometry
af1eb3cbbcdb4122b28b544bc252f754758ababf
14,767
def distinct(xs): """Get the list of distinct values with preserving order.""" # don't use collections.OrderedDict because we do support Python 2.6 seen = set() return [x for x in xs if x not in seen and not seen.add(x)]
e5dafd942c8aa0314b7e9aa2ec09795796cac34a
14,768
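Example usage of distinct; order of first appearance is preserved.

print(distinct([3, 1, 3, 2, 1]))  # [3, 1, 2]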
import copy


def _parse_train_configs(train_config):
    """
    check if user's train configs are valid.
    Args:
        train_config(dict): user's train config.
    Return:
        configs(dict): final configs will be used.
    """
    configs = copy.deepcopy(_train_config_default)
    configs.update(train_config)

    assert isinstance(configs['num_epoch'], int), \
        "'num_epoch' must be int value"
    assert isinstance(configs['max_iter'], int), \
        "'max_iter' must be int value"
    assert isinstance(configs['save_iter_step'], int), \
        "'save_iter_step' must be int value"
    assert isinstance(configs['learning_rate'], float), \
        "'learning_rate' must be float"
    assert isinstance(configs['weight_decay'], float), \
        "'weight_decay' must be float"
    assert isinstance(configs['use_pact'], bool), \
        "'use_pact' must be bool"
    assert isinstance(configs['quant_model_ckpt_path'], str), \
        "'quant_model_ckpt_path' must be str"
    assert isinstance(configs['teacher_model_path_prefix'], str), \
        "'teacher_model_path_prefix' must be a string"
    assert isinstance(configs['model_path_prefix'], str), \
        "'model_path_prefix' must be a str"
    assert isinstance(configs['distill_node_pair'], list), \
        "'distill_node_pair' must be a list"
    assert len(configs['distill_node_pair']) > 0, \
        "'distill_node_pair' not configured with distillation nodes"
    assert len(configs['distill_node_pair']) % 2 == 0, \
        "'distill_node_pair' distillation nodes need to be configured in pairs"
    return train_config
339539eac9a0463f4fd11d471cfa3f4971010969
14,770
def as_region(region): """ Convert string to :class:`~GenomicRegion`. This function attempts to convert any string passed to it to a :class:`~GenomicRegion`. Strings are expected to be of the form <chromosome>[:<start>-<end>[:[strand]], e.g. chr1:1-1000, 2:2mb-5mb:-, chrX:1.5kb-3mb, ... Numbers can be abbreviated as '12k', '1.5Mb', etc. When fed a :class:`~GenomicRegion`, it will simply be returned, making the use of this function as an "if-necessary" converter possible. :param region: str or :class:`~GenomicRegion` :return: :class:`~GenomicRegion` """ if isinstance(region, string_types): return GenomicRegion.from_string(region) elif isinstance(region, GenomicRegion): return region raise ValueError("region parameter cannot be converted to GenomicRegion!")
863b1f982e9b411a023ab876661123b5565fae91
14,771