Dataset columns: content — string (35 to 762k chars) · sha1 — string (40 chars) · id — int64 (0 to 3.66M)
def parse_log(log_file):
    """
    Parses a log file into a list of lists containing the messages logged

    :param log_file: path-like: Path to the log file
    :return: list of lists containing messages in the log file
    """
    parsed_logs = [[] for i in range(5)]
    with open(log_file, 'r') as f:
        for line in f.readlines():
            parts = line.split(':')
            for i in range(0, len(parts)):
                parts[i] = parts[i].strip()
            if parts[0] == LogLevel.ERROR:
                parsed_logs[0].append(":".join(parts[1:]))
            elif parts[0] == LogLevel.WARNING:
                parsed_logs[1].append(":".join(parts[1:]))
            elif parts[0] == LogLevel.INFO:
                parsed_logs[2].append(":".join(parts[1:]))
            elif parts[0] == LogLevel.STARTUP:
                parsed_logs[3].append(":".join(parts[1:]))
            else:
                # Unrecognized levels go into the fifth bucket; the original
                # appended to parsed_logs[3], colliding with STARTUP.
                parsed_logs[4].append(line)
    return parsed_logs
065618c66470a8c538cbe9346ba66949819672b9
6,862
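A minimal usage sketch for the record above, assuming LogLevel is a str-valued enum whose members compare equal to the level prefixes written in the file (the enum is not shown in the record):

# Usage sketch (assumption: LogLevel is a str enum matching the file's prefixes)
from enum import Enum

class LogLevel(str, Enum):
    ERROR = "ERROR"
    WARNING = "WARNING"
    INFO = "INFO"
    STARTUP = "STARTUP"

with open("app.log", "w") as f:
    f.write("ERROR: disk full\nINFO: started worker: 3\nDEBUG: noise\n")

logs = parse_log("app.log")
print(logs[0])  # ['disk full']
print(logs[2])  # ['started worker:3']  (parts are stripped, then re-joined with ':')
print(logs[4])  # ['DEBUG: noise\n']    (unrecognized levels keep the raw line)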
def generate_experiment_fn(train_files,
                           eval_files,
                           num_epochs=None,
                           train_batch_size=40,
                           eval_batch_size=40,
                           embedding_size=8,
                           first_layer_size=100,
                           num_layers=4,
                           scale_factor=0.7,
                           **experiment_args):
    """Create an experiment function given hyperparameters.

    See command line help text for description of args.

    Returns:
      A function (output_dir) -> Experiment where output_dir is a string
      representing the location of summaries, checkpoints, and exports.
      This function is used by learn_runner to create an Experiment which
      executes model code provided in the form of an Estimator and
      input functions.

      All listed arguments in the outer function are used to create an
      Estimator, and input functions (training, evaluation, serving).
      Unlisted args are passed through to Experiment.
    """
    # Check verbose logging flag
    verbose_logging = experiment_args.pop('verbose_logging')
    model.set_verbose_logging(verbose_logging)

    def _experiment_fn(output_dir):
        # num_epochs can control duration if train_steps isn't
        # passed to Experiment
        train_input = model.generate_input_fn(
            train_files,
            num_epochs=num_epochs,
            batch_size=train_batch_size,
        )
        # Don't shuffle evaluation data
        eval_input = model.generate_input_fn(
            eval_files,
            batch_size=eval_batch_size,
            shuffle=False
        )
        return tf.contrib.learn.Experiment(
            model.build_estimator(
                output_dir,
                embedding_size=embedding_size,
                # Construct layer sizes with exponential decay
                hidden_units=[
                    max(2, int(first_layer_size * scale_factor**i))
                    for i in range(num_layers)
                ]
            ),
            train_input_fn=train_input,
            eval_input_fn=eval_input,
            # export strategies control the prediction graph structure
            # of exported binaries.
            export_strategies=[saved_model_export_utils.make_export_strategy(
                model.serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1
            )],
            **experiment_args
        )
    return _experiment_fn
a7af08f955d1d93c1c9735b08b5e18b2fd9e405a
6,863
def get_inclination_and_azimuth_from_locations(self, locations):
    """
    self must point to Main_InputWindow.

    Return "Inc" and "Azi" array objects in reference units.
    """
    Inc = []
    Azi = []
    for MD in locations:
        tangentVector = get_ASCT_from_MD(self, MD)
        verticalVector = np.array([0.0, 0.0, 1.0, 0.0])
        if np.allclose(tangentVector, verticalVector, atol=1e-2, rtol=0.0):
            tangentVector = verticalVector
        inc = np.arccos(tangentVector[2])
        if inc == 0.0:
            azi = 0.0
        else:
            sinazi = tangentVector[0] / np.sin(inc)
            cosazi = tangentVector[1] / np.sin(inc)
            if sinazi >= 0:
                azi = np.arccos(cosazi)
            elif sinazi < 0:
                azi = 2 * np.pi - np.arccos(cosazi)
        Inc.append(inc)
        Azi.append(azi)
    return np.array(Inc), np.array(Azi)
2761137c670d3ad90c40d0689db062baf743d7a5
6,864
def _ensure_package(base, *parts):
    """Ensure that all the components of a module directory path exist, and
    contain a file __init__.py."""
    bits = []
    for bit in parts[:-1]:
        bits.append(bit)
        base.ensure(*(bits + ['__init__.py']))
    return base.ensure(*parts)
fc9bb95445cc1b0e8ec819dfafdaff7d5afbf372
6,865
def make_cat_matrix(n_rows: int, n_cats: int) -> tm.CategoricalMatrix:
    """Make categorical matrix for benchmarks."""
    mat = tm.CategoricalMatrix(np.random.choice(np.arange(n_cats, dtype=int), n_rows))
    return mat
5c1f314a9582685d6c6da0f9ac0ee58fe9046952
6,866
def add_stabilizer_nodes(boundaries_raw, electrodes, nr_nodes_between):
    """
    Segmentation of nodes: we have the existing nodes N. F is the ratio of
    required nodes to existing nodes:

    * first, add N nodes to each segment
    * then, add one more node to the F first segments
    * assume ordered boundaries
    """
    boundaries = boundaries_raw
    # find first electrode in boundary
    for nr in range(electrodes.shape[0] - 1):
        index0 = np.where(
            (boundaries[:, 0] == electrodes[nr, 0]) &
            (boundaries[:, 1] == electrodes[nr, 1])
        )[0]
        index1 = np.where(
            (boundaries[:, 0] == electrodes[nr + 1, 0]) &
            (boundaries[:, 1] == electrodes[nr + 1, 1])
        )[0]
        index0 = index0[0]
        index1 = index1[0]
        if index1 - index0 < 0:
            index0, index1 = index1, index0
        running_index = index0
        nr_nodes = index1 - index0 - 1
        while nr_nodes < nr_nodes_between:
            # determine line equation
            xy0 = boundaries[running_index, 0:2]
            xy1 = boundaries[running_index + 1, 0:2]
            direction = xy1 - xy0
            heading = direction / np.sqrt(np.sum(direction ** 2))
            # new node
            xy_new = xy0 + heading * direction / 2.0
            a = boundaries[running_index, 2][np.newaxis]
            xyb = np.hstack((xy_new, a))
            boundaries = np.insert(boundaries, running_index + 1, xyb, axis=0)
            # 2, because we have to count the new one
            running_index += 2
            index1 += 1
            nr_nodes += 1
            if running_index == index1:
                running_index = index0
    return boundaries
fe8ff9618ee34cb9caedd828a880af05a1c964f0
6,867
def read_data(creds):
    """Read court tracking data in and drop duplicate case numbers"""
    # try:
    df = gsheet.read_data(gsheet.open_sheet(
        gsheet.init_sheets(creds),
        "01_Community_lawyer_test_out_final",
        "Frontend"
    ))
    # df.drop_duplicates("Case Number", inplace=True)  # Do we want to drop duplicates???
    return df
95bb588305c230c2f3aaa306e367da2602788f67
6,868
def _build_indie_lyrics(
    root: str, num_workers: int = 8, max_size: int = 200000
) -> DocumentArray:
    """
    Builds the indie lyrics dataset. Download the CSV files from:
    https://www.kaggle.com/datasets/neisse/scrapped-lyrics-from-6-genres

    :param root: the dataset root folder.
    :param num_workers: the number of parallel workers to use.
    :param max_size: used to randomly subsample from dataset if greater than 0
    :return: DocumentArray
    """
    return _build_lyrics(
        genre='Indie',
        root=root.replace('indie-lyrics', 'lyrics'),
        num_workers=num_workers,
        max_size=max_size,
    )
9eaaf9742c587a649d036e5a7da30dc5ca37db79
6,869
def getHostname(request):
    """
    Utility method for getting hostname of client.
    """
    if request.getClientIP() in LOOPBACK_ADDRESSES and has_headers(request, X_FORWARDED_FOR):
        # nginx typically returns ip addresses
        addr = get_headers(request, X_FORWARDED_FOR)
        if isIPAddress(addr):
            # we really shouldn't do such blocking calls in twisted,
            # but the twisted dns interface is rather terrible and
            # odd things happen when using it
            # Set timeout to 1 second to limit the possible damage
            try:
                socket.setdefaulttimeout(1)
                info = socket.gethostbyaddr(addr)
                return info[0]
            except socket.error as msg:
                log.msg("Error performing reverse lookup: %s" % msg)
                return addr
        else:
            # the original dropped the `return`, falling through to None
            return addr
    else:
        hostname = request.getClient()
        if hostname is None:
            hostname = request.getClientIP()
        return hostname
41ab9ed3a01d1e1bc53565115a8336a5eac741b3
6,870
def CollapseSolutionPosition(x, x0):
    """
    Calculate a free-fall collapse solution
    x - position to calculate time at in cm
    x0 - initial position in cm
    Sam Geen, March 2018
    """
    X = x / x0
    t = (np.arccos(np.sqrt(X)) + np.sqrt(X * (1.0 - X))) * \
        x0**1.5 / np.sqrt(2.0 * units.G * gravity.centralmass)
    return t
3d0aaeef997a688b72df38ea2188ea34d62c1d55
6,871
import numpy as np
from scipy import signal
from nsdata import bfixpix
# `pyfits` and `an` (the author's "analysis" helper module providing polyfitr)
# are assumed importable in the original codebase.

def scaleSpectralSky_cor(subframe, badpixelmask=None, maxshift=20,
                         fitwidth=2, pord=1, nmed=3, dispaxis=0,
                         spatial_index=None, refpix=None, tord=2):
    """
    Use cross-correlation to subtract tilted sky backgrounds.

    subframe : NumPy array
      data subframe containing sky data to be subtracted (and, perhaps,
      an object's spectral trace).

    badpixelmask : None or NumPy array
      A boolean array, equal to zero for good pixels and unity for bad
      pixels.  If this is set, the first step will be a call to
      :func:`nsdata.bfixpix` to interpolate over these values.

    nmed : int
      size of 2D median filter for pre-smoothing.

    pord : int
      degree of spectral tilt.  Keep this number low!

    maxshift : int
      Maximum acceptable shift.  NOT YET IMPLEMENTED!

    fitwidth : int
      Maximum radius (in pixels) for fitting to the peak of the
      cross-correlation.

    nmed : int
      Size of window for 2D median filter (to reject bad pixels, etc.)

    dispaxis : int
      set dispersion axis: 0 = horizontal and 1 = vertical

    spatial_index : None, or 1D NumPy array of type *bool*
      Which spatial rows (if dispaxis=0) to use when fitting the tilt of
      sky lines across the spectrum.  If you want to use all, set to
      None.  If you want to ignore some (e.g., because there's a bright
      object's spectrum there) then set those rows' elements of
      spatial_index to 'False'.

    refpix : scalar
      Pixel along spatial axis to which spectral fits should be aligned;
      if a spectral trace is present, one should set "refpix" to the
      location of the trace.

    tord : int
      Order of polynomial fits along spatial direction in aligned
      2D-spectral frame, to account for misalignments or irregularities
      of tilt direction.

    :RETURNS:
      a model of the sky background, of the same shape as 'subframe.'
    """
    # 2012-09-22 17:04 IJMC: Created
    # 2012-12-27 09:53 IJMC: Edited to better account for sharp edges
    #                        in backgrounds.

    # Parse inputs
    if not isinstance(subframe, np.ndarray):
        subframe = pyfits.getdata(subframe)

    if badpixelmask is None:
        pass
    else:
        badpixelmask = np.array(badpixelmask).astype(bool)
        subframe = bfixpix(subframe, badpixelmask, retdat=True)

    if dispaxis == 1:
        subframe = subframe.transpose()

    # Define necessary variables and vectors:
    npix, nlam = subframe.shape
    if spatial_index is None:
        spatial_index = np.ones(npix, dtype=bool)
    else:
        spatial_index = np.array(spatial_index, copy=False)
    if refpix is None:
        refpix = npix / 2.

    lampix = np.arange(nlam)
    tpix = np.arange(npix)
    alllags = np.arange(nlam - maxshift * 2) - np.floor(nlam / 2 - maxshift)

    # Median-filter the input data:
    if nmed > 1:
        ssub = signal.medfilt2d(subframe, nmed)
    else:
        ssub = subframe.copy()
    ref = np.median(ssub, axis=0)
    # allcor = np.zeros((npix, nlam-maxshift*2))
    shift = np.zeros(npix, dtype=float)
    for ii in tpix:
        # Cross-correlate to measure alignment at each row:
        cor = np.correlate(ref[maxshift:-maxshift],
                           signal.medfilt(ssub[ii], nmed)[maxshift:-maxshift],
                           mode='same')
        # Measure offset of each row:
        maxind = alllags[(cor == cor.max())].mean()
        fitind = np.abs(alllags - maxind) <= fitwidth
        quadfit = np.polyfit(alllags[fitind], cor[fitind], 2)
        shift[ii] = -0.5 * quadfit[1] / quadfit[0]

    shift_polyfit = an.polyfitr(tpix[spatial_index], shift[spatial_index],
                                pord, 3)  # , w=weights)
    refpos = np.polyval(shift_polyfit, refpix)
    # pdb.set_trace()
    fitshift = np.polyval(shift_polyfit, tpix) - refpos

    # Interpolate each row to a common frame to create an improved reference:
    newssub = np.zeros((npix, nlam))
    for ii in tpix:
        newssub[ii] = np.interp(lampix, lampix + fitshift[ii], ssub[ii])
    # pdb.set_trace()
    newref = np.median(newssub[spatial_index, :], axis=0)

    tfits = np.zeros((nlam, tord + 1), dtype=float)
    newssub2 = np.zeros((npix, nlam))
    for jj in range(nlam):
        tfits[jj] = an.polyfitr(tpix, newssub[:, jj], tord, 3)
        newssub2[:, jj] = np.polyval(tfits[jj], tpix)

    # Create the final model of the sky background:
    skymodel = np.zeros((npix, nlam), dtype=float)
    shiftmodel = np.zeros((npix, nlam), dtype=float)
    for ii in tpix:
        # skymodel[ii] = np.interp(lampix, lampix-fitshift[ii], newref)
        skymodel[ii] = np.interp(lampix, lampix - fitshift[ii], newssub2[ii])
        shiftmodel[ii] = np.interp(lampix, lampix + fitshift[ii], ssub[ii])
    # pdb.set_trace()
    if dispaxis == 1:
        skymodel = skymodel.transpose()
    return skymodel, shiftmodel, newssub, newssub2
50ee28ff81c4e981dca47e67ed525b7d9a421288
6,872
def login():
    """
    Implements the login feature for the app.
    Errors are shown if incorrect details are used. If the user tried to
    access a page requiring login without being authenticated, they are
    redirected there after sign in.
    """
    if current_user.is_authenticated:
        return redirect(url_for("auth.index"))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(
            username=form.username.data
        ).first()  # None if invalid
        if user is None or not user.check_password(form.password.data):
            flash("Invalid username or password")
            return redirect(url_for("auth.login"))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get("next")
        # To prevent malicious users from adding a malicious site into the
        # parameters, this checks to see if the url is relative.
        if not next_page or url_parse(next_page).netloc != "" or next_page == "/logout":
            next_page = url_for("auth.index")
        return redirect(next_page)
    return render_template("login.html", form=form)
43c60504648aa4e93e24150b1aceb98293a4064d
6,873
def _get_plot_axes(grid):
    """Find which axes are being plotted.

    Parameters
    ----------
    grid : Grid

    Returns
    -------
    tuple
    """
    plot_axes = [0, 1, 2]
    if np.unique(grid.nodes[:, 0]).size == 1:
        plot_axes.remove(0)
    if np.unique(grid.nodes[:, 1]).size == 1:
        plot_axes.remove(1)
    if np.unique(grid.nodes[:, 2]).size == 1:
        plot_axes.remove(2)
    return tuple(plot_axes)
3112ba7d954c7b39bec035e31b5281919dc78244
6,874
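A usage sketch for the record above; the SimpleNamespace is a hypothetical stand-in for the Grid type, which only needs a .nodes array here:

# Usage sketch (SimpleNamespace stands in for the real Grid class)
import numpy as np
from types import SimpleNamespace

# 4 nodes; the third coordinate is constant, so axis 2 is not being plotted
grid = SimpleNamespace(nodes=np.array([
    [0.0, 0.0, 1.0],
    [0.5, 0.2, 1.0],
    [1.0, 0.4, 1.0],
    [1.5, 0.6, 1.0],
]))
print(_get_plot_axes(grid))  # (0, 1)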
def _read_uint(addr):
    """ Read a uint """
    try:
        # parse_and_eval is what actually touches memory, so it belongs
        # inside the block that catches gdb.MemoryError
        value = gdb.parse_and_eval("*(unsigned int*)0x%x" % addr)
        if value is not None:
            return _cast_uint(value)
    except gdb.MemoryError:
        pass
    print("Can't read 0x%x to lookup KASLR uint value" % addr)
    return None
abe969c2f8595fdf1efdc98157536131d7a8a5ca
6,876
def line_at_infinity(n):
    """the line at infinity just contains the points at infinity"""
    return points_at_infinity(n)
8a787b4598e072c101f8babbe948c4996b121a9a
6,877
def check_section(config: Namespace, name: str) -> Namespace:
    """Check that a section with the specified name is present."""
    section = config._get(name)
    if section is None:
        raise ConfigurationError(f"Section {name} not found in configuration")
    if not isinstance(section, Namespace):
        raise ConfigurationError(f"Configuration error: {name} not a section")
    return section
09a315a77bd25a3a78b8e80592a32c8709aa511f
6,878
import math


def ceil(a):
    """The ceil function.

    Args:
        a (Union[:class:`~taichi.lang.expr.Expr`, :class:`~taichi.lang.matrix.Matrix`]): A number or a matrix.

    Returns:
        The least integer greater than or equal to `a`.
    """
    return _unary_operation(_ti_core.expr_ceil, math.ceil, a)
456436d8d1104b4df16327665dd477139528f6fa
6,879
from typing import Any
from typing import Optional


def Body(
    default: Any = Undefined,
    *,
    default_factory: Optional[NoArgAnyCallable] = None,
    alias: str = None,
    title: str = None,
    description: str = None,
    const: bool = None,
    gt: float = None,
    ge: float = None,
    lt: float = None,
    le: float = None,
    multiple_of: float = None,
    min_items: int = None,
    max_items: int = None,
    min_length: int = None,
    max_length: int = None,
    regex: str = None,
    **extra: Any,
) -> Any:
    """
    Used to provide extra information about a field, either for the model
    schema or complex validation. Some arguments apply only to number fields
    (``int``, ``float``, ``Decimal``) and some apply only to ``str``.

    :param default: since this is replacing the field's default, its first
      argument is used to set the default, use ellipsis (``...``) to indicate
      the field is required
    :param default_factory: callable that will be called when a default value
      is needed for this field. If both `default` and `default_factory` are
      set, an error is raised.
    :param alias: the public name of the field
    :param title: can be any string, used in the schema
    :param description: can be any string, used in the schema
    :param const: this field is required and *must* take its default value
    :param gt: only applies to numbers, requires the field to be "greater
      than". The schema will have an ``exclusiveMinimum`` validation keyword
    :param ge: only applies to numbers, requires the field to be "greater than
      or equal to". The schema will have a ``minimum`` validation keyword
    :param lt: only applies to numbers, requires the field to be "less than".
      The schema will have an ``exclusiveMaximum`` validation keyword
    :param le: only applies to numbers, requires the field to be "less than or
      equal to". The schema will have a ``maximum`` validation keyword
    :param multiple_of: only applies to numbers, requires the field to be
      "a multiple of". The schema will have a ``multipleOf`` validation keyword
    :param min_items: only applies to list or tuple and set, requires the
      field to have a minimum length.
    :param max_items: only applies to list or tuple and set, requires the
      field to have a maximum length.
    :param min_length: only applies to strings, requires the field to have a
      minimum length. The schema will have a ``minLength`` validation keyword
    :param max_length: only applies to strings, requires the field to have a
      maximum length. The schema will have a ``maxLength`` validation keyword
    :param regex: only applies to strings, requires the field match against a
      regular expression pattern string. The schema will have a ``pattern``
      validation keyword
    :param extra: any additional keyword arguments will be added as is to the
      schema
    """
    if default is not Undefined and default_factory is not None:
        raise ValueError("cannot specify both default and default_factory")
    return BodyInfo(
        default,
        default_factory=default_factory,
        alias=alias,
        title=title,
        description=description,
        const=const,
        gt=gt,
        ge=ge,
        lt=lt,
        le=le,
        multiple_of=multiple_of,
        min_items=min_items,
        max_items=max_items,
        min_length=min_length,
        max_length=max_length,
        regex=regex,
        **extra,
    )
efc636d1b0e42736cecb04857afa67f636fd0bb6
6,880
import numpy as np
import cv2


def warp(img, pers_margin=425, margin_bottom=50, margin_top=450,
         margin_sides=150, reverse=False):
    """
    This function warps an image. For the transformation a src polygon and a
    destination polygon are used. The source polygon is calculated from the
    image shape and the margins given. The destination polygon is calculated
    solely from the image shape.

    :param img: Input image
    :param pers_margin: This value determines how sharp the polygon is
    :param margin_bottom: This value sets the distance between the polygon
                          and the bottom of the image
    :param margin_top: This value sets the distance between the polygon and
                       the top of the image
    :param margin_sides: This value sets the distance between the polygon and
                         the sides of the image
    :param reverse: If True, src and dst will be swapped, thus the image will
                    be unwarped
    :return: Warped image
    """
    img_size = (img.shape[1], img.shape[0])
    # Four source coordinates
    src = np.float32(
        [[img_size[0] - margin_sides - pers_margin, margin_top],
         [img_size[0] - margin_sides, img_size[1] - margin_bottom],
         [margin_sides, img_size[1] - margin_bottom],
         [margin_sides + pers_margin, margin_top]])
    # Four destination coordinates
    dst = np.float32(
        [[img_size[0]*3//4, 0],
         [img_size[0]*3//4, img_size[1]],
         [img_size[0]//4, img_size[1]],
         [img_size[0]//4, 0]])
    # Compute perspective transform matrix
    if not reverse:
        m = cv2.getPerspectiveTransform(src, dst)
    else:
        m = cv2.getPerspectiveTransform(dst, src)
    # Warp image
    warped = cv2.warpPerspective(img, m, img_size, flags=cv2.INTER_LINEAR)
    return warped
06c4b08e43a3efcfaf3a44bd58727c6b0db833da
6,881
def get_all_doorstations(hass):
    """Get all doorstations."""
    return [
        entry[DOOR_STATION]
        for entry in hass.data[DOMAIN].values()
        if DOOR_STATION in entry
    ]
a6e785e6c667b956ef41ad98681e38b142d99ef5
6,882
import requests
import json


def get_weather() -> dict:
    """Makes an api request for the weather api

    country code queries the specific country
    city name queries the specific city within that country
    units determines the type of numerical data returned (centigrade or Fahrenheit)
    :return: the response from the api
    """
    # city_name, country_code, api_key, units and log are module-level
    # globals in the original project.
    query = f"{city_name},{country_code}"
    url_current_weather = f"https://api.openweathermap.org/data/2.5/weather?q={query}" \
                          f"&appid={api_key}&units={units}"
    response = requests.get(url_current_weather).json()
    if response["cod"] != 200:
        log.error(json.dumps(response, indent=4))
        response = None
    return response
023253ec2466182515a345d2bca1f10adf7b67ab
6,883
def _create_off_value():
    """create off value"""
    return Tensor(0.0, mstype.float32)
9cddddc27810fdfc4dbe3970aaa5c5a064f4345c
6,884
import datetime


def is_datetime(value):
    """
    Check if an object is a datetime

    :param value:
    :return:
    """
    # `import datetime` (the module) is required for the `datetime.datetime`
    # check below; the original's `from datetime import datetime` would break it.
    result = False
    if isinstance(value, datetime.datetime):
        result = True
    # else:
    #     result = is_datetime_str(str(value))
    return result
95c2392c9a3da9e4fccb43bd50c54914ffe91b8e
6,885
import math


def sigmoid(z):
    """Sigmoid function"""
    # Guard against overflow in exp() for large negative inputs, where the
    # sigmoid is effectively 0. (The original tested z > 100 and used
    # exp(z), which computes sigmoid(-z).)
    if z < -100:
        return 0
    return 1.0 / (1.0 + math.exp(-z))
097e1a85fc46264cb1c7cd74498d6cfab97e5b88
6,886
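A quick numeric check of the record above against standard logistic values:

print(sigmoid(0))      # 0.5
print(sigmoid(4))      # ~0.982
print(sigmoid(-1000))  # 0 (overflow guard, no math range error)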
async def get_company_sumary(symbol: str, db: Session = Depends(get_db)):
    """
    This method receives a symbol; if it does not exist in our database we go
    extract the data, save it in our database and return the stored data.
    """
    company_solver = CompanySolver(company_symbol=symbol)
    _ = company_solver.get_company_data(db)
    return _
9cd4a5e6dfe4f308f564d956280cb6cd522c6296
6,887
def get_attn_pad_mask(seq_q, seq_k):
    """
    Because sentence lengths differ, every sentence is padded with PAD up to
    a fixed length; PAD carries no meaning within a sentence, so attention
    should not attend to it. This attention-mask function marks the PAD
    positions so attention is placed on the other tokens instead.

    :param seq_q: [batch_size, seq_len]
    :param seq_k: [batch_size, seq_len]
    """
    batch_size, len_q = seq_q.size()
    _, len_k = seq_k.size()
    # [batch_size, 1, len_k]; 0 denotes PAD, and eq(0) returns a matrix of
    # the same shape as seq_k: True where an element of seq_k is 0, else False.
    # [1, 2, 3, 0] -> [F, F, F, T]
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)
    return pad_attn_mask.expand(batch_size, len_q, len_k)
522fc244c02ec767b80da2f0c9b5cf6720e931c0
6,889
import numpy as np


def convert_str_to_float(string):
    """Convert str to float

    To handle the edge case

    Args:
        string (str): string

    Returns:
        f (float): float value
    """
    try:
        f = float(string)
    except Exception:
        f = np.nan
    return f
f597d9d59c00f484d9b5183fc610fabf84529218
6,890
def node_tree(node: str):
    """Format printing for locate"""
    str2list = list(node.replace(' ', ''))
    count = 0
    for i, e in enumerate(str2list):
        if e == '(':
            count += 1
            str2list[i] = '(\n{}'.format('| ' * count)
        elif e == ')':
            count -= 1
            str2list[i] = '\n{})'.format('| ' * count)
        elif e == ',':
            str2list[i] = ',\n{}'.format('| ' * count)
        elif e == '[':
            count += 1
            str2list[i] = '[\n{}'.format('| ' * count)
        elif e == ']':
            count -= 1
            str2list[i] = '\n{}]'.format('| ' * count)
    return ''.join(str2list)
010805499cb6e886ec8811949a1d1d013db1d15f
6,891
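What the record above produces for a small expression:

print(node_tree("f(a, b)"))
# f(
# | a,
# | b
# )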
def process_data(datas):
    """
    :param datas: list of dataset entries to preprocess
    :return: the processed entries
    """
    # copy of the origin question_toks
    for d in datas:
        if 'origin_question_toks' not in d:
            d['origin_question_toks'] = d['question_toks']

    for entry in datas:
        entry['question_toks'] = symbol_filter(entry['question_toks'])
        origin_question_toks = symbol_filter([x for x in entry['origin_question_toks'] if x.lower() != 'the'])
        question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in entry['question_toks'] if x.lower() != 'the']

        entry['question_toks'] = question_toks

        table_names = []
        table_names_pattern = []

        for y in entry['table_names']:
            x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
            table_names.append(" ".join(x))
            x = [re_lemma(x.lower()) for x in y.split(' ')]
            table_names_pattern.append(" ".join(x))

        header_toks = []
        header_toks_list = []

        header_toks_pattern = []
        header_toks_list_pattern = []

        for y in entry['col_set']:
            x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
            header_toks.append(" ".join(x))
            header_toks_list.append(x)

            x = [re_lemma(x.lower()) for x in y.split(' ')]
            header_toks_pattern.append(" ".join(x))
            header_toks_list_pattern.append(x)

        num_toks = len(question_toks)
        idx = 0
        tok_concol = []
        type_concol = []
        nltk_result = nltk.pos_tag(question_toks)

        while idx < num_toks:
            # fully header
            end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks)
            if header:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["col"])
                idx = end_idx
                continue

            # check for table
            end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
            if tname:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["table"])
                idx = end_idx
                continue

            # check for column
            end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
            if header:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["col"])
                idx = end_idx
                continue

            # check for partial column
            end_idx, tname = partial_header(question_toks, idx, header_toks_list)
            if tname:
                tok_concol.append(tname)
                type_concol.append(["col"])
                idx = end_idx
                continue

            # check for aggregation
            end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
            if agg:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["agg"])
                idx = end_idx
                continue

            if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
                tok_concol.append([question_toks[idx]])
                type_concol.append(['MORE'])
                idx += 1
                continue

            if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
                tok_concol.append([question_toks[idx]])
                type_concol.append(['MOST'])
                idx += 1
                continue

            # string match for Time Format
            if num2year(question_toks[idx]):
                question_toks[idx] = 'year'
                end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
                if header:
                    tok_concol.append(question_toks[idx: end_idx])
                    type_concol.append(["col"])
                    idx = end_idx
                    continue

            def get_concept_result(toks, graph):
                for begin_id in range(0, len(toks)):
                    for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
                        tmp_query = "_".join(toks[begin_id:r_ind])
                        if tmp_query in graph:
                            mi = graph[tmp_query]
                            for col in entry['col_set']:
                                if col in mi:
                                    return col

            end_idx, symbol = group_symbol(question_toks, idx, num_toks)
            if symbol:
                tmp_toks = [x for x in question_toks[idx: end_idx]]
                assert len(tmp_toks) > 0, print(symbol, question_toks)
                pro_result = get_concept_result(tmp_toks, english_IsA)
                if pro_result is None:
                    pro_result = get_concept_result(tmp_toks, english_RelatedTo)
                if pro_result is None:
                    pro_result = "NONE"
                for tmp in tmp_toks:
                    tok_concol.append([tmp])
                    type_concol.append([pro_result])
                    pro_result = "NONE"
                idx = end_idx
                continue

            end_idx, values = group_values(origin_question_toks, idx, num_toks)
            if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
                tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
                assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
                pro_result = get_concept_result(tmp_toks, english_IsA)
                if pro_result is None:
                    pro_result = get_concept_result(tmp_toks, english_RelatedTo)
                if pro_result is None:
                    pro_result = "NONE"
                for tmp in tmp_toks:
                    tok_concol.append([tmp])
                    type_concol.append([pro_result])
                    pro_result = "NONE"
                idx = end_idx
                continue

            result = group_digital(question_toks, idx)
            if result is True:
                tok_concol.append(question_toks[idx: idx + 1])
                type_concol.append(["value"])
                idx += 1
                continue

            # question_toks[idx] is a plain string, so compare/assign strings
            # (the original compared against the list ['ha'], which never matches)
            if question_toks[idx] == 'ha':
                question_toks[idx] = 'have'

            tok_concol.append([question_toks[idx]])
            type_concol.append(['NONE'])
            idx += 1
            continue

        entry['question_arg'] = tok_concol
        entry['question_arg_type'] = type_concol
        entry['nltk_pos'] = nltk_result

    return datas
3e2ab0daa83e48abc121b72cbf1970c8b5fabe87
6,892
from copy import copy

import numpy as np


def repeated_parity_data_binning(shots, nr_of_meas: int):
    """
    Used for data binning of the repeated parity check experiment.
    Assumes the data qubit is alternatively prepared in 0 and 1.

    Args:
        shots (1D array) : array containing all measured values of 1 qubit
        nr_of_meas (int) : number of measurement per prepared state.
            used to determine the period for data binning. Includes the
            initialization measurement.

    Returns
        prep_0 (1D array) outcomes of the initialization measurement
        meas_0 (1D array) outcomes of the first measurement
        trace_0 (2D array) traces
        prep_1 (1D array)
        meas_1 (1D array)
        trace_1 (2D array)
    """
    # `from copy import copy` is needed here; the original's bare
    # `import copy` would make copy(...) fail.
    prep_0 = copy(shots[::nr_of_meas * 2])
    meas_0 = copy(shots[1::nr_of_meas * 2])
    prep_1 = copy(shots[nr_of_meas::nr_of_meas * 2])
    meas_1 = copy(shots[nr_of_meas + 1::nr_of_meas * 2])
    trace_0 = np.zeros((len(prep_0), nr_of_meas - 1))
    trace_1 = np.zeros((len(prep_1), nr_of_meas - 1))
    for i in range(len(prep_0)):
        trace_0[i, :] = shots[1 + (2 * i) * nr_of_meas: (2 * i + 1) * nr_of_meas]
        trace_1[i, :] = shots[1 + (2 * i + 1) * nr_of_meas: (2 * i + 2) * nr_of_meas]
    return (prep_0, meas_0, trace_0, prep_1, meas_1, trace_1)
3cd724579738f5ccf4bd664cf1b023d1c7c08f27
6,894
def get_user_activities(user_id, timestamp_start, timestamp_end):
    """ Returns the activities for a user, between two times"""
    activities = Activity.query \
        .filter(Activity.user_id == user_id) \
        .filter(Activity.timestamp_end >= timestamp_start) \
        .filter(Activity.timestamp_start <= timestamp_end).all()

    # If required, add the current_activity (the query above will not get it)
    current_activity_id = get_current_user_activity_id(target_user_id=user_id)
    if current_activity_id is not None:
        current_act = Activity.query.get(current_activity_id)
        # Don't add the current activity if it started after the requested end
        if current_act.timestamp_start <= timestamp_end:
            activities.append(current_act)
    return activities
0b58c1e6a430e0179d34b0ee6d8fdb70f6b102c1
6,895
def _find_matches(ref, pred):
    """
    find potential matches between objects in the reference and
    predicted images. These need to have at least 1 pixel of overlap.
    """
    matches = {}
    for label in ref.labels:
        mask = ref.labeled == label
        matches[label] = [m for m in np.unique(pred.labeled[mask]) if m > 0]
    return matches
82ea5c5a0c73996187d7f5409745b947b7e17960
6,896
def _process(config: ConfigType, should_make_dir: bool) -> ConfigType:
    """Process the config

    Args:
        config (ConfigType): Config object
        should_make_dir (bool): Should make dir for saving logs, models etc

    Returns:
        [ConfigType]: Processed config
    """
    config = _process_general_config(config=config)
    config = _process_logbook_config(config=config, should_make_dir=should_make_dir)
    config = _process_experiment_config(config=config, should_make_dir=should_make_dir)
    return config
3bf2cc4eff379fcfe8f7d58332ae33658e7e5540
6,897
def calendar_heatmap_echarts(data_frame: pd.DataFrame, date_field: str = None, value_field: str = None,
                             title: str = "", width: str = "100%", height: str = "300px") -> Echarts:
    """
    Calendar heatmap, showing per-day intensity.

    :param data_frame:
    :param date_field: date column
    :param value_field: value column
    :param title: optional title
    :param width: width of the output div; supports pixels and percentages, e.g. 800px/100%
    :param height: height of the output div; supports pixels and percentages, e.g. 800px/100%
    :return:
    """
    df = data_frame[[date_field, value_field]].copy()
    value_max = df[value_field].max()
    value_min = df[value_field].min()
    date_start = pd.to_datetime(df[date_field].min()).strftime("%Y-%m-%d")
    date_end = pd.to_datetime(df[date_field].max()).strftime("%Y-%m-%d")
    df[date_field] = pd.to_datetime(df[date_field]).dt.strftime("%Y-%m-%d")
    options = {
        'title': {
            'text': title
        },
        'tooltip': {'formatter': "{c}"},
        'visualMap': {
            'text': ['高', '低'],  # UI labels: 'high', 'low'
            'min': value_min,
            'max': value_max,
            'type': 'continuous',
            'orient': 'horizontal',
            'inRange': {
                'color': ["#313695", "#4575b4", "#74add1", "#abd9e9", "#e0f3f8", "#ffffbf", "#fee090",
                          "#fdae61", "#f46d43", "#d73027", "#a50026"]
            },
            'left': 'center',
            'top': 0,
            'hoverLink': True
        },
        'calendar': {
            'top': 60,
            'left': 30,
            'right': 30,
            'cellSize': ['auto', 'auto'],
            'range': [date_start, date_end],
            'itemStyle': {
                'borderWidth': 0.5
            },
            'dayLabel': {
                'firstDay': 1
            },
            'monthLabel': {
                'nameMap': 'cn'
            },
            'yearLabel': {'show': True}
        },
        'series': {
            'type': 'heatmap',
            'coordinateSystem': 'calendar',
            'emphasis': {
                'itemStyle': {
                    'borderColor': "#333",
                    'borderWidth': 1,
                    'shadowColor': 'rgba(0, 0, 0, 0.5)',
                    'shadowBlur': 15
                }
            },
            'data': df[[date_field, value_field]].values.tolist()
        }
    }
    return Echarts(options=options, width=width, height=height)
e92a41dcb533f5fdb0fba91bb1f80b0199d1523e
6,898
from typing import Union

import numpy as np
import torch


def adj_to_edge_indices(adj: Union[torch.Tensor, np.ndarray]) -> Union[torch.Tensor, np.ndarray]:
    """
    Args:
        adj: a (N, N) adjacency matrix, where N is the number of nodes

    Returns:
        A (2, E) array, edge_idxs, where E is the number of edges, and
        edge_idxs[0], edge_idxs[1] are the source & destination nodes,
        respectively.
    """
    edge_tuples = torch.nonzero(adj, as_tuple=True) if torch.is_tensor(adj) else np.nonzero(adj)
    edge_src = edge_tuples[0].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[0], axis=0)
    edge_dest = edge_tuples[1].unsqueeze(0) if torch.is_tensor(adj) else np.expand_dims(edge_tuples[1], axis=0)
    if torch.is_tensor(adj):
        edge_idxs = torch.cat((edge_src, edge_dest), dim=0)
    else:
        edge_idxs = np.concatenate((edge_src, edge_dest), axis=0)
    return edge_idxs
b84d978e7ea6b24cf9b4e8aaa074581d4516435d
6,899
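The record above on a tiny undirected 3-cycle (NumPy branch):

import numpy as np

adj = np.array([
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
])
print(adj_to_edge_indices(adj))
# [[0 0 1 1 2 2]
#  [1 2 0 2 0 1]]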
def create_export_settings_window():
    """
    This function contains all the logic of the export settings window and
    will run the window on its own.
    :return: None
    """
    window = sg.Window("Export Settings", generate_export_settings_layout(), modal=True, finalize=True,
                       keep_on_top=True)
    while True:
        n_event, _ = window.read()
        if n_event in ["Exit", sg.WIN_CLOSED]:
            window.close()
            return None
        if n_event == "-PROGRAM_CODE-":
            export(window)
        if n_event == "-OVERWATCH_CODE-":
            export_as_overwatch_code(window)
9552cfb269cb3e67cf3332783b9a43a674bc9e3d
6,900
def get_vertex_list(session, node_id, part_info):
    """Wrapper for HAPI_GetVertexList

    Args:
        session (int): The session of Houdini you are interacting with.
        node_id (int): The node to get.
        part_info (PartInfo): Part info of querying

    Returns:
        np.ndarray: Array of vertices
    """
    data_buffer = (c_int * part_info.vertexCount)()
    result = HAPI_LIB.HAPI_GetVertexList(
        byref(session), node_id, part_info.id,
        byref(data_buffer), 0, part_info.vertexCount)
    assert result == HDATA.Result.SUCCESS,\
        "GetVertexList Failed with {0}".format(HDATA.Result(result).name)
    data_np = np.frombuffer(data_buffer, np.int32)
    return data_np
dd5a37e248347dc9e9b5f8fba07d202008626ea5
6,901
def lamb1(u, alpha=.5):
    """Approximate the Lambert W function.

    Approximate the Lambert W function from its upper and lower bounds.
    The parameter alpha (between 0 and 1) determines how close the
    approximation is to the lower bound instead of the upper bound.

    :arg float u: Modified argument of the function.
    :arg float alpha: Bound parameter (default 0.5).
    :returns: (-z)-value of the Lambert function.
    :raises ValueError: If u is negative.
    :raises ValueError: If alpha is not between 0 and 1.
    """
    if u < 0:
        errmsg = 'Argument u must be positive'
        raise ValueError(errmsg)
    if alpha < 0 or alpha > 1:
        errmsg = 'Parameter alpha must be between 0 and 1'
        raise ValueError(errmsg)
    beta = (2 + alpha) / 3
    negz = 1 + (2 * u) ** .5 + beta * u
    return negz
1d769ccb74334eef55aa1bc0697328b34ba067bc
6,902
def loglikelihood(time_steps: list) -> float:
    """Calculate the log-likelihood of the time steps from the estimation

    Parameters
    ----------
    time_steps : list
        estimation time steps

    Returns
    -------
    float
        log-likelihood
    """
    loglikelihood = 0
    for time_step in time_steps:
        loglikelihood += _loglikelihood(time_step)
    return loglikelihood
6761ced2947d9ac382d53eef390bd827ceb51203
6,903
def get_r0_rm_rp(s, i_delta):
    """
    compute 3 points r0, r_minus and r_plus to determine apsis
    compute these at s.i-i_delta and s.i-2*i_delta
    """
    xp = s.Xlast[:, s.i % s.save_last]
    x0 = s.Xlast[:, (s.i - i_delta) % s.save_last]
    xm = s.Xlast[:, (s.i - 2 * i_delta) % s.save_last]
    rp = norm(xp[0:3] - xp[3:6])
    r0 = norm(x0[0:3] - x0[3:6])
    rm = norm(xm[0:3] - xm[3:6])
    return r0, rm, rp
83595b9b15eb9c9373aa4e8f75d2ffc39c8ba248
6,904
def build_rfb_lite(base, feature_layer, mbox, num_classes):
    """Receptive Field Block Net for Accurate and Fast Object Detection,
    for embedded systems.
    See: https://arxiv.org/pdf/1711.07767.pdf for more details.
    """
    base_, extras_, norm_, head_ = add_extras(base(), feature_layer, mbox, num_classes, version='rfb_lite')
    return RFB(base_, extras_, norm_, head_, feature_layer, num_classes)
c8b1810d088f816d4e3be587cb1085bacde08076
6,906
import numpy as np


def bfunsmat(u, p, U):
    """Computes a matrix of the form :math:`B_{ij}`, where :math:`i=0\\ldots p`
    and for each :math:`j` th column the row :math:`i` of the matrix
    corresponds to the value of the :math:`(\\mathrm{span}(u_j)-p+i)` th
    bspline basis function at :math:`u_j`.

    Parameters:
        u (np.array(float)) : evaluation point(s)
        p (int) : basis function degree
        U (np.array(float)) : knot vector

    Returns:
        np.array(float) : matrix :math:`B_{ij}`
    """
    nkts = U.size
    nbfuns = nkts - p - 1
    npts = u.size
    Bij = np.zeros((nbfuns, npts))
    for j in range(0, npts):
        span = fspan(u[j], p, U)
        B_i = bfuns(span, u[j], p, U)
        for i in range(0, p + 1):
            Bij[i, j] = B_i[i]
    return Bij
6dc260a165c5ae25ac9914ff0b96c1fd8f05b93c
6,907
def getFourgram(words, join_string):
    """
    Input: a list of words, e.g., ['I', 'am', 'Denny', 'boy']
    Output: a list of fourgrams, e.g., ['I_am_Denny_boy']
    I use _ as join_string for this example.
    """
    assert type(words) == list
    L = len(words)
    if L > 3:
        lst = []
        for i in range(L - 3):
            lst.append(join_string.join([words[i], words[i + 1], words[i + 2], words[i + 3]]))
    else:
        # fall back to trigrams
        lst = getTrigram(words, join_string)
    return lst
17717bb608a7ef5eff1ac9e1f49d2606b7113360
6,908
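The record above on a five-word list:

words = ['I', 'am', 'Denny', 'boy', 'ha']
print(getFourgram(words, '_'))
# ['I_am_Denny_boy', 'am_Denny_boy_ha']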
import math


def get_age_carbon_14_dating(carbon_14_ratio):
    """Returns the estimated age of the sample in years.

    carbon_14_ratio: the percent (0 < percent < 1) of carbon-14 in the sample
    compared to the amount in living tissue (unitless).
    """
    if isinstance(carbon_14_ratio, str):
        raise TypeError("Please provide a number")
    elif carbon_14_ratio <= 0:
        raise ValueError("Not acceptable, must be greater than 0 but less than 1")
    elif carbon_14_ratio > 1:
        raise ValueError("Too large, must be between 0 and 1")
    calculation = math.log(carbon_14_ratio) / DECAY_CONSTANT * T_HALF
    age = "{:.2f}".format(calculation)  # rounds to 2 decimal places
    return age
8b0ab86e3c45a97065fefb6c4f02ab87c3e82d23
6,909
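A worked check of the record above, assuming the usual module constants T_HALF = 5730 years and DECAY_CONSTANT = -0.693 (so ln(0.5)/DECAY_CONSTANT is about one half-life); neither constant is shown in the record:

# Assumed module constants (not part of the dataset record):
T_HALF = 5730
DECAY_CONSTANT = -0.693

print(get_age_carbon_14_dating(0.5))   # '5731.22' -- about one half-life
print(get_age_carbon_14_dating(0.25))  # '11462.43' -- about two half-lives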
def get_input_definition() -> InputDefinition:
    """
    Query ReconAll's input file definition (*t1_files*) to check for
    existing runs.

    Returns
    -------
    InputDefinition
        ReconAll's *t1_files* input definition
    """
    node = get_node()
    return node.analysis_version.input_definitions.get(key=T1_FILES_KEY)
1575bc2521b6f041c4151be6405ac1d458333d62
6,910
def create_ou_process(action_spec, ou_stddev, ou_damping):
    """Create nested zero-mean Ornstein-Uhlenbeck processes.

    The temporal update equation is:

    .. code-block:: python

        x_next = (1 - damping) * x + N(0, std_dev)

    Note: if ``action_spec`` is nested, the returned nested OUProcess
    will not be checkpointed.

    Args:
        action_spec (nested BoundedTensorSpec): action spec
        ou_damping (float): Damping rate in the above equation. We must have
            :math:`0 <= damping <= 1`.
        ou_stddev (float): Standard deviation of the Gaussian component.

    Returns:
        nested ``OUProcess`` with the same structure as ``action_spec``.
    """
    def _create_ou_process(action_spec):
        return dist_utils.OUProcess(action_spec.zeros(), ou_damping, ou_stddev)

    ou_process = alf.nest.map_structure(_create_ou_process, action_spec)
    return ou_process
292b235863e57b49e531e5e5b091f55688357122
6,911
def clean_data(df):
    """
    Remove the duplicates from a dataframe.

    Parameters:
        df (DataFrame): data frame
    """
    df = df.drop_duplicates()
    return df
7072885f7233c5407060344e6858f89108d61ee8
6,912
def IssueFactory(data, journal_id, issue_order):
    """
    Registers the issue using the opac schema.

    This function may raise the exception `models.Journal.DoesNotExist`.
    """
    mongo_connect()

    metadata = data["metadata"]

    issue = models.Issue()
    issue._id = issue.iid = data.get("id")
    issue.type = metadata.get("type", "regular")
    issue.spe_text = metadata.get("spe_text", "")
    issue.start_month = metadata.get("publication_month", 0)
    issue.end_month = metadata.get("publication_season", [0])[-1]
    issue.year = metadata.get("publication_year")
    issue.volume = metadata.get("volume", "")
    issue.number = metadata.get("number", "")

    issue.label = metadata.get(
        "label", "%s%s" % ("v" + issue.volume, "n" + issue.number)
    )
    issue.order = metadata.get("order", 0)
    issue.pid = metadata.get("pid", "")

    issue.journal = models.Journal.objects.get(_id=journal_id)
    issue.order = issue_order

    return issue
49ef57cb1c628c05e30a35e10680d34140066182
6,913
def _is_permission_in_db(permission_name: str):
    """To check whether the given permission is in the DB

    Parameters
    ----------
    permission_name: str
        A permission name we use internally.
        E.g., hazard, hazard:hazard, project...
    """
    return bool(
        models.Auth0Permission.query.filter_by(permission_name=permission_name).first()
    )
6e0e672d5c73e0740b695f29d3459a3b80c86831
6,914
from typing import Optional


def get_dataset(dataset_id: Optional[str] = None,
                location: Optional[str] = None,
                project: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatasetResult:
    """
    Gets any metadata associated with a dataset.
    """
    __args__ = dict()
    __args__['datasetId'] = dataset_id
    __args__['location'] = location
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:healthcare/v1:getDataset', __args__, opts=opts,
                                    typ=GetDatasetResult).value

    return AwaitableGetDatasetResult(
        name=__ret__.name,
        time_zone=__ret__.time_zone)
985a7e9b7b124c0dba37455426889683e5769aaf
6,916
def is_email_available() -> bool:
    """
    Returns whether email services are available on this instance
    (i.e. settings are in place).
    """
    return bool(settings.EMAIL_HOST)
c8b8362aed7f2af5dd49070dce7f522fd0c2088a
6,917
def sql2label(sql, num_cols):
    """encode sql"""
    # Because this is a classification task, labels start from 0, so sel_num
    # and cond_num are decremented here and incremented back in the
    # prediction phase.
    cond_conn_op_label = sql.cond_conn_op
    sel_num_label = sql.sel_num - 1
    # the new dataset has cond_num = 0, do not -1
    cond_num_label = len(sql.conds) + len(sql.having)

    sel_label = np.zeros(num_cols, dtype='int32')
    sel_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    for col_id, agg_op in zip(sql.sel, sql.agg):
        assert col_id < num_cols, f"select col_id({col_id}) >= num_cols({num_cols}): {sql}"
        sel_agg_label[col_id][agg_op] = 1
        sel_label[col_id] = 1

    # len(SQL.op_sql_dict) is one past the valid op ID range, which means
    # "no OP" by default
    cond_op_label = np.ones(num_cols, dtype='int32') * len(SQL.op_sql_dict)
    having_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    for col_id, cond_op, _ in sql.conds:
        assert col_id < num_cols, f"where col_id({col_id}) >= num_cols({num_cols}): {sql}"
        cond_op_label[col_id] = cond_op
    for agg, col_id, cond_op, _ in sql.having:
        assert col_id < num_cols, f"having col_id({col_id}) >= num_cols({num_cols}): {sql}"
        cond_op_label[col_id] = cond_op
        having_agg_label[col_id][agg] = 1

    order_col_label = np.zeros(num_cols, dtype='int32')
    order_agg_label = np.zeros((num_cols, SQL.num_agg_ops), dtype='int32')
    order_direction_label = sql.order_direction
    for agg, order_col in sql.order_by:
        order_col_label[order_col] = 1
        order_agg_label[order_col][agg] = 1

    group_num_label = sql.group_num
    having_num_label = len(sql.having)
    group_col_label = np.zeros(num_cols, dtype='int32')
    for col_id in sql.group_by:
        assert col_id < num_cols, f"group_by col_id({col_id}) >= num_cols({num_cols}): {sql}"
        group_col_label[col_id] = 1

    return sel_num_label, cond_num_label, cond_conn_op_label, \
        sel_agg_label, sel_label, cond_op_label, \
        order_col_label, order_agg_label, order_direction_label, \
        group_num_label, having_num_label, group_col_label, having_agg_label
b25c819e4645c07216970877ac95d20b0f8baab6
6,918
import time


def retrieveToken(verbose: bool = False, save: bool = False, **kwargs) -> str:
    """LEGACY retrieve token directly following the importConfigFile or
    Configure method.
    """
    token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object, **kwargs)
    token = token_with_expiry['token']
    config.config_object['token'] = token
    config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
    config.header.update({'Authorization': f'Bearer {token}'})
    if verbose:
        print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
    return token
b419934bf2725b46d23abc506c5b5a2828de1d0c
6,919
def format_str_for_write(input_str: str) -> bytes:
    """Format a string for writing to SteamVR's stream."""
    if len(input_str) < 1:
        return "".encode("utf-8")
    if input_str[-1] != "\n":
        return (input_str + "\n").encode("utf-8")
    return input_str.encode("utf-8")
1b83a2c75118b03b7af06350e069775c0b877816
6,920
from functools import wraps


def reverse_result(func):
    """The recursive function `get_path` returns results in order reversed
    from desired. This decorator just reverses those results before returning
    them to the caller.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        if result is not None:
            return result[::-1]
    return inner
c13d28550e77a8fba149c50673252012c712961f
6,921
def convert_from_opencorpora_tag(to_ud, tag: str, text: str):
    """
    Convert tags from the OpenCorpora format to Universal Dependencies.

    :param to_ud: the converter.
    :param tag: tag in OpenCorpora format.
    :param text: the token.
    :return: tag in UD format.
    """
    ud_tag = to_ud(str(tag), text)
    pos = ud_tag.split()[0]
    gram = ud_tag.split()[1]
    return pos, gram
0e650cc4976d408ed88ef9280fe3a74261353561
6,922
import struct


def reg_to_float(reg):
    """convert reg value to Python float"""
    st = struct.pack(">L", reg)
    return struct.unpack(">f", st)[0]
f4a2d416e880807503f3c0ba0b042fbbecc09064
6,923
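The record above reinterprets a 32-bit register value as an IEEE-754 float:

print(reg_to_float(0x3F800000))  # 1.0
print(reg_to_float(0x40490FDB))  # 3.1415927410125732 (closest float32 to pi)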
import numpy as np
import seapy


def wvelocity(grid, u, v, zeta=0):
    """
    Compute "true" vertical velocity

    Parameters
    ----------
    grid : seapy.model.grid,
        The grid to use for the calculations
    u : ndarray,
        The u-field in time
    v : ndarray,
        The v-field in time
    zeta : ndarray, optional,
        The zeta-field in time

    Returns
    -------
    w : ndarray,
        Vertical Velocity
    """
    grid = seapy.model.asgrid(grid)
    u = np.ma.array(u)
    v = np.ma.array(v)
    zeta = np.ma.array(zeta)

    # Check the sizes
    while u.ndim < 4:
        u = u[np.newaxis, ...]
    while v.ndim < 4:
        v = v[np.newaxis, ...]
    while zeta.ndim < 3:
        zeta = zeta[np.newaxis, ...]

    # Get omega
    W, z_r, z_w, thick_u, thick_v = omega(grid, u, v, zeta, scale=True,
                                          work=True)

    # Compute quasi-horizontal motions (Ui + Vj)*GRAD s(z)
    vert = z_r * 0
    # U-contribution
    wrk = u * (z_r[:, :, :, 1:] - z_r[:, :, :, :-1]) * \
        (grid.pm[:, 1:] - grid.pm[:, :-1])
    vert[:, :, :, 1:-1] = 0.25 * (wrk[:, :, :, :-1] + wrk[:, :, :, 1:])
    # V-contribution
    wrk = v * (z_r[:, :, 1:, :] - z_r[:, :, :-1, :]) * \
        (grid.pn[1:, :] - grid.pn[:-1, :])
    vert[:, :, 1:-1, :] += 0.25 * (wrk[:, :, :-1, :] + wrk[:, :, 1:, :])

    # Compute barotropic velocity [ERROR IN FORMULATION RIGHT NOW]
    wrk = np.zeros((vert.shape[0], vert.shape[2], vert.shape[3]))
    ubar = np.sum(u * thick_u, axis=1) / np.sum(thick_u, axis=1)
    vbar = np.sum(v * thick_v, axis=1) / np.sum(thick_v, axis=1)
    # wrk[:, 1:-1, 1:-1] = (ubar[:, 1:-1, :-1] - ubar[:, 1:-1, 1:] +
    #                       vbar[:, :-1, 1:-1] - vbar[:, 1:, 1:-1])

    # Shift vert from rho to w
    wvel = z_w * 0
    # First two layers
    slope = (z_r[:, 0, :, :] - z_w[:, 0, :, :]) / \
        (z_r[:, 1, :, :] - z_r[:, 0, :, :])
    wvel[:, 0, :, :] = 0.375 * (vert[:, 0, :, :] - slope *
                                (vert[:, 1, :, :] - vert[:, 0, :, :])) + \
        0.75 * vert[:, 0, :, :] - \
        0.125 * vert[:, 1, :, :]
    wvel[:, 1, :, :] = W[:, 1, :, :] + wrk + \
        0.375 * vert[:, 0, :, :] + \
        0.75 * vert[:, 1, :, :] - 0.125 * vert[:, 2, :, :]

    # Middle of the grid
    wvel[:, 2:-2, :, :] = W[:, 2:-2, :, :] + \
        wrk[:, np.newaxis, :, :] + \
        0.5625 * (vert[:, 1:-2, :, :] + vert[:, 2:-1, :, :]) - \
        0.0625 * (vert[:, :-3, :, :] + vert[:, 3:, :, :])

    # Upper two layers
    slope = (z_w[:, -1, :, :] - z_r[:, -1, :, :]) / \
        (z_r[:, -1, :, :] - z_r[:, -2, :, :])
    wvel[:, -1, :, :] = wrk + 0.375 * (vert[:, -1, :, :] + slope *
                                       (vert[:, -1, :, :] - vert[:, -2, :, :])) + \
        0.75 * vert[:, -1, :, :] - \
        0.0625 * vert[:, -2, :, :]
    wvel[:, -2, :, :] = W[:, -2, :, :] + 0.375 * vert[:, -1, :, :] + \
        wrk + 0.75 * vert[:, -2, :, :] - \
        0.125 * vert[:, -3, :, :]

    # No gradient at the boundaries
    wvel[:, :, 0, :] = wvel[:, :, 1, :]
    wvel[:, :, -2:, :] = wvel[:, :, -3:-2, :]
    wvel[:, :, :, 0] = wvel[:, :, :, 1]
    wvel[:, :, :, -2:] = wvel[:, :, :, -3:-2]

    return wvel
452e84b334b42b9099ed888319a3cc88e7191e9b
6,924
def _as_nested_lists(vertices):
    """ Convert a nested structure such as an ndarray into a list of lists. """
    out = []
    for part in vertices:
        if hasattr(part[0], "__iter__"):
            verts = _as_nested_lists(part)
            out.append(verts)
        else:
            out.append(list(part))
    return out
c69bd2084aa8e76a53adf3e25286a8dd7ae23176
6,925
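The record above converts array-like nesting into plain Python lists:

import numpy as np

verts = np.array([[0.0, 0.0], [1.0, 0.5], [2.0, 1.0]])
print(_as_nested_lists(verts))  # [[0.0, 0.0], [1.0, 0.5], [2.0, 1.0]]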
import markdown2


def markdown(code: str) -> str:
    """Convert markdown to HTML using markdown2."""
    # markdown_extensions is a module-level list in the original project.
    return markdown2.markdown(code, extras=markdown_extensions)
09f463aa28f9289d05b44244e6ac60ce7905af83
6,926
import json
import urllib


async def post_notification(request):
    """
    Create a new notification to run a specific plugin

    :Example:
        curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false}'
        curl -X POST http://localhost:8081/fledge/notification -d '{"name": "Test Notification", "description":"Test Notification", "rule": "threshold", "channel": "email", "notification_type": "one shot", "enabled": false, "rule_config": {}, "delivery_config": {}}'
    """
    try:
        notification_service = ServiceRegistry.get(s_type=ServiceRecord.Type.Notification.name)
        _address, _port = notification_service[0]._address, notification_service[0]._port
    except service_registry_exceptions.DoesNotExist:
        raise web.HTTPNotFound(reason="No Notification service available.")

    try:
        data = await request.json()
        if not isinstance(data, dict):
            raise ValueError('Data payload must be a valid JSON')

        name = data.get('name', None)
        description = data.get('description', None)
        rule = data.get('rule', None)
        channel = data.get('channel', None)
        notification_type = data.get('notification_type', None)
        enabled = data.get('enabled', None)
        rule_config = data.get('rule_config', {})
        delivery_config = data.get('delivery_config', {})
        retrigger_time = data.get('retrigger_time', None)
        try:
            if retrigger_time:
                if float(retrigger_time) > 0 and float(retrigger_time).is_integer():
                    pass
                else:
                    raise ValueError
        except ValueError:
            raise ValueError('Invalid retrigger_time property in payload.')

        if name is None or name.strip() == "":
            raise ValueError('Missing name property in payload.')
        if description is None:
            raise ValueError('Missing description property in payload.')
        if rule is None:
            raise ValueError('Missing rule property in payload.')
        if channel is None:
            raise ValueError('Missing channel property in payload.')
        if notification_type is None:
            raise ValueError('Missing notification_type property in payload.')

        if utils.check_reserved(name) is False:
            raise ValueError('Invalid name property in payload.')
        if utils.check_reserved(rule) is False:
            raise ValueError('Invalid rule property in payload.')
        if utils.check_reserved(channel) is False:
            raise ValueError('Invalid channel property in payload.')

        if notification_type not in NOTIFICATION_TYPE:
            raise ValueError('Invalid notification_type property in payload.')

        if enabled is not None:
            if enabled not in ['true', 'false', True, False]:
                raise ValueError('Only "true", "false", true, false are allowed for value of enabled.')
        is_enabled = "true" if ((type(enabled) is str and enabled.lower() in ['true']) or (
            (type(enabled) is bool and enabled is True))) else "false"

        storage = connect.get_storage_async()
        config_mgr = ConfigurationManager(storage)
        curr_config = await config_mgr.get_category_all_items(name)

        if curr_config is not None:
            raise ValueError("A Category with name {} already exists.".format(name))

        try:
            # Get default config for rule and channel plugins
            url = '{}/plugin'.format(request.url)
            try:
                # When authentication is mandatory we need to pass token in request header
                auth_token = request.token
            except AttributeError:
                auth_token = None

            list_plugins = json.loads(await _hit_get_url(url, auth_token))
            r = list(filter(lambda rules: rules['name'] == rule, list_plugins['rules']))
            c = list(filter(lambda channels: channels['name'] == channel, list_plugins['delivery']))
            if len(r) == 0 or len(c) == 0:
                raise KeyError
            rule_plugin_config = r[0]['config']
            delivery_plugin_config = c[0]['config']
        except KeyError:
            raise ValueError("Invalid rule plugin {} and/or delivery plugin {} supplied.".format(rule, channel))

        # Verify if rule_config contains valid keys
        if rule_config != {}:
            for k, v in rule_config.items():
                if k not in rule_plugin_config:
                    raise ValueError("Invalid key {} in rule_config {} supplied for plugin {}.".format(k, rule_config, rule))

        # Verify if delivery_config contains valid keys
        if delivery_config != {}:
            for k, v in delivery_config.items():
                if k not in delivery_plugin_config:
                    raise ValueError(
                        "Invalid key {} in delivery_config {} supplied for plugin {}.".format(k, delivery_config, channel))

        # First create templates for notification and rule, channel plugins
        post_url = 'http://{}:{}/notification/{}'.format(_address, _port, urllib.parse.quote(name))
        await _hit_post_url(post_url)  # Create Notification template
        post_url = 'http://{}:{}/notification/{}/rule/{}'.format(_address, _port, urllib.parse.quote(name),
                                                                 urllib.parse.quote(rule))
        await _hit_post_url(post_url)  # Create Notification rule template
        post_url = 'http://{}:{}/notification/{}/delivery/{}'.format(_address, _port, urllib.parse.quote(name),
                                                                     urllib.parse.quote(channel))
        await _hit_post_url(post_url)  # Create Notification delivery template

        # Create configurations
        notification_config = {
            "description": description,
            "rule": rule,
            "channel": channel,
            "notification_type": notification_type,
            "enable": is_enabled,
        }
        if retrigger_time:
            notification_config["retrigger_time"] = retrigger_time
        await _update_configurations(config_mgr, name, notification_config, rule_config, delivery_config)

        audit = AuditLogger(storage)
        await audit.information('NTFAD', {"name": name})
    except ValueError as ex:
        raise web.HTTPBadRequest(reason=str(ex))
    except Exception as e:
        raise web.HTTPInternalServerError(reason=str(e))
    else:
        return web.json_response({'result': "Notification {} created successfully".format(name)})
bdc85dd3d93f51352776a3e63b34a18961014058
6,927
def test_send_file_to_router(monkeypatch, capsys):
    """ . """
    # pylint: disable=unused-argument
    @counter_wrapper
    def get_commands(*args, **kwargs):
        """ . """
        return "commands"

    @counter_wrapper
    def add_log(log: Log, cursor=None):
        """ . """
        assert (
            log.message == "Adding command set /tmp/foo.sh to router"
        ), "Log has correct file name"

    monkeypatch.setattr(deploy_helper, "generate_bash_commands", get_commands)
    monkeypatch.setattr(db, "add_deployment_log", add_log)
    monkeypatch.setattr(
        deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: False
    )

    with pytest.raises(ValueError):
        deployment.send_file_to_router(
            "before", "after", None, ["commands"], {}, "/tmp/foo.sh"
        )

    assert get_commands.counter == 1, "Commands generated"
    assert add_log.counter == 1, "Log added"
    printed = capsys.readouterr()
    assert printed.out == "Failed to write /tmp/foo.sh to router\n", "Error printed"

    monkeypatch.setattr(
        deploy_helper, "write_data_to_router_file", lambda *args, **kwargs: True
    )
    deployment.send_file_to_router(
        "before", "after", None, ["commands"], {}, "/tmp/foo.sh"
    )
    assert get_commands.counter == 2, "Commands generated"
    assert add_log.counter == 2, "Log added"
739e9d2dbb9adc40b386566b4e73dae98381ed4c
6,928
from rdkit import Chem


def smiles2mol(smiles):
    """Convert SMILES string into rdkit.Chem.rdchem.Mol.

    Args:
        smiles: str, a SMILES string.

    Returns:
        mol: rdkit.Chem.rdchem.Mol
    """
    smiles = canonicalize(smiles)
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    Chem.Kekulize(mol)
    return mol
56a8e0b28f98b1dd920cf03977eb6086a134fd8f
6,929
def build_term_map(deg, blocklen):
    """
    Builds term map (degree, index) -> term

    :param deg:
    :param blocklen:
    :return:
    """
    term_map = [[0] * comb(blocklen, x, True) for x in range(deg + 1)]
    for dg in range(1, deg + 1):
        for idx, x in enumerate(term_generator(dg, blocklen - 1)):
            term_map[dg][idx] = x
    return term_map
3e70cb38314189ff33da3eeb43ca0c68d13904cd
6,931
def gen_sets():
    """ List of names of all available problem generators """
    return registered_gens.keys()
f5aefd9d480115013ef8423ce6fd173d5acf0045
6,932
def is_valid_currency(currency_: str) -> bool:
    """
    is_valid_currency: check whether the given currency is valid.

    @currency_(str): currency code
    return(bool): both FROM_CNY and TO_CNY have a record for currency_
    """
    return currency_ in FROM_CNY and currency_ in TO_CNY
5b95b0d0a76e5d979e7a560ee14f6adf2c79e140
6,933
from typing import List
from typing import Tuple


def load_gene_prefixes() -> List[Tuple[str, str, str]]:
    """Returns FamPlex gene prefixes as a list of rows

    Returns
    -------
    list
        List of lists corresponding to rows in gene_prefixes.csv.
        Each row has three columns [Pattern, Category, Notes].
    """
    return _load_csv(GENE_PREFIXES_PATH)
9fc450636a4b517a79350b9b6131dccfe860c58e
6,934
def uri2dict(uri):
    """Take a license uri and convert it into a dictionary of values."""
    # (Python 2 `raise X, "msg"` syntax modernized throughout.)
    if uri.startswith(LICENSES_BASE) and uri.endswith('/'):
        base = LICENSES_BASE
        license_info = {}
        raw_info = uri[len(base):]
        raw_info = raw_info.rstrip('/')

        info_list = raw_info.split('/')

        if len(info_list) not in (1, 2, 3):
            raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)

        retval = dict(code=info_list[0])
        if len(info_list) > 1:
            retval['version'] = info_list[1]
        if len(info_list) > 2:
            retval['jurisdiction'] = info_list[2]

        # XXX perform any validation on the dict produced?
        return retval

    elif uri.startswith(CC0_BASE) and uri.endswith('/'):
        base = CC0_BASE
        retval = {'code': 'CC0', 'jurisdiction': None}
        retval['version'] = uri.rstrip('/').split('/')[-1]
        return retval

    elif uri.startswith(PUBLICDOMAIN_MARK_BASE) and uri.endswith('/'):
        base = PUBLICDOMAIN_MARK_BASE
        retval = {'code': 'mark', 'jurisdiction': None}
        retval['version'] = uri.rstrip('/').split('/')[-1]
        return retval

    else:
        raise InvalidURIError("Invalid Creative Commons URI: <%s>" % uri)
1f2ccdc52b1dc3424b7554857a87f85a02ea1dbd
6,936
import re


def test_clean_str(text, language='english'):
    """
    Cleans an input string and prepares it for tokenization, for use when
    training word embeddings.

    Based on a preprocessing script posted by Sebastian Ruder:
    https://s3.amazonaws.com/aylien-main/data/multilingual-embeddings/preprocess.py
    and used in this paper: https://arxiv.org/pdf/1609.02745.pdf

    :type text: unicode
    :param text: input text
    :return the cleaned input string
    """
    text = text.lower()
    # replace all numbers with 0
    text = re.sub(r"[-+]?[-/.\d]*[\d]+[:,.\d]*", ' 0 ', text)
    if language == 'english':
        # English-specific pre-processing
        text = re.sub(r"\'s", " \'s", text)
        text = re.sub(r"\'ve", " \'ve", text)
        text = re.sub(r"n\'t", " n\'t", text)
        text = re.sub(r"\'re", " \'re", text)
        text = re.sub(r"\'d", " \'d", text)
        text = re.sub(r"\'ll", " \'ll", text)
    elif language == 'french':
        # French-specific pre-processing
        text = re.sub(r"c\'", " c\' ", text)
        text = re.sub(r"l\'", " l\' ", text)
        text = re.sub(r"j\'", " j\' ", text)
        text = re.sub(r"d\'", " d\' ", text)
        text = re.sub(r"s\'", " s\' ", text)
        text = re.sub(r"n\'", " n\' ", text)
        text = re.sub(r"m\'", " m\' ", text)
        text = re.sub(r"qu\'", " qu\' ", text)
    elif language == 'spanish':
        # Spanish-specific pre-processing
        text = re.sub(r"¡", " ", text)
    elif language == 'chinese':
        pass
    text = re.sub(r'[,:;\.\(\)-/"<>]', " ", text)
    # separate exclamation marks and question marks
    text = re.sub(r"!+", " ! ", text)
    text = re.sub(r"\?+", " ? ", text)
    text = re.sub(r"\s+", " ", text)
    return text.strip()
683f6d27e7486990d0b2a11dd5aeb78f2c1bab07
6,937
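For example, the English path lowercases, pads contractions, isolates punctuation, and maps numbers to 0:

print(test_clean_str("I've read 3 papers!"))
# "i 've read 0 papers !"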
import tensorflow as tf  # TF1-style API (tf.variable_scope)


def calc_iou(boxes1, boxes2, scope='iou'):
    """calculate ious
    Args:
      boxes1: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4]  ====> (x_center, y_center, w, h)
      boxes2: 5-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4] ===> (x_center, y_center, w, h)
    Return:
      iou: 4-D tensor [BATCH_SIZE, CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    with tf.variable_scope(scope):
        # transform (x_center, y_center, w, h) to (x1, y1, x2, y2)
        boxes1_t = tf.stack([boxes1[..., 0] - boxes1[..., 2] / 2.0,
                             boxes1[..., 1] - boxes1[..., 3] / 2.0,
                             boxes1[..., 0] + boxes1[..., 2] / 2.0,
                             boxes1[..., 1] + boxes1[..., 3] / 2.0],
                            axis=-1)

        boxes2_t = tf.stack([boxes2[..., 0] - boxes2[..., 2] / 2.0,
                             boxes2[..., 1] - boxes2[..., 3] / 2.0,
                             boxes2[..., 0] + boxes2[..., 2] / 2.0,
                             boxes2[..., 1] + boxes2[..., 3] / 2.0],
                            axis=-1)

        # calculate the left-up point & right-down point of the intersection
        lu = tf.maximum(boxes1_t[..., :2], boxes2_t[..., :2])
        rd = tf.minimum(boxes1_t[..., 2:], boxes2_t[..., 2:])

        # intersection
        intersection = tf.maximum(0.0, rd - lu)
        inter_square = intersection[..., 0] * intersection[..., 1]

        # calculate the boxes1 and boxes2 areas
        square1 = boxes1[..., 2] * boxes1[..., 3]
        square2 = boxes2[..., 2] * boxes2[..., 3]

        union_square = tf.maximum(square1 + square2 - inter_square, 1e-10)

    return tf.clip_by_value(inter_square / union_square, 0.0, 1.0)
e5714cf74be851b6b6003458c44e3308308907a3
6,939
def not_before(cert):
    """
    Gets the naive datetime of the certificate's 'not_before' field.
    This field denotes the first date in time which the given certificate is valid.
    :param cert:
    :return: Datetime
    """
    return cert.not_valid_before
e5e269e67de3059fe0ddfa9a35fb13e7f124d798
6,940
def get_data_from_dict_for_2pttype(type1, type2, datadict):
    """
    Given strings identifying the type of 2pt data in a fits file
    and a dictionary of 2pt data (i.e. the blinding factors),
    returns the data from the dictionary matching those types.
    """
    # spectra type codes in fits file, under hdutable.header['quant1'] and quant2
    galaxy_position_fourier = "GPF"
    galaxy_shear_emode_fourier = "GEF"
    galaxy_shear_bmode_fourier = "GBF"
    galaxy_position_real = "GPR"
    galaxy_shear_plus_real = "G+R"
    galaxy_shear_minus_real = "G-R"

    if type1 == galaxy_position_fourier and type2 == galaxy_position_fourier:
        yfromdict = datadict['gal_gal_cl']
        xfromdict = datadict['gal_gal_l']
    elif (type1 == galaxy_shear_emode_fourier and type2 == galaxy_position_fourier) or (type2 == galaxy_shear_emode_fourier and type1 == galaxy_position_fourier):
        yfromdict = datadict['gal_shear_cl']
        xfromdict = datadict['gal_shear_l']
    elif (type1 == galaxy_shear_emode_fourier and type2 == galaxy_shear_emode_fourier):
        yfromdict = datadict['shear_shear_cl']
        xfromdict = datadict['shear_shear_l']
    elif type1 == galaxy_position_real and type2 == galaxy_position_real:
        yfromdict = datadict['gal_gal_xi']
        xfromdict = datadict['gal_gal_theta']
    elif (type1 == galaxy_shear_plus_real and type2 == galaxy_position_real) or (type2 == galaxy_shear_plus_real and type1 == galaxy_position_real):
        yfromdict = datadict['gal_shear_xi']
        xfromdict = datadict['gal_shear_theta']
    elif (type1 == galaxy_shear_plus_real and type2 == galaxy_shear_plus_real):
        yfromdict = datadict['shear_shear_xip']
        xfromdict = datadict['shear_shear_theta']
    elif (type1 == galaxy_shear_minus_real and type2 == galaxy_shear_minus_real):
        yfromdict = datadict['shear_shear_xim']
        xfromdict = datadict['shear_shear_theta']
    else:
        raise ValueError("Spectra type {0:s} - {1:s} not recognized.".format(type1, type2))
    return xfromdict, yfromdict
d8656e6274dd8fb4001d477572220f2c51c08e01
6,941
import networkx as nx
import numpy as np


def simple_unweighted_distance(g, source, return_as_dicts=True):
    """Returns the unweighted shortest path length between nodes and source."""
    dist_dict = nx.shortest_path_length(g, source)
    if return_as_dicts:
        return dist_dict
    else:
        return np.fromiter((dist_dict[ni] for ni in g), dtype=int)
d82742ac88f26db8296dec9d28794d3e6d60eec7
6,942
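A quick check on a path graph; this assumes networkx 2.x, where shortest_path_length with a source returns a dict keyed by target:

g = nx.path_graph(4)  # nodes 0-1-2-3
print(simple_unweighted_distance(g, 0))
# {0: 0, 1: 1, 2: 2, 3: 3}
print(simple_unweighted_distance(g, 0, return_as_dicts=False))
# array([0, 1, 2, 3])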
def A070939(i: int = 0) -> int: """Length of binary representation of n.""" return len(f"{i:b}")
31b12e493645c3bdf7e636a48ceccff5d9ecc492
6,943
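Worked examples; for n >= 1 the binary length equals floor(log2(n)) + 1:

print(A070939(1))    # 1 -> "1"
print(A070939(16))   # 5 -> "10000"
print(A070939(255))  # 8 -> "11111111"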
import time

# gpio_read, gpio_write, publish_device_state, and FEED_PUMP_DEFAULT_TIME
# are project-level helpers/constants


def feed_pump(pin: int, water_supply_time: int = FEED_PUMP_DEFAULT_TIME) -> bool:
    """ feed water

    Parameters
    ----------
    pin : int
        target gpio (BCM)
    water_supply_time : int
        water feeding time in seconds

    Returns
    -------
    bool
        Was water feeding successful ?
    """
    is_running = gpio_read(pin)
    if is_running:
        return False

    # pump on
    gpio_write(pin, 1)
    try:
        publish_device_state()
    except Exception:
        # if the state cannot be published, stop the pump and report failure
        gpio_write(pin, 0)
        return False

    time.sleep(water_supply_time)

    # pump off
    gpio_write(pin, 0)
    publish_device_state()
    return True
c45b1775991a4914116468961ae979dae71f6caf
6,944
def app_nav(context): """Renders the main nav, topnav on desktop, sidenav on mobile""" url_name = get_url_name(context) namespace = get_namespace(context) cache_id = "{}:{}x".format(context['request'].user.username, context.request.path) cache_key = make_template_fragment_key('app_nav', [cache_id]) context['app_nav_cache_id'] = cache_id # Only bother doing this work if we don't have a cached template render if not cache.get(cache_key): # Build an app list for the page and user app_list = [] for app in APP_LIST: # Check we have access if app['access'](context.request.user): # Set active flag if namespace matches app['active'] = (app['app'] == namespace) # Add to returned list app_list.append(app) context['app_list'] = app_list context['app'] = namespace if namespace: context['page_title'] = get_page_title(get_module_nav_list(namespace, url_name, context.request.user), context) return context
8e9cc5428b9af22bad13c6454f462d585a04c005
6,945
def centre_to_zeroes(cartesian_point, centre_point): """Converts centre-based coordinates to be in relation to the (0,0) point. PIL likes to do things based on (0,0), and in this project I'd like to keep the origin at the centre point. Parameters ---------- cartesian_point : (numeric) x, y coordinates in terms of the centre centre_point : (numeric) x, y coordinates of the centre """ x = cartesian_point[0] + centre_point[0] y = centre_point[1] - cartesian_point[1] return x, y
f0ddd632650127e3bb1ed766191950ccf7f06d87
6,946
import boto3


def get_all_stack_names(cf_client=None):
    """ Get all stack names

    Args:
        cf_client: boto3 CloudFormation client (created lazily if not given)

    Returns:
        list of StackName
    """
    # Create the client at call time rather than as a default argument,
    # which would be evaluated once at import time.
    if cf_client is None:
        cf_client = boto3.client("cloudformation")
    LOGGER.info("Attempting to retrieve stack information")
    response = cf_client.describe_stacks()
    LOGGER.info("Retrieved stack information: %s", response)
    return [stack["StackName"] for stack in response["Stacks"]]
47a36e15651495cc0b5c80e642bb5154640d6b7d
6,947
import calendar def match_date(date, date_pattern): """ Match a specific date, a four-tuple with no special values, with a date pattern, four-tuple possibly having special values. """ # unpack the date and pattern year, month, day, day_of_week = date year_p, month_p, day_p, day_of_week_p = date_pattern # check the year if year_p == 255: # any year pass elif year != year_p: # specific year return False # check the month if month_p == 255: # any month pass elif month_p == 13: # odd months if (month % 2) == 0: return False elif month_p == 14: # even months if (month % 2) == 1: return False elif month != month_p: # specific month return False # check the day if day_p == 255: # any day pass elif day_p == 32: # last day of the month last_day = calendar.monthrange(year + 1900, month)[1] if day != last_day: return False elif day_p == 33: # odd days of the month if (day % 2) == 0: return False elif day_p == 34: # even days of the month if (day % 2) == 1: return False elif day != day_p: # specific day return False # check the day of week if day_of_week_p == 255: # any day of the week pass elif day_of_week != day_of_week_p: # specific day of the week return False # all tests pass return True
d794cf211589840697007ecec7cd9e3ba0655b0f
6,948
import pandas as pd


def get_heating_features(df, fine_grained_HP_types=False):
    """Get heating type category based on HEATING_TYPE category.
    heating_system: heat pump, boiler, community scheme etc.
    heating_source: oil, gas, LPG, electric.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe that is updated with heating features.

    fine_grained_HP_types : bool, default=False
        If True, get different heat pump types (air sourced, ground sourced etc.).
        If False, return "heat pump" as heating type category.

    Return
    ---------
    df : pandas.DataFrame
        Updated dataframe with heating system and source."""

    # Collections
    heating_system_types = []
    heating_source_types = []

    # Get heating types
    heating_types = df["MAINHEAT_DESCRIPTION"]

    # Get specific and general heating category for each entry
    for heating in heating_types:

        # Set default value
        system_type = "unknown"
        source_type = "unknown"

        # If heating value exists
        if not pd.isnull(heating):

            # Lowercase
            heating = heating.lower()

            other_heating_system = [
                ("boiler and radiator" in heating),
                ("boiler & radiator" in heating),
                ("boiler and underfloor" in heating),
                ("boiler & underfloor" in heating),
                ("community scheme" in heating),
                ("heater" in heating),  # not specified heater
            ]

            # Different heat pump types
            # --------------------------

            if "ground source heat pump" in heating:
                system_type = "ground source heat pump"
                source_type = "electric"

            elif "air source heat pump" in heating:
                system_type = "air source heat pump"
                source_type = "electric"

            elif "water source heat pump" in heating:
                system_type = "water source heat pump"
                source_type = "electric"

            elif "heat pump" in heating:
                system_type = "heat pump"
                source_type = "electric"

            # Electric heaters
            # --------------------------

            elif "electric storage heaters" in heating:
                system_type = "storage heater"
                source_type = "electric"

            elif "electric underfloor heating" in heating:
                system_type = "underfloor heating"
                source_type = "electric"

            # Warm air
            # --------------------------

            elif "warm air" in heating:
                system_type = "warm air"
                source_type = "electric"

            # Boiler and radiator / Boiler and underfloor / Community scheme / Heater (unspecified)
            # --------------------------

            elif any(other_heating_system):

                # Set heating system dict
                heating_system_dict = {
                    "boiler and radiator": "boiler and radiator",
                    "boiler & radiator": "boiler and radiator",
                    "boiler and underfloor": "boiler and underfloor",
                    "boiler & underfloor": "boiler and underfloor",
                    "community scheme": "community scheme",
                    "heater": "heater",  # not specified heater (otherwise handled above)
                }

                # Set heating source dict
                heating_source_dict = {
                    "gas": "gas",
                    ", oil": "oil",  # with preceding comma (!= "boiler")
                    "lpg": "LPG",
                    "electric": "electric",
                }

                # If heating system word is found, save respective system type
                for word, system in heating_system_dict.items():
                    if word in heating:
                        system_type = system

                # If heating source word is found, save respective source type
                for word, source in heating_source_dict.items():
                    if word in heating:
                        source_type = source

        # Don't differentiate between heat pump types
        if not fine_grained_HP_types:
            if "heat pump" in system_type:
                system_type = "heat pump"

        # Save heating system type and source type
        heating_system_types.append(system_type)
        heating_source_types.append(source_type)

    # Add heating system and source to df
    df["HEATING_SYSTEM"] = heating_system_types
    df["HEATING_SOURCE"] = heating_source_types

    return df
5707975a63aca4778e8dbdd70670e317c777c998
6,949
from scipy.integrate import odeint


def integrate_eom(initial_conditions, t_span, design_params, SRM1, SRM2):
    """Numerically integrates the zero gravity equations of motion.

    Args:
        initial_conditions (np.array()): Array of initial conditions. Typically set to an array of zeros.
        t_span (np.array()): Time vector (s) over which to integrate the equations of motions.
        design_params (np.array()): Array of design parameters. [r1, r2, d1, d2, Ixx, Iyy, Izz] where r1 and r2
            are the radial locations of the solid rocket motors (m), d1 and d2 are the longitudinal locations
            of the two motors (m), and Ixx, Iyy, and Izz are the inertia values (kg-m^2).
        SRM1 (SolidRocketMotor()): First solid rocket motor organized into a class.
        SRM2 (SolidRocketMotor()): Second solid rocket motor organized into a class.

    Returns:
        (np.array()): Numerical solutions for wx, wy, wz, psi, theta, and phi.
    """
    # euler_eom is the project-level function defining the equations of motion
    return odeint(euler_eom, initial_conditions, t_span, args=(design_params, SRM1, SRM2))
07574c775268798371425b837b20706ac9af5f52
6,950
def activation_sparse(net, transformer, images_files):
    """
    Analyze the sparsity of the activation bottom/top blobs
    Args:
        net: the instance of Caffe inference
        transformer:
        images_files: sparse dataset
    Returns:
        none
    """
    print("\nAnalyze the sparse info of the Activation:")

    # run float32 inference on sparse dataset to analyze activations
    for image in images_files:
        net_forward(net, image, transformer)

        # analyze bottom/top blob
        for layer in sparse_layer_lists:
            blob = net.blobs[layer.bottom_blob_name].data[0].flatten()
            layer.analyze_bottom_blob(blob)

            blob = net.blobs[layer.top_blob_name].data[0].flatten()
            layer.analyze_top_blob(blob)

    # calculate top blob and flag the sparse channels in every layer
    for layer in sparse_layer_lists:
        layer.sparse_bottom_blob()
        layer.sparse_top_blob()

    return None
da138764d002e84bdee306e15b6c8524b223bcbc
6,951
from omegaconf import OmegaConf


def cfg_load(filename):
    """Load a config yaml file."""
    # omegaconf2namespace is a project-level helper that converts an
    # OmegaConf object into an argparse-style namespace
    return omegaconf2namespace(OmegaConf.load(filename))
2aa5f808f89d1f654cd95cd6a1c8f903d4baade6
6,952
def char_to_num(x: str) -> int:
    """Converts a letter string (A=1, ..., Z=26, AA=27, ...) to a number

    :param x: Letter string (uppercase A-Z)
    :type x: str
    :return: Corresponding number
    :rtype: int
    """
    total = 0
    for i, char in enumerate(reversed(x)):
        total += (ord(char) - 64) * (26 ** i)
    return total
f66ee13d696ec1872fbc2a9960362456a5c4cbe9
6,953
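This is the spreadsheet-style column conversion, so for example:

print(char_to_num("A"))   # 1
print(char_to_num("Z"))   # 26
print(char_to_num("AB"))  # 28  (1*26 + 2)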
from typing import Callable
import time


def time_it(f: Callable):
    """
    Timer decorator: shows how long execution of function took.
    :param f: function to measure
    :return: wrapped function that logs its execution time
    """
    def timed(*args, **kwargs):
        t1 = time.time()
        res = f(*args, **kwargs)
        t2 = time.time()
        # log is a project-level logging helper
        log("\'", f.__name__, "\' took ", round(t2 - t1, 3), " seconds to complete.", sep="")
        return res
    return timed
bc7321721afe9dc9b4a2861b2c849e6a5d2c309a
6,954
def has_prefix(sub_s, dictionary):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :param dictionary: (list) Words to check against
    :return: (bool) If there is any word with prefix stored in sub_s
    """
    s = ''.join(sub_s)
    for word in dictionary:
        if word.startswith(s):
            return True
    return False
b45f3bf7ed699bc215d1670f35ebc0f15b7ec0ff
6,955
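A quick example with a small word list:

words = ["apple", "apply", "banana"]
print(has_prefix("app", words))  # True: "apple" and "apply" start with "app"
print(has_prefix("apz", words))  # False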
import tensorflow as tf


def tf_center_crop(images, sides):
    """Crops the central region of size (sides[0], sides[1]) from a batch of images"""
    images_shape = tf.shape(images)
    top = (images_shape[1] - sides[0]) // 2
    left = (images_shape[2] - sides[1]) // 2
    return tf.image.crop_to_bounding_box(images, top, left, sides[0], sides[1])
1b1c8bcab55164a04b0ac6109a7b91d084f55b7b
6,957
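A shape check, assuming TensorFlow 2.x eager execution:

images = tf.zeros([8, 64, 64, 3])           # batch of 64x64 RGB images
cropped = tf_center_crop(images, [32, 32])  # take the central 32x32 region
print(cropped.shape)                        # (8, 32, 32, 3)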
import datetime

import pytz


def convert_timezone(time_in: datetime.datetime) -> datetime.datetime:
    """
    Converts a system-generated datetime in the UTC timezone to local time.
    :param time_in: UTC time as a datetime.datetime
    :return: datetime.datetime converted to the local timezone (settings.TIME_ZONE)
    """
    time_utc = time_in.replace(tzinfo=pytz.timezone("UTC"))
    time_local = time_utc.astimezone(pytz.timezone(settings.TIME_ZONE))
    return time_local
3843aa62a5ff29fd629776e69c52cd95c51fac5d
6,958
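A sketch of the conversion, assuming a Django-style settings object with TIME_ZONE (stubbed here for illustration):

# Hypothetical stand-in for the project's settings module.
class settings:
    TIME_ZONE = "Asia/Shanghai"

naive_utc = datetime.datetime(2023, 1, 1, 12, 0, 0)  # as stored by the system
print(convert_timezone(naive_utc))  # 2023-01-01 20:00:00+08:00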
import six
import tensorflow as tf


def classifier_fn_from_tfhub(output_fields, inception_model, return_tensor=False):
    """Returns a function that can be used as a classifier function.

    Copied from tfgan but avoids loading the model each time `_classifier_fn`
    is called.

    Args:
        output_fields: A string, list, or `None`. If present, assume the module
            outputs a dictionary, and select this field.
        inception_model: A model loaded from TFHub.
        return_tensor: If `True`, return a single tensor instead of a dictionary.

    Returns:
        A one-argument function that takes an image Tensor and returns outputs.
    """
    if isinstance(output_fields, six.string_types):
        output_fields = [output_fields]

    def _classifier_fn(images):
        output = inception_model(images)
        if output_fields is not None:
            output = {x: output[x] for x in output_fields}
        if return_tensor:
            assert len(output) == 1
            output = list(output.values())[0]
        return tf.nest.map_structure(tf.compat.v1.layers.flatten, output)

    return _classifier_fn
e7f54a4c46519465460cc0e97b0f6f12f91a98d4
6,962
import json


def get_rate_limit(client):
    """
    Get the current state of the GitHub API rate limit for the token in use
    """
    query = '''query {
      rateLimit {
        limit
        remaining
        resetAt
      }
    }'''
    response = client.execute(query)
    json_response = json.loads(response)
    return json_response['data']['rateLimit']
ec5f853014f25c841e71047da62ca41907b02e13
6,963
import functools import pprint def pret(f): """ Decorator which prints the result returned by `f`. >>> @pret ... def f(x, y): return {'sum': x + y, 'prod': x * y} >>> res = f(2, 3) ==> @pret(f) -- {'prod': 6, 'sum': 5} """ @functools.wraps(f) def g(*args, **kwargs): ret = f(*args, **kwargs) _pdeco("pret", f.__name__, "{retstr}".format( retstr=tstr(pprint.pformat(ret), 120, "<... truncated>"), )) return ret return g
fedb8cf19913042d0defef676db6b22715e8c572
6,964
from argparse import ArgumentParser, RawDescriptionHelpFormatter


def parse_arguments() -> tuple[str, str, bool]:
    """Return the command line arguments."""
    # get_version is a project-level helper
    current_version = get_version()
    description = f"Release Quality-time. Current version is {current_version}."
    epilog = """preconditions for release:
  - the current folder is the release folder
  - the current branch is master
  - the workspace has no uncommitted changes
  - the generated data model documentation is up-to-date
  - the change log has an '[Unreleased]' header
  - the change log contains no release candidates"""
    parser = ArgumentParser(description=description, epilog=epilog, formatter_class=RawDescriptionHelpFormatter)
    allowed_bumps_in_rc_mode = ["rc", "rc-major", "rc-minor", "rc-patch", "drop-rc"]  # rc = release candidate
    allowed_bumps = ["rc-patch", "rc-minor", "rc-major", "patch", "minor", "major"]
    bumps = allowed_bumps_in_rc_mode if "rc" in current_version else allowed_bumps
    parser.add_argument("bump", choices=bumps)
    parser.add_argument(
        "-c", "--check-preconditions-only", action="store_true", help="only check the preconditions and then exit"
    )
    arguments = parser.parse_args()
    return current_version, arguments.bump, arguments.check_preconditions_only
7b58b2b3c99a4297bb12b714b289336cdbc75a5e
6,965
def can_hold_bags(rule: str, bag_rules: dict) -> dict:
    """
    Returns a dict of all bags that can be held by the given bag color

    :param rule: Color of a given bag
    :param bag_rules: Dictionary mapping bag colors to the bags they hold
    :type rule: str
    :type bag_rules: dict
    :return: Dictionary of bags held by the given bag color
    """
    return bag_rules[rule]
b7554c32bd91f9a05cd84c9249d92cc6354458a9
6,969
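With an Advent-of-Code style rules dictionary, this is a direct lookup of a bag's contents:

bag_rules = {
    "shiny gold": {"dark olive": 1, "vibrant plum": 2},
    "dark olive": {},
}
print(can_hold_bags("shiny gold", bag_rules))
# {'dark olive': 1, 'vibrant plum': 2}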
import numpy as np


def fix_levers_on_same_level(same_level, above_level):
    """
    Input: 3D numpy array with malmo_object_to_index mapping
    Returns: 3D numpy array where 3 channels represent object index, color index, state index for minigrid
    """
    # malmo_object_to_index is a project-level mapping from object names to indices
    lever_idx = malmo_object_to_index['lever']
    condition = above_level == lever_idx
    minimap_array = np.where(condition, above_level, same_level)
    return minimap_array
d1727e188f9a5935a660d806f69f9b472db94217
6,970
import matplotlib.pyplot as plt
import seaborn as sns


def iv_plot(df, var_name=None, suffix='_dev'):
    """Returns an IV (information value) plot for a specified variable"""
    p_suffix = suffix.replace('_', '').upper()
    sub_df = df if var_name is None else df.loc[df.var_name == var_name, ['var_cuts_string'+suffix, 'ln_odds'+suffix, 'resp_rate'+suffix, 'iv'+suffix]]
    sub_df = sub_df.copy()  # work on a copy to avoid SettingWithCopyWarning
    # _trend is a project-level helper that fits a trend line to the series
    sub_df['resp_rate_trend'+suffix] = _trend(sub_df['resp_rate'+suffix])
    iv_val = round(sub_df['iv'+suffix].sum(), 4)

    f, ax = plt.subplots()
    ax2 = ax.twinx()
    sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate'+suffix, data=sub_df, color='red', ax=ax)
    sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate_trend'+suffix, data=sub_df, color='red', linestyle='--', ax=ax)
    sns.lineplot(x='var_cuts_string'+suffix, y='ln_odds'+suffix, data=sub_df, color='darkgreen', ax=ax2)
    ax.set_xticklabels(list(sub_df['var_cuts_string'+suffix]), rotation=45, ha='right')
    ax.set(xlabel='Variable Bins', ylabel=f'Resp Rate ({p_suffix})', title=f'IV of {var_name} ({iv_val})')
    ax2.set(ylabel=f'Log Odds ({p_suffix})')
    ax.legend(handles=[l for a in [ax, ax2] for l in a.lines], labels=[f'Resp Rate ({p_suffix})', f'Resp Rate Trend ({p_suffix})', f'Log Odds ({p_suffix})'], loc=0)
    return f
dd35329b5b91a19babdfa943c2f7688bb013c680
6,971
from py._path.local import LocalPath def is_alive(pid): """Return whether a process is running with the given PID.""" return LocalPath('/proc').join(str(pid)).isdir()
e6086b79aa648dc4483085e15f096152185aa780
6,972
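Since this reads /proc, it only works on Linux-like systems; checking the current process is a safe smoke test:

import os

print(is_alive(os.getpid()))  # True: this process certainly exists
print(is_alive(2 ** 22 + 1))  # almost certainly False (above the default pid_max)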
from pyspark import SparkContext from typing import Callable import functools from typing import Any def inheritable_thread_target(f: Callable) -> Callable: """ Return thread target wrapper which is recommended to be used in PySpark when the pinned thread mode is enabled. The wrapper function, before calling original thread target, it inherits the inheritable properties specific to JVM thread such as ``InheritableThreadLocal``. Also, note that pinned thread mode does not close the connection from Python to JVM when the thread is finished in the Python side. With this wrapper, Python garbage-collects the Python thread instance and also closes the connection which finishes JVM thread correctly. When the pinned thread mode is off, it return the original ``f``. .. versionadded:: 3.2.0 Parameters ---------- f : function the original thread target. Notes ----- This API is experimental. It is important to know that it captures the local properties when you decorate it whereas :class:`InheritableThread` captures when the thread is started. Therefore, it is encouraged to decorate it when you want to capture the local properties. For example, the local properties from the current Spark context is captured when you define a function here instead of the invocation: >>> @inheritable_thread_target ... def target_func(): ... pass # your codes. If you have any updates on local properties afterwards, it would not be reflected to the Spark context in ``target_func()``. The example below mimics the behavior of JVM threads as close as possible: >>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP """ if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined] # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on. # NOTICE the internal difference vs `InheritableThread`. `InheritableThread` # copies local properties when the thread starts but `inheritable_thread_target` # copies when the function is wrapped. properties = ( SparkContext._active_spark_context._jsc.sc() # type: ignore[attr-defined] .getLocalProperties() .clone() ) @functools.wraps(f) def wrapped(*args: Any, **kwargs: Any) -> Any: try: # Set local properties in child thread. SparkContext._active_spark_context._jsc.sc().setLocalProperties( # type: ignore[attr-defined] properties ) return f(*args, **kwargs) finally: InheritableThread._clean_py4j_conn_for_current_thread() return wrapped else: return f
02d2e58449c736bf8ef19354bfd8f7a21066615b
6,973
from typing import OrderedDict def join_label_groups(grouped_issues, grouped_prs, issue_label_groups, pr_label_groups): """Combine issue and PR groups in to one dictionary. PR-only groups are added after all issue groups. Any groups that are shared between issues and PRs are added according to the order in the issues list of groups. This results in "label-groups" remaining in the same order originally specified even if a group does not have issues in it. Otherwise, a shared group may end up at the end of the combined dictionary and not in the order originally specified by the user. """ issue_group_names = [x['name'] for x in issue_label_groups] pr_group_names = [x['name'] for x in pr_label_groups] shared_groups = [] for idx, group_name in enumerate(issue_group_names): if len(pr_group_names) > idx and group_name == pr_group_names[idx]: shared_groups.append(group_name) else: break label_groups = OrderedDict() # add shared groups first for group_name in shared_groups: # make sure to copy the issue group in case it is added to label_groups[group_name] = grouped_issues.get(group_name, [])[:] # add any remaining issue groups for group_name, group in grouped_issues.items(): if group_name in shared_groups: continue label_groups[group_name] = group[:] # add any remaining PR groups (extending any existing groups) for group_name, group in grouped_prs.items(): label_groups.setdefault(group_name, []).extend(group) return label_groups
b51a70a60bde3580326816eaf0d3b76cb51062ac
6,975
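A small example: 'bug' is shared (same position in both group lists), so PRs extend the issue group, and the PR-only group comes last:

from collections import OrderedDict

grouped_issues = OrderedDict([("bug", ["issue-1"]), ("feature", ["issue-2"])])
grouped_prs = OrderedDict([("bug", ["pr-1"]), ("docs", ["pr-2"])])
issue_label_groups = [{"name": "bug"}, {"name": "feature"}]
pr_label_groups = [{"name": "bug"}, {"name": "docs"}]

print(join_label_groups(grouped_issues, grouped_prs, issue_label_groups, pr_label_groups))
# OrderedDict([('bug', ['issue-1', 'pr-1']), ('feature', ['issue-2']), ('docs', ['pr-2'])])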
import numpy as np


def healpix_ijs_neighbours(istar, jstar, nside):
    """Gets the healpix i, jstar neighbours for a single healpix pixel.

    Parameters
    ----------
    istar : array
        Healpix integer i star index.
    jstar : array
        Healpix integer j star index.
    nside : int
        Healpix nside.

    Returns
    -------
    istar_neigh : array
        Neighbour healpix integer i star index.
    jstar_neigh : array
        Neighbour healpix integer j star index.
    """
    if jstar - istar + 1 == 2*nside:
        istar_neigh = [istar, istar + 1, istar + 1, istar + nside, istar + nside,
                       istar - nside, istar + 1 - nside, istar+2*nside]
        jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar - 1 + nside, jstar + nside,
                       jstar - nside, jstar - nside, jstar+2*nside]
    elif istar - jstar + 1 == 2*nside:
        istar_neigh = [istar, istar - 1, istar - 1, istar - nside, istar - nside,
                       istar + nside, istar - 1 + nside, istar-2*nside]
        jstar_neigh = [jstar + 1, jstar + 1, jstar, jstar + 1 - nside, jstar - nside,
                       jstar + nside, jstar + nside, jstar-2*nside]
    elif jstar - istar + 1 == nside and istar % nside == 0:
        istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1, istar, istar + 1]
        jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1]
    elif istar - jstar + 1 == nside and jstar % nside == 0:
        istar_neigh = [istar - 1, istar, istar - 1, istar + 1, istar - 1, istar, istar + 1]
        jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar, jstar + 1, jstar + 1, jstar + 1]
    elif istar % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0:
        istar_neigh = [istar, istar + 1, istar + 1, istar, istar + 1,
                       istar - ((jstar+1)-nside*np.floor(jstar/nside)),
                       istar - ((jstar)-nside*np.floor(jstar/nside)),
                       istar - ((jstar-1)-nside*np.floor(jstar/nside))]
        jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1,
                       nside*np.floor(jstar/nside)-1, nside*np.floor(jstar/nside)-1,
                       nside*np.floor(jstar/nside)-1]
    elif jstar % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0:
        jstar_neigh = [jstar, jstar + 1, jstar + 1, jstar, jstar + 1,
                       jstar - ((istar+2)-nside*np.floor(istar/nside)),
                       jstar - ((istar+1)-nside*np.floor(istar/nside)),
                       jstar - ((istar)-nside*np.floor(istar/nside))]
        istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1,
                       nside*np.floor(istar/nside)-1, nside*np.floor(istar/nside)-1,
                       nside*np.floor(istar/nside)-1]
    elif (jstar + 1 - nside) % nside == 0 and jstar + 1 - nside*(np.floor(istar/nside) + 1) > 0:
        jstar_neigh = [jstar, jstar - 1, jstar - 1, jstar, jstar - 1,
                       jstar + nside*(np.floor(istar/nside)+1)-istar,
                       jstar + nside*(np.floor(istar/nside)+1)-istar-1,
                       jstar + nside*(np.floor(istar/nside)+1)-istar+1]
        istar_neigh = [istar - 1, istar - 1, istar, istar + 1, istar + 1,
                       nside*(np.floor(istar/nside)+1), nside*(np.floor(istar/nside)+1),
                       nside*(np.floor(istar/nside)+1)]
    elif (istar + 1 - nside) % nside == 0 and istar + 1 - nside*(np.floor(jstar/nside) + 1) > 0:
        istar_neigh = [istar, istar - 1, istar - 1, istar, istar - 1,
                       istar + nside*(np.floor(jstar/nside)+1)-jstar,
                       istar + nside*(np.floor(jstar/nside)+1)-jstar-1,
                       istar + nside*(np.floor(jstar/nside)+1)-jstar+1]
        jstar_neigh = [jstar - 1, jstar - 1, jstar, jstar + 1, jstar + 1,
                       nside*(np.floor(jstar/nside)+1), nside*(np.floor(jstar/nside)+1),
                       nside*(np.floor(jstar/nside)+1)]
    else:
        istar_neigh = [istar - 1, istar, istar + 1, istar - 1, istar + 1,
                       istar - 1, istar, istar + 1]
        jstar_neigh = [jstar - 1, jstar - 1, jstar - 1, jstar, jstar,
                       jstar + 1, jstar + 1, jstar + 1]

    istar_neigh = np.array(istar_neigh)
    jstar_neigh = np.array(jstar_neigh)

    # wrap neighbours that fall off the healpix grid
    cond = np.where(istar_neigh + jstar_neigh > 9*nside-1)[0]
    istar_neigh[cond] = istar_neigh[cond] - 4*nside
    jstar_neigh[cond] = jstar_neigh[cond] - 4*nside

    cond = np.where(istar_neigh + jstar_neigh < nside-1)[0]
    istar_neigh[cond] = istar_neigh[cond] + 4*nside
    jstar_neigh[cond] = jstar_neigh[cond] + 4*nside

    istar_neigh = np.unique(istar_neigh)
    jstar_neigh = np.unique(jstar_neigh)

    return istar_neigh, jstar_neigh
48cae5cd13101529c7d03f9c08ed0f2c2d77a7b8
6,976
from argparse import ArgumentParser


def create_parser(config: YAMLConfig) -> ArgumentParser:
    """
    Automatically creates a parser from all of the values specified in a config file.
    Will use the dot syntax for nested dictionaries.

    Parameters
    ----------
    config: YAMLConfig
        Config object

    Returns
    -------
    ArgumentParser
        Parser loaded up with all of the values specified in the config
    """
    key_pairs = config.keys()
    parser = ArgumentParser(
        description="""
        This argument parser was autogenerated from the config file. This allows you
        to overwrite specific YAML values on the fly. The options listed here do not
        entail an exhaustive list of the things that you can configure. For more
        information on possible kwargs, refer to the class definition of the object
        in question.
        """
    )
    parser.add_argument("config_file", help="YAML config file")
    for k in key_pairs:
        current = config.access(k)
        parser.add_argument(f"--{k}", type=type(current))
    return parser
8fcf886448061b7f520d133bbf9bb66047e9f516
6,978