Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import numpy as np


def get_ordered_patterns(order, patterns):
    """
    Place the sites in the pattern dictionary into the order that they were added.

    Input:
        order: a list of pattern ids providing the order
        patterns: a dictionary of patterns
    Output:
        Nested array with columns for sites, their order, and amplicon id
    """
    # Map each pattern id to its 1-based position in the ordering.
    order_dict = {pattern: idx + 1 for idx, pattern in enumerate(order)}
    result = []
    for pattern, loci in patterns.items():
        for locus, data in loci.items():
            for site in data['sites']:
                result.append([site.strip(), order_dict[pattern], pattern])
    return np.array(result, dtype=str)
b3622fa9b3330923da819cd923582db13199f86a
27,629
import datetime as dt

import numpy as np


def extract_pphour(file, fecha, llat, llon):
    """ """
    l_lat = llat
    l_lon = np.array(llon) % 360
    i_lat, i_lon, lat, lon = get_index_lat(fecha, file, llat, llon)
    tiempos = get_index_time(file, fecha)
    di = tiempos[1]
    di_f = (di + dt.timedelta(days=8)).replace(hour=0)
    i_t1 = [i for i in range(len(tiempos)) if tiempos[i] == di][0]
    i_t2 = [i for i in range(len(tiempos)) if tiempos[i] == di_f][0]
    # Create an auxiliary variable
    res = np.empty([i_t2 - i_t1 + 1, len(lat), len(lon)])
    res[:] = np.nan
    fdates = [datef for datef in tiempos if datef >= di and datef <= di_f]
    pp1 = file.variables['apcpsfc'][i_t1:i_t2, i_lat[0]:i_lat[1] + 1, i_lon[0]:i_lon[1] + 1]
    pp2 = file.variables['apcpsfc'][i_t1 + 1:i_t2 + 1, i_lat[0]:i_lat[1] + 1, i_lon[0]:i_lon[1] + 1]
    pphour = pp2 - pp1
    return lon, lat, pphour, fdates
e5e2a2271e85a6d5a9f6fc4a189b1119f62d471b
27,630
def bubble_sort2(array):
    """Classic implementation of bubble sort."""
    n = len(array)
    stop = False
    # i marks the right end (inclusive) of each pass; range [n-1, 1],
    # written as the while-friendly half-open interval [n-1, 0)
    i = n - 1
    while i > 0 and not stop:
        stop = True
        # j marks the swap position; range [0, i-1], i.e. the half-open interval [0, i)
        # each pass covers the closed interval [0, i]
        j = 0
        while j < i:
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
                stop = False
            j += 1
        i -= 1
    return array
c95706b40c6e328e321f68de117ee20faa5c1c31
27,631
from math import ceil, sqrt


def encriptar(texto):
    """Receives a text and returns it encrypted."""
    # 1. Input filtering to avoid errors
    # 1.1 Check that texto is a string
    # 1.2 If texto is empty, return texto
    # 2. Remove the whitespace from the text
    texto = texto.replace(" ", "")
    # 3. The grid
    # 3.1 Compute the grid size (value of n)
    n = ceil(sqrt(len(texto)))
    # 3.2 Create the grid
    grade = [["" for _ in range(n)] for __ in range(n)]
    # 4. Insert the text into the grid vertically,
    #    from top to bottom, left to right.
    for j in range(n):  # j = column
        for i in range(n):  # i = row
            try:
                elemento, texto = texto[0], texto[1:]
            except IndexError:
                elemento = ""
            finally:
                # linha.append(elemento)
                grade[i][j] = elemento
    # 5. Turn the grid into a text
    texto_criptografado = " ".join(["".join(x) for x in grade])
    # 6. Return the result of the transformation
    return texto_criptografado
63afaf6bae94a7a9c43b3fea39bdd5df2916b195
27,632
from enum import Enum
from functools import wraps


def extend_enum(*inherited_enums: type[Enum]):
    """EXPERIMENTAL Join multiple enums into one.

    Modified version from: https://stackoverflow.com/a/64045773/14748231
    """
    # All members from all enums which are injected into the result enum
    joined_members = {}

    def _add_item_if_not_exist(item) -> None:
        # Add given item to joined_members dict.
        # If item.name is already an existing key, raise ValueError.
        if item.name not in joined_members:
            joined_members[item.name] = item.value
        else:
            raise ValueError(f"{item.name} key already in joined_members")

    def wrapper(applied_enum: type[Enum]):
        @wraps(applied_enum)
        def inner():
            # Add all items from inherited enums.
            for inherited_enum in inherited_enums:
                for item in inherited_enum:
                    _add_item_if_not_exist(item)
            # Add all items from applied enum.
            for item in applied_enum:
                _add_item_if_not_exist(item)
            # Finally, return result Enum with collected members injected.
            ResEnum = Enum(applied_enum.__name__, joined_members)
            ResEnum.__doc__ = applied_enum.__doc__
            return ResEnum
        return inner()
    return wrapper
4ec9e36fdd584555e5c201d3a1899007fa9adb85
27,634
import csv
import io

import requests


def tsv_reader():
    """ read register-like data from government-form-data TSV"""
    # `url` and `sep` are expected to be module-level constants
    resp = requests.get(url=url)
    resp.raise_for_status()
    return csv.DictReader(io.StringIO(resp.text), delimiter=sep)
2487f44f516afc8727d1d0fd8b253578aaaef026
27,635
import time

import tornado.util


def GetMonotime():
    """Older tornado doesn't have monotime(); stay compatible."""
    if hasattr(tornado.util, 'monotime_impl'):
        return tornado.util.monotime_impl
    else:
        return time.time
e0ea587512213b2a830b4912ae67928cb8515b98
27,636
import numpy as np


def test_multiplica():
    """
    Target function returns the sum of the element-wise product of two given vectors.
    Expect output as np.float object.
    """
    dict_1 = file_read.read_file('../../data/10mM_2,7-AQDS_1M_KOH_25mVs_0.5step_2.txt')
    data = file_read.data_frame(dict_1, 1)
    col_x1, col_x2 = baseline.split(data.Potential)
    col_y1, col_y2 = baseline.split(data.Current)
    a_val = baseline.multiplica(col_x1, col_y1)
    assert isinstance(a_val, np.float64), ("Output should be float object,"
                                           " but function is returning {}".format(type(a_val)))
    b_val = np.multiply(col_x1, col_y1).sum()
    np.testing.assert_almost_equal(a_val, b_val, decimal=3,
                                   err_msg="Calculation is incorrect")
    return "Test Passed for multiplica function!"
7334a9500da86df959e6d6056a044d240dfbec95
27,637
def last_name_first(n):
    """Returns: copy of n in form 'last-name, first-name'

    Precondition: n is a string in the form 'first-name last-name';
    n has only one space, separating first and last."""
    assert type(n) == str, str(n) + " is not a string"
    assert is_two_words(n), n + ' has the wrong form'
    # Compute the value
    end_first = n.find(' ')
    first = n[:end_first]
    last = n[end_first + 1:]
    return last + ', ' + first
448b23cc70294a28b82d36af95c2dab772b30e9d
27,638
import logging


def by_count(logger: logging.Logger, once_every: int) -> logging.Logger:
    """
    The returned logger will only permit at most one print every `once_every`
    logging calls from the code line this function was called from.

    Usage example::

        for i in range(100):
            log_throttling.by_count(logger, once_every=10).info(
                "This line will only log values that are multiples of 10: %s", i
            )

    **Notes**:

    \\1. Throttling is configured per code line that this function is called from.
    Changing the parameter from that used previously for that line will reset the
    throttling counter for the line.

    \\2. Throttling does not nest. e.g.::

        log_throttling.by_time(log_throttling.by_count(logger, 10), 1).info("...")

    Will simply ignore the nested `by_count`.

    :param logger: A `logging.Logger` object to "wrap". The return value from this
        function can be used just like a normal logger.
    :param once_every: The number of logging calls for which a single call is
        allowed to be written.
    :return: A throttled `logging.Logger`-like object.
    """
    strategy = _ThrottlingByCount(once_every)
    return _by_custom_strategy(logger, strategy)
9c67c84a4e4371a25e5947f6fcb92b860cc12e9e
27,639
def load_ps_label(frame_id):
    """
    :param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, w, l, h, ry, label, scores]
    """
    if frame_id in PSEUDO_LABELS:
        gt_box = PSEUDO_LABELS[frame_id]['gt_boxes']
    else:
        raise ValueError('Cannot find pseudo label for frame: %s' % frame_id)

    return gt_box
53b73ddfe4a51676f7e81f9e266724ccc5e494fa
27,641
import json


async def execute(
    url_path: str,
    body: dict,
    codef,
    service_type: ServiceType,
) -> dict:
    """
    Executes an API request.

    It is recommended to call this function from inside the functions that are
    actually exposed to users.

    :param url_path: request URL path
    :param body: POST request body
    :param codef: codef instance
    :param service_type: service type
    :return:
    """
    req_domain = get_codef_domain(service_type)
    client_id, client_secret = codef.get_client_info(service_type)
    await set_token(client_id, client_secret, codef, service_type)
    body = json.dumps(body, ensure_ascii=False)
    return await request_product(
        req_domain + url_path,
        codef.get_access_token(service_type),
        body,
        codef.get_session(),
    )
e0c55b38697121e3fa671fc7585fe477c977c724
27,642
def main() -> int:
    """ Execute all tasks. """
    get_googletest()
    return 0
c2850f781245cacd77b3707881b337a113dcb048
27,643
import asyncio async def async_unload_entry( hass: HomeAssistantType, config_entry: config_entries.ConfigEntry, ) -> bool: """Unload Energosbyt Plus entry""" log_prefix = _make_log_prefix(config_entry, "setup") entry_id = config_entry.entry_id update_delegators: UpdateDelegatorsDataType = hass.data[DATA_UPDATE_DELEGATORS].pop( entry_id ) tasks = [ hass.config_entries.async_forward_entry_unload(config_entry, domain) for domain in update_delegators.keys() ] unload_ok = all(await asyncio.gather(*tasks)) if unload_ok: hass.data[DATA_API_OBJECTS].pop(entry_id) hass.data[DATA_FINAL_CONFIG].pop(entry_id) cancel_listener = hass.data[DATA_UPDATE_LISTENERS].pop(entry_id) cancel_listener() _LOGGER.info( log_prefix + ( "Интеграция выгружена" if IS_IN_RUSSIA else "Unloaded configuration entry" ) ) else: _LOGGER.warning( log_prefix + ( "При выгрузке конфигурации произошла ошибка" if IS_IN_RUSSIA else "Failed to unload configuration entry" ) ) return unload_ok
c3e7a928c801f65b4ca997fb8b3734059b95e210
27,644
import numpy as np


def euler_vec(z, y, x, n):
    """
    Return (n,3,3) tensor with each (3,3) block containing an Euler rotation
    with angles z, y, x. Optionally each of z, y, x can be a vector of length n.
    """
    L = np.zeros((n, 3, 3), "d")
    cosx, sinx = np.cos(x), np.sin(x)
    L[:, 0, 0] = 1
    L[:, 1, 1] = L[:, 2, 2] = cosx
    L[:, 1, 2] = -sinx
    L[:, 2, 1] = sinx

    N = np.zeros((n, 3, 3), "d")
    cosy, siny = np.cos(y), np.sin(y)
    N[:, 0, 0] = N[:, 2, 2] = cosy
    N[:, 1, 1] = 1
    N[:, 0, 2] = siny
    N[:, 2, 0] = -siny

    ret = np.einsum("ijk,ikl->ijl", L, N)

    M = np.zeros((n, 3, 3), "d")
    cosz, sinz = np.cos(z), np.sin(z)
    M[:, 0, 0] = M[:, 1, 1] = cosz
    M[:, 0, 1] = -sinz
    M[:, 1, 0] = sinz
    M[:, 2, 2] = 1

    ret = np.einsum("ijk,ikl->ijl", ret, M)
    return ret
799dd414ff8fc1aa405072bb2d5d155751ad86a4
27,645
def ReadBenderEllipse( filename, dataFrame=False, headerLine=None, useDefaultColumnNames=True ): """Read in an ellipse fit generated by Bender/Saglia code and store it in a dictionary (or, optionally, a ListDataFrame object). Columns are converted to 1-D numpy arrays. headerLine indicates which line contains the column titles (first line = 1, etc.); the actual data is assumed to start immediately after. Normally, the function attempts to locate the header line automatically (first line in file with same number of elements [excluding any initial "#"] as last line in file). The headerLine keyword is mainly useful for perverse situations (e.g., there is a line in the header that happens to have 12 words in it). Because we (currently) don't know how the Bender code handles position angles, we don't attempt to "correct" the PA. """ lines = open(filename).readlines() nTotalLines = len(lines) lastLine = lines[-1] nCols = len(lastLine.split()) # find the header line -- should be first line which has same number of elements # as the last line in the file if headerLine is None: headerString = None for i in range(nTotalLines): tmpLine = lines[i].lstrip("#") if len(tmpLine.split()) == nCols: headerString = tmpLine headerLineIndex = i break if headerString is None: print("Unable to find header line!\n") return None else: headerLineIndex = headerLine - 1 headerString = lines[headerLineIndex] if useDefaultColumnNames: colheaders = DEFAULT_BENDER_COLUMNS else: colheaders = headerString.split() # get rid of excess space at end, if any colheaders[-1] = colheaders[-1].strip() # find first data line: firstDataLine = None for j in range(headerLineIndex + 1, nTotalLines): tmpLine = lines[j] if len(tmpLine.split()) == nCols: firstDataLine = j break if firstDataLine is None: print("Unable to find first data line!\n") return None dataLines = [line for line in lines[firstDataLine:] if line[0] != "#"] nDataLines = len(dataLines) dataDict = {} for i in range(nCols): cname = colheaders[i] dataDict[cname] = np.array([ float(line.split()[i]) for line in dataLines ]) dataDict["r_eq"] = EquivRadius(dataDict) colheaders.append("r_eq") # Convert to dataFrame, if requested: if dataFrame is True: frameList = [] for cname in colheaders: frameList.append(dataDict[cname]) result = du.ListDataFrame(frameList, colheaders) # extra conveninces #result.AddColumnName("sma", "a") #result.AddColumnName("intens", "i") # add meta-data result.tableFile = filename else: result = dataDict return result
8d8f816f2ac7375bcc6814c34fd69a11e352255b
27,646
def getQuote(symbolStringCSV, detailFlag='ALL'):
    """
    Returns the live quote of a single or many companies

    symbolStringCSV <str> is a comma separated value of tickers
    detailFlag <'ALL' or 'INTRADAY'> specifies whether all data is returned
    or just a subset with intraday

    sample usage:
        getQuote('TVIX, GOOG', detailFlag='INTRADAY')
    """
    url = urlRoot().format('market', 'quote/' + symbolStringCSV) + '?' + \
        'detailFlag={}'.format(detailFlag)
    # print url
    return accessMethod(url)
b40489c5d0680126abf58d32f7e487332599ea8b
27,647
def get_user_resources_permissions_dict(user, request, resource_types=None, resource_ids=None, inherit_groups_permissions=True, resolve_groups_permissions=False): # type: (models.User, Request, Optional[List[Str]], Optional[List[int]], bool, bool) -> ResourcePermissionMap """ Creates a dictionary of resources ID with corresponding permissions of the user. .. seealso:: :func:`regroup_permissions_by_resource` :param user: user for which to find resources permissions :param request: request with database session connection :param resource_types: filter the search query with only the specified resource types :param resource_ids: filter the search query to only the specified resource IDs :param inherit_groups_permissions: Whether to include group inherited permissions from user memberships or not. If ``False``, return only user-specific resource permissions. Otherwise, resolve inherited permissions using all groups the user is member of. :param resolve_groups_permissions: whether to combine corresponding user/group permissions into one or not. :return: Only resources which the user has permissions on, or including all :term:`Inherited Permissions`, according to :paramref:`inherit_groups_permissions` argument. """ ax.verify_param(user, not_none=True, http_error=HTTPNotFound, msg_on_fail=s.UserResourcePermissions_GET_NotFoundResponseSchema.description) # full list of user/groups permissions, filter afterwards according to flags res_perm_tuple_list = UserService.resources_with_possible_perms( user, resource_ids=resource_ids, resource_types=resource_types, db_session=request.db) if not inherit_groups_permissions and not resolve_groups_permissions: res_perm_tuple_list = filter_user_permission(res_perm_tuple_list, user) return regroup_permissions_by_resource(res_perm_tuple_list, resolve=resolve_groups_permissions)
6c3c647f304167328282505f118633654b9422a2
27,648
import json

import requests


def create_new_index(index_name: str):
    """
    Step-by-step algorithm inside.

    :param index_name: str: name of the current index version
    :return: new_index_name: str: name of the updated index
    """
    print("*" * 10)
    # get the new index name
    new_index_name = up_index_version(index_name)
    print(index_name, "->", new_index_name)

    # create the new index with the settings of the old one
    new_index_url = f"{ES_URL}/{new_index_name}"

    # read the settings of the existing index
    with open(f"{dir_name_settings}/{index_name}.json", "r+") as f:
        settings = json.load(f)

    # check whether there are settings changes
    new_settings = get_new_settings()
    settings["settings"]["index"].pop("sort", None)
    if new_settings:
        print(f"[INFO] settings update: {new_settings}")
        merge_settings(settings, new_settings)
        print("[INFO] settings updated")

    # save the new settings for the index
    with open(f"{dir_name}/new_settings/{new_index_name}.json", "w") as f:
        json.dump(settings, f, **JSON_DUMP_PARAMS)

    # read the mapping of the existing index
    with open(f"{dir_name_mappings}/{index_name}.json", "r+") as f:
        mappings = json.load(f)

    # check whether there are mapping updates BEFORE the PUT
    if RECOURSIVE_MAPPING_UPDATE:
        check_mappings_for_updates(mappings, index_name)

    # save the new mapping for the index
    with open(f"{dir_name_new_mappings}/{new_index_name}.json", "w") as fw:
        json.dump(mappings, fw, **JSON_DUMP_PARAMS)

    # PUT the new settings and mapping
    if REALLY:
        print(f"request PUT {new_index_url} settings")
        resp = requests.put(new_index_url, json={**settings, **mappings})
        if resp.status_code == 200:
            print(f"[WARN] {resp.text}")
        else:
            print(f"[WARN] {resp.text}")
    print("*" * 10)
    return new_index_name
797a1f2fa556f640588837c644bfbbf2b425daab
27,649
def _parse_slice_str(slice_str): """Parses the given string as a multidimensional array slice and returns a list of slice objects and integer indices.""" is_valid = False if len(slice_str) > 2: is_valid = slice_str[0] == "[" and slice_str[-1] == "]" sliced_inds = [] if is_valid: slice_str_list = [x.strip() for x in slice_str[1:-1].split(",")] for s in slice_str_list: parts = s.split(":") if len(parts) > 3: is_valid = False break if len(parts) == 1: try: sliced_inds.append(int(s)) except: is_valid = False break else: try: start = int(parts[0]) if len(parts[0]) > 0 else None stop = int(parts[1]) if len(parts[1]) > 0 else None if len(parts) == 3: step = int(parts[2]) if len(parts[2]) > 0 else None else: step = None except: is_valid = False break sliced_inds.append(slice(start, stop, step)) if not is_valid: raise ValueError("Invalid slice specified: %s" % slice_str) return sliced_inds
6eb7a6b5d1dc2ee57e878b37be70e1e75d7d6ecc
27,650
def AverageZComparison(x, y):
    """
    Take the average of the second and third element in an array and compare
    which is bigger. To be used in conjunction with the sort function.
    """
    xsum = x[1] + x[2]
    ysum = y[1] + y[2]
    if xsum < ysum:
        return -1
    if xsum > ysum:
        return 1
    return 0
84c9e7b92df4b3e4914c769293f71790def5e4dd
27,651
import torch.nn as nn


def convblock(in_channels, out_channels, kernel_size, stride=1, padding=0, use_bn=True):
    """ Returns convolution block """
    if use_bn:
        return [
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        ]
    else:
        return [
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
            nn.ReLU(True)
        ]
157cc5a88e0f1e0d0f4f3afc228fd25d1a67d058
27,652
def get_caffe_op_type(layer, input_channels=1, output_channels=1): """ Gets the relevant Toolkit Enum for the corresponding Caffe layer stage type. :param layer: The particular layer field of the caffe Net msg that we want to discover the type. :return: StageType Enum """ if isConvolution(layer.type): return StageType.convolution if isFCL(layer.type): return StageType.fully_connected_layer if isSoftMax(layer.type): return StageType.soft_max if isPooling(layer.type): pooling_type = layer.pooling_param.pool if pooling_type == 0: # Max return StageType.max_pooling if pooling_type == 1: # Average return StageType.average_pooling if pooling_type == 2: # Stochastic throw_error(ErrorTable.StageTypeNotSupported, "Stochastic Pooling") return StageType.stochastic_pooling if isLRN(layer.type): return StageType.LRN if isEltwise(layer.type): if layer.type == 'Bias': return StageType.eltwise_sum elif layer.eltwise_param.operation == 0: return StageType.eltwise_prod elif layer.eltwise_param.operation == 2: return StageType.eltwise_max else: return StageType.eltwise_sum if isBatchNorm(layer.type) or isScale(layer.type): return StageType.scale if isPReLU(layer.type): return StageType.prelu if isSigmoid(layer.type): return StageType.sigmoid if isTanH(layer.type): return StageType.tanh if isDeconvolution(layer.type): return StageType.deconvolution if isReshape(layer.type): return StageType.reshape if isFlatten(layer.type): return StageType.toplanemajor if isPower(layer.type): return StageType.power if isCrop(layer.type): return StageType.crop if isDepthwiseConvolution(layer, output_channels, input_channels): return StageType.depthwise_convolution if isPermute(layer.type): return StageType.permute if isNormalize(layer.type): return StageType.normalize if isPriorBox(layer.type): return StageType.prior_box if isDetectionOutput(layer.type): return StageType.detection_output throw_error(ErrorTable.StageTypeNotSupported, layer.type)
e912014862643e3724fad8f10c0b8ee7133c77e1
27,653
def comp_volumes(self):
    """Compute the Lamination volumes (Vlam, Vvent, Vslot, Vwind)

    Parameters
    ----------
    self : LamSlotWind
        A LamSlotWind object

    Returns
    -------
    V_dict: dict
        Lamination volume dictionary (Vlam, Vvent, Vslot, Vwind) [m**3]
    """
    V_dict = LamSlot.comp_volumes(self)
    Lf = self.comp_length()  # Include radial ventilation ducts
    V_dict["Vwind"] = Lf * self.slot.Zs * self.slot.comp_surface_wind()
    return V_dict
4b9444b9ac7c78e6a7719ab73dfa5cabcd78debf
27,654
def elliptical_orbit(): """Draw an example of a planet with an elliptical orbit around its star.""" fig, axes = plt.subplots(1, 1) orbit = Ellipse(xy=(0, 0), width=2, height=1.5, facecolor='lightblue') axes.add_artist(orbit) axes.plot([-1, 0], [0, 0]) axes.annotate( 'semi-major axis', xy=(-0.5, 0), xytext=(-0.8, -0.2), arrowprops=dict(arrowstyle='wedge') ) axes.annotate( 'orbit center', xy=(0, 0), xytext=(-0.21, 0.115), arrowprops=dict(arrowstyle='wedge') ) plt.plot( [-.75], [0.5], marker='o', markersize=4, color='green', label='planet' ) plt.plot( [0], [0], marker='o', markersize=10, color='orange', label='star' ) # formatting plt.xlim(-1.25, 1.25) plt.ylim(-1.1, 0.75) plt.legend(loc='lower center', ncol=2) # remove axes axes.xaxis.set_visible(False) axes.yaxis.set_visible(False) # remove box around image for spine in axes.spines: axes.spines[spine].set_visible(False) return axes
ccb12b80111009a59cacff522a35cd41a6e73ad4
27,655
import datetime


def get_date_range_for_date(date_str, interval):
    """
    Given a date string, parse it and derive a range based on the given
    interval. The interval is inclusive on the lower end, and exclusive on the
    higher end. For example, given a date str of 2019-03-10 and a 'month'
    interval, this will return a range of 2019-03-01 -- 2019-03-31.

    :param date_str: Any ISO date or partial date. 2019, 2019-03, 2019-03-01,
      2019-12-18 21:00:00
    :param interval: Any interval defined in
      globus_portal_framework.constants.FILTER_DATE_RANGES. Examples include:
      'year', 'month', 'day', 'hour'
    :return: A date range dict. Example:
      {
        'from': '2019-12-18 21:00:00'
        'to': '2019-12-18 21:00:01'
      }
    """
    dt = parse_date_filter(date_str)['datetime']
    # If filtering on a month or year, chop off the extra part of the
    # datetime so we don't accidentally search on the previous month
    # or next month
    day = datetime.timedelta(days=1)
    if interval == FILTER_SECOND:
        second = datetime.timedelta(seconds=1)
        from_d, to_d = dt - second, dt + second
    elif interval == FILTER_MINUTE:
        from_d = dt.replace(second=0)
        to_d = from_d + datetime.timedelta(seconds=59)
    elif interval == FILTER_HOUR:
        from_d = dt.replace(minute=0, second=0)
        to_d = from_d + datetime.timedelta(minutes=59, seconds=59)
    elif interval == FILTER_DAY:
        dt = dt.replace(hour=0, minute=0, second=0)
        from_d, to_d = dt, dt + day
    elif interval == FILTER_MONTH:
        from_d = dt.replace(day=1, hour=0, minute=0, second=0)
        inc_month = 1 if dt.month == 12 else dt.month + 1
        inc_year = dt.year + 1 if inc_month == 1 else dt.year
        to_d = from_d.replace(month=inc_month, year=inc_year) - day
    elif interval == FILTER_YEAR:
        dt = dt.replace(day=1, month=1, hour=0, minute=0, second=0)
        year = datetime.timedelta(days=365)
        from_d, to_d = dt, dt + year
    else:
        raise exc.GlobusPortalException('Invalid date type {}'
                                        ''.format(interval))
    # Globus search can handle any time format, so using the most precise will
    # work every time.
    dt_format_type = DATETIME_PARTIAL_FORMATS['time']
    return {
        'from': from_d.strftime(dt_format_type),
        'to': to_d.strftime(dt_format_type)
    }
4b2d1f47d4984fbd9fe590e2698b288560aa0162
27,656
def sigmaStarDFA(sigma=None):
    """ Given an alphabet S returns the minimal DFA for S*

    :param sigma: set of symbols
    :return: DFA

    .. versionadded:: 1.2"""
    if sigma is None:
        raise
    d = DFA()
    d.setSigma(sigma)
    i = d.addState()
    d.setInitial(i)
    d.addFinal(i)
    for a in d.Sigma:
        d.addTransition(i, a, i)
    return d
51ab7fa365b356f03aab57b777bd245163ba6b02
27,657
from datetime import datetime, timezone


def timestamp_to_datetime(seconds, tz=None):
    """Returns a datetime.datetime of `seconds` in UTC

    :param seconds: timestamp relative to the epoch
    :param tz: timezone of the timestamp
    """
    if tz is None:
        tz = timezone.utc
    dt = datetime.fromtimestamp(seconds, tz)
    return dt.astimezone(timezone.utc)
2b53d69aeb2c7e5f88602af47d2d7b1ee40e0730
27,659
import re def initialize(plugins, exclude_regex=None, rootdir='.'): """Scans the entire codebase for high entropy strings, and returns a SecretsCollection object. :type plugins: tuple of detect_secrets.plugins.base.BasePlugin :param plugins: rules to initialize the SecretsCollection with. :type exclude_regex: str|None :type rootdir: str :rtype: SecretsCollection """ output = SecretsCollection(plugins, exclude_regex) git_files = _get_git_tracked_files(rootdir) if not git_files: return output if exclude_regex: regex = re.compile(exclude_regex, re.IGNORECASE) git_files = filter( lambda x: not regex.search(x), git_files ) for file in git_files: output.scan_file(file) return output
80c262041736c58f79a4356781b3ab831b6f1daa
27,660
def decompose_job_id(job_id):
    """Thin wrapper around generic decompose_job_id to use our local SPACER."""
    return utils_decompose_job_id(job_id, spacer=SPACER)
f41553a6864940816da53982746a9f06220347e6
27,661
# `U` (weight-initialisation helpers) and `T` (theano.tensor) are assumed to be
# provided by the surrounding project, e.g.:
#   import theano.tensor as T
#   from . import utils as U


def build(P, word_size, first_hidden_size, encoding_size):
    """
    create entity and relation encoding
    """
    P["W_word_left_input"] = U.initial_weights(2 * word_size, first_hidden_size)
    P["W_word_right_input"] = U.initial_weights(2 * word_size, first_hidden_size)
    P["W_encoding_output"] = U.initial_weights(2 * first_hidden_size, encoding_size)

    def batched_triplet_encoding(e_left, relation, e_right):
        left_input = T.concatenate([e_left, relation], axis=1)    # batched version
        right_input = T.concatenate([relation, e_right], axis=1)  # batched version
        left_hidden = T.tanh(T.dot(left_input, P["W_word_left_input"]))
        right_hidden = T.tanh(T.dot(right_input, P["W_word_right_input"]))
        all_hidden = T.concatenate([left_hidden, right_hidden], axis=1)  # batched version
        encoding = T.tanh(T.dot(all_hidden, P["W_encoding_output"]))
        return encoding

    def vector_triplet_encoding(e_left, relation, e_right):
        left_input = T.concatenate([e_left, relation], axis=0)    # vector version
        right_input = T.concatenate([relation, e_right], axis=0)  # vector version
        left_hidden = T.tanh(T.dot(left_input, P["W_word_left_input"]))
        right_hidden = T.tanh(T.dot(right_input, P["W_word_right_input"]))
        all_hidden = T.concatenate([left_hidden, right_hidden], axis=0)  # vector version
        encoding = T.tanh(T.dot(all_hidden, P["W_encoding_output"]))
        return encoding

    return batched_triplet_encoding, vector_triplet_encoding
9c465b9033b23aa938ea600326bc5add1000edb5
27,662
def validate_path_for_get_public_key(path: list, slip44_id: int) -> bool:
    """
    Checks if path has at least three hardened items and slip44 id matches.
    The path is allowed to have more than three items, but all the following
    items have to be non-hardened.
    """
    length = len(path)
    if length < 3 or length > 5:
        return False
    if path[0] != 44 | HARDENED:
        return False
    if path[1] != slip44_id | HARDENED:
        return False
    if path[2] < HARDENED or path[2] > 20 | HARDENED:
        return False
    if length > 3 and is_hardened(path[3]):
        return False
    if length > 4 and is_hardened(path[4]):
        return False
    return True
1e0ef325283a2cfc6aceba748c18fa2dbc9a34c0
27,664
import base64
from datetime import datetime

# TemplateLookup comes from Mako (assumed dependency)
from mako.lookup import TemplateLookup


def generate_result(pic_type, img_path, predicted_breed=''):
    """
    Generate a result of predicted dog breed for display in Jupyter Notebook.

    Args:
        pic_type (str): Type of picture, either 'dog', 'human' or 'error'
        img_path (str): Path to the image provided by the user
        predicted_breed (str): The breed that was predicted based on the image

    Returns:
        str: A URI of the base64 encoded result in HTML
    """
    img_content = None
    with open(img_path, 'rb') as img:
        img_content = base64.b64encode(img.read())

    ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    lookup = TemplateLookup(['templates/'], output_encoding='utf-8',
                            strict_undefined=True)
    template = lookup.get_template('dog.html.mako')
    raw_html = template.render(
        pic_type=pic_type,
        img_content=img_content.decode(),
        timestamp=ts,
        breed=predicted_breed
    )
    return f'data:text/html;base64,{base64.b64encode(raw_html).decode()}'
345db6c17e0ec2a3d65963ba403200c1bde3229c
27,665
import pandas as pd


def concatSeriesMovies(dataFrame1, dataFrame2):
    """ Join, concat two dataFrames

    Parameters:
        dataFrame1: string
            The name of the dataFrame the user wants to concat.
        dataFrame2: string
            The name of the dataFrame the user wants to concat.

    Returns:
        a new dataFrame
    """
    concatedDataFrame = pd.concat([dataFrame1, dataFrame2], ignore_index=True)
    return concatedDataFrame
cb063ca82dac809f05a60a920f77d7a53ed5d3af
27,666
def Pattern7(s): """ Compute the correlator for this pattern: ↓ ○ ↑ ↑ and symmetry-equivalent patterns """ res = 0.0 s = np.pad(s, ((0, 0), (2, 2), (2, 2))) L = s.shape[-1] for i in range(L-2): for j in range(L-2): res += s[0, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[2, i+2, j+1] res += s[0, i+1, j] * s[0, i+1, j+1] * s[1, i, j+1] * s[2, i, j+2] res += s[2, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[0, i+2, j+1] res += s[2, i+1, j] * s[1, i+1, j+1] * s[0, i, j+1] * s[0, i, j+2] res += s[2, i, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[0, i+1, j+2] res += s[0, i, j+1] * s[0, i+1, j+1] * s[1, i+1, j] * s[2, i+2, j] res += s[0, i, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[2, i+1, j+2] res += s[2, i, j+1] * s[0, i+1, j] * s[1, i+1, j+1] * s[0, i+2, j] res += s[1, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[2, i+2, j+1] res += s[1, i+1, j] * s[1, i+1, j+1] * s[0, i, j+1] * s[2, i, j+2] res += s[2, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[1, i+2, j+1] res += s[2, i+1, j] * s[0, i+1, j+1] * s[1, i, j+1] * s[1, i, j+2] res += s[2, i, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[1, i+1, j+2] res += s[1, i, j+1] * s[1, i+1, j+1] * s[0, i+1, j] * s[2, i+2, j] res += s[1, i, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[2, i+1, j+2] res += s[2, i, j+1] * s[1, i+1, j] * s[0, i+1, j+1] * s[1, i+2, j] return res
2dea3f7cae06cec65f9ee416b61367320d9893cc
27,667
import cv2


def list_camera_ports():
    """
    Test the ports and returns a tuple with the available ports and the ones
    that are working.
    """
    non_working_ports = []
    working_ports = []
    available_ports = []
    dev_port = 0
    while len(non_working_ports) <= 3:
        # If there are more than 3 non working ports stop the testing.
        camera = cv2.VideoCapture(dev_port)
        if not camera.isOpened():
            non_working_ports.append(dev_port)
        else:
            is_reading, img = camera.read()
            w = camera.get(cv2.CAP_PROP_FRAME_WIDTH)
            h = camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
            if is_reading:
                print("Port %s is working and reads images (%s x %s)" % (dev_port, w, h))
                working_ports.append(dev_port)
            else:
                available_ports.append(dev_port)
        dev_port += 1
    return available_ports, working_ports, non_working_ports
192cdb62807f89a05a67f7efbbc267bcc1b1f305
27,668
from pathlib import Path def parse_taxid_names(file_path): """ Parse the names.dmp file and output a dictionary mapping names to taxids (multiple different keys) and taxids to scientific names. Parameters ---------- file_path : str The path to the names.dmp file. Returns ------- name2taxid : dict Keys are all possible names and values are taxids. taxid2name : dict Keys are taxids and values are scientific names. """ names = Path(file_path) with names.open() as f: lines_processed = 0 name2taxid = {} taxid2name = {} for line in f: lines_processed += 1 if lines_processed % 1000000 == 0: print('processing line', str(lines_processed)) entries = [entry.strip() for entry in line.split('|')] name2taxid[entries[1]] = entries[0] if 'scientific name' in line: taxid2name[entries[0]] = entries[1] return name2taxid, taxid2name
1d136f73a56ac8d3c02fd53c6e7928a39440e27a
27,669
def dn_outfunc(e1, e2, W, V1=None, V2=None, b=None): """Applies a bilinear function based on given parameters. This is a building block of Neural Tensor Network (see the reference paper below). It takes two input variables and one or four parameters, and outputs one variable. To be precise, denote six input arrays mathematically by :math:`e^1\\in \\mathbb{R}^{I\\cdot J}`, :math:`e^2\\in \\mathbb{R}^{I\\cdot K}`, :math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`, :math:`V^1\\in \\mathbb{R}^{J \\cdot L}`, :math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and :math:`b\\in \\mathbb{R}^{L}`, where :math:`I` is mini-batch size. In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear parameters. The output of forward propagation is calculated as .. math:: y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\ \\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}. Note that V1, V2, b are optional. If these are not given, then this function omits the last three terms in the above equation. .. note:: This function accepts an input variable ``e1`` or ``e2`` of a non-matrix array. In this case, the leading dimension is treated as the batch dimension, and the other dimensions are reduced to one dimension. .. note:: In the original paper, :math:`J` and :math:`K` must be equal and the author denotes :math:`[V^1 V^2]` (concatenation of matrices) by :math:`V`. Args: e1 (~chainer.Variable): Left input variable. e2 (~chainer.Variable): Right input variable. W (~chainer.Variable): Quadratic weight variable. V1 (~chainer.Variable): Left coefficient variable. V2 (~chainer.Variable): Right coefficient variable. b (~chainer.Variable): Bias variable. Returns: ~chainer.Variable: Output variable. See: `Reasoning With Neural Tensor Networks for Knowledge Base Completion <http://papers.nips.cc/paper/5028-reasoning-with-neural-tensor- networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013]. """ flags = [V1 is None, V2 is None, b is None] if any(flags): if not all(flags): raise ValueError('All coefficients and bias for bilinear() must ' 'be None, if at least one of them is None.') return DN_outFunction()(e1, e2, W) else: return DN_outFunction()(e1, e2, W, V1, V2, b)
f1c642b7fcaf91d49df993177b76715774fc17bf
27,672
def action_can_be_queued(action_type):
    """
    Test whether the action_type can be queued.

    Inputs: action_type, int
    Outputs: true or false
    """
    need_args = actions.RAW_FUNCTIONS[action_type].args
    result = False
    for arg in need_args:
        if arg.name == 'queued':
            result = True
            break
    return result
e05f56b047d5c14bc2bcb27e82e789a41b12e090
27,673
def bench4(x):
    """A benchmark function for test purposes.

    f(x) = float(x) ** 2
    where x is a string. It has a single minimum with f(x*) = 0 at x* = "0".
    This benchmark is used for checking support of categorical variables.
    """
    return float(x[0]) ** 2
2c7bf171f917b599db6f7553b2a959cb8c691c93
27,674
import numpy as np


def drop_columns_from_dataframe_if_all_elements_are_nan(df, elements_list=['', '']):
    """
    Takes two parameters:
        df: Dataframe
        elements_list: By default it will identify np.nan. If you want to add
            additional elements, as an example, you can do this ['', ' ']
    """
    m = df.applymap(lambda i: i if i not in elements_list else np.nan).apply(
        lambda c: c.isnull().all())
    columns_to_drop = df.columns[m]
    df.drop(columns_to_drop, axis=1, inplace=True)
    return df
5f66e8c33a918872e2cfeabff2116a43fda9095f
27,675
def drop_db(code, confirm_by_typing_db_code_again=None):
    """
    Delete a history database.

    Parameters
    ----------
    code : str, required
        the database code

    confirm_by_typing_db_code_again : str, required
        enter the db code again to confirm you want to drop the database,
        its config, and all its data

    Returns
    -------
    dict
        status message
    """
    params = {"confirm_by_typing_db_code_again": confirm_by_typing_db_code_again}
    response = houston.delete("/history/databases/{0}".format(code), params=params)
    houston.raise_for_status_with_json(response)
    return response.json()
a135ad9e3abdecb891b0ef5b6a1754d01a85fda8
27,676
import numpy as np


def _nex_group(seq, core, spatial, c_num=4, s_num=4):
    """Build spatial stream index"""
    # step 1
    ant_num = c_num * s_num
    seq_diff = np.diff(seq)
    offset = np.where(seq_diff != 0)[0]
    offset = np.r_[0, offset + 1]
    count = np.diff(np.r_[offset, len(seq)])
    offset = offset[count == ant_num]
    offset = offset[:, None] + np.r_[:ant_num]

    # step 2
    core = core[offset]
    spatial = spatial[offset]
    p = core * s_num + spatial
    p = np.argsort(p, axis=-1)
    offset = offset[:, :1] + p
    offset = offset.reshape(-1, c_num, s_num)
    return offset
2c9ff96b965c48aa0a0aace2cfab2e5d8b1d3a9e
27,677
import asyncio async def test_async_init_timeout(circuit): """Test the async initialization time_out.""" async def w3(): await asyncio.sleep(0.1) return 3 logger = TimeLogger('logger') out = edzed.ValuePoll( 'out', func=w3, interval=10, # don't care init_timeout=0.2, on_output=edzed.Event(logger), initdef='DEFAULT') asyncio.create_task(circuit.run_forever()) await circuit.wait_init() await circuit.shutdown() logger.compare([(100, 3)])
1915a66e8e82987f878f05a4a9016037e25c7da0
27,678
def compute_diffraction( bundle, key, s, eta, L, nlebedev=74, nomega=12, mode="xray", form="raw", anisotropy="cos2", print_level=False, ): """Compute the I(s, eta) elastic scattering signal for a Bundle. See aimsprop/notes/ued for details on this property. Notes: * All frames for each initial condition (IC) in bundle should be aligned so that the transition dipole moment from S0 -> Sex at t=0 is on z. This is required for proper computation of anisotropy. * All frames should be weighted by geometric considerations at the IC (e.g., conformational wells, Wigner weights, etc), by the cross section for the optical transition at the IC (e.g., oscillator strength and excitation energy window), and by the frame weight due to non-adiabatic dynamics. Params: bundle (Bundle) - the Bundle object to compute the property for (modified in place) key (str) - the name of the property. s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The relationship between s and theta (scattering angle) is given as, s = 4 pi / L * sin(theta / 2). eta (np.ndarray) - list of azimuthal scattering angles in radians. L (float) - effective wavelength of scattering particle (x-ray wavelength or UED deBroglie wavelength) in Angstrom. Used to convert through scattering angle theta. nlebedev (int) - Lebedev number to use for solid angle orientation quadrature. nomega (int) - number of uniform quadrature points to use for plane orientation quadrature. mode (str) - 'xray' or 'ued' for selection of form factors form (str) - 'raw' or 'mod' for modified/raw diffraction intensities I(s) or M(s). anisotropy (str) - 'none' or 'cos2' for isotropic of cos^2 (z) anisotropty. print_level (bool) - print progress if true (useful to track long property computations) Result/Return: bundle - reference to the input Bundle object. 
The properties """ # Validity checks if mode not in ["xray", "ued"]: raise ValueError("Unknown mode: %s" % mode) if form not in ["raw", "mod"]: raise ValueError("Unknown form: %s" % form) if anisotropy not in ["none", "cos2"]: raise ValueError("Unknown anisotropy: %s" % anisotropy) # Compute scattering angles via Bragg equation theta = 2.0 * np.arcsin(s * L / (4.0 * np.pi)) tt, ee = np.meshgrid(theta, eta, indexing="ij") ss, ee = np.meshgrid(s, eta, indexing="ij") # Compute scattering vectors sx = ss * np.cos(tt / 2.0) * np.sin(ee) sy = ss * np.sin(tt / 2.0) sz = ss * np.cos(tt / 2.0) * np.cos(ee) # Get a rotation quadrature for the orientations of the frames if nlebedev == 1 and nomega == 1: # Fixed orientation Rs = [np.eye(3)] ws = [1.0] else: # Rotation quadrature Rs, ws = rotation.rotation_quadrature(nlebedev=nlebedev, nomega=nomega) # Get atomic form factors for appropriate x-ray/ued mode factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode) # Compute atomic scattering Iat D = np.zeros_like(sx) for A, factor in enumerate(factors): F = factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=0.0, y=0.0, z=0.0) D += (np.abs(F) ** 2).real # Compute IAM scattering, integrating over all orientation angles for find, frame in enumerate(bundle.frames): if print_level: print(("Frame %5d of %5d" % (find, len(bundle.frames)))) I = np.zeros_like(sx) for R, w in zip(Rs, ws): # cos(z)^2 pump anisotropy cos2 = R[2, 2] ** 2 if anisotropy == "cos2" else 1.0 # Rotated molecule xyz = np.dot(frame.xyz, R) # Compute diffraction N = np.zeros_like(I, dtype=complex) for A, factor in enumerate(factors): x = xyz[A, 0] y = xyz[A, 1] z = xyz[A, 2] N += factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=x, y=y, z=z) F = (np.abs(N) ** 2).real if form == "mod": F = (F - D) / D I += w * cos2 * F frame.properties[key] = I return bundle
380043d47e6a5786ab2907a4b1bd11495784f052
27,679
def convert_dict_id_values_to_strings(dict_list):
    """This function ensures that the ``id`` keys in a list of dictionaries use string values.

    :param dict_list: List (or tuple) of dictionaries (or a single dictionary) containing API object data
    :type dict_list: list, tuple, dict, None
    :returns: A new dictionary list with properly formatted ``id`` values
    :raises: :py:exc:`TypeError`
    """
    dict_list = [dict_list] if isinstance(dict_list, dict) else dict_list
    new_dict_list = []
    for single_dict in dict_list:
        if not isinstance(single_dict, dict):
            raise TypeError("The 'dict_list' argument must be a dictionary or a list of dictionaries.")
        if 'id' in single_dict and not isinstance(single_dict.get('id'), str):
            single_dict['id'] = str(single_dict.get('id'))
        new_dict_list.append(single_dict)
    return new_dict_list
7d1348910e5802c928b94bc74d71f3ce35770215
27,680
def delete_images(request):
    """ Deletes images which are passed via HTTP query. """
    Image.objects.filter(pk__in=request.POST.getlist("images")).delete()
    return HttpResponseRedirect(reverse("lfs_manage_global_images"))
29e2e3be3730a2a3552bc78feb8e8817e594dab5
27,681
import fnmatch def is_requirements_file(location): """ Return True if the ``location`` is likely for a pip requirements file. For example:: >>> is_requirements_file('dev-requirements.txt') True >>> is_requirements_file('requirements.txt') True >>> is_requirements_file('requirements.in') True >>> is_requirements_file('requirements.pip') True >>> is_requirements_file('requirements-dev.txt') True >>> is_requirements_file('some-requirements-dev.txt') True >>> is_requirements_file('reqs.txt') False >>> is_requirements_file('requires.txt') True """ filename = fileutils.file_name(location) req_files = ( '*requirements*.txt', '*requirements*.pip', '*requirements*.in', 'requires.txt', ) return any(fnmatch.fnmatchcase(filename, rf) for rf in req_files)
2577595ef6d2bcb553a0354623c40589f96a5fb3
27,682
from math import floor, log


def bisect_steps_remaining():
    """Estimate of remaining steps, including the current one. This is an approximation."""
    # https://github.com/git/git/blob/566a1439f6f56c2171b8853ddbca0ad3f5098770/bisect.c#L1043
    return floor(log(bisect_revisions(), 2))
ded51395ead2c7ea76aa4ff3c2cf7d6195f81537
27,683
def create_lstm_model(fingerprint_input, model_settings, model_size_info, is_training): """Builds a model with a lstm layer (with output projection layer and peep-hole connections) Based on model described in https://arxiv.org/abs/1705.02411 model_size_info: [projection size, memory cells in LSTM] """ if is_training: dropout_prob = tf.placeholder(tf.float32, name='dropout_prob') input_frequency_size = model_settings['dct_coefficient_count'] input_time_size = model_settings['spectrogram_length'] fingerprint_4d = tf.reshape(fingerprint_input, [-1, input_time_size, input_frequency_size]) num_classes = model_settings['label_count'] projection_units = model_size_info[0] LSTM_units = model_size_info[1] with tf.name_scope('LSTM-Layer'): with tf.variable_scope("lstm"): lstmcell = tf.contrib.rnn.LSTMCell(LSTM_units, use_peepholes=True, num_proj=projection_units) _, last = tf.nn.dynamic_rnn(cell=lstmcell, inputs=fingerprint_4d, dtype=tf.float32) flow = last[-1] with tf.name_scope('Output-Layer'): W_o = tf.get_variable('W_o', shape=[projection_units, num_classes], initializer=tf.contrib.layers.xavier_initializer()) b_o = tf.get_variable('b_o', shape=[num_classes]) logits = tf.matmul(flow, W_o) + b_o if is_training: return logits, dropout_prob else: return logits
d050e3b984f2aff1b0466b5cab73d262243d6cc9
27,684
from torch import Tensor


def tile(x: Tensor, count: int, dim=0) -> Tensor:
    """
    Tiles x on dimension dim count times. From OpenNMT. Used for beam search.

    :param x: tensor to tile
    :param count: number of tiles
    :param dim: dimension along which the tensor is tiled
    :return: tiled tensor
    """
    if isinstance(x, tuple):
        h, c = x
        return tile(h, count, dim=dim), tile(c, count, dim=dim)

    perm = list(range(len(x.size())))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    batch = x.size(0)
    x = x.view(batch, -1) \
        .transpose(0, 1) \
        .repeat(count, 1) \
        .transpose(0, 1) \
        .contiguous() \
        .view(*out_size)
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x
04b0e507e1600f3cb3185b170ee72286c2d20a7f
27,685
from typing import Callable from typing import Coroutine from typing import Any from typing import Optional import asyncio def cancellable_request(handler: Callable[..., Coroutine[Any, Any, Optional[Any]]]): """this decorator periodically checks if the client disconnected and then will cancel the request and return a 499 code (a la nginx). Usage: decorate the cancellable route and add request: Request as an argument @cancellable_request async def route( _request: Request, ... ) """ @wraps(handler) async def decorator(*args, **kwargs) -> Optional[Any]: request = kwargs["_request"] handler_task = asyncio.create_task( handler(*args, **kwargs), name="cancellable_request/handler" ) auto_cancel_task = asyncio.create_task( _cancel_task_if_client_disconnected(request, handler_task), name="cancellable_request/auto_cancel", ) try: return await handler_task except CancelledError: logger.warning( "request %s was cancelled by client %s!", request.url, request.client ) return Response("Oh No!", status_code=499) finally: auto_cancel_task.cancel() return decorator
10e50a565f45c0e4babf68386b244557e1727bc2
27,686
import time def add_central_server_member_delete_global_error_cert(case, client, ss2_host, ss2_username, ss2_password): """ Restores security server after member being deleted in central server :param case: :param cs_ssh_host: str - central server ssh host :param cs_ssh_user: str - central server ssh username :param cs_ssh_pass: str - central server ssh password :param ca_ssh_host: str - ca ssh host :param ca_ssh_user: str - ca ssh username :param ca_ssh_pass: str - ca ssh password :param client: dict - client info :param ss2_host: str - security server 2 host :param ss2_username: str - security server 2 username :param ss2_password: str - security server 2 password :param ss2_ssh_host: str - security server 2 ssh host :param cert_path: str - certificate filename :param user: dict - user, under which the changes are made :return: """ self = case sync_timeout = 120 def add_cs_member_delete_global_error_cert(): self.log('Open members page') self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.MEMBERS_CSS).click() # MEMBER_10 Add new X-Road Member self.log('MEMBER_10 Add new X-Road Member') add_member_to_cs(self, member=client) self.log('Wait until servers have synchronized') time.sleep(sync_timeout) self.log('Open security server, which was deleted from central server') self.reload_webdriver(url=ss2_host, username=ss2_username, password=ss2_password) self.log('Open keys and certificates') self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.KEYSANDCERTIFICATES_BTN_CSS).click() self.wait_until_visible(type=By.ID, element=keys_and_certificates_table.KEYS_AND_CERTIFICATES_TABLE_ID) self.wait_jquery() self.log('Click on the certificate, which has global error status and delete it') self.by_xpath(keys_and_certificates_table.GLOBAL_ERROR_CERTIFICATE_ROW_XPATH).click() self.by_id(keys_and_certificates_table.DELETE_BTN_ID).click() popups.confirm_dialog_click(self) self.wait_jquery() return add_cs_member_delete_global_error_cert
77ea9a9de677947285f7bda233520bc43df84e18
27,687
def get_country_models(model):
    """
    Get all valid domain-specific models for a given model.

    :param str model:
    :return:
    """
    domains = get_domain_for(model, country=None)
    return ['{}{}-K9'.format(model, domain) for domain in domains]
48e171e304e75216ac0dd2d70613996b8ae5f9e7
27,688
def _cprint_bad_contrast3(fgcolor, bgcolor, bold, underlined):
    """Returns 1 if one of the conditions of poor contrast is matched
    """
    # black on black with LIGHT BG
    _c1 = (fgcolor == 8) and (bgcolor == 0) and (CPRINT_PAR["light_background"])
    if _c1:
        return 1
    else:
        return 0
89d74cfe47fecd3fd21fc3d7f47a5674df80b669
27,690
def download_from_mongo(context, sel_filter, projection): """ Download panda DataFrame from a mongoDB server :param context: execution context :param sel_filter: a SON object specifying elements which must be present for a document to be included in the result set :param projection: a list of field names that should be returned in the result set or a dict specifying the fields to include or exclude. If projection is a list “_id” will always be returned. Use a dict to exclude fields from the result (e.g. projection={‘_id’: False}). :return: panda DataFrame or None :rtype: panda.DataFrame """ df = None client = context.resources.mongo_warehouse.get_connection(context) if client is not None: # get database collection collection = client.get_collection() # retrieve a cursor for required records # https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find context.log.info(f'Document retrieval in progress') cursor = collection.find(filter=sel_filter, projection=projection) entries = list(cursor) context.log.info(f'{len(entries)} documents retrieved') context.log.info(f'DataFrame loading in progress') df = pd.DataFrame.from_dict(entries) # tidy up cursor.close() client.close_connection() context.log.info(f'Loaded {len(df)} records') return df
863a0fdc7761de38d578039c764052ecac2e57f8
27,691
def create_group(current_session, groupname, description):
    """
    Creates a group and returns it
    """
    return gp.create_group(current_session, groupname, description)
0a5adea7b0a57ec3f44d260da250b06c09eca939
27,692
import requests


def call_api(endpoint):
    """
    Build the API URL and request data

    :param str endpoint: specific api endpoint to hit
    :return response: server's response to the request
    """
    url = BASE_URL + endpoint
    try:
        # try to get json data
        response = requests.get(url).json()
    except ValueError:
        # if bytes, convert to str
        response = requests.get(url).content.decode('utf-8')
    except Exception as e:
        response = e
    return handle_response(response)
6f2527c6eb777c2cd0ab80636865d7adda032506
27,693
import inspect def test_callable_args(func, args): """ Return True when this function can be called with the given arguments. """ assert isinstance(args, (list, tuple)) signature = getattr(inspect, 'signature', None) if signature is not None: # For Python 3, use inspect.signature. try: sig = _signatures_cache[func] except KeyError: sig = signature(func) _signatures_cache[func] = sig try: sig.bind(*args) except TypeError: return False else: return True else: # For older Python versions, fall back to using getargspec. spec = inspect.getargspec(func) # Drop the 'self' def drop_self(spec): args, varargs, varkw, defaults = spec if args[0:1] == ['self']: args = args[1:] return inspect.ArgSpec(args, varargs, varkw, defaults) spec = drop_self(spec) # When taking *args, always return True. if spec.varargs is not None: return True # Test whether the given amount of args is between the min and max # accepted argument counts. return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
704697253a88009394297a4688a9b3179b976f0d
27,694
def get_kde_caseduration_json(df, parameters=None):
    """
    Gets the estimation of KDE density for the case durations calculated on the
    log/dataframe (expressed as JSON)

    Parameters
    --------------
    df
        Pandas dataframe
    parameters
        Possible parameters of the algorithm, including:
            Parameters.GRAPH_POINTS -> number of points to include in the graph
            Parameters.CASE_ID_KEY -> Column hosting the Case ID

    Returns
    --------------
    json
        JSON representing the graph points
    """
    cases = get_cases_description(df, parameters=parameters)
    duration_values = [x["caseDuration"] for x in cases.values()]

    return case_duration_commons.get_kde_caseduration_json(duration_values, parameters=parameters)
54262948216c43e0bbd44ec9ed2ed691903d9a9d
27,695
def render_text(name: str, data: str, font_size: int = 10, fig_width_per_char: float = 0.1, fig_height: float = 0.4, img_height: int = None, img_width: int = None, **kwargs): """Render a text string. Args: name: name of the text data: the string to be rendered font_size: text font size fig_width_per_char: the width of each character measured by ``figsize`` of ``plt.subplots()``. fig_height: the height of the text label measured by ``figsize`` of ``plt.subplots()``. img_height (int): height of the output image img_width (int): width of the output image **kwargs: extra arguments forwarded to ``ax.text``. """ fig, ax = plt.subplots( figsize=(len(data) * fig_width_per_char, fig_height)) kwargs['fontsize'] = font_size ax.text(0, 0, data, **kwargs) ax.axis('off') return _convert_to_image(name, fig, dpi, img_height, img_width)
d956b8dc885f1f582c2a54147bd42623f2a796c4
27,696
def index_closed(client, index_name):
    """Return True if index is closed"""
    try:
        # 1.0 params
        index_metadata = client.cluster.state(
            index=index_name,
            metric='metadata',
        )
    except TypeError:
        # 0.90 params:
        index_metadata = client.cluster.state(
            filter_blocks=True,
            filter_index_templates=True,
            filter_indices=index_name,
            filter_nodes=True,
            filter_routing_table=True,
        )
    return index_metadata['metadata']['indices'][index_name]['state'] == 'close'
9da234b5ef4b6f6c2835f3adf67aac804cc92919
27,697
def get_contributions_with_user_as_submitter(event, user):
    """Get a list of contributions in which the `user` has submission rights"""
    return (_query_contributions_with_user_as_submitter(event, user)
            .options(joinedload('acl_entries'))
            .order_by(db.func.lower(Contribution.title))
            .all())
ae30ded175f52aed5d50e629d5e260577bffe0f3
27,699
def rewrite_string(
    searcher: AbstractSearcher,
    source: Text,
    path: Text,
    max_iterations=1,
) -> Text:
    """Applies any replacements to the input source, and returns the result."""
    return formatting.apply_substitutions(
        source,
        find_iter(
            searcher,
            source,
            path,
            max_iterations=max_iterations,
        ))
2de75ca2511ff2496f29674ff762c7a48531c51b
27,700
def build_path_result_tests(name): """ Build a test for API commands that respond with ``Err`` and ``Mountpoint`` fields. :param unicode command_name: The command in the schema to validate. :return: ``TestCase``. """ return build_schema_test( name=str(name + "Tests"), schema={"$ref": "/endpoints.json#/definitions/" + name}, schema_store=SCHEMAS, failing_instances={ b'additionalProperties': [ # Extra field: {"Err": "", "Mountpoint": "/x", "extra": "y"}, # Wrong fields: {"Result": "hello"}, ], b'required': [ # Missing field: {}, {"Mountpoint": "/x"}, ], b'type': [ # Wrong types: [], "", None, ], }, passing_instances=[ {"Err": "Something went wrong."}, {"Err": "", "Mountpoint": "/x/"}, ])
32491f1eb10d9f175522b7ea55f91a2c1e6aeb27
27,701
def get_mpls_autobw_template_detail_rpc(self, api_timeout=''): """ This is an auto-generated method for the PySwitchLib. **Supported Versions**: * SLXOS: 17r.1.01a, 17r.2.00, 17s.1.02 :type api_timeout: long or tuple(long, long) :param api_timeout: Timeout for connection and response in seconds. If a tuple is specified, then the first value is for the connection timeout and the second value is for the response timeout. :rtype: (*bool, list*) :returns: Returns a tuple. #. **api_success** (*bool*) - The success or failure of the API. #. **details** (*list*) - List of REST request/response dictionaries, keyed by the asset's ip address. :raises ConnectionError: If requests module connection or response timeout occurs. :raises UnsupportedOSError: If firmware version installed on asset is not supported. :raises RestInterfaceError: If requests module does not get a successful response from the rest URI. :raises ValueError: If the argument value does not meet type requirements or value restrictions. """ operation_type = 'rpc' compositions_list = [] bindings_list = [('pybind.slxos.v17r_1_01a.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_1_01a.brocade_mpls_rpc'), ('pybind.slxos.v17r_2_00.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17r_2_00.brocade_mpls_rpc'), ('pybind.slxos.v17s_1_02.brocade_mpls_rpc.get_mpls_autobw_template_detail', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc.brocade_mpls', 'pybind.slxos.v17s_1_02.brocade_mpls_rpc')] composed_child_list = [] compositions_keyval_list = [] bindings_keyval = {'kwargs_key_name': '', 'keyval': '', 'extra_keyval': ''} composed_child_leafval_list = [] leafval_map = {} rest_leaf_name = '' choices_kwargs_map = {} leaf_os_support_map = {} self._api_validation(choices_kwargs_map=choices_kwargs_map, leaf_os_support_map=leaf_os_support_map) pybind_object = self._get_pybind_object(operation_type=operation_type, compositions_list=compositions_list, bindings_list=bindings_list, composed_child_list=composed_child_list, compositions_keyval_list=compositions_keyval_list, bindings_keyval=bindings_keyval, composed_child_leafval_list=composed_child_leafval_list, leafval_map=leafval_map) return self._rpc_worker(operation_type=operation_type, pybind_object=pybind_object, resource_depth=1, timeout=api_timeout)
97e0d7ef9a43ef2335fefc6c62ad34286dbf7efb
27,702
def empty(a):
    """Test whether the slice is empty."""
    return a is None or volume(a) == 0
c1de4cfbf3bcc569b4b3cde02d518aabf32ff6fd
27,703
def nnPredict(w1, w2, data):
    """% nnPredict predicts the label of data given the parameter w1, w2 of
    % Neural Network.

    % Input:
    % w1: matrix of weights of connections from input layer to hidden layers.
    %     w1(i, j) represents the weight of connection from unit i in input
    %     layer to unit j in hidden layer.
    % w2: matrix of weights of connections from hidden layer to output layers.
    %     w2(i, j) represents the weight of connection from unit i in input
    %     layer to unit j in hidden layer.
    % data: matrix of data. Each row of this matrix represents the feature
    %       vector of a particular image

    % Output:
    % label: a column vector of predicted labels"""

    labels = np.array([])
    # Your code here

    return labels
564e3db0c659713de9dbfda7c279381d50d490e3
27,705
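# Illustrative sketch only (not part of the original entry): one way the
# "Your code here" stub in nnPredict above could be completed, assuming a
# single-hidden-layer network with sigmoid activations and a bias unit
# appended to the input and hidden layers. The helper name nnPredict_sketch
# and these conventions are hypothetical, not taken from the original source.
import numpy as np


def nnPredict_sketch(w1, w2, data):
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    # Append a bias column of ones to the input data.
    data_bias = np.hstack((data, np.ones((data.shape[0], 1))))
    # Hidden layer activations.
    hidden = sigmoid(np.dot(data_bias, w1.T))
    # Append a bias column to the hidden activations.
    hidden_bias = np.hstack((hidden, np.ones((hidden.shape[0], 1))))
    # Output layer activations; the predicted label is the arg-max output unit.
    output = sigmoid(np.dot(hidden_bias, w2.T))
    return np.argmax(output, axis=1)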
from typing import List


def count_pairs(array: List[int], difference: int) -> int:
    """
    Given an array of integers, count the number of unique pairs of integers
    that have a given difference. These pairs are stored in a set in order to
    remove duplicates.

    Time complexity: O(n^2).

    :param array: is the array to count.
    :param difference: is the difference between two elements.
    :return: the number of unique pairs of integers that have a given difference.
    """
    pairs = set()
    for i in range(len(array)):
        for j in range(len(array)):
            if array[i] - array[j] == difference:
                pairs.add((array[i], array[j]))
    return len(pairs)
e027e8885f4c4531da9b7dab7de8e84a7004c913
27,706
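# Illustrative usage of count_pairs above (not part of the original entry):
# (7, 4) and (4, 1) both differ by 3, so two unique pairs are counted.
assert count_pairs([1, 7, 5, 4], 3) == 2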
def decompress_deltas_18bit(buffer):
    """Parse packet deltas from 18-byte compression format."""
    if bad_data_size(buffer, 18, "18-byte compressed packet"):
        raise ValueError("Bad input size for byte conversion.")

    deltas = np.zeros((2, 4))

    # Sample 1 - Channel 1
    minibuf = [(buffer[0] >> 6),
               ((buffer[0] & 0x3F) << 2 & 0xFF) | (buffer[1] >> 6),
               ((buffer[1] & 0x3F) << 2 & 0xFF) | (buffer[2] >> 6)]
    deltas[0][0] = int32_from_18bit(minibuf)

    # Sample 1 - Channel 2
    minibuf = [(buffer[2] & 0x3F) >> 4,
               (buffer[2] << 4 & 0xFF) | (buffer[3] >> 4),
               (buffer[3] << 4 & 0xFF) | (buffer[4] >> 4)]
    deltas[0][1] = int32_from_18bit(minibuf)

    # Sample 1 - Channel 3
    minibuf = [(buffer[4] & 0x0F) >> 2,
               (buffer[4] << 6 & 0xFF) | (buffer[5] >> 2),
               (buffer[5] << 6 & 0xFF) | (buffer[6] >> 2)]
    deltas[0][2] = int32_from_18bit(minibuf)

    # Sample 1 - Channel 4
    minibuf = [(buffer[6] & 0x03), buffer[7], buffer[8]]
    deltas[0][3] = int32_from_18bit(minibuf)

    # Sample 2 - Channel 1
    minibuf = [(buffer[9] >> 6),
               ((buffer[9] & 0x3F) << 2 & 0xFF) | (buffer[10] >> 6),
               ((buffer[10] & 0x3F) << 2 & 0xFF) | (buffer[11] >> 6)]
    deltas[1][0] = int32_from_18bit(minibuf)

    # Sample 2 - Channel 2
    minibuf = [(buffer[11] & 0x3F) >> 4,
               (buffer[11] << 4 & 0xFF) | (buffer[12] >> 4),
               (buffer[12] << 4 & 0xFF) | (buffer[13] >> 4)]
    deltas[1][1] = int32_from_18bit(minibuf)

    # Sample 2 - Channel 3
    minibuf = [(buffer[13] & 0x0F) >> 2,
               (buffer[13] << 6 & 0xFF) | (buffer[14] >> 2),
               (buffer[14] << 6 & 0xFF) | (buffer[15] >> 2)]
    deltas[1][2] = int32_from_18bit(minibuf)

    # Sample 2 - Channel 4
    minibuf = [(buffer[15] & 0x03), buffer[16], buffer[17]]
    deltas[1][3] = int32_from_18bit(minibuf)

    return deltas
65fc1b7cc12c0d9f0a082c7df57bbae81fb1ec13
27,707
import re


def password_validate(password):
    """It validates password using regular expression

    :param password: contains the users password
    :returns: Boolean
    :raises: ValidationError
    """
    if re.match(
        r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*?&])[A-Za-z\d@$!%*?&]{8,}$",
        password,
    ):
        return True
    raise serializers.ValidationError(
        {
            "message": "Password must be at least 8 characters long, at least one"
            + " capitalized character, alphanumeric and contain special characters."
        }
    )
57cec9fd1ae1f4f2b4df887d98e65ea9f6191595
27,708
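# Illustrative usage of password_validate above (not part of the original
# entry); only the passing path is shown, since the failing path raises a
# DRF serializers.ValidationError.
assert password_validate("Str0ng&Pass") is True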
def get_emr_cluster_status(cluster_id: str, detail: bool = False):
    """
    Provides cluster-level details including status, cluster_id, cluster_name and so on.

    Args:
        cluster_id: string, EMR cluster id
        detail: bool, provides additional detail about the cluster such as ec2 attributes

    Returns:
        cluster_status: dict, cluster level details like id, name, state
    """
    cluster_status = {}
    try:
        response = emr.describe_cluster(ClusterId=cluster_id)
    except Exception as error:
        logger.error(error)
        raise botocore.exceptions.ClientError(error, 'describe_cluster')
    else:
        cluster_status = {
            "cluster_id": response.get('Cluster').get('Id'),
            "cluster_name": response.get('Cluster').get('Name'),
            "status": response.get('Cluster').get('Status').get('State'),
            "protection": response.get('Cluster').get('TerminationProtected'),
            "message": response.get('Cluster').get('Status').get('StateChangeReason').get('Message')
        }
        if detail:
            cluster_status['ec2_attributes'] = {
                'subnet_id': response.get('Cluster').get('Ec2InstanceAttributes').get('Ec2SubnetId'),
                'availability_zone': response.get('Cluster').get('Ec2InstanceAttributes').get('Ec2AvailabilityZone'),
                'master_sg': response.get('Cluster').get('Ec2InstanceAttributes').get('EmrManagedMasterSecurityGroup'),
                'service_sg': response.get('Cluster').get('Ec2InstanceAttributes').get('ServiceAccessSecurityGroup')
            }
    return cluster_status
48da66c5aa45f0697b71924a31ea6d942bb5ce19
27,709
def get_keypair() -> KeyPairInfo:
    """Returns current keypair (ec2.KeyPairInfo)

    https://boto3.readthedocs.io/en/latest/reference/services/ec2.html#keypairinfo
    """
    return get_keypair_dict()[get_keypair_name()]
95cba4f8a81858578e831b0bdbae359791399841
27,710
def char_vectorizer(char, custom_alphabet=False):
    """
    Vectorize given nucleotide character.
    Convert to uppercase before vectorizing.

    >>> char_vectorizer("C")
    [0, 1, 0, 0]
    >>> char_vectorizer("g")
    [0, 0, 1, 0]
    >>> char_vectorizer("M", ['E', 'H', 'I', 'M', 'S'])
    [0, 0, 0, 1, 0]

    """
    alphabet = ['A', 'C', 'G', 'U']
    if custom_alphabet:
        alphabet = custom_alphabet
    char = char.upper()
    l = len(char)
    vector = []
    assert l == 1, "given char length != 1 (given char: \"%s\")" % (char)
    for c in alphabet:
        if c == char:
            vector.append(1)
        else:
            vector.append(0)
    return vector
53c306d814807299b7bac7801f4b7d2a663b6f99
27,711
def orbit_from_name(name):
    """Return :py:class:`~poliastro.twobody.orbit.Orbit` given a name.

    Retrieve info from JPL DASTCOM5 database.

    Parameters
    ----------
    name : str
        NEO name.

    Returns
    -------
    orbit : list (~poliastro.twobody.orbit.Orbit)
        NEO orbits.

    """
    records = record_from_name(name)
    orbits = []
    for record in records:
        orbits.append(orbit_from_record(record))
    return orbits
145c1ef503743e9066ecf8e2f5e06c0526f8ab62
27,712
def make_snowflake(timestamp_ms, datacenter_id, worker_id, sequence_id, twepoch=twepoch):
    """generate a twitter-snowflake id, based on
    https://github.com/twitter/snowflake/blob/master/src/main/scala/com/twitter/service/snowflake/IdWorker.scala

    :param: timestamp_ms time since UNIX epoch in milliseconds"""
    timestamp_ms = int(timestamp_ms)

    sid = ((timestamp_ms - twepoch) % max_timestamp) << datacenter_id_bits << worker_id_bits << sequence_id_bits
    sid += (datacenter_id % max_datacenter_id) << worker_id_bits << sequence_id_bits
    sid += (worker_id % max_worker_id) << sequence_id_bits
    sid += sequence_id % max_sequence_id

    return sid
473d97589dbe90949cb69ecdbeba719cbea5c2ee
27,713
from typing import Sequence


def compute_committee(indices: Sequence[ValidatorIndex],
                      seed: Bytes32,
                      index: uint64,
                      count: uint64) -> Sequence[ValidatorIndex]:
    """
    Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
    """
    start = (len(indices) * index) // count
    end = (len(indices) * (index + 1)) // count
    return [indices[compute_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
168fc69ef7d4962c089cd6e497cd94ccad062c73
27,714
def get_default_graph():
    """ Setting graphviz graph global options """
    graph = Digraph('AWS', engine='dot')
    graph.body.append('splines=line')
    graph.body.append('rankdir=LR')
    graph.body.append('outputorder=edgesfirst')
    graph.node_attr.update(shape='rectangle', style='filled', color='black')
    return graph
25ae7376b31d280722fb2791dbf9478a6b4cb2d1
27,715
from json import dumps


def GetJson(data):
    """
    Convert an object to JSON.

    @data: the object to convert (dict/list/str/int...)
    """
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return dumps(data)
372b501f5ada7254efab10447dcbdc91c8799408
27,716
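# Illustrative usage of GetJson above (not part of the original entry):
# bytes input is decoded to text before serialization.
print(GetJson({"name": "alice", "id": 1}))  # {"name": "alice", "id": 1}
print(GetJson(b"hello"))                    # "hello"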
def binary_tail(n: int) -> int:
    """ The last 1 digit and the following 0s of a binary representation, as a number """
    return ((n ^ (n - 1)) + 1) >> 1
63460cef7b39b7e7ee2ec880810ff71d82be01e9
27,717
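# Illustrative usage of binary_tail above (not part of the original entry):
# 12 is 0b1100, so its lowest set bit with trailing zeros is 0b100 == 4.
assert binary_tail(12) == 4
assert binary_tail(8) == 8   # 0b1000 -> 0b1000
assert binary_tail(7) == 1   # 0b0111 -> 0b1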
from datetime import datetime


def time_left(expire_date):
    """Return remaining days before feature expiration or 0 if expired."""
    today_dt = datetime.today()
    expire_dt = datetime.strptime(expire_date, "%d-%b-%Y")

    # Calculate remaining days before expiration
    days_left_td = expire_dt - today_dt
    days_left = days_left_td.days
    if days_left <= 0:
        days_left = 0

    return days_left
652acd27b0d4fa9b21321df4ff8ce6ce15b97ed6
27,718
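# Illustrative usage of time_left above (not part of the original entry):
# the value for a future date depends on when this is run, so only the
# already-expired case has a fixed result.
assert time_left("01-Jan-2000") == 0
print(time_left("01-Jan-2050"))  # remaining days, positive until 2050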
import yaml


def load_config():
    """
    importer_config.yaml must be in the current working directory
    """
    with open(_PATH) as fd:
        config = yaml.safe_load(fd)
    # Remove trailing slashes from URLs
    config['kbase_endpoint'] = config['kbase_endpoint'].strip('/')
    return config
0137c59cd2ea7b5b8ba57a82fb2997e6e7330751
27,719
def get_package_version():
    """Return the Review Board version as a Python package version string.

    Returns:
        unicode:
        The Review Board package version.
    """
    version = '%s.%s' % (VERSION[0], VERSION[1])

    if VERSION[2] or VERSION[3]:
        version = '%s.%s' % (version, VERSION[2])

        if VERSION[3]:
            version = '%s.%s' % (version, VERSION[3])

    tag = VERSION[4]

    if tag != 'final':
        if tag == 'alpha':
            tag = 'a'
        elif tag == 'beta':
            tag = 'b'

        version = '%s%s%s' % (version, tag, VERSION[5])

    return version
418eef843dd3647cffdeb879781e3599487cd885
27,720
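# Illustrative usage of get_package_version above (not part of the original
# entry), assuming a Review Board style VERSION tuple of the form
# (major, minor, micro, patch, tag, release_num, released); the value below
# is made up purely to show the formatting.
VERSION = (4, 0, 5, 0, 'beta', 2, False)
print(get_package_version())  # "4.0.5b2"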
def fit(x, y, sigy=None, error=False):
    """ Perform a linear fit on a range of x and y values.

    This function fits a set of data points x, y with individual standard
    deviations sigy for the y values to a straight line y = a + bx by
    minimizing chi-square.

    If the 'error' parameter is False the function returns only the found
    values for 'a' and 'b' as a tuple. If 'error' is True then a third tuple
    item is returned containing a dictionary of error statistics ('siga' the
    standard deviation of 'a', 'sigb' the standard deviation of 'b', 'chi2'
    the value of chi-square, and 'q' the goodness of fit probability).

    If 'sigy' is 'None' then no standard deviations are used, 'q' will be 1.0
    and the normalization of chi-square is to unit standard deviation for all
    points.

    Example:

    >>> a, b, err = fit([1.1, 1.95, 3.05], [1, 2.01, 2.95], error=True)
    """
    # This routine is based on an algorithm from Numerical Recipes
    x = np.asarray(x).astype(float)
    y = np.asarray(y).astype(float)

    if x.size != y.size:
        raise ValueError("Arrays 'x' and 'y' have different length")
    if x.size < 2:
        raise ValueError("Arrays 'x' and 'y' should have at least 2 elements")

    useWeights = False
    if sigy is not None:
        sigy = np.asarray(sigy)
        if sigy.size != y.size:
            raise ValueError("Arrays 'sigy' and 'y' have different length")
        useWeights = True

    # We need to minimize:
    #
    #   chi2 = sum( ((y[i] - a - b * x[i]) / sigy[i])^2 ; i=1..N)
    #
    # When taking derivatives with respect to a and b we get
    #
    #   dchi2/da = 0 = -2 * sum( (y[i] - a - b * x[i]) / sig[i]^2 ; i=1..N)
    #   dchi2/db = 0 = -2 * sum( x[i] * (y[i] - a - b * x[i]) / sig[i]^2 ; i=1..N)
    #
    # which provides us with a linear equation that we can use to solve a and b
    if useWeights:
        weights = 1 / (np.square(sigy.flat))
        S = np.sum(weights)
        Sx = np.dot(x.flat, weights)
        Sy = np.dot(y.flat, weights)
        t = x.flat - Sx / S
        tw = t * weights
        Stt = np.dot(tw, t)
        b = np.dot(tw, y.flat) / Stt
    else:
        S = x.size
        Sx = np.sum(x.flat)
        Sy = np.sum(y.flat)
        t = x.flat - Sx / S
        Stt = np.dot(t, t)
        b = np.dot(t, y.flat) / Stt

    a = (Sy - Sx * b) / S

    if not error:
        return (a, b)

    siga = np.sqrt((1 + Sx * Sx / (S * Stt)) / S)
    sigb = np.sqrt(1 / Stt)
    chi2 = 0.0
    q = 1.0
    if useWeights:
        chi = (y - a - b * x.flat) / sigy.flat
        chi2 = np.dot(chi, chi)
        if x.size > 2:
            q = gammaq(0.5 * (x.size - 2), 0.5 * chi2)
    else:
        chi = y.flat - a - b * x.flat
        chi2 = np.dot(chi, chi)
        if x.size > 2:
            sigdat = np.sqrt(chi2 / (x.size - 2))
            siga *= sigdat
            sigb *= sigdat

    return (a, b, {'siga': siga, 'sigb': sigb, 'chi2': chi2, 'q': float(q)})
f02db36a6e7395d3f955cafc6f506e3a42984abf
27,722
def eval_metrics_offline(cfg, pred_folder, palette, metrics=['mFscoreCD', 'mFscore']):
    """Calculate evaluation metrics from offline GT and prediction images,
    mainly using the evaluate method of the dataset class.

    Args:
        cfg (Config): config dict, must have the necessary items to build a dataset
        palette (list | ndarray): palette of the label
        metrics ([str]): metrics to be calculated. Default: ['mFscoreCD', 'mFscore']

    Returns:
        returns of the evaluate method of the dataset class
    """
    assert osp.isdir(pred_folder)
    dataset = build_dataset(cfg)
    preds = []
    for img_info in dataset.img_infos:
        pred_path = osp.join(pred_folder, img_info['ann']['seg_map'])
        pred = read_s2looking_label(pred_path, palette)
        preds.append(pred)
    return dataset.evaluate(preds, metric=metrics)
7d5e44a1cda8d7bbb92a56cedf18e940c51663e5
27,723
def zero_pad_and_crop(img, amount=4):
    """Zero pad by `amount` zero pixels on each side then take a random crop.

    Args:
        img: numpy image that will be zero padded and cropped.
        amount: amount of zeros to pad `img` with horizontally and vertically.

    Returns:
        The cropped zero padded img. The returned numpy array will be of the
        same shape as `img`.
    """
    padded_img = np.zeros((img.shape[0] + amount * 2,
                           img.shape[1] + amount * 2,
                           img.shape[2]))
    padded_img[amount:img.shape[0] + amount,
               amount:img.shape[1] + amount, :] = img
    top = np.random.randint(low=0, high=2 * amount)
    left = np.random.randint(low=0, high=2 * amount)
    new_img = padded_img[top:top + img.shape[0],
                         left:left + img.shape[1], :]
    return new_img
63ac80cb9759fd06032afd97f03e0057aed86784
27,724
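# Illustrative usage of zero_pad_and_crop above (not part of the original
# entry): the output keeps the input shape, shifted by a random crop offset.
import numpy as np

img = np.random.rand(32, 32, 3)
cropped = zero_pad_and_crop(img, amount=4)
assert cropped.shape == img.shape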
def video_id(video_id_or_url):
    """
    Returns video id from given video id or url

    Parameters:
    -----------
    video_id_or_url: str - either a video id or url

    Returns:
    --------
    the video id
    """
    if 'watch?v=' in video_id_or_url:
        return video_id_or_url.split('watch?v=')[1]
    else:
        # assume we already have a video id
        return video_id_or_url
9f680ac621e1f5c6314a6a3e97093d786fa7ea33
27,725
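# Illustrative usage of video_id above (not part of the original entry);
# the id "abc123XYZ_0" is a made-up placeholder.
assert video_id("https://www.youtube.com/watch?v=abc123XYZ_0") == "abc123XYZ_0"
assert video_id("abc123XYZ_0") == "abc123XYZ_0"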
def check_permission(resource: Resource, permission_name: str) -> dict:
    """
    Check if requester has sufficient permissions to do something on a
    specific resource. Raises if not.
    """
    base_permission_policy = resource.get_guest_authorization()

    if (authorization_header := resource.request.headers.get("Authorization")) is None:
        if not base_permission_policy:
            raise Unauthorized("Authorization header missing or empty")

    authorizer = Authorizer(
        auth_jwt=authorization_header,
        resource_name=resource.get_name(),
        permission_name=permission_name,
        base_permission_policy=base_permission_policy,
    )
    authorizer.check_access()

    return authorizer.restrictions
b632fbb5f3429c1540da35c3b82e6fa562b81c87
27,726
from collections import defaultdict
from datetime import datetime
import json


def makePalRecordsConsistent(pal_records, low_frequency, high_frequency,
                             user_id, fcc_channel_id="1",
                             start_date=None, end_date=None):
    """Make Pal object consistent with the inputs

    Args:
        pal_records: (list) A list of PAL Records in the form of dictionary.
        low_frequency: (number) The Primary Low Frequency in Hz for PAL.
        high_frequency: (number) The Primary High Frequency in Hz for PAL.
        user_id: (string) The userId to put in PAL Records.
        fcc_channel_id: (string) The FCC-supplied frequency channel identifier.
        start_date: (string) PAL license start date, generally set as one year
            before the current date
        end_date: (string) PAL license expiration date, generally set as more
            than one year after the current date
    Returns:
        A list containing individual PAL records in the form of dictionary
    Note: The PAL Dictionary must contain censusYear(number) and fipsCode(number)
    """
    start_date = datetime.now().replace(year=datetime.now().year - 1) \
        if start_date is None else start_date
    end_date = datetime.now().replace(year=datetime.now().year + 1) \
        if end_date is None else end_date

    for index, pal_rec in enumerate(pal_records):
        pal_fips_code = pal_rec['fipsCode']
        pal_census_year = pal_rec['censusYear']
        del pal_rec['fipsCode'], pal_rec['censusYear']
        pal_rec = defaultdict(lambda: defaultdict(dict), pal_rec)
        # Change the FIPS Code and Registration Date-Year in Pal Id
        pal_rec['palId'] = '/'.join(['pal',
                                     '%s-%d' % ('{:02d}'.format(start_date.month),
                                                start_date.year),
                                     str(pal_fips_code), fcc_channel_id])
        pal_rec['userId'] = user_id
        # Make the date consistent in Pal Record for Registration and License
        pal_rec['registrationInformation']['registrationDate'] = \
            start_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        # Change License Information in Pal
        pal_rec['license']['licenseAreaIdentifier'] = str(pal_fips_code)
        pal_rec['license']['licenseAreaExtent'] = \
            'zone/census_tract/census/{}/{}'.format(pal_census_year, pal_fips_code)
        pal_rec['license']['licenseDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        pal_rec['license']['licenseExpiration'] = end_date.strftime('%Y-%m-%dT%H:%M:%SZ')
        pal_rec['license']['licenseFrequencyChannelId'] = fcc_channel_id
        # Change Frequency Information in Pal
        pal_rec['channelAssignment']['primaryAssignment']['lowFrequency'] = low_frequency
        pal_rec['channelAssignment']['primaryAssignment']['highFrequency'] = high_frequency
        # Converting from defaultdict to dict
        pal_records[index] = json.loads(json.dumps(pal_rec))

    return pal_records
3029ee761afb17428a6a2b0b1e85c0f2f3fdc6d6
27,727
import yaml


def read_config():
    """Read the 'elrados' segment from the rotest.yml config file."""
    config_path = search_config_file()
    if config_path is not None:
        with open(config_path, "r") as config_file:
            configuration_content = config_file.read()

        yaml_configuration = yaml.load(configuration_content)
    else:
        yaml_configuration = {}

    return AttrDict(yaml_configuration.get("elrados", {}))
5fb81ef5609074c029203021895ba55654760a60
27,728
import typing


def cast_to_str(some_value: typing.Any, from_type: typing.Any) -> typing.Any:
    """Just helper for creating suitable test assets."""
    if from_type == bytes:
        return some_value.decode()
    return str(some_value)
9157873a74d0d02b919d047710c1d4ccee4121a6
27,729
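# Illustrative usage of cast_to_str above (not part of the original entry):
assert cast_to_str(b"42", bytes) == "42"
assert cast_to_str(42, int) == "42"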
def downsample_by_group(df, min_distance=1., message_callback=print):
    """Group and down-sample a DataFrame of xrsd records.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe containing xrsd samples
    min_distance : float
        the minimum allowed nearest-neighbor distance for continuing to
        downsample after 10 or more samples have been selected

    Returns
    -------
    data_sample : pandas.DataFrame
        DataFrame containing all of the down-sampled data from each group
        in the input dataframe.
        Features in this DataFrame are not scaled:
        the correct scaler should be applied before training models.
    """
    data_sample = pd.DataFrame(columns=df.columns)
    group_cols = ['experiment_id', 'system_class']
    all_groups = df.groupby(group_cols)
    # downsample each group independently
    for group_labels, grp in all_groups.groups.items():
        group_df = df.iloc[grp].copy()
        if message_callback:
            message_callback('Downsampling data for group: {}'.format(group_labels))
        #lbl_df = _filter_by_labels(data,lbls)
        dsamp = downsample(df.iloc[grp].copy(), min_distance)
        if message_callback:
            message_callback('Finished downsampling: kept {}/{}'.format(len(dsamp), len(group_df)))
        data_sample = data_sample.append(dsamp)
    return data_sample
bbcdbab6c1bf5e42554e102bda8d245263a47bfd
27,730
from typing import Union


def get_pure_ratings(
    ratings: Union[str, pd.Series, pd.DataFrame]
) -> Union[str, pd.Series, pd.DataFrame]:
    """Removes rating watches/outlooks.

    Parameters
    ----------
    ratings : str, pd.Series, or pd.DataFrame
        Rating may contain watch, such as `AA- *+`, `BBB+ (CwNegative)`.
        Outlook/watch should be separated by a blank from the actual rating.

    Returns
    -------
    Union[str, pd.Series, pd.DataFrame]
        String, Series, or DataFrame with regular ratings stripped off of
        watches. The name of the resulting Series or the columns of the
        returning DataFrame will be suffixed with `_clean`.

    Examples
    --------
    Cleaning a single rating:

    >>> get_pure_ratings("AA- *+")
    'AA-'

    >>> get_pure_ratings("Au")
    'A'

    Cleaning a `pd.Series`:

    >>> import pandas as pd
    >>> rating_series=pd.Series(
    ...     data=[
    ...         "BB+ *-",
    ...         "BBB *+",
    ...         np.nan,
    ...         "AA- (Developing)",
    ...         np.nan,
    ...         "CCC+ (CwPositive)",
    ...         "BB+u",
    ...     ],
    ...     name="rtg_SP",
    ... )

    >>> get_pure_ratings(rating_series)
    0     BB+
    1     BBB
    2     NaN
    3     AA-
    4     NaN
    5    CCC+
    6     BB+
    Name: rtg_SP_clean, dtype: object

    Cleaning a `pd.DataFrame`:

    >>> rtg_df = pd.DataFrame(
    ...     data={
    ...         "rtg_SP": [
    ...             "BB+ *-",
    ...             "BBB *+",
    ...             np.nan,
    ...             "AA- (Developing)",
    ...             np.nan,
    ...             "CCC+ (CwPositive)",
    ...             "BB+u",
    ...         ],
    ...         "rtg_Fitch": [
    ...             "BB+ *-",
    ...             "BBB *+",
    ...             pd.NA,
    ...             "AA- (Developing)",
    ...             np.nan,
    ...             "CCC+ (CwPositive)",
    ...             "BB+u",
    ...         ],
    ...     },
    ... )

    >>> get_pure_ratings(rtg_df)
      rtg_SP_clean rtg_Fitch_clean
    0          BB+             BB+
    1          BBB             BBB
    2          NaN            <NA>
    3          AA-             AA-
    4          NaN             NaN
    5         CCC+            CCC+
    6          BB+             BB+

    """
    if isinstance(ratings, str):
        ratings = ratings.split()[0]
        ratings = ratings.rstrip("uU")
        return ratings

    elif isinstance(ratings, pd.Series):
        # identify string occurrences
        isstring = ratings.apply(type).eq(str)

        # strip string after occurrence of very first blank and strip character
        # 'u', which has usually been added without a blank
        ratings[isstring] = ratings[isstring].str.split().str[0]
        ratings[isstring] = ratings[isstring].str.rstrip("uU")
        ratings.name = f"{ratings.name}_clean"
        return ratings

    elif isinstance(ratings, pd.DataFrame):
        # Recursive call of `get_pure_ratings`
        return pd.concat(
            [get_pure_ratings(ratings=ratings[col]) for col in ratings.columns], axis=1
        )
a834b3f33f12d8d3b6c109021a4d075c32160544
27,731
def export_grid(outname, resh, ignore=[255, 0]):
    """[Exports given grid to a point cloud]

    Args:
        outname ([str]): [Output file name]
        resh ([np.array]): [3D Volume in grid representation]
        ignore (list, optional): [values to ignore]. Defaults to [255, 0].

    Returns:
        Grid as a point cloud and its colors
    """
    pts = []
    colors = []
    grid_shape = resh.shape
    # print(grid_shape)
    colorMap = np.array([[255, 0, 0],
                         [0, 255, 0],
                         [0, 0, 255],
                         [80, 128, 255],
                         [255, 230, 180],
                         [255, 0, 255],
                         [0, 255, 255],
                         [100, 0, 0],
                         [0, 100, 0],
                         [255, 255, 0],
                         [50, 150, 0],
                         [200, 255, 255],
                         [255, 200, 255],
                         [128, 128, 80],
                         [0, 50, 128],
                         [0, 100, 100],
                         [0, 255, 128],
                         [0, 128, 255],
                         [255, 0, 128],
                         [128, 0, 255],
                         [255, 128, 0],
                         [128, 255, 0],
                         ])
    # resh = np.load("results_overfit_scannet_corrected/0000.npy").reshape(grid_shape)
    for x in range(grid_shape[0]):
        for y in range(grid_shape[1]):
            for z in range(grid_shape[2]):
                if resh[x, y, z] not in ignore:
                    colors.append(colorMap[resh[x, y, z]])
                    pts.append([x, y, z])

    a = trimesh.points.PointCloud(pts, colors).export(outname)
    return np.array(pts), np.array(colors)
27fd06c37d57f0eb320d0d81908efc1acf6bc8ee
27,732
def sum_errors(dic):
    """Helper function to sum up number of failed jobs per host.
    Assumes that dic is in the form

    :param dict dic: {"error_code1":count1, "error_code2":count2, etc.}
    :return int: Sum of all values in dic
    """
    return sum(value for key, value in dic.iteritems())
0d2bc9df58e5bf9639a331d64061de4c0a5aa4ed
27,733
def checkFriends(new_user, usernameOfFriend):
    """ Check if users are friends or not"""
    # Check if users are friends first
    viewF = viewFriends(new_user, usernameOfFriend)
    # Check if users have any pending requests
    viewP = searchPendingRequests(new_user, usernameOfFriend)

    # Logic
    if viewF:
        return True
    elif viewP:
        # The friendship has not been approved yet
        return False
    else:
        return False
6268d180a768ad858d5e1c5c69c92837a027f07f
27,734
def update(instance, **data):
    """Update instance with data directly by using ``update()`` skipping
    calling ``save()`` method.

    Usage:
        ``instance = update(instance, some_field=some_value)``
    """
    instance.__class__.objects.filter(pk=instance.pk).update(**data)
    return refresh(instance)
8462d5459ba02d11ef3edd0c1d3c152d4b682634
27,735
def cast_distance_matrix_to_optimal_integer_type(D_X):
    """
    Cast distance matrix to smallest signed integer type, sufficient to hold
    all its distances.

    Parameters
    -----------
    D_X: np.array (|X|×|X|)
        Distance matrix of a compact metric space X with integer distances.

    Returns
    --------
    D: np.array (|X|×|X|)
        Distance matrix of the metric space, cast to optimal type.
    """
    max_distance = np.max(D_X)
    # Type is signed integer to allow subtractions.
    optimal_int_type = determine_optimal_int_type(max_distance)
    D_X = D_X.astype(optimal_int_type)

    return D_X
01c8e43259d5edc7282a9c01269277f34495ff9d
27,736
def getObject(name):
    """Get reference to single Rhino object, checking for failure"""
    rc, obRef = Rhino.Input.RhinoGet.GetOneObject("Select " + name, True,
                                                  Rhino.DocObjects.ObjectType.AnyObject)
    if rc != Rhino.Commands.Result.Success or not obRef:
        raise NameError(rc)
    return obRef
20d562dd90dec82a479bdedabef62f17129cecf0
27,737
def get_falsecolor(input):
    """
    picks false color bands from the 12 Sentinel bands
    (for visual interpretation of vegetation)

    :param input: 12-band image tensor
    :return: 3-band NIR-RED-GREEN tensor
    """
    rgb_band_idxs = [bands.index(b) for b in ["S2B8", "S2B4", "S2B3"]]
    return input[rgb_band_idxs]
a4e80bb61211456794b34c99ec0ad92f1b1f567d
27,738