Columns: content (string, length 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M)
def find_asg_using_amis(ami_ids):
    """ take a list of ami ids and return a dictionary of launch configs that use them """
    # ref: return = { ami_id : "lc_arns":[]}
    ami_ids = listify(ami_ids)
    result = {id: [] for id in ami_ids}
    client_asg = boto3.client('autoscaling')
    lc = client_asg.describe_launch_configurations()
    for a_lc in lc['LaunchConfigurations']:
        if a_lc['ImageId'] in ami_ids:
            result[a_lc['ImageId']].append(a_lc['LaunchConfigurationARN'])
    return result
951d20144be55a699911a690ee35405d3e4fa08b
7,900
def tokenize_nmt(text, num_examples=None):
    """Tokenize the English-French dataset."""
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        if num_examples and i > num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            source.append(parts[0].split(' '))
            target.append(parts[1].split(' '))
    return source, target
f77def3cdd2eee6a9ecc3bb7fa007eccfcd9e8a8
7,901
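A minimal usage sketch for the tokenize_nmt function above; the two-line tab-separated text is made up purely to illustrate the expected "english\tfrench" input format.

    text = "go .\tva .\nhello !\tsalut !"   # assumed input format: one "english\tfrench" pair per line
    source, target = tokenize_nmt(text)
    # source -> [['go', '.'], ['hello', '!']]
    # target -> [['va', '.'], ['salut', '!']]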
def setup_snicar(input_file):
    """Builds impurity array and instances of all classes according to config in yaml file.

    Args:
        input_file: path to the yaml configuration file

    Returns:
        ice: instance of Ice class
        illumination: instance of Illumination class
        rt_config: instance of RTConfig class
        model_config: instance of ModelConfig class
        plot_config: instance of PlotConfig class
        impurities: array of impurity instances
    """
    impurities = build_impurities_array(input_file)
    (
        ice,
        illumination,
        rt_config,
        model_config,
        plot_config,
    ) = build_classes(input_file)

    return (
        ice,
        illumination,
        rt_config,
        model_config,
        plot_config,
        impurities,
    )
14fc40bb6c555cf94f8f8c7ddf2133e208b5ee02
7,902
async def list_model_config(model_name: str):
    """
    Lists all the model's configuration.

    :param model_name: Model name
    :return: List of model's configuration
    """
    try:
        return ApiResponse(data=dl_service.get_config(model_name))
    except ApplicationError as e:
        raise e
    except Exception:
        raise ApplicationError('unexpected server error')
8ce01caee8fe7dacc381ea4ed23a5d885ab0da01
7,903
def signup_post(request):
    """Get the user data from the signup form and store it in the database after
    validation; on success the user is granted access, followed by additional
    identification steps."""
    mess = "login please"
    data = get_post(request)
    mail = data.POST['mail']
    name = data.POST['name']
    captcha = data.POST['captcha']
    hash = data.POST['hash']
    password = data.POST['password']
    address = data.POST['addres']
    phone = data.POST['phone']
    # date = time.strftime("%H:%M:%S %d.%m.%Y")
    # date=time.time()
    passw = getmd5(password)
    if not check_captcha(hash, captcha):
        hash, raw = get_captcha(hash)
        mess = "Неправильно введен проверочный код"  # "The verification code was entered incorrectly"
        return templ('app.auth:signup', request, dict(name=name, mail=mail, address=address, phone=phone,
                                                      hash=hash, mess=mess.decode('UTF-8')))
    # Check whether such a user already exists in the database; if not, register them.
    if not request.db.doc.find_one({'_id': 'user:'+name}):
        doc = {'_id': 'user:'+name, 'name': name, 'password': passw, 'mail': mail,
               "type": "table_row", "rate": 0, "doc_type": "des:users",
               "doc": {"user": "user:"+name, "name": {'ru': name, 'en': name}, "old": "33",
                       "phone": phone, "address": address, 'date': create_date(), "home": "false"}}
        request.db.doc.save(doc)
        request.db.doc.update({'_id': 'role:simple_users'}, {'$set': {'users.user:'+name: 'true'}})
        mess = "Поздравляем, можете войти."  # "Congratulations, you can now log in."
    else:
        mess = "Такой логин уже есть выберите другой"  # "This login already exists, please choose another"
    return templ('libs.auth:login', request, dict(mess=mess.decode('UTF-8')))
22e4a8508849f9c4879c65e3347a0fabf6bffeb8
7,904
def make_onehot(label, num_classes, axis=-1):
    """
    Create one hot tensor based on the input tensor
    Args:
        label: input tensor, the value must be positive integer and less than num_class
        num_classes: the number of class in one hot tensor
        axis: The axis to fill (default: -1, a new inner-most axis).
    Returns:
        :onehot tensor
    Examples:
    >>> make_onehot(np.array([[1, 2],[1, 3]]).long(), 4, axis=-1)
    tensor([[[0., 1., 1., 0.],
             [0., 1., 0., 1.]],
    <BLANKLINE>
            [[0., 0., 0., 0.],
             [0., 0., 0., 0.]]])
    """
    shp = label.shape
    flatten_label = label.reshape(-1)
    result = np.eye(num_classes)
    result = result[flatten_label.astype(np.int64)]
    if axis != -1 and axis != ndim(label) - 1:
        # move the one-hot axis into the requested position
        result = np.swapaxes(result, axis, -1)
    return result
3329f5f135b1fea383b389012aeeb37ea923cb46
7,905
def kohn_sham_iteration( state, num_electrons, xc_energy_density_fn, interaction_fn, enforce_reflection_symmetry): """One iteration of Kohn-Sham calculation. Note xc_energy_density_fn must be wrapped by jax.tree_util.Partial so this function can take a callable. When the arguments of this callable changes, e.g. the parameters of the neural network, kohn_sham_iteration() will not be recompiled. Args: state: KohnShamState. num_electrons: Integer, the number of electrons in the system. The first num_electrons states are occupid. xc_energy_density_fn: function takes density (num_grids,) and returns the energy density (num_grids,). interaction_fn: function takes displacements and returns float numpy array with the same shape of displacements. enforce_reflection_symmetry: Boolean, whether to enforce reflection symmetry. If True, the system are symmetric respecting to the center. Returns: KohnShamState, the next state of Kohn-Sham iteration. """ if enforce_reflection_symmetry: xc_energy_density_fn = _flip_and_average_fn( xc_energy_density_fn, locations=state.locations, grids=state.grids) hartree_potential = get_hartree_potential( density=state.density, grids=state.grids, interaction_fn=interaction_fn) xc_potential = get_xc_potential( density=state.density, xc_energy_density_fn=xc_energy_density_fn, grids=state.grids) ks_potential = hartree_potential + xc_potential + state.external_potential xc_energy_density = xc_energy_density_fn(state.density) # Solve Kohn-Sham equation. density, total_eigen_energies, gap = solve_noninteracting_system( external_potential=ks_potential, num_electrons=num_electrons, grids=state.grids) total_energy = ( # kinetic energy = total_eigen_energies - external_potential_energy total_eigen_energies - get_external_potential_energy( external_potential=ks_potential, density=density, grids=state.grids) # Hartree energy + get_hartree_energy( density=density, grids=state.grids, interaction_fn=interaction_fn) # xc energy + get_xc_energy( density=density, xc_energy_density_fn=xc_energy_density_fn, grids=state.grids) # external energy + get_external_potential_energy( external_potential=state.external_potential, density=density, grids=state.grids) ) if enforce_reflection_symmetry: density = utils.flip_and_average( locations=state.locations, grids=state.grids, array=density) return state._replace( density=density, total_energy=total_energy, hartree_potential=hartree_potential, xc_potential=xc_potential, xc_energy_density=xc_energy_density, gap=gap)
f016e1d8c6ab9072065183d3fa1d37cfa12eb8ee
7,906
def calculate_reliability(data):
    """ Calculates the reliability rating of the smartcab during testing. """
    success_ratio = data['success'].sum() * 1.0 / len(data)
    if success_ratio == 1:  # Always meets deadline
        return ("A+", "green")
    else:
        if success_ratio >= 0.90:
            return ("A", "green")
        elif success_ratio >= 0.80:
            return ("B", "green")
        elif success_ratio >= 0.70:
            return ("C", "#EEC700")
        elif success_ratio >= 0.60:
            return ("D", "#EEC700")
        else:
            return ("F", "red")
d1c9ad7bba220beeae06c568cfd269aaaebfb994
7,907
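A hedged example of calling calculate_reliability above, assuming the testing log is a pandas DataFrame with an integer or boolean 'success' column; the values below are fabricated for illustration.

    import pandas as pd

    data = pd.DataFrame({'success': [1, 1, 1, 0, 1, 1, 1, 0, 1, 1]})  # 8 of 10 trials met the deadline
    calculate_reliability(data)  # -> ('B', 'green'), since 0.80 <= success ratio < 0.90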
def cache_mat_calc(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad, ra_dc_cor=None, l_max=1, fit_type="full", num_iter=None): """Calculate cache matrix for future use Parameters ---------- dra/ddc : array of float R.A.(*cos(Dec.))/Dec. differences dra_err/ddc_err : array of float formal uncertainty of dra(*cos(dc_rad))/ddc ra_rad/dc_rad : array of float Right ascension/Declination in radian ra_dc_cov/ra_dc_cor : array of float covariance/correlation coefficient between dra and ddc, default is None fit_type : string flag to determine which parameters to be fitted 'full' for T- and S-vectors both 'T' for T-vectors only 'S' for S-vectors only Returns ---------- pmt : array of float estimation of (d1, d2, d3, r1, r2, r3) sig : array of float uncertainty of x cor_mat : matrix matrix of correlation coefficient. """ # Maxium number of sources processed per time # According to my test, 100 should be a good choice if num_iter is None: num_iter = 100 div = dra.size // num_iter rem = dra.size % num_iter suffix_array = [] if rem: suffix_array.append("{:05d}".format(0)) if not ra_dc_cor is None: nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem], ra_rad[: rem], dc_rad[: rem], ra_dc_cor=ra_dc_cor[: rem], l_max=l_max, fit_type=fit_type, suffix=suffix_array[0]) else: nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem], ra_rad[: rem], dc_rad[: rem], l_max=l_max, fit_type=fit_type, suffix=suffix_array[0]) for i in range(div): sta = rem + i * num_iter end = sta + num_iter suffix_array.append("{:05d}".format(i+1)) if not ra_dc_cor is None: nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end], ra_rad[sta: end], dc_rad[sta: end], ra_dc_cor=ra_dc_cor[sta: end], l_max=l_max, fit_type=fit_type, suffix=suffix_array[-1]) else: nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end], ra_rad[sta: end], dc_rad[sta: end], l_max=l_max, fit_type=fit_type, suffix=suffix_array[-1]) return suffix_array
805ecc94165ae1162341abdc7c4dd3557af5f8c4
7,908
def get_android_replacements():
    """Gets a dictionary of all android-specific replacements to be made."""
    replacements = {}

    compileSdk = 'compileSdkVersion {}'.format(COMPILE_SDK_VERSION)
    targetSdk = 'targetSdkVersion {}'.format(TARGET_SDK_VERSION)
    buildToolsVersion = 'buildToolsVersion \'{}\''.format(BUILD_TOOLS_VERSION)

    replacements[COMPILE_SDK_RE] = compileSdk
    replacements[TARGET_SDK_RE] = targetSdk
    replacements[BUILD_TOOLS_RE] = buildToolsVersion

    return replacements
e3b78d7ccd897d79db66740d46aa05410dd2a83f
7,909
from typing import List from typing import Set from typing import Dict def process_long_term_idle_users( slack_data_dir: str, users: List[ZerverFieldsT], slack_user_id_to_zulip_user_id: SlackToZulipUserIDT, added_channels: AddedChannelsT, added_mpims: AddedMPIMsT, dm_members: DMMembersT, zerver_userprofile: List[ZerverFieldsT], ) -> Set[int]: """Algorithmically, we treat users who have sent at least 10 messages or have sent a message within the last 60 days as active. Everyone else is treated as long-term idle, which means they will have a slightly slower first page load when coming back to Zulip. """ all_messages = get_messages_iterator(slack_data_dir, added_channels, added_mpims, dm_members) sender_counts: Dict[str, int] = defaultdict(int) recent_senders: Set[str] = set() NOW = float(timezone_now().timestamp()) for message in all_messages: timestamp = float(message["ts"]) slack_user_id = get_message_sending_user(message) if not slack_user_id: continue if slack_user_id in recent_senders: continue if NOW - timestamp < 60: recent_senders.add(slack_user_id) sender_counts[slack_user_id] += 1 for (slack_sender_id, count) in sender_counts.items(): if count > 10: recent_senders.add(slack_sender_id) long_term_idle = set() for slack_user in users: if slack_user["id"] in recent_senders: continue zulip_user_id = slack_user_id_to_zulip_user_id[slack_user["id"]] long_term_idle.add(zulip_user_id) for user_profile_row in zerver_userprofile: if user_profile_row["id"] in long_term_idle: user_profile_row["long_term_idle"] = True # Setting last_active_message_id to 1 means the user, if # imported, will get the full message history for the # streams they were on. user_profile_row["last_active_message_id"] = 1 return long_term_idle
4a372837ed5497117227e18534c91c6f0ce840bf
7,910
def clear_cache():
    """
    Clears internal cache.  Returns
    something that can be given back to restore_cache.
    """
    global FS_CACHE
    old = FS_CACHE
    FS_CACHE = {}
    return old
492513177a70cd663671616e034c6f3b287ceb75
7,911
from tqdm import tqdm

import numpy as np
import pandas as pd


def extend_gdf(gdf_disjoint, id_col):
    """
    Add duplicates of intersecting geometries to be able to add the constants.

    This function adds rows with duplicate geometries and creates the new `id`
    column for each of the new rows. This function is called by another function
    `complete_disjoint_geoms`.
    """
    tqdm_max = len(gdf_disjoint)
    ext = pd.DataFrame(columns=list(gdf_disjoint.columns) + [id_col + "_set"])
    for _, row in tqdm(gdf_disjoint.iterrows(), total=tqdm_max):
        num = len(row[id_col])
        data = np.array([list(row[id_col]), [row["geometry"]] * num]).T
        ext_new = pd.DataFrame(data, columns=gdf_disjoint.columns)
        ext_new[id_col + "_set"] = [row[id_col]] * num
        ext = ext.append(ext_new, ignore_index=True)
    return ext
5d6667b67c47125f8668bb6127b4a295fb2d61c9
7,912
def schedule_news_updates(update_interval: int, update_name: str) -> dict:
    """
    Functionality:
    ---------------
        Schedules a new news data update
    Parameters:
    ---------------
        update_interval: int
            The time until the scheduler should update the news data
        update_name: str
            The name of the update that has caused the scheduling of the news data update
    Returns:
    ---------------
        a key-value pair: dict
            Returns a dictionary with the key being the update name and
            the value being news scheduler object
    """
    return {update_name: news_scheduler.enter(update_interval, 1, update_news, ())}
1e2bf07aae3e2468f1ee0ff119b492820d866460
7,913
def get_digits(text):
    """
    Returns all numeric characters (digits) in string *text* in a new (concatenated) **string**.

    Example:

        >>> get_digits('Test123')
        '123'
        >>> int(get_digits('The answer is 42'))
        42

    :param text:    The string to search.
    :type text:     str, unicode
    :rtype:         str, unicode
    """
    _vld.pass_if(_vld.is_text(text), TypeError, "'text' attribute must be a string (got {!r})".format(text))
    return EMPTY_STR.join(s for s in text if s.isdigit())
78bcd49b74dbdfd7d9f40af3806c2b984adca780
7,914
def embed_tiles_in_json_sprite(tile_list, as_bytes=True, out_file=None): """Make a big rectangle containing the images for a brainsprite. Parameters: ----------- tile_list : list List of 2d square numpy arrays to stick in a mosaic Returns: -------- mosaic : np.ndarray Mosaic of tile images """ # Tiles are squares tile_size = tile_list[0].shape[0] num_tiles = len(tile_list) num_tile_rows = nearest_square(num_tiles) num_tile_cols = int(np.ceil(num_tiles/num_tile_rows)) mosaic = np.zeros((num_tile_rows * tile_size, num_tile_cols * tile_size)) i_indices, j_indices = np.unravel_index(np.arange(num_tiles), (num_tile_rows, num_tile_cols)) i_tile_offsets = tile_size * i_indices j_tile_offsets = tile_size * j_indices for tile, i_offset, j_offset in zip(tile_list, i_tile_offsets, j_tile_offsets): mosaic[i_offset:(i_offset + tile_size), j_offset:(j_offset + tile_size)] = tile if as_bytes: img = mplfig(mosaic, out_file, as_bytes=as_bytes) return dict(img=img, N=num_tile_rows, M=num_tile_cols, pix=tile_size, num_slices=num_tiles) return dict(mosaic=mosaic, N=num_tile_rows, M=num_tile_cols, pix=tile_size, num_slices=num_tiles)
2eca9eb999d69537fac60dfefd0d482576994868
7,915
import re


def removeUnicode(text):
    """ Removes unicode strings like "\u002c" and "x96" """
    text = re.sub(r'(\\u[0-9A-Fa-f]+)', r'', text)
    text = re.sub(r'[^\x00-\x7f]', r'', text)
    return text
f5c8090329ede82ce51601efa463537bba68b63a
7,916
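A quick sketch of removeUnicode above on a made-up string containing a literal "\u002c" escape sequence and two non-ASCII bytes, mirroring the cases mentioned in the docstring.

    removeUnicode("price\\u002c 42\x96 caf\xe9")  # -> 'price 42 caf'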
import scipy


def mfcc(tf, n_mfcc, fs, fmin=0.0, fmax=None):
    """
    Extract MFCC vectors

    Args:
        tf     : single-channel time-frequency domain signal, indexed by 'tf'
        n_mfcc : number of coefficients
        fs     : sample rate
        fmin   : (default 0) minimal frequency in Hz
        fmax   : (default fs/2) maximal frequency in Hz

    Returns:
        mfcc : MFCC
    """
    if fmax is None:
        fmax = fs / 2.0
    n_frame, n_fbin = tf.shape

    # get filter weights
    freq = np.fft.fftfreq(n_fbin)
    fbw = mel_freq_fbank_weight(n_mfcc, freq, fs, fmin=fmin, fmax=fmax)

    # get log power
    sigpow = np.real(tf * tf.conj())
    logfpow = np.log(np.einsum('bf,tf->tb', fbw, sigpow) + 1e-20)

    # DCT
    mfcc = scipy.fft.dct(logfpow)
    return mfcc
58769db11aa0633dd846b9d427e6d35a0fff435e
7,917
import logging def transform(fname, metadata=False): """ This function reads a Mission Analysis Orbit file and performs a matrix transformation on it. Currently only from the Mercury Equatorial frame to the Earth Equatorial frame. :param fname: The path to the orbit file. :type fname: str. :param metadata: Flag to return the metadata dictionary :type state: bool. :returns: numpy.array -- the return code. """ furnsh("/Users/jmcaulif/Code/naif/generic/lsk/naif0010.tls") logging.basicConfig(level=logging.INFO) mdata = {} data = {} with open(fname, 'r') as fh: for line in fh: t, x, y, z, vx, vy, vz = [float(x) for x in line.split()] T = np.array([[0.98159386604468, 0.19098031873327, 0.0], [-0.16775718426422, 0.86223242348167, 0.47792549108063], [0.09127436261733, -0.46912873047114, 0.87840037851502]]) Tinv = linalg.inv(T) r = np.array([[x, y, z]]) v = np.array([[vx, vy, vz]]) r_new = Tinv.dot(r.T).T v_new = Tinv.dot(v.T).T x, y, z = r_new[0] vx, vy, vz = v_new[0] t = et2utc(t * 86400, 'isoc', 2) print("{} {:9.2f} {:9.2f} {:9.2f} {:9.6f} {:9.6f} {:9.6f}". format(t, x, y, z, vx, vy, vz)) fh.close() if metadata: return data, mdata else: return data
ce389f74ba35d3c92a94031563b187a7285ccb5b
7,918
import requests import json def query_repository(repo_name, index_uuid, token, display_results=False): """ Display the ids ('subjects') of all items indexed in a repository. :param repo_name: Textual name of repository to query, corresponds to 'name' field in conf file. :param index_name: Name of index, mapped by us to a UUID. :param display_results: Print ids to standard output :return: List of result ids """ LOGGER.info("Querying index %s for repository %s" % (index_uuid, repo_name)) querylimit = 20 headers = {'Authorization' : ('Bearer ' + token), 'Content-Type' : 'application/json'} queryobj = {"@datatype": "GSearchRequest", "@version": "2016-11-09", "advanced": True, "offset": 0, "limit": querylimit, "q": "*", "filters": [ {"@datatype": "GFilter", "@version": "2016-11-09", "type": "match_any", "field_name": "https://frdr\\.ca/schema/1\\.0#origin\\.id", "values": [""]}]} result_ids = [] queryobj["filters"][0]["values"][0] = repo_name offset = 0 while True: r = requests.post('https://' + _api_host + '/v1/index/' + index_uuid + '/search', headers=headers, json=queryobj) search_results = json.loads(r.text) results_count = search_results['count'] LOGGER.info("Got %i results" % (results_count)) if results_count == 0: break for result in search_results['gmeta']: result_ids.append(result['subject']) offset = offset + querylimit queryobj["offset"] = offset if display_results: print('\n'.join(result_ids)) return result_ids
8a6b4f90374b90504a375c5d44a6dce6f32fb936
7,919
def power(x1, x2, out=None, where=True, dtype=None): """ First array elements raised to powers from second array, element-wise. Raises each base in `x1` to the positionally-corresponding power in `x2`. Note: Numpy arguments `casting`, `order`, `dtype`, `subok`, `signature`, and `extobj` are not supported. When `where` is provided, `out` must have a tensor value. `out` is not supported for storing the result, however it can be used in combination with `where` to set the value at indices for which `where` is set to False. On GPU, the supported dtypes are np.float16, and np.float32. Args: x1 (Tensor): the bases. x2 (Tensor): the exponents. out (Tensor or None, optional): defaults to None. where (Tensor or None, optional): For any non-default value of type other than :class:`Tensor` or :class:`None`, the output retains its original value. This condition is broadcasted over the input. At locations where the condition is `True`, the out array will be set to the ufunc result. Elsewhere, the out array will retain its original value. Note that if an uninitialized out array is created via the default ``out=None``, locations within it where the condition is `False` will remain uninitialized. dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the output Tensor. Returns: Tensor or scalar, the bases in `x1` raised to the exponents in `x2`. This is a scalar if both `x1` and `x2` are scalars. Raises: TypeError: if the input is not a tensor. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> x1 = np.full((3, 2), [1, 2]).astype('float32') >>> x2 = np.full((3, 2), [3, 4]).astype('float32') >>> output = np.power(x1, x2) >>> print(output) [[ 1, 16], [ 1, 16], [ 1, 16]] """ return _apply_tensor_op(F.tensor_pow, x1, x2, out=out, where=where, dtype=dtype)
ecc265e96c36c47177aa96797d355a55f35114d6
7,920
def _str2bool(s):
    """Convert a string to a bool."""
    return s.lower() in ["true", "t", "yes", "1"]
877340b7926541a5b8f56cb7f1acd5a54e08a987
7,921
import random


def _generate_nonce(length=42):
    """
    Generate an alpha numeric string that is unique for each request.

    Twitter used a 42 character alpha-numeric (case-sensitive) string in the API
    documentation. However, they note "any approach which produces a relatively
    random alphanumeric string should be OK here."

    I opted not to use a cryptographically secure source of entropy. `SystemRandom`
    is convenient, but it uses file IO to connect to `/dev/urandom`. Adding `async`
    machinery here seems like expensive complexity.
    """
    return "".join(random.choice(ALPHANUMERIC) for _ in range(length))
034c4fee363d47e9af98c0135159f53179d27fdd
7,922
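Usage sketch for _generate_nonce above; ALPHANUMERIC is not shown in the snippet, so the definition below is an assumption about what that module-level constant looks like.

    import string

    ALPHANUMERIC = string.ascii_letters + string.digits  # assumed module-level constant

    nonce = _generate_nonce()         # 42-character alphanumeric string
    short_nonce = _generate_nonce(8)  # 8 random alphanumeric characters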
from datetime import datetime, timezone


def is_expired(image_id: str) -> bool:
    """
    Check whether entry is expired (based on timestamp encoded in the image_id)
    """
    ts = expiration(image_id)
    now = datetime.now(timezone.utc)
    if ts is None:
        log.debug("Invalid cache entry ID: %s", image_id)
        return True
    log.debug("entry: %s (expiration ts: %s)", image_id, ts.isoformat())
    return ts < now
b5f2f08bc42a83ed7b567ab437d54c626dee8a50
7,923
def int_(value):
    """Validate that the config option is an integer.

    Automatically also converts strings to ints.
    """
    check_not_templatable(value)
    if isinstance(value, int):
        return value
    if isinstance(value, float):
        if int(value) == value:
            return int(value)
        raise Invalid(
            f"This option only accepts integers with no fractional part. Please remove the fractional part from {value}"
        )
    value = string_strict(value).lower()
    base = 10
    if value.startswith("0x"):
        base = 16
    try:
        return int(value, base)
    except ValueError:
        # pylint: disable=raise-missing-from
        raise Invalid(f"Expected integer, but cannot parse {value} as an integer")
f5d5f729a1bbec418f3b270b4e28be2cf9cc7cd6
7,924
def explode(screen):
    """Convert a string representing a screen display into a list of lists."""
    return [list(row) for row in screen.split('\n')]
a43a9d8c830c4a784bb9c3505c62aaf2077bb732
7,925
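Example of explode above on a tiny two-row screen string.

    explode("#.#\n.#.")  # -> [['#', '.', '#'], ['.', '#', '.']]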
import csv
import glob
import os


def merge_csv_files(directory, out):
    """\
    Merges the CSV files in the provided `directory` into one CSV file.

    :param str directory: Path where to find the CSV files
    :param str out: Resulting file name.
    """
    f = open(out, 'w', encoding='utf-8')
    writer = csv.writer(f)
    writerow = writer.writerow
    writerow(['URL', 'Draft Date', 'Document Number', 'Film Number', 'From', 'Subject', 'TAGS', 'To'])
    cnt = 0
    for fn in sorted(glob.glob(directory + '*.csv'),
                     key=lambda fn: int(os.path.basename(fn).split('.')[0])):
        with open(fn, 'r', encoding='utf-8') as inputfile:
            reader = csv.reader(inputfile)
            for row in reader:
                cnt += 1
                writerow(row)
    f.close()
    return cnt
4d5066b27b9977161b92061a39a6207040982b41
7,926
def basic_info(user, keys): """Prints a table of basic user information""" table = formatting.KeyValueTable(['Title', 'Basic Information']) table.align['Title'] = 'r' table.align['Basic Information'] = 'l' table.add_row(['Id', user.get('id', '-')]) table.add_row(['Username', user.get('username', '-')]) if keys: for key in user.get('apiAuthenticationKeys'): table.add_row(['APIKEY', key.get('authenticationKey')]) table.add_row(['Name', "%s %s" % (user.get('firstName', '-'), user.get('lastName', '-'))]) table.add_row(['Email', user.get('email')]) table.add_row(['OpenID', user.get('openIdConnectUserName')]) address = "%s %s %s %s %s %s" % ( user.get('address1'), user.get('address2'), user.get('city'), user.get('state'), user.get('country'), user.get('postalCode')) table.add_row(['Address', address]) table.add_row(['Company', user.get('companyName')]) table.add_row(['Created', user.get('createDate')]) table.add_row(['Phone Number', user.get('officePhone')]) if user.get('parentId', False): table.add_row(['Parent User', utils.lookup(user, 'parent', 'username')]) table.add_row(['Status', utils.lookup(user, 'userStatus', 'name')]) table.add_row(['PPTP VPN', user.get('pptpVpnAllowedFlag', 'No')]) table.add_row(['SSL VPN', user.get('sslVpnAllowedFlag', 'No')]) for login in user.get('unsuccessfulLogins', {}): login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress')) table.add_row(['Last Failed Login', login_string]) break for login in user.get('successfulLogins', {}): login_string = "%s From: %s" % (login.get('createDate'), login.get('ipAddress')) table.add_row(['Last Login', login_string]) break return table
805b8e12f531b1c6b31aeca847568cbc1b2e929c
7,927
def iobes_iob(tags):
    """
    IOBES -> IOB
    """
    new_tags = []
    for i, tag in enumerate(tags):
        if tag == 'rel':
            new_tags.append(tag)
        elif tag.split('-')[0] == 'B':
            new_tags.append(tag)
        elif tag.split('-')[0] == 'I':
            new_tags.append(tag)
        elif tag.split('-')[0] == 'S':
            new_tags.append(tag.replace('S-', 'B-'))
        elif tag.split('-')[0] == 'E':
            new_tags.append(tag.replace('E-', 'I-'))
        elif tag.split('-')[0] == 'O':
            new_tags.append(tag)
        else:
            raise Exception('Invalid format!')
    return new_tags
1a2f715edfc37b387944f84c4f149f21a0e86e74
7,928
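A small worked example of the IOBES-to-IOB conversion performed by iobes_iob above.

    tags = ['B-PER', 'E-PER', 'O', 'S-LOC', 'rel']
    iobes_iob(tags)  # -> ['B-PER', 'I-PER', 'O', 'B-LOC', 'rel']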
def draw_handler(canvas): """ Event handler that is responsible for all drawing. It receives "canvas" object and draws the "Pong" table, the "moving" ball and the scores of each "Player". It is also responsible for testing whether the ball touches/collides with the "gutters" or the "paddles". """ # These are (global) numbers; vertical "position" of # each "paddle". global paddle1_pos, paddle2_pos # These are (global) numbers; "score" of each "Player". global score1, score2 # These are vectors stored as (global) "[x,y]" lists; # ball "position" and "velocity". global ball_pos, ball_vel # This is (global) number; keeps track of the time in # "seconds". global seconds # Draw middle line and "gutters" of "Pong" table. canvas.draw_line([WIDTH / 2, 0], [WIDTH / 2, HEIGHT], LINE_WIDTH, COLOR) canvas.draw_line([PAD_WIDTH, 0], [PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR) canvas.draw_line([WIDTH - PAD_WIDTH, 0], [WIDTH - PAD_WIDTH, HEIGHT], LINE_WIDTH, COLOR) # "Postpone" the beginning of new game if "Timer" is # already running by "reseting" ball "position" at the # middle of the table. if timer.is_running(): ball_pos = [WIDTH / 2, HEIGHT / 2] # Print message about the remaining time until the # beginning of the new game by referencing the # global "seconds" counter. canvas.draw_text("new game will start in " + str(NEW_GAME_DELAY - seconds) + " seconds" + ("." * (NEW_GAME_DELAY - seconds)), [WIDTH // 12, 3 * HEIGHT // 4], 3 * FONT_SIZE // 10, COLOR, FONT_FACE) else: # "Timer" has expired; update ball "position" for # the new game. ball_pos[0] += ball_vel[0] ball_pos[1] += ball_vel[1] # Test whether the ball touches/collides with the left # "gutter" (offset from the left edge of the "canvas" # by the width of the "paddle"). if ball_pos[0] <= (BALL_RADIUS + PAD_WIDTH): # Check whether the ball is actually striking left # "paddle" when it touches left "gutter". If so, # reflect the ball back into play; ball's "velocity" # increased by the "acceleration" configured. if ((paddle1_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle1_pos + HALF_PAD_HEIGHT)): ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION else: # Ball touched "gutter". Respawn the ball in # the center of the table headed towards the # opposite "gutter" and of course update score # of "Player" 2 (right) by the "points" # configured. score2 += POINTS # Start a game of "Pong". Start also a "Timer" # to "postpone" the beginning of the new game. if not timer.is_running(): timer.start() spawn_ball(RIGHT) # Test whether the ball touches/collides with the right # "gutter" (offset from the right edge of the "canvas" # by the width of the "paddle"). elif ball_pos[0] >= ((WIDTH - 1) - BALL_RADIUS - PAD_WIDTH): # Check whether the ball is actually striking right # "paddle" when it touches right "gutter". If so, # reflect the ball back into play; ball's "velocity" # increased by the "acceleration" configured. if ((paddle2_pos - HALF_PAD_HEIGHT) <= ball_pos[1] <= (paddle2_pos + HALF_PAD_HEIGHT)): ball_vel[0] = -ball_vel[0] * BALL_VELOCITY_ACCELERATION else: # Ball touched "gutter". Respawn the ball in # the center of the table headed towards the # opposite "gutter" and of course update score # of "Player" 1 (left) by the "points" # configured. score1 += POINTS # Start a game of "Pong". Start also a "Timer" # to "postpone" the beginning of the new game. if not timer.is_running(): timer.start() spawn_ball(LEFT) # Collide and reflect off of top side of the "canvas". 
elif ball_pos[1] <= BALL_RADIUS: ball_vel[1] = -ball_vel[1] # Collide and reflect off of bottom side of the "canvas". elif ball_pos[1] >= ((HEIGHT - 1) - BALL_RADIUS): ball_vel[1] = -ball_vel[1] # Draw a ball moving across the "Pong" table. canvas.draw_circle(ball_pos, BALL_RADIUS, 2 * LINE_WIDTH, COLOR, COLOR) # Update paddle's vertical "position", by # referencing the two global variables that contain the # vertical "velocities" of the "paddle". Keep "paddle" # on the screen by calling the proper "helper" function. if keep_paddle_on_screen(paddle1_pos, paddle1_vel): paddle1_pos += paddle1_vel if keep_paddle_on_screen(paddle2_pos, paddle2_vel): paddle2_pos += paddle2_vel # Draw left and right "paddles" in their respective # "gutters". canvas.draw_polygon([[0, paddle1_pos - HALF_PAD_HEIGHT], [PAD_WIDTH, paddle1_pos - HALF_PAD_HEIGHT], [PAD_WIDTH, paddle1_pos + HALF_PAD_HEIGHT], [0, paddle1_pos + HALF_PAD_HEIGHT]], LINE_WIDTH, COLOR, COLOR) canvas.draw_polygon([[WIDTH - PAD_WIDTH, paddle2_pos - HALF_PAD_HEIGHT], [WIDTH , paddle2_pos - HALF_PAD_HEIGHT], [WIDTH, paddle2_pos + HALF_PAD_HEIGHT], [WIDTH - PAD_WIDTH, paddle2_pos + HALF_PAD_HEIGHT]], LINE_WIDTH, COLOR, COLOR) # Draw scores; # but first get the width of the "score" text in pixels # for each "Player"; useful in (later) computing the # position to draw the "score" text - centered justified # on the "canvas field" of each player. score_textwidth_in_px = frame.get_canvas_textwidth(str(score1), FONT_SIZE, FONT_FACE) score_point_x = (WIDTH // 4) - (score_textwidth_in_px // 2) score_point_y = (HEIGHT // 4) canvas.draw_text(str(score1), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE) score_textwidth_in_px = frame.get_canvas_textwidth(str(score2), FONT_SIZE, FONT_FACE) score_point_x = (3 * WIDTH // 4) - (score_textwidth_in_px // 2) score_point_y = (HEIGHT // 4) canvas.draw_text(str(score2), [score_point_x, score_point_y], FONT_SIZE, COLOR, FONT_FACE) return None
ad9d5be8bbc1eb1b1612c6bfd35cc774bcacbd7a
7,929
def tf_idf(df, vocab):
    """Compute a TF-IDF matrix for the texts in `df` restricted to `vocab`.

    https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76

    Args:
        df: dataframe with a 'text' column
        vocab: vocabulary passed to the vectorizer
    """
    docs = []
    for text in df['text'].tolist():
        docs += [text]

    vectorizer = TfidfVectorizer(tokenizer=token_list, lowercase=False, vocabulary=vocab)
    vectors = vectorizer.fit_transform(docs)

    feature_names = vectorizer.get_feature_names()
    dense = vectors.todense()
    denselist = dense.tolist()
    tfidf_matrix = pd.DataFrame(denselist, columns=feature_names)

    return tfidf_matrix
ef75e51f1d4b69dcf6a5ab908f56eff8a25852f7
7,930
import os


def setup_checkpoint_dir(cfg, args, phase):
    """Create checkpoint directory

    # ROUND2-TODO: let's make this checkpoint directory way more involved.
    Specific to user, to model, to config name, etc.
    """
    root_dir = os.path.join(
        cfg["checkpoints_dir"], cfg["model"]["name"], args.config_name
    )
    ckpt_dir = os.path.join(root_dir, "checkpoints")
    if not os.path.exists(ckpt_dir):
        if phase == "train":
            os.makedirs(ckpt_dir)
        else:
            raise FileNotFoundError("Checkpoint directory doesn't exist!")
    return ckpt_dir, root_dir
b2a753807106a9b2efee5f8a811852c5e6b764aa
7,931
def get_activation_func(activation): """Turns a string activation function name into a function. """ if isinstance(activation, string_types): # Get the activation function. activation = activation.lower() if activation == "tanh": activation_func = tanh elif activation == "abstanh": activation_func = abs_tanh elif activation in ["sig", "sigmoid"]: activation_func = sigmoid elif activation in ["rec", "relu"]: activation_func = rectify elif activation in ["prelu_shelf"]: activation_func = parametric_flat_relu elif activation == "relu_max": activation_func = rectify_max # For performance comparisons with abs version of rectify elif activation in ["rec_para", "prelu"]: activation_func = parametric_rectifier elif activation == "maxout": activation_func = maxout elif activation == "linear": activation_func = linear else: raise ValueError("Unrecognized activation: {}".format(activation)) else: activation_func = activation return activation_func
6cb6fccdacf44c3fc3fac242d69a2494459c1318
7,932
import os


def number_of_cores():
    """
    number_of_cores()

    Detect the number of cores in this system.
    """
    # Linux, Unix and MacOS:
    if hasattr(os, "sysconf"):
        if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
            # Linux & Unix:
            ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
            if isinstance(ncpus, int) and ncpus > 0:
                return ncpus
        else:
            # OSX:
            return int(os.popen2("sysctl -n hw.ncpu")[1].read())
    # Windows:
    if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
        if ncpus > 0:
            return ncpus
    return 1
ecf479f382015cffdad518a25c4334f5c44dc6fe
7,933
def create_schema(hostname='localhost', username=None, password=None, dbname=None,
                  port=None, schema_name=None):
    """Create test schema."""
    cn = create_cn(hostname, password, username, dbname, port)
    with cn.cursor() as cr:
        cr.execute('DROP SCHEMA IF EXISTS %s CASCADE' % dbname)
        cr.execute('CREATE SCHEMA %s' % dbname)
    cn.close()
    cn = create_cn(hostname, password, username, dbname, port)
    return cn
ef1b8b77ca1cd88804f0afec9efc24caad3a601d
7,934
def set_color_in_session(intent, session): """ Sets the color in the session and prepares the speech to reply to the user. """ card_title = intent['name'] session_attributes = {} should_end_session = False if 'Color' in intent['slots']: favorite_color = intent['slots']['Color']['value'] session_attributes = create_favorite_color_attributes(favorite_color) speech_output = "I now know the bus stop you are in is " + \ favorite_color + \ ". You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" reprompt_text = "You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" else: speech_output = "I'm not sure what bus stop you are in. " \ "Please try again." reprompt_text = "I'm not sure what bus stop you are in " \ "You can ask me where your bus stop is by asking, " \ "what bus stop am I on?" return build_response(session_attributes, build_speechlet_response( card_title, speech_output, reprompt_text, should_end_session))
4783aa36b18fb723441d99a8ffcdfe4b587d2a45
7,935
def fpath_to_pgn(fpath):
    """Slices the pgn string from file path."""
    return fpath.split('/')[-1].split('.jpeg')[0]
1cc6cad60c5356b6c731947a59998117bf15035a
7,936
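Usage sketch for fpath_to_pgn above with an invented file path.

    fpath_to_pgn('/data/games/e4_e5_Nf3.jpeg')  # -> 'e4_e5_Nf3'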
def convert_to_constant(num):
    """
    Convert one float argument to Constant, returning the converted object.

    :param float num: Float number to be converted to Constant
    :return: Float number converted to a Constant object
    :rtype: object
    """
    return Constant(name=str(num), units=null_dimension, value=float(num))
111ae55c1446228b23638465c75bd9fa6d3d7043
7,937
def data_zip(data):
    """
    Take input data and return a list that pairs up the sub-items, e.g.
        ([1,2,3], [4,5,6]) -> [[1,4], [2,5], [3,6]]
        {"a":[1,2],"b":[3,4]} -> [{"a":1,"b":3}, {"a":2,"b":4}]
    :param data: array,
                 tuple of data (x, y, ...),
                 or dict {"a": data1, "b": data2, ...}
    :return: list or array
    """
    if isinstance(data, tuple):
        return [list(d) for d in zip(*data)]
    if isinstance(data, dict):
        data_list = []
        keys = data.keys()
        for i in range(len(data[list(keys)[0]])):  # iterate over the data in the dict values
            data_dict = {}
            for key in keys:
                data_dict[key] = data[key][i]
            data_list.append(data_dict)
        return data_list
    return data
31dcaa3905a7d062cfe994543df31f293fdc962a
7,938
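A few examples of data_zip above, covering the tuple, dict, and pass-through cases described in its docstring.

    data_zip(([1, 2, 3], [4, 5, 6]))      # -> [[1, 4], [2, 5], [3, 6]]
    data_zip({"a": [1, 2], "b": [3, 4]})  # -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]
    data_zip([7, 8, 9])                   # plain lists are returned unchanged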
def _days_in_leap_and_common_years(i_date, f_date): """Return the a tuple with number of days in common and leap years (respectively) between initial and final dates. """ iy = i_date.year fy = f_date.year days_in_leap = 0 days_in_common = 0 if iy == fy: # same year delta = f_date - i_date if _isleap(iy): days_in_leap += delta.days else: days_in_common += delta.days elif fy - iy >= 1: # different year delta1 = i_date.replace(year = iy+1, month=1, day=1) - i_date # days in initial year delta2 = f_date - f_date.replace(month=1, day=1) # days in final year if _isleap(iy): days_in_leap += delta1.days else: days_in_common += delta1.days if _isleap(fy): days_in_leap += delta2.days else: days_in_common += delta2.days leaps_in_between = [y for y in range(iy+1, fy) if _isleap(y)] commons_in_between = [y for y in range(iy+1, fy) if not(_isleap(y))] days_in_leap += len(leaps_in_between) * 366 days_in_common += len(commons_in_between) * 365 #else: #raise InputError(expr = "Error in days_in_years(), f_date.year must be greater than i_date.year") return (days_in_leap, days_in_common)
f96b6c26fd8e87a543ffa6b6e7ed66144248d752
7,939
def make_space_kernel(data, background_kernel, trigger_kernel, time, time_cutoff=None, space_cutoff=None): """Produce a kernel object which evaluates the background kernel, and the trigger kernel based on the space locations in the data, always using the fixed time as passed in. :param data: An array of shape `(3,N)` giving the space-time locations events. Used when computing the triggered / aftershock events. :param background_kernel: The kernel object giving the background risk intensity. We assume this has a method `space_kernel` which gives just the two dimensional spacial kernel. :param trigger_kernel: The kernel object giving the trigger / aftershock risk intensity. :param time: The fixed time coordinate to evaluate at. :param time_cutoff: Optional; if set, then we assume the trigger_kernel is zero for times greater than this value (to speed up evaluation). :param space_cutoff: Optional; if set, then we assume the trigger_kernel is zero for space distances greater than this value (to speed up evaluation). :return: A kernel object which can be called on arrays of (2 dimensional space) points. """ mask = data[0] < time if time_cutoff is not None: mask = mask & (data[0] > time - time_cutoff) data_copy = _np.array(data[:, mask]) return SpaceKernel(time, background_kernel, trigger_kernel, data_copy, space_cutoff)
5c84dfb89340e52e57fb0b28464b18b0487601ea
7,940
import torch def get_dim_act_curv(args): """ Helper function to get dimension and activation at every layer. :param args: :return: """ if not args.act: act = lambda x: x else: act = getattr(F, args.act) acts = [act] * (args.num_layers - 1) dims = [args.feat_dim] # Check layer_num and hdden_dim match if args.num_layers > 1: hidden_dim = [int(h) for h in args.hidden_dim.split(',')] if args.num_layers != len(hidden_dim) + 1: raise RuntimeError('Check dimension hidden:{}, num_layers:{}'.format(args.hidden_dim, args.num_layers) ) dims = dims + hidden_dim dims += [args.dim] acts += [act] n_curvatures = args.num_layers if args.c_trainable == 1: # NOTE : changed from # if args.c is None: # create list of trainable curvature parameters curvatures = [nn.Parameter(torch.Tensor([args.c]).to(args.device)) for _ in range(n_curvatures)] else: # fixed curvature curvatures = [torch.tensor([args.c]) for _ in range(n_curvatures)] if not args.cuda == -1: curvatures = [curv.to(args.device) for curv in curvatures] return dims, acts, curvatures
072a54a3a2060598cbbe0fc89efe0be4b7cdc63f
7,941
def debug():
    """
    Function to return exported resources with types as dict.
    """
    return exported_res_dict
ab32c01534159853445a6f1959654623db93c82f
7,942
def write_charset_executable(mysql_charset_script_name, here):
    """Write to disk as an executable the file that will be used to issue the
    MySQL statements that change the character set to UTF-8 -- return the
    absolute path.
    """
    mysql_charset_script = os.path.join(here, mysql_charset_script_name)
    if not os.path.exists(mysql_charset_script):
        with open(mysql_charset_script, 'w') as f:
            pass
    os.chmod(mysql_charset_script, 0o744)  # rwxr--r--
    return mysql_charset_script
b92ea0e48ccebd267856ababcb714e736fea812c
7,943
import time


def readTemperature(file):
    """
    Returns the temperature of the one wire sensor.
    Pass in the file containing the one wire data (ds18b20+)
    """
    lines = read_temp_raw(file)
    while lines[0].strip()[-3:] != "YES":
        time.sleep(0.2)
        lines = read_temp_raw(file)
    equals_pos = lines[1].find("t=")
    if equals_pos != -1:
        temp_string = lines[1][equals_pos + 2:]
        # convert temperature to C
        temp_c = float(temp_string) / 1000.0
        return temp_c
    return -273.15
3398f287ae98df4b72ff212cdcad1764a9bbe31b
7,944
def layernorm_backward(dout, cache): """ Backward pass for layer normalization. For this implementation, you can heavily rely on the work you've done already for batch normalization. Inputs: - dout: Upstream derivatives, of shape (N, D) - cache: Variable of intermediates from layernorm_forward. Returns a tuple of: - dx: Gradient with respect to inputs x, of shape (N, D) - dgamma: Gradient with respect to scale parameter gamma, of shape (D,) - dbeta: Gradient with respect to shift parameter beta, of shape (D,) """ dx, dgamma, dbeta = None, None, None ########################################################################### # TODO: Implement the backward pass for layer norm. # # # # HINT: this can be done by slightly modifying your training-time # # implementation of batch normalization. The hints to the forward pass # # still apply! # ########################################################################### # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** x, x_norm, mu, sigma2, gamma = cache D, N = x_norm.shape x_mean0 = x - mu dgamma = sum(dout*x_norm) dbeta = sum(dout) dx_norm = dout * gamma dx_norm = dx_norm.T x_norm = x_norm.T #dsigma2 = -0.5*sum(dx_norm*x_norm/sigma2) dsigma2 = -0.5*sum(dx_norm * x_mean0)* (sigma2**-1.5) dmu = - sum(dx_norm / np.sqrt(sigma2)) - 2* dsigma2 * np.mean(x_mean0) dx = (dx_norm/np.sqrt(sigma2)) + (dsigma2*2*x_mean0/N) + (dmu/N) dx =dx.T # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** ########################################################################### # END OF YOUR CODE # ########################################################################### return dx, dgamma, dbeta
10603f4c4b78652ae466f83e511477956eae89fb
7,945
def energyAct( grid, deltaE, xA, yA, zA, xB, yB, zB, temp, eList, i, dimensions): """Perform swap or not, based on deltaE value""" kB = 8.617332e-5 # boltzmann constant, w/ ~eV units kTemp = kB * temp if deltaE <= 0: # Swap lowers energy, therefore is favourable, # so perform swap in grid grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions) eList[i + 1] = eList[i] + deltaE else: # i.e. deltaE > 0: if temp == 0: thermalEnergy = 0 else: thermalEnergy = exp((-1 * deltaE) / (kTemp)) R = randint(0, 1000) / 1000 if thermalEnergy > R: grid = performSwap(grid, xA, yA, zA, xB, yB, zB, dimensions) eList[i + 1] = eList[i] + deltaE else: eList[i + 1] = eList[i] return grid, eList
2380991200c2b3c196b1fea4c4108e82f20a979b
7,946
def lambda_handler(event, context): """ Lambda function that transforms input data and stores inital DB entry Parameters ---------- event: dict, required context: object, required Lambda Context runtime methods and attributes Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html Returns ------ Lambda Output Format: dict """ log.log_request_and_context(event, context) labeling_jobs = event["labelingJobs"] batch_id = event["batchId"] error_message = "" """ Example database entry input for batch { "BatchCurrentStep": "INPUT", "BatchId": "notebook-test-08f874a7", "BatchMetadataType": "INPUT", "BatchStatus": "INTERNAL_ERROR", "LabelingJobs": [ { "inputConfig": { "inputManifestS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/two-frame-manifest.manifest" }, "jobLevel": 1, "jobModality": "PointCloudObjectDetectionAudit", "jobName": "notebook-test-08f874a7-first-level", "jobType": "BATCH", "labelCategoryConfigS3Uri": "s3://smgt-qa-batch-input-468814823616-us-east-1/first-level-label-category-file.json", "maxConcurrentTaskCount": 1, "taskAvailabilityLifetimeInSeconds": 864000, "taskTimeLimitInSeconds": 604800, "workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level" }, { "inputConfig": { "chainFromJobName": "notebook-test-08f874a7-first-level" }, "jobLevel": 2, "jobModality": "PointCloudObjectDetectionAudit", "jobName": "notebook-test-08f874a7-second-level", "jobType": "BATCH", "maxConcurrentTaskCount": 1, "taskAvailabilityLifetimeInSeconds": 864000, "taskTimeLimitInSeconds": 604800, "workteamArn": "arn:aws:sagemaker:us-east-1:468814823616:workteam/private-crowd/first-level" } ] } """ db.insert_transformed_input_batch_metadata( batch_id=batch_id, batch_current_step=BatchCurrentStep.INPUT, batch_status=BatchStatus.IN_PROGRESS, batch_metadata_type=BatchMetadataType.INPUT, error_message=error_message, labeling_jobs=labeling_jobs, ) return { "batch_id": batch_id, }
83bba41d28a7b37da6579dfc2641380839f9d785
7,947
def getModCase(s, mod):
    """Checks the state of the shift and caps lock keys, and switches the case
    of the s string if needed."""
    if bool(mod & KMOD_RSHIFT or mod & KMOD_LSHIFT) ^ bool(mod & KMOD_CAPS):
        return s.swapcase()
    else:
        return s
4faa2963e96786c5495443382585188b3a6ff119
7,948
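A hedged usage sketch for getModCase above; the KMOD_* names are assumed to be the pygame key-modifier constants, which the surrounding module presumably imports.

    from pygame.locals import KMOD_NONE, KMOD_LSHIFT  # assumed source of the modifier constants

    getModCase("Hello", KMOD_NONE)    # -> 'Hello'
    getModCase("Hello", KMOD_LSHIFT)  # -> 'hELLO'  (shift without caps lock swaps case)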
import os def plot_D_dt_histogram(all_samples, lens_i=0, true_D_dt=None, save_dir='.'): """Plot the histogram of D_dt samples, overlaid with a Gaussian fit and truth D_dt all_samples : np.array D_dt MCMC samples """ bin_heights, bin_borders, _ = plt.hist(all_samples, bins=200, alpha=0.5, density=True, edgecolor='k', color='tab:blue', range=[0.0, 15000.0]) bin_centers = bin_borders[:-1] + np.diff(bin_borders) / 2 # Compute the mode and std for lognormal lognorm_stats = h0_utils.get_lognormal_stats(all_samples) mu = lognorm_stats['mu'] sigma = lognorm_stats['sigma'] mode = lognorm_stats['mode'] std = lognorm_stats['std'] popt = [mu, sigma] #x_interval_for_fit = np.linspace(bin_borders[0], bin_borders[-1], 10000) x_interval_for_fit = np.linspace(bin_centers[0], bin_centers[-1], 1000) # Overlay the fit gaussian pdf plt.plot(x_interval_for_fit, lognormal(x_interval_for_fit, *popt), color='k', label='fit: mode={:0.1f}, std={:0.1f}'.format(mode, std)) if save_dir is not None: if true_D_dt is not None: plt.axvline(x=true_D_dt, linestyle='--', color='red', label='truth') plt.xlabel(r'$D_{{\Delta t}}$ (Mpc)') plt.ylabel('density') plt.title(r'$D_{{\Delta t}}$ posterior for lens {0:04d}'.format(lens_i)) plt.legend() save_path = os.path.join(save_dir, 'D_dt_histogram_{0:04d}.png'.format(lens_i)) plt.savefig(save_path) plt.close() return mu, sigma
f5d8a05c3beea5e9c0b21dcde05a43ce023f728f
7,949
from pathlib import Path
from tempfile import gettempdir


def try_provider(package, provider, domain):
    """Try using a provider."""
    downloaded_file = None
    data = None
    apk_name = f'{package}.apk'
    temp_file = Path(gettempdir()) / apk_name
    link = find_apk_link(provider, domain)
    if link:
        downloaded_file = download_file(link, temp_file)
    if downloaded_file:
        data = add_apk(downloaded_file, apk_name)
    if data:
        return data
    return None
e43a6b18a5783722c148a537f21e501a1f4e5928
7,950
def _get_list(sline):
    """Takes a list of strings and converts them to floats."""
    try:
        sline2 = convert_to_float(sline)
    except ValueError:
        print("sline = %s" % sline)
        raise SyntaxError('cannot parse %s' % sline)
    return sline2
2b315b12508638fe603378fdd3127bd814f4313d
7,951
def add_number(partitions, number):
    """
    Adds to the partition provided `number` in all its combinations
    """
    # Add 1 to each list in partitions
    prods = partitions.values()
    nKeys = [(1,) + x for x in partitions.keys()]

    # apply sum_ones on each partition, and add results to partitions
    # Don't use reduce, the continuous list creation is just too slow
    # partitions = reduce(lambda acc, x: acc + sum_ones(x), partitions, [])
    newParts = []
    newProds = []
    for part, prod in zip(nKeys, prods):
        npart, nprod = sum_ones(part, prod)
        newParts.extend(npart)
        newProds.extend(nprod)

    # Remove duplicates
    return dict(zip(newParts, newProds))
cade1ddbd4002b76fc2652c59fe4f0a9bcdcc9b9
7,952
import colorsys


def _color_to_rgb(color, input):
    """Add some more flexibility to color choices."""
    if input == "hls":
        color = colorsys.hls_to_rgb(*color)
    elif input == "husl":
        color = husl.husl_to_rgb(*color)
        color = tuple(np.clip(color, 0, 1))
    elif input == "xkcd":
        color = xkcd_rgb[color]
    return color
7b3a502eba4a48dbd9b1e44d278aee58eb1ea22c
7,953
def in_days(range_in_days):
    """
    Generate time range strings between start and end date where each range is
    range_in_days days long
    :param range_in_days: number of days
    :return: list of strings with time ranges in the required format
    """
    delta = observation_period_end - observation_period_start  # timedelta
    period_starts = []
    for d in range(0, delta.days + 1, range_in_days):
        # print(observation_period_start + timedelta(days=d))
        period_starts.append(observation_period_start + timedelta(days=d))
    start_end = []
    for i, start in enumerate(period_starts[:-1]):
        start_end.append((start, period_starts[i+1] - timedelta(days=1)))
    time_periods = [start.strftime("%Y%m%d") + ":" + end.strftime("%Y%m%d")
                    for start, end in start_end]
    return time_periods
f9e77eac3fd151923cb14d05d291a0e362bd11b5
7,954
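A sketch of in_days above; the observation period bounds are module-level globals not shown in the snippet, so the dates below are assumptions chosen only to make the output concrete.

    from datetime import date, timedelta

    observation_period_start = date(2020, 1, 1)   # assumed module globals
    observation_period_end = date(2020, 1, 10)

    in_days(3)
    # -> ['20200101:20200103', '20200104:20200106', '20200107:20200109']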
from pathlib import Path
from typing import List
import re


def tags_in_file(path: Path) -> List[str]:
    """Return all tags in a file."""
    matches = re.findall(r'@([a-zA-Z1-9\-]+)', path.read_text())
    return matches
1071c22ac79f51697b2ed18896aa1d17568ecb2c
7,955
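Quick usage sketch for tags_in_file above, writing a throwaway feature file first; the file name and contents are arbitrary.

    from pathlib import Path

    p = Path('example.feature')                          # hypothetical file
    p.write_text('@smoke @slow-test\nFeature: login\n')
    tags_in_file(p)  # -> ['smoke', 'slow-test']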
import torch def pad_to_longest_in_one_batch(batch): """According to the longest item to pad dataset in one batch. Notes: usage of pad_sequence: seq_list = [(L_1, dims), (L_2, dims), ...] item.size() must be (L, dims) return (longest_len, len(seq_list), dims) Args: batch: [ (noisy_mag_1, noise_mag_1, clean_mag_1, n_frames_1), (noisy_mag_2, noise_mag_2, clean_mag_2, n_frames_2), ... ] """ noisy_mag_list = [] mask_mag_list = [] clean_mag_list = [] n_frames_list = [] for noisy_mag, mask, clean_mag, n_frames in batch: noisy_mag_list.append(torch.t(torch.tensor(noisy_mag))) # the shape of tensor is (T, F). mask_mag_list.append(torch.t(torch.tensor(mask))) clean_mag_list.append(torch.t(torch.tensor(clean_mag))) n_frames_list.append(n_frames) noisy_mag_one_batch = pad_sequence(noisy_mag_list) # the shape is (longest T, len(seq_list), F) mask_one_batch = pad_sequence(mask_mag_list) clean_mag_one_batch = pad_sequence(clean_mag_list) noisy_mag_one_batch = noisy_mag_one_batch.permute(1, 0, 2) # the shape is (len(seq_list), longest T, F) mask_one_batch = mask_one_batch.permute(1, 0, 2) clean_mag_one_batch = clean_mag_one_batch.permute(1, 0, 2) # (batch_size, longest T, F) return noisy_mag_one_batch, mask_one_batch, clean_mag_one_batch, n_frames_list
3892b6002ae0f0cb0b6ce4c29128120a8f63237a
7,956
def create_instance(c_instance):
    """ Creates and returns the Twister script """
    return Twister(c_instance)
acceb844eae8a3c9a8c734e55c4c618f990f1ae0
7,957
from typing import Type
from typing import Callable


def command_handler(command_type: Type[CommandAPI],
                    *,
                    name: str = None) -> Callable[[CommandHandlerFn], Type[CommandHandler]]:
    """
    Decorator that can be used to construct a CommandHandler from a simple
    function.

    .. code-block:: python

        @command_handler(Ping)
        def handle_ping(connection, msg):
            connection.get_base_protocol().send_pong()
    """
    if name is None:
        name = f'handle_{command_type.__name__}'

    def decorator(fn: CommandHandlerFn) -> Type[CommandHandler]:
        return type(
            name,
            (CommandHandler,),
            {
                'cmd_type': command_type,
                'handle': staticmethod(fn),
            },
        )
    return decorator
1c6634691e366b61043b2c745d43cc2371090200
7,958
def check_ip_in_lists(ip, db_connection, penalties): """ Does an optimized ip lookup with the db_connection. Applies only the maximum penalty. Args: ip (str): ip string db_connection (DBconnector obj) penalties (dict): Contains tor_penalty, vpn_penalty, blacklist_penalty keys with integer values Returns: :int: penalty_added """ penalties = {'tor': int(penalties['tor_penalty']), 'vpn': int(penalties['vpn_penalty']), 'blacklist': int(penalties['ip_blacklist_penalty'])} penalties = sorted(penalties.items(), key=lambda x: x[1]) # sort by penalty value to check in that order and perform early stopping penalty_added = 0 for penalty_type, penalty_value in penalties: if penalty_value == 0: continue if penalty_type == 'tor': if db_connection.set_exists('tor_ips', ip): penalty_added = penalty_value elif penalty_type == 'blacklist': if db_connection.set_exists('blacklist_ips', ip): penalty_added = penalty_value elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:3])): penalty_added = penalty_value elif db_connection.set_exists('blacklist_ips', '.'.join(ip.split('.')[:2])): penalty_added = penalty_value elif penalty_type == 'vpn': if db_connection.set_exists('vpn_ips', ip): penalty_added = penalty_value elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:3])): penalty_added = penalty_value elif db_connection.set_exists('vpn_ips', '.'.join(ip.split('.')[:2])): penalty_added = penalty_value if penalty_added > 0: break return penalty_added
2d6e3615d4b0d9b0fb05e7a0d03708856ffcbfef
7,959
import numpy def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=-1, go_backwards=False, mode=None, name=None, options=None, profile=False): """ This function constructs and applies a Scan op to the provided arguments. :param fn: ``fn`` is a function that describes the operations involved in one step of ``scan``. ``fn`` should construct variables describing the output of one iteration step. It should expect as input theano variables representing all the slices of the input sequences and previous values of the outputs, as well as all other arguments given to scan as ``non_sequences``. The order in which scan passes these variables to ``fn`` is the following : * all time slices of the first sequence * all time slices of the second sequence * ... * all time slices of the last sequence * all past slices of the first output * all past slices of the second otuput * ... * all past slices of the last output * all other arguments (the list given as `non_sequences` to scan) The order of the sequences is the same as the one in the list `sequences` given to scan. The order of the outputs is the same as the order of ``outputs_info``. For any sequence or output the order of the time slices is the same as the one in which they have been given as taps. For example if one writes the following : .. code-block:: python scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1]) , Sequence2 , dict(input = Sequence3, taps = 3) ] , outputs_info = [ dict(initial = Output1, taps = [-3,-5]) , dict(initial = Output2, taps = None) , Output3 ] , non_sequences = [ Argument1, Argument 2]) ``fn`` should expect the following arguments in this given order: #. ``Sequence1[t-3]`` #. ``Sequence1[t+2]`` #. ``Sequence1[t-1]`` #. ``Sequence2[t]`` #. ``Sequence3[t+3]`` #. ``Output1[t-3]`` #. ``Output1[t-5]`` #. ``Output3[t-1]`` #. ``Argument1`` #. ``Argument2`` The list of ``non_sequences`` can also contain shared variables used in the function, though ``scan`` is able to figure those out on its own so they can be skipped. For the clarity of the code we recommend though to provide them to scan. To some extend ``scan`` can also figure out other ``non sequences`` (not shared) even if not passed to scan (but used by `fn`). A simple example of this would be : .. code-block:: python import theano.tensor as TT W = TT.matrix() W_2 = W**2 def f(x): return TT.dot(x,W_2) The function is expected to return two things. One is a list of outputs ordered in the same order as ``outputs_info``, with the difference that there should be only one output variable per output initial state (even if no tap value is used). Secondly `fn` should return an update dictionary (that tells how to update any shared variable after each iteration step). The dictionary can optionally be given as a list of tuples. There is no constraint on the order of these two list, ``fn`` can return either ``(outputs_list, update_dictionary)`` or ``(update_dictionary, outputs_list)`` or just one of the two (in case the other is empty). To use ``scan`` as a while loop, the user needs to change the function ``fn`` such that also a stopping condition is returned. To do so, he/she needs to wrap the condition in an ``until`` class. The condition should be returned as a third element, for example: .. code-block:: python ... 
return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50) Note that a number of steps (considered in here as the maximum number of steps ) is still required even though a condition is passed (and it is used to allocate memory if needed). = {}): :param sequences: ``sequences`` is the list of Theano variables or dictionaries describing the sequences ``scan`` has to iterate over. If a sequence is given as wrapped in a dictionary, then a set of optional information can be provided about the sequence. The dictionary should have the following keys: * ``input`` (*mandatory*) -- Theano variable representing the sequence. * ``taps`` -- Temporal taps of the sequence required by ``fn``. They are provided as a list of integers, where a value ``k`` impiles that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. Default value is ``[0]`` Any Theano variable in the list ``sequences`` is automatically wrapped into a dictionary where ``taps`` is set to ``[0]`` :param outputs_info: ``outputs_info`` is the list of Theano variables or dictionaries describing the initial state of the outputs computed recurrently. When this initial states are given as dictionary optional information can be provided about the output corresponding to these initial states. The dictionary should have the following keys: * ``initial`` -- Theano variable that represents the initial state of a given output. In case the output is not computed recursively (think of a map) and does not require a initial state this field can be skiped. Given that only the previous time step of the output is used by ``fn`` the initial state should have the same shape as the output. If multiple time taps are used, the initial state should have one extra dimension that should cover all the possible taps. For example if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0, ``fn`` will require (by an abuse of notation) ``output[-5]``, ``output[-2]`` and ``output[-1]``. This will be given by the initial state, which in this case should have the shape (5,)+output.shape. If this variable containing the initial state is called ``init_y`` then ``init_y[0]`` *corresponds to* ``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``, ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]`` coresponds to ``output[-2]``, ``init_y[4]`` corresponds to ``output[-1]``. While this order might seem strange, it comes natural from splitting an array at a given point. Assume that we have a array ``x``, and we choose ``k`` to be time step ``0``. Then our initial state would be ``x[:k]``, while the output will be ``x[k:]``. Looking at this split, elements in ``x[:k]`` are ordered exactly like those in ``init_y``. * ``taps`` -- Temporal taps of the output that will be pass to ``fn``. They are provided as a list of *negative* integers, where a value ``k`` implies that at iteration step ``t`` scan will pass to ``fn`` the slice ``t+k``. ``scan`` will follow this logic if partial information is given: * If an output is not wrapped in a dictionary, ``scan`` will wrap it in one assuming that you use only the last step of the output (i.e. it makes your tap value list equal to [-1]). * If you wrap an output in a dictionary and you do not provide any taps but you provide an initial state it will assume that you are using only a tap value of -1. * If you wrap an output in a dictionary but you do not provide any initial state, it assumes that you are not using any form of taps. 
* If you provide a ``None`` instead of a variable or a empty dictionary ``scan`` assumes that you will not use any taps for this output (like for example in case of a map) If ``outputs_info`` is an empty list or None, ``scan`` assumes that no tap is used for any of the outputs. If information is provided just for a subset of the outputs an exception is raised (because there is no convention on how scan should map the provided information to the outputs of ``fn``) :param non_sequences: ``non_sequences`` is the list of arguments that are passed to ``fn`` at each steps. One can opt to exclude variable used in ``fn`` from this list as long as they are part of the computational graph, though for clarity we encourage not to do so. :param n_steps: ``n_steps`` is the number of steps to iterate given as an int or Theano scalar. If any of the input sequences do not have enough elements, scan will raise an error. If the *value is 0* the outputs will have *0 rows*. If the value is negative, ``scan`` will run backwards in time. If the ``go_backwards`` flag is already set and also ``n_steps`` is negative, ``scan`` will run forward in time. If n stpes is not provided, ``scan`` will figure out the amount of steps it should run given its input sequences. :param truncate_gradient: ``truncate_gradient`` is the number of steps to use in truncated BPTT. If you compute gradients through a scan op, they are computed using backpropagation through time. By providing a different value then -1, you choose to use truncated BPTT instead of classical BPTT, where you go for only ``truncate_gradient`` number of steps back in time. :param go_backwards: ``go_backwards`` is a flag indicating if ``scan`` should go backwards through the sequences. If you think of each sequence as indexed by time, making this flag True would mean that ``scan`` goes back in time, namely that for any sequence it starts from the end and goes towards 0. :param name: When profiling ``scan``, it is crucial to provide a name for any instance of ``scan``. The profiler will produce an overall profile of your code as well as profiles for the computation of one step of each instance of ``scan``. The ``name`` of the instance appears in those profiles and can greatly help to disambiguate information. :param mode: It is recommended to leave this argument to None, especially when profiling ``scan`` (otherwise the results are not going to be accurate). If you prefer the computations of one step of ``scan`` to be done differently then the entire function, you can use this parameter to describe how the computations in this loop are done (see ``theano.function`` for details about possible values and their meaning). :param profile: Flag or string. If true, or different from the empty string, a profile object will be created and attached to the inner graph of scan. In case ``profile`` is True, the profile object will have the name of the scan instance, otherwise it will have the passed string. Profile object collect (and print) information only when running the inner graph with the new cvm linker ( with default modes, other linkers this argument is useless) :rtype: tuple :return: tuple of the form (outputs, updates); ``outputs`` is either a Theano variable or a list of Theano variables representing the outputs of ``scan`` (in the same order as in ``outputs_info``). ``updates`` is a subclass of dictionary specifying the update rules for all shared variables used in scan This dictionary should be passed to ``theano.function`` when you compile your function. 
The change compared to a normal dictionary is that we validate that keys are SharedVariable and addition of those dictionary are validated to be consistent. """ # Note : see the internal documentation of the scan op for naming # conventions and all other details if options is None: options = {} rvals = scan_utils.canonical_arguments(sequences, outputs_info, non_sequences, go_backwards, n_steps) inputs, states_and_outputs_info, parameters, T = rvals # If we provided a known number of steps ( before compilation) # and if that number is 1 or -1, then we can skip the Scan Op, # and just apply the inner function once # To do that we check here to see the nature of n_steps T_value = None if isinstance(n_steps, (float, int)): T_value = int(n_steps) else: try: T_value = opt.get_scalar_constant_value(n_steps) except (TypeError, AttributeError): T_value = None if T_value in (1, -1): return one_step_scan(fn, inputs, states_and_outputs_info, parameters, truncate_gradient) # 1. Variable representing the current time step t = scalar_shared(numpy.int64(0), name='t') # 2. Allocate memory for the states of scan. mintaps = [] lengths = [] for pos, arg_info in enumerate(states_and_outputs_info): if arg_info.get('taps', None) == [-1]: mintaps.append(1) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) arg_info['initial'] = scan_utils.expand(tensor.unbroadcast( tensor.shape_padleft(arg_info['initial']), 0), T) elif arg_info.get('taps', None): if numpy.any(numpy.array(arg_info.get('taps', [])) > 0): # Make sure we do not have requests for future values of a # sequence we can not provide such values raise ValueError('Can not use future taps of outputs', arg_info) mintap = abs(numpy.min(arg_info['taps'])) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) mintaps.append(mintap) arg_info['initial'] = scan_utils.expand( arg_info['initial'][:mintap], T) else: mintaps.append(0) lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) # 3. Generate arguments for the function passed to scan. This will # function will return the outputs that need to be computed at every # timesteps inputs_slices = [input[t] for input in inputs] states_slices = [] for n, state in enumerate(states_and_outputs_info): # Check if it is actually a state and not an output if mintaps[n] != 0: for k in state['taps']: states_slices.append( state['initial'][(t + mintaps[n] + k) % lengths[n]]) # 4. Construct outputs that are to be computed by the inner # function of scan args = inputs_slices + states_slices + parameters cond, states_and_outputs, updates = \ scan_utils.get_updates_and_outputs(fn(*args)) # User is allowed to provide no information if it only behaves like a # map if (len(states_and_outputs) != len(states_and_outputs_info) and len(states_and_outputs_info) == 0): mintaps = [0] * len(states_and_outputs) # 5. Construct the scan op # 5.1 Construct list of shared variables with updates (those that # can be treated as states (i.e. 
of TensorType) and those that can not # (like Random States) if cond is not None: _cond = [cond] else: _cond = [] rvals = rebuild_collect_shared( states_and_outputs + _cond, updates=updates, rebuild_strict=True, copy_inputs_over=True, no_default_updates=False) # extracting the arguments input_variables, cloned_outputs, other_rval = rvals clone_d, update_d, update_expr, shared_inputs = other_rval additional_input_states = [] additional_output_states = [] additional_lengths = [] additional_mintaps = [] original_numeric_shared_variables = [] non_numeric_input_states = [] non_numeric_output_states = [] original_non_numeric_shared_variables = [] pos = len(lengths) for sv in shared_inputs: if sv in update_d: if isinstance(sv, (TensorVariable, TensorSharedVariable)): # We can treat it as a sit sot nw_state = scan_utils.expand( tensor.unbroadcast(tensor.shape_padleft(sv), 0), T) additional_lengths.append(scalar_shared(numpy.int64(0), name='l%d' % pos)) pos = pos + 1 additional_mintaps.append(1) additional_input_states.append(nw_state) additional_output_states.append( scan_utils.clone(tensor.set_subtensor( nw_state[(t + 1) % additional_lengths[-1]], update_d[sv]))) original_numeric_shared_variables.append(sv) else: non_numeric_input_states.append(sv) non_numeric_output_states.append(update_d[sv]) original_non_numeric_shared_variables.append(sv) # Replace shared variables in the update _additional_output_states = [] replace = {} for sv, buf in zip(original_numeric_shared_variables, additional_input_states): replace[sv] = buf[t] for out in additional_output_states: _additional_output_states.append( scan_utils.clone(out, replace=replace)) additional_output_states = _additional_output_states # 5.2 Collect inputs/outputs of the inner function inputs = [] outputs = [] for n, mintap in enumerate(mintaps): if mintap != 0: input_state = states_and_outputs_info[n]['initial'] inputs.append(input_state) outputs.append( tensor.set_subtensor( input_state[(t + mintap) % lengths[n]], states_and_outputs[n])) else: mem_buffer = scan_utils.allocate_memory( T, states_and_outputs_info[n], states_and_outputs[n]) inputs.append(output) outputs.append( tensor.set_subtensor(output[t % lengths[n]], states_and_outputs[n])) inputs.extend(additional_input_states) outputs.extend(additional_output_states) lengths.extend(additional_lengths) mintaps.extend(additional_mintaps) inputs.extend(non_numeric_input_states) outputs.extend(non_numeric_output_states) all_other_inputs = gof.graph.inputs(outputs) parameters = [x for x in all_other_inputs if (x not in inputs and x not in lengths and x is not t and isinstance(x, gof.Variable) and not isinstance(x, gof.Constant))] inputs.extend(parameters) # 5.3 Construct the the options dictionary options['name'] = name options['profile'] = profile options['mode'] = mode options['inplace'] = False options['gpu'] = False options['truncate_gradient'] = truncate_gradient options['hash_inner_graph'] = 0 # 5.4 Construct the ScanOp instance local_op = scan_op.ScanOp(inputs=inputs, outputs=outputs, lengths=lengths, switches=[], mintaps=mintaps, index=t, options=options, as_repeatUntil=cond) # Note that we get here all the outputs followed by the update rules to # the shared variables we had in our scan # we know that we have (in this given order): # * len(states_and_outputs) real outputs # * len(additional_input_states) updates for numeric shared variable # * len(non_numeric_input_states) updates for non numeric shared # variables scan_inputs = [T] + inputs scan_outputs_update_rules = 
scan_utils.to_list(local_op(*scan_inputs)) # 5.5 Collect outputs and add permutation object scan_outputs = [] for pos in xrange(len(states_and_outputs)): out = scan_utils.ScanPermutation(mintaps[pos])( scan_outputs_update_rules[pos], t) scan_outputs.append(out[mintaps[pos]:]) # 5.6 Construct updates dictionary update_rules = scan_outputs_update_rules[len(states_and_outputs):] updates = {} for v, u in izip(original_numeric_shared_variables, update_rules[:len(additional_input_states)]): updates[v] = u[-1] for v, u in izip(original_non_numeric_shared_variables, update_rules[len(additional_input_states):]): updates[v] = u # Step 5.7 We are done and can return everything back to the user return scan_outputs, updates
7ac0b7d106bc2e1642827a2e9f79552eb418a918
7,960
def stock_analyst(stock_list):
    """Accepts a list of stock prices (P) and outputs the best day to buy (B) and sell (S) stock.

    Args:
        stock_list: a list of stock prices, one per day

    Returns:
        A string prompting to buy stock first if the minimum price is not greater than 1,
        otherwise a list [B, S] with the day to buy at the minimum price and the best
        later day to sell at the maximum price.
    """
    B = stock_list.index(min(stock_list))
    buy_value = min(stock_list)
    sell_value = -1

    if buy_value > 1:
        for sell_indx in range(B, len(stock_list)):
            if sell_value < stock_list[sell_indx]:
                sell_value = stock_list[sell_indx]
                S = sell_indx
    else:
        return 'Buy stock first'
    return [B, S]
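A quick illustrative call with a made-up price list, consistent with the behaviour described above:

prices = [7, 2, 5, 3, 6, 4]
stock_analyst(prices)        # -> [1, 4]: buy at 2 on day 1, sell at 6 on day 4
stock_analyst([5, 1, 4, 3])  # -> 'Buy stock first' (minimum price is not greater than 1)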
bbb3cd664ba0ea366e8ad6fa369ae3259cf52a02
7,961
def is_sync_available(admin_id):
    """Method to check whether synchronization is available over the network connection.

    Args:
        admin_id (str):    Admin privileges flag.
    """

    return r_synchronizer.is_sync_available()
712becdc6c9903d41e3d29602bb7dc07987c6867
7,962
import collections


def on_second_thought(unsorted_string, divider):
    """Sort the characters of ``unsorted_string`` by how many times they appear in the text
    (most frequent first), drop duplicates, and return the characters that come before
    ``divider`` as a string.
    """
    unsorted_list = list(unsorted_string)

    # characters' occurrence count determines the order
    occurence = collections.Counter(unsorted_list)

    # sort by character frequency in descending order
    occurences_list = sorted(unsorted_list, key=occurence.get, reverse=True)

    # already sorted, duplicates would provide no value
    reduced_list = list(collections.OrderedDict.fromkeys(occurences_list))

    divider_position = reduced_list.index(divider)

    # everything behind (and including) the divider is irrelevant
    return ''.join(reduced_list[:divider_position])
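A small worked example for the two-argument form above (the input string is made up for illustration):

# 'a' occurs 5 times, 'b' and 'r' twice, 'c' and 'd' once; after deduplication the
# frequency order is a, b, r, c, d, so only the characters before the divider 'r' are kept
on_second_thought("abracadabra", "r")   # -> 'ab'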
1295a67cf1ebce0f79e49566306eba6add1f2e35
7,963
def aiohttp_unused_port(loop, aiohttp_unused_port, socket_enabled): """Return aiohttp_unused_port and allow opening sockets.""" return aiohttp_unused_port
9c5d0c1125a7758be2e07a8f8aca6676429a841a
7,964
import os def os_specific_command_line(command_line): """ Gets the operating system specific command string. :param command_line: command line to execute. :type command_line: str """ current_os = os.environ["TEMPLATE_OS"] command = "/bin/bash -c '{}'" if current_os.lower() == "linux" else "cmd.exe /c \"{}\"" return command.format(command_line)
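A usage sketch; the TEMPLATE_OS values below are only illustrative:

os.environ["TEMPLATE_OS"] = "Linux"
os_specific_command_line("echo hello")   # -> "/bin/bash -c 'echo hello'"

os.environ["TEMPLATE_OS"] = "Windows"
os_specific_command_line("echo hello")   # -> 'cmd.exe /c "echo hello"'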
898b97a57841af3c671bf530c6a31460bd1882a7
7,965
def font_variant(name, tokens): """Expand the ``font-variant`` shorthand property. https://www.w3.org/TR/css-fonts-3/#font-variant-prop """ return expand_font_variant(tokens)
8bac3f0610c7951686504fd843c845d124f34ed6
7,966
def with_part_names(*part_names): """Add part names for garage.parts.assemble. Call this when you want to assemble these parts but do not want them to be passed to main. """ return lambda main: ensure_app(main).with_part_names(*part_names)
c62d495c2259139b1a9079697bcf784d12e6f9c2
7,967
def library_view(request): """View for image library.""" if request.user.is_authenticated: the_user = request.user albums = Album.objects.filter(user=the_user) context = {'the_user': the_user, 'albums': albums} return render(request, 'imager_profile/library.html', context)
948d239decfacabf5b3bba05e10739c7856db609
7,968
def combine_html_json_pbp(json_df, html_df, game_id, date): """ Join both data sources. First try merging on event id (which is the DataFrame index) if both DataFrames have the same number of rows. If they don't have the same number of rows, merge on: Period', Event, Seconds_Elapsed, p1_ID. :param json_df: json pbp DataFrame :param html_df: html pbp DataFrame :param game_id: id of game :param date: date of game :return: finished pbp """ # Don't need those columns to merge in json_df = json_df.drop(['p1_name', 'p2_name', 'p2_ID', 'p3_name', 'p3_ID'], axis=1) try: html_df.Period = html_df.Period.astype(int) # If they aren't equal it's usually due to the HTML containing a challenge event if html_df.shape[0] == json_df.shape[0]: json_df = json_df[['period', 'event', 'seconds_elapsed', 'xC', 'yC']] game_df = pd.merge(html_df, json_df, left_index=True, right_index=True, how='left') else: # We always merge if they aren't equal but we check if it's due to a challenge so we can print out a better # warning message for the user. # NOTE: May be slightly incorrect. It's possible for there to be a challenge and another issue for one game. if'CHL' in list(html_df.Event): shared.print_warning("The number of columns in the Html and Json pbp are different because the" " Json pbp, for some reason, does not include challenges. Will instead merge on " "Period, Event, Time, and p1_id.") else: shared.print_warning("The number of columns in the Html and json pbp are different because " "someone fucked up. Will instead merge on Period, Event, Time, and p1_id.") # Actual Merging game_df = pd.merge(html_df, json_df, left_on=['Period', 'Event', 'Seconds_Elapsed', 'p1_ID'], right_on=['period', 'event', 'seconds_elapsed', 'p1_ID'], how='left') # This is always done - because merge doesn't work well with shootouts game_df = game_df.drop_duplicates(subset=['Period', 'Event', 'Description', 'Seconds_Elapsed']) except Exception as e: shared.print_warning('Problem combining Html Json pbp for game {}'.format(game_id, e)) return game_df['Game_Id'] = game_id[-5:] game_df['Date'] = date return pd.DataFrame(game_df, columns=pbp_columns)
4f2aa3948fea4f64ac996f4052101daa556d1038
7,969
import json def read_json_file(path): """ Given a line-by-line JSON file, this function converts it to a Python dict and returns all such lines as a list. :param path: the path to the JSON file :returns items: a list of dictionaries read from a JSON file """ items = list() with open(path, 'r') as raw_data: for line in raw_data: line = json.loads(line) items.append(line) return items
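For illustration, assuming a hypothetical file events.jsonl with one JSON object per line:

# events.jsonl:
#   {"id": 1, "type": "click"}
#   {"id": 2, "type": "view"}
read_json_file("events.jsonl")
# -> [{'id': 1, 'type': 'click'}, {'id': 2, 'type': 'view'}]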
15f898faca0dff0ca4b6c73ff31e037d822cf273
7,970
def get_boss_wage2(employee): """ Monadic version. """ return bind3(bind3(unit3(employee), Employee.get_boss), Employee.get_wage)
60524cc219a1c4438d310382aff519ee8ef5a66b
7,971
def keypoint_angle(kp1, kp2):
    """Compute the angle between two keypoints.

    Angles of 180 degrees or more are folded back by 180 before taking the
    absolute difference.
    """
    k = [
        (kp1.angle - 180) if kp1.angle >= 180 else kp1.angle,
        (kp2.angle - 180) if kp2.angle >= 180 else kp2.angle
    ]

    if k[0] == k[1]:
        return 0
    else:
        return abs(k[0] - k[1])
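A minimal sketch: the function only reads the .angle attribute, so a lightweight stand-in for cv2.KeyPoint is enough here:

from collections import namedtuple

KP = namedtuple("KP", "angle")          # stand-in for cv2.KeyPoint
keypoint_angle(KP(190.0), KP(30.0))     # 190 folds back to 10, so abs(10 - 30) -> 20.0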
3feee667bcf767656da6334727b8d502be41d909
7,972
def get_args_static_distribute_cells():
    """
    Distribute ranges of cells across workers.
    :return: list of lists
    """
    pop_names_list = []
    gid_lists = []
    for pop_name in context.pop_names:
        count = 0
        # materialize the keys so they can be sliced (dict_keys is not subscriptable on Python 3)
        gids = list(context.spike_trains[pop_name].keys())
        while count < len(gids):
            pop_names_list.append(pop_name)
            gid_lists.append(gids[count:count+context.gid_block_size])
            count += context.gid_block_size

    return [pop_names_list, gid_lists]
42862d47533a8662b26c9875e5f62ceebb91ccec
7,973
import torch def le(input, other, *args, **kwargs): """ In ``treetensor``, you can get less-than-or-equal situation of the two tree tensors with :func:`le`. Examples:: >>> import torch >>> import treetensor.torch as ttorch >>> ttorch.le( ... torch.tensor([[1, 2], [3, 4]]), ... torch.tensor([[1, 1], [4, 4]]), ... ) tensor([[ True, False], [ True, True]]) >>> ttorch.le( ... ttorch.tensor({ ... 'a': [[1, 2], [3, 4]], ... 'b': [1.0, 1.5, 2.0], ... }), ... ttorch.tensor({ ... 'a': [[1, 1], [4, 4]], ... 'b': [1.3, 1.2, 2.0], ... }), ... ) <Tensor 0x7ff363bc6198> ├── a --> tensor([[ True, False], │ [ True, True]]) └── b --> tensor([ True, False, True]) """ return torch.le(input, other, *args, **kwargs)
fa96a544f7f449daf008c6cf9b68f3760de67487
7,974
import numpy


def linear_to_srgb(data):
    """Convert linear color data to sRGB.

    Accessed from https://entropymine.com/imageworsener/srgbformula

    Parameters
    ----------
    data: :class:`numpy.ndarray`, required
        Array of any shape containing linear data to be converted to sRGB.

    Returns
    -------
    converted: :class:`numpy.ndarray`
        Array with the same shape as `data` containing values in sRGB space.
    """
    return numpy.where(data <= 0.0031308, data * 12.92, 1.055 * numpy.power(data, 1 / 2.4) - 0.055)
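A quick numeric check of both branches of the transfer function (values are approximate):

linear_to_srgb(numpy.array([0.0, 0.002, 0.5, 1.0]))
# -> approximately [0.0, 0.02584, 0.7354, 1.0]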
01eae0f9a34204498aad86e3a0a38337c7ced919
7,975
def circuit_to_dagdependency(circuit): """Build a ``DAGDependency`` object from a ``QuantumCircuit``. Args: circuit (QuantumCircuit): the input circuits. Return: DAGDependency: the DAG representing the input circuit as a dag dependency. """ dagdependency = DAGDependency() dagdependency.name = circuit.name for register in circuit.qregs: dagdependency.add_qreg(register) for register in circuit.cregs: dagdependency.add_creg(register) for operation, qargs, cargs in circuit.data: dagdependency.add_op_node(operation, qargs, cargs) dagdependency._add_successors() return dagdependency
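A minimal usage sketch, assuming Qiskit is available:

from qiskit import QuantumCircuit

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
dag = circuit_to_dagdependency(qc)   # DAGDependency with one node per instruction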
7356da47f3af1088226af765f83cd43413de0a1f
7,976
def tweets_factory(fixtures_factory): """Factory for tweets from YAML file""" def _tweets_factory(yaml_file): all_fixtures = fixtures_factory(yaml_file) return [t for t in all_fixtures if isinstance(t, TweetBase)] return _tweets_factory
19d6e7ffe57ec071d324d535458c2263496c109d
7,977
def monte_carlo(ds,duration,n,pval,timevar):
    """Monte Carlo estimate of percentile bounds for the time mean of random windows.

    ds: 3-D data array (time, y, x) with a time coordinate named ``timevar``
    duration: window length in time steps averaged for each random draw
    n: number of random windows to draw
    pval: two-tailed p-value, given in percent (used for the lower and upper percentiles)
    timevar: name of the time dimension
    """
    x=0
    mc = np.empty([ds.shape[1],ds.shape[2],n])
    while x<n:
        dummy = np.random.randint(0, len(ds[timevar])-duration, size=1)
        # have to adjust size so total number of points is always the same
        mc[:,:,x] = ds[int(dummy):int(dummy+duration),::].mean(timevar)
        x=x+1

    # derive percentile
    perc_upper = np.nanpercentile(mc,100-pval,axis=2)
    perc_lower = np.nanpercentile(mc,pval,axis=2)

    return perc_lower,perc_upper
040aaf1a6a6813095079262446a2226fec8948ee
7,978
def num_crl(wf_n):
    """Turn a real-valued wave-function array into a complex array over time.

    The first row of ``wf_n`` is the time vector; the remaining rows hold the real
    and imaginary parts of the wave function, interleaved.

    Args:
       wf_n(numpy array): Wave function over time, with the time vector as the first row.

    Returns:
       numpy array, complex: The complex wave function over time.
       numpy array: The time vector.
    """
    # setting up the time vector and deleting it from array
    time_vc = np.zeros([len(wf_n[0])])
    time_vc = wf_n[0]
    wf_n = np.delete(wf_n, [0], axis=0)
    # the length of the vector
    t_wf = len(wf_n[0])
    p_wf = len(wf_n[:, 0])
    # turning array into complex
    comp_vc = np.zeros([p_wf, t_wf], dtype=np.complex_)

    for n in range(p_wf):
        comp_vc[:, n] = wf_n[n * 2] + wf_n[1 + n * 2] * 1j

    return comp_vc, time_vc
fc84cd7184fb04f2725b50439d9b5cfe223e2020
7,979
def resample_time_series(series, period="MS"):
    """
    Resample and interpolate a time series so we have one row per time period
    (useful for FFT)

    Parameters
    ----------
    series: Series
        pandas Series with a date index

    period: string
        Period for resampling (default "MS", month start)

    Returns
    -------
    Series:
        pandas Series with datetime index and one row per period, gaps interpolated
    """
    # give the series a date index if the DataFrame is not index by date already
    # if df.index.name != 'date':
    #     series.index = df.date

    # just in case the index isn't already datetime type
    series.index = pd.to_datetime(series.index)
    # resample to get one row per time period
    rseries = series.resample(period).mean()

    new_series = rseries.interpolate()

    return new_series
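A short sketch of the default monthly behaviour (dates are made up):

import pandas as pd

s = pd.Series([1.0, 3.0], index=pd.to_datetime(["2020-01-15", "2020-03-15"]))
resample_time_series(s)
# 2020-01-01    1.0
# 2020-02-01    2.0   <- interpolated
# 2020-03-01    3.0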
2e3d2b1cbe4a8a0cc13c33c19fe217364819e31d
7,980
def _timeliness_todo(columns, value, df, dateFormat=None, timeFormat=None): """ Returns what (columns, as in spark columns) to compute to get the results requested by the parameters. :param columns: :type columns: list :param value :type value: str :param df: :type df: DataFrame :param dateFormat: :type dateFormat: str :param timeFormat: :type timeFormat: str :return: Pyspark columns representing what to compute. """ assert (dateFormat is None or timeFormat is None) and ( not dateFormat is None or not timeFormat is None), "Pass either a dateFormat or a timeFormat, " \ "not both. " todo = [] types = dict(df.dtypes) if dateFormat: value_date = to_date(lit(value), dateFormat) for c in columns: if types[c] == "timestamp" or types[c] == "date": todo.append(sum(when(datediff(value_date, c) > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": todo.append(sum(when(datediff(value_date, to_date(c, dateFormat)) > 0, 1).otherwise(0)).alias(c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp, " "date or string, if the metric is being run on dateFormat.") exit() elif timeFormat: value_long = to_timestamp(lit(value), timeFormat).cast("long") # check if value contains a date and not only hours, minutes, seconds has_date = _contains_date(timeFormat) if has_date: for c in columns: if types[c] == "timestamp": todo.append(sum(when(value_long - col(c).cast("long") > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": todo.append( sum(when(value_long - to_timestamp(col(c), timeFormat).cast("long") > 0, 1).otherwise(0)).alias( c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp or string, if " "the metric is being run on a timeFormat") exit() else: for c in columns: if types[c] == "timestamp": """ If there is no years, months, days we must ignore the years, months, days in the timestamp. """ value_long = to_timestamp(lit(value), timeFormat) # remove years, months, days value_long = value_long.cast("long") - value_long.cast("date").cast("timestamp").cast("long") # check for difference, but only considering hours, minutes, seconds todo.append(sum( when( value_long - (col(c).cast("long") - col(c).cast("date").cast("timestamp").cast("long")) > 0, 1).otherwise(0)).alias(c)) elif types[c] == "string": """ If there are no years, months, days and the column is in the same format, meaning that it also has no years, months, days, this means that they will be both initialized to the same year, month, day; so years, months, days will be basically ignored. """ todo.append( sum(when((value_long - to_timestamp(c, timeFormat).cast("long")) > 0, 1).otherwise(0)).alias(c)) else: print( "Type of a column on which the timeliness metric is run must be either timestamp or string, if " "the metric is being run on a timeFormat") exit() return todo
ed3cc27179faff77323deb6c26822dc93cdc4fd4
7,981
from typing import Optional def build_arm( simulator, n_elem:int=11, override_params:Optional[dict]=None, attach_head:bool=None, # TODO: To be implemented attach_weight:Optional[bool]=None, # TODO: To be implemented ): """ Import default parameters (overridable) """ param = _OCTOPUS_PROPERTIES.copy() # Always copy parameter for safety if isinstance(override_params, dict): param.update(override_params) """ Import default parameters (non-overridable) """ arm_scale_param = _DEFAULT_SCALE_LENGTH.copy() """ Set up an arm """ L0 = arm_scale_param['base_length'] r0 = arm_scale_param['base_radius'] arm_pos = np.array([0.0, 0.0, 0.0]) arm_dir = np.array([1.0, 0.0, 0.0]) normal = np.array([0.0, 0.0, 1.0]) rod = CosseratRod.straight_rod( n_elements=n_elem, start=arm_pos, direction=arm_dir, normal=normal, **arm_scale_param, **param, ) simulator.append(rod) """Add gravity forces""" _g = -9.81 gravitational_acc = np.array([0.0, 0.0, _g]) simulator.add_forcing_to(rod).using( GravityForces, acc_gravity=gravitational_acc ) """Add friction forces (always the last thing before finalize)""" contact_k = 1e2 # TODO: These need to be global parameter to tune contact_nu = 1e1 period = 2.0 origin_plane = np.array([0.0, 0.0, -r0]) slip_velocity_tol = 1e-8 froude = 0.1 mu = L0 / (period * period * np.abs(_g) * froude) if param['friction_symmetry']: kinetic_mu_array = np.array( [mu, mu, mu] ) * param['friction_multiplier'] # [forward, backward, sideways] else: kinetic_mu_array = np.array( [mu, 1.5 * mu, 2.0 * mu] ) * param['friction_multiplier'] # [forward, backward, sideways] static_mu_array = 2 * kinetic_mu_array simulator.add_forcing_to(rod).using( AnisotropicFrictionalPlane, k=contact_k, nu=contact_nu, plane_origin=origin_plane, plane_normal=normal, slip_velocity_tol=slip_velocity_tol, static_mu_array=static_mu_array, kinetic_mu_array=kinetic_mu_array, ) return rod
891132a4c133f0d145af5f85c0399954fccc450e
7,982
def check_user(user, pw, DB): """ Check if user exists and if password is valid. Return the user's data as a dict or a string with an error message. """ userdata = DB.get(user) if not userdata: log.error("Unknown user: %s", user) return "Unknown user: %s" % user elif userdata.get(C.Password) != pw: log.error("Invalid password!") return "Invalid password!" return userdata
21e8c56c0f747bd105cec31e1cb5aea348b4af44
7,983
def get_single_response_value(dom_response_list: list, agg_function): """ Get value of a single scenario's response. :param dom_response_list: Single response provided as a list of one term. :param agg_function: Function to aggregate multiple responses. :return: Value of such observation. """ response_list = extract_list_from_dom(dom_object=dom_response_list[0], tag_name='Observation', attribute_name='Value') if len(response_list) == 0: response_value = np.NaN else: try: response_value = agg_function([float(item) for item in response_list]) except TypeError: response_value = np.NaN return response_value
7a7ef4f24a720a4611c48123061886b6bdb9f2f5
7,984
def sharpe_ratio(returns, periods=252): """ Create the Sharpe ratio for the strategy, based on a benchmark of zero (i.e. no risk-free rate information). Args: returns (list, Series) - A pandas Series representing period percentage returns. periods (int.) Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc. Returns: float. The result """ return np.sqrt(periods) * (np.mean(returns)) / np.std(returns)
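An illustrative call on made-up daily returns:

import numpy as np

daily_returns = np.array([0.001, -0.002, 0.003, 0.0005, 0.002])
sharpe_ratio(daily_returns)             # annualised assuming the default 252 daily periods
sharpe_ratio(daily_returns, 252 * 6.5)  # hourly data would instead use 252 * 6.5 periods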
b06ef19c5512370ff98217f7fb565c25846b697e
7,985
def is_validated(user): """Is this user record validated?""" # An account is "validated" if it has the `validated` field set to True, or # no `validated` field at all (for accounts created before the "account # validation option" was enabled). return user.get("validated", True)
c1ddfc52a62e71a68798dc07e7576a4ae42aa17f
7,986
def current_chart_provider_monthly(): """ API for monthly provider chart """ mytime = dubwebdb.CTimes("%Y-%m", request.args.get('time_start'), request.args.get('time_end')) myids = dubwebdb.Ids(prv_id=sanitize_list(request.args.get('prvid')), team_id=sanitize_list(request.args.get('teamid')), project_id=request.args.get('prjid'), div_id=None) csv_only = request.args.get('dl_csv') if csv_only: myrows = dubwebdb.get_data_budget_provider(mytime, myids) return convert_to_download_csv(myrows) else: return dubwebdb.get_data_provider(mytime, myids, add_budget=True)
36f67b0323be8fc136175fa4f9fb4819b40ebb94
7,987
def create_round_meander(radius, theta=0, offset=Point()): """ Returns a single period of a meandering path based on radius and angle theta """ deg_to_rad = 2 * pi / 360 r = radius t = theta * deg_to_rad # The calculation to obtain the 'k' coefficient can be found here: # http://itc.ktu.lt/itc354/Riskus354.pdf # "APPROXIMATION OF A CUBIC BEZIER CURVE BY CIRCULAR ARCS AND VICE VERSA" # by Aleksas Riskus k = 0.5522847498 # the control points need to be shortened relative to the angle by this factor j = 2*t/pi path = "m %s,%s " % (-2*r*cos(t)-offset.x, -offset.y) path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),-k*r*j*cos(t), -(r-r*cos(t)),-r*sin(t)+r*k*j, -(r-r*cos(t)),-r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (0,-k*r, r-k*r,-r, r,-r) path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,r-k*r, r,r) path += "c %s,%s %s,%s %s,%s " % (0,k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),r*sin(t)-r*k*j*cos(t), -r+r*cos(t),r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (-k*r*j*sin(t),k*r*j*cos(t), -(r-r*cos(t)),r*sin(t)-r*k*j, -(r-r*cos(t)),r*sin(t)) path += "c %s,%s %s,%s %s,%s " % (0,k*r, r-k*r,r, r,r) path += "c %s,%s %s,%s %s,%s " % (k*r,0, r,-r+k*r, r,-r) path += "c %s,%s %s,%s %s,%s " % (0,-k*r*j, -(r-r*cos(t)-k*r*j*sin(t)),-r*sin(t)+r*k*j*cos(t), -r+r*cos(t),-r*sin(t)) return path
c4379ef8f16486e9cdbd3353c5458a6c9523bb2d
7,988
from typing import Dict from typing import Any import torch import sys from pathlib import Path import socket def get_env_info() -> Dict[str, Any]: """Get the environment information.""" return { "k2-version": k2.version.__version__, "k2-build-type": k2.version.__build_type__, "k2-with-cuda": k2.with_cuda, "k2-git-sha1": k2.version.__git_sha1__, "k2-git-date": k2.version.__git_date__, "lhotse-version": lhotse.__version__, "torch-cuda-available": torch.cuda.is_available(), "torch-cuda-version": torch.version.cuda, "python-version": sys.version[:3], "icefall-git-branch": get_git_branch_name(), "icefall-git-sha1": get_git_sha1(), "icefall-git-date": get_git_date(), "icefall-path": str(Path(__file__).resolve().parent.parent), "k2-path": str(Path(k2.__file__).resolve()), "lhotse-path": str(Path(lhotse.__file__).resolve()), "hostname": socket.gethostname(), "IP address": socket.gethostbyname(socket.gethostname()), }
154b233e9f8bc6b16b5540a44df4003b8779d28e
7,989
import pickle def load_config(path): """Loads the config dict from a file at path; returns dict.""" with open(path, "rb") as f: config = pickle.load(f) return config
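A round-trip sketch with a hypothetical config dict:

import pickle

cfg = {"lr": 0.001, "epochs": 10}
with open("config.pkl", "wb") as f:
    pickle.dump(cfg, f)
load_config("config.pkl")   # -> {'lr': 0.001, 'epochs': 10}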
eb12aed2ebdeebacf3041f3e4880c714f99c052c
7,990
import datetime


def _check_year(clinicaldf: pd.DataFrame, year_col: int, filename: str,
                allowed_string_values: list = []) -> str:
    """Check year columns

    Args:
        clinicaldf: Clinical dataframe
        year_col: YEAR column
        filename: Name of file
        allowed_string_values: list of other allowed string values

    Returns:
        Error message
    """
    error = ''
    if process_functions.checkColExist(clinicaldf, year_col):
        # Deal with pre-redacted values and other allowed strings
        # first because can't int(text) because there are
        # instances that have <YYYY
        year_series = clinicaldf[year_col][
            ~clinicaldf[year_col].isin(allowed_string_values)
        ]
        year_now = datetime.datetime.utcnow().year
        try:
            years = year_series.apply(
                lambda x: datetime.datetime.strptime(
                    str(int(x)), '%Y').year > year_now
            )
            # Make sure that none of the years are greater than the current
            # year.  It can be the same, but can't be future years.
            assert not years.any()
        except Exception:
            error = (f"{filename}: Please double check your {year_col} "
                     "column, it must be an integer in YYYY format "
                     f"<= {year_now}")
            # Tack on allowed string values
            if allowed_string_values:
                error += " or '{}'.\n".format(
                    "', '".join(allowed_string_values)
                )
            else:
                error += ".\n"
    else:
        error = f"{filename}: Must have {year_col} column.\n"

    return error
c1630b4196733baa6ef12db2990243b1052d01d5
7,991
def lower_strings(string_list): """ Helper function to return lowercase version of a list of strings. """ return [str(x).lower() for x in string_list]
58dcaccbc0f4ce8f22d80922a3ac5da26d7f42b1
7,992
def AAprime():
    """Return A'A, the product of the transpose of the module-level ATable with ATable."""
    aprimeA = dot(transpose(ATable), ATable)
    # Aaprime = dot(ATable1, ATable)
    return aprimeA
f47d4df43ebcb8348e4a6fd4234b38bd18e92199
7,993
import os


def normalize_group_path(group, suffix=None):
    """Normalize a group path to an absolute form, optionally ensuring it ends with a suffix.

    :param group: group path to normalize
    :param suffix: optional suffix the returned path should end with
    :return: absolute group path, with ``suffix`` appended if it was missing
    """
    group = os.path.join('/', group)

    if suffix is not None:
        if not group.endswith(suffix):
            group = os.path.join(group, suffix.rstrip('/'))

    return group
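Two illustrative calls (the paths are made up):

normalize_group_path("data/run1", suffix="raw")       # -> '/data/run1/raw'
normalize_group_path("/data/run1/raw", suffix="raw")  # -> '/data/run1/raw' (already normalized)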
31b0ef7eb808dce8ea51a0f4edbaec61e5c5cc2c
7,994
import _pickle import _io def _load_dataset(data_filename_or_set, comm, verbosity): """Loads a DataSet from the data_filename_or_set argument of functions in this module.""" printer = _baseobjs.VerbosityPrinter.create_printer(verbosity, comm) if isinstance(data_filename_or_set, str): if comm is None or comm.Get_rank() == 0: if _os.path.splitext(data_filename_or_set)[1] == ".pkl": with open(data_filename_or_set, 'rb') as pklfile: ds = _pickle.load(pklfile) else: ds = _io.read_dataset(data_filename_or_set, True, "aggregate", printer) if comm is not None: comm.bcast(ds, root=0) else: ds = comm.bcast(None, root=0) else: ds = data_filename_or_set # assume a Dataset object return ds
9c3a26a36202b4e8f35c795b7817d3fde8900a0b
7,995
import re def parse_contact_name(row, name_cols, strict=False, type='person'): """Parses a person's name with probablepeople library Concatenates all the contact name columns into a single string and then attempts to parse it into standardized name components and return a subset of the name parts that are useful for comparing contacts. This process eliminates notes and other non-name text from dirty data. Args: row (pd.Series): A record name_cols (list): A list of column names in the record, in order, that when concatenated comprise a person's name strict (boolean, optional): Whether or not to raise a RepeatedLabelError when parsing, if False, the last value of the repeated labels will be used for the parse type (str): Which probableparser to use: 'generic', 'person' or 'company' Returns: A subset (tuple of str, or np.nan) of the standardized name components, namely: (title, first, last, full_name) """ row = row.fillna('') concat = [] for col in name_cols: concat.append(row.get(col, '')) concat = ' '.join(concat) cleaned = re.sub(r'(not\s*available|not\s*provided|n/a)', '', concat, flags=re.IGNORECASE) try: parsed = probablepeople.tag(cleaned, type) except probablepeople.RepeatedLabelError as e: if strict: raise e problem_key, problem_vals, parsed = find_repeated_label(cleaned) parsed = (parsed, '') title = parsed[0].get('PrefixOther', np.nan) first = parsed[0].get('GivenName', np.nan) last = parsed[0].get('Surname', np.nan) try: full_name = first + ' ' + last except TypeError as e: full_name = np.nan return title, first, last, full_name
315b971344df60d2cbb4f0c4e1d820b37b07ddaf
7,996
def build_operator_attribute_dicts(parameters, n_op, prefix="op_"): """ Extracts elements of parameters dict whose keys begin with prefix and generates a list of dicts. The values of the relevant elements of parameters must be either single values or a list of length n_op, or else an exception will be raised. :param parameters: dict (or dict-like config object) containing a superset of operator parameters :type parameters: dict :param n_op: number of operators expected :type n_op: int :param prefix: prefix by which to filter out operator parameters :type prefix: str """ list_op_dicts = [dict() for i in range(n_op)] # initialize list of empty dicts for k in [x for x in parameters if x.startswith(prefix)]: # if only a single value is given, use it for all operators if type(parameters[k]) in [str, int, float, bool, type(None), dict]: for di in list_op_dicts: di[k] = parameters[k] # if a list of values is given and the length matches the number of operators, use them respectively elif len(parameters[k]) == n_op: for i, op in enumerate(list_op_dicts): op[k] = parameters[k][i] elif k == G_OP_REPO_TH_DEF: # TODO # lists as inputs for op for di in list_op_dicts: di[k] = parameters[k] # if parameter has invalid number of values, raise exception else: raise ValueError("Number of values for parameter", k, "equals neither n_op nor 1.", type(parameters[k])) return list_op_dicts
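An illustrative call with made-up parameter names; keys without the op_ prefix are ignored:

params = {"op_fleet_size": [10, 20], "op_vr_control": "hailing", "scenario_name": "demo"}
build_operator_attribute_dicts(params, n_op=2)
# -> [{'op_fleet_size': 10, 'op_vr_control': 'hailing'},
#     {'op_fleet_size': 20, 'op_vr_control': 'hailing'}]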
9bae1a83de78e26e20df11cdb42ac6171a63f46f
7,997
import os import subprocess def mock_data(rootdir, data_dir): """Build mock functional data from available atlases""" mock_dir = os.path.join(data_dir, 'mock') if not os.path.exists(mock_dir): subprocess.run("python setup_mock_data.py".split(), cwd=rootdir) return mock_dir
2eed6ba8da9849e099841f61af56f1a982151c66
7,998
def get_peer_addr(ifname): """Return the peer address of given peer interface. None if address not exist or not a peer-to-peer interface. """ for addr in IP.get_addr(label=ifname): attrs = dict(addr.get('attrs', [])) if 'IFA_ADDRESS' in attrs: return attrs['IFA_ADDRESS']
e92906c0c705eb42ec3bedae2959dabcad72f0d2
7,999