content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def get_add_many_columns_function(row_function, data_types):
    """Returns a function which adds several columns to a row based on given row function"""
    def add_many_columns(row):
        result = row_function(row)
        data = []
        for i, data_type in enumerate(data_types):
            try:
                value = result[i]
            except TypeError as e:
                raise RuntimeError("UDF returned non-indexable value. Provided schema indicated an Indexable return type")
            except IndexError as e:
                raise RuntimeError("UDF return value did not match the number of items in the provided schema")
            cast_value = valid_data_types.cast(value, data_type)
            data.append(numpy_to_bson_friendly(cast_value))
        # return json.dumps(data, cls=NumpyJSONEncoder)
        return data
        # return bson.binary.Binary(bson.BSON.encode({"array": data}))
    return add_many_columns
72bb0edae6ddd109beae118f691fc387b6bfdce7
21,808
from functools import wraps

import torch


def sumlike_wrap(fun_name):
    """Handle torch.sum and torch.mean"""
    # Define appropriate torch function, the rest of the logic is the same
    assert fun_name in ['sum', 'mean']
    torch_fun = getattr(torch, fun_name)

    @wraps(torch_fun)
    def sumlike_fun(input, dim=None, keepdim=False):
        nodim = dim is None
        if nodim:
            # Remove stable dims, then sum over data
            input = move_sdims(input, ())
            data_sum = torch_fun(input.data)
            scale = input.scale.view(())
            output = STensor(data_sum, scale)
        else:
            # Convert dim to list of non-negative indices to sum over
            dim_list = tupleize(dim, input.ndim)
            # Make summed indices data dims, then sum over data tensor
            new_sdims = tuple(i for i in input.stable_dims if i not in dim_list)
            input = move_sdims(input, new_sdims)
            data_sum = torch_fun(input.data, dim, keepdim=keepdim)
            scale = input.scale
            if not keepdim:
                scale = squeeze_dims(scale, dim_list)
            output = STensor(data_sum, scale)
        output.rescale_()
        return output

    # Register the new sum-like function
    STABLE_FUNCTIONS[torch_fun] = sumlike_fun
3aa0247b965dbbf5038e4b0f4f00b0ead9855270
21,810
import numpy as np


def __crossover(n: int, g: np.matrix, m_list: np.array, f_list: np.array) -> np.matrix:
    """
    :param n: half of g.shape[0]
    :param g: bin mat of genes
    :param m_list: male nums
    :param f_list: female nums
    :return: crossed-over bin mat of genes
    """
    cros = np.random.randint(low=0, high=g.shape[1], size=n)
    g_cros = np.copy(g)
    for m, f, c in zip(m_list, f_list, cros):
        g_cros[[m, f], :c] = g_cros[[f, m], :c]
    return g_cros
93fbd5138bdf2e293fe0515a0096ab643fbf1953
21,811
def setup_system():
    """
    Galactic center potential and Arches cluster position from Kruijssen 2014
    """
    potential = static_potentials.Galactic_Center_Potential_Kruijssen()
    cluster = Particle()

    # At time 2.05 in KDL15
    cluster.position = [-17.55767, -53.26560, -9.39921] | units.parsec
    cluster.velocity = [-187.68008, 80.45276, 33.96556] | units.kms
    cluster.position += coordinate_correction
    return potential, cluster
9713feaa51bfb0430394a8e8171bdecd3590d5e2
21,812
from typing import Iterable
from typing import List


def collect_dynamic_libs(name: str, dest: str = ".", dependencies: bool = True,
                         excludes: Iterable[str] = None) -> List:
    """
    Collect DLLs for distribution **name**.

    Arguments:
        name: The distribution's project-name.
        dest: Target destination, defaults to ``'.'``.
        dependencies: Recursively collect libs for dependent distributions (recommended).
        excludes: Dependent distributions to skip, defaults to ``None``.

    Returns:
        List of DLLs in PyInstaller's ``(source, dest)`` format.

    This collects libraries only from Conda's shared ``lib`` (Unix) or ``Library/bin``
    (Windows) folders. To collect from inside a distribution's installation use the
    regular :func:`PyInstaller.utils.hooks.collect_dynamic_libs`.
    """
    _files = []
    for file in files(name, dependencies, excludes):
        # A file is classified as a DLL if it lives inside the dedicated ``lib_dir`` DLL folder.
        if file.parent == lib_dir:
            _files.append((str(file.locate()), dest))
    return _files
5e4ed9f9d412c6e071d85fcb34091fbed0722258
21,814
import pickle

import pandas as pd


def make_agreements(file) -> pd.DataFrame:
    """In some of the human conditions, we hold out questions. Each randomly generated
    agent is given our test and then asked its opinion on every hold out question.

    agreements.pkl is a Dict[Experiment, Tuple(ndarray, ndarray)] where each array element
    contains the fraction of holdout questions a single agent answered correctly. The first
    array contains agents that passed our test, and the second contains agents that didn't
    pass our test.

    This method massages that data into a DataFrame with experiments as the keys, a column
    for predicted alignment, and a column for the fraction of holdout questions answered
    correctly.
    """
    agreements = pd.Series(pickle.load(file)).reset_index()
    agreements = agreements.join(
        agreements.apply(lambda x: list(x[0]), result_type="expand", axis="columns"),
        rsuffix="_",
    )
    del agreements["0"]
    agreements.columns = ["epsilon", "delta", "n", "aligned", "misaligned"]
    agreements = agreements.set_index(["epsilon", "delta", "n"]).stack().reset_index()
    agreements.columns = ["epsilon", "delta", "n", "aligned", "value"]
    agreements = agreements.explode("value")
    agreements["aligned"] = agreements.aligned == "aligned"
    agreements.value = agreements.value.apply(lambda x: float(x))
    agreements = agreements.dropna()
    return agreements
e9cb9e45aa1c2ff5b694f6712da2892c6f44fd99
21,815
def column_as_html(column, table): """Return column as an HTML row.""" markup = "<tr>" markup += "<td class='field'>{0}</td>".format(column.name, column.comment) markup += "<td>{0}</td>".format(column.formattedType) # Check for Primary Key if table.isPrimaryKeyColumn(column): markup += "<td class='centered primary'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" # Check for Foreign Key if table.isForeignKeyColumn(column): markup += "<td class='centered foreign'><a href='#{0}s'>&#10004;</a></td>".format(column.name.replace(table.name, "")) else: markup += "<td class='centered'>&nbsp;</td>" # Check for Not Null attribute if column.isNotNull == 1: markup += "<td class='centered notnull'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" # Check for Unique attribute if is_unique(column, table): markup += "<td class='centered unique'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" # Check for Binary, Unsigned and Zero Fill attributes flags = list(column.flags) if flags.count("BINARY"): markup += "<td class='centered binary'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" if flags.count("UNSIGNED"): markup += "<td class='centered unsigned'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" if flags.count("ZEROFILL"): markup += "<td class='centered zerofill'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" # Check for Auto Increment attribute if column.autoIncrement == 1: markup += "<td class='centered autoincrement'>&#10004;</td>" else: markup += "<td class='centered'>&nbsp;</td>" # Default value markup += "<td>{0}</td>".format(column.defaultValue) # Comment markup += "<td class='comment'>{0}</td>".format(escape(column.comment)) markup += "</tr>" return markup
0c6ed56cd686a4359776022407b023d5733198c9
21,816
def covariation(x, y):
    """
    Covariation of X and Y.

    :param list or tuple x: 1st array.
    :param list or tuple y: 2nd array.
    :return: covariation.
    :rtype: float
    :raise ValueError: when x or y is empty
    """
    if x and y:
        m_x = mean(x)
        m_y = mean(y)
        dev_x = [i - m_x for i in x]
        # Deviations of y must be taken over y (the original iterated over x by mistake)
        dev_y = [i - m_y for i in y]
        return dot(dev_x, dev_y) / (len(x) - 1)
    else:
        raise ValueError('x or y is empty')
dd42467a453978edb5970b79653724f77c07beb7
21,817
def stripped_spaces_around(converter):
    """Make converter that strips leading and trailing spaces.

    ``converter`` is called to further convert non-``None`` values.
    """
    def stripped_text_converter(value):
        if value is None:
            return None
        return converter(value.strip())

    return stripped_text_converter
b92f38d3eb8d191f615488bbd11503bae56ef6de
21,818
from typing import Union from typing import Literal from typing import Any def ootf_inverse( value: FloatingOrArrayLike, function: Union[ Literal["ITU-R BT.2100 HLG", "ITU-R BT.2100 PQ"], str ] = "ITU-R BT.2100 PQ", **kwargs: Any ) -> FloatingOrNDArray: """ Maps relative display linear light to scene linear light using given inverse opto-optical transfer function (OOTF / OOCF). Parameters ---------- value Value. function Inverse opto-optical transfer function (OOTF / OOCF). Other Parameters ---------------- kwargs {:func:`colour.models.ootf_inverse_HLG_BT2100`, :func:`colour.models.ootf_inverse_PQ_BT2100`}, See the documentation of the previously listed definitions. Returns ------- :class:`numpy.floating` or :class:`numpy.ndarray` Luminance of scene linear light. Examples -------- >>> ootf_inverse(779.988360834115840) # doctest: +ELLIPSIS 0.1000000... >>> ootf_inverse( # doctest: +ELLIPSIS ... 63.095734448019336, function='ITU-R BT.2100 HLG') 0.1000000... """ function = validate_method( function, OOTF_INVERSES, '"{0}" inverse "OOTF" is invalid, it must be one of {1}!', ) callable_ = OOTF_INVERSES[function] return callable_(value, **filter_kwargs(callable_, **kwargs))
65c7aa374d1daa086828b87a4f30802f63b4a3b7
21,819
def modified_query(benchmark, model_spec, run_index: int, epochs=108, stop_halfway=False): """ NOTE: Copied from https://github.com/google-research/nasbench/blob/b94247037ee470418a3e56dcb83814e9be83f3a8/nasbench/api.py#L204-L263 # noqa We changed the function in such a way that we now can specified the run index (index of the evaluation) which was in the original code sampled randomly. OLD DOCSTRING: Fetch one of the evaluations for this model spec. Each call will sample one of the config['num_repeats'] evaluations of the model. This means that repeated queries of the same model (or isomorphic models) may return identical metrics. This function will increment the budget counters for benchmarking purposes. See self.training_time_spent, and self.total_epochs_spent. This function also allows querying the evaluation metrics at the halfway point of training using stop_halfway. Using this option will increment the budget counters only up to the halfway point. Args: model_spec: ModelSpec object. epochs: number of epochs trained. Must be one of the evaluated number of epochs, [4, 12, 36, 108] for the full dataset. stop_halfway: if True, returned dict will only contain the training time and accuracies at the halfway point of training (num_epochs/2). Otherwise, returns the time and accuracies at the end of training (num_epochs). Returns: dict containing the evaluated data for this object. Raises: OutOfDomainError: if model_spec or num_epochs is outside the search space. """ if epochs not in benchmark.dataset.valid_epochs: raise OutOfDomainError('invalid number of epochs, must be one of %s' % benchmark.dataset.valid_epochs) fixed_stat, computed_stat = benchmark.dataset.get_metrics_from_spec(model_spec) # MODIFICATION: Use the run index instead of the sampled one. # sampled_index = random.randint(0, self.config['num_repeats'] - 1) computed_stat = computed_stat[epochs][run_index] data = {} data['module_adjacency'] = fixed_stat['module_adjacency'] data['module_operations'] = fixed_stat['module_operations'] data['trainable_parameters'] = fixed_stat['trainable_parameters'] if stop_halfway: data['training_time'] = computed_stat['halfway_training_time'] data['train_accuracy'] = computed_stat['halfway_train_accuracy'] data['validation_accuracy'] = computed_stat['halfway_validation_accuracy'] data['test_accuracy'] = computed_stat['halfway_test_accuracy'] else: data['training_time'] = computed_stat['final_training_time'] data['train_accuracy'] = computed_stat['final_train_accuracy'] data['validation_accuracy'] = computed_stat['final_validation_accuracy'] data['test_accuracy'] = computed_stat['final_test_accuracy'] benchmark.dataset.training_time_spent += data['training_time'] if stop_halfway: benchmark.dataset.total_epochs_spent += epochs // 2 else: benchmark.dataset.total_epochs_spent += epochs return data
21ccbafb230da1d984f53f36f41c6e9ceb0d7f18
21,821
import numpy as np


def gauss(x, mu=0, sigma=1):
    """
    Unnormalized Gaussian distribution.

    Parameters
    ----------
    x : array_like
        Points at which to evaluate the Gaussian.
    mu : float
        Center of the Gaussian.
    sigma : float
        Standard deviation of the Gaussian.

    Returns
    -------
    y : type(x)
        Gaussian evaluated at x.

    Notes
    -----
    Some people use alpha (1/e point) instead of the sigma (standard deviation)
    to define the width of the Gaussian. They are related through:
    alpha = sigma * sqrt(2)
    """
    return np.exp(-((x - mu)**2) / (2 * sigma**2))
24e5c5b9e42cc6b84e6d2c4aa9f2a26b44793112
21,822
def GetSpd(ea):
    """
    Get current delta for the stack pointer

    @param ea: end address of the instruction, i.e. the last address of the instruction+1

    @return: The difference between the original SP upon entering
             the function and SP for the specified address
    """
    func = idaapi.get_func(ea)
    if not func:
        return None
    return idaapi.get_spd(func, ea)
84c00ac2bb722e51d27813a35f55c8c59fdac579
21,823
def is_fugashi_ipadic_available():
    """
    Check if the library is available.

    This function checks if fugashi with ipadic is available in your environment
    and returns the result as a bool value.

    Returns
    -------
    _fugashi_ipadic_available : bool
        If True, fugashi with ipadic is available in your environment.

    Examples
    --------
    >>> tokenizers.is_fugashi_ipadic_available()
    True
    """
    return _fugashi_ipadic_available
cc3b80718691b2914f57c950452f2fbb253100d1
21,824
import numpy as np
import torch


def pad_to_sidelength(schematic, labels=None, nothing_id=0, sidelength=32):
    """Add padding to schematics to sidelength"""
    szs = list(schematic.size())
    szs = np.add(szs, -sidelength)
    pad = []
    # this is all backwards bc pytorch pad semantics :(
    for s in szs:
        if s >= 0:
            pad.append(0)
        else:
            pad.append(-s)
        pad.append(0)
    schematic = torch.nn.functional.pad(schematic, pad[::-1])
    if labels is not None:
        labels = torch.nn.functional.pad(labels, pad[::-1], value=nothing_id)
    return schematic, labels
81a7bb8deb2474106715720f79e0d3ee8937557b
21,825
def segmentation_gaussian_measurement_batch( y_true, y_pred, gaussian_sigma=3, measurement=segmentation_losses.binary_crossentropy): """ Apply metric or loss measurement to a batch of data incorporating a 2D gaussian. Only works with batch size 1. Loop and call this function repeatedly over each sample to use a larger batch size. # Arguments y_true: is assumed to be [label, x_img_coord, y_image_coord] y_pred: is expected to be a 2D array of labels with shape [1, img_height, img_width, 1]. """ with K.name_scope(name='segmentation_gaussian_measurement_batch') as scope: if keras.backend.ndim(y_true) == 4: # sometimes the dimensions are expanded from 2 to 4 # to meet Keras' expectations. # In that case reduce them back to 2 y_true = K.squeeze(y_true, axis=-1) y_true = K.squeeze(y_true, axis=-1) y_pred_shape = tf.Tensor.get_shape(y_pred) batch_size = y_pred_shape[0] y_true = tf.split(y_true, batch_size) y_pred = tf.split(y_pred, batch_size) results = [] for y_true_img, y_pred_img in zip(y_true, y_pred): result = segmentation_gaussian_measurement( y_true=y_true_img, y_pred=y_pred_img, gaussian_sigma=gaussian_sigma, measurement=measurement ) results = results + [result] results = tf.concat(results, axis=0) return results
de88b6ee1175612f7fa8e41c98dc6e1b3287a034
21,826
from io import BytesIO


def save_image(img: Image, img_format=None, quality=85):
    """Save an image from a stream into a variable for later sending over the network."""
    if img_format is None:
        img_format = img.format

    output_stream = BytesIO()
    output_stream.name = 'image.jpeg'  # on Ubuntu there is no 'jpg' for some reason, only 'jpeg'

    if img.format == 'JPEG':
        img.save(output_stream, img_format, quality=quality, optimize=True, progressive=True)
    else:
        img.convert('RGB').save(output_stream, format=img_format)

    output_stream.seek(0)
    return output_stream
5696745ad33a2b1b59718f1c4d4eedf0eda7cd46
21,827
def check_input(args: dict) -> dict:
    """
    Check that the user-supplied latitude and longitude are well formatted.
    If they are, return a dict with lat and lng converted to floats.
    - args: dict. request.args
    """
    lat = args.get("lat")
    lng = args.get("lng")
    if lat is None:
        abort(400, "Latitude parameter (lat) is missing")
    if lng is None:
        abort(400, "Longitude parameter (lng) is missing")
    return {"lat": check_lat_lng(lat, "latitude"), "lng": check_lat_lng(lng, "longitude")}
078fc0ae5665562d6849746788b1f7a5a88981eb
21,828
def adjust_learning_rate(epoch, total_epochs, only_ce_epochs, learning_rate, optimizer):
    """Adjust learning rate during training.

    Parameters
    ----------
    epoch: Current training epoch.
    total_epochs: Total number of epochs for training.
    only_ce_epochs: Number of epochs for initial pretraining.
    learning_rate: Initial learning rate for training.
    """
    # We don't want to consider the only-CE-based epochs for the lr scheduler
    epoch = epoch - only_ce_epochs
    drocc_epochs = total_epochs - only_ce_epochs
    # lr = learning_rate
    if epoch <= drocc_epochs:
        lr = learning_rate * 0.001
    if epoch <= 0.90 * drocc_epochs:
        lr = learning_rate * 0.01
    if epoch <= 0.60 * drocc_epochs:
        lr = learning_rate * 0.1
    if epoch <= 0.30 * drocc_epochs:
        lr = learning_rate
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer
94ddbb9fcc7676799f7e032f4ab6658b1b056b32
21,829
def dummy_nullgeod():
    """
    Equatorial Geodesic
    """
    return Nulllike(
        metric="Kerr",
        metric_params=(0.5,),
        position=[4., np.pi / 2, 0.],
        momentum=[0., 0., 2.],
        steps=50,
        delta=0.5,
        return_cartesian=False,
        suppress_warnings=True,
    )
fd5af27cebd029fbcbcdab07f154ee4f4dff2575
21,830
def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.
    The shapes are transformed as follows:
        (N, C, D, H, W) -> (C, N * D * H * W)
    """
    C = tensor.size(1)
    # new axis order
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
    transposed = tensor.permute(axis_order)
    # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
    return transposed.reshape(C, -1)
67a0d89ce98e6695a9d58b1f3ab2f403b09c89ce
21,832
import numpy as np

from chiesa_correction import align_gvectors


def compare_scalar_grids(gvecs0, nkm0, gvecs1, nkm1, atol=1e-6):
    """Compare two scalar fields sampled on regular grids

    Args:
        gvecs0 (np.array): first grid, (npt0, ndim)
        nkm0 (np.array): values, (npt0,)
        gvecs1 (np.array): second grid, (npt1, ndim), expect npt1<=npt0
        nkm1 (np.array): values, (npt1,)
    Return:
        bool: True if same scalar field
    """
    comm0, comm1 = align_gvectors(gvecs0, gvecs1)
    unique = len(gvecs1[comm1]) == len(gvecs1)  # all unique gvecs are unique
    xmatch = np.allclose(gvecs0[comm0], gvecs1[comm1], atol=atol)  # gvecs match
    ymatch = np.allclose(nkm0[comm0], nkm1[comm1], atol=atol)  # nk match before unfold
    return np.array([unique, xmatch, ymatch], dtype=bool)
0f75d4387e4c8a5f497a85191df342ac33df1c11
21,833
def a_dot(t):
    """
    Derivative of a, the scale factor

    :param t: time
    :return: derivative of the scale factor at time t
    """
    return H0 * ((3 / 2) * H0 * t) ** (-1 / 3)
b5557176f75ed45f6e5b38eb827d655779311e0a
21,834
def frame(x, frame_length, hop_length, axis=-1, name=None): """ Slice the N-dimensional (where N >= 1) input into (overlapping) frames. Args: x (Tensor): The input data which is a N-dimensional (where N >= 1) Tensor with shape `[..., seq_length]` or `[seq_length, ...]`. frame_length (int): Length of the frame and `0 < frame_length <= x.shape[axis]`. hop_length (int): Number of steps to advance between adjacent frames and `0 < hop_length`. axis (int, optional): Specify the axis to operate on the input Tensors. Its value should be 0(the first dimension) or -1(the last dimension). If not specified, the last axis is used by default. Returns: The output frames tensor with shape `[..., frame_length, num_frames]` if `axis==-1`, otherwise `[num_frames, frame_length, ...]` where `num_framse = 1 + (x.shape[axis] - frame_length) // hop_length` Examples: .. code-block:: python import paddle from paddle.signal import frame # 1D x = paddle.arange(8) y0 = frame(x, frame_length=4, hop_length=2, axis=-1) # [4, 3] # [[0, 2, 4], # [1, 3, 5], # [2, 4, 6], # [3, 5, 7]] y1 = frame(x, frame_length=4, hop_length=2, axis=0) # [3, 4] # [[0, 1, 2, 3], # [2, 3, 4, 5], # [4, 5, 6, 7]] # 2D x0 = paddle.arange(16).reshape([2, 8]) y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 4, 3] # [[[0, 2, 4], # [1, 3, 5], # [2, 4, 6], # [3, 5, 7]], # # [[8 , 10, 12], # [9 , 11, 13], # [10, 12, 14], # [11, 13, 15]]] x1 = paddle.arange(16).reshape([8, 2]) y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2] # [[[0 , 1 ], # [2 , 3 ], # [4 , 5 ], # [6 , 7 ]], # # [4 , 5 ], # [6 , 7 ], # [8 , 9 ], # [10, 11]], # # [8 , 9 ], # [10, 11], # [12, 13], # [14, 15]]] # > 2D x0 = paddle.arange(32).reshape([2, 2, 8]) y0 = frame(x0, frame_length=4, hop_length=2, axis=-1) # [2, 2, 4, 3] x1 = paddle.arange(32).reshape([8, 2, 2]) y1 = frame(x1, frame_length=4, hop_length=2, axis=0) # [3, 4, 2, 2] """ if axis not in [0, -1]: raise ValueError(f'Unexpected axis: {axis}. It should be 0 or -1.') if not isinstance(frame_length, int) or frame_length <= 0: raise ValueError( f'Unexpected frame_length: {frame_length}. It should be an positive integer.' ) if not isinstance(hop_length, int) or hop_length <= 0: raise ValueError( f'Unexpected hop_length: {hop_length}. It should be an positive integer.' ) if frame_length > x.shape[axis]: raise ValueError( f'Attribute frame_length should be less equal than sequence length, ' f'but got ({frame_length}) > ({x.shape[axis]}).') op_type = 'frame' if in_dygraph_mode(): attrs = ('frame_length', frame_length, 'hop_length', hop_length, 'axis', axis) op = getattr(_C_ops, op_type) out = op(x, *attrs) else: check_variable_and_dtype( x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], op_type) helper = LayerHelper(op_type, **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type=op_type, inputs={'X': x}, attrs={ 'frame_length': frame_length, 'hop_length': hop_length, 'axis': axis }, outputs={'Out': out}) return out
f43420ceefa8963579776c5234179274688c83d6
21,835
import functools


def wrapped_partial(func: callable, *args, **kwargs) -> callable:
    """Wrap a function with partial args and kwargs.

    Args:
        func (callable): The function to be wrapped.
        *args (type): Args to be wrapped.
        **kwargs (type): Kwargs to be wrapped.

    Returns:
        callable: The wrapped function.
    """
    partial_func = functools.partial(func, *args, **kwargs)
    functools.update_wrapper(partial_func, func)
    return partial_func
d8c3eb53e3c74104aa72acce545269c98585cd83
21,836
import typing def with_sfw_check( command: typing.Optional[CommandT] = None, /, *, error_message: typing.Optional[str] = "Command can only be used in SFW channels", halt_execution: bool = False, ) -> CallbackReturnT[CommandT]: """Only let a command run in a channel that's marked as sfw. Parameters ---------- command : typing.Optional[CommandT] The command to add this check to. Other Parameters ---------------- error_message : typing.Optional[str] The error message to send in response as a command error if the check fails. Defaults to "Command can only be used in DMs" and setting this to `None` will disable the error message allowing the command search to continue. halt_execution : bool Whether this check should raise `tanjun.errors.HaltExecution` to end the execution search when it fails instead of returning `False`. Defaults to `False`. Notes ----- * error_message takes priority over halt_execution. * For more information on how this is used with other parameters see `CallbackReturnT`. Returns ------- CallbackReturnT[CommandT] The command this check was added to. """ return _wrap_with_kwargs(command, sfw_check, halt_execution=halt_execution, error_message=error_message)
565d4e0f9e5f473a72692511a1ba3896717c9069
21,837
def algorithm_id_to_generation_class(algorithm_id):
    """
    Returns the Generation class corresponding to the provided algorithm ID
    (as defined in settings).
    """
    return _algorithm_id_to_class_data(algorithm_id)[1]
5cf4ede818832a57c1c279d5c78c43c2c214b9b5
21,838
def search(session, **kwargs): """ Searches the Discogs API for a release object Arguments: session (requests.Session) - API session object **kwargs (dict) - All kwargs are added as query parameters in the search call Returns: dict - The first result returned in the search Raises: Exception if release cannot be found """ try: url = DB_API + '/search?' for param, value in kwargs.items(): url += f'{param}={value}&' res = session.get(url) data = res.json() if res.status_code != 200 or 'results' not in data.keys(): raise Exception(f'Unexpected error when querying Discogs API ({res.status_code})') if not data['results']: raise Exception('No results found') return data['results'][0] except Exception as err: print(f'Failed to find release for search {kwargs} in Discogs database: {err}') raise
f67646b3060602b743eb4166a4aab5882b8a3c81
21,839
def _conv_general_precision_config_proto(precision):
    """Convert an integer to an XLA.PrecisionConfig."""
    if precision is None:
        return None

    proto = xla_data_pb2.PrecisionConfig()
    proto.operand_precision.append(int(precision))
    return proto
8b43272aadeccd4385ddb74bcf0691f4a779e4c1
21,840
def list_in_list(a, l):
    """Checks if a list is in a list and returns its index if it is (otherwise returns -1).

    Parameters
    ----------
    a : list()
        List to search for.
    l : list()
        List to search through.
    """
    return next((i for i, elem in enumerate(l) if elem == a), -1)
494d9a880bcd2084a0f50e292102dc8845cbbb16
21,842
def sent_to_idx(sent, word2idx, sequence_len):
    """
    convert sentence to index array
    """
    unknown_id = word2idx.get("UNKNOWN", 0)
    sent2idx = [word2idx.get(word, unknown_id) for word in sent.split("_")[:sequence_len]]
    return sent2idx
ffaa65741d8c24e02d5dfbec4ce84c03058ebeb8
21,844
import typing as T


def expand_sqs_results(settings: Settings, sqs_results: T.Iterable[SQSResult],
                       timings: T.Optional[TimingDictionary] = None,
                       include=('configuration',), inplace: bool = False) -> Settings:
    """
    Serializes a list of :py:class:`sqsgenerator.public.SQSResult` into a JSON/YAML serializable string

    :param settings: the settings used to compute the {sqs_results}
    :type settings: AttrDict
    :param sqs_results:
    """
    dump_include = list(include)
    if 'configuration' not in dump_include:
        dump_include += ['configuration']
    result_document = make_result_document(settings, sqs_results, fields=dump_include, timings=timings)
    if inplace:
        settings.update(result_document)
        keys_to_remove = {'file_name', 'input_format', 'composition', 'iterations',
                          'max_output_configurations', 'mode', 'threads_per_rank', 'is_sublattice'}
        final_document = {k: v for k, v in settings.items() if k not in keys_to_remove}
        if 'sublattice' in final_document:
            final_document.update(final_document['sublattice'])
            del final_document['sublattice']
    else:
        final_document = result_document
    return Settings(final_document)
316e6d66617f6715193502058eff173954214a89
21,845
def test_files_atlas(test_files):
    """ATLAS files"""
    # ssbio/test/test_files/atlas
    return op.join(test_files, 'atlas')
9ab8f55582d85e7f51c301b1cc0c80a5b7233b47
21,846
import numpy as np

from modefit.basics import get_polyfit


def _get_xaxis_polynomial_(xyv, degree=DEGREE, legendre=LEGENDRE, xmodel=None, clipping=[5, 5]):
    """ """
    x, y, v = xyv
    flagin = ((np.nanmean(y) - clipping[0] * np.nanstd(y)) < y) * (y < (np.nanmean(y) + clipping[1] * np.nanstd(y)))
    contmodel = get_polyfit(x[flagin], y[flagin], v[flagin], degree=degree, legendre=legendre)
    contmodel.fit(a0_guess=np.nanmedian(y[flagin]))
    if xmodel is not None:
        return contmodel.fitvalues, contmodel.model.get_model(x=xmodel)  # , contmodel
    return contmodel.fitvalues
d0b4ebf790339154fe0d979ec720a9960be92da8
21,847
def generateKey():
    """Method to generate an encryption key"""
    try:
        key = Fernet.generate_key()
        updateClipboard(f"export LVMANAGER_PW={str(key)[2:-1]}")
        print(f"Key: {key}")
        print("Export command copied to clipboard. Save this value!")
        return True
    except Exception as e:
        print(f"Something went wrong\nException: {e}")
        return False
a0d197c499d978600c6d95879aab67d595648ffc
21,848
import numpy as np


def _zpkbilinear(z, p, k, fs):
    """
    Return a digital filter from an analog one using a bilinear transform
    """
    z = np.atleast_1d(z)
    p = np.atleast_1d(p)

    degree = _relative_degree(z, p)

    fs2 = 2.0 * fs

    # Bilinear transform the poles and zeros
    z_z = (fs2 + z) / (fs2 - z)
    p_z = (fs2 + p) / (fs2 - p)

    # Any zeros that were at infinity get moved to the Nyquist frequency
    z_z = np.append(z_z, -np.ones(degree))

    # Compensate for gain change
    k_z = k * np.real(np.prod(fs2 - z) / np.prod(fs2 - p))

    return z_z, p_z, k_z
ef7e50ae81023edc599fb56e3c1e20a4579f8389
21,849
import numpy as np


def so3exp(w):
    """
    Maps so(3) --> SO(3) group with closed form expression.
    """
    theta = np.linalg.norm(w)
    if theta < _EPS * 3:
        return np.eye(3)
    else:
        w_hat = S03_hat_operator(w)
        R = np.eye(3) + (np.sin(theta) / theta) * w_hat + ((1 - np.cos(theta)) / theta**2) * np.dot(w_hat, w_hat)
        return R
434168c7652311a850dbcb700343e445ac808c57
21,850
def bin2ppm(nproc_old, model_tags, region, npts, nproc, old_mesh_dir, old_model_dir, output_dir):
    """
    Convert the bin files to the ppm model.
    """
    result = ""
    julia_path = get_julia("specfem_gll.jl/src/program/get_ppm_model.jl")
    latnproc, lonnproc = map(int, nproc.split("/"))
    nproc_ppm2netcdf = latnproc * lonnproc
    # ! note there is an issue of precompiling the code in a race condition,
    # refer to https://github.com/simonbyrne/PkgLock.jl to solve the problem
    # result += "julia --project -e 'push!(LOAD_PATH, \"@pkglock\"); using PkgLock; PkgLock.instantiate_precompile()'\n"
    result += "module purge;module load GCC/8.2.0-2.31.1;module load OpenMPI/3.1.3;"
    result += f"srun -n {nproc_ppm2netcdf} julia '{julia_path}' --nproc_old {nproc_old} --old_mesh_dir {old_mesh_dir} --old_model_dir {old_model_dir} --model_tags {model_tags} --output_file {output_dir} --region {region} --npts {npts} --nproc {nproc}; \n"
    return result
2e8f8be993ca7d164faf2cfce4e1539a16764ad4
21,851
def st_sdata(obs, cols):
    """return string data in given observation numbers as a list of lists,
    one sub-list for each row;
    obs should be int or iterable of int;
    cols should be a single str or int or iterable of str or int
    """
    obs, cols, _ = _parseObsColsVals(obs, cols)
    if not all(st_isstrvar(c) for c in cols):
        raise TypeError("only string Stata variables allowed")
    return [[_st_sdata(i, j) for j in cols] for i in obs]
7e08168a42043b7de379f7f513f25b6e88a89847
21,852
def list_data(args, data):
    """List all servers and files associated with this project."""
    if len(data["remotes"]) > 0:
        print("Servers:")
        for server in data["remotes"]:
            if server["name"] == server["location"]:
                print(server["user"] + "@" + server["location"])
            else:
                print(server["user"] + "@" + server["name"] +
                      " (" + server["location"] + ")")
    else:
        print("No servers added")
    print("Included files and directories:")
    print(data["file"] + ".py")
    if len(data["files"]) > 0:
        print("\n".join(data["files"]))
    return data
6a005b6e605d81985fca85ca54fd9b29b28128f5
21,853
def vecInt(xx, vv, p, interpolation = 'weighted'): """ Interpolates the field around this position. call signature: vecInt(xx, vv, p, interpolation = 'weighted') Keyword arguments: *xx*: Position vector around which will be interpolated. *vv*: Vector field to be interpolated. *p*: Parameter struct. *interpolation*: Interpolation of the vector field. 'mean': takes the mean of the adjacent grid point. 'weighted': weights the adjacent grid points according to their distance. """ # find the adjacent indices i = (xx[0]-p.Ox)/p.dx if (i < 0): i = 0 if (i > p.nx-1): i = p.nx-1 ii = np.array([int(np.floor(i)), \ int(np.ceil(i))]) j = (xx[1]-p.Oy)/p.dy if (j < 0): j = 0 if (j > p.ny-1): j = p.ny-1 jj = np.array([int(np.floor(j)), \ int(np.ceil(j))]) k = (xx[2]-p.Oz)/p.dz if (k < 0): k = 0 if (k > p.nz-1): k = p.nz-1 kk = np.array([int(np.floor(k)), \ int(np.ceil(k))]) vv = np.swapaxes(vv, 1, 3) # interpolate the field if (interpolation == 'mean'): return np.mean(vv[:,ii[0]:ii[1]+1,jj[0]:jj[1]+1,kk[0]:kk[1]+1], axis = (1,2,3)) if(interpolation == 'weighted'): if (ii[0] == ii[1]): w1 = np.array([1,1]) else: w1 = (i-ii[::-1]) if (jj[0] == jj[1]): w2 = np.array([1,1]) else: w2 = (j-jj[::-1]) if (kk[0] == kk[1]): w3 = np.array([1,1]) else: w3 = (k-kk[::-1]) weight = abs(w1.reshape((2,1,1))*w2.reshape((1,2,1))*w3.reshape((1,1,2))) return np.sum(vv[:,ii[0]:ii[1]+1,jj[0]:jj[1]+1,kk[0]:kk[1]+1]*weight, axis = (1,2,3))/np.sum(weight)
c93572205e1d5a3c00ef21f1780a5184c695d988
21,854
def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0, device='cuda'): """Anchor inside flags. :param flat_anchors: flat anchors :param valid_flags: valid flags :param img_shape: image meta info :param allowed_border: if allow border :return: inside flags """ img_h, img_w = img_shape[:2] if device == 'cuda': img_h = img_h.cuda() img_w = img_w.cuda() img_h = img_h.float() img_w = img_w.float() valid_flags = valid_flags.bool() if allowed_border >= 0: inside_flags = (valid_flags & (flat_anchors[:, 0] >= -allowed_border) & ( flat_anchors[:, 1] >= -allowed_border) & ( flat_anchors[:, 2] < img_w + allowed_border) & ( flat_anchors[:, 3] < img_h + allowed_border)) else: inside_flags = valid_flags return inside_flags
500fe39f51cbf52bd3417b14e7ab7dcb4ec2f9cc
21,855
import numpy as np


def notinLRG_mask(primary=None, rflux=None, zflux=None, w1flux=None,
                  rflux_snr=None, zflux_snr=None, w1flux_snr=None):
    """See :func:`~desitarget.sv1.sv1_cuts.isLRG` for details.

    Returns
    -------
    :class:`array_like`
        ``True`` if and only if the object is NOT masked for poor quality.
    """
    if primary is None:
        primary = np.ones_like(rflux, dtype='?')
    lrg = primary.copy()

    lrg &= (rflux_snr > 0) & (rflux > 0)    # ADM quality in r.
    lrg &= (zflux_snr > 0) & (zflux > 0)    # ADM quality in z.
    lrg &= (w1flux_snr > 4) & (w1flux > 0)  # ADM quality in W1.

    return lrg
a89a02d017140f1321905695bbbcb34789b2e535
21,856
def get_theta_def(pos_balle: tuple, cote: str):
    """
    Return the two theta angles (see the explanations) needed for the goalkeeper to be
    aligned with the ball. They are computed with respect to the two goal posts so that
    both "extremities" are correctly aligned.

    Parameters:
        - pos_balle : tuple - x and y positions of the ball
        - cote : str - side being attacked: "d" for right, "g" for left
          (with respect to the direction of the x axis)
    """
    angles = []
    if cote.lower() == "d":
        alphas = get_alpha(pos_balle, goal_droit)
        for alpha, poteau in zip(alphas, goal_droit):
            if pos_balle[1] > poteau[1]:
                angles.append(alpha)
            else:
                angles.append(-alpha)
    elif cote.lower() == "g":
        alphas = get_alpha(pos_balle, goal_gauche)
        for alpha, poteau in zip(alphas, goal_gauche):
            if pos_balle[1] > poteau[1]:
                angles.append(pi - alpha)
            else:
                angles.append(alpha - pi)
    return angles
0fdde277c4c63b593c5c40c595c7181539eb0fd1
21,857
def public_doc():
    """Documentation for this api."""
    return auto.html(groups=['public'], title='Ocean App Web Service Public Documentation')
37d343ca4159566f4191a9f8608378dea7ce1bb5
21,858
def getAllTeams():
    """ returns the entire list of teams """
    return Team.objects.order_by('name').all()
8e06518e417657d3a24d4261a71d9b0bda31af22
21,859
def parse_lamp_flags(flags):
    """Parses flags and returns a dict that represents the lamp states."""
    # flags: [0123]{8}
    values = _swap_key_and_value(_LAMP_STATES)  # {value: state}
    states = dict([
        (color, values[flags[digit]])
        for color, digit in _LAMP_DIGITS.items()
    ])
    return {'lamps': states}
5a2416ebca980fd9d3ae717aaa4da3b008d76e95
21,860
from functools import wraps


def user_owns_item(function):
    """
    Decorator that checks that the item was created by current user.
    """
    @wraps(function)
    def wrapper(category_name, item_name, *args, **kwargs):
        category = db_session.query(Category).filter_by(name=category_name).one()
        user_id = session['user_id']
        item = db_session.query(Item).filter_by(category=category, name=item_name).one()
        if item.user_id == user_id:
            return function(category_name, item_name, *args, **kwargs)
        else:
            abort(403)
    return wrapper
912c93408b6297c338be6dc48414f3b4bb57aea3
21,861
def generate_bit_byte_overview(inputstring, number_of_indent_spaces=4, show_reverse_bitnumbering=False): """Generate a nice overview of a CAN frame. Args: inputstring (str): String that should be printed. Should be 64 characters long. number_of_indent_spaces (int): Size of indentation Raises: ValueError when *inputstring* has wrong length. Returns: A multi-line string. """ if len(inputstring) != constants.BITS_IN_FULL_DATA: raise ValueError("The inputstring is wrong length: {}. {!r}".format(len(inputstring), inputstring)) paddedstring = " ".join([inputstring[i:i + 8] for i in range(0, 64, 8)]) indent = " " * number_of_indent_spaces text = indent + " 111111 22221111 33222222 33333333 44444444 55555544 66665555\n" text += indent + "76543210 54321098 32109876 10987654 98765432 76543210 54321098 32109876\n" text += indent + "Byte0 Byte1 Byte2 Byte3 Byte4 Byte5 Byte6 Byte7\n" text += indent + paddedstring + "\n" if show_reverse_bitnumbering: text += indent + "66665555 55555544 44444444 33333333 33222222 22221111 111111\n" text += indent + "32109876 54321098 76543210 98765432 10987654 32109876 54321098 76543210\n" return text
325eafc0ca9a8d91e3774cc6bc8b91052b01d261
21,862
def return_list_of_file_paths(folder_path):
    """Returns a list of file paths

    Args:
        folder_path: The folder path where the files are in
    Returns:
        file_info: List of full file paths
    """
    file_info = []
    list_of_file_names = [fileName for fileName in listdir(folder_path) if isfile(join(folder_path, fileName))]
    list_of_file_paths = [join(folder_path, fileName) for fileName in listdir(folder_path) if isfile(join(folder_path, fileName))]
    file_info.append(list_of_file_names)
    file_info.append(list_of_file_paths)
    return file_info
7bc67a17b028d68d3ef99fc82cd03e21c34ec803
21,863
import numpy


def artificial_signal(frequencys, sampling_frequency=16000, duration=0.025):
    """
    Concatenates a sequence of sinusoids of frequency f in frequencys
    """
    sins = map(lambda f: sinusoid(f, sampling_frequency, duration), frequencys)
    return numpy.concatenate(tuple(sins))
ef67e5ca9b66da8c003108e2fe5eb4ba43d7a564
21,864
def _sources():
    """Return the subdir name and extension of each of the contact prediction types.

    :return: Contact prediction types and location.
    :rtype: dict [list [str]]
    """
    sources = _sourcenames()
    confiledir = ["deepmetapsicov", "deepmetapsicov", "deepmetapsicov"]
    confilesuffix = ["psicov", "ccmpred", "deepmetapsicov.con"]
    conkittype = ["psicov", "ccmpred", "psicov"]
    threshold = [0.2, 0.1, 0.1]
    outsinfo = {}
    for n in range(len(sources)):
        outsinfo[sources[n]] = [confiledir[n], confilesuffix[n], conkittype[n], threshold[n]]
    return outsinfo
f03b6059a106a5fe5619b2b673eb88d9b352e70f
21,865
def pdns_forward(hostname):
    """Get the IP addresses to which the given host has resolved."""
    response = get(BASE_API_URL + "pdns/forward/{}".format(hostname))
    return response
3022190035bc6acc0ff1d16da7616703ca339c53
21,866
def make_conv(in_channels, out_channels, conv_type="normal", kernel_size=3, mask_activation=None, version=2, mask_init_bias=0, depth_multiplier=1, **kwargs): """Create a convolution layer. Options: deformable, separable, or normal convolution """ assert conv_type in ("deformable", "separable", "normal") padding = (kernel_size-1)//2 if conv_type == "deformable": conv_layer = nn.Sequential( DeformableConv2dBlock( in_channels, out_channels, kernel_size, padding=padding, bias=False, mask_activation=mask_activation, version=version, mask_init_bias=mask_init_bias ), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True) ) elif conv_type == "separable": hidden_channels = in_channels * depth_multiplier conv_layer = nn.Sequential( # dw nn.Conv2d(in_channels, hidden_channels, kernel_size, padding=padding, groups=in_channels, bias=False), nn.BatchNorm2d(in_channels), nn.ReLU6(inplace=True), # pw nn.Conv2d(hidden_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU6(inplace=True) ) nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu") nn.init.kaiming_normal_(conv_layer[3].weight, mode="fan_out", nonlinearity="relu") else: # normal convolution conv_layer = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True) ) nn.init.kaiming_normal_(conv_layer[0].weight, mode="fan_out", nonlinearity="relu") return conv_layer
515e0544aae6464c966612b3c32e23e878a9260d
21,867
import requests from selenium import webdriver from time import sleep from typing import Callable def url_to_html_func(kind="requests") -> Callable: """Get a url_to_html function of a given kind.""" url_to_html = None if kind == "requests": def url_to_html(url): r = requests.get(url) if r.status_code != 200: print( f"An error occured. Returning the response object for you to analyze: {r}" ) return r return r.content elif kind == "chrome": def url_to_html(url, wait=2): b = webdriver.Chrome() b.get(url) if isinstance(wait, (int, float)): sleep(wait) html = b.page_source b.close() return html else: raise ValueError(f"Unknown url_to_html value: {url_to_html}") assert callable(url_to_html), "Couldn't make a url_to_html function" return url_to_html
f19643f1d49212e643fc0fe50ce69f5f4a444c09
21,868
def head(filename, format=None, **kwargs): """ Returns the header of a file. Reads the information about the content of the file without actually loading the data. Returns either an Header class or an Archive accordingly if the file contains a single object or it is an archive, respectively. Parameters ---------- filename: str, file-like object The filename of the data file to read. It can also be a file-like object. format: str, Format One of the implemented formats. See documentation for more details. kwargs: dict Additional options for performing the reading. The list of options depends on the format. """ filename = find_file(filename) return formats.get_format(format, filename=filename).head(filename, **kwargs)
f7e0bb98f95b378bd582801b00a26c72aef0b677
21,871
def uncomment_magic(source, language="python", global_escape_flag=True, explicitly_code=True):
    """Unescape Jupyter magics"""
    parser = StringParser(language)
    next_is_magic = False
    for pos, line in enumerate(source):
        if not parser.is_quoted() and (
            next_is_magic
            or is_magic(line, language, global_escape_flag, explicitly_code)
        ):
            source[pos] = unesc(line, language)
            next_is_magic = language == "python" and _LINE_CONTINUATION_RE.match(line)
        parser.read_line(line)
    return source
4d3444844e51c4821f151a3ce9813c8475fa6bd7
21,872
from re import T import logging def clean_nice_ionice_parameters(value): """Verify that the passed parameters are not exploits""" if value: parser = ErrorCatchingArgumentParser() # Nice parameters parser.add_argument("-n", "--adjustment", type=int) # Ionice parameters, not supporting -p parser.add_argument("--classdata", type=int) parser.add_argument("-c", "--class", type=int) parser.add_argument("-t", "--ignore", action="store_true") try: parser.parse_args(value.split()) except ValueError: # Also log at start-up if invalid parameter was set in the ini msg = "%s: %s" % (T("Incorrect parameter"), value) logging.error(msg) return msg, None return None, value
9bbde29a4a8c19441d4c1510c29870c87d928142
21,873
def rand_alnum(length=0):
    """
    Create a random string with random length

    :return: A random string with length > 10 and length < 30.
    """
    jibber = ''.join([letters, digits])
    return ''.join(choice(jibber) for _ in xrange(length or randint(10, 30)))
7a095aabcec5428ea991220ae46c252c47b3436a
21,874
def _GenerateGstorageLink(c, p, b):
    """Generate Google storage link given channel, platform, and build."""
    return 'gs://chromeos-releases/%s-channel/%s/%s/' % (c, p, b)
e5e4a0eb9e27b0f2d74b28289c8f02dc0454f438
21,875
def parse_decl(inputtype, flags):
    """
    Parse type declaration

    @param inputtype: file name or C declarations (depending on the flags)
    @param flags: combination of PT_... constants or 0

    @return: None on failure or (name, type, fields) tuple
    """
    if len(inputtype) != 0 and inputtype[-1] != ';':
        inputtype = inputtype + ';'
    return ida_typeinf.idc_parse_decl(None, inputtype, flags)
a5cf042256a35cface8afd024d5d31ae5eccbe72
21,876
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, kernel_initializer, residual=True): """Post-attention processing.""" monitor_dict = {} # post-attention projection (back to `d_model`) proj_o = tf.get_variable("o/kernel", [d_model, n_head, d_head], dtype=h.dtype, initializer=kernel_initializer) einsum_prefix = get_einsum_prefix(attn_vec.shape.ndims - 2) einsum_str = "{0}nd,hnd->{0}h".format(einsum_prefix) attn_out = tf.einsum(einsum_str, attn_vec, proj_o) proj_bias = tf.get_variable("o/bias", [d_model], dtype=h.dtype, initializer=tf.zeros_initializer()) attn_out += proj_bias attn_out = tf.layers.dropout(attn_out, dropout, training=is_training) output, res_lnorm_dict = residual_and_layer_norm( h, attn_out, use_residual=residual) monitor_dict = update_monitor_dict(monitor_dict, res_lnorm_dict) return output, monitor_dict
d48157d3759ab273b78a45dd2d150e15f66b44bf
21,877
def get_recursively(in_dict, search_pattern):
    """
    Takes a dict with nested lists and dicts, and searches all dicts
    for a key of the field provided.
    """
    fields_found = []
    for key, value in in_dict.items():
        if key == search_pattern:
            fields_found.append(value)
        elif isinstance(value, dict):
            results = get_recursively(value, search_pattern)
            for result in results:
                fields_found.append(result)
    return fields_found
3c9011894c24c25a05d24f8b3b5369c9334dc2c7
21,878
def neals_funnel(ndims = 10, name = 'neals_funnel'): """Creates a funnel-shaped distribution. This distribution was first described in [1]. The distribution is constructed by transforming a N-D gaussian with scale [3, 1, ...] by scaling all but the first dimensions by `exp(x0 / 2)` where `x0` is the value of the first dimension. This distribution is notable for having a relatively very narrow "neck" region which is challenging for HMC to explore. This distribution resembles the posteriors of centrally parameterized hierarchical models. Args: ndims: Dimensionality of the distribution. Must be at least 2. name: Name to prepend to ops created in this function, as well as to the `code_name` in the returned `TargetDensity`. Returns: target: `TargetDensity` specifying the funnel distribution. The `distribution` attribute is an instance of `TransformedDistribution`. Raises: ValueError: If ndims < 2. #### References 1. Neal, R. M. (2003). Slice sampling. Annals of Statistics, 31(3), 705-767. """ if ndims < 2: raise ValueError(f'ndims must be at least 2, saw: {ndims}') with tf.name_scope(name): def bijector_fn(x): """Funnel transform.""" batch_shape = tf.shape(x)[:-1] scale = tf.concat( [ tf.ones(tf.concat([batch_shape, [1]], axis=0)), tf.exp(x[Ellipsis, :1] / 2) * tf.ones(tf.concat([batch_shape, [ndims - 1]], axis=0)), ], axis=-1, ) return tfb.Scale(scale) mg = tfd.MultivariateNormalDiag( loc=tf.zeros(ndims), scale_diag=[3.] + [1.] * (ndims - 1)) dist = tfd.TransformedDistribution( mg, bijector=tfb.MaskedAutoregressiveFlow(bijector_fn=bijector_fn)) return target_spec.TargetDensity.from_distribution( distribution=dist, constraining_bijectors=tfb.Identity(), expectations=dict( params=target_spec.expectation( fn=tf.identity, human_name='Parameters', # The trailing dimensions come from a product distribution of # independent standard normal and a log-normal with a scale of # 3 / 2. # See https://en.wikipedia.org/wiki/Product_distribution for the # formulas. # For the mean, the formulas yield zero. ground_truth_mean=np.zeros(ndims), # For the standard deviation, all means are zero and standard # deivations of the normals are 1, so the formula reduces to # `sqrt((sigma_log_normal + mean_log_normal**2))` which reduces # to `exp((sigma_log_normal)**2)`. ground_truth_standard_deviation=np.array([3.] + [np.exp((3. / 2)**2)] * (ndims - 1)), ),), code_name=f'{name}_ndims_{ndims}', human_name='Neal\'s Funnel', )
99719ce2e192034472bf8082c918c39a6ab1a96f
21,879
def _has_desired_permit(permits, acategory, astatus):
    """ return True if permits has one whose category_code and status_code
    match with the given ones
    """
    if permits is None:
        return False
    for permit in permits:
        if permit.category_code == acategory and \
           permit.status_code == astatus:
            return True
    return False
4cac23303e2b80e855e800a7d55b7826fabd9992
21,880
def colon(mac):
    """ aa:aa:aa:aa:aa:aa """
    return _reformat(mac, separator=':', digit_grouping=2)
7930fdb449f99aa99a2c052be4eee24a8e4605ab
21,881
import requests from bs4 import BeautifulSoup import re def create_strings_from_wikipedia(minimum_length, count, lang): """ Create all string by randomly picking Wikipedia articles and taking sentences from them. """ sentences = [] while len(sentences) < count: # We fetch a random page page_url = "https://{}.wikipedia.org/wiki/Special:Random".format(lang) try: page = requests.get(page_url, timeout=3.0) # take into account timeouts except: continue soup = BeautifulSoup(page.text, "html.parser") for script in soup(["script", "style"]): script.extract() # Only take a certain length lines = list( filter( lambda s: len(s.split(" ")) > minimum_length and not "Wikipedia" in s and not "wikipedia" in s, [ " ".join(re.findall(r"[\w']+", s.strip()))[0:200] for s in soup.get_text().splitlines() ], ) ) # Remove the last lines that talks about contributing sentences.extend(lines[0: max([1, len(lines) - 5])]) return sentences[0:count]
92cc09a081a257d61530e3ddaf8f8215412e5b0d
21,882
def computeHashCheck(ringInputString, ringSize): """Calculate the knot hash check. Args: ringInputString (str): The list of ints to be hashed as a comma-separated list. ringSize (int): The size of the ring to be \"knotted\". Returns: int: Value of the hash check. """ ringInputList = [int(i) for i in ringInputString.split(',')] ringContents = [i for i in range(ringSize)] cursorPosition = 0 skipSize = 0 # Hashing algorithm as defined in AoC Day 10 instructions... for length in ringInputList: # # Duplicate the ring contents to allow for exceeding the length of the original list # doubleContents = ringContents + ringContents # Reverse the order of that length of elements in the list, starting with the element # at the current position sublist = doubleContents[cursorPosition:cursorPosition+length] sublist.reverse() doubleContents[cursorPosition:cursorPosition+length] = sublist if cursorPosition + length > ringSize: ringContents = doubleContents[ringSize:cursorPosition+ringSize] + doubleContents[cursorPosition:ringSize] else: ringContents = doubleContents[:ringSize] # Move the current position forward by that length plus the skip size cursorPosition = cursorPosition + length + skipSize # Deal with going around the ring if cursorPosition > ringSize: cursorPosition -= ringSize # Increase the skip size by one skipSize += 1 # The hash is then the product of the first two elements in the transformed list check = ringContents[0] * ringContents[1] #print(ringContents) return check
75dce4aacdd4ae03fa34532471a21a43a81fbd13
21,883
from masci_tools.util.xml.xml_setters_basic import xml_delete_tag from masci_tools.util.xml.common_functions import check_complex_xpath from typing import Union from typing import Iterable from typing import Any def delete_tag(xmltree: Union[etree._Element, etree._ElementTree], schema_dict: 'fleur_schema.SchemaDict', tag_name: str, complex_xpath: 'etree._xpath' = None, occurrences: Union[int, Iterable[int]] = None, **kwargs: Any) -> Union[etree._Element, etree._ElementTree]: """ This method deletes a tag with a uniquely identified xpath. :param xmltree: an xmltree that represents inp.xml :param schema_dict: InputSchemaDict containing all information about the structure of the input :param tag: str of the tag to delete :param complex_xpath: an optional xpath to use instead of the simple xpath for the evaluation :param occurrences: int or list of int. Which occurence of the parent nodes to delete a tag. By default all nodes are used. Kwargs: :param contains: str, this string has to be in the final path :param not_contains: str, this string has to NOT be in the final path :returns: xmltree with deleted tags """ base_xpath = schema_dict.tag_xpath(tag_name, **kwargs) if complex_xpath is None: complex_xpath = base_xpath check_complex_xpath(xmltree, base_xpath, complex_xpath) xmltree = xml_delete_tag(xmltree, complex_xpath, occurrences=occurrences) return xmltree
2e4d9276ecc8d42c0890e81aa0afa61adf23c178
21,884
import numpy as np


def cart2pol_vectorised(x, y):
    """
    A vectorised version of the cartesian to polar conversion.

    :param x:
    :param y:
    :return:
    """
    r = np.sqrt(np.add(np.power(x, 2), np.power(y, 2)))
    th = np.arctan2(y, x)
    return r, th
dbef7d4663990a9e3c775e53649ab30e0dc8767a
21,885
def encryption(text):
    """
    encryption function for saving ideas
    :param text:
    :return:
    """
    return AES.new(cipher_key, AES.MODE_CBC, cipher_IV456).encrypt(text * 16)
c321dd2e0c95f15c9a4b04f1b13471a1f1a7aceb
21,886
import numpy as np


def _concat(to_stack):
    """ function to stack (or concatenate) depending on dimensions """
    if np.asarray(to_stack[0]).ndim >= 2:
        return np.concatenate(to_stack)
    else:
        return np.hstack(to_stack)
1b4ab755aed3e1823629301e83d070433d918c7c
21,888
import math

import numpy as np


def make_orthonormal_matrix(n):
    """
    Makes a square matrix which is orthonormal by concatenating
    random Householder transformations

    Note: May not distribute uniformly in the O(n) manifold.
    Note: Naively using ortho_group, special_ortho_group in scipy
          will result in unbearable computing time! Not useful
    """
    A = np.identity(n)
    d = np.zeros(n)
    d[n-1] = np.random.choice([-1.0, 1.0])
    for k in range(n-2, -1, -1):
        # generate random Householder transformation
        x = np.random.randn(n-k)
        s = np.sqrt((x**2).sum())  # norm(x)
        sign = math.copysign(1.0, x[0])
        s *= sign
        d[k] = -sign
        x[0] += s
        beta = s * x[0]
        # apply the transformation
        y = np.dot(x, A[k:n, :]) / beta
        A[k:n, :] -= np.outer(x, y)
    # change sign of rows
    A *= d.reshape(n, 1)
    return A
ec7f39eba0d471f377519db86cee85a0b640593b
21,889
from typing import Dict
from typing import Tuple


def build_synthetic_dataset_cae(window_size: int, **kwargs: Dict) -> Tuple[SingleGapWindowsSequence, SingleGapWindowsSequence]:
    """Return SingleGapWindowsSequence for training and testing.

    Parameters
    --------------------------
    window_size: int,
        Windows size to use for rendering the synthetic datasets.
    """
    return build_synthetic_dataset(window_size, SingleGapWindowsSequence, **kwargs)
9ab09aadf9578a3475a2e9bbaa7cfa75a3adacdf
21,891
import random def montecarlo_2048(game, simulations_per_move, steps, count_zeros=False, print_averages=True, return_scores=False): """ Test each possible move, run montecarlo simulations and return a dictionary of average scores, one score for each possible move """ # Retrieve game score at the current state game_score = game.calculate_score() # Retrieve list of possible moves allowed_moves = game.check_allowed_moves() # Create a dictionary to store average scores per allowable move average_scores = np.zeros(4) # Will contain 4 lists of scores, one list for each starting move (LEFT, DOWN, RIGHT, UP) scores_per_move = [[0]] * 4 for move in allowed_moves: score_list = [] for simulation in range(simulations_per_move): # Create a a copy of the game at the current state game_copy = deepcopy(game) game_copy.make_move(move) for i in range(steps): # Check if there is any move allowed if len(game_copy.check_allowed_moves()) > 0: # Pick a random move within the allowed ones random_move = random.choice(game_copy.check_allowed_moves()) game_copy.make_move(random_move) # append simulation result if count_zeros == True: score_list.append(game_copy.calculate_score(score_type="simple_sum")) else: score_list.append(game_copy.calculate_score(score_type="simple_sum")) scores_per_move[move-1] = score_list average_scores[move-1] = np.average(score_list) if print_averages: print("[1] LEFT score: ", average_scores[0]) print("[2] DOWN score: ", average_scores[1]) print("[3] RIGHT score: ", average_scores[2]) print("[4] UP score: ", average_scores[3]) print("average_scores: ", average_scores) choice = np.argmax(average_scores) + 1 steal = 0 for value in average_scores: if value > 0: steal = 1 if steal == 0: random_scores = np.zeros(4) random_scores[np.random.choice([0,1,2,3])] = 1 return random_scores if return_scores: return scores_per_move else: return average_scores
1bfd9beb78e6f832105b61d28af2218b6a86eb1a
21,892
def getAggregation(name, local=False, minOnly=False, maxOnly=False):
    """ Get aggregation. """
    toReturn = STATISTICS[name].getStatistic()
    if local:
        return STATISTICS[name].getLocalValue()
    elif minOnly and "min" in toReturn:
        return toReturn["min"]
    elif maxOnly and "max" in toReturn:
        return toReturn["max"]
    else:
        return toReturn
2f009c4db871fe56a8c26ee75728259e33b53280
21,893
import pandas as pd


def get_gene_symbol(row):
    """Extracts gene name from annotation

    Args:
        row (pandas.Series): annotation info (str) at 'annotation' index
    Returns:
        gene_symbol (str): gene name(s)
    """
    pd.options.mode.chained_assignment = None
    lst = row["annotation"].split(",")
    genes = [token.split("|")[0] for token in lst]
    gene_symbol = ",".join(set(genes))
    return gene_symbol
d564266fa6a814b4c7cfce9f7f2fb8d5e1c1024f
21,894
import datetime


def session_login():
    """
    Session login
    :return:
    """
    print("Session Login")
    # Get the ID token sent by the client
    # id_token = request.headers.get('csfToken')
    id_token = request.values.get('idToken')
    # Set session expiration to 5 days.
    expires_in = datetime.timedelta(days=5)
    try:
        # Create the session cookie. This will also verify the ID token in the process.
        # The session cookie will have the same claims as the ID token.
        session_cookie = auth.create_session_cookie(id_token, expires_in=expires_in)
        response = jsonify({'status': 'success'})
        # Set cookie policy for session cookie.
        expires = datetime.datetime.now() + expires_in
        response.set_cookie('session', session_cookie, expires=expires, httponly=True, secure=True)
        return response
    except exceptions.FirebaseError:
        return abort(401, 'Failed to create a session cookie')
6c5c27d50c4c62e3f62b67433f4161835f2a6478
21,895
from django.core.cache import get_cache

# Module-level registry of already-created cache instances
cache_factory = {}


def get_cache_factory(cache_type):
    """
    Helper to only return a single instance of a cache.

    As of Django 1.7, may not be needed.
    """
    if cache_type is None:
        cache_type = 'default'

    if cache_type not in cache_factory:
        cache_factory[cache_type] = get_cache(cache_type)

    return cache_factory[cache_type]
89da971b92395c4e604d51d66148688f2ab4f362
21,896
import pickle

import six


def ruleset_from_pickle(file):
    """
    Read a pickled ruleset from disk.

    This can be either pickled Rules or Ryu rules.

    file: A readable binary file-like object containing the pickled ruleset
    return: A ruleset, a list of Rules
    """
    if six.PY3:
        ruleset = pickle.load(file, encoding='latin1')
    else:
        ruleset = pickle.load(file)

    # Did we load a list of Rules()?
    if isinstance(ruleset, list) and ruleset and isinstance(ruleset[0], Rule):
        return ruleset

    # Must be Ryu rules
    if isinstance(ruleset, dict):
        ruleset = ruleset["flow_stats"]
    ruleset = [rule_from_ryu(r) for r in ruleset]
    return ruleset
f68e005ece3697126dd0528952e1610695054332
21,897
import numpy as np
import rasterio


def delayed_read_band_data(fpar_dataset_name, qc_dataset_name):
    """Read band data from a HDF4 file.

    Assumes the first dimension has size 1.

    FparLai_QC bit layout:

        Bit no.             5-7  3-4  2  1  0
        Acceptable values   000  00   0  0  0
                            001  01   0  0  0
        Unacceptable mask   110  10   1  1  1
    """
    # tile_shape, max_valid and fill_value are module-level constants of the enclosing module.
    with rasterio.open(fpar_dataset_name) as dataset:
        fpar_data = dataset.read()[0]
    with rasterio.open(qc_dataset_name) as dataset:
        qc_data = dataset.read()[0]

    assert fpar_data.shape == tile_shape
    assert qc_data.shape == tile_shape

    # Ignore invalid and poor quality data (see the unacceptable mask above).
    fpar_data[
        np.logical_or(fpar_data > max_valid, np.bitwise_and(qc_data, 0b11010111))
    ] = fill_value

    return fpar_data
cda95533b07101d58883c0f5fa32870c48c09e2a
21,899
import pandas as pd


def _aggregate_pop_simplified_comix(
    pop: pd.Series, target: pd.DataFrame
) -> pd.DataFrame:
    """
    Aggregates the population vector into the CoMix age bands.

    :param pop: 1-year based population
    :param target: target dataframe we will want to multiply or divide with
    :return: Returns a dataframe that can be multiplied with the CoMix matrix to
        get a table of contacts, or used to divide the contacts table to get the
        CoMix matrix back
    """
    agg = pd.DataFrame(
        {
            "[0,17)": [pop[:17].sum()],
            "[17,70)": [pop[17:70].sum()],  # ages 17-69 inclusive
            "70+": [pop[70:].sum()],
        }
    )
    return pd.concat([agg] * len(target.columns)).set_index(target.index).T
11ddfb103c95416b1a93577f90e12aa6159123eb
21,900
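A toy usage sketch of _aggregate_pop_simplified_comix above, with a synthetic flat population (1,000 people per year of age) and an all-ones 3x3 stand-in for the CoMix matrix; the band labels are the ones used inside the function.

import numpy as np
import pandas as pd

bands = ["[0,17)", "[17,70)", "70+"]
pop = pd.Series(np.full(101, 1000))                 # ages 0-100, 1 000 people each
comix = pd.DataFrame(np.ones((3, 3)), index=bands, columns=bands)

agg = _aggregate_pop_simplified_comix(pop, comix)
print(agg)          # each column holds the band populations: 17000, 53000, 31000
print(comix * agg)  # contacts table under this toy population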
def authenticate():
    """
    Uses HTTP basic authentication to generate an authentication token.

    Any resource that requires authentication can use either basic auth or
    this token.
    """
    token = serialize_token(basic_auth.current_user())
    response = {'token': token.decode('ascii')}
    return jsonify(response)
adfd4d80bb08c6a0c3175495b4b2ab1aa0b898c6
21,901
import torch


def split_image(image, N):
    """Split a batch of images into N x N spatial patches.

    image: (B, C, W, H)
    N: patch side length (edge patches may be smaller if W or H is not divisible by N)
    """
    batches = []
    for i in list(torch.split(image, N, dim=2)):
        batches.extend(list(torch.split(i, N, dim=3)))
    return batches
da51c3520dfee740a36d5e0241f3fd46a07f2752
21,902
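A quick usage sketch of split_image above.

import torch

image = torch.randn(2, 3, 8, 8)         # batch of 2 RGB images, 8x8
patches = split_image(image, 4)
print(len(patches), patches[0].shape)   # 4 patches, each torch.Size([2, 3, 4, 4])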
import torch.nn.functional as F


def _upsample_add(x, y):
    """Upsample x to y's spatial size and add the two feature maps.

    Args:
        x: (Variable) top feature map to be upsampled.
        y: (Variable) lateral feature map.

    Returns:
        (Variable) added feature map.

    Note: in PyTorch, when the input size is odd, the feature map upsampled with
    `F.upsample(..., scale_factor=2, mode='nearest')` may not match the lateral
    feature map size, e.g. original input size [N,_,15,15] -> conv2d feature map
    size [N,_,8,8] -> upsampled feature map size [N,_,16,16]. So we choose
    bilinear upsampling, which supports arbitrary output sizes.
    """
    _, _, H, W = y.size()
    return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
facac22b50906509138a479074a7744b737d554d
21,903
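A shape-level sanity check for _upsample_add above, mimicking an FPN top-down step with an odd-sized lateral map.

import torch

x = torch.randn(1, 256, 8, 8)    # coarser top-down feature map
y = torch.randn(1, 256, 15, 15)  # lateral feature map with odd spatial size
out = _upsample_add(x, y)
print(out.shape)                 # torch.Size([1, 256, 15, 15])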
from math import cos, sin


def get_eta_and_mu(alpha):
    """Get the values of eta and mu.

    See (4.46) of the PhD thesis of J.-M. Battini.

    Parameters
    ----------
    alpha: float
        The angle of the rotation.

    Returns
    -------
    eta: float
        The first coefficient.
    mu: float
        The second coefficient.
    """
    if alpha == 0.:
        eta = 1 / 12
        mu = 1 / 360
    else:
        eta = (2 * sin(alpha) - alpha * (1 + cos(alpha))) / \
              (2 * alpha ** 2 * sin(alpha))
        mu = (alpha * (alpha + sin(alpha)) - 8 * sin(alpha / 2) ** 2) / \
             (4 * alpha ** 4 * sin(alpha / 2) ** 2)
    return eta, mu
4f1a215a52deda250827d4ad3d06f8731c69dc9d
21,904
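A symbolic sanity check (assuming SymPy is available) that the closed-form expressions in get_eta_and_mu reduce to the special-case values 1/12 and 1/360 as alpha goes to zero.

import sympy as sp

a = sp.symbols('alpha')
eta = (2 * sp.sin(a) - a * (1 + sp.cos(a))) / (2 * a ** 2 * sp.sin(a))
mu = (a * (a + sp.sin(a)) - 8 * sp.sin(a / 2) ** 2) / (4 * a ** 4 * sp.sin(a / 2) ** 2)

print(sp.limit(eta, a, 0))  # 1/12
print(sp.limit(mu, a, 0))   # 1/360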
import pandas as pd


def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    Returns:
        df_all - Pandas DataFrame containing city data with no filters
        df - Pandas DataFrame containing city data filtered by month and day
    """
    print('Loading city data...')

    # Load DataFrame for city (CITY_DATA maps city names to CSV paths)
    df = pd.read_csv(CITY_DATA[city])

    # Convert start and end times to datetime type
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['End Time'] = pd.to_datetime(df['End Time'])

    # Create new time-based columns
    df['month'] = df['Start Time'].dt.month
    df['day_str'] = df['Start Time'].dt.day_name()
    df['day_int'] = df['Start Time'].dt.weekday
    df['hour'] = df['Start Time'].dt.hour

    # Keep an unfiltered copy of the DataFrame
    df_all = df.copy()

    # Filter DataFrame by month (month_list maps month names to their index)
    if month != 'All':
        month_idx = month_list.index(month)
        df = df[df['month'] == month_idx]

    # Filter DataFrame by day of week
    if day != 'All':
        df = df[df['day_str'] == day]

    print('-' * 40)

    return df_all, df
3d1c1ab7b2f346dab0fe3f01a262983c880eda34
21,905
def write_error_row(rowNum, errInfo):
    """Google Sheets API code.

    Writes error info to the ERROR sheet of the NFL Team Articles spreadsheet.
    https://docs.google.com/spreadsheets/d/1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ/edit#gid=0
    """
    credentials = get_credentials()
    http = credentials.authorize(mgs.httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    service = mgs.discovery.build('sheets', 'v4', http=http,
                                  discoveryServiceUrl=discoveryUrl)

    spreadsheet_id = '1XiOZWw3S__3l20Fo0LzpMmnro9NYDulJtMko09KsZJQ'
    value_input_option = 'RAW'
    rangeName = 'ERROR!A' + str(rowNum)
    values = errInfo
    body = {
        'values': values
    }

    result = service.spreadsheets().values().update(spreadsheetId=spreadsheet_id,
                                                    range=rangeName,
                                                    valueInputOption=value_input_option,
                                                    body=body).execute()
    return result
5a3d071d78656f6991103cac72c7ce46f025d689
21,906
import json


def get_last_transaction():
    """Return the first transaction of the latest block from the blockchain."""
    try:
        transaction = w3.eth.get_transaction_by_block(w3.eth.blockNumber, 0)
        tx_dict = dict(transaction)
        tx_json = json.dumps(tx_dict, cls=HexJsonEncoder)
        return tx_json
    except Exception as err:
        print("Error '{0}' occurred.".format(err))
        return {'error': 'Error while fetching transaction'}
89a70c474670b6e691fde8424ffc78b739ee415e
21,907
def get_tp_model() -> TargetPlatformModel:
    """
    A method that generates a default target platform model, with a base 8-bit
    quantization configuration and an 8, 4, 2 bits configuration list for
    mixed-precision quantization.

    NOTE: in order to generate a target platform model with different
    configurations but with the same Operators Sets (for tests, experiments,
    etc.), use this method implementation as a test-case, i.e., override the
    'get_op_quantization_configs' method and use its output to call
    'generate_tp_model' with your configurations.

    Returns:
        A TargetPlatformModel object.
    """
    base_config, mixed_precision_cfg_list = get_op_quantization_configs()
    return generate_tp_model(default_config=base_config,
                             base_config=base_config,
                             mixed_precision_cfg_list=mixed_precision_cfg_list,
                             name='qnnpack_tp_model')
5a501f18a090927f7aeba9883f3676ede595944c
21,908
from typing import Optional


def check_sparv_version() -> Optional[bool]:
    """Check if the Sparv data dir is outdated.

    Returns:
        True if up to date, False if outdated, None if the version file is missing.
    """
    data_dir = paths.get_data_path()
    version_file = (data_dir / VERSION_FILE)
    if version_file.is_file():
        return version_file.read_text() == __version__
    return None
2c2ebeee0ad0c8a08c841b594ca45676a22407d7
21,910
import numpy as np
from scipy.integrate import solve_ivp


def grav_n(expt_name, num_samples, num_particles, T_max, dt, srate, noise_std, seed):
    """2-body gravitational problem."""

    ##### ENERGY #####
    def potential_energy(state):
        """U = sum_{i,j>i} G m_i m_j / r_ij"""
        tot_energy = np.zeros((1, 1, state.shape[2]))
        for i in range(state.shape[0]):
            for j in range(i + 1, state.shape[0]):
                r_ij = ((state[i:i + 1, 1:3] - state[j:j + 1, 1:3]) ** 2).sum(1, keepdims=True) ** .5
                m_i = state[i:i + 1, 0:1]
                m_j = state[j:j + 1, 0:1]
                tot_energy += m_i * m_j / r_ij
        U = -tot_energy.sum(0).squeeze()
        return U

    def kinetic_energy(state):
        """T = sum_i .5*m*v^2"""
        energies = .5 * state[:, 0:1] * (state[:, 3:5] ** 2).sum(1, keepdims=True)
        T = energies.sum(0).squeeze()
        return T

    def total_energy(state):
        return potential_energy(state) + kinetic_energy(state)

    ##### DYNAMICS #####
    def get_accelerations(state, epsilon=0):
        # shape of state is [bodies x properties]
        net_accs = []  # [nbodies x 2]
        for i in range(state.shape[0]):  # number of bodies
            other_bodies = np.concatenate([state[:i, :], state[i + 1:, :]], axis=0)
            displacements = other_bodies[:, 1:3] - state[i, 1:3]  # indexes 1:3 -> pxs, pys
            distances = (displacements ** 2).sum(1, keepdims=True) ** 0.5
            masses = other_bodies[:, 0:1]  # index 0 -> mass
            pointwise_accs = masses * displacements / (distances ** 3 + epsilon)  # G=1
            net_acc = pointwise_accs.sum(0, keepdims=True)
            net_accs.append(net_acc)
        net_accs = np.concatenate(net_accs, axis=0)
        return net_accs

    def update(t, state):
        state = state.reshape(-1, 5)  # [bodies, properties]
        deriv = np.zeros_like(state)
        deriv[:, 1:3] = state[:, 3:5]  # dx, dy = vx, vy
        deriv[:, 3:5] = get_accelerations(state)
        return deriv.reshape(-1)

    ##### INTEGRATION SETTINGS #####
    def get_orbit(state, update_fn=update, t_points=100, t_span=[0, 2], **kwargs):
        if 'rtol' not in kwargs:
            kwargs['rtol'] = 1e-12
        orbit_settings = locals()

        nbodies = state.shape[0]
        t_eval = np.arange(t_span[0], t_span[1], dt)
        if len(t_eval) != t_points:
            t_eval = t_eval[:-1]
        orbit_settings['t_eval'] = t_eval

        path = solve_ivp(fun=update_fn, t_span=t_span, y0=state.flatten(),
                         t_eval=t_eval, method='DOP853', **kwargs)
        orbit = path['y'].reshape(nbodies, 5, t_points)
        return orbit, orbit_settings

    ##### INITIALIZE THE TWO BODIES #####
    def random_config(orbit_noise=5e-2, min_radius=0.5, max_radius=1.5):
        state = np.zeros((2, 5))
        state[:, 0] = 1
        pos = np.random.rand(2) * (max_radius - min_radius) + min_radius
        r = np.sqrt(np.sum((pos ** 2)))

        # velocity that yields a circular orbit
        vel = np.flipud(pos) / (2 * r ** 1.5)
        vel[0] *= -1
        vel *= 1 + orbit_noise * np.random.randn()  # make the circular orbits SLIGHTLY elliptical

        state[:, 1:3] = pos
        state[:, 3:5] = vel
        state[1, 1:] *= -1
        return state

    ##### HELPER FUNCTION #####
    def coords2state(coords, nbodies=2, mass=1):
        timesteps = coords.shape[0]
        state = coords.T
        state = state.reshape(-1, nbodies, timesteps).transpose(1, 0, 2)
        mass_vec = mass * np.ones((nbodies, 1, timesteps))
        state = np.concatenate([mass_vec, state], axis=1)
        return state

    ##### INTEGRATE AN ORBIT OR TWO #####
    def sample_orbits(timesteps=50, trials=1000, nbodies=2, orbit_noise=5e-2,
                      min_radius=0.5, max_radius=1.5, t_span=[0, 20],
                      verbose=False, **kwargs):
        orbit_settings = locals()
        if verbose:
            print("Making a dataset of near-circular 2-body orbits:")

        x, dx, e, ks, ms = [], [], [], [], []
        np.random.seed(seed)
        for _ in range(trials):
            state = random_config(orbit_noise, min_radius, max_radius)
            orbit, _ = get_orbit(state, t_points=timesteps, t_span=t_span, **kwargs)
            batch = orbit.transpose(2, 0, 1).reshape(-1, 10)

            # Sub-sample the fine integration grid down to the sampling rate
            ssr = int(srate / dt)
            batch = batch[::ssr]

            for state in batch:
                dstate = update(None, state)
                # reshape from [nbodies, state] where state=[m, qx, qy, px, py]
                # to [canonical_coords] = [qx1, qx2, qy1, qy2, px1, px2, ...]
                coords = state.reshape(nbodies, 5).T[1:].flatten()
                dcoords = dstate.reshape(nbodies, 5).T[1:].flatten()
                coords += np.random.randn(*coords.shape) * noise_std
                dcoords += np.random.randn(*dcoords.shape) * noise_std
                x.append(coords)
                dx.append(dcoords)

                shaped_state = state.copy().reshape(2, 5, 1)
                e.append(total_energy(shaped_state))
                ks.append(np.ones(num_particles))
                ms.append(np.ones(num_particles))

        data = {'x': np.stack(x)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'dx': np.stack(dx)[:, [0, 2, 1, 3, 4, 6, 5, 7]],
                'energy': np.stack(e),
                'ks': np.stack(ks),
                'mass': np.stack(ms)}
        return data

    return sample_orbits(timesteps=int(np.ceil(T_max / dt)), trials=num_samples, nbodies=2,
                         orbit_noise=5e-2, min_radius=0.5, max_radius=1.5,
                         t_span=[0, T_max], verbose=False)
05f8aa5f6b864440a8a69be35f23d3938114e133
21,911
import csaps
import numpy as np


def fit_spline_linear_extrapolation(cumul_observations, smoothing_fun=simple_mirroring,
                                    smoothed_dat=[], plotf=False, smoothep=True,
                                    smooth=0.5, ns=3, H=7):
    """
    Linear extrapolation by splines on log daily cases.

    Input:
        cumul_observations: cumulative observations
        smoothed_dat: list of trends of incremental history
        ns: optional smoothing window parameter
        H: forecasting horizon
        smooth: smoothing parameter of the cubic smoothing spline
        smoothep: whether to extrapolate from the smoothed trend or from the raw differences
    Output:
        forecast over horizon H in terms of cumulative numbers, starting from the
        last observation
    """
    if len(smoothed_dat) == 0:
        smoothed_dat = smoothing_fun(cumul_observations, Ws=ns)
    val_start = smoothed_dat[-1]

    dat = np.log(list(smoothed_dat + 1))
    spl = csaps.UnivariateCubicSmoothingSpline(range(len(dat)), dat, smooth=smooth)
    dat_diff = np.diff(spl(np.arange(len(dat))))

    x = np.arange(len(dat_diff))
    spl = csaps.UnivariateCubicSmoothingSpline(x, dat_diff, smooth=smooth)
    dat_diff_sm = spl(x)

    step = dat_diff_sm[-1] - dat_diff_sm[-2]
    if smoothep:
        dat_forecast = dat_diff_sm[-1] + step * np.arange(1, H + 1)  # + seasonality
    else:
        dat_forecast = dat_diff[-1] + step * np.arange(1, H + 1)  # + seasonality

    forecast = np.insert(np.exp(np.cumsum(dat_forecast)) * val_start, 0, val_start)
    return forecast
62c0feacf87096a3889e63a2193bc93092b9dc02
21,912
def compute_dl_target(location):
    """
    Compute the download target directory.

    When the location is empty, set the location path to /usr/sys/inst.images.

    return: return code : 0 - OK
                          1 - if error
            dl_target value or msg in case of error
    """
    if not location or not location.strip():
        loc = "/usr/sys/inst.images"
    else:
        loc = location.rstrip('/')

    dl_target = loc
    return 0, dl_target
419b9fcad59ca12b54ad981a9f3b265620a22ab1
21,913
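Quick examples of compute_dl_target above.

print(compute_dl_target(""))              # (0, '/usr/sys/inst.images')
print(compute_dl_target("   "))           # (0, '/usr/sys/inst.images')
print(compute_dl_target("/tmp/images/"))  # (0, '/tmp/images')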
def hz_to_angstrom(frequency):
    """Convert a frequency in Hz to a wavelength in Angstroms.

    Parameters
    ----------
    frequency: float
        The frequency in Hz.

    Returns
    -------
    The wavelength in Angstroms.
    """
    return C / frequency / ANGSTROM
9fed63f7933c6d957a35de7244464d0303abf3ce
21,914
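A worked example for hz_to_angstrom above. The concrete values of C and ANGSTROM are assumptions here (SI speed of light and one Angstrom in metres), since the module-level constants are not shown.

C = 2.99792458e8   # assumed: speed of light in m/s
ANGSTROM = 1e-10   # assumed: one Angstrom in metres

print(hz_to_angstrom(5e14))  # ~5995.8 Angstroms (visible light)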
from sqlparse import tokens as T


def is_literal(token):
    """Return True if the token is a literal (string or numeric)."""
    return token.ttype in T.Literal
46527a24660f8544951b999ec556a4cf12204087
21,915
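A usage sketch of is_literal above, assuming the token types come from sqlparse (as the alias T and the attribute T.Literal suggest).

import sqlparse

stmt = sqlparse.parse("SELECT 42, 'abc' FROM t WHERE x = 3.14")[0]
literals = [tok.value for tok in stmt.flatten() if is_literal(tok)]
print(literals)  # ['42', "'abc'", '3.14']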
def PutObject(*, session, bucket, key, content, type_="application/octet-stream"):
    """Saves data to S3 under the specified key and bucket name.

    :param session: The session to use for AWS connection
    :type session: boto3.session.Session
    :param bucket: Name of bucket
    :type bucket: str
    :param key: Name of file
    :type key: str
    :param content: Data to save
    :type content: bytes | str
    :param type_: Content type of the data to put
    :type type_: str
    :return: The new S3 object
    :rtype: boto3.core.resource.S3Object
    """
    s3conn = session.connect_to("s3")

    # Make sure we have the bucket to add the object to
    try:
        GetOrCreateBuckets(session, bucket)
    except Exception:
        # There is a chance that the user trying to PutObject does not have permissions
        # to Create/List Buckets. In such cases an error is thrown. We can still try to
        # save and assume the bucket already exists.
        pass

    # Build the object collection bound to the bucket and key
    S3Objects = session.get_collection("s3", "S3ObjectCollection")
    s3objects = S3Objects(connection=s3conn, bucket=bucket, key=key)

    if isinstance(content, str):
        bindata = content.encode("utf-8")
    else:
        bindata = content

    # Now we create the object
    return s3objects.create(key=key, acl="private", content_type=type_, body=bindata)
908581b7d61c3cce9a976b03a0bf7d3ed8c691ca
21,916
from io import StringIO


def insert_sequences_into_tree(aln, moltype, params={}, write_log=True):
    """Returns a tree built by placing the sequences of Alignment aln with pplacer.

    aln: an xxx.Alignment object, or data that can be used to build one.
    moltype: cogent.core.moltype.MolType object
    params: dict of parameters to pass in to the pplacer app controller.

    The result will be a tree object, or None if tree building fails.
    """
    # convert aln to phy since seq_names need to be fixed to run through pplacer
    new_aln = get_align_for_phylip(StringIO(aln))

    # convert aln to fasta in case it is not already a fasta file
    aln2 = Alignment(new_aln)
    seqs = aln2.toFasta()

    ih = '_input_as_multiline_string'

    pplacer_app = Pplacer(params=params,
                          InputHandler=ih,
                          WorkingDir=None,
                          SuppressStderr=False,
                          SuppressStdout=False)

    pplacer_result = pplacer_app(seqs)

    # write a log file
    if write_log:
        log_fp = join(params["--out-dir"],
                      'log_pplacer_' + split(get_tmp_filename())[-1])
        log_file = open(log_fp, 'w')
        log_file.write(pplacer_result['StdOut'].read())
        log_file.close()

    # use guppy to convert the json file into a placement tree
    guppy_params = {'tog': None}

    new_tree = build_tree_from_json_using_params(pplacer_result['json'].name,
                                                 output_dir=params['--out-dir'],
                                                 params=guppy_params)

    pplacer_result.cleanUp()

    return new_tree
d81167b49f2e375f17a227d708d5115af5d18549
21,917
from azure.cli.core.azclierror import CLIInternalError


def billing_invoice_download(client, account_name=None, invoice_name=None,
                             download_token=None, download_urls=None):
    """Get a URL to download an invoice.

    :param account_name: The ID that uniquely identifies a billing account.
    :param invoice_name: The ID that uniquely identifies an invoice.
    :param download_token: The download token with document source and document ID.
    :param download_urls: An array of download urls for individual documents.
    """
    if account_name and invoice_name and download_token:
        return client.download_invoice(account_name, invoice_name, download_token)
    if account_name and download_urls:
        return client.download_multiple_modern_invoice(account_name, download_urls)
    if download_urls:
        return client.download_multiple_billing_subscription_invoice(download_urls)
    if invoice_name and download_token:
        return client.download_billing_subscription_invoice(invoice_name, download_token)

    raise CLIInternalError(
        "Uncaught argument combination for Azure CLI to handle. Please submit an issue."
    )
a75326953188e0aaf0145ceeaa791460ec0c0823
21,918
import re


def find_classes(text):
    """
    Find lines that contain a top-level open brace, then look for "class ... {"
    in those lines.
    """
    nest_level = 0
    brace_re = re.compile(r"[\{\}]")
    classname_re = r"[\w\<\>\:]+"
    class_re = re.compile(
        r"(?:class|struct)\s*(\w+)\s*(?:\:\s*public\s*" + classname_re +
        r"(?:,\s*public\s*" + classname_re + r")*)?\s*\{")

    classes = []
    lines = text.split("\n")
    for (i, line) in enumerate(lines):
        # The top-level check is currently disabled; every line is scanned.
        if True:  # nest_level == 0 and (i == 0 or "template" not in lines[i - 1]):
            classes.extend(class_re.findall(line))
        braces = brace_re.findall(line)
        for brace in braces:
            if brace == "{":
                nest_level += 1
            elif brace == "}":
                nest_level -= 1
    return classes
126bc091a809e152c3d447ffdd103c764bc6c9ac
21,919
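A usage sketch of find_classes above on a small C++ snippet.

snippet = """
class Widget : public Base {
public:
    int x;
};
struct Point { int x, y; };
"""
print(find_classes(snippet))  # ['Widget', 'Point']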