Dataset schema — content: string (35 to 762k chars) · sha1: string (40 chars) · id: int64 (0 to 3.66M)
import numpy as np

def relu(x):
    """The rectifier activation function. Only activates if argument x is positive.

    Args:
        x (ndarray): weighted sum of inputs
    """
    # np.clip(x, 0, np.finfo(x.dtype).max, out=x)
    # return x
    return np.where(x >= 0, x, 0)
61b7a4ce252c72dd69251a8783c572c8128a01c5
3,649,046
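A minimal usage sketch for relu above (hypothetical input values):

    import numpy as np

    x = np.array([-2.0, -0.5, 0.0, 1.5])
    print(relu(x))  # [0.  0.  0.  1.5]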
from heapq import heappush, heappop
from itertools import count

import networkx as nx

def k_shortest_paths(G, source, target, k=1, weight='weight'):
    """Returns the k-shortest paths from source to target in a weighted graph G.

    Parameters
    ----------
    G : NetworkX graph
    source : node
       Starting node
    target : node
       Ending node
    k : integer, optional (default=1)
        The number of shortest paths to find
    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight

    Returns
    -------
    lengths, paths : lists
       Returns a tuple with two lists.
       The first list stores the length of each k-shortest path.
       The second list stores each k-shortest path.

    Raises
    ------
    NetworkXNoPath
       If no path exists between source and target.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> print(k_shortest_paths(G, 0, 4, 4))
    ([1, 2, 2, 2], [[0, 4], [0, 1, 4], [0, 2, 4], [0, 3, 4]])

    Notes
    ------
    Edge weight attributes must be numerical and non-negative.
    Distances are calculated as sums of weighted edges traversed.
    This implementation uses NetworkX 1.x APIs (G.edge, edges_iter) and relies
    on a helper `get_path_length(G, path, weight)` defined elsewhere in the module.
    """
    if source == target:
        return ([0], [[source]])

    length, path = nx.single_source_dijkstra(G, source, target, weight=weight)
    if target not in length:
        raise nx.NetworkXNoPath("node %s not reachable from %s" % (target, source))

    lengths = [length[target]]
    paths = [path[target]]
    c = count()
    B = []
    G_original = G.copy()

    for i in range(1, k):
        for j in range(len(paths[-1]) - 1):
            spur_node = paths[-1][j]
            root_path = paths[-1][:j + 1]

            edges_removed = []
            for c_path in paths:
                if len(c_path) > j and root_path == c_path[:j + 1]:
                    u = c_path[j]
                    v = c_path[j + 1]
                    if G.has_edge(u, v):
                        edge_attr = G.edge[u][v]
                        G.remove_edge(u, v)
                        edges_removed.append((u, v, edge_attr))

            for n in range(len(root_path) - 1):
                node = root_path[n]
                # out-edges (materialize the iterator since we mutate G while iterating)
                for u, v, edge_attr in list(G.edges_iter(node, data=True)):
                    G.remove_edge(u, v)
                    edges_removed.append((u, v, edge_attr))

                if G.is_directed():
                    # in-edges
                    for u, v, edge_attr in list(G.in_edges_iter(node, data=True)):
                        G.remove_edge(u, v)
                        edges_removed.append((u, v, edge_attr))

            spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)
            if target in spur_path and spur_path[target]:
                total_path = root_path[:-1] + spur_path[target]
                total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]
                heappush(B, (total_path_length, next(c), total_path))

            for e in edges_removed:
                u, v, edge_attr = e
                G.add_edge(u, v, edge_attr)

        if B:
            (l, _, p) = heappop(B)
            lengths.append(l)
            paths.append(p)
        else:
            break

    return (lengths, paths)
68918c78b1f33c07cd3494286a00b1c020256b56
3,649,047
def allowed_file(filename):
    """
    Check the image extension
    Currently, only supports jpg, jpeg and png
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
635f89b33c9150b5d7b68415bb85bb8c1d644d1f
3,649,048
import numpy as np

def classical_gaussian_kernel(k, sigma):
    """
    A function to generate a classical Gaussian kernel
    :param k: The size of the kernel, an integer
    :param sigma: standard deviation of the gaussian distribution
    :return: A Gaussian kernel, a numpy array of shape (k,k)
    """
    w = np.linspace(-(k - 1) / 2, (k - 1) / 2, k)
    x, y = np.meshgrid(w, w)
    kernel = 0.5*np.exp(-0.5*(x**2 + y**2)/(sigma**2))/(np.pi*sigma**2)
    return kernel
e1a94134e465f72a7d49e8bb950eb7a8ba97ac54
3,649,050
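A quick usage sketch for classical_gaussian_kernel above; the size and sigma below are arbitrary example values:

    kernel = classical_gaussian_kernel(5, 1.0)
    print(kernel.shape)  # (5, 5)
    print(kernel.sum())  # ~0.98 — approximately normalized for this size/sigma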
import pandas as pd

def collection_to_csv(collection):
    """
    Collect each document's "result" records from a collection into one DataFrame
    :param collection: Collection
    :return: pandas.DataFrame (empty on error)
    """
    print("collection_to_csv")
    final_df = pd.DataFrame()
    try:
        dict4json = []
        n_documents = 0
        for document in collection.get():
            result_dict = document.to_dict()
            dict4json.append(result_dict)
            n_documents += 1
        for result in dict4json:
            lst = result["result"]
            df = pd.DataFrame(lst)
            df = df.reindex(sorted(df.columns), axis=1)
            final_df = pd.concat([final_df, df])
    except Exception as e:
        print(e)
        # return here rather than from a `finally` block: a return inside
        # `finally` would override this value and silently hide the failure
        return pd.DataFrame()
    return final_df
5d62e0fe1eebb190be47a05d822b8714d396125f
3,649,051
import six

def validate_hatch(s):
    """
    Validate a hatch pattern.
    A hatch pattern string can have any sequence of the following
    characters: ``\\ / | - + * . x o O``.
    """
    if not isinstance(s, six.text_type):
        raise ValueError("Hatch pattern must be a string")
    unique_chars = set(s)
    unknown = (unique_chars -
               set(['\\', '/', '|', '-', '+', '*', '.', 'x', 'o', 'O']))
    if unknown:
        raise ValueError("Unknown hatch symbol(s): %s" % list(unknown))
    return s
4ddf056dab2681759a462005effc4ae5488a4461
3,649,052
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType

def iot_hub_service_factory(cli_ctx, *_):
    """
    Factory for importing deps and getting service client resources.

    Args:
        cli_ctx (knack.cli.CLI): CLI context.
        *_ : all other args ignored.

    Returns:
        iot_hub_resource (IotHubClient.iot_hub_resource):
            operational resource for working with IoT Hub.
    """
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_IOTHUB).iot_hub_resource
a38ae4a7fedaf8dcbaccba0873e4f519cb51af17
3,649,053
def filter_example(config, example, mode="train"):
    """
    Whether to filter a given example according to config.
    :param config: config contains parameters for filtering example
    :param example: an example instance
    :param mode: "train" or "test", they differ in filter restrictions
    :return: boolean
    """
    if mode == "train":
        return (len(example["ans_sent_tokens"]) > config.sent_limit or
                len(example["ques_tokens"]) > config.ques_limit or
                (example["y2_in_sent"] - example["y1_in_sent"]) > config.ans_limit)
    elif mode == "test":
        return (len(example["ans_sent_tokens"]) > config.sent_limit or
                len(example["ques_tokens"]) > config.ques_limit)
    else:
        raise ValueError("mode must be train or test")
9c49990fe36c0a82d0a99a62fe810a19cd5a8749
3,649,054
def _dict_flatten(data):
    """Return flattened dict of input dict <data>.

    After https://codereview.stackexchange.com/revisions/21035/3

    Parameters
    ----------
    data : dict
        Input dict to flatten

    Returns
    -------
    fdata : dict
        Flattened dict.
    """
    def expand(key, value):
        """Expand list."""
        if isinstance(value, dict):
            return [(key+'>'+k, v) for k, v in _dict_flatten(value).items()]
        else:
            return [(key, value)]

    return dict([item for k, v in data.items() for item in expand(k, v)])
a1db4a552ced44efa45fe4f86fbfe04871463356
3,649,055
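A quick usage sketch for _dict_flatten above, with a hypothetical nested dict:

    nested = {'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}
    print(_dict_flatten(nested))
    # {'a': 1, 'b>c': 2, 'b>d>e': 3}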
def merkleroot(elements):
    """
    Args:
        elements (List[str]): List of hashes that make the merkletree.

    Returns:
        str: The root element of the merkle tree.
    """
    return Merkletree(elements).merkleroot
cd5d1e530fda62f9b92a51a03f5fd2cbbe6a9e62
3,649,056
def category_start(update, context):
    """Separate function for category selection to filter the options with inline keyboard."""
    update.message.reply_text(
        "Choose a Group",
        reply_markup=create_category_inline(trx_categories.keys(), "group_sel"),
    )
    return CATEGORY_REPLY_CHOOSE_TRX_OPTS
cfc299d8b81785d8418bfb4c280cf88e4137448b
3,649,057
def create_player(mode, race, char_name):
    """ Create the player's character """
    # Evil
    if mode == 2:
        if race == 1:
            player = character.Goblin(char_name, 1, app)
        elif race == 2:
            player = character.Orc(char_name, 1, app)
        elif race == 3:
            player = character.Uruk(char_name, 1, app)
        else:
            player = character.Wizard(char_name, 1, app)
    # Good
    else:
        if race == 1:
            player = character.Human(char_name, 1, app)
        elif race == 2:
            player = character.Wizard(char_name, 1, app)
        elif race == 3:
            player = character.Warrior(char_name, 1, app)
        else:
            # fallback so `player` is always bound, matching the evil branch
            player = character.Wizard(char_name, 1, app)
        # disabled races kept for reference:
        # elif race == 4:
        #     player = character.Hobbit(char_name, 1, app)
        # elif race == 6:
        #     player = character.Bishop(char_name, 1, app)
    return player
30e143f0cca1053d6e10df0c438065747611e4af
3,649,059
def _get_item(i, j, block):
    """
    Returns a single item from the block. Coords must be in block space.
    """
    return block[i, j]
45a12ecb3959a75ad8f026616242ba64174441fc
3,649,060
import numpy as np

def calculate_potentials_python(volume, mass, volume_material_mass, mass_material_mass):
    """
    Easy to read python function which calculates potentials using two Python loops
    Still uses NumPy for the rote math.
    """
    # G is the gravitational constant, assumed defined at module level
    potentials = np.zeros(len(volume), dtype=np.float32)
    for volume_i, volume_coord in enumerate(volume):
        for mass_coord in mass:
            potentials[volume_i] += (G * volume_material_mass * mass_material_mass) / np.sqrt(
                np.square(volume_coord - mass_coord).sum())
    return potentials
73395d31bb470ac96b0c05a140fe6e77f56e2d88
3,649,061
from math import atan2, acos

def rect2sphericalcoord3D(v: list[float]) -> list[float]:
    """Does a 3D coordinate transform from rectangular
    to spherical coordinate system

    p = The length of the hypotenuse or the magnitude of the vector
    theta = is the angle between the positive x-axis and p (azimuth)
    phi = is the angle between the positive z-axis and p (colatitude)

    Args:
        v: [x, y, z] rectangular coordinates

    Returns:
        [p: float, theta: float, phi: float] spherical coordinates
    """
    p = vmag(v)
    # atan2 handles v[0] == 0 and picks the correct quadrant
    return [p, atan2(v[1], v[0]), acos(v[2] / p)]
8be197341e576465af389f8e20aea25a59fc3d1e
3,649,062
def GetAssignmentByKeyName(key_name):
    """Gets the assignment with the specified key name."""
    return Assignment.get_by_key_name(key_name)
a14b9a2033bb995d53219568278d298f305861d7
3,649,063
import numpy as np

def fit_integer_type(n, is_signed=True):
    """Determine the minimal space needed to store integers of maximal value n"""
    if is_signed:
        m = 1
        types = [np.int8, np.int16, np.int32, np.int64]
    else:
        m = 0
        types = [np.uint8, np.uint16, np.uint32, np.uint64]
    if n < 2 ** (8 - m):
        return types[0]
    elif n < 2 ** (16 - m):
        return types[1]
    elif n < 2 ** (32 - m):
        return types[2]
    elif n < 2 ** (64 - m):
        return types[3]
    else:
        raise ValueError('Values are too big to be represented by '
                         '64 bits integers!')
bd9ebd447893509b1144a32bac9f9757988b4a60
3,649,064
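A usage sketch for fit_integer_type above:

    print(fit_integer_type(100))                   # <class 'numpy.int8'>  (100 < 2**7)
    print(fit_integer_type(300, is_signed=False))  # <class 'numpy.uint16'> (256 <= 300 < 2**16)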
def admin_userforms_order_by_field(user_id):
    """ Set User's forms order_by preference """
    if not g.is_admin:
        return jsonify("Forbidden"), 403
    data = request.get_json(silent=True)
    if 'order_by_field_name' not in data:
        return jsonify("Not Acceptable"), 406
    field_names = [field['name'] for field in default_admin_userforms_field_index]
    if data['order_by_field_name'] not in field_names:
        return jsonify("Not Acceptable"), 406
    g.current_user.admin['userforms']['order_by'] = data['order_by_field_name']
    flag_modified(g.current_user, 'admin')
    g.current_user.save()
    return jsonify(
        {'order_by_field_name': g.current_user.admin['userforms']['order_by']}
    ), 200
4d92dc6f9d562a91509ec2b03aff75c4bc376ead
3,649,065
def check_all_rows(A):
    """
    Check if all rows in 2-dimensional matrix don't have more than one queen
    """
    for row_inx in range(len(A)):
        # compute sum of row row_inx
        if sum(A[row_inx]) > 1:
            return False
    return True
e39f4ca3e401c02b13c5b55ed4389a7e6deceb40
3,649,066
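A small usage sketch for check_all_rows above; the 0/1 boards are hypothetical queen placements:

    board = [[0, 1, 0],
             [1, 0, 0],
             [0, 0, 1]]
    print(check_all_rows(board))             # True
    print(check_all_rows([[1, 1], [0, 0]]))  # False — two queens in row 0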
import vtk
import numpy as np
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk

def extract_largest_connected_region(vtk_im, label_id):
    """
    Extract the largest connected region of a vtk image

    Args:
        vtk_im: vtk image
        label_id: id of the label
    Return:
        vtk_im: processed vtk image (voxels of label_id outside the
            largest connected region are zeroed in place)
    """
    fltr = vtk.vtkImageConnectivityFilter()
    fltr.SetScalarRange(label_id, label_id)
    fltr.SetExtractionModeToLargestRegion()
    fltr.SetInputData(vtk_im)
    fltr.Update()
    new_im = fltr.GetOutput()

    py_im = vtk_to_numpy(vtk_im.GetPointData().GetScalars())
    py_mask = vtk_to_numpy(new_im.GetPointData().GetScalars())
    mask = np.logical_and(py_im == label_id, py_mask == 0)
    py_im[mask] = 0
    vtk_im.GetPointData().SetScalars(numpy_to_vtk(py_im))
    return vtk_im
c9510da15b4d3cade331aa3b9b3625af5706e417
3,649,067
def group_set_array_data_ptr(d):
    """
    call view%set_external_data_ptr
    hide c_loc call and add target attribute
    """
    # XXX - should this check the type/shape of value against the view?
    # typename - part of function name
    # nd - number of dimensions
    # f_type - fortran type
    # shape - :,:, to match nd
    if d['rank'] == 0:
        extents_decl = 'extents(1)'
        extents_asgn = 'extents(1) = 1_SIDRE_IndexType'
    else:
        extents_decl = 'extents(%d)' % d['rank']
        extents_asgn = 'extents = shape(value, kind=SIDRE_IndexType)'

    return """
! Generated by genfsidresplicer.py
! This function does nothing if view name does not exist in group.
subroutine group_set_array_data_ptr_{typename}{nd}(grp, name, value)
    use iso_c_binding
    implicit none
    class(SidreGroup), intent(IN) :: grp
    character(len=*), intent(IN) :: name
    {f_type}, target, intent(IN) :: value{shape}
    integer(C_INT) :: lname
    type(SIDRE_SHROUD_view_capsule) view
!   integer(SIDRE_IndexType) :: {extents_decl}
!   integer(C_INT), parameter :: type = {sidre_type}
    type(C_PTR) addr, viewptr
    lname = len_trim(name)
!   {extents_asgn}
    viewptr = c_group_get_view_from_name_bufferify(grp%cxxmem, name, lname, view)
    if (c_associated(view%addr)) then
#ifdef USE_C_LOC_WITH_ASSUMED_SHAPE
        addr = c_loc(value)
#else
        call SIDRE_C_LOC(value{lower_bound}, addr)
#endif
        call c_view_set_external_data_ptr_only(view, addr)
!       call c_view_apply_type_shape(rv%cxxmem, type, {rank}, extents)
    endif
end subroutine group_set_array_data_ptr_{typename}{nd}""".format(
        extents_decl=extents_decl,
        extents_asgn=extents_asgn,
        **d)
36a18ca9099edf24d37386103f111bde7753ed46
3,649,069
from typing import cast

def releaseTagName(version: Version) -> str:
    """
    Compute the name of the release tag for the given version.
    """
    return cast(str, version.public())
9f8e350a42e2b50657a87e89e592ae340ba3ee96
3,649,070
import time

def get_calibrated_values(timeout=10):
    """Return an instance of CalibratedValues containing the 6 spectral bands."""
    t_start = time.time()
    while _as7262.CONTROL.get_data_ready() == 0 and (time.time() - t_start) <= timeout:
        pass
    with _as7262.CALIBRATED_DATA as DATA:
        return CalibratedValues(DATA.get_r(), DATA.get_o(),
                                DATA.get_y(), DATA.get_g(),
                                DATA.get_b(), DATA.get_v())
69e5921b3e487ab3f3c3abbae8a2b237eb75b033
3,649,071
def customizable_admin(cls):
    """
    Returns a customizable admin class
    """
    class CustomSearchableAdmin(BaseAdmin):
        form = customizable_form(cls)

        def __init__(self, *args, **kwargs):
            super(CustomSearchableAdmin, self).__init__(*args, **kwargs)
            # add the custom fields to the fieldsets (if present)
            # @see customizable_form and ContentTypeCustomField
            if self.fieldsets:
                if isinstance(self.fieldsets, tuple):
                    self.fieldsets = list(self.fieldsets)
                fieldset = ContentTypeCustomField.get_fieldset_for_model(self.form._meta.model)
                if fieldset:
                    self.fieldsets.append(fieldset)

        def get_form(self, request, obj=None, **kwargs):
            ## modify visualization for certain users
            #if not request.user.is_superuser:
            #    self.exclude.append('field_to_hide')
            #    self.inlines.remove(UserInline)
            #    pass
            form = super(CustomSearchableAdmin, self).get_form(request, obj, **kwargs)
            return form

        def get_changelist(self, request, **kwargs):
            return CustomChangeList

        def queryset(self, request):
            qs = super(CustomSearchableAdmin, self).queryset(request)
            #qs = qs.filter(Q(is_staff=True) | Q(is_superuser=True))
            return qs

        def has_change_permission(self, request, obj=None):
            has_permission = super(CustomSearchableAdmin, self).has_change_permission(request, obj)
            #if obj is not None and not request.user.is_superuser and request.user.id != obj.user.id:
            return has_permission

    return CustomSearchableAdmin
dc6ced817b78b7cbf31cbf788d28fcff421d6b02
3,649,072
import numpy as np

def restoreIm(transformeddata, pca, origshape, datamean, datastd):
    """Given a PCA object and transformeddata that consists of projections
    onto the PCs, return images by using the PCA's inverse transform and
    reshaping to the provided origshape."""
    if transformeddata.shape[0] < transformeddata.shape[1]:
        transformeddata = np.transpose(transformeddata)
    data = pca.inverse_transform(transformeddata)
    # restore the shape and scale of the data before plotting
    data = data*datastd
    data = data + datamean
    data = np.transpose(data)
    return data.reshape(origshape)
ce8713648b166f7ce35bb47df33a6b99e2de8687
3,649,073
# assumed imports for the helpers this function relies on
from random import randint, sample as random_sample

def sample(population, k=None):
    """Behaves like random.sample, but if k is omitted, it defaults to
    randint(1, len(population)), so that a non-empty sample is returned."""
    population = list(population)
    if k is None:
        k = randint(1, len(population))
    return random_sample(population, k)
46f7f3365c4574ed9cb09b54f25e30ff23fb3b8d
3,649,075
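A usage sketch for sample above (output varies between runs):

    print(sample(range(10), k=3))       # e.g. [7, 0, 4]
    print(len(sample(range(10))) >= 1)  # True — k defaults to a random non-zero size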
def add_momentum_ta(df, high, low, close, volume, fillna=False):
    """Add momentum technical analysis features to dataframe.

    Args:
        df (pandas.core.frame.DataFrame): Dataframe base.
        high (str): Name of 'high' column.
        low (str): Name of 'low' column.
        close (str): Name of 'close' column.
        volume (str): Name of 'volume' column.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.core.frame.DataFrame: Dataframe with new features.
    """
    df['momentum1'] = rsi(df[close], n=14, fillna=fillna)
    df['momentum2'] = money_flow_index(df[high], df[low], df[close],
                                       df[volume], n=14, fillna=fillna)
    df['momentum3'] = tsi(df[close], r=25, s=13, fillna=fillna)
    return df
76239057526272874c34eb4250f642745dfc9990
3,649,077
def get_experiment_type(filename):
    """
    Get the experiment type from the filename.
    The filename is assumed to be in the form of:
        '<reliability>_<durability>_<history kind>_<topic>_<timestamp>'

    :param filename: The filename to get the type.
    :return: A string where the timestamp is taken out from the filename.
    """
    file_type = ''
    filename = filename.split('/')[-1]
    elements = filename.split('_')
    for i in range(0, len(elements) - 3):
        file_type += '{}_'.format(elements[i])
    file_type = file_type[:-1]
    return file_type
e1853a95d034b8f9e36ca65f6f5d200cbf4b86dc
3,649,078
from typing import Any

def async_check_significant_change(
    hass: HomeAssistant,
    old_state: str,
    old_attrs: dict,
    new_state: str,
    new_attrs: dict,
    **kwargs: Any,
) -> bool | None:
    """Test if state significantly changed."""
    if old_state != new_state:
        return True

    if old_attrs.get(ATTR_EFFECT) != new_attrs.get(ATTR_EFFECT):
        return True

    old_color = old_attrs.get(ATTR_HS_COLOR)
    new_color = new_attrs.get(ATTR_HS_COLOR)
    if old_color and new_color:
        # Range 0..360
        if check_absolute_change(old_color[0], new_color[0], 5):
            return True
        # Range 0..100
        if check_absolute_change(old_color[1], new_color[1], 3):
            return True

    if check_absolute_change(
        old_attrs.get(ATTR_BRIGHTNESS), new_attrs.get(ATTR_BRIGHTNESS), 3
    ):
        return True

    if check_absolute_change(
        # Default range 153..500
        old_attrs.get(ATTR_COLOR_TEMP),
        new_attrs.get(ATTR_COLOR_TEMP),
        5,
    ):
        return True

    if check_absolute_change(
        # Range 0..255
        old_attrs.get(ATTR_WHITE_VALUE),
        new_attrs.get(ATTR_WHITE_VALUE),
        5,
    ):
        return True

    return False
2a3f91923f187a601b80a28aa750060dc0760e65
3,649,079
import warnings

import numpy as np

def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters splits the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero. A number is "very small" if it
        is smaller than the current printing precision.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
        An array is typically printed as::

          'prefix(' + array2string(a) + ')'

        The length of the prefix string is used to align the output correctly.
    style : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.14.0
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are::

            - 'bool'
            - 'int'
            - 'timedelta' : a `numpy.timedelta64`
            - 'datetime' : a `numpy.datetime64`
            - 'float'
            - 'longfloat' : 128-bit floats
            - 'complexfloat'
            - 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
            - 'str' : all other strings

        Other keys that can be used to set a group of types at once are::

            - 'all' : sets all types
            - 'int_kind' : sets 'int'
            - 'float_kind' : sets 'float' and 'longfloat'
            - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
            - 'str_kind' : sets 'str' and 'numpystr'
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension.
    sign : string, either '-', '+', ' ' or 'legacy', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values. If 'legacy', print a
        space for positive values except in 0d arrays.

    Returns
    -------
    array_str : str
        String representation of the array.

    Raises
    ------
    TypeError
        if a callable in `formatter` does not return a string.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions

    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.

    This is a very flexible function; `array_repr` and `array_str` are using
    `array2string` internally so keywords with the same name should work
    identically in all three functions.

    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> print(np.array2string(x, precision=2, separator=',',
    ...                       suppress_small=True))
    [ 0., 1., 2., 3.]

    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'

    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0L 0x1L 0x2L]'

    """
    # Deprecation 05-16-2017  v1.14
    if style is not np._NoValue:
        warnings.warn("'style' argument is deprecated and no longer functional",
                      DeprecationWarning, stacklevel=3)

    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter)
    options = _format_options.copy()
    options.update(overrides)

    if a.size == 0:
        # treat as a null array if any of shape elements == 0
        lst = "[]"
    else:
        lst = _array2string(a, options, separator, prefix)
    return lst
ab40c565d058a6836fbc9778ae0d8ceb5c3d6a99
3,649,080
def registered_types():
    """ list of registered types """
    return list(Registry.types.get_all().keys())
50ed8fd4d586d660e2dc48e01e9cd462b346f47e
3,649,081
from typing import Dict, Optional

def is_retain_bg_files(config: Optional[Dict[str, ConfigVO]] = None) -> bool:
    """
    Whether to keep the old wallpapers before fetching new ones
    """
    key = const.Key.Task.RETAIN_BGS.value
    vo = config.get(key) if config else dao.get_config(key)
    return bool(vo and vo.value)
69fd845479c0afbc6d6b215d0680d7f6a9c35096
3,649,082
import pytz

def getAwareTime(tt):
    """
    Generates timezone aware timestamp from timezone unaware timestamp

    PARAMETERS
    ------------
    :param tt: datetime timezone unaware timestamp

    RETURNS
    ------------
    :return: datetime timezone aware timestamp
    """
    timezone = pytz.timezone("Europe/Amsterdam")
    return timezone.localize(tt)
1b286c92c7f5d8f0ff48d77296489fbd358c14ce
3,649,083
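A usage sketch for getAwareTime above; the naive timestamp is a hypothetical value:

    from datetime import datetime

    naive = datetime(2021, 6, 1, 12, 0)
    aware = getAwareTime(naive)
    print(aware.tzinfo)  # Europe/Amsterdam (CEST at this date)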
def xdfs(request, tmpdir, vol_name, dos_format):
    """return (xdf_file, xdf_size_spec, vol_name) for various disks"""
    size = request.param
    if size == "880K":
        file_name = tmpdir / "disk.adf"
        size = ""
    else:
        # parenthesized so the whole name is joined onto tmpdir at once
        file_name = tmpdir / ("disk-" + size + ".hdf")
        size = "size=" + size
    return XDFSpec(str(file_name), size, vol_name, dos_format)
0a9878ffe020ba1438844e000be5b9e4a8b2825a
3,649,084
def nfvi_get_networks(paging, callback):
    """
    Get a list of networks
    """
    cmd_id = _network_plugin.invoke_plugin('get_networks', paging,
                                           callback=callback)
    return cmd_id
432bd6a69e25cc7a80aa77b2a58fe99b0947b9a0
3,649,085
def get_fasta(uniprot_id):
    """Get the protein sequence for a UniProt ID as a string.

    Args:
        uniprot_id: Valid UniProt ID

    Returns:
        str: String of the protein (amino acid) sequence
    """
    # Silencing the "Will be moved to Biokit" message
    with ssbio.utils.suppress_stdout():
        return bsup.get_fasta_sequence(uniprot_id)
295a5bd30d3e0feaf99ecab7fa975c67f8b06248
3,649,086
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
    """
    Validate and split the given HTTP request path.

    **Examples**::

        ['a'] = split_path('/a')
        ['a', None] = split_path('/a', 1, 2)
        ['a', 'c'] = split_path('/a/c', 1, 2)
        ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)

    :param path: HTTP Request path to be split
    :param minsegs: Minimum number of segments to be extracted
    :param maxsegs: Maximum number of segments to be extracted
    :param rest_with_last: If True, trailing data will be returned as part
                           of last segment. If False, and there is trailing
                           data, raises ValueError.
    :returns: list of segments with a length of maxsegs (non-existent
              segments will return as None)
    :raises: ValueError if given an invalid path
    """
    if not maxsegs:
        maxsegs = minsegs
    if minsegs > maxsegs:
        raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
    if rest_with_last:
        segs = path.split('/', maxsegs)
        minsegs += 1
        maxsegs += 1
        count = len(segs)
        if (segs[0] or count < minsegs or count > maxsegs or
                '' in segs[1:minsegs]):
            raise ValueError('Invalid path: %s' % quote(path))
    else:
        minsegs += 1
        maxsegs += 1
        segs = path.split('/', maxsegs)
        count = len(segs)
        if (segs[0] or count < minsegs or count > maxsegs + 1 or
                '' in segs[1:minsegs] or
                (count == maxsegs + 1 and segs[maxsegs])):
            raise ValueError('Invalid path: %s' % quote(path))
    segs = segs[1:maxsegs]
    segs.extend([None] * (maxsegs - 1 - len(segs)))
    return segs
d3824ebd63b784dadaf0a97e75049f79d1077ded
3,649,087
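A usage sketch for split_path above, mirroring the docstring examples:

    print(split_path('/a'))                    # ['a']
    print(split_path('/a', 1, 2))              # ['a', None]
    print(split_path('/a/c/o/r', 1, 3, True))  # ['a', 'c', 'o/r']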
def get_purchases_formset(n_forms=0):
    """
    Helper method that returns a Django formset for a dynamic amount of
    Purchases. Initially `n_forms` empty forms are shown.
    """
    return modelformset_factory(Purchase, fields=('amount', 'fruit'),
                                extra=n_forms)
b49ec71aef56eabb1781039af947ff510242925a
3,649,088
from subprocess import PIPE, Popen

async def git_pull():
    """
    Pulls any changes down from github and returns the result of the command.

    -> changed: str
    """
    cmd = Popen(["git", "pull"], stdout=PIPE)
    out, _ = cmd.communicate()
    out = out.decode()
    return out
ed32677a22b0f75c23af618f18833b5fc46bb3dc
3,649,089
def inverse_word_map(word_map):
    """
    Create an inverse word mapping.

    :param word_map: word mapping
    """
    return {v: k for k, v in word_map.items()}
4048a21ea1c75791a92d57ee0a440a6c9d31b6b9
3,649,090
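A quick usage sketch for inverse_word_map above:

    word_map = {'hello': 0, 'world': 1}
    print(inverse_word_map(word_map))  # {0: 'hello', 1: 'world'}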
def get_coalition_wins_sql_string_for_state(coalition_id, state_id):
    """
    :type coalition_id: integer
    :type state_id: integer
    """
    sql = """
    select lr.candidate_id, c.fullname as winning_candidate,
        lr.constituency_id, cons.name as constituency,
        lr.party_id, lr.max_votes, (lr.max_votes-sr.votes) as lead,
        sr.candidate_id, loosing_candidate.fullname as runner_up,
        loosing_party.name as runner_up_party, sr.party_id,
        winning_party.name, ltw.party_id
    from latest_results lr
    inner join latest_runners_up as sr on sr.constituency_id = lr.constituency_id
    inner join candidate c on c.id = lr.candidate_id
    inner join constituency cons on cons.id = lr.constituency_id
    inner join party winning_party on lr.party_id = winning_party.id
    inner join party loosing_party on loosing_party.id = sr.party_id
    inner join candidate loosing_candidate on loosing_candidate.id = sr.candidate_id
    inner join last_time_winners ltw on ltw.constituency_id = lr.constituency_id
    where winning_party.coalition_id = %s and cons.state_id = %s
        and lr.status = 'DECLARED'
    order by lead DESC""" % (coalition_id, state_id)
    return sql
76fb0704779e20e8a53ca80dc17c969f1e455d20
3,649,091
import numpy

def computeAPLSF(data):
    """ Compute the LSF kernel for each chip """
    index = 2047
    ## define lsf range and pixel centers
    xlsf = numpy.linspace(-7., 7., 43)
    xcenter = numpy.arange(0, 4096)
    ## compute LSF profiles for each chip as a function of pixel
    raw_out2_a = raw(xlsf, xcenter, data.lsfcoeff[0])
    raw_out2_b = raw(xlsf, xcenter, data.lsfcoeff[1])
    raw_out2_c = raw(xlsf, xcenter, data.lsfcoeff[2])
    ## normalize
    raw_out2_a_norm = raw_out2_a/numpy.tile(numpy.sum(raw_out2_a, axis=1), (len(xlsf), 1)).T
    raw_out2_b_norm = raw_out2_b/numpy.tile(numpy.sum(raw_out2_b, axis=1), (len(xlsf), 1)).T
    raw_out2_c_norm = raw_out2_c/numpy.tile(numpy.sum(raw_out2_c, axis=1), (len(xlsf), 1)).T
    return numpy.array([raw_out2_a_norm[index], raw_out2_b_norm[index], raw_out2_c_norm[index]])
5cd46d9feec10dd0a4eff1a5fe44e241bfeed539
3,649,092
def login():
    """Log in a registered user by adding the user id to the session."""
    if request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        error = None
        user = User.query.filter_by(name=username).first()

        if user is None:
            error = "Incorrect username."
        elif not user.check_password(password):
            error = "Incorrect password."

        if error is None:
            # store the id in the Flask session, not the database session
            session.clear()
            session["user_id"] = user.id
            return redirect(url_for("mainpage"))

        flash(error)

    return render_template("auth/login.html")
067b202e81d947589c0fe2262372856084b28e35
3,649,093
def _timedeltaformat(value, include_ms=False):
    """Formats a timedelta in a sane way.

    Ignores sub-second precision by default.
    """
    if not value:
        return NON_BREAKING_HYPHEN + NON_BREAKING_HYPHEN
    total_seconds = value.total_seconds()
    suffix = ''
    if include_ms:
        ms = int(round(total_seconds-int(total_seconds), 3) * 1000)
        if ms:
            suffix = '.%03d' % ms
    hours, remainder = divmod(int(round(total_seconds)), 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours:
        return '%d:%02d:%02d%s' % (hours, minutes, seconds, suffix)
    # Always prefix minutes, even if 0, otherwise this looks weird. Revisit
    # this decision if bikeshedding is desired.
    return '%d:%02d%s' % (minutes, seconds, suffix)
5c40caa1bd2e005746a44b1767eb4c3ed29b1603
3,649,094
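A usage sketch for _timedeltaformat above (hand-checked values):

    from datetime import timedelta

    print(_timedeltaformat(timedelta(hours=2, minutes=3, seconds=4)))   # '2:03:04'
    print(_timedeltaformat(timedelta(seconds=75.25), include_ms=True))  # '1:15.250'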
def get_viame_src(url):
    """
    Get image src from via.me API.
    """
    END_POINT = 'http://via.me/api/v1/posts/'
    tmp = url.split('/')
    viame_id = tmp[-1][1:]
    address = END_POINT + viame_id
    result = httpget(address)['response']['post']
    return result['thumb_300_url']
52b23fb64b30c97ef70b683e0176f88f8730e5c9
3,649,095
def Geom_BSplineCurve_MaxDegree(*args):
    """
    * Returns the value of the maximum degree of the normalized B-spline basis functions in this package.

    :rtype: int
    """
    return _Geom.Geom_BSplineCurve_MaxDegree(*args)
32729754ca89ce719b81f28fbf3f3c5ea5eb70eb
3,649,096
import torch

def iou_score(pred_cls, true_cls, nclass, drop=(), mask=None):
    """
    compute the intersection-over-union score
    both inputs should be categorical (as opposed to one-hot)
    """
    assert pred_cls.shape == true_cls.shape, 'Shape of predictions should match GT'
    if mask is not None:
        assert mask.dim() == true_cls.dim(), \
            'Mask should have the same dimensions as inputs'
    intersect_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
    union_ = torch.zeros(nclass - len(drop), device=pred_cls.get_device())
    idx = 0
    for i in range(nclass):
        if i not in drop:
            intersect = (pred_cls == i).byte() + (true_cls == i).byte()
            if mask is not None:
                intersect *= mask.byte()
            intersect = intersect.eq(2).sum()
            union = (pred_cls == i).byte() + (true_cls == i).byte()
            if mask is not None:
                union *= mask.byte()
            union = union.ge(1).sum()
            intersect_[idx] = intersect
            union_[idx] = union
            idx += 1
    return intersect_, union_
d38871f339b2126d418a7fca53fbfd874e263aa2
3,649,097
def check_args(source_path, args):
    """Checks lengths of supplied args match or raise an error.

    Lists can have only one element where they are automatically extended.

    Args:
        source_path(list(str)): List of source_paths supplied to turbiniactl.
        args(list(list)): List of args (i.e. name, source, partitions, etc)
            and their values supplied to turbiniactl.

    Raises:
        TurbiniaException: If length of args don't match.

    Returns:
        list(str): List of arg or None
    """
    ret = list()
    if not args[0]:
        args[0] = source_path
    for arg in args:
        if not arg:
            arg = [None]
        if len(arg) > 1 and len(arg) != len(source_path):
            raise TurbiniaException(
                'Number of passed in args ({0:d}) must equal to one or '
                'number of source_paths/disks ({1:d}).'.format(
                    len(arg), len(source_path)))
        if len(arg) == 1:
            arg = [arg[0] for _ in source_path]
        ret.append(arg)
    return ret
23d50e875ac908b0ee3afd4521b1a2660843ffc6
3,649,098
import datetime
import warnings
import zipfile
from io import BytesIO

import pandas as pd
import requests

def futures_dce_position_rank(date: str = "20160104") -> dict:
    """
    Dalian Commodity Exchange daily position rankings by specific contract
    http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rcjccpm/index.html
    :param date: the trading day to query; e.g., "20200511"
    :type date: str
    :return: position-rank data for the given date, one DataFrame per contract
    :rtype: dict
    """
    date = cons.convert_date(date) if date is not None else datetime.date.today()
    # `calendar` is assumed to be the exchange trading-day calendar
    # (e.g. cons.get_calendar()), not the stdlib calendar module
    if date.strftime('%Y%m%d') not in calendar:
        warnings.warn('%s is not a trading day' % date.strftime('%Y%m%d'))
        return {}
    url = "http://www.dce.com.cn/publicweb/quotesdata/exportMemberDealPosiQuotesBatchData.html"
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Content-Length": "160",
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "www.dce.com.cn",
        "Origin": "http://www.dce.com.cn",
        "Pragma": "no-cache",
        "Referer": "http://www.dce.com.cn/publicweb/quotesdata/memberDealPosiQuotes.html",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    }
    payload = {
        "memberDealPosiQuotes.variety": "a",
        "memberDealPosiQuotes.trade_type": "0",
        "contract.contract_id": "a2009",
        "contract.variety_id": "a",
        "year": date.year,
        "month": date.month - 1,
        "day": date.day,
        "batchExportFlag": "batch",
    }
    r = requests.post(url, payload, headers=headers)
    big_dict = dict()
    with zipfile.ZipFile(BytesIO(r.content), "r") as z:
        for i in z.namelist():
            file_name = i.encode('cp437').decode('GBK')
            try:
                data = pd.read_table(z.open(i), header=None, sep="\t").iloc[:-6]
                if len(data) < 12:  # handle contracts without active trading
                    big_dict[file_name.split("_")[1]] = pd.DataFrame()
                    continue
                start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
                data = data.iloc[start_list[0]:, data.columns[data.iloc[start_list[0], :].notnull()]]
                data.reset_index(inplace=True, drop=True)
                start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
                end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
                part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
                part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
                part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
                temp_df = pd.concat([part_one.reset_index(drop=True),
                                     part_two.reset_index(drop=True),
                                     part_three.reset_index(drop=True)],
                                    axis=1, ignore_index=True)
                temp_df.columns = ["名次", "会员简称", "成交量", "增减",
                                   "名次", "会员简称", "持买单量", "增减",
                                   "名次", "会员简称", "持卖单量", "增减"]
                temp_df["rank"] = range(1, len(temp_df) + 1)
                del temp_df["名次"]
                temp_df.columns = ["vol_party_name", "vol", "vol_chg",
                                   "long_party_name", "long_open_interest", "long_open_interest_chg",
                                   "short_party_name", "short_open_interest", "short_open_interest_chg",
                                   "rank"]
                temp_df["symbol"] = file_name.split("_")[1]
                temp_df["variety"] = file_name.split("_")[1][:-4].upper()
                temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name",
                                   "rank",
                                   "short_open_interest", "short_open_interest_chg", "short_party_name",
                                   "vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
                big_dict[file_name.split("_")[1]] = temp_df
            except UnicodeDecodeError:
                try:
                    data = pd.read_table(z.open(i), header=None, sep="\\s+",
                                         encoding="gb2312", skiprows=3)
                except:
                    data = pd.read_table(z.open(i), header=None, sep="\\s+",
                                         encoding="gb2312", skiprows=4)
                start_list = data[data.iloc[:, 0].str.find("名次") == 0].index.tolist()
                end_list = data[data.iloc[:, 0].str.find("总计") == 0].index.tolist()
                part_one = data[start_list[0]: end_list[0]].iloc[1:, :]
                part_two = data[start_list[1]: end_list[1]].iloc[1:, :]
                part_three = data[start_list[2]: end_list[2]].iloc[1:, :]
                temp_df = pd.concat([part_one.reset_index(drop=True),
                                     part_two.reset_index(drop=True),
                                     part_three.reset_index(drop=True)],
                                    axis=1, ignore_index=True)
                temp_df.columns = ["名次", "会员简称", "成交量", "增减",
                                   "名次", "会员简称", "持买单量", "增减",
                                   "名次", "会员简称", "持卖单量", "增减"]
                temp_df["rank"] = range(1, len(temp_df) + 1)
                del temp_df["名次"]
                temp_df.columns = ["vol_party_name", "vol", "vol_chg",
                                   "long_party_name", "long_open_interest", "long_open_interest_chg",
                                   "short_party_name", "short_open_interest", "short_open_interest_chg",
                                   "rank"]
                temp_df["symbol"] = file_name.split("_")[1]
                temp_df["variety"] = file_name.split("_")[1][:-4].upper()
                temp_df = temp_df[["long_open_interest", "long_open_interest_chg", "long_party_name",
                                   "rank",
                                   "short_open_interest", "short_open_interest_chg", "short_party_name",
                                   "vol", "vol_chg", "vol_party_name", "symbol", "variety"]]
                big_dict[file_name.split("_")[1]] = temp_df
    return big_dict
3c4fa81a2fef317210be915f437c0885f5fcbbbd
3,649,099
import queue

def task_checkqueue(storage):
    """
    Task that watches a queue for messages and acts on them when received.
    """
    # Get the queue object from the storage dictionary
    thequeue = storage.get("queue")
    try:
        # Use a timeout so it blocks for at-most 0.5 seconds while waiting
        # for a message. Smaller values can be used to increase the cycling
        # of the task and responsiveness to Threadify control signals
        # (like pause) if desired.
        msg = thequeue.get(block=True, timeout=.5)
    except queue.Empty:
        print("_", end="")
    else:
        if msg == "QUIT":
            return False
        # Print received message
        print("{:s}".format(msg), end="")
    return True
3c7e8cfda53abb0551916894719e66b3d27886e9
3,649,103
import torch

def to_sparse(x):
    """ converts dense tensor x to sparse format """
    x_typename = torch.typename(x).split('.')[-1]
    sparse_tensortype = getattr(torch.sparse, x_typename)

    indices = torch.nonzero(x)
    # nonzero returns a 2-D tensor even when empty, so test element count
    if indices.numel() == 0:  # if all elements are zeros
        return sparse_tensortype(*x.shape)
    indices = indices.t()
    values = x[tuple(indices[i] for i in range(indices.shape[0]))]
    return sparse_tensortype(indices, values, x.size())
b9af99c3c6e41e4f6a73ad213f58338110329dbc
3,649,104
def get_top_article_categories():
    """
    Get the list of top-level article categories.
    Custom template tag.
    """
    return Category.objects.filter(level=1)
88ed0aefe81b3190590974a38c9363f862b8db6c
3,649,105
def filter_variants_top_k(log, k, parameters=None):
    """
    Keeps the top-k variants of the log

    Parameters
    -------------
    log
        Event log
    k
        Number of variants that should be kept
    parameters
        Parameters

    Returns
    -------------
    filtered_log
        Filtered log
    """
    if parameters is None:
        parameters = {}
    variants = variants_get.get_variants_count(log, parameters=parameters)
    variant_count = []
    for variant in variants:
        variant_count.append([variant, variants[variant]])
    variant_count = sorted(variant_count, key=lambda x: (x[1], x[0]), reverse=True)
    variant_count = variant_count[:min(k, len(variant_count))]
    variants_to_filter = [x[0] for x in variant_count]
    return apply(log, variants_to_filter, parameters=parameters)
20e273f5d3ac88e3bc9d0795566b2536c10b5703
3,649,106
import numpy as np

def get_weighted_average(embedding, x, w):
    """
    Compute the weighted average vectors
    :param embedding: embedding[i,:] is the vector for word i
    :param x: x[i, :] are the indices of the words in sentence i
    :param w: w[i, :] are the weights for the words in sentence i
    :return: emb[i, :] are the weighted average vector for sentence i
    """
    n_samples = x.shape[0]
    emb = np.zeros((n_samples, embedding.shape[1]))
    for i in range(n_samples):
        emb[i, :] = w[i, :].dot(embedding[x[i, :], :]) / np.count_nonzero(w[i, :])
    return emb
e5cd9984e49075530f981c8600c7e6d86de3c113
3,649,107
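A small usage sketch for get_weighted_average above, with toy arrays:

    import numpy as np

    embedding = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    x = np.array([[0, 1]])      # sentence 0 consists of words 0 and 1
    w = np.array([[1.0, 1.0]])  # equal weights
    print(get_weighted_average(embedding, x, w))  # [[0.5 0.5]]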
from glyphsLib import glyphdata  # Expensive import

def _build_gdef(ufo):
    """Build a table GDEF statement for ligature carets."""
    bases, ligatures, marks, carets = set(), set(), set(), {}
    category_key = GLYPHLIB_PREFIX + 'category'
    subCategory_key = GLYPHLIB_PREFIX + 'subCategory'
    for glyph in ufo:
        has_attaching_anchor = False
        for anchor in glyph.anchors:
            name = anchor.name
            if name and not name.startswith('_'):
                has_attaching_anchor = True
            if name and name.startswith('caret_') and 'x' in anchor:
                carets.setdefault(glyph.name, []).append(round(anchor['x']))
        lib = glyph.lib
        glyphinfo = glyphdata.get_glyph(glyph.name)
        # first check glyph.lib for category/subCategory overrides; else use
        # global values from GlyphData
        category = lib.get(category_key)
        if category is None:
            category = glyphinfo.category
        subCategory = lib.get(subCategory_key)
        if subCategory is None:
            subCategory = glyphinfo.subCategory

        # Glyphs.app assigns glyph classes like this:
        #
        # * Base: any glyph that has an attaching anchor
        #   (such as "top"; "_top" does not count) and is neither
        #   classified as Ligature nor Mark using the definitions below;
        #
        # * Ligature: if subCategory is "Ligature" and the glyph has
        #   at least one attaching anchor;
        #
        # * Mark: if category is "Mark" and subCategory is either
        #   "Nonspacing" or "Spacing Combining";
        #
        # * Compound: never assigned by Glyphs.app.
        #
        # https://github.com/googlei18n/glyphsLib/issues/85
        # https://github.com/googlei18n/glyphsLib/pull/100#issuecomment-275430289
        if subCategory == 'Ligature' and has_attaching_anchor:
            ligatures.add(glyph.name)
        elif category == 'Mark' and (subCategory == 'Nonspacing' or
                                     subCategory == 'Spacing Combining'):
            marks.add(glyph.name)
        elif has_attaching_anchor:
            bases.add(glyph.name)
    if not any((bases, ligatures, marks, carets)):
        return None
    lines = ['table GDEF {', '  # automatic']
    glyphOrder = ufo.lib[PUBLIC_PREFIX + 'glyphOrder']
    glyphIndex = lambda glyph: glyphOrder.index(glyph)
    fmt = lambda g: ('[%s]' % ' '.join(sorted(g, key=glyphIndex))) if g else ''
    lines.extend([
        '  GlyphClassDef',
        '    %s, # Base' % fmt(bases),
        '    %s, # Liga' % fmt(ligatures),
        '    %s, # Mark' % fmt(marks),
        '    ;'])
    for glyph, caretPos in sorted(carets.items()):
        # `unicode` presumably comes from fontTools.misc.py23 in the original module
        lines.append('  LigatureCaretByPos %s %s;' %
                     (glyph, ' '.join(unicode(p) for p in sorted(caretPos))))
    lines.append('} GDEF;')
    return '\n'.join(lines)
2163971557a8908cce5f142f2e9dfc7fe360f190
3,649,108
def winningRate2(r, s, X, Y):
    """
    revised version, now we want to investigate how value of X and Y will affect.

    r: int = remaining round of game
    s: int = current score
    X: int = points winning for X-head
    Y: int = points winning for Y-head
    (assuming X and Y are both fair, and we always assume Y > X)
    """
    # `cache` is assumed to be a module-level memoization dict
    if X > Y:
        X, Y = Y, X

    def rec(r, s):
        if (r, s) not in cache:
            if r < 1:
                raise TypeError("r can not be smaller than 1.")
            if r == 1:
                if s <= -Y:  # only Y head for the win.
                    cache[(r, s)] = 0
                    return cache[(r, s)]
                if s >= (-Y + 1) and s <= X:  # play X or Y shall be the same
                    cache[(r, s)] = 0.5
                    return cache[(r, s)]
                if s > X:  # play X, guaranteed win
                    cache[(r, s)] = 1
                    return cache[(r, s)]
            cache[(r, s)] = max(
                (rec(r - 1, s + X) + rec(r - 1, s - X)) / 2,
                (rec(r - 1, s + Y) + rec(r - 1, s - Y)) / 2,
            )
        return cache[(r, s)]

    return rec(r, s)
d33b05aa429044cb76b33842e33b99c1d1d6de7f
3,649,109
def DayOfWeek(year, month, day):
    """DayOfWeek returns the day of week 1-7, 1 being Monday,
    for the given year, month and day"""
    num = year*365
    num = num + year//4 + 1
    num = num - (year//100 + 1)
    num = num + year//400 + 1
    if month < 3 and LeapYear(year):
        num = num - 1
    return (num + MONTH_OFFSETS[month-1] + day + 4) % 7 + 1
41c974e1342e65d553702d0610e8dc9c671538a6
3,649,111
def chef_execute_cli_commands(configuration):
    """
    API to generate sonic cli commands with the provided configuration
    :param configuration:
    :return:
    """
    if not configuration:
        return False
    commands = ""
    action_run = "action:run"
    for module in configuration:
        if module == "vlans":
            member_commands = config_cmd = member_config_cmd = ""
            for action in configuration[module]:
                if action == "add":
                    module_action = "vlan_add"
                    member_action = "vlan_member_add"
                elif action == "del":
                    module_action = "vlan_del"
                    member_action = "vlan_member_del"
                commands += "execute '{}' do\n".format(module_action)
                member_commands += "execute '{}' do\n".format(member_action)
                for vlan_id in configuration[module][action]:
                    config_cmd += "config vlan {} {}".format(action, vlan_id) + " && "
                    if "members" in configuration[module][action][vlan_id]:
                        for member in configuration[module][action][vlan_id]["members"]:
                            untag = "" if member["tagged"] or member["tagged"] == "True" else "-u"
                            member_config_cmd += "config vlan member {} {} {} {}".format(
                                action, vlan_id, member["port"], untag).strip() + " && "
                    else:
                        member_commands = ""
                config_cmd = config_cmd.rstrip(" &")
                member_config_cmd = member_config_cmd.rstrip(" &")
                commands += " command '{}'\n".format(config_cmd)
                member_commands += " command '{}'\n".format(member_config_cmd)
                commands += " {}\n".format(action_run)
                commands += "end\n\n"
                if member_commands:
                    member_commands += " {}\n".format(action_run)
                    member_commands += "end\n\n"
                    commands += member_commands
        if module == "fdbs":
            for action in configuration[module]:
                config_cmd = ""
                if action == "add":
                    module_action = "fdb_add"
                elif action == "del":
                    module_action = "fdb_del"
                commands += "execute '{}' do\n".format(module_action)
                for entry in configuration[module][action]:
                    mac = entry["mac"] if "mac" in entry else ""
                    vlan_id = entry["vlan_id"] if "vlan_id" in entry else ""
                    port = entry["port"] if "port" in entry else ""
                    if action == "del":
                        config_cmd += "config mac {} {} {}".format(action, mac, vlan_id) + " && "
                    else:
                        config_cmd += "config mac {} {} {} {}".format(action, mac, vlan_id, port) + " && "
                config_cmd = config_cmd.rstrip(" && ")
                commands += " command '{}'\n".format(config_cmd)
                commands += " {}\n".format(action_run)
                commands += "end\n\n"
        if module == "lags":
            member_commands = ""
            for action in configuration[module]:
                fallback = min_links = config_cmd = member_config_cmd = ""
                if action == "add":
                    module_action = "lag_add"
                    member_action = "lag_member_add"
                elif action == "del":
                    module_action = "lag_del"
                    member_action = "lag_member_del"
                commands += "execute '{}' do\n".format(module_action)
                member_commands += "execute '{}' do\n".format(member_action)
                for portchannel in configuration[module][action]:
                    portchannel_config = configuration[module][action][portchannel]
                    if "fallback" in portchannel_config and (
                            portchannel_config["fallback"] or portchannel_config["fallback"] == "True"):
                        fallback = "--fallback true"
                    if "min-links" in portchannel_config:
                        min_links = "--min-links {}".format(portchannel_config["min-links"])
                    config_cmd += "config portchannel {} {} {} {}".format(
                        action, portchannel, fallback, min_links).strip() + " && "
                    if "links" in configuration[module][action][portchannel]:
                        for member in configuration[module][action][portchannel]["links"]:
                            member_config_cmd += "config portchannel member {} {} {}".format(
                                action, portchannel, member) + " && "
                    else:
                        member_commands = ""
                config_cmd = config_cmd.rstrip(" && ")
                member_config_cmd = member_config_cmd.rstrip(" && ")
                member_commands += " command '{}'\n".format(member_config_cmd)
                commands += " command '{}'\n".format(config_cmd)
                commands += " {}\n".format(action_run)
                commands += "end\n\n"
                if member_commands:
                    member_commands += " {}\n".format(action_run)
                    member_commands += "end\n\n"
                    commands += member_commands
        if module == "interfaces":
            config_cmd = ""
            commands += "execute 'interface' do\n"
            for interface in configuration[module]:
                if "admin_status" in configuration[module][interface]:
                    operation = "shutdown" if configuration[module][interface]["admin_status"] == "down" else "startup"
                    config_cmd += "config interface {} {}".format(operation, interface) + " && "
                if "speed" in configuration[module][interface]:
                    config_cmd += "config interface {} speed {}".format(
                        interface, configuration[module][interface]["speed"]) + " && "
            config_cmd = config_cmd.rstrip(" && ")
            commands += " command '{}'\n".format(config_cmd)
            commands += " {}\n".format(action_run)
            commands += "end\n\n"
    st.log("complete_command: \n{}".format(commands))
    return commands
439b7310015a9707ea6796ea3d24577a5dec069f
3,649,113
import numpy as np

def _normalize(vector):
    """Returns a normalized version of a numpy vector."""
    return vector/np.sqrt(np.dot(vector, vector))
42942ea19af176f6c9fa0ad39b7e060dd518c086
3,649,114
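A quick usage sketch for _normalize above:

    import numpy as np

    print(_normalize(np.array([3.0, 4.0])))  # [0.6 0.8]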
import numpy as np

def get_chol_factor(lower_tri_vals):
    """
    Args:
        lower_tri_vals: numpy array, shaped as the number of lower triangular
            elements, number of observations. The values ordered according to
            np.tril_indices(p) where p is the dimension of the multivariate
            normal distn

    Returns:
        Nxpxp numpy array, with the lower triangle filled in.
        The diagonal is exponentiated.
    """
    lower_size, N = lower_tri_vals.shape
    # solve p(p+1)/2 = lower_size to get the number of dimensions
    p = (-1 + (1 + 8 * lower_size) ** 0.5) / 2
    p = int(p)

    if not isinstance(lower_tri_vals, np.ndarray):
        lower_tri_vals = np.array(lower_tri_vals)

    L = np.zeros((N, p, p))
    for par_ind, (k, l) in enumerate(zip(*np.tril_indices(p))):
        if k == l:
            # Add a small number to avoid singular matrices.
            L[:, k, l] = np.exp(lower_tri_vals[par_ind, :]) + 1e-6
        else:
            L[:, k, l] = lower_tri_vals[par_ind, :]
    return L
fa27afefb49a87bdeac8bceee9f95b34e6c01d3f
3,649,116
import numpy as np

def get_angles_gram_mask(gram, mask):
    """
    Input: (gram) square numpy array,
           (mask) square numpy array where 1 = select, 0 = do not select
    Output: (angles) numpy array of angles in mask in degrees
    """
    angles = gram * mask
    angles = angles[angles != 0]
    angles = np.degrees(np.arccos(angles))
    return angles
3303e318f42b2a7c3a15b4d267f07b7618026b25
3,649,117
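A short usage sketch for get_angles_gram_mask above, selecting a single off-diagonal cosine:

    import numpy as np

    gram = np.array([[1.0, 0.5], [0.5, 1.0]])  # pairwise cosines
    mask = np.array([[0, 1], [0, 0]])          # select only the (0, 1) entry
    print(get_angles_gram_mask(gram, mask))    # [60.] — arccos(0.5) in degrees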
def _expectation(p, kern1, feat1, kern2, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
        - Ka_{.,.}, Kb_{.,.} :: Linear kernels

    Ka and Kb as well as Z1 and Z2 can differ from each other, but this is
    supported only if the Gaussian p is Diagonal (p.cov NxD) and Ka, Kb have
    disjoint active_dims in which case the joint expectations simplify into a
    product of expectations

    :return: NxMxM
    """
    if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian):
        # no joint expectations required
        eKxz1 = expectation(p, (kern1, feat1))
        eKxz2 = expectation(p, (kern2, feat2))
        return eKxz1[:, :, None] * eKxz2[:, None, :]

    if kern1 != kern2 or feat1 != feat2:
        raise NotImplementedError("The expectation over two kernels has only an "
                                  "analytical implementation if both kernels are equal.")

    kern = kern1
    feat = feat1

    with params_as_tensors_for(kern, feat):
        # use only active dimensions
        Xcov = kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
        Z, Xmu = kern._slice(feat.Z, p.mu)

        N = tf.shape(Xmu)[0]
        var_Z = kern.variance * Z
        tiled_Z = tf.tile(tf.expand_dims(var_Z, 0), (N, 1, 1))  # NxMxD
        XX = Xcov + tf.expand_dims(Xmu, 1) * tf.expand_dims(Xmu, 2)  # NxDxD
        return tf.matmul(tf.matmul(tiled_Z, XX), tiled_Z, transpose_b=True)
105a6445f1a37e2208c65c8bcbcfe76227516991
3,649,118
import numpy as n  # the `n` alias matches how this module refers to numpy

def rotX(angle):
    """
    -----------------------------------------------------------------------
    Purpose:   Calculate the matrix that represents a 3d rotation around the
               X axis.

    Input:     Rotation angle in degrees

    Returns:   A 3x3 matrix representing the rotation about angle around
               the X axis.

    Reference: Diebel, J. 2006, Stanford University, Representing Attitude:
               Euler angles, Unit Quaternions and Rotation Vectors.
               http://ai.stanford.edu/~diebel/attitude.html

    Notes:     Return the rotation matrix for a rotation around the X axis.
               This is a rotation in the YZ plane. Note that we construct
               a new vector with: xnew = R1.x
               In the literature, this rotation is usually called R1
    -----------------------------------------------------------------------
    """
    a = d2r(angle)
    v = n.asmatrix(n.zeros((3, 3), 'd'))
    cosa = n.cos(a)
    sina = n.sin(a)
    v[0, 0] = 1.0
    v[0, 1] = 0.0
    v[0, 2] = 0.0
    v[1, 0] = 0.0
    v[1, 1] = cosa
    v[1, 2] = sina
    v[2, 0] = 0.0
    v[2, 1] = -sina
    v[2, 2] = cosa
    return v
b1dd62497cf9db137edbd90f1ff4f6fdb36d54d5
3,649,119
import re

def LF_positive_MeshTerm(report):
    """
    Looking for positive mesh terms
    """
    for idx in range(1, len(categories)):
        reg_pos = re.compile(categories[idx], re.IGNORECASE)
        reg_neg = re.compile('(No|without|resolution)\\s([a-zA-Z0-9\-,_]*\\s){0,10}'
                             + categories[idx], re.IGNORECASE)
        for s in report.report_text.text.split("."):
            if reg_pos.search(s) and (not reg_neg.search(s)) and (not reg_equivocation.search(s)):
                return ABNORMAL_VAL
    return ABSTAIN_VAL
391d5775cf109c9f4b0d254162b93937882605ee
3,649,121
import numpy as np
import scipy.sparse as sp

def user_ratio_shuffle_split_with_targets(X, train_ratio=0.8,
                                          n_valid_users=1000,
                                          n_test_users=1000,
                                          minimum_interaction=3,
                                          rand_state=None):
    """ Split given test / valid user records into subsets

    User records are split proportionally per user, as in
    `user_ratio_shuffle_split`. However, the split is only made for a
    randomly selected test / valid user population.

    Inputs:
        X (scipy.sparse.csr_matrix): user-item matrix
        train_ratio (float): ratio of training records per user
        n_valid_users (int): number of validation users
        n_test_users (int): number of testing users
        minimum_interaction (int): minimum interaction of user to be considered.
                                   if it's smaller than this, put all records
                                   to the training set
        rand_state (bool or int): random state seed number or None

    Returns:
        scipy.sparse.csr_matrix: training matrix
        scipy.sparse.csr_matrix: validation matrix
        scipy.sparse.csr_matrix: testing matrix
    """
    # first draw valid / test users
    rnd_idx = np.random.permutation(X.shape[0])
    valid_users = rnd_idx[:n_valid_users]
    test_users = rnd_idx[n_valid_users:n_valid_users + n_test_users]
    train_users = rnd_idx[n_valid_users + n_test_users:]

    # split records for valid / test users
    Xvl, Xvl_vl, Xvl_ts = user_ratio_shuffle_split(X[valid_users],
                                                   train_ratio,
                                                   0.5,  # valid_ratio
                                                   minimum_interaction,
                                                   rand_state)
    # merge them, as this scheme does not need a within-user validation set
    Xvl_ts = Xvl_vl + Xvl_ts

    Xts, Xts_vl, Xts_ts = user_ratio_shuffle_split(X[test_users],
                                                   train_ratio,
                                                   0.5,  # valid ratio
                                                   minimum_interaction,
                                                   rand_state)
    Xts_ts = Xts_vl + Xts_ts  # merge

    # assign them back to the original data
    Xtr = X[train_users]
    Xtr_ = sp.vstack([Xvl, Xts, Xtr])
    Xts_ = sp.vstack([Xvl_ts, Xts_ts, Xtr])

    # un-shuffle
    reverse_idx = {j: i for i, j in enumerate(rnd_idx)}
    reverse_idx = [reverse_idx[i] for i in range(X.shape[0])]
    Xtr_ = Xtr_[reverse_idx]
    Xts_ = Xts_[reverse_idx]

    return Xtr_, Xts_, (train_users, valid_users, test_users)
0f6bc42e94caff49c0be29215b9a6ec4f2203ff7
3,649,122
import torch

def slice_core(core_tensor, inputs):
    """
    Get matrix slices by indexing or contracting inputs, depending on input dtype
    """
    assert isinstance(core_tensor, torch.Tensor)
    assert isinstance(inputs, torch.Tensor)

    if is_int_type(inputs):
        return core_tensor[:, inputs, :]
    else:
        return torch.einsum("jak,ba->jbk", core_tensor, inputs)
01a70a678286977b3a36ca24a2f67dce4dbc01fe
3,649,123
import math

def geo2xy(ff_lat_pto, ff_lng_pto, ff_lat_ref=cdf.M_REF_LAT, ff_lng_ref=cdf.M_REF_LNG):
    """
    transforms geographic coordinates into cartesian coordinates

    :param ff_lat_pto: latitude in degrees
    :param ff_lng_pto: longitude in degrees
    :param ff_lat_ref: latitude of the reference point
    :param ff_lng_ref: longitude of the reference point

    :returns: cartesian coordinates of the point (x, y)
    """
    # logger
    M_LOG.info(">> geo2xy")

    # check input
    assert -90. <= ff_lat_pto <= 90.
    assert -180. <= ff_lng_pto <= 180.
    assert -90. <= ff_lat_ref <= 90.
    assert -180. <= ff_lng_ref <= 180.

    # convert from geographic to polar
    lf_azim, lf_dist = geo2pol(ff_lat_pto, ff_lng_pto, ff_lat_ref, ff_lng_ref)

    # convert from polar to cartesian
    lf_x = lf_dist * math.sin(math.radians(lf_azim))
    lf_y = lf_dist * math.cos(math.radians(lf_azim))

    # correction of X and Y coordinates for the effect of magnetic declination
    # lf_x, lf_y = decl_xyz(lf_x, lf_y, lf_z, f_ref.f_dcl_mag)

    # return
    return lf_x, lf_y
93dbe8a41aecb3029d5ac6fa3e68c740c625a486
3,649,125
def size(e):
    """
    :rtype: Column
    """
    return col(Size(parse(e)))
06b904583dc25a9e40b97ca6655bb0e5dcb28304
3,649,126
def b64pad(b64data):
    """Pad base64 string with '=' to achieve a length that is a multiple of 4
    """
    # -len % 4 yields 0..3, so input already a multiple of 4 gains no padding
    return b64data + '=' * (-len(b64data) % 4)
bdc14821bfbdbf220ff371fbe5e486d3e682337b
3,649,127
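A usage sketch for b64pad above (with the modulo fix, already-aligned input gains no padding):

    print(b64pad('YWJjZA'))    # 'YWJjZA=='
    print(b64pad('YWJjZEVG'))  # 'YWJjZEVG'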
import numpy as np

def get_peak_electric_demand(points_on_line):
    """
    Initialize Power Demand

    :param points_on_line: information about every node in study case
    :type points_on_line: GeoDataFrame
    :returns:
        - **dict_peak_el**: Value is the ELECTRIC peak demand depending on
          thermally connected or disconnected.
    :rtype: dict[node index][thermally connected bool]
    """
    dict_peak_el = {}
    dict_peak_el['thermally_conn_peak_el'] = {}
    dict_peak_el['thermally_disconn_peak_el'] = {}
    for idx_node, node in points_on_line.iterrows():
        if not np.isnan(node['GRID0_kW']):
            thermally_conn_peak_el = (node['Eal0_kW']
                                      + node['Edata0_kW']
                                      + node['Epro0_kW']
                                      + node['Eaux0_kW']
                                      + node['E_ww0_kW'])
            thermally_disconn_peak_el = (thermally_conn_peak_el
                                         + node['E_hs0_kW']
                                         + node['E_cs0_kW'])
            dict_peak_el['thermally_conn_peak_el'][idx_node] = thermally_conn_peak_el / (S_BASE * 10 ** 3)  # kW/MW
            dict_peak_el['thermally_disconn_peak_el'][idx_node] = thermally_disconn_peak_el / (S_BASE * 10 ** 3)  # kW/MW
    return dict_peak_el
015fe10835f49f060e24681335d77a79586e31ea
3,649,128
def _GetDatabaseLookupFunction(filename, flaps, omega_hat, thrust_coeff):
    """Produces a lookup function from an aero database file."""
    db = load_database.AeroDatabase(filename)

    def _Lookup(alpha, beta, dflaps=None, domega=None):
        if dflaps is None:
            dflaps = np.zeros((system_types.kNumFlaps,))
        if domega is None:
            domega = np.zeros((3,))
        return db.CalcFMCoeff(alpha, beta, flaps + dflaps,
                              omega_hat + domega, thrust_coeff)

    return _Lookup, db.format
5b1e7a4636aa824466e791b54dd7a67a4208962e
3,649,129
def parse_copy_core_dump(raw_result):
    """
    Parse the 'copy core-dump' command raw output.

    :param str raw_result: copy core-dump raw result string.
    :rtype: dict
    :return: The parsed result of the copy core-dump to server:

        ::

            {
                'status': 'success',
                'reason': 'core dump copied'
            }
    """
    if "Error code " in raw_result:
        return {"status": "failed", "reason": "Error found while copying"}
    if "No coredump found for" in raw_result:
        return {"status": "failed", "reason": "no core dump found"}
    if "Failed to validate instance ID" in raw_result:
        return {"status": "failed", "reason": "instance ID not valid"}
    if "ssh: connect to host" in raw_result:
        return {"status": "failed", "reason": "ssh-connection issue for SFTP"}
    if (
        "copying ..." in raw_result and
        "Sent " in raw_result and
        "bytes" in raw_result and
        "seconds" in raw_result
    ):
        return {"status": "success", "reason": "core dump copied"}
    else:
        return {"status": "failed", "reason": "undefined error"}
4ce168c9bc8c462ecc36beba889adb36cc64135d
3,649,130
def _handle_requirements(hass, component, name): """Install the requirements for a component.""" if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'): return True for req in component.REQUIREMENTS: if not pkg_util.install_package(req, target=hass.config.path('lib')): _LOGGER.error('Not initializing %s because could not install ' 'dependency %s', name, req) return False return True
efa0c371150a9aee9f26136a17ad1e33b9760340
3,649,131
def ticket_id_url(workspace, number):
    """
    The url for a specific ticket in a specific workspace

    :param workspace: The workspace
    :param number: The number of the ticket
    :return: The url to fetch that specific ticket
    """
    # the original concatenated ' spaces/' with a stray leading space, which
    # yields an invalid URL; this assumes basic_url ends with '/'
    return basic_url + 'spaces/' + workspace + '/tickets/' + number + '.json'
a0ffeb53062c635f74feb011af793a2c00c361c4
3,649,132
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
    """One way to compute a per-particle "4D" offset in terms of an adjustable lamb and
    constant per-particle parameters.

    Notes
    -----
    (ytz): this initializes the 4th dimension to a fixed plane adjusted by an offset
    and then scaled by the cutoff.

    lambda_plane_idxs are typically 0 or 1 and allow us to turn off an interaction
    independent of the lambda value.

    lambda_offset_idxs are also typically 0 or 1, and allow us to adjust the w
    coordinate in a lambda-dependent way.
    """

    w = cutoff * (lambda_plane_idxs + lambda_offset_idxs * lamb)
    return w
a9455ed67fcb21bcf1382fe66a77e0563f467421
3,649,134
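A tiny numeric sketch of the lifting behavior (the index arrays below are illustrative):

import numpy as np

cutoff = 1.2
plane = np.array([0, 1, 0])   # particle 1 is lifted regardless of lambda
offset = np.array([1, 0, 1])  # particles 0 and 2 move with lambda
print(compute_lifting_parameter(0.0, plane, offset, cutoff))  # [0.  1.2 0. ]
print(compute_lifting_parameter(1.0, plane, offset, cutoff))  # [1.2 1.2 1.2]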
import re import json def create_controller(): """ 1. Check the token 2. Call the worker method """ minimum_buffer_min = 3 if views.ds_token_ok(minimum_buffer_min): # 2. Call the worker method # More data validation would be a good idea here # Strip anything other than characters listed pattern = re.compile('([^\w \-\@\.\,])+') signer_email = pattern.sub('', request.form.get('signer_email')) signer_name = pattern.sub('', request.form.get('signer_name')) cc_email = pattern.sub('', request.form.get('cc_email')) cc_name = pattern.sub('', request.form.get('cc_name')) envelope_args = { 'signer_email': signer_email, 'signer_name': signer_name, 'cc_email': cc_email, 'cc_name': cc_name, 'status': 'sent', } args = { 'account_id': session['ds_account_id'], 'base_path': session['ds_base_path'], 'ds_access_token': session['ds_access_token'], 'envelope_args': envelope_args } try: results = worker(args) except ApiException as err: error_body_json = err and hasattr(err, 'body') and err.body # we can pull the DocuSign error code and message from the response body error_body = json.loads(error_body_json) error_code = error_body and 'errorCode' in error_body and error_body['errorCode'] error_message = error_body and 'message' in error_body and error_body['message'] # In production, may want to provide customized error messages and # remediation advice to the user. return render_template('error.html', err=err, error_code=error_code, error_message=error_message ) if results: session["envelope_id"] = results["envelope_id"] # Save for use by other examples # which need an envelopeId return render_template('example_done.html', title="Envelope sent", h1="Envelope sent", message=f"""The envelope has been created and sent!<br/> Envelope ID {results["envelope_id"]}.""" ) else: flash('Sorry, you need to re-authenticate.') # We could store the parameters of the requested operation # so it could be restarted automatically. # But since it should be rare to have a token issue here, # we'll make the user re-enter the form data after # authentication. session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate'))
1aa777b66f110d575ea16531ca4bd72e0117e0b0
3,649,135
def reproduce_load_profile(neural_model, simulation_model: CHPP_HWT, input_data, logger):
    """
    Tries to follow a real load profile
    """
    # make sure the random seeds are different in each process
    #np.random.seed(int.from_bytes(os.urandom(4), byteorder='little'))

    temperature, powers, heat_demand = input_data
    time_step_count = powers.shape[0]

    result = {}
    # shift the temperature profile so that its minimum is at least 60
    result['temp_offset'] = max(-min(temperature) + 60, 0)
    temperature += result['temp_offset']

    # determine the initial state
    simulation_model.eval() # sample with eval() setting
    simulation_model.chpp.mode = 0 if powers[0] > -3000 else 1
    simulation_model.chpp.min_off_time = 900
    simulation_model.chpp.min_on_time = 900
    simulation_model.chpp.dwell_time = 900
    simulation_model.hwt.temperature = temperature[0]
    simulation_model.demand.demand = heat_demand[0]
    simulation_model.demand.forecast_series = heat_demand[1:].reshape(-1,1)
    neural_model.load_state(simulation_model.state)
    simulation_model.train() # strict constraints (which the ANN should have learned)

    # do a forecast in order to predetermine the external input and the mask required to update inputs
    sampling_parameters = {}
    forecast, forecast_mask = simulation_model.forecast(time_step_count, **sampling_parameters)

    result['infeasible_at'] = time_step_count
    result['classified_infeasible_at'] = time_step_count
    delta_temp_ann = []
    delta_temp_sim = []
    for step, power in enumerate(powers):
        ann_feasible = neural_model.feasible_actions
        sim_feasible = simulation_model.feasible_actions

        delta_temp_ann.append(neural_model.state[-2] - temperature[step])
        delta_temp_sim.append(simulation_model.state[-2] - temperature[step])

        # identify the correct action to follow
        if power > -3000: # off
            action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][0][0]
        else: # on
            action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][1][0]

        if not np.isin(action_choice, sim_feasible) and result['infeasible_at'] >= time_step_count:
            # infeasible action and therefore an infeasible load profile
            # an entry smaller than time_step_count means it has already been detected as infeasible
            result['infeasible_at'] = step

        if not np.isin(action_choice, ann_feasible) and result['classified_infeasible_at'] >= time_step_count:
            # action deemed infeasible
            # an entry smaller than time_step_count means it has already been detected as infeasible
            result['classified_infeasible_at'] = step
            # keep going to see whether the simulation model can reproduce the schedule or not
            # while an undetected infeasibility is actually an error at this point,
            # the remaining load schedule could still provide further indications that it is actually infeasible
            # (proceeding like this is also required for comparability with Bremer2015)

        state, interaction = neural_model.transition(action_choice)
        simulation_model.transition(action_choice)

        if step + 1 < time_step_count:
            # post processing to incorporate forecasts
            neural_model.state = state * (1-forecast_mask[step+1]) + forecast_mask[step+1] * forecast[step+1]
    #else:
        # reached final step without stopping due to a detected infeasibility

    result['delta_temp'] = delta_temp_ann
    result['[delta_temp]'] = delta_temp_sim
    return result
078fa837c0c82ea17c42ca949ba9bb33cdaeaaa0
3,649,136
def session_pca(imgs, mask_img, parameters,
                n_components=20,
                confounds=None,
                memory_level=0,
                memory=Memory(cachedir=None),
                verbose=0,
                copy=True):
    """Filter, mask and compute PCA on Niimg-like objects

    This is a helper function which first calls
    `base_masker.filter_and_mask` and then applies a PCA to reduce the
    number of time series.

    Parameters
    ----------
    imgs: list of Niimg-like objects
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        List of subject data

    mask_img: Niimg-like object
        See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
        Mask to apply on the data

    parameters: dictionary
        Dictionary of parameters passed to `filter_and_mask`. Please see the
        documentation of the `NiftiMasker` for more information.

    confounds: CSV file path or 2D matrix
        This parameter is passed to signal.clean. Please see the
        corresponding documentation for details.

    n_components: integer, optional
        Number of components to be extracted by the PCA

    memory_level: integer, optional
        Integer indicating the level of memorization. The higher, the more
        function calls are cached.

    memory: joblib.Memory
        Used to cache the function calls.

    verbose: integer, optional
        Indicate the level of verbosity (0 means no messages).

    copy: boolean, optional
        Whether or not data should be copied
    """

    data, affine = cache(
        filter_and_mask, memory,
        memory_level=memory_level,
        func_memory_level=2,
        ignore=['verbose', 'memory', 'memory_level', 'copy'])(
            imgs, mask_img, parameters,
            memory_level=memory_level,
            memory=memory,
            verbose=verbose,
            confounds=confounds,
            copy=copy)

    if n_components <= data.shape[0] // 4:
        U, S, _ = randomized_svd(data.T, n_components)
    else:
        U, S, _ = linalg.svd(data.T, full_matrices=False)
    U = U.T[:n_components].copy()
    S = S[:n_components]
    return U, S
f175ac8d9c39e133c34a4db215b5e87288bdafd4
3,649,137
def numpy_napoleon(prnt_doc, child_doc): """Behaves identically to the 'numpy' style, but abides by the docstring sections specified by the "Napoleon" standard. For more info regarding the Napoleon standard, see: http://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#docstring-sections Example: - parent's docstring: ''' Parent's line Keyword Arguments ----------------- x: int description of x y: Union[None, int] description of y Raises ------ NotImplemented Error''' - child's docstring: ''' Child's line Returns ------- int Notes ----- notes blah blah''' - docstring that is ultimately inherited: ''' Child's line Keyword Arguments ----------------- x: int description of x y: Union[None, int] description of y Returns ------- int Notes ----- notes blah blah''' """ return merge_numpy_napoleon_docs(prnt_doc, child_doc)
1795ddd1cfeeb8aee07cb17a369c6043b5fda52f
3,649,138
def search_unique_identities_slice(db, term, offset, limit): """Look for unique identities using slicing. This function returns those unique identities which match with the given `term`. The term will be compared with name, email, username and source values of each identity. When an empty term is given, all unique identities will be returned. The results are limited by `offset` (starting on 0) and `limit`. Along with the list of unique identities, this function returns the total number of unique identities that match the given `term`. :param db: database manager :param term: term to match with unique identities data :param offset: return results starting on this position :param limit: maximum number of unique identities to return :raises InvalidValueError: raised when either the given value of `offset` or `limit` is lower than zero """ uidentities = [] pattern = '%' + term + '%' if term else None if offset < 0: raise InvalidValueError('offset must be greater than 0 - %s given' % str(offset)) if limit < 0: raise InvalidValueError('limit must be greater than 0 - %s given' % str(limit)) with db.connect() as session: query = session.query(UniqueIdentity).\ join(Identity).\ filter(UniqueIdentity.uuid == Identity.uuid) if pattern: query = query.filter(Identity.name.like(pattern) | Identity.email.like(pattern) | Identity.username.like(pattern) | Identity.source.like(pattern)) query = query.group_by(UniqueIdentity).\ order_by(UniqueIdentity.uuid) # Get the total number of unique identities for that search nuids = query.count() start = offset end = offset + limit uidentities = query.slice(start, end).all() # Detach objects from the session session.expunge_all() return uidentities, nuids
1cfed05995eb90427a0f4a9475cfd8f7737d7e59
3,649,139
def get_change_description(req_sheet, row_num): """ Accessor for Change Description Args: req_sheet: A variable holding an Excel Workbook sheet in memory. row_num: A variable holding the row # of the data being accessed. Returns: A string value of the Change Description """ return (req_sheet['B' + str(row_num)].value)
7d3f286fb2586bf7bed64de8bb0cbf156e1ff954
3,649,140
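A hypothetical usage sketch with an openpyxl worksheet (the file name and row number are made up for illustration):

from openpyxl import load_workbook

wb = load_workbook('requirements.xlsx')      # hypothetical workbook
print(get_change_description(wb.active, 2))  # value of cell B2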
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler


def reduce_pca(data_df, n_components=None):
    """
    Uses PCA to reduce dimension.

    Parameters:
        data_df (DataFrame): The input data in DataFrame format
        n_components (int or float): The number of components to reduce to.
            If a float between 0 and 1, enough components are kept to explain
            that fraction of the variance. Default keeps all components.

    Returns:
        DataFrame: the data in the reduced dimension
    """
    new_df = data_df.reset_index(drop=True)
    data_np = new_df.to_numpy()

    # Standardize the data by removing the mean and scaling to unit variance
    pca_np = StandardScaler().fit_transform(data_np)

    pca = PCA(n_components)
    embedded = pca.fit_transform(pca_np)

    return pd.DataFrame(embedded, index=data_df.index)
b4b8db256b5996ddf3101a7737cb1396bc5abd06
3,649,141
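An illustrative run on random data, keeping enough components to explain 90% of the variance:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(100, 20))
reduced = reduce_pca(df, n_components=0.9)
print(reduced.shape)  # (100, k) for some k <= 20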
import numpy as np
import hypothesis.strategies as hst


def create_disjoint_intervals(draw,
                              dtype,
                              n_intervals=10,
                              dt=1,
                              time_range=(0, 100),
                              channel_range=(2000, 2119),
                              length_range=(1, 1), ):
    """
    Function which generates a hypothesis strategy for a fixed number
    of disjoint intervals

    :param draw: draw function supplied by hypothesis' @composite decorator
    :param dtype: Can be any strax-like dtype, either with an endtime field
        or with dt and length fields.
    :param n_intervals: How many disjoint intervals should be returned.
    :param dt: Sampling field, only needed for length + dt fields.
    :param time_range: Time range in which random numbers will be
        generated.
    :param channel_range: Range of channels for which the disjoint
        intervals will be generated. For a single channel set min/max
        equal.
    :param length_range: Range of how long time intervals can be.
    :return: hypothesis strategy which can be used in @given

    Note:
        You can use create_disjoint_intervals(...).example() to see an
        example.
        If you do not want to specify the bounds for any of the "_range"
        parameters set the corresponding bound to None.

        Somehow hypothesis complains that the creation of these events
        takes too long (~2 s for 50 intervals). You can disable the
        corresponding health checks via:

            @settings(suppress_health_check=[
                hypothesis.HealthCheck.large_base_example,
                hypothesis.HealthCheck.too_slow])
    """
    n = 0

    if not hasattr(dtype, 'fields'):
        # Convert dtype into numpy dtype
        dtype = np.dtype(dtype)

    is_dt = True
    if 'endtime' in dtype.fields:
        # Check whether interval uses dt fields or endtime
        is_dt = False

    strategy_example = np.zeros(n_intervals, dtype)
    if is_dt:
        strategy_example['dt'] = dt

    while n < n_intervals:
        # Create interval values:
        time = draw(hst.integers(*time_range))
        channel = draw(hst.integers(*channel_range))
        length = draw(hst.integers(*length_range))

        # Check if objects are disjoint:
        if _test_disjoint(strategy_example[:n], time, length, channel, dt):
            strategy_example[n]['time'] = time
            strategy_example[n]['channel'] = channel
            if is_dt:
                strategy_example[n]['length'] = length
            else:
                strategy_example[n]['endtime'] = time + int(length * dt)
            n += 1
    return strategy_example
67ed49bd8d94067cb6647164fa44beb4f8d91314
3,649,142
def get_deleted_resources(): """Get a list of resources that failed to be deleted in OVN. Get a list of resources that have been deleted from neutron but not in OVN. Once a resource is deleted in Neutron the ``standard_attr_id`` foreign key in the ovn_revision_numbers table will be set to NULL. Upon successfully deleting the resource in OVN the entry in the ovn_revision_number should also be deleted but if something fails the entry will be kept and returned in this list so the maintenance thread can later fix it. """ sort_order = sa.case(value=models.OVNRevisionNumbers.resource_type, whens=ovn_const.MAINTENANCE_DELETE_TYPE_ORDER) session = db_api.get_reader_session() with session.begin(): return session.query(models.OVNRevisionNumbers).filter_by( standard_attr_id=None).order_by(sort_order).all()
6a37fd84933ceee3a2a537aee8a01315f5869200
3,649,143
def load_base_schema(base_schema=None, verbose=False):
    """Load the base schema, which contains base classes for sub-classing
    in user schemas.
    """
    _base = base_schema or BASE_SCHEMA or []
    _base_schema = []
    if "schema.org" in _base:
        _base_schema.append(
            load_schemaorg(verbose=verbose)
        )
    if "bioschemas" in _base:
        _base_schema.append(
            load_bioschemas(verbose=verbose)
        )
    _base_schema = merge_schema(*_base_schema)
    return _base_schema
18fe2b7045aa6d8e7382c37093be053b619ec216
3,649,144
def endgame_score_connectfour(board, is_current_player_maximizer) : """Given an endgame board, returns 1000 if the maximizer has won, -1000 if the minimizer has won, or 0 in case of a tie.""" chains_1 = board.get_all_chains(current_player=is_current_player_maximizer) chains_2 = board.get_all_chains(current_player= not(is_current_player_maximizer)) for chain in chains_1: if len(chain) == 4: return 1000 for chain in chains_2: if len(chain) == 4: return -1000 return 0
bcb37381a9633377cb3405fbae45123e2a391df9
3,649,146
def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs): """Add a vertical color bar to an image plot. Taken from https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph """ divider = axes_grid1.make_axes_locatable(im.axes) width = axes_grid1.axes_size.AxesY(im.axes, aspect=1.0 / aspect) pad = axes_grid1.axes_size.Fraction(pad_fraction, width) current_ax = plt.gca() cax = divider.append_axes("right", size=width, pad=pad) plt.sca(current_ax) return im.axes.figure.colorbar(im, cax=cax, **kwargs)
acb0b21139d10393c2605bc94671fc774ada3800
3,649,148
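Typical usage next to an image plot (a sketch; the data and colormap are arbitrary):

import numpy as np
import matplotlib.pyplot as plt

im = plt.imshow(np.random.rand(10, 10), cmap='viridis')
add_colorbar(im, aspect=20, label='intensity')
plt.show()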
import time


def wait_procs(procs, timeout, callback=None):
    """Convenience function which waits for a list of processes to
    terminate.

    Return a (gone, alive) tuple indicating which processes
    are gone and which ones are still alive.

    The gone ones will have a new 'retcode' attribute indicating
    process exit status (may be None).

    'callback' is a callable function which gets called every time a
    process terminates (a Process instance is passed as callback
    argument).

    Function will return as soon as all processes terminate or when
    timeout occurs.

    Typical use case is:

     - send SIGTERM to a list of processes
     - give them some time to terminate
     - send SIGKILL to those ones which are still alive

    Example:

    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, still_alive = wait_procs(procs, 3, callback=on_terminate)
    >>> for p in still_alive:
    ...     p.kill()
    """
    def assert_gone(proc, timeout):
        try:
            retcode = proc.wait(timeout=timeout)
        except TimeoutExpired:
            pass
        else:
            if retcode is not None or not proc.is_running():
                proc.retcode = retcode
                gone.add(proc)
                if callback is not None:
                    callback(proc)

    timer = getattr(time, 'monotonic', time.time)
    gone = set()
    alive = set(procs)

    if callback is not None and not callable(callback):
        raise TypeError("callback %r is not a callable" % callback)

    deadline = timer() + timeout
    while alive:
        if timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            try:
                max_timeout = 1.0 / (len(alive) - len(gone))
            except ZeroDivisionError:
                max_timeout = 1.0  # one alive remaining
            timeout = min((deadline - timer()), max_timeout)
            if timeout <= 0:
                break
            assert_gone(proc, timeout)
        alive = alive - gone

    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            assert_gone(proc, 0)
        alive = alive - gone

    return (list(gone), list(alive))
624a6a1286a662a6f9e2d0680898e89b71585a7a
3,649,149
def select(sel, truecase, falsecase): """ Multiplexer returning falsecase for select==0, otherwise truecase. :param WireVector sel: used as the select input to the multiplexer :param WireVector falsecase: the WireVector selected if select==0 :param WireVector truecase: the WireVector selected if select==1 Example of mux as "ternary operator" to take the max of 'a' and 5: select( a<5, truecase=a, falsecase=5) """ sel, f, t = (as_wires(w) for w in (sel, falsecase, truecase)) f, t = match_bitwidth(f, t) outwire = WireVector(bitwidth=len(f)) net = LogicNet(op='x', op_param=None, args=(sel, f, t), dests=(outwire,)) working_block().add_net(net) # this includes sanity check on the mux return outwire
134e62fa84a16560e16f72294c9d01b3118c80e4
3,649,150
def fish_collision(sprite1, sprite2): """Algorithm for determining if there is a collision between the sprites.""" if sprite1 == sprite2: return False else: return collide_circle(sprite1, sprite2)
846024639f971c755b9ae88f8db43695d1e7c5e2
3,649,151
def normalizePeriodList(periods):
    """
    Normalize the list of periods by merging overlapping or consecutive ranges
    and sorting the list by each period's start.
    @param periods: a list of L{Period}. The list is changed in place.
    """

    # First sort the list
    def sortPeriods(p1, p2):
        """
        Compare two periods. Sort by their start and then end times.
        A period is a L{Period}.
        @param p1: first period
        @param p2: second period
        @return: 1 if p1>p2, 0 if p1==p2, -1 if p1<p2
        """

        assert isinstance(p1, Period), "Period is not a Period: %r" % (p1,)
        assert isinstance(p2, Period), "Period is not a Period: %r" % (p2,)

        if p1.getStart() == p2.getStart():
            cmp1 = p1.getEnd()
            cmp2 = p2.getEnd()
        else:
            cmp1 = p1.getStart()
            cmp2 = p2.getStart()

        return compareDateTime(cmp1, cmp2)

    for period in periods:
        period.adjustToUTC()
    periods.sort(cmp=sortPeriods)

    # Now merge overlaps and consecutive periods
    index = None
    p = None
    pe = None
    for i in xrange(len(periods)):
        if p is None:
            index = i
            p = periods[i]
            pe = p.getEnd()
            continue
        ie = periods[i].getEnd()
        if (pe >= periods[i].getStart()):
            if ie > pe:
                periods[index] = Period(periods[index].getStart(), ie)
                pe = ie
            periods[i] = None
        else:
            index = i
            p = periods[i]
            pe = p.getEnd()
    periods[:] = [x for x in periods if x]
d178123e8ef65b88e46130db24f96aa86b444b11
3,649,152
from typing import Callable from typing import Optional from typing import Any import inspect from typing import Dict def assemble(the_type: Callable[..., TypeT], profile: Optional[str] = None, **kwargs: Any) -> TypeT: """Create an instance of a certain type, using constructor injection if needed.""" ready_result = _create(the_type, profile) if ready_result is not None: return ready_result signature = inspect.signature(the_type) parameters = _get_parameters(signature) arguments: Dict[str, Any] = kwargs uses_manual_args = False for parameter_name, parameter_type in parameters.items(): if parameter_name in arguments: uses_manual_args = True continue if _is_list_type(parameter_type): parameter_components = _get_components( _get_list_type_elem_type(parameter_type), profile) arguments[parameter_name] = list(map(assemble, map(lambda comp: comp.get_type(), parameter_components))) else: parameter_component = _get_component(parameter_type, profile) param_factory = _get_factory(parameter_type, profile) if parameter_component is not None: arguments[parameter_name] = assemble( parameter_component.get_type(), profile) # parameter_type? elif param_factory: arguments[parameter_name] = param_factory.get_instance() result = the_type(**arguments) stored_component = _get_component(the_type, profile) if stored_component and not uses_manual_args: stored_component.set_instance_if_singleton(result) return result
e8a39d61ddcb8834daf45089f597754a2860a334
3,649,153
def coord_ijk_to_xyz(affine, coords):
    """ Converts voxel `coords` in cartesian (ijk) space to `affine` (xyz) space

    Parameters
    ----------
    affine : (4, 4) array-like
        Affine matrix
    coords : (N,) list of list
        Image coordinate values, where each entry is a length-three list of
        ints denoting ijk coordinates in cartesian space

    Returns
    -------
    xyz : (N, 3) numpy.ndarray
        Provided `coords` in `affine` space
    """
    coords = _check_coord_inputs(coords)
    mni_coords = np.dot(affine, coords)[:3].T
    return mni_coords
c7099a588df3bd85a3a5a85451e15812564aae2f
3,649,154
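A small worked example, assuming the module's `_check_coord_inputs` helper converts the list to the homogeneous (4, N) form the dot product requires:

import numpy as np

affine = np.diag([2., 2., 2., 1.])                # 2 mm isotropic voxels at the origin
print(coord_ijk_to_xyz(affine, [[10, 20, 30]]))   # -> [[20. 40. 60.]]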
import os
import shutil
import subprocess
import sys


def _get_create_statement(server, temp_datadir, frm_file, version,
                          options, quiet=False):
    """Get the CREATE statement for the .frm file

    This method attempts to read the CREATE statement by copying the .frm
    file, altering the storage engine in the .frm file to MEMORY and issuing
    a SHOW CREATE statement for the table/view.

    If this method returns None, the operation was successful and the CREATE
    statement was printed. If a string is returned, there was at least one
    error (which will be printed) and the .frm file was not readable.

    The returned frm file path can be used to tell the user to use the
    diagnostic mode for reading files byte-by-byte. See the method
    read_frm_files_diagnostic() above.

    server[in]         Server instance
    temp_datadir[in]   New data directory
    frm_file[in]       Tuple containing (db, table, path) for .frm file
    version[in]        Version string for the current server
    options[in]        Options from user

    Returns string - None on success, path to frm file on error
    """
    verbosity = int(options.get("verbosity", 0))
    quiet = options.get("quiet", False)
    new_engine = options.get("new_engine", None)
    frm_dir = options.get("frm_dir", ".{0}".format(os.sep))
    user = options.get('user', 'root')

    if not quiet:
        print "#\n# Reading the %s.frm file." % frm_file[1]
    try:
        # 1) copy the file
        db = frm_file[0]
        if not db or db == ".":
            db = "test"
        db_name = db + "_temp"
        new_path = os.path.normpath(os.path.join(temp_datadir, db_name))
        if not os.path.exists(new_path):
            os.mkdir(new_path)
        new_frm = os.path.join(new_path, frm_file[1] + ".frm")

        # Check name for decoding and decode
        try:
            if requires_decoding(frm_file[1]):
                new_frm_file = decode(frm_file[1])
                frm_file = (frm_file[0], new_frm_file, frm_file[2])
                shutil.copy(frm_file[2], new_path)
            # Check name for encoding and encode
            elif requires_encoding(frm_file[1]):
                new_frm_file = encode(frm_file[1]) + ".frm"
                new_frm = os.path.join(new_path, new_frm_file)
                shutil.copy(frm_file[2], new_frm)
            else:
                shutil.copy(frm_file[2], new_path)
        except:
            _, e, _ = sys.exc_info()
            print("ERROR: {0}".format(e))

        # Set permissions on copied file if user context in play
        if user_change_as_root(options):
            subprocess.call(['chown', '-R', user, new_path])
            subprocess.call(['chgrp', '-R', user, new_path])

        server.exec_query("CREATE DATABASE IF NOT EXISTS %s" % db_name)

        frm = FrmReader(db_name, frm_file[1], new_frm, options)
        frm_type = frm.get_type()
        server.exec_query("FLUSH TABLES")
        if frm_type == "TABLE":
            # 2) change engine if it is a table
            current_engine = frm.change_storage_engine()

            # Abort read if restricted engine found
            if current_engine[1].upper() in _CANNOT_READ_ENGINE:
                print ("ERROR: Cannot process tables with the %s storage "
                       "engine. Please use the diagnostic mode to read the "
                       "%s file." % (current_engine[1].upper(), frm_file[1]))
                return frm_file[2]

            # Check server version
            server_version = None
            if version and len(current_engine) > 1 and current_engine[2]:
                server_version = (int(current_engine[2][0]),
                                  int(current_engine[2][1:3]),
                                  int(current_engine[2][3:]))
                if verbosity > 1 and not quiet:
                    print ("# Server version in file: %s.%s.%s" %
                           server_version)
                if not server.check_version_compat(server_version[0],
                                                   server_version[1],
                                                   server_version[2]):
                    versions = (server_version[0], server_version[1],
                                server_version[2], version[0], version[1],
                                version[2])
                    print ("ERROR: The server version for this "
                           "file is too low. It requires a server version "
                           "%s.%s.%s or higher but your server is version "
                           "%s.%s.%s. Try using a newer server or use "
                           "diagnostic mode." % versions)
                    return frm_file[2]

            # 3) show CREATE TABLE
            res = server.exec_query("SHOW CREATE TABLE `%s`.`%s`" %
                                    (db_name, frm_file[1]))
            create_str = res[0][1]
            if new_engine:
                create_str = create_str.replace("ENGINE=MEMORY",
                                                "ENGINE=%s" % new_engine)
            elif not current_engine[1].upper() == "MEMORY":
                create_str = create_str.replace("ENGINE=MEMORY",
                                                "ENGINE=%s" %
                                                current_engine[1])
            if frm_file[0] and not frm_file[0] == ".":
                create_str = create_str.replace("CREATE TABLE ",
                                                "CREATE TABLE `%s`." %
                                                frm_file[0])

            # if requested, generate the new .frm with the altered engine
            if new_engine:
                server.exec_query("ALTER TABLE `{0}`.`{1}` "
                                  "ENGINE={2}".format(db_name,
                                                      frm_file[1],
                                                      new_engine))
                new_frm_file = os.path.join(frm_dir,
                                            "{0}.frm".format(frm_file[1]))
                if os.path.exists(new_frm_file):
                    print("#\n# WARNING: Unable to create new .frm file. "
                          "File exists.")
                else:
                    try:
                        shutil.copyfile(new_frm, new_frm_file)
                        print("# Copy of .frm file with new storage "
                              "engine saved as {0}.".format(new_frm_file))
                    except (IOError, OSError, shutil.Error) as e:
                        print("# WARNING: Unable to create new .frm file. "
                              "Error: {0}".format(e))

        elif frm_type == "VIEW":
            # 5) show CREATE VIEW
            res = server.exec_query("SHOW CREATE VIEW %s.%s" %
                                    (db_name, frm_file[1]))
            create_str = res[0][1]
            if frm_file[0]:
                create_str = create_str.replace("CREATE VIEW ",
                                                "CREATE VIEW `%s`." %
                                                frm_file[0])

        # Now we must replace the string for storage engine!
        print "#\n# CREATE statement for %s:\n#\n" % frm_file[2]
        print create_str
        print

        if frm_type == "TABLE" and options.get("show_stats", False):
            frm.show_statistics()

    except:
        print ("ERROR: Failed to correctly read the .frm file. Please try "
               "reading the file with the --diagnostic mode.")
        return frm_file[2]

    return None
% versions) return frm_file[2] # 3) show CREATE TABLE res = server.exec_query("SHOW CREATE TABLE `%s`.`%s`" % (db_name, frm_file[1])) create_str = res[0][1] if new_engine: create_str = create_str.replace("ENGINE=MEMORY", "ENGINE=%s" % new_engine) elif not current_engine[1].upper() == "MEMORY": create_str = create_str.replace("ENGINE=MEMORY", "ENGINE=%s" % current_engine[1]) if frm_file[0] and not frm_file[0] == ".": create_str = create_str.replace("CREATE TABLE ", "CREATE TABLE `%s`." % frm_file[0]) # if requested, generate the new .frm with the altered engine if new_engine: server.exec_query("ALTER TABLE `{0}`.`{1}` " "ENGINE={2}".format(db_name, frm_file[1], new_engine)) new_frm_file = os.path.join(frm_dir, "{0}.frm".format(frm_file[1])) if os.path.exists(new_frm_file): print("#\n# WARNING: Unable to create new .frm file. " "File exists.") else: try: shutil.copyfile(new_frm, new_frm_file) print("# Copy of .frm file with new storage " "engine saved as {0}.".format(new_frm_file)) except (IOError, OSError, shutil.Error) as e: print("# WARNING: Unable to create new .frm file. " "Error: {0}".format(e)) elif frm_type == "VIEW": # 5) show CREATE VIEW res = server.exec_query("SHOW CREATE VIEW %s.%s" % (db_name, frm_file[1])) create_str = res[0][1] if frm_file[0]: create_str = create_str.replace("CREATE VIEW ", "CREATE VIEW `%s`." % frm_file[0]) # Now we must replace the string for storage engine! print "#\n# CREATE statement for %s:\n#\n" % frm_file[2] print create_str print if frm_type == "TABLE" and options.get("show_stats", False): frm.show_statistics() except: print ("ERROR: Failed to correctly read the .frm file. Please try " "reading the file with the --diagnostic mode.") return frm_file[2] return None
953b97df9f7f01540d5f61ef8099282d4aab26d6
3,649,155
def delete_driver_vehicle(driver):
    """Remove the vehicle assigned to a driver record."""
    try:
        driver.vehicle = None
        driver.save()
        return driver, "success"
    except Exception as err:
        logger.error("deleteVehicleForDriverRecord@error")
        logger.error(err)
        return None, str(err)
e6de3c9d1ae0ce0ac2022fe8ce38c2eccbe3b8df
3,649,156
def passivity(s: npy.ndarray) -> npy.ndarray:
    """
    Passivity metric for a multi-port network.

    A metric which is proportional to the amount of power lost in a
    multiport network, depending on the excitation port. Specifically,
    this returns a matrix whose diagonals are equal to the total
    power received at all ports, normalized to the power at a single
    excitement port.

    Mathematically, this is a test for unitary-ness of the
    s-parameter matrix [#]_.

    For a two-port network this yields the pair

    .. math::

        \\sqrt{|S_{11}|^2 + |S_{21}|^2} \\quad , \\quad
        \\sqrt{|S_{22}|^2 + |S_{12}|^2}

    In general it is

    .. math::

        \\sqrt{S^H \\cdot S}

    where :math:`H` is the conjugate transpose of S, and :math:`\\cdot`
    is the dot product.

    Note
    ----
    The total amount of power dissipated in a network depends on the
    port matches. For example, given a matched attenuator, this metric
    will yield the attenuation value. However, if the attenuator is
    cascaded with a mismatch, the power dissipated will not be equivalent
    to the attenuator value, nor equal for each excitation port.

    Returns
    -------
    passivity : :class:`numpy.ndarray` of shape fxnxn

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
    """
    if s.shape[-1] == 1:
        raise (ValueError('Doesn\'t exist for one-ports'))

    pas_mat = s.copy()
    for f in range(len(s)):
        pas_mat[f, :, :] = npy.sqrt(npy.dot(s[f, :, :].conj().T, s[f, :, :]))

    return pas_mat
9b3629aae603d8de97982113333b87bb021972e4
3,649,157
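A quick numeric sketch: for an ideal matched 3 dB attenuator, the diagonal of this metric recovers the attenuation (values illustrative).

import numpy as npy

a = 10 ** (-3 / 20.)                # |S21| of a 3 dB matched attenuator
s = npy.array([[[0, a], [a, 0]]])   # a single frequency point, 2x2
print(passivity(s)[0].diagonal())   # ~ [0.708, 0.708], i.e. -3 dB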
import numpy as np


def sample_distance(sampleA, sampleB, sigma):
    """
    RBF-based distance between two samples: 1 minus the Gaussian (RBF)
    similarity. (I know this isn't the best distance measure, alright.)
    """
    # RBF!
    gamma = 1 / (2 * sigma**2)
    similarity = np.exp(-gamma*(np.linalg.norm(sampleA - sampleB)**2))
    distance = 1 - similarity
    return distance
1f1bb56d8e1876c9c9ab6b1d1db26ff549d86b81
3,649,158
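A worked example: identical samples sit at distance 0, and the distance approaches 1 as samples separate (sigma sets the falloff).

import numpy as np

a, b = np.array([0., 0.]), np.array([1., 0.])
print(sample_distance(a, a, sigma=1.0))  # 0.0
print(sample_distance(a, b, sigma=1.0))  # 1 - exp(-0.5) ~ 0.393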
from typing import Dict from typing import Set def get_filters(query_metadata: QueryMetadataTable) -> Dict[VertexPath, Set[FilterInfo]]: """Get the filters at each VertexPath.""" filters: Dict[VertexPath, Set[FilterInfo]] = {} for location, _ in query_metadata.registered_locations: filter_infos = query_metadata.get_filter_infos(location) filters.setdefault(_get_location_vertex_path(location), set()).update(filter_infos) return filters
79ef7accd1c8e1d100f48eb7086c842429a4a513
3,649,160
import numpy


def auxiliary_equations(*, F, T_degC, I_sc_A_0, I_rs_1_A_0, n_1_0, I_rs_2_0_A, n_2_0, R_s_Ohm_0, G_p_S_0,
                        E_g_eV_0, N_s, T_degC_0=T_degC_stc):
    """
    Computes the auxiliary equations at F and T_degC for the 8-parameter DDM-G.

    Inputs (any broadcast-compatible combination of scalars and numpy arrays):
        Same as current_sum_at_diode_node().

    Outputs (device-level, at each combination of broadcast inputs, return type is numpy.float64 for all scalar inputs):
        dict containing:
            I_ph_A photocurrent
            I_rs_1_A first diode reverse-saturation current
            n_1 first diode ideality factor
            I_rs_2_A second diode reverse-saturation current
            n_2 second diode ideality factor
            R_s_Ohm series resistance
            G_p_S parallel conductance
            N_s integer number of cells in series in each parallel string
            T_degC temperature
    """
    # Temperatures must be in Kelvin.
    T_K = convert_temperature(T_degC, 'Celsius', 'Kelvin')
    T_K_0 = convert_temperature(T_degC_0, 'Celsius', 'Kelvin')

    # Optimization.
    V_therm_factor_V_0 = (N_s * k_B_J_per_K * T_K_0) / q_C

    # Compute variables at operating condition.

    # Compute band gap (constant).
    E_g_eV = E_g_eV_0

    # Compute first diode ideality factor (constant).
    n_1 = n_1_0

    # Compute first reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
    I_rs_1_A = I_rs_1_A_0 * (T_K / T_K_0)**3 * numpy.exp(E_g_eV / (n_1 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))

    # Compute second diode ideality factor (constant).
    n_2 = n_2_0

    # Compute second reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
    I_rs_2_A = I_rs_2_0_A * (T_K / T_K_0)**(5/2) * numpy.exp(E_g_eV / (n_2 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))

    # Compute series resistance (constant).
    R_s_Ohm = R_s_Ohm_0

    # Compute parallel conductance (constant).
    G_p_S = G_p_S_0

    # Compute parallel conductance (photo-conductive shunt).
    # G_p_S = F * G_p_S_0

    # Compute photo-generated current at F and T_degC (V=0 with I=Isc for this).
    expr1 = I_sc_A_0 * F
    expr2 = expr1 * R_s_Ohm
    I_ph_A = expr1 + I_rs_1_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_1)) + \
        I_rs_2_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_2)) + G_p_S * expr2

    return {'I_ph_A': I_ph_A, 'I_rs_1_A': I_rs_1_A, 'n_1': n_1, 'I_rs_2_A': I_rs_2_A, 'n_2': n_2,
            'R_s_Ohm': R_s_Ohm, 'G_p_S': G_p_S, 'N_s': N_s, 'T_degC': T_degC}
5bbb988a7e4415f59a56985c2c867ec1a0dc5df2
3,649,161
from typing import List
from typing import Dict


def get_highest_confidence_transcript_for_each_session(
    transcripts: List[db_models.Transcript],
) -> List[db_models.Transcript]:
    """
    Filter a list of transcript documents down to a single transcript per
    session, taking the highest confidence transcript document.

    Parameters
    ----------
    transcripts: List[db_models.Transcript]
        List of transcript database documents.

    Returns
    -------
    transcripts: List[db_models.Transcript]
        Filtered list of transcript database documents where only a single
        transcript exists for each referenced session.
    """
    # We can't use pandas groupby because session objects can't be naively compared.
    # Instead we keep a dict from session id to the best transcript document seen
    # so far, updating it as we iterate through the list of all transcripts.
    selected_transcripts: Dict[str, db_models.Transcript] = {}
    for transcript in transcripts:
        referenced_session_id = transcript.session_ref.ref.id
        if referenced_session_id not in selected_transcripts:
            selected_transcripts[referenced_session_id] = transcript

        # Multiple transcripts for a single session
        # pick the higher confidence
        elif (
            transcript.confidence
            > selected_transcripts[referenced_session_id].confidence
        ):
            selected_transcripts[referenced_session_id] = transcript

    return list(selected_transcripts.values())
fccf657c5c670d8b3e275641d411ede34af91e41
3,649,163