Dataset schema:
  content: string (35 to 762k characters)
  sha1: string (40 characters)
  id: int64 (0 to 3.66M)
def update_user(old_email, new_email=None, password=None):
    """Update the email and password of the user.

    old_email is required; new_email and password are optional. If both optional
    parameters are empty, update_user() will do nothing. Not asking for the
    current password is intentional: creating and updating are only possible
    while connected to the server via SSH. If a malicious person is on your
    server, you have bigger problems than protecting your blog account.

    :param old_email: the old email address of the user.
    :param new_email: the new email address of the user.
    :param password: the new password of the user.
    :return: True if the user was updated, even if no parameters were given.
             Otherwise it will return False if the user does not exist.
    """
    db.connect()
    try:
        user = User.get(User.email == old_email)
    except User.DoesNotExist:
        # note: db is left open on this early-return path
        print("The user: {} does not exist".format(old_email))
        return False
    old_hash = user.password
    if new_email:
        user.email = new_email
    if password:
        user.password = bcrypt.hashpw(str.encode(password), bcrypt.gensalt(12))
    user.save()
    print("The user has been updated:\n"
          "old email: {}\n"
          "new email: {}\n"
          "password has been updated: {}".format(
              old_email,
              old_email if new_email is None else new_email,
              old_hash != user.password))
    db.close()
    return True
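A minimal usage sketch (hypothetical email addresses; assumes the same db, User, and bcrypt setup as above):

# Hypothetical call: change a user's address and rotate the password.
if update_user("old@example.com", new_email="new@example.com", password="s3cret"):
    print("user updated")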
cb8f23ccd2d9e0d0b390358ef440af28d67e549d
29,360
from typing import Optional


def get_text(text_node: Optional[ET.Element]) -> Optional[str]:
    """Return stripped text from node, None otherwise."""
    if text_node is None:
        return None
    if not text_node.text:
        return None
    return text_node.text.strip()
2bb7c8ae6500d9a8ca5ef6be09dbf3abfc04a013
29,361
def function_d(d, d1, d2=1):
    """doc string"""
    return d + d1 + d2
92d3bb788191612c6a67f67a05bd703a02f43a04
29,362
from unittest.mock import patch


def qgs_access_control_filter():
    """
    Mock some QgsAccessControlFilter methods:
    - __init__, which does not accept a mocked QgsServerInterface;
    - serverInterface, to return the right server_iface.
    """

    class DummyQgsAccessControlFilter:
        def __init__(self, server_iface):
            self.server_iface = server_iface

        def serverInterface(self):  # noqa: ignore=N806
            return self.server_iface

    with patch.multiple(
        "geomapfish_qgisserver.accesscontrol.QgsAccessControlFilter",
        __init__=DummyQgsAccessControlFilter.__init__,
        serverInterface=DummyQgsAccessControlFilter.serverInterface,
    ) as mocks:
        yield mocks
df84f1ff78c52376777c9238a3ee857c8c31f3d2
29,363
def ppmv2pa(x, p):
    """Convert ppmv to Pa.

    Parameters
    ----------
    x
        Gas pressure [ppmv]
    p
        Total air pressure [Pa]

    Returns
    -------
    pressure [Pa]
    """
    return x * p / (1e6 + x)
974d79d022a7fb655040c7c2900988cd4a10f064
29,364
def make_elastic_uri(schema: str, user: str, secret: str, hostname: str, port: int) -> str:
    """Make an Elasticsearch URI.

    :param schema: the schema, e.g. http or https.
    :param user: Elasticsearch username.
    :param secret: Elasticsearch secret.
    :param hostname: Elasticsearch hostname.
    :param port: Elasticsearch port.
    :return: the full Elasticsearch URI.
    """
    return f"{schema}://{user}:{secret}@{hostname}:{port}"
be959e98330913e75485006d1f4380a57e990a05
29,365
def _truncate(s: str, max_length: int) -> str:
    """Return the input string s truncated to be at most max_length characters
    long.
    """
    return s if len(s) <= max_length else s[0:max_length]
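A quick usage sketch of _truncate:

assert _truncate("hello world", 5) == "hello"
assert _truncate("hi", 5) == "hi"  # shorter strings pass through unchanged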
52c49c027057024eaa27a705a0d2c013bff7a2ce
29,366
def verify_days_of_week_struct(week, binary=False):
    """Given a dictionary, verify its keys are the correct days of the week
    and its values are lists of 24 non-negative integers (each at most 1 when
    binary is True).
    """
    if set(DAYS_OF_WEEK) != set(week.keys()):
        return False

    # Each day must be a list of ints
    for _, v in week.items():
        if not isinstance(v, list):
            return False
        if len(v) != DAY_LENGTH:
            return False

        # Every item should be an int >= 0
        for d in v:
            if not isinstance(d, int):
                return False
            if d < 0:
                return False
            if d > 1 and binary is True:
                return False

    return True
57b4b23d0b492f2fc25a0bdb9d218c6fd9deefc0
29,367
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
        from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
        to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
        float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = bert_utils.get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]

    to_shape = bert_utils.get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]

    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to* padding
    # tokens), so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask
    return mask
d0a5c11108717c1e389d0940c9740d7a0c2671f0
29,368
def execute_inspection_visits_data_source(operator_context, return_value,
                                          non_data_function_args) -> BackendResult:
    """Execute inspections when the current operator is a data source and does
    not have parents in the DAG.
    """
    # pylint: disable=unused-argument
    inspection_count = len(singleton.inspections)
    iterators_for_inspections = iter_input_data_source(
        inspection_count, return_value, operator_context, non_data_function_args)
    return_value = execute_visits_and_store_results(
        iterators_for_inspections, return_value)
    return return_value
f42ce3cb7b0900a5e7458bf0ad478843860db0f9
29,369
def parse(template, delimiters=None, name='<string>'):
    """
    Parse a template string and return a ParsedTemplate instance.

    Arguments:
      template: a template string.
      delimiters: a 2-tuple of delimiters. Defaults to the package default.

    Examples:

    >>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
    >>> print(str(parsed).replace('u', ''))  # This is a hack to get the test to pass both in Python 2 and 3.
    ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_InterpolateNode(key='name'), '!'])]
    """
    if type(template) is not str:
        raise Exception("Template is not str: %s" % type(template))
    parser = _Parser(delimiters)
    return parser.parse(template, name)
51a9da21831afb0b124cc9481f49b546a51a587a
29,370
def get_phases(t, P, t0):
    """
    Given input times, a period (or posterior dist of periods) and time of
    transit center (or posterior), returns the phase at each time t.
    From juliet =]
    """
    if type(t) is not float:
        phase = ((t - np.median(t0)) / np.median(P)) % 1
        ii = np.where(phase >= 0.5)[0]
        phase[ii] = phase[ii] - 1.0
    else:
        phase = ((t - np.median(t0)) / np.median(P)) % 1
        if phase >= 0.5:
            phase = phase - 1.0
    return phase
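A small usage sketch with synthetic values (assumes numpy imported as np, as above):

import numpy as np
t = np.linspace(0.0, 10.0, 5)           # observation times [days]
phase = get_phases(t, P=2.5, t0=1.0)    # period 2.5 d, transit center at t = 1.0
# phases lie in [-0.5, 0.5), with 0 at transit center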
8d5e821112c7fffd0766dbb0158fd2f4034ef313
29,371
async def my_profile(current_user: User = Depends(get_current_active_user)):
    """GET the current user's information."""
    return current_user
fd03fe06b9737565e338b3b3ccd5999b0da32cc1
29,372
def are_2d_vecs_collinear(u1, u2):
    """Check whether two 2D vectors are collinear."""
    # n1 is perpendicular to u1; collinearity means u2 has no component along n1.
    n1 = np.array([-u1[1], u1[0]])
    dot_prod = n1.dot(u2)
    return np.abs(dot_prod) < TOL_COLLINEAR
2861b6316a5125799a91a471129bfc1ce2e91992
29,373
import datetime
import time


def TimeFromTicks(ticks: int) -> datetime.time:  # pylint: disable=invalid-name
    """
    Constructs an object holding a time value from the given ticks value.
    Ticks should be the number of seconds since the epoch.
    """
    # Time is the DB-API 2.0 Time constructor, assumed to be defined elsewhere
    # in this module. Note: "import datetime" (not "from datetime import
    # datetime") is needed so the annotation resolves to the datetime.time class.
    return Time(*time.gmtime(ticks)[3:6])
a53564b2890080a7fbe0f406ae76c7237c92c34a
29,374
import importlib


def load_model(opt, dataloader):
    """Load model based on the model name.

    Arguments:
        opt {[argparse.Namespace]} -- options
        dataloader {[dict]} -- dataloader class

    Returns:
        [model] -- Returned model
    """
    model_name = opt.model
    model_path = f"lib.models.{model_name}"
    print('use model:', model_name)
    model_lib = importlib.import_module(model_path)
    model = getattr(model_lib, model_name.title())
    return model(opt, dataloader)
8ad05c4a0f51c40851a9daecf81ed8bf9862979c
29,375
def process(cntrl):
    """
    We have all our variables and parameters set in the object; attempt to
    login and post the data to the APIC.
    """
    if cntrl.aaaLogin() != 200:
        return (1, "Unable to login to controller")
    rc = cntrl.genericGET()
    if rc == 200:
        return (0, format_content(cntrl.get_content()))
    else:
        return (1, "%s: %s" % (rc, httplib.responses[rc]))
8e20f4b81314436e53713a418447072820e5c55b
29,376
from functools import partial
from typing import Callable, Iterator


def create_token_swap_augmenter(
    level: float, respect_ents: bool = True, respect_eos: bool = True
) -> Callable[[Language, Example], Iterator[Example]]:
    """Creates an augmenter that randomly swaps two neighbouring tokens.

    Args:
        level (float): The probability to swap two tokens.
        respect_ents (bool, optional): Should the pipeline respect entities?
            Defaults to True, in which case it will not swap a token inside an
            entity with a token outside the entity span, unless it is a
            one-word span. If False, it will disregard correcting the entity
            labels.
        respect_eos (bool, optional): Should it respect end-of-sentence
            boundaries? Defaults to True, indicating that it will not swap an
            end-of-sentence token. If False, it will disregard correcting the
            sentence starts as this becomes arbitrary.

    Returns:
        Callable[[Language, Example], Iterator[Example]]: The augmenter.
    """
    return partial(
        token_swap_augmenter,
        level=level,
        respect_eos=respect_eos,
        respect_ents=respect_ents,
    )
1fc41e75d96ea7d4153802f4e86f8ab25d7227c3
29,377
def getUnigram(str1):
    """
    Input: a string of words, e.g., 'I am Denny'
    Output: a list of unigrams
    """
    words = str1.split()
    assert type(words) == list
    return words
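A quick usage sketch:

assert getUnigram("I am Denny") == ["I", "am", "Denny"]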
d540ee199ab62c383461893e91034399d22fe6d6
29,378
def expval_and_stddev(items, exp_ops=''):
    """Compute expectation values from distributions.

    .. versionadded:: 0.16.0

    Parameters:
        items (list or dict or Counts or ProbDistribution or QuasiDistribution):
            Input distributions.
        exp_ops (str or dict or list): String or dict representation of
            diagonal qubit operators used in computing the expectation value.

    Returns:
        float: Expectation value.
        ndarray: Array of expectation values.

    Notes:
        Cannot mix Counts and dicts with M3 Distributions in the same call.
        The dict operator format is a sparse diagonal format using bitstrings
        as the keys.
    """
    return _expval_std(items, exp_ops=exp_ops, method=2)
c024797f00d87ece0b0e871530c747a93d151f3c
29,379
def newton_polish(polys, root, niter=100, tol=1e-8):
    """
    Perform Newton's method on a system of N polynomials in M variables.

    Parameters
    ----------
    polys : list
        A list of polynomial objects of the same type (MultiPower or MultiCheb).
    root : ndarray
        An initial guess for Newton's method, intended to be a candidate root
        from root_finder.
    niter : int
        A maximum number of iterations of Newton's method.
    tol : float
        Tolerance for convergence of Newton's method.

    Returns
    -------
    x1 : ndarray
        The terminal point of Newton's method, an estimation for a root of
        the system.
    """
    m = len(polys)
    dim = max(poly.dim for poly in polys)
    f_x = np.empty(m, dtype="complex_")
    jac = np.empty((m, dim), dtype="complex_")

    def f(x):
        # f_x = np.empty(m, dtype="complex_")
        for i, poly in enumerate(polys):
            f_x[i] = poly(x)
        return f_x

    def Df(x):
        # jac = np.empty((m, dim), dtype="complex_")
        for i, poly in enumerate(polys):
            jac[i] = poly.grad(x)
        return jac

    i = 0
    x0, x1 = root, root
    while True:
        if i == niter:
            break
        delta = np.linalg.solve(Df(x0), -f(x0))
        x1 = delta + x0
        if np.linalg.norm(delta) < tol:
            break
        x0 = x1
        i += 1
    return x1
cdf2b993bb34142cf82de9f7a2a003f7eb9a0a24
29,380
def COUNT(logic, n=2):
    """
    Count the number of periods within the last n that satisfy the condition.

    :param logic: boolean Series marking where the condition holds.
    :param n: window size.
    :return: rolling count over the last n periods.
    """
    return pd.Series(np.where(logic, 1, 0), index=logic.index).rolling(n).sum()
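A small usage sketch (assumes pandas as pd and numpy as np, as above):

import numpy as np
import pandas as pd
close = pd.Series([1, 2, 3, 2, 4, 5])
rising = close > close.shift(1)
COUNT(rising, n=3)  # rolling count of rising bars over the last 3 periods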
e175629e301152978e5d9a46caa4921e080048a8
29,381
import yaml


def get_model(name):
    """
    Get the wrapped model given the name. Currently supported names:
        "COCO-Detection/retinanet_R_50_FPN";
        "COCO-Detection/retinanet_R_101_FPN";
        "COCO-Detection/faster_rcnn_R_50_FPN";
        "COCO-Detection/faster_rcnn_R_101_FPN";
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN";
        "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN".

    Args:
    -- name (string): model name.

    Returns:
    -- model: wrapped model with visualization function.
    -- args: visualization config.
    -- cfg: detector cfg.
    -- predictor: D2 default predictor instance.
    """
    # Per-model settings: (visualization config file, D2 config name, wrapper).
    settings = {
        "COCO-Detection/retinanet_R_50_FPN": (
            "./config/retina.yaml", "COCO-Detection/retinanet_R_50_FPN_3x.yaml", warp_retina),
        "COCO-Detection/retinanet_R_101_FPN": (
            "./config/retina.yaml", "COCO-Detection/retinanet_R_101_FPN_3x.yaml", warp_retina),
        "COCO-Detection/faster_rcnn_R_50_FPN": (
            "./config/fasterrcnn.yaml", "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml", warp_rcnn),
        "COCO-Detection/faster_rcnn_R_101_FPN": (
            "./config/fasterrcnn.yaml", "COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml", warp_rcnn),
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN": (
            "./config/maskrcnn.yaml", "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml", warp_rcnn),
        "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN": (
            "./config/maskrcnn.yaml", "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml", warp_rcnn),
    }
    vis_config, name_, wrapper = settings[name]
    with open(vis_config, 'r') as stream:
        args = config(yaml.load(stream, Loader=yaml.FullLoader))
    predictor, cfg = load_default_predictor(name_)
    model = wrapper(predictor.model)
    model.eval()
    return model, args, cfg, predictor
fe1842950c93d790623d6ba072c5cab48eb03eb9
29,382
def energy_sensor(
    gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
    """Load the energy sensor."""
    nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
    node = nodes[1]
    return node
8e2560aa8b442c94fb39d602b100a7aa8757de84
29,384
def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos):
    """Computes the average of all the metrics for one decoding.

    Args:
        iterator: dataset iterator.
        feed_dict: feed dict to initialize iterator.
        num_videos: number of videos.

    Returns:
        all_psnr: 2-D Numpy array, shape=(num_samples, num_frames)
        all_ssim: 2-D Numpy array, shape=(num_samples, num_frames)
    """
    output, target = iterator.get_next()
    metrics = psnr_and_ssim(output, target)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        initializer = iterator._initializer  # pylint: disable=protected-access
        if initializer is not None:
            sess.run(initializer, feed_dict=feed_dict)

        all_psnr, all_ssim = [], []
        for i in range(num_videos):
            print("Computing video: %d" % i)
            psnr_np, ssim_np = sess.run(metrics)
            all_psnr.append(psnr_np)
            all_ssim.append(ssim_np)
        all_psnr = np.array(all_psnr)
        all_ssim = np.array(all_ssim)
        return all_psnr, all_ssim
8acd5cd1b564d22b26ebfd0ddd40fb76e90aa9a4
29,385
def geocode_locations(df: gpd.GeoDataFrame, loc_col: str):
    """
    Geocode location names into polygon coordinates.

    Parameters
    ----------
    df: Geopandas DataFrame
    loc_col: str
        Name of the column in df which contains locations.

    Returns
    -------
    df with added "geometry" and "address" columns.
    """
    locations = geocode(df.loc[:, loc_col])
    df["geometry"] = locations.loc[:, "geometry"]
    df["address"] = locations.loc[:, "address"]
    return df
c1ba4edfa31ca4d7d6a2e01a5c3342025936b085
29,388
import json


def get_data():
    """Get dummy data returned from the server."""
    jwt_data = get_jwt()  # fetched for the side effect; the payload itself is unused
    data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
    json_response = json.dumps(data)
    return Response(json_response, status=Status.HTTP_OK_BASIC,
                    mimetype='application/json')
c3df0a63dbb06822bbea1278c539ab1386e59d99
29,389
import pandas


def coerce_integer(df):
    """
    Loop through the columns of a df; if a column is numeric, convert it to
    integer and fill NaNs with zeros. This is somewhat heavy-handed in an
    attempt to force Esri to recognize sparse columns as integers.
    """
    # Numeric columns to not coerce to integer
    EXCEPT = ["latitude", "longitude", "zipCode"]

    def numeric_column_to_int(series):
        return (
            series.fillna(0).astype(int)
            if pandas.api.types.is_numeric_dtype(series)
            and series.name not in EXCEPT
            else series
        )

    return df.transform(numeric_column_to_int, axis=0)
d4b5963378a10a4bde6f7e1e2111908b83d90b7d
29,390
def read_cfg(floc, cfg_proc=process_cfg):
    """
    Reads the given configuration file, returning a dict with the converted
    values supplemented by default values.

    :param floc: The location of the file to read.
    :param cfg_proc: The processor to use for the raw configuration values.
        Uses default values when the raw value is missing.
    :return: A dict of the processed configuration file's data.
    """
    config = ConfigParser()
    good_files = config.read(floc)

    if good_files:
        main_proc = cfg_proc(dict(config.items(MAIN_SEC)),
                             def_cfg_vals=DEF_CFG_VALS, req_keys=REQ_KEYS)
        if main_proc[NUM]:
            main_proc[NUM] = int(main_proc[NUM])
    else:
        main_proc = {GAU_TPL_FILE: None, CONFIG_NAME: floc}
        for key, def_val in DEF_CFG_VALS.items():
            main_proc[key] = def_val

    main_proc[DIH_DATA] = []
    if main_proc[DIH_ROT] is not None:
        try:
            dih_list = main_proc[DIH_ROT].split(";")
            for dih in dih_list:
                dih_data = dih.split(",")
                if len(dih_data) != 5:
                    raise IndexError
                # note: RDKit is zero-based with atom indices, thus subtracting one from each number
                dih_data[:4] = [int(x) - 1 for x in dih_data[:4]]
                # noinspection PyTypeChecker
                dih_data[4] = float(dih_data[4])
                main_proc[DIH_DATA].append(dih_data)
        except (ValueError, IndexError):
            raise InvalidDataError(
                "Error in parsing dihedral entry. Enter multiple dihedrals by "
                "separating data with a semicolon (';'). Each dihedral should be "
                "specified with 5 values, where the first four are one-based "
                "integer atom ids, and the last value is the rotation increment "
                "in degrees.")
    if main_proc[MAX_CONF]:
        main_proc[MAX_CONF] = int(main_proc[MAX_CONF])
    return main_proc
1ec276ad434ce36e32fab73b1cc65c05a14e032a
29,391
from datetime import datetime


def convert_date(raw_date: str, dataserver=True):
    """
    Convert a raw date field into a value interpretable by the dataserver.
    The raw date is in YYYYMMDD format.
    """
    date = datetime.strptime(raw_date, "%Y%m%d")
    if not dataserver:
        return date.strftime("%m/%d/%Y")
    return date.strftime("%m/%d/%YZ")
6fc9ec6bf5a336998e4bd9752abb8804251d8c33
29,393
# NOTE: re.VERBOSE is a regex flag (always truthy); a module-level boolean
# debug flag was probably intended here.
from re import VERBOSE


def getComputerMove(board):
    """
    Given a board and the computer's letter, determine where to move and
    return that move.

    Here is our algorithm for our Tic Tac Toe AI:
    """
    copy = getBoardCopy(board)
    for i in range(1, NUMBER_SPACES):
        if isSpaceFree(copy, i):
            # Play out the next move on a new copy of the board so we don't
            # affect the actual game
            makeMove(copy, COMPUTER_LETTER, i)
            # Check if the computer could win on their next move, and take it.
            if isWinner(copy, COMPUTER_LETTER):
                if VERBOSE:
                    print("Computer Decision 1: Best Move For Computer")
                return i
            # Check if the player could win on their next move, and block them.
            makeMove(copy, PLAYER_LETTER, i)
            if isWinner(copy, PLAYER_LETTER):
                if VERBOSE:
                    print("Computer Decision 2: Block Players Best Move")
                return i

    # Try to take one of the corners, if they are free.
    computer_next_move = chooseRandomMoveFromList(board, [1, 3, 7, 9])
    if computer_next_move is not None:
        if VERBOSE:
            print("Computer Decision 3: Go For A Corner")
        return computer_next_move

    # Try to take the center, if it is free.
    if isSpaceFree(board, 5):
        if VERBOSE:
            print("Computer Decision 4: Take The Center")
        return 5

    # Move on one of the sides.
    if VERBOSE:
        print("Computer Decision 5: Take A Side")
    return chooseRandomMoveFromList(board, [2, 4, 6, 8])
83328215ca64170ec88c577ae41fcbd0e2076c47
29,394
from .. import Plane


def triangular_prism(p1, p2, p3, height, ret_unique_vertices_and_faces=False):
    """
    Tessellate a triangular prism whose base is the triangle `p1`, `p2`, `p3`.
    If the vertices are oriented in a counterclockwise direction, the prism
    extends from behind them.

    Args:
        p1 (np.ndarray): A 3D point on the base of the prism.
        p2 (np.ndarray): A 3D point on the base of the prism.
        p3 (np.ndarray): A 3D point on the base of the prism.
        height (float): The height of the prism, which should be positive.
        ret_unique_vertices_and_faces (bool): When `True` return a vertex
            array containing the unique vertices and an array of faces (i.e.
            vertex indices). When `False`, return a flattened array of
            triangle coordinates.

    Returns:
        object:

        - With `ret_unique_vertices_and_faces=True`: a tuple containing an
          `6x3` array of vertices and a `8x3` array of triangle faces.
        - With `ret_unique_vertices_and_faces=False`: a `8x3x3` matrix of
          flattened triangle coordinates.
    """
    vg.shape.check(locals(), "p1", (3,))
    vg.shape.check(locals(), "p2", (3,))
    vg.shape.check(locals(), "p3", (3,))
    if not isinstance(height, float):
        raise ValueError("`height` should be a number")

    base_plane = Plane.from_points(p1, p2, p3)
    lower_base_to_upper_base = height * -base_plane.normal
    vertices = np.vstack(([p1, p2, p3], [p1, p2, p3] + lower_base_to_upper_base))

    faces = np.array(
        [
            [0, 1, 2],  # base
            [0, 3, 4], [0, 4, 1],  # side 0, 3, 4, 1
            [1, 4, 5], [1, 5, 2],  # side 1, 4, 5, 2
            [2, 5, 3], [2, 3, 0],  # side 2, 5, 3, 0
            [5, 4, 3],  # base
        ],
    )

    return _maybe_flatten(vertices, faces, ret_unique_vertices_and_faces)
99ecdc6054dba1f2b955b08bf082636cac546fb8
29,395
def chain_species_base(base, basesite, subunit, site1, site2, size, comp=1):
    """
    Return a MonomerPattern representing a chained species, chained to a base
    complex.

    Parameters
    ----------
    base : Monomer or MonomerPattern
        The base complex to which the growing chain will be attached.
    basesite : string
        Name of the site on complex where first subunit binds.
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the chain.
    comp : optional; a ComplexPattern to which the base molecule is attached.

    Returns
    -------
    A ComplexPattern corresponding to the chain.

    Notes
    -----
    Similar to pore_species, but never closes the chain.

    Examples
    --------
    Get the ComplexPattern object representing a chain of size 4 bound to a
    base, which is itself bound to a complex:

        Model()
        Monomer('Base', ['b1', 'b2'])
        Monomer('Unit', ['p1', 'p2'])
        Monomer('Complex1', ['s1'])
        Monomer('Complex2', ['s1', 's2'])
        chain_tetramer = chain_species_base(Base(b1=1, b2=ANY), 'b1', Unit,
                                            'p1', 'p2', 4,
                                            Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> Monomer('Base', ['b1', 'b2'])
        Monomer('Base', ['b1', 'b2'])
        >>> Monomer('Complex1', ['s1'])
        Monomer('Complex1', ['s1'])
        >>> Monomer('Complex2', ['s1', 's2'])
        Monomer('Complex2', ['s1', 's2'])
        >>> chain_species_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 4, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))
        MatchOnce(Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=4) % Unit(p1=4, p2=None))

    """
    _verify_sites(base, basesite)
    _verify_sites(subunit, site1, site2)
    if size <= 0:
        raise ValueError("size must be an integer greater than 0")
    if comp == 1:
        compbase = base({basesite: 1})
    else:
        compbase = comp % base({basesite: 1})
    if size == 1:
        chainlink = compbase % subunit({site1: 1, site2: None})
    elif size == 2:
        chainlink = compbase % subunit({site1: 1, site2: 2}) % \
            subunit({site1: 2, site2: None})
    else:
        # build up a ComplexPattern, starting with a single subunit
        chainbase = compbase
        chainlink = chainbase % subunit({site1: 1, site2: 2})
        for i in range(2, size):
            chainlink %= subunit({site1: i, site2: i + 1})
        chainlink %= subunit({site1: size, site2: None})
    chainlink.match_once = True
    return chainlink
b7b619a810b7d84ee64a92bcda4b4578d797be63
29,396
def find_one_item(itemname):
    """
    GET the one item in the shop whose title matches itemname.

    :param itemname: The title to look for in the shop.
    :type itemname: str
    :return: dict(str, Decimal, int). A dict representing the requested item.
    :raise: werkzeug.exceptions.NotFound
    """
    try:
        return MY_SHOP.get(itemname).dict()
    except KeyError:
        abort(404, "There's no product named {}!".format(itemname))
cec13fec0489489660da375e7b7fc2168324909f
29,397
import string


def PromGraph(data_source, title, expressions, **kwargs):
    """Create a graph that renders Prometheus data.

    :param str data_source: The name of the data source that provides
        Prometheus data.
    :param title: The title of the graph.
    :param expressions: List of tuples of (legend, expr), where 'expr' is a
        Prometheus expression. Or a list of dicts where keys are Target's args.
    :param kwargs: Passed on to Graph.
    """
    letters = string.ascii_uppercase
    expressions = list(expressions)
    if len(expressions) > len(letters):
        raise ValueError(
            'Too many expressions. Can support at most {}, but got {}'.format(
                len(letters), len(expressions)))
    if all(isinstance(expr, dict) for expr in expressions):
        targets = [
            G.Target(refId=refId, **args)
            for (args, refId) in zip(expressions, letters)]
    else:
        targets = [
            G.Target(expr=expr, legendFormat=legend, refId=refId)
            for ((legend, expr), refId) in zip(expressions, letters)]
    return G.Graph(
        title=title,
        dataSource=data_source,
        targets=targets,
        **kwargs
    )
7a2a8d0902bc9ef2fcc03e16678c4a40976bdb0e
29,398
def get_maxlevel(divs, maxlevel):
    """Return the maximum div level."""
    for info in divs:
        if info['level'] > maxlevel:
            maxlevel = info['level']
        if info.get('subdivs', None):
            maxlevel = get_maxlevel(info['subdivs'], maxlevel)
    return maxlevel
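A usage sketch with a hypothetical nested div structure:

divs = [
    {'level': 1, 'subdivs': [{'level': 2, 'subdivs': [{'level': 3}]}]},
    {'level': 1},
]
assert get_maxlevel(divs, 0) == 3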
b7153ef84cb260a4b48c58315aa63fc5179fc06c
29,400
def prenut(ra, dec, mjd, degrees=True):
    """Precess coordinate system to FK5 J2000.

    args:
        ra      - arraylike, right ascension
        dec     - arraylike, declination
        mjd     - arraylike
        degrees - bool, True if ra/dec are given in degrees
    """
    if degrees:
        c = np.pi / 180.
    else:
        c = 1.
    raout = ra.astype(np.float64) * c
    decout = dec.astype(np.float64) * c
    pysla.prenut(raout, decout, mjd.astype(np.float64))
    return raout / c, decout / c
99cedddc4beacc99c2360ba835f39d927118c683
29,401
def make_model_and_optimizer(conf):
    """Define the model and optimizer for a config dictionary.

    Args:
        conf: Dictionary containing the output of hierarchical argparse.

    Returns:
        model, optimizer.

    The main goal of this function is to make reloading for resuming and
    evaluation very simple.
    """
    model = TasNet(conf["filterbank"], conf["masknet"])
    # Define optimizer of this model
    optimizer = make_optimizer(model.parameters(), **conf["optim"])
    return model, optimizer
93a0a7c8f5b31571c5d8a5130c5fd1de0f046adc
29,402
import numpy

# NOTE: the original import here was "from re import T", which names a regex
# flag, not a typing module. T is assumed to be the package's typing module
# that provides the Dtype and Tensor aliases, so the annotations are left as
# strings below to keep the function importable.


def trange(start: int, end: int, step: int = 1, dtype: "T.Dtype" = None) -> "T.Tensor":
    """
    Generate a tensor like a python range.

    Args:
        start: The start of the range.
        end: The end of the range.
        step: The step of the range.
        dtype: Element dtype of the result.

    Returns:
        tensor: A vector ranging from start to end in increments of step.
                Cast to float rather than int.
    """
    return numpy.arange(start, end, step, dtype=dtype)
140a88069503f2bb372b8f18796a56bb41e465f3
29,403
def get_molecules(topology):
    """Group atoms into molecules."""
    if 'atoms' not in topology:
        return None
    molecules = {}
    for atom in topology['atoms']:
        idx, mol_id, atom_type, charge = atom[0], atom[1], atom[2], atom[3]
        if mol_id not in molecules:
            molecules[mol_id] = {'atoms': [], 'types': [], 'charge': []}
        molecules[mol_id]['atoms'].append(idx)
        molecules[mol_id]['types'].append(atom_type)
        molecules[mol_id]['charge'].append(charge)
    return molecules
4bf63000c9d5b56bb9d35922ed521ce81cf3a6c1
29,404
def disk_partitions(all):
    """Return disk partitions."""
    rawlist = _psutil_mswindows.get_disk_partitions(all)
    return [nt_partition(*x) for x in rawlist]
9c888e365fbd43bacb7ae2a9e025abdf14efcbff
29,405
# note: Literal here is rdflib's RDF term class (it is called with language
# and datatype arguments below), not typing.Literal as the original import said.
from rdflib import Literal


def simple_dataset() -> Dataset:
    """
    This is a simple dataset with no BNodes that can be used in tests.
    Assumptions/assertions should not be made about the quads in it, other
    than that it contains no blank nodes.
    """
    graph = Dataset()
    graph.default_context.add((EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object))
    graph.default_context.add((EGURN.subject, EGURN.predicate, EGURN.object))
    graph.default_context.add((EGHTTP.subject, EGHTTP.predicate, Literal("typeless")))
    graph.get_context(EGSCHEMA.graph).add(
        (EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object)
    )
    graph.get_context(EGSCHEMA.graph).add(
        (EGSCHEMA.subject, EGSCHEMA.predicate, Literal(12))
    )
    graph.get_context(EGSCHEMA.graph).add(
        (
            EGHTTP.subject,
            EGHTTP.predicate,
            Literal("日本語の表記体系", lang="jpx"),
        )
    )
    graph.get_context(EGSCHEMA.graph).add(
        (EGURN.subject, EGSCHEMA.predicate, EGSCHEMA.subject)
    )
    graph.get_context(EGURN.graph).add(
        (EGSCHEMA.subject, EGSCHEMA.predicate, EGSCHEMA.object)
    )
    graph.get_context(EGURN.graph).add(
        (EGSCHEMA.subject, EGHTTP.predicate, EGHTTP.object)
    )
    graph.get_context(EGURN.graph).add(
        (EGSCHEMA.subject, EGHTTP.predicate, Literal("XSD string", datatype=XSD.string))
    )
    return graph
ee897b222d95bc1e92160f925679b9c864c3674a
29,406
import re
import unicodedata


def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py

    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    # Project-specific substitutions applied before slugifying.
    value = str(value).replace('-', 'ng').replace('.', 'pt')
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')
45a01d4552de0094b56b40a9c13102e59af32b5b
29,407
def is_between(start, stop, p):
    """Given three points, check whether the query point p is between the
    other two points.

    Arguments:
    ----------
    start: array(shape=(D, 1))
    stop: array(shape=(D, 1))
    p: array(shape=(D, 1))
    """
    # Make sure that the inputs are vectors
    assert_col_vectors(start, stop)
    assert_col_vectors(stop, p)

    # First make sure that the three points are collinear
    # if not is_collinear(start, p, stop):
    #     return False

    v0 = p - start
    v1 = stop - p
    v2 = stop - start

    # Check that p is between start and stop
    dot = np.dot(v2.reshape(1, -1), v0)

    # Check that the total distance is equal to the distance from start-point
    # and from point-stop
    d = distance(stop, start)
    d1 = distance(start, p)
    d2 = distance(stop, p)

    if dot < 0 or not np.allclose(d1 + d2, d):
        return False
    return True
f7cf20420115a71fb66ce5f8f8045163fa34c7ff
29,408
def get_elasticsearch_type():
    """Get the name of the main Elasticsearch type used."""
    return settings.ELASTICSEARCH_TYPE
889cb6e698f88c38229b908dd92d8933ec36ba8e
29,409
from subprocess import PIPE, Popen


def run_cmd_code(cmd, directory='/'):
    """Same as run_cmd but it also returns the return code.

    Parameters
    ----------
    cmd : string
        command to run in a shell
    directory : string, defaults to '/'
        directory where to run the command

    Returns
    -------
    std_out, std_err, return_code
        a triplet with standard output, standard error output and return code.
        std_out : string
        std_err : string
        return_code : int
    """
    pipe = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
    out, error = pipe.communicate()
    return_code = pipe.wait()
    return out, error, return_code
0e001d36df6e0b9b39827f36e1bda369e2185adf
29,410
def unpack_le32(data):
    """
    Unpacks a little-endian 32-bit value from a bytearray.

    :param data: 32-bit little-endian bytearray representation of an integer
    :return: integer value
    """
    _check_input_array(data, 4)
    return data[0] + (data[1] << 8) + (data[2] << 16) + (data[3] << 24)
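A quick usage sketch (assuming _check_input_array accepts a 4-byte input, as above):

assert unpack_le32(bytearray([0x78, 0x56, 0x34, 0x12])) == 0x12345678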
c1cdd8f71dbb03769a2e681948300436e6cd735f
29,411
def InductionsFromPrescribedCtCq_ST(vr_bar, Ct, Cq, Lambda, bSwirl):
    """
    Returns the stream tube theory inductions based on a given Ct and Cq.
    Based on script fGetInductions_Prescribed_CT_CQ_ST
    """
    lambda_r = Lambda * vr_bar
    # --- Stream Tube theory
    a_ST = 1 / 2 * (1 - np.sqrt(1 - Ct))
    if bSwirl:
        a_prime_ST = Cq / (4 * (1 - a_ST) * lambda_r)
        # a_prime_ST = 0.5*(sqrt(1+(4*a_ST.*(1-a_ST)./lambda_r.^2))-1);
    else:
        a_prime_ST = 0
    return a_ST, a_prime_ST
6b254056b70d65dc20f89811e4938dd7ad5323f6
29,412
from typing import Union, get_args, get_origin


def unwrap_Optional_type(t: type) -> type:
    """Given an Optional[...], return the wrapped type."""
    if get_origin(t) is Union:
        # Optional[...] = Union[..., NoneType]
        args = tuple(a for a in get_args(t) if a is not type(None))
        if len(args) == 1:
            return args[0]
        else:
            return Union[args]
    return t
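A quick usage sketch:

from typing import Optional, Union
assert unwrap_Optional_type(Optional[int]) is int
assert unwrap_Optional_type(Optional[Union[int, str]]) == Union[int, str]
assert unwrap_Optional_type(str) is str  # non-Optional types pass through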
6ffd9fa6dc95ba669b0afd23a36a2975e29c10da
29,413
def _normalize_angle(x, zero_centered=True):
    """Normalize angles.

    Take angles in radians and normalize them to [-pi, pi) or [0, 2 * pi)
    depending on `zero_centered`.
    """
    if zero_centered:
        return (x + np.pi) % (2 * np.pi) - np.pi
    else:
        return x % (2 * np.pi)
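A quick usage sketch (assumes numpy as np, as above):

import numpy as np
assert np.isclose(_normalize_angle(3 * np.pi), -np.pi)
assert np.isclose(_normalize_angle(3 * np.pi, zero_centered=False), np.pi)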
2e73a9fb20743f4721c954a48ca838c1eaca5edd
29,415
def g_fam(arr):
    """Return a list where each element is the logical OR of each pair of
    adjacent elements in arr.
    """
    aux = 0
    hol = []
    while aux + 1 < len(arr):
        if arr[aux] or arr[aux + 1]:
            hol.append(True)
        else:
            hol.append(False)
        aux += 1
    return hol
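A quick usage sketch:

assert g_fam([True, False, False, True]) == [True, False, True]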
4f0ed0d4ba205ef205579a2b150250760e7b38fe
29,416
import time


def main_archive(args, cfg: Configuration):
    """Start running archival."""
    jobs = Job.get_running_jobs(cfg.log)
    print('...starting archive loop')
    firstit = True
    while True:
        if not firstit:
            print('Sleeping 60s until next iteration...')
            time.sleep(60)
        jobs = Job.get_running_jobs(cfg.log)
        firstit = False
        archive.archive(cfg.directories, jobs)
    return 0  # unreachable: the loop above never breaks
17bc1c63896edae46f2db60bd9bbcbd699d6f1ab
29,417
def k2j(k, E, nu, plane_stress=False):
    """
    Convert the fracture toughness K to the equivalent J-integral.

    Parameters
    ----------
    k: float
        Stress intensity factor.
    E: float
        Young's modulus in GPa.
    nu: float
        Poisson's ratio.
    plane_stress: bool
        True for plane stress or False for plane strain condition (default).

    Returns
    -------
    float
    """
    if plane_stress:
        E = E / (1 - nu ** 2)
    return k ** 2 / E
7fb34149c7fc9b557ab162632884f605693aa823
29,418
def get_worker_bonus(job_id, worker_id, con=None):
    """Return the worker's bonus in cents, or 0 if no bonus row exists.

    :param job_id: ID of the job.
    :param worker_id: ID of the worker.
    :param con: optional database connection.
    """
    bonus_row = _get_worker_bonus_row(job_id, worker_id, con)
    if bonus_row is None:
        return 0
    return bonus_row["bonus_cents"]
2b58723f275f26c9208a36b650d75596a21354b2
29,419
def get_attribute_distribution():
    """
    Attribute weights based on position and prototype, in this order:
    [potential, confidence, iq, speed, strength, agility, awareness, stamina,
     injury, run_off, pass_off, special_off, run_def, pass_def, special_def]
    """
    # All weights are placeholders: [0] * 15 is a list of fifteen zeros, one
    # per attribute in the order documented above.
    attr_dist = {
        'QB': {
            'Gunslinger': [0] * 15,
            'Scrambler': [0] * 15,
            'Field General': [0] * 15,
        },
        'HB': {
            'Elusive': [0] * 15,
            'Power': [0] * 15,
            'All-Purpose': [0] * 15,
        },
        'FB': {
            'Blocking': [0] * 15,
            'Rushing': [0] * 15,
        },
        'WR': {
            'Possession': [0] * 15,
            'Deep Threat': [0] * 15,
            'Route Runner': [0] * 15,
        },
        'TE': {
            'Blocking': [0] * 15,
            'Receiving': [0] * 15,
            'Hybrid': [0] * 15,
        },
        'LT': {
            'Pass Protector': [0] * 15,
            'Run Blocker': [0] * 15,
        },
        'LG': {
            'Pass Protector': [0] * 15,
            'Run Blocker': [0] * 15,
        },
        'C': {
            'Pass Protector': [0] * 15,
            'Run Blocker': [0] * 15,
        },
        'RG': {
            'Pass Protector': [0] * 15,
            'Run Blocker': [0] * 15,
        },
        'RT': {
            'Pass Protector': [0] * 15,
            'Run Blocker': [0] * 15,
        },
        'DE': {
            'Pass Rusher': [0] * 15,
            'Run Stuffer': [0] * 15,
        },
        'DT': {
            'Pass Rusher': [0] * 15,
            'Run Stuffer': [0] * 15,
        },
        'OLB': {
            'Coverage': [0] * 15,
            'Run Stuffer': [0] * 15,
        },
        'MLB': {
            'Coverage': [0] * 15,
            'Run Stuffer': [0] * 15,
        },
        'CB': {
            'Ball Hawk': [0] * 15,
            'Shutdown': [0] * 15,
        },
        'FS': {
            'Ball Hawk': [0] * 15,
            'Shutdown': [0] * 15,
        },
        'SS': {
            'Ball Hawk': [0] * 15,
            'Run Stuffer': [0] * 15,
        },
        'K': {
            'Accurate': [0] * 15,
            'Power': [0] * 15,
        },
        'P': {
            'Coffin Corner': [0] * 15,
            'Power': [0] * 15,
        },
    }
    return attr_dist
25dc83ba2f4bec4caaa88423e2607af300dcfbc4
29,420
def predict(product: Product):
    """Return ML predictions, see /docs for more information.

    Args:
        product: (Product) the parsed data from user request

    Returns:
        A dictionary with the predicted nutrigrade and the related probability
    """
    sample = {
        "energy": round(float(product.energy)),
        "saturated_fat": round(float(product.saturated_fat)),
        "sugars": round(float(product.sugars)),
        "salt": round(float(product.salt)),
        "fibers": round(float(product.fibers)),
        "group1_Beverages": 0,
        "group1_Cereals and potatoes": 0,
        "group1_Composite foods": 0,
        "group1_Fat and sauces": 0,
        "group1_Fruits and vegetables": 0,
        "group1_Milk and dairy products": 0,
        "group1_Sugary snacks": 0,
        "group1_unknown": 0,
    }

    # If the category is detected then assign the property value to 1.
    formatted_category = "group1_{0}".format(product.pnns_group)
    if formatted_category in sample.keys():
        sample[formatted_category] = 1

    sample = list(sample.values())

    # Predict the nutrigrade!
    nutrigrade = model.predict([sample])[0]
    # take the highest class probability (argmax would return the class index,
    # not a probability)
    probability = model.predict_proba([sample]).max(1).item()

    # Return the prediction and the probability
    return {"nutrigrade": nutrigrade, "probability": probability}
ca2456989b5cc82f56ed908c5d51471237c68d73
29,421
def get_stock_historicals(symbol, interval="5minute", span="week"):
    """Return the historical data for SYMBOL, with data at every time INTERVAL
    over a given SPAN.
    """
    assert span in ['day', 'week', 'month', '3month', 'year', '5year']
    assert interval in ['5minute', '10minute', 'hour', 'day', 'week']
    historicals = robin_stocks.stocks.get_stock_historicals(symbol, interval, span)
    process_historicals(historicals)
    return historicals
62612a94e385c8c3703e42f2ace49d4a37d598ef
29,422
import re


def re_match_both2(item, args):
    """Matches a regex with a group (argument 2) against the column (number in argument 1)"""
    # setup
    (re_col1, re_expr1, re_col2, re_expr2) = args
    if re_expr1 not in compiled_res:
        compiled_res[re_expr1] = re.compile(re_expr1)
    if re_expr2 not in compiled_res:
        compiled_res[re_expr2] = re.compile(re_expr2)

    # test if a match occurred
    match1 = compiled_res[re_expr1].search(item[re_col1])
    match2 = compiled_res[re_expr2].search(item[re_col2])
    if match1 and match2:
        if match2.group(1) is None and match2.group(2) is None:
            return ['', '']
        grp = "g1"
        if match2.group(1) is None:
            grp = "g2"
        return ["%s-%s" % (match1.group(1), grp), '']
    return ['', '']
13ac911e71324f54cde60f8c752603d21df98918
29,423
def at_least(actual_value, expected_value):
    """Assert that actual_value is at least expected_value."""
    result = actual_value >= expected_value
    if result:
        return result
    else:
        raise AssertionError(
            "{!r} is LESS than {!r}".format(actual_value, expected_value)
        )
6897c863d64d1e4ce31e9b42df8aba04f1bbdd7a
29,424
import re
import string


def clean_text(text):
    """
    Clean text: lowercase it, then remove newlines, carriage returns,
    bracketed text, URLs, punctuation, '’', words containing numbers, and
    double spaces.

    Args:
        text (str)
    Return:
        text (str)
    """
    text = str(text).lower()
    text = re.sub('\n', ' ', text)
    text = re.sub('\r', ' ', text)
    text = re.sub(r'\[.*?\]', ' ', text)
    text = re.sub(r'https?://\S+|www\.\S+', ' ', text)
    text = re.sub('[%s]' % re.escape(string.punctuation), ' ', text)  # remove punctuation
    text = re.sub('’', ' ', text)
    text = re.sub(r'\w*\d\w*', ' ', text)
    text = re.sub(' +', ' ', text)
    return text
2d9ddf56a9eeb1a037ec24a8907d3c85c9bbee43
29,425
from fnmatch import fnmatch


def fnmatch_list(filename, pattern_list):
    """Check filename against a list of patterns using fnmatch."""
    if not isinstance(pattern_list, list):
        pattern_list = [pattern_list]
    for pattern in pattern_list:
        if fnmatch(filename, pattern):
            return True
    return False
72204c3168c0a97ad13134dcb395edf5e44e149f
29,426
def math_div_str(numerator, denominator, accuracy=0, no_div=False):
    """
    Division rendered as a string.

    :param numerator: the numerator
    :param denominator: the denominator
    :param accuracy: number of decimal places
    :param no_div: whether to skip the division, e.g. for 3/5: True yields
                   "3/5", False yields "1/1.6"
    :return:
    """
    if denominator == 0 or numerator == 0:
        return 0
    if abs(numerator) < abs(denominator):
        if no_div:
            return '%d/%d' % (numerator, denominator)
        return '1/' + str(int(round(denominator / numerator, 0)))
    else:
        if not numerator % denominator:
            accuracy = 0
        t = round(float(numerator) / float(denominator), accuracy)
        return str(int(t)) if accuracy == 0 else str(t)
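A few usage sketches (the 1/x branch rounds to an integer denominator):

math_div_str(3, 5)               # '1/2'  (1 / round(5/3))
math_div_str(3, 5, no_div=True)  # '3/5'
math_div_str(10, 4, accuracy=1)  # '2.5'
math_div_str(10, 5)              # '2'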
bbcead0ec0f79d8915289b6e4ff23b0d6e4bf8ed
29,427
from datetime import datetime


def create_comments(post):
    """
    Helper to create remote comments.

    :param post:
    :return:
    """
    comment_list = list()
    post = post.get('posts')[0]
    for c in post.get('comments'):
        comment = Comment()
        comment.author = create_author(c.get('author'))
        comment.comment = c.get('comment')
        comment.contentType = c.get('contentType')
        comment.content = comment.get_comment()
        comment.id = c.get('id')
        comment.published = utc.localize(
            datetime.strptime(c.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))
        comment_list.append(comment)
    return comment_list
48bbad23a60efdd0ad47b2dfeb2e5b943a2743af
29,428
def zero_fuel(distance_to_pump, mpg, fuel_left):
    """
    You were camping with your friends far away from home, but when it's time
    to go back, you realize that your fuel is running out and the nearest pump
    is 50 miles away! You know that on average, your car runs on about
    25 miles per gallon. There are 2 gallons left.

    Considering these factors, write a function that tells you if it is
    possible to get to the pump or not. The function should return true
    (1 in Prolog) if it is possible and false (0 in Prolog) if not.
    The input values are always positive.

    :param distance_to_pump: an integer value, positive.
    :param mpg: an integer value, positive.
    :param fuel_left: an integer value, positive.
    :return: True if able to make the journey to the pump on the fuel left,
             otherwise False.
    """
    return distance_to_pump / mpg <= fuel_left
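A quick usage sketch matching the story in the docstring:

assert zero_fuel(50, 25, 2) is True   # 50 miles at 25 mpg needs exactly 2 gallons
assert zero_fuel(100, 25, 3) is False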
67a69b59d6f35a872f87e18ee0e8693af886c386
29,429
import itertools


def get_routing_matrix(
    lambda_2,
    lambda_1_1,
    lambda_1_2,
    mu_1,
    mu_2,
    num_of_servers_1,
    num_of_servers_2,
    system_capacity_1,
    system_capacity_2,
    buffer_capacity_1,
    buffer_capacity_2,
    routing_function=get_weighted_mean_blocking_difference_between_two_systems,
    alpha=0,
):
    """
    Get the optimal distribution matrix that consists of the proportion of
    individuals to be distributed to each hospital for all possible
    combinations of thresholds of the two hospitals (T_1, T_2). For every set
    of thresholds, the function fills the entries of the matrix using the
    proportion of individuals to distribute to hospital 1.

    Parameters
    ----------
    lambda_2 : float
    lambda_1_1 : float
    lambda_1_2 : float
    mu_1 : float
    mu_2 : float
    num_of_servers_1 : int
    num_of_servers_2 : int
    system_capacity_1 : int
    system_capacity_2 : int
    buffer_capacity_1 : int
    buffer_capacity_2 : int
    routing_function : function, optional
        The function to use to get the optimal distribution of patients
    alpha : float, optional
        Passed through to the routing function

    Returns
    -------
    numpy array
        The matrix with proportions for all possible combinations of thresholds
    """
    routing_matrix = np.zeros((system_capacity_1, system_capacity_2))
    for threshold_1, threshold_2 in itertools.product(
        range(1, system_capacity_1 + 1), range(1, system_capacity_2 + 1)
    ):
        opt = calculate_class_2_individuals_best_response(
            lambda_2=lambda_2,
            lambda_1_1=lambda_1_1,
            lambda_1_2=lambda_1_2,
            mu_1=mu_1,
            mu_2=mu_2,
            num_of_servers_1=num_of_servers_1,
            num_of_servers_2=num_of_servers_2,
            system_capacity_1=system_capacity_1,
            system_capacity_2=system_capacity_2,
            buffer_capacity_1=buffer_capacity_1,
            buffer_capacity_2=buffer_capacity_2,
            threshold_1=threshold_1,
            threshold_2=threshold_2,
            routing_function=routing_function,
            alpha=alpha,
        )
        routing_matrix[threshold_1 - 1, threshold_2 - 1] = opt
    return routing_matrix
fad50cb2a160ba569788ea4f546eb4f0292d47c0
29,430
def astrange_to_symrange(astrange, arrays, arrname=None):
    """ Converts an AST range (array, [(start, end, skip)]) to a symbolic math
        range, using the obtained array sizes and resolved symbols. """
    if arrname is not None:
        arrdesc = arrays[arrname]

        # If the array is a scalar, return None
        if arrdesc.shape is None:
            return None

        # If range is the entire array, use the array descriptor to obtain the
        # entire range
        if astrange is None:
            return [
                (symbolic.pystr_to_symbolic(0),
                 symbolic.pystr_to_symbolic(symbolic.symbol_name_or_value(s)) - 1,
                 symbolic.pystr_to_symbolic(1)) for s in arrdesc.shape
            ]

        missing_slices = len(arrdesc.shape) - len(astrange)
        if missing_slices < 0:
            raise ValueError(
                'Mismatching shape {} - range {} dimensions'.format(
                    arrdesc.shape, astrange))
        for i in range(missing_slices):
            astrange.append((None, None, None))

    result = [None] * len(astrange)
    for i, r in enumerate(astrange):
        if isinstance(r, tuple):
            begin, end, skip = r
            # Default values
            if begin is None:
                begin = symbolic.pystr_to_symbolic(0)
            else:
                begin = symbolic.pystr_to_symbolic(unparse(begin))
            if end is None and arrname is None:
                raise SyntaxError('Cannot define range without end')
            elif end is not None:
                end = symbolic.pystr_to_symbolic(unparse(end)) - 1
            else:
                end = symbolic.pystr_to_symbolic(
                    symbolic.symbol_name_or_value(arrdesc.shape[i])) - 1
            if skip is None:
                skip = symbolic.pystr_to_symbolic(1)
            else:
                skip = symbolic.pystr_to_symbolic(unparse(skip))
        else:
            # In the case where a single element is given
            begin = symbolic.pystr_to_symbolic(unparse(r))
            end = begin
            skip = symbolic.pystr_to_symbolic(1)

        result[i] = (begin, end, skip)

    return result
eca988aac1d0b69ad45907b4d3dd1c6be2e914b3
29,431
def get_union(*args):
    """Return the union of multiple input lists."""
    return list(set().union(*args))
18025cfd37d64f15daf92aa2ae3e81176cae6e39
29,432
from functools import wraps

import bibtexparser
import feedparser


def retrieve(func):
    """
    Decorator for Zotero read API methods; calls _retrieve_data() and passes
    the result to the correct processor, based on a lookup
    """

    @wraps(func)
    def wrapped_f(self, *args, **kwargs):
        """
        Returns result of _retrieve_data()

        func's return value is part of a URI, and it's this which is
        intercepted and passed to _retrieve_data:
        '/users/123/items?key=abc123'
        """
        if kwargs:
            self.add_parameters(**kwargs)
        retrieved = self._retrieve_data(func(self, *args))
        # we now always have links in the header response
        self.links = self._extract_links()
        # determine content and format, based on url params
        content = (
            self.content.search(self.request.url)
            and self.content.search(self.request.url).group(0)
            or "bib"
        )
        # JSON by default
        formats = {
            "application/atom+xml": "atom",
            "application/x-bibtex": "bibtex",
            "application/json": "json",
            "text/html": "snapshot",
            "text/plain": "plain",
            "application/pdf; charset=utf-8": "pdf",
            "application/pdf": "pdf",
            "application/msword": "doc",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
            "application/zip": "zip",
            "application/epub+zip": "zip",
            "audio/mpeg": "mp3",
            "video/mp4": "mp4",
            "audio/x-wav": "wav",
            "video/x-msvideo": "avi",
            "application/octet-stream": "octet",
            "application/x-tex": "tex",
            "application/x-texinfo": "texinfo",
            "image/jpeg": "jpeg",
            "image/png": "png",
            "image/gif": "gif",
            "image/tiff": "tiff",
            "application/postscript": "postscript",
            "application/rtf": "rtf",
        }
        # select format, or assume JSON
        content_type_header = self.request.headers["Content-Type"].lower() + ";"
        fmt = formats.get(
            # strip "; charset=..." segment
            content_type_header[0:content_type_header.index(";")],
            "json",
        )
        # clear all query parameters
        self.url_params = None
        # check to see whether it's tag data
        if "tags" in self.request.url:
            self.tag_data = False
            return self._tags_data(retrieved.json())
        if fmt == "atom":
            parsed = feedparser.parse(retrieved.text)
            # select the correct processor
            processor = self.processors.get(content)
            # process the content correctly with a custom rule
            return processor(parsed)
        if fmt == "snapshot":
            # we need to dump as a zip!
            self.snapshot = True
        if fmt == "bibtex":
            parser = bibtexparser.bparser.BibTexParser(
                common_strings=True, ignore_nonstandard_types=False
            )
            return parser.parse(retrieved.text)
        # it's binary, so return raw content
        elif fmt != "json":
            return retrieved.content
        # no need to do anything special, return JSON
        else:
            return retrieved.json()

    return wrapped_f
8a2b441f42e26c69e39d1f22b7624350f3bef8b0
29,433
def specificity(y, z):
    """True negative rate `tn / (tn + fp)`."""
    tp, tn, fp, fn = contingency_table(y, z)
    # pseudocount guards against division by zero when tn + fp == 0
    return tn / (tn + fp + pseudocount)
bf1c835072463e14420939ef56aade365863f559
29,434
def get_model_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn):
    """
    :param scenario_id: the scenario ID
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :return:
    """
    c1 = conn.cursor()
    new_stor_costs = c1.execute(
        """
        SELECT project, vintage, lifetime_yrs,
        annualized_real_cost_per_mw_yr, annualized_real_cost_per_mwh_yr
        FROM inputs_project_portfolios
        CROSS JOIN
        (SELECT period AS vintage
        FROM inputs_temporal_periods
        WHERE temporal_scenario_id = {}) as relevant_vintages
        INNER JOIN
        (SELECT project, vintage, lifetime_yrs,
        annualized_real_cost_per_mw_yr, annualized_real_cost_per_mwh_yr
        FROM inputs_project_new_cost
        WHERE project_new_cost_scenario_id = {}) as cost
        USING (project, vintage)
        WHERE project_portfolio_scenario_id = {}
        AND capacity_type = 'stor_new_bin';""".format(
            subscenarios.TEMPORAL_SCENARIO_ID,
            subscenarios.PROJECT_NEW_COST_SCENARIO_ID,
            subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
        )
    )

    c2 = conn.cursor()
    new_stor_build_size = c2.execute(
        """SELECT project, binary_build_size_mw, binary_build_size_mwh
        FROM inputs_project_portfolios
        INNER JOIN
        (SELECT project, binary_build_size_mw, binary_build_size_mwh
        FROM inputs_project_new_binary_build_size
        WHERE project_new_binary_build_size_scenario_id = {})
        USING (project)
        WHERE project_portfolio_scenario_id = {}
        AND capacity_type = 'stor_new_bin';""".format(
            subscenarios.PROJECT_NEW_BINARY_BUILD_SIZE_SCENARIO_ID,
            subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID,
        )
    )

    return new_stor_costs, new_stor_build_size
b77e4155f89ecae0b0bc5e2517aa05a4291f73b5
29,437
def pad_image(image, padding):
    """
    Pad an image's canvas by the amount of padding while filling the padded
    area with a reflection of the data.

    :param image: Image to pad, in either [H,W] or [H,W,3]
    :param padding: Amount of padding to add to the image
    :return: Padded image; padding uses reflection along the border
    """
    if len(image.shape) < 3:
        # Grayscale image (ground truth)
        image = np.lib.pad(image, ((padding, padding), (padding, padding)), 'reflect')
    elif len(image.shape) == 3:
        # RGB image
        image = np.lib.pad(image, ((padding, padding), (padding, padding), (0, 0)), 'reflect')
    else:
        assert False, "Method cannot pad 4D images"
    return image
ac797a201191c78a912f43908b214b3374de45d1
29,438
def delta_obj_size(object_image_size, model_image_size):
    """Compute the delta value (scale between -inf and inf) of the object
    (width, height) from the image (width, height) using the inverse sigmoid.
    Since the sigmoid maps any real input into (0, 1), this allows the model
    to learn unconstrained.

    Parameters:
    -----------
    object_image_size (tuple): True width and height of the object, already
        scaled to the model image size (model_size / true_img_size).
    model_image_size (tuple): Width and height of the model input (since the
        prediction is on the rescaled image).

    Returns:
    --------
    tuple: the scaled width and height w.r.t. the model
    """
    obj_w, obj_h = object_image_size
    model_w, model_h = model_image_size

    # 1e-6 to avoid nan; this is the inverse of (1 / (1 + np.exp(-delta_w)))
    delta_w = -(np.log((model_w / obj_w) + 1e-6 - 1))
    delta_h = -(np.log((model_h / obj_h) + 1e-6 - 1))
    return delta_w, delta_h
4ba5935a8b87391ba59623d5de5a86f40f35cacd
29,439
def get_relname_info(name):
    """Locate the name (row) in the release map defined above and return that
    object's properties (columns).
    """
    return RELEASES_BY_NAME[name]
538d564d6d0a67101fa84931fd7f7e69ac83f8b2
29,440
def swap(bee_permutation, n_bees):
    """Foraging stage using the swap mutation method.

    This function simulates the foraging stage of the algorithm. It takes the
    current bee permutation of a single bee and mutates the order using a swap
    mutation step. `n_bees` forager bees are created by swapping two unique
    indices per row.

    Parameters
    ----------
    bee_permutation: np.ndarray with shape (1, n_coordinates)
        Array representing the indexing permutation of the discrete bee
        coordinates.
    n_bees: int
        Number of forager bees to create.

    Returns
    -------
    forager_bees: np.ndarray with shape (n_bees, n)
        The new indexing permutations, using the swap mutation approach.

    Examples
    --------
    >>> bee_permutation = np.arange(10)[np.newaxis, :]
    >>> bee_permutation
    array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
    ...
    >>> swap(bee_permutation, 2)
    array([[6, 1, 2, 3, 4, 5, 0, 7, 8, 9],
           [0, 1, 9, 3, 4, 5, 6, 7, 8, 2]])
    """
    # Prepare the forager bees and mutation indices
    forager_bees, swap = _prepare_array(bee_permutation, n_bees)

    # Mutate the forager bees. `h` is a helper array that coerces the fancy
    # index into the correct shape. Question asked here for this procedure:
    # https://stackoverflow.com/questions/59936869/swap-multiple-indices-in-2d-array
    h = np.arange(n_bees)[:, np.newaxis]
    forager_bees[h, swap] = forager_bees[h, swap[:, ::-1]]
    return forager_bees.astype(int)
4679ebe27cba51c095cd0ece3a4aabfcdb6531a8
29,441
def update_boundaries(x=None):
    """
    This is the main processing code. Every time a slider on a trackbar moves,
    this procedure is called as the callback.
    """
    # get current positions of the six trackbars
    maxHSV[0] = cv2.getTrackbarPos('Hmax', 'image')
    maxHSV[1] = cv2.getTrackbarPos('Smax', 'image')
    maxHSV[2] = cv2.getTrackbarPos('Vmax', 'image')
    minHSV[0] = cv2.getTrackbarPos('Hmin', 'image')
    minHSV[1] = cv2.getTrackbarPos('Smin', 'image')
    minHSV[2] = cv2.getTrackbarPos('Vmin', 'image')

    # create a bitmap based on the new threshold values
    mask_hsv = cv2.inRange(hsv, minHSV, maxHSV)

    # apply the bitmap mask on the original image
    display = cv2.bitwise_and(frame, frame, mask=mask_hsv)
    cv2.imshow('image', display)
    return x  # unneeded line, just to avoid warnings about unused x
d484d242007f906f5cd707ee02d28c9cd580c844
29,442
def get_stream(stream_id, return_fields=None, ignore_exceptions=False):
    """This function retrieves the information on a single stream when supplied its ID.

    .. versionchanged:: 3.1.0
       Changed the default ``return_fields`` value to ``None`` and adjusted the function accordingly.

    :param stream_id: The ID of the stream to retrieve
    :type stream_id: int, str
    :param return_fields: Specific fields to return if not all of the default fields are needed (Optional)
    :type return_fields: list, None
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
    :type ignore_exceptions: bool
    :returns: A dictionary with the data for the stream
    :raises: :py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`,
             :py:exc:`khorosjx.errors.exceptions.GETRequestError`
    """
    # Verify that the core connection has been established
    verify_core_connection()

    # Retrieve the stream
    stream = core.get_data('streams', stream_id, return_json=False, all_fields=True)
    successful_response = errors.handlers.check_api_response(stream, ignore_exceptions=ignore_exceptions)
    if successful_response:
        stream = core.get_fields_from_api_response(stream.json(), 'stream', return_fields)
    return stream
14adff8dcff2bd89ace9a5ef642898e15b7eeaa7
29,443
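A hedged usage sketch for `get_stream` above, assuming the khorosjx core connection has already been established; the stream ID and field names are made up for illustration.

# Retrieve only selected fields for a hypothetical stream ID
stream_data = get_stream(12345, return_fields=['id', 'name'])
print(stream_data)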
def get_grade_mdata(): """Return default mdata map for Grade""" return { 'output_score': { 'element_label': { 'text': 'output score', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter a decimal value.', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_decimal_values': [None], 'syntax': 'DECIMAL', 'decimal_scale': None, 'minimum_decimal': None, 'maximum_decimal': None, 'decimal_set': [], }, 'grade_system': { 'element_label': { 'text': 'grade system', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, 'input_score_end_range': { 'element_label': { 'text': 'input score end range', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter a decimal value.', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_decimal_values': [None], 'syntax': 'DECIMAL', 'decimal_scale': None, 'minimum_decimal': None, 'maximum_decimal': None, 'decimal_set': [], }, 'input_score_start_range': { 'element_label': { 'text': 'input score start range', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter a decimal value.', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_decimal_values': [None], 'syntax': 'DECIMAL', 'decimal_scale': None, 'minimum_decimal': None, 'maximum_decimal': None, 'decimal_set': [], }, }
ab44e7cbf67a050bdb08366ebe933cb62eb9b04c
29,444
def flatten_dict_join_keys(dct, join_symbol=" ", simplify_iterables=False): """ Flatten dict with defined key join symbol. :param dct: dict to flatten :param join_symbol: default value is " " :param simplify_iterables: each element of lists and ndarrays is represented as one key :return: """ if simplify_iterables: dct = ndarray_to_list_in_structure(dct) dct = list_to_dict_in_structure(dct, keys_to_str=True) return dict(flatten_dict(dct, join=lambda a, b: a + join_symbol + b))
a133d1a621e4c1fa7ccce78576527a0cf212c0e3
29,445
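An illustrative call for `flatten_dict_join_keys` above. The nested `flatten_dict` helper is not shown in this snippet, so the output noted below is the expected behavior implied by the docstring, not a verified result.

nested = {'a': {'b': 1, 'c': {'d': 2}}}
flat = flatten_dict_join_keys(nested, join_symbol='.')
# expected: {'a.b': 1, 'a.c.d': 2}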
import logging


def prev_factor(target, current):
    """Given a target to factorise, find the next lower factor of target below current"""
    assert current <= target
    candidates = factors(target)
    if len(candidates) == 1:
        return 1
    logging.info("Selecting previous factor %d of %d given %d"
                 % (candidates[candidates.index(current) - 1], target, current))
    return candidates[candidates.index(current) - 1]
8190d48ec210670adb8dd0c2d72b1738561191ae
29,446
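A minimal sketch of the `factors` helper that `prev_factor` above assumes. The real implementation is not shown here; this version returns all divisors of `target` in ascending order, which matches how `prev_factor` indexes into the list.

def factors(target):
    """Return all positive divisors of target, sorted ascending."""
    return [d for d in range(1, target + 1) if target % d == 0]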
import numpy as np


def compute_weight_BTEL1010(true_energy, simtel_spectral_slope=-2.0):
    """Compute the weight from requirement B-TEL-1010-Intensity-Resolution.

    Parameters
    ----------
    true_energy: array_like
    simtel_spectral_slope: float
        Spectral slope from the simulation.
    """
    target_slope = -2.62  # spectral slope from B-TEL-1010
    spec_slope = simtel_spectral_slope

    # each pixel of the same image (row of data table) needs the same weight
    weight = np.power(true_energy / 200.0, target_slope - spec_slope)

    return weight
64e126822dda2d6ece24cf95e4aef48a656ba4c6
29,447
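A quick usage sketch for `compute_weight_BTEL1010` above; the energy values are arbitrary and only illustrate the reweighting from the simulated slope to the B-TEL-1010 target slope.

import numpy as np

true_energy = np.array([50.0, 200.0, 800.0])  # hypothetical energies
weights = compute_weight_BTEL1010(true_energy)
# At 200.0 the weight is exactly 1.0; the softer target slope
# down-weights higher energies and up-weights lower ones.
print(weights)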
from datetime import timedelta


def blog_post(post_url):
    """Render the post with the given url, or 404 if it does not exist."""
    post = Post.query.filter_by(url=post_url).first()
    if post is None:
        abort(404)
    timezone_diff = timedelta(hours=post.timezone)
    return render_template('blog_post.html', post=post, tz_diff=timezone_diff)
a87068d4fb6394b452b96b83465529681737fc31
29,448
def l2_normalization(
        inputs,
        scaling=False,
        scale_initializer=init_ops.ones_initializer(),
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        data_format='NHWC',
        trainable=True,
        scope=None):
    """Implement L2 normalization on every feature (i.e. spatial normalization).

    Should be extended in some near future to other dimensions, providing a more
    flexible normalization framework.

    Args:
      inputs: a 4-D tensor with dimensions [batch_size, height, width, channels].
      scaling: whether or not to add a post scaling operation along the dimensions
        which have been normalized.
      scale_initializer: An initializer for the weights.
      reuse: whether or not the layer and its variables should be reused. To be
        able to reuse the layer scope must be given.
      variables_collections: optional list of collections for all the variables or
        a dictionary containing a different list of collection per variable.
      outputs_collections: collection to add the outputs.
      data_format: NHWC or NCHW data format.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
      scope: Optional scope for `variable_scope`.
    Returns:
      A `Tensor` representing the output of the operation.
    """
    with variable_scope.variable_scope(
            scope, 'L2Normalization', [inputs], reuse=reuse) as sc:
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        dtype = inputs.dtype.base_dtype
        if data_format == 'NHWC':
            # norm_dim = tf.range(1, inputs_rank-1)
            norm_dim = tf.range(inputs_rank - 1, inputs_rank)
            params_shape = inputs_shape[-1:]
        elif data_format == 'NCHW':
            # norm_dim = tf.range(2, inputs_rank)
            norm_dim = tf.range(1, 2)
            params_shape = inputs_shape[1:2]  # keep a 1-D shape for the scale variable

        # Normalize along spatial dimensions.
        outputs = nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)
        # Additional scaling.
        if scaling:
            scale_collections = utils.get_variable_collections(
                variables_collections, 'scale')
            scale = variables.model_variable('gamma',
                                             shape=params_shape,
                                             dtype=dtype,
                                             initializer=scale_initializer,
                                             collections=scale_collections,
                                             trainable=trainable)
            if data_format == 'NHWC':
                outputs = tf.multiply(outputs, scale)
            elif data_format == 'NCHW':
                scale = tf.expand_dims(scale, axis=-1)
                scale = tf.expand_dims(scale, axis=-1)
                outputs = tf.multiply(outputs, scale)
                # outputs = tf.transpose(outputs, perm=(0, 2, 3, 1))

        return utils.collect_named_outputs(outputs_collections,
                                           sc.original_name_scope, outputs)
b595699dcae6efd5c18fddc070905b1f41cc832b
29,449
import numpy as np


def generate_com_filter(size_u, size_v):
    """ generate the CoM (center-of-mass) base conv filter """
    center_u = size_u // 2
    center_v = size_v // 2

    _filter = np.zeros((size_v, size_u, 2))

    # channel 0 is for u, channel 1 is for v
    for i in range(size_v):
        for j in range(size_u):
            _filter[i, j, 0] = (j - center_u) / (size_u - 1)
            _filter[i, j, 1] = (i - center_v) / (size_v - 1)

    return _filter
9797739b05724b104c932e07662278443e15eefb
29,450
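A small check of `generate_com_filter` above: for a 3×3 filter, each channel ramps linearly from -0.5 to 0.5 across its axis, with zeros at the center.

import numpy as np

f = generate_com_filter(3, 3)
print(f[..., 0])  # u channel: each row is [-0.5, 0.0, 0.5]
print(f[..., 1])  # v channel: each column is [-0.5, 0.0, 0.5]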
from typing import List, Optional


def retrieve_scores_grouped_ordered_pair_by_slug(panelist_slug: str,
                                                 database_connection: mysql.connector.connect
                                                ) -> Optional[List[tuple]]:
    """Returns a list of tuples, each containing a score and the
    corresponding number of instances the panelist has scored that
    amount, for the requested panelist slug

    Arguments:
        panelist_slug (str)
        database_connection (mysql.connector.connect)
    """
    panelist_id = utility.convert_slug_to_id(panelist_slug, database_connection)
    if not panelist_id:
        return None

    return retrieve_scores_grouped_ordered_pair_by_id(panelist_id,
                                                      database_connection,
                                                      pre_validated_id=True)
b8efd970a8adcbcbe6bdf8a737502ce174e01531
29,451
import datetime


def set_job_id():
    """Define job id for output paths.

    Returns:
        job_id: Identifier for output paths.
    """
    job_id = FLAGS.job_id
    if not job_id:
        job_id = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    return job_id
974fa455de363a4c5f3fbcb598a3a002c00c2942
29,452
def is_owner(obj, user):
    """Check if user is owner of the slice"""
    return obj and user in obj.owners
f0c49ffe8a8879d1d052f6fc37df596efa021a84
29,453
from typing import Dict, List, Optional


def boxplot_errors_wrt_RUL(
    results_dict: Dict[str, List[PredictionResult]],
    nbins: int,
    y_axis_label: Optional[str] = None,
    x_axis_label: Optional[str] = None,
    ax=None,
    **kwargs,
):
    """Boxplots of the difference between true and predicted RUL over cross-validated results

    Parameters
    ----------
    results_dict: Dict[str, List[PredictionResult]]
        Dictionary with the results of the fitted models
    nbins: int
        Number of bins into which the RUL values are divided
    y_axis_label: Optional[str]. Default None
        Optional string to be added to the y axis
    x_axis_label: Optional[str] = None
        Optional string to be added to the x axis
    ax: Optional. Default None
        Optional axis in which the plot will be drawn. If an axis is not
        provided, a new figure and axis are created.

    Keyword arguments
    -----------------
    **kwargs
        Forwarded to ``plt.subplots`` when a new figure is created.

    Return
    -------
    fig, ax
    """
    if ax is None:
        fig, ax = plt.subplots(**kwargs)
    else:
        fig = ax.figure
    bin_edges, model_results = models_cv_results(results_dict, nbins)
    return _boxplot_errors_wrt_RUL_multiple_models(
        bin_edges,
        model_results,
        fig=fig,
        ax=ax,
        y_axis_label=y_axis_label,
        x_axis_label=x_axis_label,
    )
793f17df520c6474744b7d38055f717e9dfec287
29,454
def create_client(admin_user: str, key_file: str) -> CloudChannelServiceClient: """Creates the Channel Service API client Returns: The created Channel Service API client """ # [START channel_create_client] # Set up credentials with user impersonation credentials = service_account.Credentials.from_service_account_file( key_file, scopes=["https://www.googleapis.com/auth/apps.order"]) credentials_delegated = credentials.with_subject(admin_user) # Create the API client client = channel.CloudChannelServiceClient(credentials=credentials_delegated) print("=== Created client") # [END channel_create_client] return client
b1af051982ad737bdf66b609416c182e675d91f7
29,455
import string
import random


def password_generator(length=12, chars=None):
    """ Simple, naive password generator """
    if not chars:
        chars = string.ascii_letters + string.digits
    return ''.join(random.choice(chars) for _ in range(length))
e94754e8d8ee3cf806ddbe092033f8cbc89496f7
29,457
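A usage example for `password_generator` above, plus a hedged variant: `random.choice` is not cryptographically secure, so for real credentials the standard-library `secrets` module is the safer drop-in.

import secrets
import string

print(password_generator())                  # e.g. 'aB3xY9kQ2mZ1'
print(password_generator(8, string.digits))  # numeric PIN

def secure_password(length=12, chars=string.ascii_letters + string.digits):
    # Same interface, but backed by a CSPRNG
    return ''.join(secrets.choice(chars) for _ in range(length))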
def get_node_hint(node):
    """Return the 'capabilities:node' hint associated with the node"""
    capabilities = node.get('properties').get('capabilities')
    capabilities_dict = capabilities_to_dict(capabilities)
    if 'node' in capabilities_dict:
        return capabilities_dict['node']
    return None
8fb28b38238d5c59db5fd42336d18292f4214963
29,458
import time as tm

from gurobipy import Model, GRB, quicksum, GurobiError


def execPregionsExactCP(y, w, p=2, rho='none', inst='none', conseq='none'):
    """P-regions model

    The p-regions model, devised by [Duque_Church_Middleton2009]_, clusters a
    set of geographic areas into p spatially contiguous regions while
    minimizing within cluster heterogeneity. In Clusterpy, the p-regions
    model is formulated as a mixed integer-programming (MIP) problem and
    solved using the Gurobi optimizer. ::

        # How to run it:
        layer.cluster(...)
        layer.cluster('pRegionsExact',vars,<p>,<wType>,<std>,<dissolve>,<dataOperations>)

    :keyword vars: Area attribute(s) (e.g. ['SAR1','SAR2','POP'])
    :type vars: list
    :keyword p: Number of spatially contiguous regions to be generated. Default value p = 2.
    :type p: integer
    :keyword wType: Type of first-order contiguity-based spatial matrix: 'rook' or 'queen'. Default value wType = 'rook'.
    :type wType: string
    :keyword std: If = 1, then the variables will be standardized.
    :type std: binary
    :keyword dissolve: If = 1, then you will get a "child" instance of the layer that contains the new regions. Default value = 0. Note: Each child layer is saved in the attribute layer.results. The first algorithm that you run with dissolve=1 will have a child layer in layer.results[0]; the second algorithm that you run with dissolve=1 will be in layer.results[1], and so on. You can export a child as a shapefile with layer.result[<1,2,3..>].exportArcData('filename')
    :type dissolve: binary
    :keyword dataOperations: Dictionary which maps a variable to a list of operations to run on it. The dissolved layer will contain in its data all the variables specified in this dictionary. Be sure to check the input layer's fieldNames before using this utility.
    :type dataOperations: dictionary

    The dictionary structure must be as shown below.

    >>> X = {}
    >>> X[variableName1] = [function1, function2,....]
    >>> X[variableName2] = [function1, function2,....]

    Where functions are strings which represent the names of the functions
    to be used on the given variableName. Functions could be 'sum', 'mean',
    'min', 'max', 'meanDesv', 'stdDesv', 'med', 'mode', 'range', 'first',
    'last', 'numberOfAreas'. By default just the ID variable is added to the
    dissolved map.
""" # print "Running p-regions model (Duque, Church and Middleton, 2009)" # print "Number of areas: ", len(y) # print "Number of regions: ", p, "\n" start = tm.time() # PARAMETERS # Number of areas n = len(y) l=n-p # Area iterator numA = range(n) d={} temp=range(n-1) for i in temp: list1=[] for j in numA: if i<j: list1.append(distanceA2AEuclideanSquared([y[i],y[j]])[0][0]) d[i]=list1 #----------------------------------- try: # CONSTRUCTION OF THE MODEL # Tolerance to non-integer solutions tol = 1e-5#1e-9 #min value: 1e-9 # SUBTOUR ELIMINATION CONSTRAINTS def subtourelim(model, where): if where == GRB.callback.MIPSOL: vals = model.cbGetSolution(model.getVars()) varsx = model.getVars()[n*n:] varsx1 = [varsx[i:i+n] for i in range(0,len(varsx),n)] t1 = [vals[i:i+n] for i in range(0,n*n,n)] x1 = [vals[n*n+i:n*n+i+n] for i in range(0,n*n,n)] num = list(numA) cycle = [] #sets of areas involved in cycles while num: area = num[0] c =[area] acum = 0 k = 0 while True: if k==n: break if x1[area][k]>=1-tol:#==1: acum = 1 break k += 1 f=num.remove(area) for j in numA: if t1[area][j]>=1-tol:#==1: c.append(j) k=0 while True: if k==n: break if x1[j][k]>=1-tol:#==1: acum += 1 break k += 1 if num.count(j)!=0: b =num.remove(j) if acum==len(c) and acum>1: cycle.append(c) if len(cycle): # add a subtour elimination constraint for cycle_k in cycle: temp1 = 0 card = len(cycle_k) for i in cycle_k: for j in cycle_k: if j in w[i]: temp1 += varsx1[i][j] if temp1!=0: model.cbLazy(temp1 <= card-1) # Create the new model m=Model("pRegions") # Create variables # t_ij # 1 if areas i and j belongs to the same region # 0 otherwise t = [] for i in numA: t_i = [] for j in numA: t_i.append(m.addVar(vtype=GRB.BINARY,name="t_"+str([i,j]))) t.append(t_i) # x_ij # 1 if arc between adjacent areas i and j is selected for a tree graph # 0 otherwise x = [] for i in numA: x_i=[] for j in numA: x_i.append(m.addVar(vtype=GRB.BINARY,name="x_"+str([i,j]))) x.append(x_i) # Integrate new variables m.update() # Objective function of=0 for i in numA: for j in range(i+1,n): of+=t[i][j]*d[i][j-i-1] m.setObjective(of, GRB.MINIMIZE) # Constraints 1, 5 temp = 0 for i in numA: for j in w[i]: temp += x[i][j] m.addConstr(x[i][j]-t[i][j]<=tol,"c5_"+str([i,j])) m.addConstr(temp == l-tol,"c1") # Constraint 2 i = 0 for x_i in x: temp =[] for j in w[i]: temp.append(x_i[j]) m.addConstr(quicksum(temp) <=1-tol, "c2_"+str(i)) i += 1 # Constraints 3, 4 for i in numA: for j in numA: if i!=j: m.addConstr(t[i][j]-t[j][i]<=tol,"c4_"+str([i,j])) for em in numA: if em!=j: m.addConstr(t[i][j]+t[i][em]-t[j][em]<=1-tol,"c3_"+str([i,j,em])) # Constraint REDUNDANTE for i in numA: for j in numA: if i!=j: m.addConstr(x[i][j]+x[j][i]<=1,"c3_"+str([i,j,em])) m.update() #Writes the .lp file format of the model #m.write("test.lp") #To reduce memory use #m.setParam('Threads',1) #m.setParam('NodefileStart',0.1) # To disable optimization output #m.setParam('OutputFlag',False) #m.setParam('ScaleFlag',0) # To set the tolerance to non-integer solutions m.setParam('IntFeasTol', tol) m.setParam('LogFile', 'CP-'+str(conseq)+'-'+str(n)+'-'+str(p)+'-'+str(rho)+'-'+str(inst)) # To enable lazy constraints m.params.LazyConstraints = 1 m.params.timeLimit = 1800 #m.params.ResultFile= "resultados.sol" m.optimize(subtourelim) time = tm.time()-start # for v in m.getVars(): # if v.x >0: # print v.varName, v.x #import pdb; pdb.set_trace() # sol = [0 for k in numA] # num = list(numA) # regID=0 #Number of region # while num: # area = num[0] # sol[area]=regID # f = num.remove(area) # for j in numA: # 
if t[area][j].x>=1-tol:#==1: # sol[j] = regID # if num.count(j)!=0: # b = num.remove(j) # regID += 1 # print 'FINAL SOLUTION:', sol # print 'FINAL OF:', m.objVal # print 'FINAL bound:', m.objBound # print 'GAP:', m.MIPGap # print "running time", time # print "running timeGR", m.Runtime output = { "objectiveFunction": m.objVal, "bestBound": m.objBound, "running time": time, "algorithm": "pRegionsExactCP", #"regions" : len(sol), "r2a": "None",#sol, "distanceType" : "EuclideanSquared", "distanceStat" : "None", "selectionType" : "None", "ObjectiveFunctionType" : "None"} print "Done" return output except GurobiError: print 'Error reported'
ea5d165918c6f203cf3cc42f9a1422a538ff133a
29,459
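A hedged input sketch for `execPregionsExactCP` above: `y` is a list of per-area attribute vectors and `w` maps each area index to its contiguous neighbours (rook contiguity on a 2x2 grid here). The attribute values are made up, and Gurobi plus the `distanceA2AEuclideanSquared` helper must be available for the call to run.

y = [[1.0], [1.2], [5.0], [5.3]]                  # one attribute per area
w = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2]}  # rook neighbours, 2x2 grid
result = execPregionsExactCP(y, w, p=2)
print(result['objectiveFunction'], result['running time'])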
def generate_scale(name, octave, major=True): """ Generates a sequence of MIDI note numbers for a scale (do re mi fa sol la si do). `name` specifies the base note, `octave` specifies in which octave the scale should be, and `major` designates whether the produced scale should be major or minor. """ scale = major_scale_progression if major else minor_scale_progression base_note = note_number(name+str(octave)) return [ base_note + x for x in scale ]
8276101a3ec7ddd340f5fa2c24e13b9b321e4307
29,460
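A minimal sketch of the module-level names `generate_scale` above relies on. These are assumptions reconstructed from standard music theory (semitone offsets of the major and natural-minor scales) and from MIDI convention (C4 = 60), not the original definitions.

major_scale_progression = [0, 2, 4, 5, 7, 9, 11, 12]
minor_scale_progression = [0, 2, 3, 5, 7, 8, 10, 12]

def note_number(name_with_octave):
    """Translate e.g. 'C4' or 'F#3' to a MIDI note number (C4 = 60)."""
    names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    pitch = name_with_octave[:-1]
    octave = int(name_with_octave[-1])
    return 12 * (octave + 1) + names.index(pitch)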
import scipy as sp
import scipy.sparse


def tensor_product(a, b, reshape=True):
    """
    compute the tensor product of two matrices a and b

    if a is (n, m_a) and b is (n, m_b),
    then the result is
        (n, m_a * m_b) if reshape = True,
        or (n, m_a, m_b) otherwise

    Parameters
    ---------
    a : array-like of shape (n, m_a)

    b : array-like of shape (n, m_b)

    reshape : bool, default True
        whether to reshape the result to be 2-dimensional ie
        (n, m_a * m_b) or return a 3-dimensional tensor ie
        (n, m_a, m_b)

    Returns
    -------
    dense np.ndarray of shape
        (n, m_a * m_b) if reshape = True,
        or (n, m_a, m_b) otherwise
    """
    assert a.ndim == 2, 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim)
    assert b.ndim == 2, 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim)

    na, ma = a.shape
    nb, mb = b.shape

    if na != nb:
        raise ValueError('both arguments must have the same number of samples')

    if sp.sparse.issparse(a):
        a = a.A

    if sp.sparse.issparse(b):
        b = b.A

    tensor = a[..., :, None] * b[..., None, :]

    if reshape:
        return tensor.reshape(na, ma * mb)

    return tensor
f7891d1cffa19fb8bdfd2adaa23d2aa94367b8ab
29,461
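A worked example for `tensor_product` above; the expected values follow directly from the row-wise outer product.

import numpy as np

a = np.array([[1, 2]])         # (1, 2)
b = np.array([[3, 4, 5]])      # (1, 3)
print(tensor_product(a, b))    # [[ 3  4  5  6  8 10]]
print(tensor_product(a, b, reshape=False).shape)  # (1, 2, 3)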
def disable_user( request, username ):
    """
        Enable/disable a user account.
        If the account is disabled, the user won't be able to login.
    """
    userModel = get_user_model()

    try:
        user = userModel.objects.get( username= username )

    except userModel.DoesNotExist:
        raise Http404( "User doesn't exist." )

    else:
        value = not user.is_active

        # only other staff users can enable/disable staff users
        if user.is_staff:
            if request.user.is_staff:
                user.is_active = value
                user.save()

            else:
                return HttpResponseForbidden( "Can't disable a staff member." )

        else:
            user.is_active = value
            user.save()

        if value:
            message = "'{}' account is now active.".format( user )

        else:
            message = "'{}' account is now disabled.".format( user )

        utilities.set_message( request, message )

        return HttpResponseRedirect( user.get_url() )
6500c053ee637cd47a4cec6feb7ec72001ccfb6a
29,462
import datetime


def get_next_event(user: User) -> Event | None:
    """
    Get the event that the provided user has next.
    """
    current_time = datetime.datetime.now().hour * 60 + datetime.datetime.now().minute
    return Event.query \
        .join(Event.subject, aliased=True) \
        .filter(Subject.user == user,
                Event.weekday == datetime.date.today().weekday(),
                Event.start_time > current_time,
                ) \
        .order_by(asc(Event.start_time)).first()
ac52bb2a5b0e9f368fccbf93d05fbcc6184462dd
29,463
import math


def affineToText(matrix):
    """
    Converts a libcv matrix into human readable text
    """
    tiltv = matrix[0, 0] * matrix[1, 1]
    rotv = (matrix[0, 1] - matrix[1, 0]) / 2.0
    if abs(tiltv) > 1:
        tilt = math.degrees(math.acos(1.0 / tiltv))
    else:
        tilt = math.degrees(math.acos(tiltv))
    if tilt > 90.0:
        tilt = tilt - 180.0
    if abs(rotv) < 1:
        rot = math.degrees(math.asin(rotv))
    else:
        rot = 180.0
    mystr = ("tiltang = %.2f, rotation = %.2f, shift = %.2f,%.2f"
             % (tilt, rot, matrix[2, 0], matrix[2, 1]))
    return mystr
14a754d804d509b1029c00ae40fbef70735d072f
29,464
import numpy
from itertools import product


def createBridgeSets(blocksize, operating, MPSS):
    """Use this function to create the iidx sets for bridges."""
    sets = tuple()
    xul = blocksize[0] - operating
    xdl = operating
    yul = int(blocksize[0] / 2 + operating)
    ydl = int(blocksize[0] / 2 - operating)
    xts = xul
    xbs = xdl
    for i in range(MPSS):
        sets += (tuple(product(numpy.arange(xdl, xul, 1),
                               numpy.arange(ydl, yul, 1))),)
        xdl += operating
        xul -= operating
        ydl -= operating
        yul += operating
    return sets, sets[::-1]
a97f44a44e00f4375c3aae0162edca5b78bcd5f1
29,465
def get_uniq_id_with_dur(meta, deci=3): """ Return basename with offset and end time labels """ bare_uniq_id = get_uniqname_from_filepath(meta['audio_filepath']) if meta['offset'] is None and meta['duration'] is None: return bare_uniq_id if meta['offset']: offset = str(int(round(meta['offset'], deci) * pow(10, deci))) else: offset = 0 if meta['duration']: endtime = str(int(round(meta['offset'] + meta['duration'], deci) * pow(10, deci))) else: endtime = 'NULL' uniq_id = f"{bare_uniq_id}_{offset}_{endtime}" return uniq_id
62d93703a8b33bbc3e1a533aedac11fec8d59fb1
29,466
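A hedged example for `get_uniq_id_with_dur` above. `get_uniqname_from_filepath` is not shown in this snippet, so the sketch assumes it strips the directory and extension from the path.

import os

def get_uniqname_from_filepath(filepath):
    # Assumed behavior: basename without extension
    return os.path.splitext(os.path.basename(filepath))[0]

meta = {'audio_filepath': '/data/abc.wav', 'offset': 1.5, 'duration': 2.0}
print(get_uniq_id_with_dur(meta))  # abc_1500_3500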
def delete(table, whereclause=None, **kwargs):
    """Return a ``DELETE`` clause element.

    This can also be called from a table directly via the table's
    ``delete()`` method.

    table
        The table to be deleted from.

    whereclause
        A ``ClauseElement`` describing the ``WHERE`` condition of the
        ``DELETE`` statement.
    """
    return _Delete(table, whereclause, **kwargs)
49d6d98083d4dee0cf7dac62e30ddf90cd383955
29,467
import random
import string


def oversized_junk():
    """
    Return a string of random lowercase letters that is over 4096 bytes long.
    """
    return "".join(random.choice(string.ascii_lowercase) for _ in range(4097))
a7bbaadde1948e1644f708c0166aa7833bb25037
29,468